Fixed all Norme errors in the lexer, parser, minishell core and builtins

This commit is contained in:
marcnava-42cursus
2026-02-14 00:02:03 +01:00
parent 7862f3e131
commit 6453abfda3
16 changed files with 667 additions and 571 deletions

View File

@@ -13,26 +13,20 @@
#include "core.h"
#include "parser.h"
static t_token *tokenize(const char *line, size_t *start);
static t_token_type get_token_type(const char *str);
static t_token *token_new(t_token_type type, char *text);
void token_clear(t_token *token);
static t_token *read_token(t_token_type type, const char *line, size_t *i);
static t_token *read_word(const char *line, size_t *i);
static inline bool is_meta(char c);
static t_token *tokenize(const char *line, size_t *start);
/**
* @brief Converts a command line string into a list of tokens.
*
* @return A list of tokens or NULL on error.
*/
t_list *lex(
t_list *lex(
const char *line
)
{
t_list *tokens;
t_token *token;
size_t i;
t_list *tokens;
t_token *token;
size_t i;
tokens = NULL;
i = 0;
@@ -56,13 +50,13 @@ t_list *lex(
/**
* @return A new token or NULL on error.
*/
static t_token *tokenize(
static t_token *tokenize(
const char *line,
size_t *start
)
{
t_token *token;
t_token_type type;
t_token *token;
t_token_type type;
if (line == NULL || line[*start] == '\0')
return (NULL);
@@ -73,106 +67,3 @@ static t_token *tokenize(
token = read_word(line, start);
return (token);
}
/**
 * @brief Maps the leading characters of a token string to its type.
 *
 * Recognises "|", "<", "<<", ">" and ">>"; anything else — including a
 * NULL or empty string — is classified as a plain word.
 *
 * @return The token type matching the string's first characters.
 */
static t_token_type	get_token_type(
	const char *str
)
{
	if (str == NULL || str[0] == '\0')
		return (TOKEN_WORD);
	if (str[0] == '|')
		return (TOKEN_PIPE);
	if (str[0] == '<' && str[1] == '<')
		return (TOKEN_HEREDOC);
	if (str[0] == '<')
		return (TOKEN_REDIRECT_IN);
	if (str[0] == '>' && str[1] == '>')
		return (TOKEN_APPEND);
	if (str[0] == '>')
		return (TOKEN_REDIRECT_OUT);
	return (TOKEN_WORD);
}
/**
 * @brief Allocates a new token holding the given type and value.
 *
 * Takes ownership of `text` on every path: if allocation fails the
 * string is freed here, so callers passing a fresh ft_substr() result
 * never leak it. A TOKEN_WORD with a NULL value is rejected (it means
 * the substring allocation itself failed upstream).
 *
 * @param type The token's type.
 * @param text Heap-allocated token text; ownership transfers to the token.
 * @return A new token or NULL on error.
 */
static t_token	*token_new(
	t_token_type type,
	char *text
)
{
	t_token	*token;

	token = (t_token *)malloc(sizeof(t_token));
	if (token == NULL)
	{
		free(text);
		return (NULL);
	}
	token->type = type;
	token->value = text;
	if (token->type == TOKEN_WORD && token->value == NULL)
	{
		free(token);
		return (NULL);
	}
	return (token);
}
/**
 * @brief Frees a token together with the string it owns.
 *
 * Safe to call with NULL.
 *
 * @param token The token to destroy, or NULL.
 */
void	token_clear(
	t_token *token
)
{
	if (token == NULL)
		return ;
	free(token->value);
	free(token);
}
/**
 * @brief Reads an operator token made of consecutive metacharacters.
 *
 * Consumes every metacharacter from the cursor, remembers the span's
 * length, then skips trailing whitespace so the cursor rests on the
 * start of the next token.
 *
 * @param type The pre-computed type of this operator token.
 * @param line The command line being tokenized.
 * @param i    In/out cursor into `line`.
 * @return A new token of the given type or NULL on error.
 */
static t_token	*read_token(
	t_token_type type,
	const char *line,
	size_t *i
)
{
	const size_t	start = *i;
	size_t			len;

	while (is_meta(line[*i]))
		(*i)++;
	len = *i - start;
	while (ft_isspace(line[*i]))
		(*i)++;
	return (token_new(type, ft_substr(line, start, len)));
}
/**
 * @brief Reads a word token, honouring single and double quotes.
 *
 * Quoted regions may contain whitespace and metacharacters without
 * terminating the word; an unquoted space or metacharacter ends it.
 * Uses ft_isspace (as read_token does) rather than libc isspace:
 * passing a plain, possibly-negative char to isspace is undefined
 * behavior, and the file's convention is the libft variant.
 *
 * @param line The command line being tokenized.
 * @param i    In/out cursor into `line`; left on the word terminator.
 * @return A new TOKEN_WORD token or NULL on error.
 */
static t_token	*read_word(
	const char *line,
	size_t *i
)
{
	const size_t	start = *i;
	bool			in_single_quote;
	bool			in_double_quote;

	in_single_quote = false;
	in_double_quote = false;
	while (line[*i] != '\0')
	{
		if (line[*i] == '\'' && !in_double_quote)
			in_single_quote = !in_single_quote;
		else if (line[*i] == '"' && !in_single_quote)
			in_double_quote = !in_double_quote;
		else if (!in_single_quote && !in_double_quote
			&& (ft_isspace(line[*i]) || is_meta(line[*i])))
			break ;
		(*i)++;
	}
	return (token_new(TOKEN_WORD, ft_substr(line, start, *i - start)));
}
/**
 * @brief Tells whether a character is a shell metacharacter.
 *
 * Metacharacters are the pipe and the two redirection characters.
 *
 * @param c The character to classify.
 * @return true for '|', '<' or '>', false otherwise.
 */
static inline bool	is_meta(
	char c
)
{
	if (c == '|')
		return (true);
	return (c == '<' || c == '>');
}