Compare commits

2 Commits

Author SHA1 Message Date
74111e3da5 update: removed unused macros
not used anymore by lexer
2026-02-09 23:12:04 +01:00
6ad68a8752 update: added lexer 2026-02-09 23:11:33 +01:00
2 changed files with 131 additions and 45 deletions

View File

@@ -6,7 +6,7 @@
/* By: sede-san <sede-san@student.42madrid.com +#+ +:+ +#+ */ /* By: sede-san <sede-san@student.42madrid.com +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */ /* +#+#+#+#+#+ +#+ */
/* Created: 2025/10/22 19:03:51 by sede-san #+# #+# */ /* Created: 2025/10/22 19:03:51 by sede-san #+# #+# */
/* Updated: 2026/02/09 20:36:19 by sede-san ### ########.fr */ /* Updated: 2026/02/09 21:19:02 by sede-san ### ########.fr */
/* */ /* */
/* ************************************************************************** */ /* ************************************************************************** */
@@ -17,12 +17,6 @@
# include "core.h" # include "core.h"
# include "builtins.h" # include "builtins.h"
# define PIPE_STR "|"
# define REDIRECT_IN_STR "<"
# define REDIRECT_OUT_STR ">"
# define APPEND_STR ">>"
# define HEREDOC_STR "<<"
# define TOKENS_COUNT 5 # define TOKENS_COUNT 5
typedef enum e_token_type typedef enum e_token_type

View File

@@ -6,7 +6,7 @@
/* By: sede-san <sede-san@student.42madrid.com +#+ +:+ +#+ */ /* By: sede-san <sede-san@student.42madrid.com +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */ /* +#+#+#+#+#+ +#+ */
/* Created: 2026/02/09 18:56:41 by sede-san #+# #+# */ /* Created: 2026/02/09 18:56:41 by sede-san #+# #+# */
/* Updated: 2026/02/09 20:42:50 by sede-san ### ########.fr */ /* Updated: 2026/02/09 23:09:28 by sede-san ### ########.fr */
/* */ /* */
/* ************************************************************************** */ /* ************************************************************************** */
@@ -15,65 +15,157 @@
static t_token *tokenize(const char *line, size_t *start); static t_token *tokenize(const char *line, size_t *start);
static t_token_type get_token_type(const char *str); static t_token_type get_token_type(const char *str);
static t_token *token_new(t_token_type type, const char *text);
static void token_clear(t_token *token);
static t_token *read_token(t_token_type type, const char *line, size_t *i);
static t_token *read_word(const char *line, size_t *i);
static inline bool is_meta(char c);
t_list *lex( /**
const char *line * @brief Converts a command line string into a list of tokens.
) { *
t_list *tokens; * @return A list of tokens or NULL on error.
*/
t_list *lex(
const char *line)
{
t_list *tokens;
t_token *token; t_token *token;
size_t i; size_t i;
tokens = NULL; tokens = NULL;
i = 0; i = 0;
while (line[i] != '\0') while (line[i] != '\0')
{ {
// ignore spaces
while (ft_isspace(line[i])) while (ft_isspace(line[i]))
i++; i++;
// create token if (line[i] == '\0')
break;
token = tokenize(line, &i); token = tokenize(line, &i);
// add token to list ft_lstadd_back(&tokens, ft_lstnew(token));
if (token != NULL) if (token == NULL)
ft_lstadd_back(&tokens, ft_lstnew(token)); {
ft_lstclear(&tokens, (void (*)(void *))token_clear);
return (NULL);
}
} }
return (tokens); return (tokens);
} }
static t_token *tokenize(const char *line, size_t *start) { /**
t_token *token; * @return A new token or NULL on error.
t_token_type type; */
static t_token *tokenize(
const char *line,
size_t *start)
{
t_token *token;
t_token_type type;
token = NULL;
if (line == NULL || line[*start] == '\0') if (line == NULL || line[*start] == '\0')
return (NULL); return (NULL);
type = get_token_type(line + *start); type = get_token_type(line + *start);
(void)type; if (type != TOKEN_WORD)
// if (type != TOKEN_WORD) token = read_token(type, line, start);
// token = token_new(type, NULL); else
// else token = read_word(line, start);
// token = read_word(line, start);
// if (token == NULL)
// (*start) += ft_strlen(token->value);
return (token); return (token);
} }
static t_token_type get_token_type(const char *str) static t_token_type get_token_type(
const char *str
)
{ {
size_t i; if (str == NULL || str[0] == '\0')
static const t_map_entry tokens[TOKENS_COUNT] = { return (TOKEN_WORD);
{PIPE_STR, (void *)TOKEN_PIPE}, if (str[0] == '|')
{REDIRECT_IN_STR, (void *)TOKEN_REDIRECT_IN}, return (TOKEN_PIPE);
{REDIRECT_OUT_STR, (void *)TOKEN_REDIRECT_OUT}, if (str[0] == '<')
{APPEND_STR, (void *)TOKEN_APPEND},
{HEREDOC_STR, (void *)TOKEN_HEREDOC}
};
i = 0;
while (i < TOKENS_COUNT)
{ {
if (ft_strcmp(str, tokens[i].key) == 0) if (str[1] == '<')
return ((t_token_type)tokens[i].value); return (TOKEN_HEREDOC);
i++; return (TOKEN_REDIRECT_IN);
}
if (str[0] == '>')
{
if (str[1] == '>')
return (TOKEN_APPEND);
return (TOKEN_REDIRECT_OUT);
} }
return (TOKEN_WORD); return (TOKEN_WORD);
} }
/**
 * @brief Allocates a new token of the given type.
 *
 * Takes ownership of @p text. A TOKEN_WORD token requires a non-NULL
 * value, which is checked before allocating so nothing is malloc'd
 * just to be freed again.
 *
 * @param type The kind of token to create.
 * @param text The token's text (NULL for operator tokens).
 * @return A new token, or NULL on allocation failure or on a WORD
 *         token with no text.
 */
static t_token	*token_new(
	t_token_type type,
	const char *text)
{
	t_token	*token;

	if (type == TOKEN_WORD && text == NULL)
		return (NULL);
	token = (t_token *)malloc(sizeof(t_token));
	if (token == NULL)
		return (NULL);
	token->type = type;
	token->value = text;
	return (token);
}
/**
 * @brief Frees a token and the string it owns.
 *
 * Safe to call with NULL. free(NULL) is a no-op, so the value needs
 * no guard of its own.
 *
 * @param token The token to destroy (may be NULL).
 */
static void	token_clear(
	t_token *token)
{
	if (token == NULL)
		return ;
	free(token->value);
	free(token);
}
/**
 * @brief Creates a token for an operator and advances past it.
 *
 * Advances *i by exactly the operator's length: two characters for
 * APPEND (">>") and HEREDOC ("<<"), one for PIPE, REDIRECT_IN and
 * REDIRECT_OUT. Consuming a run of metacharacters here would merge
 * adjacent operators (e.g. ">|") into a single token and drop one.
 *
 * @param type The operator's token type (never TOKEN_WORD).
 * @param line The command line being lexed (unused; kept for symmetry
 *             with read_word).
 * @param i    In/out cursor into @p line.
 * @return A new token or NULL on error.
 */
static t_token	*read_token(
	t_token_type type,
	const char *line,
	size_t *i)
{
	(void)line;
	if (type == TOKEN_APPEND || type == TOKEN_HEREDOC)
		*i += 2;
	else
		*i += 1;
	return (token_new(type, NULL));
}
/**
 * @brief Reads a word token starting at *i, honoring quotes.
 *
 * Single- and double-quoted spans may contain spaces and
 * metacharacters; an unclosed quote runs to the end of the line.
 * *i is advanced past the word.
 *
 * @param line The command line being lexed.
 * @param i    In/out cursor into @p line.
 * @return A new TOKEN_WORD token, or NULL on allocation failure.
 */
static t_token	*read_word(
	const char *line,
	size_t *i)
{
	const size_t	start = *i;
	bool			in_single_quote;
	bool			in_double_quote;

	in_single_quote = false;
	in_double_quote = false;
	while (line[*i] != '\0')
	{
		if (line[*i] == '\'' && !in_double_quote)
			in_single_quote = !in_single_quote;
		else if (line[*i] == '"' && !in_single_quote)
			in_double_quote = !in_double_quote;
		else if (!in_single_quote && !in_double_quote
			&& (ft_isspace(line[*i]) || is_meta(line[*i])))
			break ;
		(*i)++;
	}
	return (token_new(TOKEN_WORD, ft_substr(line, start, *i - start)));
}
/* Returns true when c is a shell metacharacter handled by the lexer. */
static inline bool	is_meta(char c)
{
	const char	metachars[] = "|<>";
	size_t		k;

	k = 0;
	while (metachars[k] != '\0')
	{
		if (c == metachars[k])
			return (true);
		k++;
	}
	return (false);
}