};
struct pptoken {
- int token;
+ Token token;
char *value;
/* a copy from the lexer */
union {
struct ftepp_t {
lex_file *lex;
- int token;
+ Token token;
unsigned int errors;
bool output_on;
ppcondition *conditions;
util_htrm(ftepp->macros, name, (void (*)(void*))&ppmacro_delete);
}
-static GMQCC_INLINE int ftepp_next(ftepp_t *ftepp)
+static GMQCC_INLINE Token ftepp_next(ftepp_t *ftepp)
{
return (ftepp->token = lex_do(ftepp->lex));
}
/* Important: this does not skip newlines! */
static bool ftepp_skipspace(ftepp_t *ftepp)
{
- if (ftepp->token != TOKEN_WHITE)
+ if (ftepp->token != Token::WHITE)
return true;
- while (ftepp_next(ftepp) == TOKEN_WHITE) {}
- if (ftepp->token >= TOKEN_EOF) {
+ while (ftepp_next(ftepp) == Token::WHITE) {}
+ if (ftepp->token >= Token::END) {
ftepp_error(ftepp, "unexpected end of preprocessor directive");
return false;
}
/* this one skips EOLs as well */
static bool ftepp_skipallwhite(ftepp_t *ftepp)
{
- if (ftepp->token != TOKEN_WHITE && ftepp->token != TOKEN_EOL)
+ if (ftepp->token != Token::WHITE && ftepp->token != Token::EOL)
return true;
do {
ftepp_next(ftepp);
- } while (ftepp->token == TOKEN_WHITE || ftepp->token == TOKEN_EOL);
- if (ftepp->token >= TOKEN_EOF) {
+ } while (ftepp->token == Token::WHITE || ftepp->token == Token::EOL);
+ if (ftepp->token >= Token::END) {
ftepp_error(ftepp, "unexpected end of preprocessor directive");
return false;
}
ftepp_next(ftepp);
if (!ftepp_skipspace(ftepp))
return false;
- if (ftepp->token == ')')
+ if (ftepp->token == Token::PAREN_CLOSE)
break;
switch (ftepp->token) {
- case TOKEN_IDENT:
- case TOKEN_TYPENAME:
- case TOKEN_KEYWORD:
+ case Token::IDENT:
+ case Token::TYPENAME:
+ case Token::KEYWORD:
vec_push(macro->params, util_strdup(ftepp_tokval(ftepp)));
break;
- case TOKEN_DOTS:
+ case Token::DOTS:
macro->variadic = true;
break;
default:
ftepp_next(ftepp);
if (!ftepp_skipspace(ftepp))
return false;
- if (macro->variadic && ftepp->token != ')') {
+ if (macro->variadic && ftepp->token != Token::PAREN_CLOSE) {
ftepp_error(ftepp, "cannot have parameters after the variadic parameters");
return false;
}
- } while (ftepp->token == ',');
+ } while (ftepp->token == Token::COMMA);
- if (ftepp->token != ')') {
+ if (ftepp->token != Token::PAREN_CLOSE) {
ftepp_error(ftepp, "expected closing paren after macro parameter list");
return false;
}
static bool ftepp_define_body(ftepp_t *ftepp, ppmacro *macro)
{
pptoken *ptok;
- while (ftepp->token != TOKEN_EOL && ftepp->token < TOKEN_EOF) {
+ while (ftepp->token != Token::EOL && ftepp->token < Token::END) {
bool subscript = false;
size_t index = 0;
if (macro->variadic && !strcmp(ftepp_tokval(ftepp), "__VA_ARGS__")) {
- subscript = !!(ftepp_next(ftepp) == '#');
+ subscript = !!(ftepp_next(ftepp) == Token::HASH);
- if (subscript && ftepp_next(ftepp) != '#') {
+ if (subscript && ftepp_next(ftepp) != Token::HASH) {
ftepp_error(ftepp, "expected `##` in __VA_ARGS__ for subscripting");
return false;
} else if (subscript) {
- if (ftepp_next(ftepp) == '[') {
- if (ftepp_next(ftepp) != TOKEN_INTCONST) {
+ if (ftepp_next(ftepp) == Token::BRACKET_OPEN) {
+ if (ftepp_next(ftepp) != Token::INTCONST) {
ftepp_error(ftepp, "expected index for __VA_ARGS__ subscript");
return false;
}
index = (int)strtol(ftepp_tokval(ftepp), nullptr, 10);
- if (ftepp_next(ftepp) != ']') {
+ if (ftepp_next(ftepp) != Token::BRACKET_CLOSE) {
ftepp_error(ftepp, "expected `]` in __VA_ARGS__ subscript");
return false;
}
* mark it as an array to be handled later as such and not
* as traditional __VA_ARGS__
*/
- ftepp->token = TOKEN_VA_ARGS_ARRAY;
+ ftepp->token = Token::VA_ARGS_ARRAY;
ptok = pptoken_make(ftepp);
ptok->constval.i = index;
vec_push(macro->output, ptok);
return false;
}
} else {
- int old = ftepp->token;
- ftepp->token = TOKEN_VA_ARGS;
+ auto old = ftepp->token;
+ ftepp->token = Token::VA_ARGS;
ptok = pptoken_make(ftepp);
vec_push(macro->output, ptok);
ftepp->token = old;
}
}
else if (macro->variadic && !strcmp(ftepp_tokval(ftepp), "__VA_COUNT__")) {
- ftepp->token = TOKEN_VA_COUNT;
+ ftepp->token = Token::VA_COUNT;
ptok = pptoken_make(ftepp);
vec_push(macro->output, ptok);
ftepp_next(ftepp);
}
}
/* recursive expansion can cause EOFs here */
- if (ftepp->token != TOKEN_EOL && ftepp->token != TOKEN_EOF) {
+ if (ftepp->token != Token::EOL && ftepp->token != Token::END) {
ftepp_error(ftepp, "unexpected junk after macro or unexpected end of file");
return false;
}
return false;
switch (ftepp->token) {
- case TOKEN_IDENT:
- case TOKEN_TYPENAME:
- case TOKEN_KEYWORD:
+ case Token::IDENT:
+ case Token::TYPENAME:
+ case Token::KEYWORD:
if (OPTS_FLAG(FTEPP_MATHDEFS)) {
for (i = 0; i < GMQCC_ARRAY_COUNT(ftepp_math_constants); i++) {
if (!strcmp(ftepp_math_constants[i][0], ftepp_tokval(ftepp))) {
(void)ftepp_next(ftepp);
- if (ftepp->token == '(') {
+ if (ftepp->token == Token::PAREN_OPEN) {
macro->has_params = true;
if (!ftepp_define_params(ftepp, macro)) {
ppmacro_delete(macro);
if (!ftepp_skipallwhite(ftepp))
return false;
- while (ftepp->token != ')') {
+ while (ftepp->token != Token::PAREN_CLOSE) {
mp.tokens = nullptr;
if (!ftepp_skipallwhite(ftepp))
return false;
- while (parens || ftepp->token != ',') {
- if (ftepp->token == '(')
+ while (parens || ftepp->token != Token::COMMA) {
+ if (ftepp->token == Token::PAREN_OPEN)
++parens;
- else if (ftepp->token == ')') {
+ else if (ftepp->token == Token::PAREN_CLOSE) {
if (!parens)
break;
--parens;
}
ptok = pptoken_make(ftepp);
vec_push(mp.tokens, ptok);
- if (ftepp_next(ftepp) >= TOKEN_EOF) {
+ if (ftepp_next(ftepp) >= Token::END) {
ftepp_error(ftepp, "unexpected end of file in macro call");
goto on_error;
}
}
vec_push(params, mp);
mp.tokens = nullptr;
- if (ftepp->token == ')')
+ if (ftepp->token == Token::PAREN_CLOSE)
break;
- if (ftepp->token != ',') {
+ if (ftepp->token != Token::COMMA) {
ftepp_error(ftepp, "expected closing paren or comma in macro call");
goto on_error;
}
- if (ftepp_next(ftepp) >= TOKEN_EOF) {
+ if (ftepp_next(ftepp) >= Token::END) {
ftepp_error(ftepp, "unexpected end of file in macro call");
goto on_error;
}
const char *ch;
chs[1] = 0;
switch (token->token) {
- case TOKEN_STRINGCONST:
+ case Token::STRINGCONST:
ch = token->value;
while (*ch) {
/* in preprocessor mode strings already are string,
++ch;
}
break;
- /*case TOKEN_WHITE:
+ /*case Token::WHITE:
ftepp_out(ftepp, " ", false);
break;*/
- case TOKEN_EOL:
+ case Token::EOL:
ftepp_out(ftepp, "\\n", false);
break;
default:
pptoken *out;
for (i = 0; i < vec_size(param->tokens); ++i) {
out = param->tokens[i];
- if (out->token == TOKEN_EOL)
+ if (out->token == Token::EOL)
ftepp_out(ftepp, "\n", false);
else {
ppmacro *find = ftepp_macro_find(ftepp, out->value);
bool old_inmacro;
bool strip = false;
- int nextok;
+ Token nextok;
if (vararg_start < vec_size(params))
varargs = vec_size(params) - vararg_start;
for (o = 0; o < vec_size(macro->output); ++o) {
pptoken *out = macro->output[o];
switch (out->token) {
- case TOKEN_VA_ARGS:
+ case Token::VA_ARGS:
if (!macro->variadic) {
- ftepp_error(ftepp, "internal preprocessor error: TOKEN_VA_ARGS in non-variadic macro");
+ ftepp_error(ftepp, "internal preprocessor error: Token::VA_ARGS in non-variadic macro");
vec_free(old_string);
return false;
}
}
break;
- case TOKEN_VA_ARGS_ARRAY:
+ case Token::VA_ARGS_ARRAY:
if ((size_t)out->constval.i >= varargs) {
ftepp_error(ftepp, "subscript of `[%u]` is out of bounds for `__VA_ARGS__`", out->constval.i);
vec_free(old_string);
ftepp_param_out(ftepp, &params[out->constval.i + vararg_start]);
break;
- case TOKEN_VA_COUNT:
+ case Token::VA_COUNT:
util_asprintf(&buffer, "%d", varargs);
ftepp_out(ftepp, buffer, false);
mem_d(buffer);
break;
- case TOKEN_IDENT:
- case TOKEN_TYPENAME:
- case TOKEN_KEYWORD:
+ case Token::IDENT:
+ case Token::TYPENAME:
+ case Token::KEYWORD:
if (!macro_params_find(macro, out->value, &pi)) {
ftepp_out(ftepp, out->value, false);
break;
} else
ftepp_param_out(ftepp, &params[pi]);
break;
- case '#':
+ case Token::HASH:
if (o + 1 < vec_size(macro->output)) {
nextok = macro->output[o+1]->token;
- if (nextok == '#') {
+ if (nextok == Token::HASH) {
/* raw concatenation */
++o;
strip = true;
break;
}
- if ( (nextok == TOKEN_IDENT ||
- nextok == TOKEN_KEYWORD ||
- nextok == TOKEN_TYPENAME) &&
+ if ( (nextok == Token::IDENT ||
+ nextok == Token::KEYWORD ||
+ nextok == Token::TYPENAME) &&
macro_params_find(macro, macro->output[o+1]->value, &pi))
{
++o;
}
ftepp_out(ftepp, "#", false);
break;
- case TOKEN_EOL:
+ case Token::EOL:
ftepp_out(ftepp, "\n", false);
break;
default:
buffer = out->value;
#define buffer_stripable(X) ((X) == ' ' || (X) == '\t')
- if (vec_size(macro->output) > o + 1 && macro->output[o+1]->token == '#' && buffer_stripable(*buffer))
+ if (vec_size(macro->output) > o + 1 && macro->output[o+1]->token == Token::HASH && buffer_stripable(*buffer))
buffer++;
if (strip) {
while (buffer_stripable(*buffer)) buffer++;
if (!ftepp_skipallwhite(ftepp))
return false;
- if (ftepp->token != '(') {
+ if (ftepp->token != Token::PAREN_OPEN) {
ftepp_error(ftepp, "expected macro parameters in parenthesis");
return false;
}
if (!ftepp_skipspace(ftepp))
return false;
- while (ftepp->token == '!') {
+ while (ftepp->token == Token::NOT) {
wasnot = true;
ftepp_next(ftepp);
if (!ftepp_skipspace(ftepp))
return false;
}
- if (ftepp->token == TOKEN_OPERATOR && !strcmp(ftepp_tokval(ftepp), "-"))
+ if (ftepp->token == Token::OPERATOR && !strcmp(ftepp_tokval(ftepp), "-"))
{
wasneg = true;
ftepp_next(ftepp);
}
switch (ftepp->token) {
- case TOKEN_IDENT:
- case TOKEN_TYPENAME:
- case TOKEN_KEYWORD:
+ case Token::IDENT:
+ case Token::TYPENAME:
+ case Token::KEYWORD:
if (!strcmp(ftepp_tokval(ftepp), "defined")) {
ftepp_next(ftepp);
if (!ftepp_skipspace(ftepp))
return false;
- if (ftepp->token != '(') {
+ if (ftepp->token != Token::PAREN_OPEN) {
ftepp_error(ftepp, "`defined` keyword in #if requires a macro name in parenthesis");
return false;
}
ftepp_next(ftepp);
if (!ftepp_skipspace(ftepp))
return false;
- if (ftepp->token != TOKEN_IDENT &&
- ftepp->token != TOKEN_TYPENAME &&
- ftepp->token != TOKEN_KEYWORD)
+ if (ftepp->token != Token::IDENT &&
+ ftepp->token != Token::TYPENAME &&
+ ftepp->token != Token::KEYWORD)
{
ftepp_error(ftepp, "defined() used on an unexpected token type");
return false;
ftepp_next(ftepp);
if (!ftepp_skipspace(ftepp))
return false;
- if (ftepp->token != ')') {
+ if (ftepp->token != Token::PAREN_CLOSE) {
ftepp_error(ftepp, "expected closing paren");
return false;
}
} else {
/* This does not expand recursively! */
switch (macro->output[0]->token) {
- case TOKEN_INTCONST:
+ case Token::INTCONST:
*value_out = macro->output[0]->constval.i;
*out = !!(macro->output[0]->constval.i);
break;
- case TOKEN_FLOATCONST:
+ case Token::FLOATCONST:
*value_out = macro->output[0]->constval.f;
*out = !!(macro->output[0]->constval.f);
break;
}
}
break;
- case TOKEN_STRINGCONST:
+ case Token::STRINGCONST:
*value_out = 0;
*out = false;
break;
- case TOKEN_INTCONST:
+ case Token::INTCONST:
*value_out = ftepp->lex->tok.constval.i;
*out = !!(ftepp->lex->tok.constval.i);
break;
- case TOKEN_FLOATCONST:
+ case Token::FLOATCONST:
*value_out = ftepp->lex->tok.constval.f;
*out = !!(ftepp->lex->tok.constval.f);
break;
- case '(':
+ case Token::PAREN_OPEN:
ftepp_next(ftepp);
if (!ftepp_if_expr(ftepp, out, value_out))
return false;
- if (ftepp->token != ')') {
+ if (ftepp->token != Token::PAREN_CLOSE) {
ftepp_error(ftepp, "expected closing paren in #if expression");
return false;
}
if (!ftepp_if_op(ftepp))
return false;
- if (ftepp->token == ')' || ftepp->token != TOKEN_OPERATOR)
+ if (ftepp->token == Token::PAREN_CLOSE || ftepp->token != Token::OPERATOR)
return true;
/* FTEQCC is all right-associative and no precedence here */
double nextvalue;
(void)nextvalue;
- if (!ftepp_next(ftepp))
+ if (ftepp_next(ftepp) == Token::NONE)
return false;
if (!ftepp_if_expr(ftepp, &next, &nextvalue))
return false;
const char opc1 = ftepp_tokval(ftepp)[1];
double other;
- if (!ftepp_next(ftepp))
+ if (ftepp_next(ftepp) == Token::NONE)
return false;
if (!ftepp_if_expr(ftepp, &next, &other))
return false;
if (!ftepp_skipspace(ftepp))
return false;
- if (ftepp->token == TOKEN_EOL) {
+ if (ftepp->token == Token::EOL) {
ftepp_error(ftepp, "expected expression for #if-directive");
return false;
}
return false;
switch (ftepp->token) {
- case TOKEN_IDENT:
- case TOKEN_TYPENAME:
- case TOKEN_KEYWORD:
+ case Token::IDENT:
+ case Token::TYPENAME:
+ case Token::KEYWORD:
macro = ftepp_macro_find(ftepp, ftepp_tokval(ftepp));
break;
default:
if (!ftepp_skipspace(ftepp))
return false;
/* relaxing this condition
- if (ftepp->token != TOKEN_EOL && ftepp->token != TOKEN_EOF) {
+ if (ftepp->token != Token::EOL && ftepp->token != Token::END) {
ftepp_error(ftepp, "stray tokens after #ifdef");
return false;
}
if (ftepp->output_on) {
switch (ftepp->token) {
- case TOKEN_IDENT:
- case TOKEN_TYPENAME:
- case TOKEN_KEYWORD:
+ case Token::IDENT:
+ case Token::TYPENAME:
+ case Token::KEYWORD:
ftepp_macro_delete(ftepp, ftepp_tokval(ftepp));
break;
default:
if (!ftepp_skipspace(ftepp))
return false;
/* relaxing this condition
- if (ftepp->token != TOKEN_EOL && ftepp->token != TOKEN_EOF) {
+ if (ftepp->token != Token::EOL && ftepp->token != Token::END) {
ftepp_error(ftepp, "stray tokens after #ifdef");
return false;
}
return false;
/* handle the odd non string constant case so it works like C */
- if (ftepp->token != TOKEN_STRINGCONST) {
+ if (ftepp->token != Token::STRINGCONST) {
bool store = false;
vec_append(message, 8, "#warning");
ftepp_next(ftepp);
- while (ftepp->token != TOKEN_EOL) {
+ while (ftepp->token != Token::EOL) {
vec_append(message, strlen(ftepp_tokval(ftepp)), ftepp_tokval(ftepp));
ftepp_next(ftepp);
}
return;
/* handle the odd non string constant case so it works like C */
- if (ftepp->token != TOKEN_STRINGCONST) {
+ if (ftepp->token != Token::STRINGCONST) {
vec_append(message, 6, "#error");
ftepp_next(ftepp);
- while (ftepp->token != TOKEN_EOL) {
+ while (ftepp->token != Token::EOL) {
vec_append(message, strlen(ftepp_tokval(ftepp)), ftepp_tokval(ftepp));
ftepp_next(ftepp);
}
return;
/* handle the odd non string constant case so it works like C */
- if (ftepp->token != TOKEN_STRINGCONST) {
+ if (ftepp->token != Token::STRINGCONST) {
vec_append(message, 8, "#message");
ftepp_next(ftepp);
- while (ftepp->token != TOKEN_EOL) {
+ while (ftepp->token != Token::EOL) {
vec_append(message, strlen(ftepp_tokval(ftepp)), ftepp_tokval(ftepp));
ftepp_next(ftepp);
}
if (!ftepp_skipspace(ftepp))
return false;
- if (ftepp->token != TOKEN_STRINGCONST) {
+ if (ftepp->token != Token::STRINGCONST) {
ppmacro *macro = ftepp_macro_find(ftepp, ftepp_tokval(ftepp));
if (macro) {
char *backup = ftepp->output_string;
(void)ftepp_next(ftepp);
if (!ftepp_skipspace(ftepp))
return false;
- if (ftepp->token != TOKEN_EOL) {
+ if (ftepp->token != Token::EOL) {
ftepp_error(ftepp, "stray tokens after #include");
return false;
}
return false;
switch (ftepp->token) {
- case TOKEN_KEYWORD:
- case TOKEN_IDENT:
- case TOKEN_TYPENAME:
+ case Token::KEYWORD:
+ case Token::IDENT:
+ case Token::TYPENAME:
if (!strcmp(ftepp_tokval(ftepp), "define")) {
ftepp_inmacro(ftepp, "define");
return ftepp_define(ftepp);
default:
ftepp_error(ftepp, "unexpected preprocessor token: `%s`", ftepp_tokval(ftepp));
return false;
- case TOKEN_EOL:
+ case Token::EOL:
ftepp_errorat(ftepp, ctx, "empty preprocessor directive");
return false;
- case TOKEN_EOF:
+ case Token::END:
- ftepp_error(ftepp, "missing newline at end of file", ftepp_tokval(ftepp));
+ ftepp_error(ftepp, "missing newline at end of file");
return false;
/* Builtins! Don't forget the builtins! */
- case TOKEN_INTCONST:
- case TOKEN_FLOATCONST:
+ case Token::INTCONST:
+ case Token::FLOATCONST:
ftepp_out(ftepp, "#", false);
return true;
}
ftepp_next(ftepp);
do
{
- if (ftepp->token >= TOKEN_EOF)
+ if (ftepp->token >= Token::END)
break;
switch (ftepp->token) {
- case TOKEN_KEYWORD:
- case TOKEN_IDENT:
- case TOKEN_TYPENAME:
+ case Token::KEYWORD:
+ case Token::IDENT:
+ case Token::TYPENAME:
/* is it a predef? */
if (OPTS_FLAG(FTEPP_PREDEFS)) {
char *(*predef)(ftepp_t*) = ftepp_predef(ftepp_tokval(ftepp));
break;
}
if (!ftepp_macro_call(ftepp, macro))
- ftepp->token = TOKEN_ERROR;
+ ftepp->token = Token::ERROR;
break;
- case '#':
+ case Token::HASH:
if (!newline) {
ftepp_out(ftepp, ftepp_tokval(ftepp), false);
ftepp_next(ftepp);
break;
}
ftepp->lex->flags.mergelines = true;
- if (ftepp_next(ftepp) >= TOKEN_EOF) {
+ if (ftepp_next(ftepp) >= Token::END) {
ftepp_error(ftepp, "error in preprocessor directive");
- ftepp->token = TOKEN_ERROR;
+ ftepp->token = Token::ERROR;
break;
}
if (!ftepp_hash(ftepp))
- ftepp->token = TOKEN_ERROR;
+ ftepp->token = Token::ERROR;
ftepp->lex->flags.mergelines = false;
break;
- case TOKEN_EOL:
+ case Token::EOL:
newline = true;
ftepp_out(ftepp, "\n", true);
ftepp_next(ftepp);
break;
- case TOKEN_WHITE:
+ case Token::WHITE:
/* same as default but don't set newline=false */
ftepp_out(ftepp, ftepp_tokval(ftepp), true);
ftepp_next(ftepp);
ftepp_next(ftepp);
break;
}
- } while (!ftepp->errors && ftepp->token < TOKEN_EOF);
+ } while (!ftepp->errors && ftepp->token < Token::END);
/* force a 0 at the end but don't count it as added to the output */
vec_push(ftepp->output_string, 0);
vec_shrinkby(ftepp->output_string, 1);
- return (ftepp->token == TOKEN_EOF);
+ return (ftepp->token == Token::END);
}
/* Like in parser.c - files keep the previous state so we have one global
#define util_islower(a) (((unsigned)(a)-'a') < 26)
#define util_isupper(a) (((unsigned)(a)-'A') < 26)
#define util_isprint(a) (((unsigned)(a)-0x20) < 0x5F)
-#define util_isspace(a) (((a) >= 9 && (a) <= 13) || (a) == ' ')
+#define util_isspace(a) (((a) >= 9 && (a) <= 13) || (a) == Token::WS)
bool util_strupper(const char *);
bool util_strdigit(const char *);
qcany_t* prog_getedict (qc_program_t *prog, qcint_t e);
qcint_t prog_tempstring(qc_program_t *prog, const char *_str);
-
-/* parser.c */
-struct parser_t;
-parser_t *parser_create(void);
-bool parser_compile_file(parser_t &parser, const char *);
-bool parser_compile_string(parser_t &parser, const char *, const char *, size_t);
-bool parser_finish(parser_t &parser, const char *);
-
/* ftepp.c */
struct ftepp_t;
ftepp_t *ftepp_create (void);
lex->tok.ctx.column = lex->column;
}
-static void lex_ungetch(lex_file *lex, int ch);
-static int lex_getch(lex_file *lex);
+static void lex_ungetch(lex_file *lex, Token ch);
+static Token lex_getch(lex_file *lex);
lex_file* lex_open(const char *file)
{
/* handle BOM */
if ((read = (lex_getch(lex) << 16) | (lex_getch(lex) << 8) | lex_getch(lex)) != 0xEFBBBF) {
- lex_ungetch(lex, (read & 0x0000FF));
- lex_ungetch(lex, (read & 0x00FF00) >> 8);
- lex_ungetch(lex, (read & 0xFF0000) >> 16);
+ lex_ungetch(lex, static_cast<Token>((read & 0x0000FF)));
+ lex_ungetch(lex, static_cast<Token>((read & 0x00FF00) >> 8));
+ lex_ungetch(lex, static_cast<Token>((read & 0xFF0000) >> 16));
} else {
/*
* otherwise the lexer has advanced 3 bytes for the BOM, we need
-static int lex_fgetc(lex_file *lex)
+static Token lex_fgetc(lex_file *lex)
{
if (lex->file) {
lex->column++;
- return fgetc(lex->file);
+ auto c = fgetc(lex->file);
+ return c == EOF ? Token::END : static_cast<Token>(c);
}
if (lex->open_string) {
if (lex->open_string_pos >= lex->open_string_length)
- return EOF;
+ return Token::END;
lex->column++;
- return lex->open_string[lex->open_string_pos++];
+ auto c = lex->open_string[lex->open_string_pos++];
+ return static_cast<Token>(c);
}
- return EOF;
+ return Token::END;
}
/* Get or put-back data
* are working on.
* The are merely wrapping get/put in order to count line numbers.
*/
-static int lex_try_trigraph(lex_file *lex, int old)
+static Token lex_try_trigraph(lex_file *lex, Token old)
{
- int c2, c3;
- c2 = lex_fgetc(lex);
- if (!lex->push_line && c2 == '\n') {
+ auto c2 = lex_fgetc(lex);
+ if (!lex->push_line && c2 == Token::LF) {
lex->line++;
lex->column = 0;
}
- if (c2 != '?') {
+ if (c2 != Token::QUESTION) {
lex_ungetch(lex, c2);
return old;
}
- c3 = lex_fgetc(lex);
- if (!lex->push_line && c3 == '\n') {
+ auto c3 = lex_fgetc(lex);
+ if (!lex->push_line && c3 == Token::LF) {
lex->line++;
lex->column = 0;
}
switch (c3) {
- case '=': return '#';
- case '/': return '\\';
- case '\'': return '^';
- case '(': return '[';
- case ')': return ']';
- case '!': return '|';
- case '<': return '{';
- case '>': return '}';
- case '-': return '~';
+ case Token::EQ: return Token::HASH;
+ case Token::DIV: return Token::BACKSLASH;
+ case Token::QUOT_SINGLE: return Token::XOR;
+ case Token::PAREN_OPEN: return Token::BRACKET_OPEN;
+ case Token::PAREN_CLOSE: return Token::BRACKET_CLOSE;
+ case Token::NOT: return Token::OR;
+ case Token::LT: return Token::BRACE_OPEN;
+ case Token::GT: return Token::BRACE_CLOSE;
+ case Token::SUB: return Token::BITNOT;
default:
lex_ungetch(lex, c3);
lex_ungetch(lex, c2);
}
}
-static int lex_try_digraph(lex_file *lex, int ch)
+static Token lex_try_digraph(lex_file *lex, Token ch)
{
- int c2;
- c2 = lex_fgetc(lex);
+ auto c2 = lex_fgetc(lex);
/* we just used fgetc() so count lines
* need to offset a \n the ungetch would recognize
*/
- if (!lex->push_line && c2 == '\n')
+ if (!lex->push_line && c2 == Token::LF)
lex->line++;
- if (ch == '<' && c2 == ':')
- return '[';
- else if (ch == ':' && c2 == '>')
- return ']';
- else if (ch == '<' && c2 == '%')
- return '{';
- else if (ch == '%' && c2 == '>')
- return '}';
- else if (ch == '%' && c2 == ':')
- return '#';
+ if (ch == Token::LT && c2 == Token::COLON)
+ return Token::BRACKET_OPEN;
+ else if (ch == Token::COLON && c2 == Token::GT)
+ return Token::BRACKET_CLOSE;
+ else if (ch == Token::LT && c2 == Token::MOD)
+ return Token::BRACE_OPEN;
+ else if (ch == Token::MOD && c2 == Token::GT)
+ return Token::BRACE_CLOSE;
+ else if (ch == Token::MOD && c2 == Token::COLON)
+ return Token::HASH;
lex_ungetch(lex, c2);
return ch;
}
-static int lex_getch(lex_file *lex)
+static Token lex_getch(lex_file *lex)
{
- int ch;
-
if (lex->peekpos) {
lex->peekpos--;
- if (!lex->push_line && lex->peek[lex->peekpos] == '\n') {
+ if (!lex->push_line && lex->peek[lex->peekpos] == Token::LF) {
lex->line++;
lex->column = 0;
}
return lex->peek[lex->peekpos];
}
- ch = lex_fgetc(lex);
- if (!lex->push_line && ch == '\n') {
+ auto ch = lex_fgetc(lex);
+ if (!lex->push_line && ch == Token::LF) {
lex->line++;
lex->column = 0;
}
- else if (ch == '?')
+ else if (ch == Token::QUESTION)
return lex_try_trigraph(lex, ch);
- else if (!lex->flags.nodigraphs && (ch == '<' || ch == ':' || ch == '%'))
+ else if (!lex->flags.nodigraphs && (ch == Token::LT || ch == Token::COLON || ch == Token::MOD))
return lex_try_digraph(lex, ch);
return ch;
}
-static void lex_ungetch(lex_file *lex, int ch)
+static void lex_ungetch(lex_file *lex, Token ch)
{
lex->peek[lex->peekpos++] = ch;
lex->column--;
- if (!lex->push_line && ch == '\n') {
+ if (!lex->push_line && ch == Token::LF) {
lex->line--;
lex->column = 0;
}
static bool lex_try_pragma(lex_file *lex)
{
- int ch;
char *pragma = nullptr;
char *command = nullptr;
char *param = nullptr;
line = lex->line;
- ch = lex_getch(lex);
- if (ch != '#') {
+ auto ch = lex_getch(lex);
+ if (ch != Token::HASH) {
lex_ungetch(lex, ch);
return false;
}
vec_push(pragma, ch);
vec_push(pragma, 0);
- if (ch != ' ' || strcmp(pragma, "pragma")) {
+ if (ch != Token::WS || strcmp(pragma, "pragma")) {
lex_ungetch(lex, ch);
goto unroll;
}
vec_push(command, ch);
vec_push(command, 0);
- if (ch != '(') {
+ if (ch != Token::PAREN_OPEN) {
lex_ungetch(lex, ch);
goto unroll;
}
- for (ch = lex_getch(lex); vec_size(param) < 1024 && ch != ')' && ch != '\n'; ch = lex_getch(lex))
+ for (ch = lex_getch(lex); vec_size(param) < 1024 && ch != Token::PAREN_CLOSE && ch != Token::LF; ch = lex_getch(lex))
vec_push(param, ch);
vec_push(param, 0);
- if (ch != ')') {
+ if (ch != Token::PAREN_CLOSE) {
lex_ungetch(lex, ch);
goto unroll;
}
goto unroll;
lex->line = line;
- while (ch != '\n' && ch != EOF)
+ while (ch != Token::LF && ch != Token::END)
ch = lex_getch(lex);
vec_free(command);
vec_free(param);
if (command) {
vec_pop(command);
while (vec_size(command)) {
- lex_ungetch(lex, (unsigned char)vec_last(command));
+ lex_ungetch(lex, static_cast<Token>(vec_last(command)));
vec_pop(command);
}
vec_free(command);
- lex_ungetch(lex, ' ');
+ lex_ungetch(lex, Token::WS);
}
if (param) {
vec_pop(param);
while (vec_size(param)) {
- lex_ungetch(lex, (unsigned char)vec_last(param));
+ lex_ungetch(lex, static_cast<Token>(vec_last(param)));
vec_pop(param);
}
vec_free(param);
- lex_ungetch(lex, ' ');
+ lex_ungetch(lex, Token::WS);
}
if (pragma) {
vec_pop(pragma);
while (vec_size(pragma)) {
- lex_ungetch(lex, (unsigned char)vec_last(pragma));
+ lex_ungetch(lex, static_cast<Token>(vec_last(pragma)));
vec_pop(pragma);
}
vec_free(pragma);
}
- lex_ungetch(lex, '#');
+ lex_ungetch(lex, Token::HASH);
lex->line = line;
return false;
* here is to store the line of the first character after skipping
* the initial whitespace in lex->sline, this happens in lex_do.
*/
-static int lex_skipwhite(lex_file *lex, bool hadwhite)
+static Token lex_skipwhite(lex_file *lex, bool hadwhite)
{
- int ch = 0;
+ Token ch;
bool haswhite = hadwhite;
do
{
ch = lex_getch(lex);
- while (ch != EOF && util_isspace(ch)) {
- if (ch == '\n') {
+ while (ch != Token::END && util_isspace(ch)) {
+ if (ch == Token::LF) {
if (lex_try_pragma(lex))
continue;
}
if (lex->flags.preprocessing) {
- if (ch == '\n') {
+ if (ch == Token::LF) {
/* end-of-line */
/* see if there was whitespace first */
if (haswhite) { /* (vec_size(lex->tok.value)) { */
lex_ungetch(lex, ch);
lex_endtoken(lex);
- return TOKEN_WHITE;
+ return Token::WHITE;
}
/* otherwise return EOL */
- return TOKEN_EOL;
+ return Token::EOL;
}
haswhite = true;
lex_tokench(lex, ch);
ch = lex_getch(lex);
}
- if (ch == '/') {
+ if (ch == Token::DIV) {
ch = lex_getch(lex);
- if (ch == '/')
+ if (ch == Token::DIV)
{
/* one line comment */
ch = lex_getch(lex);
if (lex->flags.preprocessing) {
haswhite = true;
- lex_tokench(lex, ' ');
- lex_tokench(lex, ' ');
+ lex_tokench(lex, Token::WS);
+ lex_tokench(lex, Token::WS);
}
- while (ch != EOF && ch != '\n') {
+ while (ch != Token::END && ch != Token::LF) {
if (lex->flags.preprocessing)
- lex_tokench(lex, ' '); /* ch); */
+ lex_tokench(lex, Token::WS); /* ch); */
ch = lex_getch(lex);
}
if (lex->flags.preprocessing) {
- lex_ungetch(lex, '\n');
+ lex_ungetch(lex, Token::LF);
lex_endtoken(lex);
- return TOKEN_WHITE;
+ return Token::WHITE;
}
continue;
}
- if (ch == '*')
+ if (ch == Token::MUL)
{
/* multiline comment */
if (lex->flags.preprocessing) {
haswhite = true;
- lex_tokench(lex, ' ');
- lex_tokench(lex, ' ');
+ lex_tokench(lex, Token::WS);
+ lex_tokench(lex, Token::WS);
}
- while (ch != EOF)
+ while (ch != Token::END)
{
ch = lex_getch(lex);
- if (ch == '*') {
+ if (ch == Token::MUL) {
ch = lex_getch(lex);
- if (ch == '/') {
+ if (ch == Token::DIV) {
if (lex->flags.preprocessing) {
- lex_tokench(lex, ' ');
- lex_tokench(lex, ' ');
+ lex_tokench(lex, Token::WS);
+ lex_tokench(lex, Token::WS);
}
break;
}
lex_ungetch(lex, ch);
}
if (lex->flags.preprocessing) {
- if (ch == '\n')
- lex_tokench(lex, '\n');
+ if (ch == Token::LF)
+ lex_tokench(lex, Token::LF);
else
- lex_tokench(lex, ' ');
+ lex_tokench(lex, Token::WS);
}
}
- ch = ' '; /* cause TRUE in the isspace check */
+ ch = Token::WS; /* cause TRUE in the isspace check */
continue;
}
/* Otherwise roll back to the slash and break out of the loop */
lex_ungetch(lex, ch);
- ch = '/';
+ ch = Token::DIV;
break;
}
- } while (ch != EOF && util_isspace(ch));
+ } while (ch != Token::END && util_isspace(ch));
if (haswhite) {
lex_endtoken(lex);
lex_ungetch(lex, ch);
- return TOKEN_WHITE;
+ return Token::WHITE;
}
return ch;
}
/* Get a token */
static bool GMQCC_WARN lex_finish_ident(lex_file *lex)
{
- int ch;
-
- ch = lex_getch(lex);
- while (ch != EOF && isident(ch))
+ auto ch = lex_getch(lex);
+ while (ch != Token::END && isident(ch))
{
lex_tokench(lex, ch);
ch = lex_getch(lex);
/* read one ident for the frame list */
static int lex_parse_frame(lex_file *lex)
{
- int ch;
-
lex_token_new(lex);
- ch = lex_getch(lex);
- while (ch != EOF && ch != '\n' && util_isspace(ch))
+ auto ch = lex_getch(lex);
+ while (ch != Token::END && ch != Token::LF && util_isspace(ch))
ch = lex_getch(lex);
- if (ch == '\n')
+ if (ch == Token::LF)
return 1;
if (!isident_start(ch)) {
return false;
}
-static int GMQCC_WARN lex_finish_string(lex_file *lex, int quote)
+static Token GMQCC_WARN lex_finish_string(lex_file *lex, int quote)
{
utf8ch_t chr = 0;
int ch = 0, texttype = 0;
- int nextch;
+ Token nextch = Token::NONE;
bool hex;
bool oct;
char u8buf[8]; /* way more than enough */
int u8len, uc;
- while (ch != EOF)
+ while (ch != Token::END)
{
ch = lex_getch(lex);
if (ch == quote)
- return TOKEN_STRINGCONST;
+ return Token::STRINGCONST;
if (lex->flags.preprocessing && ch == '\\') {
lex_tokench(lex, ch);
ch = lex_getch(lex);
- if (ch == EOF) {
+ if (ch == Token::END) {
lexerror(lex, "unexpected end of file");
- lex_ungetch(lex, EOF); /* next token to be TOKEN_EOF */
- return (lex->tok.ttype = TOKEN_ERROR);
+ lex_ungetch(lex, Token::END); /* next token to be Token::END */
+ return (lex->tok.ttype = Token::ERROR);
}
lex_tokench(lex, ch);
}
else if (ch == '\\') {
ch = lex_getch(lex);
- if (ch == EOF) {
+ if (ch == Token::END) {
lexerror(lex, "unexpected end of file");
- lex_ungetch(lex, EOF); /* next token to be TOKEN_EOF */
- return (lex->tok.ttype = TOKEN_ERROR);
+ lex_ungetch(lex, Token::END); /* next token to be Token::END */
+ return (lex->tok.ttype = Token::ERROR);
}
switch (ch) {
else {
lexerror(lex, "bad character code");
lex_ungetch(lex, nextch);
- return (lex->tok.ttype = TOKEN_ERROR);
+ return (lex->tok.ttype = Token::ERROR);
}
ch *= 0x10;
else {
lexerror(lex, "bad character code");
lex_ungetch(lex, nextch);
- return (lex->tok.ttype = TOKEN_ERROR);
+ return (lex->tok.ttype = Token::ERROR);
}
break;
oct = (nextch == '0');
if (!hex && !oct)
lex_ungetch(lex, nextch);
- for (nextch = lex_getch(lex); nextch != '}'; nextch = lex_getch(lex)) {
+ for (nextch = lex_getch(lex); nextch != Token::BRACE_CLOSE; nextch = lex_getch(lex)) {
if (!hex && !oct) {
if (nextch >= '0' && nextch <= '9')
chr = chr * 10 + nextch - '0';
else {
lexerror(lex, "bad character code");
- return (lex->tok.ttype = TOKEN_ERROR);
+ return (lex->tok.ttype = Token::ERROR);
}
} else if (!oct) {
if (nextch >= '0' && nextch <= '9')
chr = chr * 0x10 + nextch - 'A' + 10;
else {
lexerror(lex, "bad character code");
- return (lex->tok.ttype = TOKEN_ERROR);
+ return (lex->tok.ttype = Token::ERROR);
}
} else {
if (nextch >= '0' && nextch <= '9')
- chr = chr * 8 + chr - '0';
+ chr = chr * 8 + nextch - '0';
else {
lexerror(lex, "bad character code");
- return (lex->tok.ttype = TOKEN_ERROR);
+ return (lex->tok.ttype = Token::ERROR);
}
}
if (chr > 0x10FFFF || (!OPTS_FLAG(UTF8) && chr > 255))
{
lexerror(lex, "character code out of range");
- return (lex->tok.ttype = TOKEN_ERROR);
+ return (lex->tok.ttype = Token::ERROR);
}
}
if (OPTS_FLAG(UTF8) && chr >= 128) {
lex_tokench(lex, ch);
}
lexerror(lex, "unexpected end of file within string constant");
- lex_ungetch(lex, EOF); /* next token to be TOKEN_EOF */
- return (lex->tok.ttype = TOKEN_ERROR);
+ lex_ungetch(lex, Token::END); /* next token to be Token::END */
+ return (lex->tok.ttype = Token::ERROR);
}
-static int GMQCC_WARN lex_finish_digit(lex_file *lex, int lastch)
+static Token GMQCC_WARN lex_finish_digit(lex_file *lex, Token lastch)
{
bool ishex = false;
- int ch = lastch;
+ Token ch = lastch;
/* parse a number... */
- if (ch == '.')
- lex->tok.ttype = TOKEN_FLOATCONST;
+ if (ch == Token::DOT)
+ lex->tok.ttype = Token::FLOATCONST;
else
- lex->tok.ttype = TOKEN_INTCONST;
+ lex->tok.ttype = Token::INTCONST;
lex_tokench(lex, ch);
ch = lex_getch(lex);
- if (ch != '.' && !util_isdigit(ch))
+ if (ch != Token::DOT && !util_isdigit(ch))
{
if (lastch != '0' || ch != 'x')
{
/* EOF would have been caught above */
- if (ch != '.')
+ if (ch != Token::DOT)
{
lex_tokench(lex, ch);
ch = lex_getch(lex);
}
}
/* NOT else, '.' can come from above as well */
- if (lex->tok.ttype != TOKEN_FLOATCONST && ch == '.' && !ishex)
+ if (lex->tok.ttype != Token::FLOATCONST && ch == Token::DOT && !ishex)
{
/* Allow floating comma in non-hex mode */
- lex->tok.ttype = TOKEN_FLOATCONST;
+ lex->tok.ttype = Token::FLOATCONST;
lex_tokench(lex, ch);
/* continue digits-only */
}
/* put back the last character */
/* but do not put back the trailing 'f' or a float */
- if (lex->tok.ttype == TOKEN_FLOATCONST && ch == 'f')
+ if (lex->tok.ttype == Token::FLOATCONST && ch == 'f')
ch = lex_getch(lex);
/* generally we don't want words to follow numbers: */
if (isident(ch)) {
lexerror(lex, "unexpected trailing characters after number");
- return (lex->tok.ttype = TOKEN_ERROR);
+ return (lex->tok.ttype = Token::ERROR);
}
lex_ungetch(lex, ch);
lex_endtoken(lex);
- if (lex->tok.ttype == TOKEN_FLOATCONST)
+ if (lex->tok.ttype == Token::FLOATCONST)
lex->tok.constval.f = strtod(lex->tok.value, nullptr);
else
lex->tok.constval.i = strtol(lex->tok.value, nullptr, 0);
return lex->tok.ttype;
}
-int lex_do(lex_file *lex)
+Token lex_do(lex_file *lex)
{
- int ch, nextch, thirdch;
+ Token ch, nextch, thirdch;
bool hadwhite = false;
lex_token_new(lex);
while (true) {
ch = lex_skipwhite(lex, hadwhite);
hadwhite = true;
- if (!lex->flags.mergelines || ch != '\\')
+ if (!lex->flags.mergelines || ch != Token::BACKSLASH)
break;
ch = lex_getch(lex);
- if (ch == '\r')
+ if (ch == Token::CR)
ch = lex_getch(lex);
- if (ch != '\n') {
+ if (ch != Token::LF) {
lex_ungetch(lex, ch);
- ch = '\\';
+ ch = Token::BACKSLASH;
break;
}
/* we reached a linemerge */
lex_tokench(lex, '\n');
- continue;
}
- if (lex->flags.preprocessing && (ch == TOKEN_WHITE || ch == TOKEN_EOL || ch == TOKEN_FATAL)) {
+ if (lex->flags.preprocessing && (ch == Token::WHITE || ch == Token::EOL || ch == Token::FATAL)) {
return (lex->tok.ttype = ch);
}
lex->tok.ctx.file = lex->name;
if (lex->eof)
- return (lex->tok.ttype = TOKEN_FATAL);
+ return (lex->tok.ttype = Token::FATAL);
- if (ch == EOF) {
+ if (ch == Token::END) {
lex->eof = true;
- return (lex->tok.ttype = TOKEN_EOF);
+ return (lex->tok.ttype = Token::END);
}
/* modelgen / spiritgen commands */
- if (ch == '$' && !lex->flags.preprocessing) {
+ if (ch == Token::DOLLAR && !lex->flags.preprocessing) {
const char *v;
size_t frame;
}
lex_tokench(lex, ch);
if (!lex_finish_ident(lex))
- return (lex->tok.ttype = TOKEN_ERROR);
+ return (lex->tok.ttype = Token::ERROR);
lex_endtoken(lex);
/* skip the known commands */
v = lex->tok.value;
* which the parser is unaware of
*/
if (!lex_finish_frames(lex))
- return (lex->tok.ttype = TOKEN_ERROR);
+ return (lex->tok.ttype = Token::ERROR);
return lex_do(lex);
}
if (!strcmp(v, "framevalue"))
{
ch = lex_getch(lex);
- while (ch != EOF && util_isspace(ch) && ch != '\n')
+ while (ch != Token::END && util_isspace(ch) && ch != Token::LF)
ch = lex_getch(lex);
if (!util_isdigit(ch)) {
lex_token_new(lex);
lex->tok.ttype = lex_finish_digit(lex, ch);
lex_endtoken(lex);
- if (lex->tok.ttype != TOKEN_INTCONST) {
+ if (lex->tok.ttype != Token::INTCONST) {
lexerror(lex, "$framevalue requires an integer parameter");
return lex_do(lex);
}
return lex_do(lex);
}
if (rc < 0)
- return (lex->tok.ttype = TOKEN_FATAL);
+ return (lex->tok.ttype = Token::FATAL);
v = lex->tok.value;
for (frame = 0; frame < vec_size(lex->frames); ++frame) {
return lex_do(lex);
}
if (rc < 0)
- return (lex->tok.ttype = TOKEN_FATAL);
+ return (lex->tok.ttype = Token::FATAL);
if (lex->modelname) {
frame_macro m;
vec_free(lex->frames);
/* skip line (fteqcc does it too) */
ch = lex_getch(lex);
- while (ch != EOF && ch != '\n')
+ while (ch != Token::END && ch != Token::LF)
ch = lex_getch(lex);
return lex_do(lex);
}
{
/* skip line */
ch = lex_getch(lex);
- while (ch != EOF && ch != '\n')
+ while (ch != Token::END && ch != Token::LF)
ch = lex_getch(lex);
return lex_do(lex);
}
for (frame = 0; frame < vec_size(lex->frames); ++frame) {
if (!strcmp(v, lex->frames[frame].name)) {
lex->tok.constval.i = lex->frames[frame].value;
- return (lex->tok.ttype = TOKEN_INTCONST);
+ return (lex->tok.ttype = Token::INTCONST);
}
}
/* single-character tokens */
switch (ch)
{
- case '[':
+ case Token::BRACKET_OPEN:
nextch = lex_getch(lex);
- if (nextch == '[') {
+ if (nextch == Token::BRACKET_OPEN) {
lex_tokench(lex, ch);
lex_tokench(lex, nextch);
lex_endtoken(lex);
- return (lex->tok.ttype = TOKEN_ATTRIBUTE_OPEN);
+ return (lex->tok.ttype = Token::ATTRIBUTE_OPEN);
}
lex_ungetch(lex, nextch);
/* FALL THROUGH */
- case '(':
- case ':':
- case '?':
+ case Token::PAREN_OPEN:
+ case Token::COLON:
+ case Token::QUESTION:
lex_tokench(lex, ch);
lex_endtoken(lex);
if (lex->flags.noops)
return (lex->tok.ttype = ch);
else
- return (lex->tok.ttype = TOKEN_OPERATOR);
+ return (lex->tok.ttype = Token::OPERATOR);
- case ']':
+ case Token::BRACKET_CLOSE:
if (lex->flags.noops) {
nextch = lex_getch(lex);
- if (nextch == ']') {
+ if (nextch == Token::BRACKET_CLOSE) {
lex_tokench(lex, ch);
lex_tokench(lex, nextch);
lex_endtoken(lex);
- return (lex->tok.ttype = TOKEN_ATTRIBUTE_CLOSE);
+ return (lex->tok.ttype = Token::ATTRIBUTE_CLOSE);
}
lex_ungetch(lex, nextch);
}
/* FALL THROUGH */
- case ')':
- case ';':
- case '{':
- case '}':
+ case Token::PAREN_CLOSE:
+ case Token::SEMICOLON:
+ case Token::BRACE_OPEN:
+ case Token::BRACE_CLOSE:
- case '#':
+ case Token::HASH:
lex_tokench(lex, ch);
lex_endtoken(lex);
return (lex->tok.ttype = ch);
break;
}
- if (ch == '.') {
+ if (ch == Token::DOT) {
nextch = lex_getch(lex);
/* digits starting with a dot */
if (util_isdigit(nextch)) {
*/
switch (ch)
{
- case '*':
- case '/':
- case '<':
- case '>':
- case '=':
- case '&':
- case '|':
- case '^':
- case '~':
- case ',':
- case '!':
+ case Token::MUL:
+ case Token::DIV:
+ case Token::LT:
+ case Token::GT:
+ case Token::EQ:
+ case Token::AND:
+ case Token::OR:
+ case Token::XOR:
+ case Token::BITNOT:
+ case Token::COMMA:
+ case Token::NOT:
lex_tokench(lex, ch);
lex_endtoken(lex);
return (lex->tok.ttype = ch);
}
}
- if (ch == '.')
+ if (ch == Token::DOT)
{
lex_tokench(lex, ch);
/* peak ahead once */
nextch = lex_getch(lex);
- if (nextch != '.') {
+ if (nextch != Token::DOT) {
lex_ungetch(lex, nextch);
lex_endtoken(lex);
if (lex->flags.noops)
return (lex->tok.ttype = ch);
else
- return (lex->tok.ttype = TOKEN_OPERATOR);
+ return (lex->tok.ttype = Token::OPERATOR);
}
/* peak ahead again */
nextch = lex_getch(lex);
- if (nextch != '.') {
+ if (nextch != Token::DOT) {
lex_ungetch(lex, nextch);
- lex_ungetch(lex, '.');
+ lex_ungetch(lex, Token::DOT);
lex_endtoken(lex);
if (lex->flags.noops)
return (lex->tok.ttype = ch);
else
- return (lex->tok.ttype = TOKEN_OPERATOR);
+ return (lex->tok.ttype = Token::OPERATOR);
}
/* fill the token to be "..." */
lex_tokench(lex, ch);
lex_tokench(lex, ch);
lex_endtoken(lex);
- return (lex->tok.ttype = TOKEN_DOTS);
+ return (lex->tok.ttype = Token::DOTS);
}
- if (ch == ',' || ch == '.') {
+ if (ch == Token::COMMA || ch == Token::DOT) {
lex_tokench(lex, ch);
lex_endtoken(lex);
- return (lex->tok.ttype = TOKEN_OPERATOR);
+ return (lex->tok.ttype = Token::OPERATOR);
}
- if (ch == '+' || ch == '-' || /* ++, --, +=, -= and -> as well! */
- ch == '>' || ch == '<' || /* <<, >>, <=, >= and >< as well! */
- ch == '=' || ch == '!' || /* <=>, ==, != */
- ch == '&' || ch == '|' || /* &&, ||, &=, |= */
- ch == '~' || ch == '^' /* ~=, ~, ^ */
+ if (ch == Token::ADD || ch == Token::SUB || /* ++, --, +=, -= and -> as well! */
+ ch == Token::GT || ch == Token::LT || /* <<, >>, <=, >= and >< as well! */
+ ch == Token::EQ || ch == Token::NOT || /* <=>, ==, != */
+ ch == Token::AND || ch == Token::OR || /* &&, ||, &=, |= */
+ ch == Token::BITNOT || ch == Token::XOR /* ~=, ~, ^ */
) {
lex_tokench(lex, ch);
nextch = lex_getch(lex);
- if ((nextch == '=' && ch != '<') || (nextch == '<' && ch == '>'))
+ if ((nextch == Token::EQ && ch != Token::LT) || (nextch == Token::LT && ch == Token::GT))
lex_tokench(lex, nextch);
- else if (nextch == ch && ch != '!') {
+ else if (nextch == ch && ch != Token::NOT) {
lex_tokench(lex, nextch);
- if ((thirdch = lex_getch(lex)) == '=')
+ if ((thirdch = lex_getch(lex)) == Token::EQ)
lex_tokench(lex, thirdch);
else
lex_ungetch(lex, thirdch);
- } else if (ch == '<' && nextch == '=') {
+ } else if (ch == Token::LT && nextch == Token::EQ) {
lex_tokench(lex, nextch);
- if ((thirdch = lex_getch(lex)) == '>')
+ if ((thirdch = lex_getch(lex)) == Token::GT)
lex_tokench(lex, thirdch);
else
lex_ungetch(lex, thirdch);
- } else if (ch == '-' && nextch == '>') {
+ } else if (ch == Token::SUB && nextch == Token::GT) {
lex_tokench(lex, nextch);
- } else if (ch == '&' && nextch == '~') {
+ } else if (ch == Token::AND && nextch == Token::BITNOT) {
thirdch = lex_getch(lex);
- if (thirdch != '=') {
+ if (thirdch != Token::EQ) {
lex_ungetch(lex, thirdch);
lex_ungetch(lex, nextch);
}
}
}
else if (lex->flags.preprocessing &&
- ch == '-' && util_isdigit(nextch))
+ ch == Token::SUB && util_isdigit(nextch))
{
lex->tok.ttype = lex_finish_digit(lex, nextch);
- if (lex->tok.ttype == TOKEN_INTCONST)
+ if (lex->tok.ttype == Token::INTCONST)
lex->tok.constval.i = -lex->tok.constval.i;
else
lex->tok.constval.f = -lex->tok.constval.f;
}
lex_endtoken(lex);
- return (lex->tok.ttype = TOKEN_OPERATOR);
+ return (lex->tok.ttype = Token::OPERATOR);
}
- if (ch == '*' || ch == '/') /* *=, /= */
+ if (ch == Token::MUL || ch == Token::DIV) /* *=, /= */
{
lex_tokench(lex, ch);
nextch = lex_getch(lex);
- if (nextch == '=' || nextch == '*') {
+ if (nextch == Token::EQ || nextch == Token::MUL) {
lex_tokench(lex, nextch);
} else
lex_ungetch(lex, nextch);
lex_endtoken(lex);
- return (lex->tok.ttype = TOKEN_OPERATOR);
+ return (lex->tok.ttype = Token::OPERATOR);
}
- if (ch == '%') {
+ if (ch == Token::MOD) {
lex_tokench(lex, ch);
lex_endtoken(lex);
- return (lex->tok.ttype = TOKEN_OPERATOR);
+ return (lex->tok.ttype = Token::OPERATOR);
}
if (isident_start(ch))
lex_tokench(lex, ch);
if (!lex_finish_ident(lex)) {
/* error? */
- return (lex->tok.ttype = TOKEN_ERROR);
+ return (lex->tok.ttype = Token::ERROR);
}
lex_endtoken(lex);
- lex->tok.ttype = TOKEN_IDENT;
+ lex->tok.ttype = Token::IDENT;
v = lex->tok.value;
if (!strcmp(v, "void")) {
- lex->tok.ttype = TOKEN_TYPENAME;
+ lex->tok.ttype = Token::TYPENAME;
lex->tok.constval.t = TYPE_VOID;
} else if (!strcmp(v, "int")) {
- lex->tok.ttype = TOKEN_TYPENAME;
+ lex->tok.ttype = Token::TYPENAME;
lex->tok.constval.t = TYPE_INTEGER;
} else if (!strcmp(v, "float")) {
- lex->tok.ttype = TOKEN_TYPENAME;
+ lex->tok.ttype = Token::TYPENAME;
lex->tok.constval.t = TYPE_FLOAT;
} else if (!strcmp(v, "string")) {
- lex->tok.ttype = TOKEN_TYPENAME;
+ lex->tok.ttype = Token::TYPENAME;
lex->tok.constval.t = TYPE_STRING;
} else if (!strcmp(v, "entity")) {
- lex->tok.ttype = TOKEN_TYPENAME;
+ lex->tok.ttype = Token::TYPENAME;
lex->tok.constval.t = TYPE_ENTITY;
} else if (!strcmp(v, "vector")) {
- lex->tok.ttype = TOKEN_TYPENAME;
+ lex->tok.ttype = Token::TYPENAME;
lex->tok.constval.t = TYPE_VECTOR;
} else if (!strcmp(v, "_length")) {
- lex->tok.ttype = TOKEN_OPERATOR;
+ lex->tok.ttype = Token::OPERATOR;
} else {
size_t kw;
for (kw = 0; kw < GMQCC_ARRAY_COUNT(keywords_qc); ++kw) {
if (!strcmp(v, keywords_qc[kw]))
- return (lex->tok.ttype = TOKEN_KEYWORD);
+ return (lex->tok.ttype = Token::KEYWORD);
}
if (OPTS_OPTION_U32(OPTION_STANDARD) != COMPILER_QCC) {
for (kw = 0; kw < GMQCC_ARRAY_COUNT(keywords_fg); ++kw) {
if (!strcmp(v, keywords_fg[kw]))
- return (lex->tok.ttype = TOKEN_KEYWORD);
+ return (lex->tok.ttype = Token::KEYWORD);
}
}
}
return lex->tok.ttype;
}
- if (ch == '"')
+ if (ch == Token::QUOT_DOUBLE)
{
lex->flags.nodigraphs = true;
if (lex->flags.preprocessing)
lex_tokench(lex, ch);
- lex->tok.ttype = lex_finish_string(lex, '"');
+ lex->tok.ttype = lex_finish_string(lex, Token::QUOT_DOUBLE);
if (lex->flags.preprocessing)
lex_tokench(lex, ch);
- while (!lex->flags.preprocessing && lex->tok.ttype == TOKEN_STRINGCONST)
+ while (!lex->flags.preprocessing && lex->tok.ttype == Token::STRINGCONST)
{
/* Allow c style "string" "continuation" */
ch = lex_skipwhite(lex, false);
- if (ch != '"') {
+ if (ch != Token::QUOT_DOUBLE) {
lex_ungetch(lex, ch);
break;
}
- lex->tok.ttype = lex_finish_string(lex, '"');
+ lex->tok.ttype = lex_finish_string(lex, Token::QUOT_DOUBLE);
}
lex->flags.nodigraphs = false;
lex_endtoken(lex);
return lex->tok.ttype;
}
- if (ch == '\'')
+ if (ch == Token::QUOT_SINGLE)
{
/* we parse character constants like string,
- * but return TOKEN_CHARCONST, or a vector type if it fits...
+ * but return Token::CHARCONST, or a vector type if it fits...
* Likewise actual unescaping has to be done by the parser.
* The difference is we don't allow 'char' 'continuation'.
*/
if (lex->flags.preprocessing)
lex_tokench(lex, ch);
- lex->tok.ttype = lex_finish_string(lex, '\'');
+ lex->tok.ttype = lex_finish_string(lex, Token::QUOT_SINGLE);
if (lex->flags.preprocessing)
lex_tokench(lex, ch);
lex_endtoken(lex);
- lex->tok.ttype = TOKEN_CHARCONST;
+ lex->tok.ttype = Token::CHARCONST;
/* It's a vector if we can successfully scan 3 floats */
if (util_sscanf(lex->tok.value, " %f %f %f ",
&lex->tok.constval.v.x, &lex->tok.constval.v.y, &lex->tok.constval.v.z) == 3)
{
- lex->tok.ttype = TOKEN_VECTORCONST;
+ lex->tok.ttype = Token::VECTORCONST;
}
else
{
( OPTS_FLAG(UTF8) ? "invalid multibyte character sequence `%s`"
: "multibyte character: `%s`" ),
lex->tok.value))
- return (lex->tok.ttype = TOKEN_ERROR);
+ return (lex->tok.ttype = Token::ERROR);
}
else
lex->tok.constval.i = u8char;
}
if (lex->flags.preprocessing) {
- lex_tokench(lex, ch);
+ lex_tokench(lex, static_cast<int>(ch));
lex_endtoken(lex);
return (lex->tok.ttype = ch);
}
lexerror(lex, "unknown token: `%c`", ch);
- return (lex->tok.ttype = TOKEN_ERROR);
+ return (lex->tok.ttype = Token::ERROR);
}
#define GMQCC_LEXER_HDR
#include "gmqcc.h"
-struct token {
- int ttype;
- char *value;
- union {
- vec3_t v;
- int i;
- qcfloat_t f;
- qc_type t; /* type */
- } constval;
- lex_ctx_t ctx;
-};
-
/* Lexer
*
*/
-enum {
+enum Token : int { // todo: enum class
/* Other tokens which we can return: */
- TOKEN_NONE = 0,
- TOKEN_START = 128,
+ NONE = 0,
+
+ CR = '\r',
+ LF = '\n',
+ WS = ' ',
+ BACKSLASH = '\\',
+
+ HASH = '#',
+ DOLLAR = '$',
+
+ DOT = '.',
+ COMMA = ',',
+ COLON = ':',
+ SEMICOLON = ';',
+
+ AND = '&',
+ OR = '|',
+ XOR = '^',
+ BITNOT = '~',
+ NOT = '!',
+
+ LT = '<',
+ GT = '>',
+ EQ = '=',
- TOKEN_IDENT,
+ MUL = '*',
+ DIV = '/',
+ MOD = '%',
- TOKEN_TYPENAME,
+ ADD = '+',
+ SUB = '-',
- TOKEN_OPERATOR,
+ QUOT_SINGLE = '\'',
+ QUOT_DOUBLE = '"',
- TOKEN_KEYWORD, /* loop */
+ QUESTION = '?',
- TOKEN_DOTS, /* 3 dots, ... */
+ BRACE_OPEN = '{', BRACE_CLOSE = '}',
+ BRACKET_OPEN = '[', BRACKET_CLOSE = ']',
+ PAREN_OPEN = '(', PAREN_CLOSE = ')',
- TOKEN_ATTRIBUTE_OPEN, /* [[ */
- TOKEN_ATTRIBUTE_CLOSE, /* ]] */
+ START = 128,
- TOKEN_VA_ARGS, /* for the ftepp only */
- TOKEN_VA_ARGS_ARRAY, /* for the ftepp only */
- TOKEN_VA_COUNT, /* to get the count of vaargs */
+ IDENT,
- TOKEN_STRINGCONST, /* not the typename but an actual "string" */
- TOKEN_CHARCONST,
- TOKEN_VECTORCONST,
- TOKEN_INTCONST,
- TOKEN_FLOATCONST,
+ TYPENAME,
- TOKEN_WHITE,
- TOKEN_EOL,
+ OPERATOR,
+
+ KEYWORD, /* loop */
+
+ DOTS, /* 3 dots, ... */
+
+ ATTRIBUTE_OPEN, /* [[ */
+ ATTRIBUTE_CLOSE, /* ]] */
+
+ VA_ARGS, /* for the ftepp only */
+ VA_ARGS_ARRAY, /* for the ftepp only */
+ VA_COUNT, /* to get the count of vaargs */
+
+ STRINGCONST, /* not the typename but an actual "string" */
+ CHARCONST,
+ VECTORCONST,
+ INTCONST,
+ FLOATCONST,
+
+ WHITE,
+ EOL,
/* if we add additional tokens before this, the exposed API
* should not be broken anyway, but EOF/ERROR/... should
* still be at the bottom
*/
- TOKEN_EOF = 1024,
+ END = 1024,
- /* We use '< TOKEN_ERROR', so TOKEN_FATAL must come after it and any
+ /* We use '< ERROR', so FATAL must come after it and any
* other error related tokens as well
*/
- TOKEN_ERROR,
- TOKEN_FATAL /* internal error, eg out of memory */
+ ERROR,
+ FATAL /* internal error, eg out of memory */
+};
+
+struct token {
+ Token ttype;
+ char *value;
+ union {
+ vec3_t v;
+ int i;
+ qcfloat_t f;
+ qc_type t; /* type */
+ } constval;
+ lex_ctx_t ctx;
};
struct frame_macro {
size_t sline; /* line at the start of a token */
size_t column;
- int peek[256];
+ Token peek[256];
size_t peekpos;
bool eof;
lex_file* lex_open (const char *file);
lex_file* lex_open_string(const char *str, size_t len, const char *name);
void lex_close(lex_file *lex);
-int lex_do (lex_file *lex);
+Token lex_do (lex_file *lex);
void lex_cleanup(void);
/* Parser
#include <stdlib.h>
#include "gmqcc.h"
+#include "lexer.h"
const unsigned int opts_opt_oflag[COUNT_OPTIMIZATIONS+1] = {
# define GMQCC_TYPE_OPTIMIZATIONS
char *read_name;
char *read_value;
- while (util_getline(&line, &linesize, filehandle) != EOF) {
+ while (util_getline(&line, &linesize, filehandle) != EOF) { /* util_getline returns EOF (-1) at end of input, not Token::END (1024) */
parse_beg = line;
/* handle BOM */
{
/* lex_do kills the previous token */
parser.tok = lex_do(parser.lex);
- if (parser.tok == TOKEN_EOF)
+ if (parser.tok == Token::END)
return true;
- if (parser.tok >= TOKEN_ERROR) {
+ if (parser.tok >= Token::ERROR) {
parseerror(parser, "lex error");
return false;
}
#define parser_tokval(p) ((p).lex->tok.value)
#define parser_token(p) (&((p).lex->tok))
-char *parser_strdup(const char *str)
-{
- if (str && !*str) {
- /* actually dup empty strings */
- char *out = (char*)mem_a(1);
- *out = 0;
- return out;
- }
- return util_strdup(str);
-}
-
static ast_expression* parser_find_field(parser_t &parser, const char *name) {
return (ast_expression*)util_htget(parser.htfields, name);
}
static void parser_reclassify_token(parser_t &parser)
{
size_t i;
- if (parser.tok >= TOKEN_START)
+ if (parser.tok >= Token::START)
return;
for (i = 0; i < operator_count; ++i) {
if (!strcmp(parser_tokval(parser), operators[i].op)) {
- parser.tok = TOKEN_OPERATOR;
+ parser.tok = Token::OPERATOR;
return;
}
}
return out;
}
- if (!parser_next(parser) || (parser.tok != TOKEN_IDENT && parser.tok != TOKEN_TYPENAME)) {
+ if (!parser_next(parser) || (parser.tok != Token::IDENT && parser.tok != Token::TYPENAME)) {
ast_unref(idx);
parseerror(parser, "expected typename for vararg");
return nullptr;
static bool parse_sya_operand(parser_t &parser, shunt *sy, bool with_labels)
{
if (OPTS_FLAG(TRANSLATABLE_STRINGS) &&
- parser.tok == TOKEN_IDENT &&
+ parser.tok == Token::IDENT &&
!strcmp(parser_tokval(parser), "_"))
{
/* a translatable string */
return false;
}
parser.lex->flags.noops = false;
- if (!parser_next(parser) || parser.tok != TOKEN_STRINGCONST) {
+ if (!parser_next(parser) || parser.tok != Token::STRINGCONST) {
parseerror(parser, "expected a constant string in translatable-string extension");
return false;
}
}
return true;
}
- else if (parser.tok == TOKEN_DOTS)
+ else if (parser.tok == Token::DOTS)
{
ast_expression *va;
if (!OPTS_FLAG(VARIADIC_ARGS)) {
sy->out.push_back(syexp(parser_ctx(parser), va));
return true;
}
- else if (parser.tok == TOKEN_FLOATCONST) {
+ else if (parser.tok == Token::FLOATCONST) {
ast_expression *val = parser.m_fold.constgen_float((parser_token(parser)->constval.f), false);
if (!val)
return false;
sy->out.push_back(syexp(parser_ctx(parser), val));
return true;
}
- else if (parser.tok == TOKEN_INTCONST || parser.tok == TOKEN_CHARCONST) {
+ else if (parser.tok == Token::INTCONST || parser.tok == Token::CHARCONST) {
ast_expression *val = parser.m_fold.constgen_float((qcfloat_t)(parser_token(parser)->constval.i), false);
if (!val)
return false;
sy->out.push_back(syexp(parser_ctx(parser), val));
return true;
}
- else if (parser.tok == TOKEN_STRINGCONST) {
+ else if (parser.tok == Token::STRINGCONST) {
ast_expression *val = parser.m_fold.constgen_string(parser_tokval(parser), false);
if (!val)
return false;
sy->out.push_back(syexp(parser_ctx(parser), val));
return true;
}
- else if (parser.tok == TOKEN_VECTORCONST) {
+ else if (parser.tok == Token::VECTORCONST) {
ast_expression *val = parser.m_fold.constgen_vector(parser_token(parser)->constval.v);
if (!val)
return false;
sy->out.push_back(syexp(parser_ctx(parser), val));
return true;
}
- else if (parser.tok == TOKEN_IDENT)
+ else if (parser.tok == Token::IDENT)
{
const char *ctoken = parser_tokval(parser);
ast_expression *prev = sy->out.size() ? sy->out.back().out : nullptr;
while (true)
{
- if (parser.tok == TOKEN_TYPENAME) {
+ if (parser.tok == Token::TYPENAME) {
parseerror(parser, "unexpected typename `%s`", parser_tokval(parser));
goto onerr;
}
- if (parser.tok == TOKEN_OPERATOR)
+ if (parser.tok == Token::OPERATOR)
{
/* classify the operator */
const oper_info *op;
/* when declaring variables, a comma starts a new variable */
if (op->id == opid1(',') && sy.paren.empty() && stopatcomma) {
/* fixup the token */
- parser.tok = ',';
+ parser.tok = Token::COMMA;
break;
}
/* a colon without a pervious question mark cannot be a ternary */
if (!ternaries && op->id == opid2(':','?')) {
- parser.tok = ':';
+ parser.tok = Token::COLON;
break;
}
else {
/* in this case we might want to allow constant string concatenation */
bool concatenated = false;
- if (parser.tok == TOKEN_STRINGCONST && sy.out.size()) {
+ if (parser.tok == Token::STRINGCONST && sy.out.size()) {
ast_expression *lexpr = sy.out.back().out;
if (ast_istype(lexpr, ast_value)) {
ast_value *last = (ast_value*)lexpr;
parseerror(parser, "expected condition or 'not'");
return false;
}
- if (parser.tok == TOKEN_IDENT && !strcmp(parser_tokval(parser), "not")) {
+ if (parser.tok == Token::IDENT && !strcmp(parser_tokval(parser), "not")) {
ifnot = true;
if (!parser_next(parser)) {
parseerror(parser, "expected condition in parenthesis");
if (parser.tok == ':') {
if (!OPTS_FLAG(LOOP_LABELS))
parseerror(parser, "labeled loops not activated, try using -floop-labels");
- if (!parser_next(parser) || parser.tok != TOKEN_IDENT) {
+ if (!parser_next(parser) || parser.tok != Token::IDENT) {
parseerror(parser, "expected loop label");
return false;
}
if (parser.tok == ':') {
if (!OPTS_FLAG(LOOP_LABELS))
parseerror(parser, "labeled loops not activated, try using -floop-labels");
- if (!parser_next(parser) || parser.tok != TOKEN_IDENT) {
+ if (!parser_next(parser) || parser.tok != Token::IDENT) {
parseerror(parser, "expected loop label");
return false;
}
return false;
/* expect the "while" */
- if (parser.tok != TOKEN_KEYWORD ||
+ if (parser.tok != Token::KEYWORD ||
strcmp(parser_tokval(parser), "while"))
{
parseerror(parser, "expected 'while' and condition");
if (parser.tok == ':') {
if (!OPTS_FLAG(LOOP_LABELS))
parseerror(parser, "labeled loops not activated, try using -floop-labels");
- if (!parser_next(parser) || parser.tok != TOKEN_IDENT) {
+ if (!parser_next(parser) || parser.tok != Token::IDENT) {
parseerror(parser, "expected loop label");
return false;
}
}
typevar = nullptr;
- if (parser.tok == TOKEN_IDENT)
+ if (parser.tok == Token::IDENT)
typevar = parser_find_typedef(parser, parser_tokval(parser), 0);
- if (typevar || parser.tok == TOKEN_TYPENAME) {
+ if (typevar || parser.tok == Token::TYPENAME) {
if (!parse_variable(parser, block, true, CV_VAR, typevar, false, false, 0, nullptr))
goto onerr;
}
parseerror(parser, "`break` can only be used inside loops or switches");
}
- if (parser.tok == TOKEN_IDENT) {
+ if (parser.tok == Token::IDENT) {
if (!OPTS_FLAG(LOOP_LABELS))
parseerror(parser, "labeled loops not activated, try using -floop-labels");
i = loops.size();
for (;;) {
size_t i;
- if (parser.tok == TOKEN_ATTRIBUTE_OPEN) {
+ if (parser.tok == Token::ATTRIBUTE_OPEN) {
had_attrib = true;
/* parse an attribute */
if (!parser_next(parser)) {
for (i = 0; i < GMQCC_ARRAY_COUNT(attributes); i++) {
if (!strcmp(parser_tokval(parser), attributes[i].name)) {
flags |= attributes[i].flag;
- if (!parser_next(parser) || parser.tok != TOKEN_ATTRIBUTE_CLOSE) {
+ if (!parser_next(parser) || parser.tok != Token::ATTRIBUTE_CLOSE) {
parseerror(parser, "`%s` attribute has no parameters, expected `]]`",
attributes[i].name);
*cvq = CV_WRONG;
if (!strcmp(parser_tokval(parser), "noref")) {
had_noref = true;
- if (!parser_next(parser) || parser.tok != TOKEN_ATTRIBUTE_CLOSE) {
+ if (!parser_next(parser) || parser.tok != Token::ATTRIBUTE_CLOSE) {
parseerror(parser, "`noref` attribute has no parameters, expected `]]`");
*cvq = CV_WRONG;
return false;
}
if (parser.tok == '(') {
- if (!parser_next(parser) || parser.tok != TOKEN_STRINGCONST) {
+ if (!parser_next(parser) || parser.tok != Token::STRINGCONST) {
parseerror(parser, "`alias` attribute missing parameter");
goto argerr;
}
}
}
- if (parser.tok != TOKEN_ATTRIBUTE_CLOSE) {
+ if (parser.tok != Token::ATTRIBUTE_CLOSE) {
parseerror(parser, "`alias` attribute expected `]]`");
goto argerr;
}
}
if (parser.tok == '(') {
- if (!parser_next(parser) || parser.tok != TOKEN_STRINGCONST) {
+ if (!parser_next(parser) || parser.tok != Token::STRINGCONST) {
parseerror(parser, "`deprecated` attribute missing parameter");
goto argerr;
}
}
}
/* no message */
- if (parser.tok != TOKEN_ATTRIBUTE_CLOSE) {
+ if (parser.tok != Token::ATTRIBUTE_CLOSE) {
parseerror(parser, "`deprecated` attribute expected `]]`");
argerr: /* ugly */
}
if (parser.tok != ')') {
do {
- if (parser.tok != TOKEN_IDENT)
+ if (parser.tok != Token::IDENT)
goto bad_coverage_arg;
if (!strcmp(parser_tokval(parser), "block"))
flags |= AST_FLAG_BLOCK_COVERAGE;
{
/* Skip tokens until we hit a ]] */
(void)!parsewarning(parser, WARN_UNKNOWN_ATTRIBUTE, "unknown attribute starting with `%s`", parser_tokval(parser));
- while (parser.tok != TOKEN_ATTRIBUTE_CLOSE) {
+ while (parser.tok != Token::ATTRIBUTE_CLOSE) {
if (!parser_next(parser)) {
parseerror(parser, "error inside attribute");
*cvq = CV_WRONG;
if (parser.tok == ':') {
if (!OPTS_FLAG(LOOP_LABELS))
parseerror(parser, "labeled loops not activated, try using -floop-labels");
- if (!parser_next(parser) || parser.tok != TOKEN_IDENT) {
+ if (!parser_next(parser) || parser.tok != Token::IDENT) {
parseerror(parser, "expected loop label");
return false;
}
parser_enterblock(parser);
while (true) {
typevar = nullptr;
- if (parser.tok == TOKEN_IDENT)
+ if (parser.tok == Token::IDENT)
typevar = parser_find_typedef(parser, parser_tokval(parser), 0);
- if (typevar || parser.tok == TOKEN_TYPENAME) {
+ if (typevar || parser.tok == Token::TYPENAME) {
if (!parse_variable(parser, block, true, CV_NONE, typevar, false, false, 0, nullptr)) {
delete switchnode;
return false;
ast_expression *expr;
if (parser.tok == '}')
break;
- if (parser.tok == TOKEN_KEYWORD) {
+ if (parser.tok == Token::KEYWORD) {
if (!strcmp(parser_tokval(parser), "case") ||
!strcmp(parser_tokval(parser), "default"))
{
if (!parser_next(parser))
return false;
- if (parser.tok != TOKEN_IDENT) {
+ if (parser.tok != Token::IDENT) {
ast_expression *expression;
/* could be an expression i.e computed goto :-) */
do {
if (!parser_next(parser))
return false;
- } while (parser.tok == TOKEN_WHITE && parser.tok < TOKEN_ERROR);
- return parser.tok < TOKEN_ERROR;
+ } while (parser.tok == Token::WHITE && parser.tok < Token::ERROR);
+ return parser.tok < Token::ERROR;
}
static bool parse_eol(parser_t &parser)
{
if (!parse_skipwhite(parser))
return false;
- return parser.tok == TOKEN_EOL;
+ return parser.tok == Token::EOL;
}
static bool parse_pragma_do(parser_t &parser)
{
if (!parser_next(parser) ||
- parser.tok != TOKEN_IDENT ||
+ parser.tok != Token::IDENT ||
strcmp(parser_tokval(parser), "pragma"))
{
parseerror(parser, "expected `pragma` keyword after `#`, got `%s`", parser_tokval(parser));
return false;
}
- if (!parse_skipwhite(parser) || parser.tok != TOKEN_IDENT) {
+ if (!parse_skipwhite(parser) || parser.tok != Token::IDENT) {
parseerror(parser, "expected pragma, got `%s`", parser_tokval(parser));
return false;
}
if (!strcmp(parser_tokval(parser), "noref")) {
- if (!parse_skipwhite(parser) || parser.tok != TOKEN_INTCONST) {
+ if (!parse_skipwhite(parser) || parser.tok != Token::INTCONST) {
parseerror(parser, "`noref` pragma requires an argument: 0 or 1");
return false;
}
parser.lex->flags.preprocessing = true;
parser.lex->flags.mergelines = true;
auto ret = parse_pragma_do(parser);
- if (parser.tok != TOKEN_EOL) {
+ if (parser.tok != Token::EOL) {
parseerror(parser, "junk after pragma");
ret = false;
}
*out = nullptr;
- if (parser.tok == TOKEN_IDENT)
+ if (parser.tok == Token::IDENT)
typevar = parser_find_typedef(parser, parser_tokval(parser), 0);
- if (typevar || parser.tok == TOKEN_TYPENAME || parser.tok == '.' || parser.tok == TOKEN_DOTS)
+ if (typevar || parser.tok == Token::TYPENAME || parser.tok == Token::DOT || parser.tok == Token::DOTS)
{
/* local variable */
if (!block) {
return false;
return parse_variable(parser, block, false, cvq, nullptr, noref, is_static, qflags, vstring);
}
- else if (parser.tok == TOKEN_KEYWORD)
+ else if (parser.tok == Token::KEYWORD)
{
if (!strcmp(parser_tokval(parser), "__builtin_debug_printtype"))
{
return false;
}
- if (parser.tok == TOKEN_IDENT && (tdef = parser_find_typedef(parser, parser_tokval(parser), 0)))
+ if (parser.tok == Token::IDENT && (tdef = parser_find_typedef(parser, parser_tokval(parser), 0)))
{
ast_type_to_string(tdef, ty, sizeof(ty));
con_out("__builtin_debug_printtype: `%s`=`%s`\n", tdef->m_name.c_str(), ty);
parseerror(parser, "expected label name");
return false;
}
- if (parser.tok != TOKEN_IDENT) {
+ if (parser.tok != Token::IDENT) {
parseerror(parser, "label must be an identifier");
return false;
}
/* enumeration attributes (can add more later) */
if (parser.tok == ':') {
- if (!parser_next(parser) || parser.tok != TOKEN_IDENT){
+ if (!parser_next(parser) || parser.tok != Token::IDENT){
parseerror(parser, "expected `flag` or `reverse` for enumeration attribute");
return false;
}
}
while (true) {
- if (!parser_next(parser) || parser.tok != TOKEN_IDENT) {
+ if (!parser_next(parser) || parser.tok != Token::IDENT) {
if (parser.tok == '}') {
/* allow an empty enum */
break;
goto cleanup;
}
- while (parser.tok != TOKEN_EOF && parser.tok < TOKEN_ERROR)
+ while (parser.tok != Token::END && parser.tok < Token::ERROR)
{
ast_expression *expr = nullptr;
if (parser.tok == '}')
return false;
}
- if (parser.tok == TOKEN_IDENT && !parser_find_var(parser, parser_tokval(parser)))
+ if (parser.tok == Token::IDENT && !parser_find_var(parser, parser_tokval(parser)))
{
/* qc allows the use of not-yet-declared functions here
* - this automatically creates a prototype */
if (is_varargs) {
/* '...' indicates a varargs function */
variadic = true;
- if (parser.tok != ')' && parser.tok != TOKEN_IDENT) {
+ if (parser.tok != Token::PAREN_CLOSE && parser.tok != Token::IDENT) {
parseerror(parser, "`...` must be the last parameter of a variadic function declaration");
goto on_error;
}
- if (parser.tok == TOKEN_IDENT) {
+ if (parser.tok == Token::IDENT) {
argcounter = util_strdup(parser_tokval(parser));
if (!parser_next(parser) || parser.tok != ')') {
parseerror(parser, "`...` must be the last parameter of a variadic function declaration");
goto on_error;
}
/* type-restricted varargs */
- if (parser.tok == TOKEN_DOTS) {
+ if (parser.tok == Token::DOTS) {
variadic = true;
varparam = params.back().release();
params.pop_back();
- if (!parser_next(parser) || (parser.tok != ')' && parser.tok != TOKEN_IDENT)) {
+ if (!parser_next(parser) || (parser.tok != Token::PAREN_CLOSE && parser.tok != Token::IDENT)) {
parseerror(parser, "`...` must be the last parameter of a variadic function declaration");
goto on_error;
}
- if (parser.tok == TOKEN_IDENT) {
+ if (parser.tok == Token::IDENT) {
argcounter = util_strdup(parser_tokval(parser));
param->m_name = argcounter;
if (!parser_next(parser) || parser.tok != ')') {
bool wasarray = false;
size_t morefields = 0;
- bool vararg = (parser.tok == TOKEN_DOTS);
+ bool vararg = (parser.tok == Token::DOTS);
ctx = parser_ctx(parser);
/* types may start with a dot */
- if (parser.tok == '.' || parser.tok == TOKEN_DOTS) {
+ if (parser.tok == '.' || parser.tok == Token::DOTS) {
isfield = true;
- if (parser.tok == TOKEN_DOTS)
+ if (parser.tok == Token::DOTS)
morefields += 2;
/* if we parsed a dot we need a typename now */
if (!parser_next(parser)) {
while (true) {
if (parser.tok == '.')
++morefields;
- else if (parser.tok == TOKEN_DOTS)
+ else if (parser.tok == Token::DOTS)
morefields += 3;
else
break;
}
}
}
- if (parser.tok == TOKEN_IDENT)
+ if (parser.tok == Token::IDENT)
cached_typedef = parser_find_typedef(parser, parser_tokval(parser), 0);
- if (!cached_typedef && parser.tok != TOKEN_TYPENAME) {
+ if (!cached_typedef && parser.tok != Token::TYPENAME) {
if (vararg && is_vararg) {
*is_vararg = true;
return nullptr;
}
/* there may be a name now */
- if (parser.tok == TOKEN_IDENT || parser.tok == TOKEN_KEYWORD) {
+ if (parser.tok == Token::IDENT || parser.tok == Token::KEYWORD) {
if (!strcmp(parser_tokval(parser), "break"))
(void)!parsewarning(parser, WARN_BREAKDEF, "break definition ignored (suggest removing it)");
- else if (parser.tok == TOKEN_KEYWORD)
+ else if (parser.tok == Token::KEYWORD)
goto leave;
name = util_strdup(parser_tokval(parser));
/* we only want the integral part anyways */
builtin_num = integral;
- } else if (parser.tok == TOKEN_INTCONST) {
+ } else if (parser.tok == Token::INTCONST) {
builtin_num = parser_token(parser)->constval.i;
} else {
parseerror(parser, "builtin number must be a compile time constant");
break;
}
- if (parser.tok != TOKEN_IDENT) {
+ if (parser.tok != Token::IDENT) {
parseerror(parser, "expected another variable");
break;
}
static bool parser_global_statement(parser_t &parser)
{
ast_value *istype = nullptr;
- if ((parser.tok == TOKEN_IDENT && (istype = parser_find_typedef(parser, parser_tokval(parser), 0)) != nullptr)
- || parser.tok == TOKEN_TYPENAME
- || parser.tok == '.' || parser.tok == TOKEN_DOTS) {
+ if ((parser.tok == Token::IDENT && (istype = parser_find_typedef(parser, parser_tokval(parser), 0)) != nullptr)
+ || parser.tok == Token::TYPENAME
+ || parser.tok == '.' || parser.tok == Token::DOTS) {
return parse_variable(parser, nullptr, false, CV_NONE, istype, false, false, 0, nullptr);
}
return parse_variable(parser, nullptr, false, cvq, nullptr, noref, is_static, qflags, vstring);
}
- if (parser.tok == TOKEN_IDENT && strcmp(parser_tokval(parser), "enum") == 0) {
+ if (parser.tok == Token::IDENT && strcmp(parser_tokval(parser), "enum") == 0) {
return parse_enum(parser);
}
- if (parser.tok == TOKEN_KEYWORD) {
+ if (parser.tok == Token::KEYWORD) {
if (strcmp(parser_tokval(parser), "typedef") == 0) {
if (!parser_next(parser)) {
parseerror(parser, "expected type definition after 'typedef'");
parser_t::parser_t()
: lex(nullptr)
- , tok(0)
+ , tok(Token::NONE)
, ast_cleaned(false)
, translated(0)
, crc_globals(0)
compile_errors = true;
goto cleanup;
}
- while (parser.tok != TOKEN_EOF && parser.tok < TOKEN_ERROR) {
+ while (parser.tok != Token::END && parser.tok < Token::ERROR) {
if (parser_global_statement(parser)) continue;
- if (parser.tok == TOKEN_EOF) {
+ if (parser.tok == Token::END) {
parseerror(parser, "unexpected end of file");
} else if (compile_errors) {
parseerror(parser, "there have been errors, bailing out");
#include "intrin.h"
#include "fold.h"
-struct parser_t;
-
#define parser_ctx(p) ((p).lex->tok.ctx)
struct parser_t {
void remove_ast();
lex_file *lex;
- int tok;
+ Token tok;
bool ast_cleaned;
intrin m_intrin;
};
-
/* parser.c */
-char *parser_strdup (const char *str);
+inline char *parser_strdup(const char *str)
+{
+ if (str && !*str) {
+ /* actually dup empty strings */
+ auto *out = reinterpret_cast<char*>(mem_a(1));
+ *out = 0;
+ return out;
+ }
+ return util_strdup(str);
+}
ast_expression *parser_find_global(parser_t &parser, const char *name);
+parser_t *parser_create();
+bool parser_compile_file(parser_t &parser, const char *);
+bool parser_compile_string(parser_t &parser, const char *, const char *, size_t);
+bool parser_finish(parser_t &parser, const char *);
#endif