+        lexerror(lex, "invalid frame macro");
+        return lex_do(lex);
+    }
+
+    /* single-character tokens */
+    switch (ch)
+    {
+        case '(':
+            if (!lex_tokench(lex, ch) ||
+                !lex_endtoken(lex))
+            {
+                return (lex->tok->ttype = TOKEN_FATAL);
+            }
+            if (lex->flags.noops)
+                return (lex->tok->ttype = ch);
+            else
+                return (lex->tok->ttype = TOKEN_OPERATOR);
+        case ')':
+        case ';':
+        case '{':
+        case '}':
+        case '[':
+        case ']':
+        /* grouped labels: all of the above are plain single-character tokens */
+        case '#':
+            if (!lex_tokench(lex, ch) ||
+                !lex_endtoken(lex))
+            {
+                return (lex->tok->ttype = TOKEN_FATAL);
+            }
+            return (lex->tok->ttype = ch);
+        default:
+            break;
+    }
+
+    if (lex->flags.noops)
+    {
+        /* Detect characters early which are normally
+         * operators OR PART of an operator.
+         */
+        switch (ch)
+        {
+            case '+':
+            case '-':
+            case '*':
+            case '/':
+            case '<':
+            case '>':
+            case '=':
+            case '&':
+            case '|':
+            case '^':
+            case '~':
+            case ',':
+            case '!':
+                if (!lex_tokench(lex, ch) ||
+                    !lex_endtoken(lex))
+                {
+                    return (lex->tok->ttype = TOKEN_FATAL);
+                }
+                return (lex->tok->ttype = ch);
+            default:
+                break;
+        }
+
+        if (ch == '.')
+        {
+            if (!lex_tokench(lex, ch))
+                return (lex->tok->ttype = TOKEN_FATAL);
+            /* peek ahead once */
+            nextch = lex_getch(lex);
+            if (nextch != '.') {
+                lex_ungetch(lex, nextch);
+                if (!lex_endtoken(lex))
+                    return (lex->tok->ttype = TOKEN_FATAL);
+                return (lex->tok->ttype = ch);
+            }
+            /* peek ahead again */
+            nextch = lex_getch(lex);
+            if (nextch != '.') {
+                lex_ungetch(lex, nextch);
+                /* restore the second '.' eaten by the first peek; pushback is
+                 * LIFO, so the stream reads '.' then nextch again.
+                 * (Previously this pushed nextch twice, corrupting the stream
+                 * for any "..X" input.)
+                 */
+                lex_ungetch(lex, '.');
+                if (!lex_endtoken(lex))
+                    return (lex->tok->ttype = TOKEN_FATAL);
+                return (lex->tok->ttype = ch);
+            }
+            /* fill the token to be "..." */
+            if (!lex_tokench(lex, ch) ||
+                !lex_tokench(lex, ch) ||
+                !lex_endtoken(lex))
+            {
+                return (lex->tok->ttype = TOKEN_FATAL);
+            }
+            return (lex->tok->ttype = TOKEN_DOTS);
+        }
+    }
+
+    if (ch == ',' || ch == '.') {
+        if (!lex_tokench(lex, ch) ||
+            !lex_endtoken(lex))
+        {
+            return (lex->tok->ttype = TOKEN_FATAL);
+        }
+        return (lex->tok->ttype = TOKEN_OPERATOR);
+    }
+
+    if (ch == '+' || ch == '-' || /* ++, --, +=, -= and -> as well! */
+        ch == '>' || ch == '<' || /* <<, >>, <=, >= */
+        ch == '=' || ch == '!' || /* ==, != */
+        ch == '&' || ch == '|')   /* &&, ||, &=, |= */
+    {
+        if (!lex_tokench(lex, ch))
+            return (lex->tok->ttype = TOKEN_FATAL);
+
+        nextch = lex_getch(lex);
+        if (nextch == ch || nextch == '=') {
+            if (!lex_tokench(lex, nextch))
+                return (lex->tok->ttype = TOKEN_FATAL);
+        } else if (ch == '-' && nextch == '>') {
+            if (!lex_tokench(lex, nextch))
+                return (lex->tok->ttype = TOKEN_FATAL);
+        } else
+            lex_ungetch(lex, nextch);
+
+        if (!lex_endtoken(lex))
+            return (lex->tok->ttype = TOKEN_FATAL);
+        return (lex->tok->ttype = TOKEN_OPERATOR);
+    }
+
+    /*
+    if (ch == '^' || ch == '~' || ch == '!')
+    {
+        if (!lex_tokench(lex, ch) ||
+            !lex_endtoken(lex))
+        {
+            return (lex->tok->ttype = TOKEN_FATAL);
+        }
+        return (lex->tok->ttype = TOKEN_OPERATOR);
+    }
+    */
+
+    if (ch == '*' || ch == '/') /* *=, /= */
+    {
+        if (!lex_tokench(lex, ch))
+            return (lex->tok->ttype = TOKEN_FATAL);
+
+        nextch = lex_getch(lex);
+        if (nextch == '=') {
+            if (!lex_tokench(lex, nextch))
+                return (lex->tok->ttype = TOKEN_FATAL);
+        } else
+            lex_ungetch(lex, nextch);
+
+        if (!lex_endtoken(lex))
+            return (lex->tok->ttype = TOKEN_FATAL);
+        return (lex->tok->ttype = TOKEN_OPERATOR);
+    }
+
+    if (isident_start(ch))
+    {
+        const char *v;
+
+        if (!lex_tokench(lex, ch))
+            return (lex->tok->ttype = TOKEN_FATAL);
+        if (!lex_finish_ident(lex)) {
+            /* error? */
+            return (lex->tok->ttype = TOKEN_ERROR);
+        }
+        if (!lex_endtoken(lex))
+            return (lex->tok->ttype = TOKEN_FATAL);
+        lex->tok->ttype = TOKEN_IDENT;
+
+        v = lex->tok->value;
+        if (!strcmp(v, "void")) {
+            lex->tok->ttype = TOKEN_TYPENAME;
+            lex->tok->constval.t = TYPE_VOID;
+        } else if (!strcmp(v, "int")) {
+            lex->tok->ttype = TOKEN_TYPENAME;
+            lex->tok->constval.t = TYPE_INTEGER;
+        } else if (!strcmp(v, "float")) {
+            lex->tok->ttype = TOKEN_TYPENAME;
+            lex->tok->constval.t = TYPE_FLOAT;
+        } else if (!strcmp(v, "string")) {
+            lex->tok->ttype = TOKEN_TYPENAME;
+            lex->tok->constval.t = TYPE_STRING;
+        } else if (!strcmp(v, "entity")) {
+            lex->tok->ttype = TOKEN_TYPENAME;
+            lex->tok->constval.t = TYPE_ENTITY;
+        } else if (!strcmp(v, "vector")) {
+            lex->tok->ttype = TOKEN_TYPENAME;
+            lex->tok->constval.t = TYPE_VECTOR;
+        } else if (!strcmp(v, "for") ||
+                   !strcmp(v, "while") ||
+                   !strcmp(v, "do") ||
+                   !strcmp(v, "if") ||
+                   !strcmp(v, "else") ||
+                   !strcmp(v, "local") ||
+                   !strcmp(v, "return") ||
+                   !strcmp(v, "const"))
+            lex->tok->ttype = TOKEN_KEYWORD;
+
+        return lex->tok->ttype;
+    }
+
+    if (ch == '"')
+    {
+        lex->tok->ttype = lex_finish_string(lex, '"');
+        while (lex->tok->ttype == TOKEN_STRINGCONST)
+        {
+            /* Allow c style "string" "continuation" */
+            ch = lex_skipwhite(lex);
+            if (ch != '"') {
+                lex_ungetch(lex, ch);
+                break;
+            }
+
+            lex->tok->ttype = lex_finish_string(lex, '"');
+        }
+        if (!lex_endtoken(lex))
+            return (lex->tok->ttype = TOKEN_FATAL);
+        return lex->tok->ttype;
+    }
+
+    if (ch == '\'')
+    {
+        /* we parse character constants like string,
+         * but return TOKEN_CHARCONST, or a vector type if it fits...
+         * Likewise actual unescaping has to be done by the parser.
+         * The difference is we don't allow 'char' 'continuation'.
+         */
+        lex->tok->ttype = lex_finish_string(lex, '\'');
+        if (!lex_endtoken(lex))
+            return (lex->tok->ttype = TOKEN_FATAL);
+
+        /* It's a vector if we can successfully scan 3 floats */