+ lexerror(lex, "invalid frame macro");
+ return lex_do(lex);
+ }
+
+ /* single-character tokens */
+ switch (ch)
+ {
+ case '[':
+ case '(':
+ case ':':
+ case '?':
+ lex_tokench(lex, ch);
+ lex_endtoken(lex);
+ if (lex->flags.noops)
+ return (lex->tok.ttype = ch);
+ else
+ return (lex->tok.ttype = TOKEN_OPERATOR);
+ case ')':
+ case ';':
+ case '{':
+ case '}':
+ case ']':
+
+ case '#':
+ lex_tokench(lex, ch);
+ lex_endtoken(lex);
+ return (lex->tok.ttype = ch);
+ default:
+ break;
+ }
+
+ if (ch == '.') {
+ nextch = lex_getch(lex);
+ /* digits starting with a dot */
+ if (isdigit(nextch)) {
+ lex_ungetch(lex, nextch);
+ lex->tok.ttype = lex_finish_digit(lex, ch);
+ lex_endtoken(lex);
+ return lex->tok.ttype;
+ }
+ lex_ungetch(lex, nextch);
+ }
+
+ if (lex->flags.noops)
+ {
+ /* Detect characters early which are normally
+ * operators OR PART of an operator.
+ */
+ switch (ch)
+ {
+ /*
+ case '+':
+ case '-':
+ */
+ case '*':
+ case '/':
+ case '<':
+ case '>':
+ case '=':
+ case '&':
+ case '|':
+ case '^':
+ case '~':
+ case ',':
+ case '!':
+ lex_tokench(lex, ch);
+ lex_endtoken(lex);
+ return (lex->tok.ttype = ch);
+ default:
+ break;
+ }
+
+ if (ch == '.')
+ {
+ lex_tokench(lex, ch);
+ /* peek ahead once */
+ nextch = lex_getch(lex);
+ if (nextch != '.') {
+ lex_ungetch(lex, nextch);
+ lex_endtoken(lex);
+ return (lex->tok.ttype = ch);
+ }
+ /* peek ahead again */
+ nextch = lex_getch(lex);
+ if (nextch != '.') {
+ lex_ungetch(lex, nextch);
+ lex_ungetch(lex, '.');
+ lex_endtoken(lex);
+ return (lex->tok.ttype = ch);
+ }
+ /* fill the token to be "..." */
+ lex_tokench(lex, ch);
+ lex_tokench(lex, ch);
+ lex_endtoken(lex);
+ return (lex->tok.ttype = TOKEN_DOTS);
+ }
+ }
+
+ if (ch == ',' || ch == '.') {
+ lex_tokench(lex, ch);
+ lex_endtoken(lex);
+ return (lex->tok.ttype = TOKEN_OPERATOR);
+ }
+
+ if (ch == '+' || ch == '-' || /* ++, --, +=, -= and -> as well! */
+ ch == '>' || ch == '<' || /* <<, >>, <=, >= */
+ ch == '=' || ch == '!' || /* ==, != */
+ ch == '&' || ch == '|') /* &&, ||, &=, |= */
+ {
+ lex_tokench(lex, ch);
+
+ nextch = lex_getch(lex);
+ if (nextch == '=' || (nextch == ch && ch != '!')) {
+ lex_tokench(lex, nextch);
+ } else if (ch == '-' && nextch == '>') {
+ lex_tokench(lex, nextch);
+ } else if (ch == '&' && nextch == '~') {
+ thirdch = lex_getch(lex);
+ if (thirdch != '=') {
+ lex_ungetch(lex, thirdch);
+ lex_ungetch(lex, nextch);
+ }
+ else {
+ lex_tokench(lex, nextch);
+ lex_tokench(lex, thirdch);
+ }
+ } else
+ lex_ungetch(lex, nextch);
+
+ lex_endtoken(lex);
+ return (lex->tok.ttype = TOKEN_OPERATOR);
+ }
+
+ /*
+ if (ch == '^' || ch == '~' || ch == '!')
+ {
+ lex_tokench(lex, ch);
+ lex_endtoken(lex);
+ return (lex->tok.ttype = TOKEN_OPERATOR);
+ }
+ */
+
+ if (ch == '*' || ch == '/') /* *=, /= */
+ {
+ lex_tokench(lex, ch);
+
+ nextch = lex_getch(lex);
+ if (nextch == '=') {
+ lex_tokench(lex, nextch);
+ } else
+ lex_ungetch(lex, nextch);
+
+ lex_endtoken(lex);
+ return (lex->tok.ttype = TOKEN_OPERATOR);
+ }
+
+ if (isident_start(ch))
+ {
+ const char *v;
+
+ lex_tokench(lex, ch);
+ if (!lex_finish_ident(lex)) {
+ /* error? */
+ return (lex->tok.ttype = TOKEN_ERROR);
+ }
+ lex_endtoken(lex);
+ lex->tok.ttype = TOKEN_IDENT;
+
+ v = lex->tok.value;
+ if (!strcmp(v, "void")) {
+ lex->tok.ttype = TOKEN_TYPENAME;
+ lex->tok.constval.t = TYPE_VOID;
+ } else if (!strcmp(v, "int")) {
+ lex->tok.ttype = TOKEN_TYPENAME;
+ lex->tok.constval.t = TYPE_INTEGER;
+ } else if (!strcmp(v, "float")) {
+ lex->tok.ttype = TOKEN_TYPENAME;
+ lex->tok.constval.t = TYPE_FLOAT;
+ } else if (!strcmp(v, "string")) {
+ lex->tok.ttype = TOKEN_TYPENAME;
+ lex->tok.constval.t = TYPE_STRING;
+ } else if (!strcmp(v, "entity")) {
+ lex->tok.ttype = TOKEN_TYPENAME;
+ lex->tok.constval.t = TYPE_ENTITY;
+ } else if (!strcmp(v, "vector")) {
+ lex->tok.ttype = TOKEN_TYPENAME;
+ lex->tok.constval.t = TYPE_VECTOR;
+ } else {
+ size_t kw;
+ for (kw = 0; kw < num_keywords_qc; ++kw) {
+ if (!strcmp(v, keywords_qc[kw]))
+ return (lex->tok.ttype = TOKEN_KEYWORD);
+ }
+ if (opts_standard != COMPILER_QCC) {
+ for (kw = 0; kw < num_keywords_fg; ++kw) {
+ if (!strcmp(v, keywords_fg[kw]))
+ return (lex->tok.ttype = TOKEN_KEYWORD);
+ }
+ }
+ }
+
+ return lex->tok.ttype;
+ }
+
+ if (ch == '"')
+ {
+ lex->flags.nodigraphs = true;
+ if (lex->flags.preprocessing)
+ lex_tokench(lex, ch);
+ lex->tok.ttype = lex_finish_string(lex, '"');
+ if (lex->flags.preprocessing)
+ lex_tokench(lex, ch);
+ while (!lex->flags.preprocessing && lex->tok.ttype == TOKEN_STRINGCONST)
+ {
+ /* Allow c style "string" "continuation" */
+ ch = lex_skipwhite(lex, false);
+ if (ch != '"') {
+ lex_ungetch(lex, ch);
+ break;
+ }
+
+ lex->tok.ttype = lex_finish_string(lex, '"');
+ }
+ lex->flags.nodigraphs = false;
+ lex_endtoken(lex);
+ return lex->tok.ttype;
+ }
+
+ if (ch == '\'')
+ {
+ /* we parse character constants like string,
+ * but return TOKEN_CHARCONST, or a vector type if it fits...
+ * Likewise actual unescaping has to be done by the parser.
+ * The difference is we don't allow 'char' 'continuation'.
+ */
+ if (lex->flags.preprocessing)
+ lex_tokench(lex, ch);
+ lex->tok.ttype = lex_finish_string(lex, '\'');
+ if (lex->flags.preprocessing)
+ lex_tokench(lex, ch);
+ lex_endtoken(lex);
+
+ lex->tok.ttype = TOKEN_CHARCONST;
+ /* It's a vector if we can successfully scan 3 floats */