+ /* Here, relocations ARE possible - in fteqcc-enhanced-qc:
+ * void() foo; <- proto
+ * void() *fooptr = &foo;
+ * void() foo = { code }
+ */
+ if (!target->code.globaladdr) {
+ /* FIXME: check for the constant nullptr ir_value,
+ * since then code.globaladdr being 0 is valid.
+ */
+ irerror(global->context, "FIXME: Relocation support");
+ return false;
+ }
+
+ ir_value_code_setaddr(global, vec_size(code->globals));
+ vec_push(code->globals, target->code.globaladdr);
+ }
+ else
+ {
+ ir_value_code_setaddr(global, vec_size(code->globals));
+ vec_push(code->globals, 0);
+ }
+ if (global->code.globaladdr < 0)
+ return false;
+ return true;
+}
+
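+ /* Emit a single basic block (and, via recursion, any not-yet-generated
+ * successors) into the statement stream. Jumping into a block that hasn't
+ * been generated yet simply emits it in place, turning the jump into a
+ * fall-through.
+ */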
+static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *block)
+{
+ prog_section_statement_t stmt;
+ ir_instr *instr;
+ ir_block *target;
+ ir_block *ontrue;
+ ir_block *onfalse;
+ size_t stidx;
+ size_t i;
+ int j;
+
+ block->generated = true;
+ block->code_start = vec_size(code->statements);
+ for (i = 0; i < vec_size(block->instr); ++i)
+ {
+ instr = block->instr[i];
+
+ if (instr->opcode == VINSTR_PHI) {
+ irerror(block->context, "cannot generate virtual instruction (phi)");
+ return false;
+ }
+
+ if (instr->opcode == VINSTR_JUMP) {
+ target = instr->bops[0];
+ /* for unconditional jumps: if the target hasn't been generated
+ * yet, we generate it right here.
+ */
+ if (!target->generated)
+ return gen_blocks_recursive(code, func, target);
+
+ /* otherwise we generate a jump instruction */
+ stmt.opcode = INSTR_GOTO;
+ stmt.o1.s1 = (target->code_start) - vec_size(code->statements);
+ stmt.o2.s1 = 0;
+ stmt.o3.s1 = 0;
+ if (stmt.o1.s1 != 1)
+ code_push_statement(code, &stmt, instr->context);
+
+ /* no further instructions can be in this block */
+ return true;
+ }
+
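+ /* Note: the QCVM has no native XOR instruction, so VINSTR_BITXOR is
+ * lowered via the identity a ^ b == (a | b) - (a & b), which holds for
+ * the integer-valued floats these operations work on
+ * (e.g. 5 ^ 3: (5|3) - (5&3) == 7 - 1 == 6).
+ */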
+ if (instr->opcode == VINSTR_BITXOR) {
+ stmt.opcode = INSTR_BITOR;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context);
+ stmt.opcode = INSTR_BITAND;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]);
+ stmt.o3.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]);
+ code_push_statement(code, &stmt, instr->context);
+ stmt.opcode = INSTR_SUB_F;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[0]);
+ stmt.o2.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context);
+
+ /* instruction generated */
+ continue;
+ }
+
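+ /* The vector variants below expand to three float instructions, one per
+ * component: a vector occupies three consecutive globals, so bumping the
+ * operand addresses steps through _x, _y and _z.
+ */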
+ if (instr->opcode == VINSTR_BITAND_V) {
+ stmt.opcode = INSTR_BITAND;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context);
+ ++stmt.o1.s1;
+ ++stmt.o2.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context);
+ ++stmt.o1.s1;
+ ++stmt.o2.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context);
+
+ /* instruction generated */
+ continue;
+ }
+
+ if (instr->opcode == VINSTR_BITOR_V) {
+ stmt.opcode = INSTR_BITOR;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context);
+ ++stmt.o1.s1;
+ ++stmt.o2.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context);
+ ++stmt.o1.s1;
+ ++stmt.o2.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context);
+
+ /* instruction generated */
+ continue;
+ }
+
+ if (instr->opcode == VINSTR_BITXOR_V) {
+ for (j = 0; j < 3; ++j) {
+ stmt.opcode = INSTR_BITOR;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]) + j;
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]) + j;
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]) + j;
+ code_push_statement(code, &stmt, instr->context);
+ stmt.opcode = INSTR_BITAND;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]) + j;
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]) + j;
+ stmt.o3.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]) + j;
+ code_push_statement(code, &stmt, instr->context);
+ }
+ stmt.opcode = INSTR_SUB_V;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[0]);
+ stmt.o2.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context);
+
+ /* instruction generated */
+ continue;
+ }
+
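+ /* The *_VF variants combine a vector with a single float: only the
+ * vector operands (o1/o3) advance per component, while the float
+ * operand (o2) stays fixed.
+ */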
+ if (instr->opcode == VINSTR_BITAND_VF) {
+ stmt.opcode = INSTR_BITAND;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context);
+ ++stmt.o1.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context);
+ ++stmt.o1.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context);
+
+ /* instruction generated */
+ continue;
+ }
+
+ if (instr->opcode == VINSTR_BITOR_VF) {
+ stmt.opcode = INSTR_BITOR;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context);
+ ++stmt.o1.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context);
+ ++stmt.o1.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context);
+
+ /* instruction generated */
+ continue;
+ }
+
+ if (instr->opcode == VINSTR_BITXOR_VF) {
+ for (j = 0; j < 3; ++j) {
+ stmt.opcode = INSTR_BITOR;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]) + j;
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]) + j;
+ code_push_statement(code, &stmt, instr->context);
+ stmt.opcode = INSTR_BITAND;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]) + j;
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]);
+ stmt.o3.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]) + j;
+ code_push_statement(code, &stmt, instr->context);
+ }
+ stmt.opcode = INSTR_SUB_V;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[0]);
+ stmt.o2.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context);
+
+ /* instruction generated */
+ continue;
+ }
+
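+ /* Cross product, expanded per component:
+ * out[j] = a[(j+1)%3] * b[(j+2)%3] - a[(j+2)%3] * b[(j+1)%3]
+ * The positive products go into the output, the negated ones into
+ * vinstr_temp[0], and a final SUB_V combines the two.
+ */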
+ if (instr->opcode == VINSTR_CROSS) {
+ stmt.opcode = INSTR_MUL_F;
+ for (j = 0; j < 3; ++j) {
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]) + (j + 1) % 3;
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]) + (j + 2) % 3;
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]) + j;
+ code_push_statement(code, &stmt, instr->context);
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]) + (j + 2) % 3;
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]) + (j + 1) % 3;
+ stmt.o3.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]) + j;
+ code_push_statement(code, &stmt, instr->context);
+ }
+ stmt.opcode = INSTR_SUB_V;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[0]);
+ stmt.o2.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context);
+
+ /* instruction generated */
+ continue;
+ }
+
+ if (instr->opcode == VINSTR_COND) {
+ ontrue = instr->bops[0];
+ onfalse = instr->bops[1];
+ /* TODO: have the AST signal which block should
+ * come first: e.g. optimize IFs without ELSE...
+ */
+
+ stmt.o1.u1 = ir_value_code_addr(instr->_ops[0]);
+ stmt.o2.u1 = 0;
+ stmt.o3.s1 = 0;
+
+ if (ontrue->generated) {
+ stmt.opcode = INSTR_IF;
+ stmt.o2.s1 = (ontrue->code_start) - vec_size(code->statements);
+ if (stmt.o2.s1 != 1)
+ code_push_statement(code, &stmt, instr->context);
+ }
+ if (onfalse->generated) {
+ stmt.opcode = INSTR_IFNOT;
+ stmt.o2.s1 = (onfalse->code_start) - vec_size(code->statements);
+ if (stmt.o2.s1 != 1)
+ code_push_statement(code, &stmt, instr->context);
+ }
+ if (!ontrue->generated) {
+ if (onfalse->generated)
+ return gen_blocks_recursive(code, func, ontrue);
+ }
+ if (!onfalse->generated) {
+ if (ontrue->generated)
+ return gen_blocks_recursive(code, func, onfalse);
+ }
+ /* neither ontrue nor onfalse have been generated yet */
+ stmt.opcode = INSTR_IFNOT;
+ if (!instr->likely) {
+ /* Honor the likelihood hint */
+ ir_block *tmp = onfalse;
+ stmt.opcode = INSTR_IF;
+ onfalse = ontrue;
+ ontrue = tmp;
+ }
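+ /* After the swap above, the fall-through path (no jump taken) is
+ * always the block hinted as likely.
+ */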
+ stidx = vec_size(code->statements);
+ code_push_statement(code, &stmt, instr->context);
+ /* on false we jump, so add ontrue-path */
+ if (!gen_blocks_recursive(code, func, ontrue))
+ return false;
+ /* fixup the jump address */
+ code->statements[stidx].o2.s1 = vec_size(code->statements) - stidx;
+ /* generate onfalse path */
+ if (onfalse->generated) {
+ /* fixup the jump address */
+ code->statements[stidx].o2.s1 = (onfalse->code_start) - (stidx);
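+ /* A conditional jump with a relative target of 1 skips to the very
+ * next statement, which here is also the last one: the branch is a
+ * no-op, so replace it with that statement and pop the duplicate,
+ * adjusting a negative (backward) relative jump that moved up a slot.
+ */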
+ if (stidx+2 == vec_size(code->statements) && code->statements[stidx].o2.s1 == 1) {
+ code->statements[stidx] = code->statements[stidx+1];
+ if (code->statements[stidx].o1.s1 < 0)
+ code->statements[stidx].o1.s1++;
+ code_pop_statement(code);
+ }
+ stmt.opcode = vec_last(code->statements).opcode;
+ if (stmt.opcode == INSTR_GOTO ||
+ stmt.opcode == INSTR_IF ||
+ stmt.opcode == INSTR_IFNOT ||
+ stmt.opcode == INSTR_RETURN ||
+ stmt.opcode == INSTR_DONE)
+ {
+ /* no use jumping from here */
+ return true;
+ }
+ /* may have been generated in the previous recursive call */
+ stmt.opcode = INSTR_GOTO;
+ stmt.o1.s1 = (onfalse->code_start) - vec_size(code->statements);
+ stmt.o2.s1 = 0;
+ stmt.o3.s1 = 0;
+ if (stmt.o1.s1 != 1)
+ code_push_statement(code, &stmt, instr->context);
+ return true;
+ }
+ else if (stidx+2 == vec_size(code->statements) && code->statements[stidx].o2.s1 == 1) {
+ code->statements[stidx] = code->statements[stidx+1];
+ if (code->statements[stidx].o1.s1 < 0)
+ code->statements[stidx].o1.s1++;
+ code_pop_statement(code);
+ }
+ /* if not, generate now */
+ return gen_blocks_recursive(code, func, onfalse);
+ }
+
+ if ( (instr->opcode >= INSTR_CALL0 && instr->opcode <= INSTR_CALL8)
+ || instr->opcode == VINSTR_NRCALL)
+ {
+ size_t p, first;
+ ir_value *retvalue;
+
+ first = vec_size(instr->params);
+ if (first > 8)
+ first = 8;
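+ /* the first (up to) 8 arguments go into the VM's parameter registers;
+ * each parameter slot is 3 globals wide, hence OFS_PARM0 + 3 * p below
+ */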
+ for (p = 0; p < first; ++p)
+ {
+ ir_value *param = instr->params[p];
+ if (param->callparam)
+ continue;
+
+ stmt.opcode = INSTR_STORE_F;
+ stmt.o3.u1 = 0;
+
+ if (param->vtype == TYPE_FIELD)
+ stmt.opcode = field_store_instr[param->fieldtype];
+ else if (param->vtype == TYPE_NIL)
+ stmt.opcode = INSTR_STORE_V;
+ else
+ stmt.opcode = type_store_instr[param->vtype];
+ stmt.o1.u1 = ir_value_code_addr(param);
+ stmt.o2.u1 = OFS_PARM0 + 3 * p;
+
+ if (param->vtype == TYPE_VECTOR && (param->flags & IR_FLAG_SPLIT_VECTOR)) {
+ /* store the 3 separate float members */
+ stmt.opcode = INSTR_STORE_F;
+ stmt.o1.u1 = ir_value_code_addr(param->members[0]);
+ code_push_statement(code, &stmt, instr->context);
+ stmt.o2.u1++;
+ stmt.o1.u1 = ir_value_code_addr(param->members[1]);
+ code_push_statement(code, &stmt, instr->context);
+ stmt.o2.u1++;
+ stmt.o1.u1 = ir_value_code_addr(param->members[2]);
+ code_push_statement(code, &stmt, instr->context);
+ }
+ else
+ code_push_statement(code, &stmt, instr->context);
+ }
+ /* Now handle extparams */
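+ /* arguments beyond the 8th don't fit into parameter registers; they are
+ * passed through dedicated EXTPARM globals, generated on demand by
+ * ir_gen_extparam below
+ */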
+ first = vec_size(instr->params);
+ for (; p < first; ++p)
+ {
+ ir_builder *ir = func->owner;
+ ir_value *param = instr->params[p];
+ ir_value *targetparam;
+
+ if (param->callparam)
+ continue;
+
+ if (p-8 >= vec_size(ir->extparams))
+ ir_gen_extparam(ir);
+
+ targetparam = ir->extparams[p-8];
+
+ stmt.opcode = INSTR_STORE_F;
+ stmt.o3.u1 = 0;
+
+ if (param->vtype == TYPE_FIELD)
+ stmt.opcode = field_store_instr[param->fieldtype];
+ else if (param->vtype == TYPE_NIL)
+ stmt.opcode = INSTR_STORE_V;
+ else
+ stmt.opcode = type_store_instr[param->vtype];
+ stmt.o1.u1 = ir_value_code_addr(param);
+ stmt.o2.u1 = ir_value_code_addr(targetparam);
+ if (param->vtype == TYPE_VECTOR && (param->flags & IR_FLAG_SPLIT_VECTOR)) {
+ /* store the 3 separate float members */
+ stmt.opcode = INSTR_STORE_F;
+ stmt.o1.u1 = ir_value_code_addr(param->members[0]);
+ code_push_statement(code, &stmt, instr->context);
+ stmt.o2.u1++;
+ stmt.o1.u1 = ir_value_code_addr(param->members[1]);
+ code_push_statement(code, &stmt, instr->context);
+ stmt.o2.u1++;
+ stmt.o1.u1 = ir_value_code_addr(param->members[2]);
+ code_push_statement(code, &stmt, instr->context);
+ }
+ else
+ code_push_statement(code, &stmt, instr->context);
+ }
+
+ stmt.opcode = INSTR_CALL0 + vec_size(instr->params);
+ if (stmt.opcode > INSTR_CALL8)
+ stmt.opcode = INSTR_CALL8;
+ stmt.o1.u1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o2.u1 = 0;
+ stmt.o3.u1 = 0;
+ code_push_statement(code, &stmt, instr->context);
+
+ retvalue = instr->_ops[0];
+ if (retvalue && retvalue->store != store_return &&
+ (retvalue->store == store_global || vec_size(retvalue->life)))
+ {
+ /* not to be kept in OFS_RETURN */
+ if (retvalue->vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS))
+ stmt.opcode = field_store_instr[retvalue->fieldtype];
+ else
+ stmt.opcode = type_store_instr[retvalue->vtype];
+ stmt.o1.u1 = OFS_RETURN;
+ stmt.o2.u1 = ir_value_code_addr(retvalue);
+ stmt.o3.u1 = 0;
+ code_push_statement(code, &stmt, instr->context);
+ }
+ continue;
+ }
+
+ if (instr->opcode == INSTR_STATE) {
+ stmt.opcode = instr->opcode;
+ if (instr->_ops[0])
+ stmt.o1.u1 = ir_value_code_addr(instr->_ops[0]);
+ if (instr->_ops[1])
+ stmt.o2.u1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o3.u1 = 0;
+ code_push_statement(code, &stmt, instr->context);
+ continue;
+ }
+
+ stmt.opcode = instr->opcode;
+ stmt.o1.u1 = 0;
+ stmt.o2.u1 = 0;
+ stmt.o3.u1 = 0;
+
+ /* This is the general order of operands */
+ if (instr->_ops[0])
+ stmt.o3.u1 = ir_value_code_addr(instr->_ops[0]);
+
+ if (instr->_ops[1])
+ stmt.o1.u1 = ir_value_code_addr(instr->_ops[1]);
+
+ if (instr->_ops[2])
+ stmt.o2.u1 = ir_value_code_addr(instr->_ops[2]);
+
+ if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
+ {
+ stmt.o1.u1 = stmt.o3.u1;
+ stmt.o3.u1 = 0;
+ }
+ else if ((stmt.opcode >= INSTR_STORE_F &&
+ stmt.opcode <= INSTR_STORE_FNC) ||
+ (stmt.opcode >= INSTR_STOREP_F &&
+ stmt.opcode <= INSTR_STOREP_FNC))
+ {
+ /* 2-operand instructions with A -> B */
+ stmt.o2.u1 = stmt.o3.u1;
+ stmt.o3.u1 = 0;
+
+ /* tiny optimization, don't output
+ * STORE a, a
+ */
+ if (stmt.o2.u1 == stmt.o1.u1 &&
+ OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
+ {
+ ++opts_optimizationcount[OPTIM_PEEPHOLE];
+ continue;
+ }
+ }
+ code_push_statement(code, &stmt, instr->context);
+ }
+ return true;
+}
+
+static bool gen_function_code(code_t *code, ir_function *self)
+{
+ ir_block *block;
+ prog_section_statement_t stmt, *retst;
+
+ /* Starting from the entry point, we generate blocks "as they come"
+ * for now. Dead blocks obviously will not be translated.
+ */
+ if (!vec_size(self->blocks)) {
+ irerror(self->context, "Function '%s' declared without body.", self->name);
+ return false;
+ }
+
+ block = self->blocks[0];
+ if (block->generated)
+ return true;
+
+ if (!gen_blocks_recursive(code, self, block)) {
+ irerror(self->context, "failed to generate blocks for '%s'", self->name);
+ return false;
+ }
+
+ /* code_write and qcvm -disasm need to know that the function ends here */
+ retst = &vec_last(code->statements);
+ if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) &&
+ self->outtype == TYPE_VOID &&
+ retst->opcode == INSTR_RETURN &&
+ !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1)
+ {
+ retst->opcode = INSTR_DONE;
+ ++opts_optimizationcount[OPTIM_VOID_RETURN];
+ } else {
+ lex_ctx_t last;
+
+ stmt.opcode = INSTR_DONE;
+ stmt.o1.u1 = 0;
+ stmt.o2.u1 = 0;
+ stmt.o3.u1 = 0;
+ last.line = vec_last(code->linenums);
+ last.column = vec_last(code->columnnums);
+
+ code_push_statement(code, &stmt, last);
+ }
+ return true;
+}
+
+static qcint_t ir_builder_filestring(ir_builder *ir, const char *filename)
+{
+ /* NOTE: filename pointers are copied; we never strdup them,
+ * so we can use pointer comparison to find the string.
+ */
+ size_t i;
+ qcint_t str;
+
+ for (i = 0; i < vec_size(ir->filenames); ++i) {
+ if (ir->filenames[i] == filename)
+ return ir->filestrings[i];
+ }
+
+ str = code_genstring(ir->code, filename);
+ vec_push(ir->filenames, filename);
+ vec_push(ir->filestrings, str);
+ return str;
+}
+
+static bool gen_global_function(ir_builder *ir, ir_value *global)
+{
+ prog_section_function_t fun;
+ ir_function *irfun;
+
+ size_t i;
+
+ if (!global->hasvalue || (!global->constval.vfunc))
+ {
+ irerror(global->context, "Invalid state of function-global: not constant: %s", global->name);
+ return false;
+ }
+
+ irfun = global->constval.vfunc;
+
+ fun.name = global->code.name;
+ fun.file = ir_builder_filestring(ir, global->context.file);
+ fun.profile = 0; /* always 0 */
+ fun.nargs = vec_size(irfun->params);
+ if (fun.nargs > 8)
+ fun.nargs = 8;
+
+ for (i = 0;i < 8; ++i) {
+ if ((int32_t)i >= fun.nargs)
+ fun.argsize[i] = 0;
+ else
+ fun.argsize[i] = type_sizeof_[irfun->params[i]];
+ }
+
+ fun.firstlocal = 0;
+ fun.locals = irfun->allocated_locals;
+
+ if (irfun->builtin)
+ fun.entry = irfun->builtin+1;
+ else {
+ irfun->code_function_def = vec_size(ir->code->functions);
+ fun.entry = vec_size(ir->code->statements);
+ }
+
+ vec_push(ir->code->functions, fun);
+ return true;
+}
+
+static ir_value* ir_gen_extparam_proto(ir_builder *ir)
+{
+ ir_value *global;
+ char name[128];
+
+ util_snprintf(name, sizeof(name), "EXTPARM#%i", (int)(vec_size(ir->extparam_protos)));
+ global = ir_value_var(name, store_global, TYPE_VECTOR);
+
+ vec_push(ir->extparam_protos, global);
+ return global;
+}
+
+static void ir_gen_extparam(ir_builder *ir)
+{
+ prog_section_def_t def;
+ ir_value *global;
+
+ if (vec_size(ir->extparam_protos) < vec_size(ir->extparams)+1)
+ global = ir_gen_extparam_proto(ir);
+ else
+ global = ir->extparam_protos[vec_size(ir->extparams)];
+
+ def.name = code_genstring(ir->code, global->name);
+ def.type = TYPE_VECTOR;
+ def.offset = vec_size(ir->code->globals);
+
+ vec_push(ir->code->defs, def);
+
+ ir_value_code_setaddr(global, def.offset);
+
+ vec_push(ir->code->globals, 0);
+ vec_push(ir->code->globals, 0);
+ vec_push(ir->code->globals, 0);
+
+ vec_push(ir->extparams, global);
+}
+
+static bool gen_function_extparam_copy(code_t *code, ir_function *self)
+{
+ size_t i, ext, numparams;
+
+ ir_builder *ir = self->owner;
+ ir_value *ep;
+ prog_section_statement_t stmt;
+
+ numparams = vec_size(self->params);
+ if (!numparams)
+ return true;
+
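+ /* copy each extparam global into its corresponding local so the function
+ * body can address parameters 9+ like any other local
+ */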
+ stmt.opcode = INSTR_STORE_F;
+ stmt.o3.s1 = 0;
+ for (i = 8; i < numparams; ++i) {
+ ext = i - 8;
+ if (ext >= vec_size(ir->extparams))
+ ir_gen_extparam(ir);
+
+ ep = ir->extparams[ext];
+
+ stmt.opcode = type_store_instr[self->locals[i]->vtype];
+ if (self->locals[i]->vtype == TYPE_FIELD &&
+ self->locals[i]->fieldtype == TYPE_VECTOR)
+ {
+ stmt.opcode = INSTR_STORE_V;
+ }
+ stmt.o1.u1 = ir_value_code_addr(ep);
+ stmt.o2.u1 = ir_value_code_addr(self->locals[i]);
+ code_push_statement(code, &stmt, self->context);
+ }
+
+ return true;
+}
+
+static bool gen_function_varargs_copy(code_t *code, ir_function *self)
+{
+ size_t i, ext, numparams, maxparams;
+
+ ir_builder *ir = self->owner;
+ ir_value *ep;
+ prog_section_statement_t stmt;
+
+ numparams = vec_size(self->params);
+ if (!numparams)
+ return true;
+
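+ /* copy the vararg slots: those still within the first 8 parameter
+ * registers come directly from OFS_PARMn, the rest from extparams
+ */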
+ stmt.opcode = INSTR_STORE_V;
+ stmt.o3.s1 = 0;
+ maxparams = numparams + self->max_varargs;
+ for (i = numparams; i < maxparams; ++i) {
+ if (i < 8) {
+ stmt.o1.u1 = OFS_PARM0 + 3*i;
+ stmt.o2.u1 = ir_value_code_addr(self->locals[i]);
+ code_push_statement(code, &stmt, self->context);
+ continue;
+ }
+ ext = i - 8;
+ while (ext >= vec_size(ir->extparams))
+ ir_gen_extparam(ir);
+
+ ep = ir->extparams[ext];
+
+ stmt.o1.u1 = ir_value_code_addr(ep);
+ stmt.o2.u1 = ir_value_code_addr(self->locals[i]);
+ code_push_statement(code, &stmt, self->context);
+ }
+
+ return true;
+}
+
+static bool gen_function_locals(ir_builder *ir, ir_value *global)
+{
+ prog_section_function_t *def;
+ ir_function *irfun;
+ size_t i;
+ uint32_t firstlocal, firstglobal;
+
+ irfun = global->constval.vfunc;
+ def = ir->code->functions + irfun->code_function_def;
+
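+ /* with the overlap-locals optimization enabled (and no -g debug info),
+ * functions whose locals are allowed to overlap share one common local
+ * area; otherwise the function gets a fresh region at the end of the
+ * globals
+ */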
+ if (OPTS_OPTION_BOOL(OPTION_G) ||
+ !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS) ||
+ (irfun->flags & IR_FLAG_MASK_NO_OVERLAP))
+ {
+ firstlocal = def->firstlocal = vec_size(ir->code->globals);
+ } else {
+ firstlocal = def->firstlocal = ir->first_common_local;
+ ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
+ }
+
+ firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? ir->first_common_globaltemp : firstlocal);
+
+ for (i = vec_size(ir->code->globals); i < firstlocal + irfun->allocated_locals; ++i)
+ vec_push(ir->code->globals, 0);
+ for (i = 0; i < vec_size(irfun->locals); ++i) {
+ ir_value *v = irfun->locals[i];
+ if (v->locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
+ ir_value_code_setaddr(v, firstlocal + v->code.local);
+ if (!ir_builder_gen_global(ir, irfun->locals[i], true)) {
+ irerror(irfun->locals[i]->context, "failed to generate local %s", irfun->locals[i]->name);
+ return false;
+ }
+ }
+ else
+ ir_value_code_setaddr(v, firstglobal + v->code.local);
+ }
+ for (i = 0; i < vec_size(irfun->values); ++i)
+ {
+ ir_value *v = irfun->values[i];
+ if (v->callparam)
+ continue;
+ if (v->locked)
+ ir_value_code_setaddr(v, firstlocal + v->code.local);
+ else
+ ir_value_code_setaddr(v, firstglobal + v->code.local);
+ }
+ return true;
+}
+
+static bool gen_global_function_code(ir_builder *ir, ir_value *global)
+{
+ prog_section_function_t *fundef;
+ ir_function *irfun;
+
+ (void)ir;
+
+ irfun = global->constval.vfunc;
+ if (!irfun) {
+ if (global->cvq == CV_NONE) {
+ if (irwarning(global->context, WARN_IMPLICIT_FUNCTION_POINTER,
+ "function `%s` has no body and in QC implicitly becomes a function-pointer",
+ global->name))
+ {
+ /* Not bailing out just now. If this happens a lot you don't want to have
+ * to rerun gmqcc for each such function.
+ */
+
+ /* return false; */
+ }
+ }
+ /* this was a function pointer, don't generate code for those */
+ return true;
+ }
+
+ if (irfun->builtin)
+ return true;
+
+ /*
+ * If there is no definition and the function is erasable, we can skip
+ * outputting it to begin with.
+ */
+ if (global->flags & IR_FLAG_ERASABLE && irfun->code_function_def < 0) {
+ return true;
+ }
+
+ if (irfun->code_function_def < 0) {
+ irerror(irfun->context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->name);
+ return false;
+ }
+ fundef = &ir->code->functions[irfun->code_function_def];
+
+ fundef->entry = vec_size(ir->code->statements);
+ if (!gen_function_locals(ir, global)) {
+ irerror(irfun->context, "Failed to generate locals for function %s", irfun->name);
+ return false;
+ }
+ if (!gen_function_extparam_copy(ir->code, irfun)) {
+ irerror(irfun->context, "Failed to generate extparam-copy code for function %s", irfun->name);
+ return false;
+ }
+ if (irfun->max_varargs && !gen_function_varargs_copy(ir->code, irfun)) {
+ irerror(irfun->context, "Failed to generate vararg-copy code for function %s", irfun->name);
+ return false;
+ }
+ if (!gen_function_code(ir->code, irfun)) {
+ irerror(irfun->context, "Failed to generate code for function %s", irfun->name);
+ return false;
+ }
+ return true;
+}
+
+static void gen_vector_defs(code_t *code, prog_section_def_t def, const char *name)
+{
+ char *component;
+ size_t len, i;
+
+ if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS))
+ return;
+
+ def.type = TYPE_FLOAT;
+
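+ /* emit three float defs named name_x, name_y, name_z at consecutive
+ * offsets so the components can be addressed individually
+ */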
+ len = strlen(name);
+
+ component = (char*)mem_a(len+3);
+ memcpy(component, name, len);
+ len += 2;
+ component[len-0] = 0;
+ component[len-2] = '_';
+
+ component[len-1] = 'x';
+
+ for (i = 0; i < 3; ++i) {
+ def.name = code_genstring(code, component);
+ vec_push(code->defs, def);
+ def.offset++;
+ component[len-1]++;
+ }
+
+ mem_d(component);
+}
+
+static void gen_vector_fields(code_t *code, prog_section_field_t fld, const char *name)
+{
+ char *component;
+ size_t len, i;
+
+ if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
+ return;
+
+ fld.type = TYPE_FLOAT;
+
+ len = strlen(name);
+
+ component = (char*)mem_a(len+3);
+ memcpy(component, name, len);
+ len += 2;
+ component[len-0] = 0;
+ component[len-2] = '_';
+
+ component[len-1] = 'x';
+
+ for (i = 0; i < 3; ++i) {
+ fld.name = code_genstring(code, component);
+ vec_push(code->fields, fld);
+ fld.offset++;
+ component[len-1]++;
+ }
+
+ mem_d(component);
+}
+
+static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool islocal)
+{
+ size_t i;
+ int32_t *iptr;
+ prog_section_def_t def;
+ bool pushdef = opts.optimizeoff;
+
+ /* we don't generate split-vectors */
+ if (global->vtype == TYPE_VECTOR && (global->flags & IR_FLAG_SPLIT_VECTOR))
+ return true;
+
+ def.type = global->vtype;
+ def.offset = vec_size(self->code->globals);
+ def.name = 0;
+ if (OPTS_OPTION_BOOL(OPTION_G) || !islocal)
+ {
+ pushdef = true;
+
+ /*
+ * if the global is erasable and never read, skip
+ * outputting it.
+ */
+ if (global->flags & IR_FLAG_ERASABLE && vec_size(global->reads) == 0) {
+ return true;
+ }
+
+ if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) &&
+ !(global->flags & IR_FLAG_INCLUDE_DEF) &&
+ (global->name[0] == '#' || global->cvq == CV_CONST))
+ {
+ pushdef = false;
+ }
+
+ if (pushdef) {
+ if (global->name[0] == '#') {
+ if (!self->str_immediate)
+ self->str_immediate = code_genstring(self->code, "IMMEDIATE");
+ def.name = global->code.name = self->str_immediate;
+ }
+ else
+ def.name = global->code.name = code_genstring(self->code, global->name);
+ }
+ else
+ def.name = 0;
+ if (islocal) {
+ def.offset = ir_value_code_addr(global);
+ vec_push(self->code->defs, def);
+ if (global->vtype == TYPE_VECTOR)
+ gen_vector_defs(self->code, def, global->name);
+ else if (global->vtype == TYPE_FIELD && global->fieldtype == TYPE_VECTOR)
+ gen_vector_defs(self->code, def, global->name);
+ return true;
+ }
+ }
+ if (islocal)
+ return true;
+
+ switch (global->vtype)
+ {
+ case TYPE_VOID:
+ if (!strcmp(global->name, "end_sys_globals")) {
+ /* TODO: remember this point... all the defs before this one
+ * should be checksummed and added to progdefs.h when we generate it.
+ */
+ }
+ else if (!strcmp(global->name, "end_sys_fields")) {
+ /* TODO: same as above but for entity fields rather than globals
+ */
+ }
+ else if(irwarning(global->context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`",
+ global->name))
+ {
+ /* Not bailing out */
+ /* return false; */
+ }
+ /* I'd argue setting it to 0 is sufficient, but maybe some engines depend
+ * on knowing how far the system fields actually go? Though the engine
+ * knows this anyway...
+ * Maybe this could be an -f option.
+ * fteqcc creates data for end_sys_* - of size 1 - so let's do the same.
+ */
+ ir_value_code_setaddr(global, vec_size(self->code->globals));
+ vec_push(self->code->globals, 0);
+ /* Add the def */
+ if (pushdef) vec_push(self->code->defs, def);
+ return true;
+ case TYPE_POINTER:
+ if (pushdef) vec_push(self->code->defs, def);
+ return gen_global_pointer(self->code, global);
+ case TYPE_FIELD:
+ if (pushdef) {
+ vec_push(self->code->defs, def);
+ if (global->fieldtype == TYPE_VECTOR)
+ gen_vector_defs(self->code, def, global->name);
+ }
+ return gen_global_field(self->code, global);
+ case TYPE_ENTITY:
+ /* fall through */
+ case TYPE_FLOAT:
+ {
+ ir_value_code_setaddr(global, vec_size(self->code->globals));
+ if (global->hasvalue) {
+ if (global->cvq == CV_CONST && !vec_size(global->reads))
+ return true;
+ iptr = (int32_t*)&global->constval.ivec[0];
+ vec_push(self->code->globals, *iptr);
+ } else {
+ vec_push(self->code->globals, 0);
+ }
+ if (!islocal && global->cvq != CV_CONST)
+ def.type |= DEF_SAVEGLOBAL;
+ if (pushdef) vec_push(self->code->defs, def);
+
+ return global->code.globaladdr >= 0;
+ }
+ case TYPE_STRING:
+ {
+ ir_value_code_setaddr(global, vec_size(self->code->globals));
+ if (global->hasvalue) {
+ uint32_t load;
+ if (global->cvq == CV_CONST && !vec_size(global->reads))
+ return true;
+ load = code_genstring(self->code, global->constval.vstring);
+ vec_push(self->code->globals, load);
+ } else {
+ vec_push(self->code->globals, 0);
+ }
+ if (!islocal && global->cvq != CV_CONST)
+ def.type |= DEF_SAVEGLOBAL;
+ if (pushdef) vec_push(self->code->defs, def);
+ return global->code.globaladdr >= 0;
+ }
+ case TYPE_VECTOR:
+ {
+ size_t d;
+ ir_value_code_setaddr(global, vec_size(self->code->globals));
+ if (global->hasvalue) {
+ iptr = (int32_t*)&global->constval.ivec[0];
+ vec_push(self->code->globals, iptr[0]);
+ if (global->code.globaladdr < 0)
+ return false;
+ for (d = 1; d < type_sizeof_[global->vtype]; ++d) {
+ vec_push(self->code->globals, iptr[d]);
+ }
+ } else {
+ vec_push(self->code->globals, 0);
+ if (global->code.globaladdr < 0)
+ return false;
+ for (d = 1; d < type_sizeof_[global->vtype]; ++d) {
+ vec_push(self->code->globals, 0);
+ }
+ }
+ if (!islocal && global->cvq != CV_CONST)
+ def.type |= DEF_SAVEGLOBAL;
+
+ if (pushdef) {
+ vec_push(self->code->defs, def);
+ def.type &= ~DEF_SAVEGLOBAL;
+ gen_vector_defs(self->code, def, global->name);
+ }
+ return global->code.globaladdr >= 0;
+ }
+ case TYPE_FUNCTION:
+ ir_value_code_setaddr(global, vec_size(self->code->globals));
+ if (!global->hasvalue) {
+ vec_push(self->code->globals, 0);
+ if (global->code.globaladdr < 0)
+ return false;
+ } else {
+ vec_push(self->code->globals, vec_size(self->code->functions));
+ if (!gen_global_function(self, global))
+ return false;
+ }
+ if (!islocal && global->cvq != CV_CONST)
+ def.type |= DEF_SAVEGLOBAL;
+ if (pushdef) vec_push(self->code->defs, def);
+ return true;
+ case TYPE_VARIANT:
+ /* assume biggest type */
+ ir_value_code_setaddr(global, vec_size(self->code->globals));
+ vec_push(self->code->globals, 0);
+ for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i)
+ vec_push(self->code->globals, 0);
+ return true;
+ default:
+ /* refuse to create 'void' type or any other fancy business. */
+ irerror(global->context, "Invalid type for global variable `%s`: %s",
+ global->name, type_name[global->vtype]);
+ return false;
+ }
+}
+
+static GMQCC_INLINE void ir_builder_prepare_field(code_t *code, ir_value *field)
+{
+ field->code.fieldaddr = code_alloc_field(code, type_sizeof_[field->fieldtype]);
+}
+
+static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
+{
+ prog_section_def_t def;
+ prog_section_field_t fld;
+
+ (void)self;
+
+ def.type = (uint16_t)field->vtype;
+ def.offset = (uint16_t)vec_size(self->code->globals);
+
+ /* create a global named the same as the field */
+ if (OPTS_OPTION_U32(OPTION_STANDARD) == COMPILER_GMQCC) {
+ /* in our standard, the global gets a dot prefix */
+ size_t len = strlen(field->name);
+ char name[1024];
+
+ /* we really don't want to have to allocate this, and 1024
+ * bytes is more than enough for a variable/field name
+ */
+ if (len+2 >= sizeof(name)) {
+ irerror(field->context, "invalid field name size: %u", (unsigned int)len);
+ return false;
+ }
+
+ name[0] = '.';
+ memcpy(name+1, field->name, len); /* no strncpy - we used strlen above */
+ name[len+1] = 0;
+
+ def.name = code_genstring(self->code, name);
+ fld.name = def.name + 1; /* we reuse that string table entry */
+ } else {
+ /* in plain QC, there cannot be a global with the same name,
+ * and so we also name the global the same.
+ * FIXME: fteqcc should create such a global as well;
+ * check whether it actually uses the same name. It probably does.
+ */
+ def.name = code_genstring(self->code, field->name);
+ fld.name = def.name;
+ }
+
+ field->code.name = def.name;
+
+ vec_push(self->code->defs, def);
+
+ fld.type = field->fieldtype;
+
+ if (fld.type == TYPE_VOID) {
+ irerror(field->context, "field is missing a type: %s - don't know its size", field->name);
+ return false;
+ }
+
+ fld.offset = field->code.fieldaddr;
+
+ vec_push(self->code->fields, fld);
+
+ ir_value_code_setaddr(field, vec_size(self->code->globals));
+ vec_push(self->code->globals, fld.offset);
+ if (fld.type == TYPE_VECTOR) {
+ vec_push(self->code->globals, fld.offset+1);
+ vec_push(self->code->globals, fld.offset+2);
+ }
+
+ if (field->fieldtype == TYPE_VECTOR) {
+ gen_vector_defs (self->code, def, field->name);
+ gen_vector_fields(self->code, fld, field->name);
+ }
+
+ return field->code.globaladdr >= 0;
+}
+
+static void ir_builder_collect_reusables(ir_builder *builder) {
+ size_t i;
+ ir_value **reusables = NULL;
+ for (i = 0; i < vec_size(builder->globals); ++i) {
+ ir_value *value = builder->globals[i];
+ if (value->vtype != TYPE_FLOAT || !value->hasvalue)
+ continue;
+ if (value->cvq == CV_CONST || (value->name && value->name[0] == '#')) {
+ vec_push(reusables, value);
+ }
+ }
+ builder->const_floats = reusables;
+}
+
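+ /* Replace a constant vector that is only ever passed directly to calls
+ * with three float immediates, reusing existing constant floats where the
+ * component values match; the call emitter then stores the three floats
+ * individually (see the IR_FLAG_SPLIT_VECTOR handling above).
+ */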
+static void ir_builder_split_vector(ir_builder *self, ir_value *vec) {
+ size_t i, count;
+ ir_value* found[3] = { NULL, NULL, NULL };
+
+ /* must not be written to */
+ if (vec_size(vec->writes))
+ return;
+ /* must not be trying to access individual members */
+ if (vec->members[0] || vec->members[1] || vec->members[2])
+ return;
+ /* must actually be used, otherwise it won't be generated anyway */
+ count = vec_size(vec->reads);
+ if (!count)
+ return;
+
+ /* may only be used directly as a function parameter; if we find any other use, bail out */
+ for (i = 0; i != count; ++i) {
+ /* we only split vectors that are used directly as parameters to a call! */
+ ir_instr *user = vec->reads[i];
+ if ((user->opcode < INSTR_CALL0 || user->opcode > INSTR_CALL8) && user->opcode != VINSTR_NRCALL)
+ return;
+ }
+
+ vec->flags |= IR_FLAG_SPLIT_VECTOR;
+
+ /* find existing floats making up the split */
+ count = vec_size(self->const_floats);
+ for (i = 0; i != count; ++i) {
+ ir_value *c = self->const_floats[i];
+ if (!found[0] && c->constval.vfloat == vec->constval.vvec.x)
+ found[0] = c;
+ if (!found[1] && c->constval.vfloat == vec->constval.vvec.y)
+ found[1] = c;
+ if (!found[2] && c->constval.vfloat == vec->constval.vvec.z)
+ found[2] = c;
+ if (found[0] && found[1] && found[2])
+ break;
+ }
+
+ /* generate floats for the components not yet found */
+ if (!found[0])
+ found[0] = ir_builder_imm_float(self, vec->constval.vvec.x, true);
+ if (!found[1]) {
+ if (vec->constval.vvec.y == vec->constval.vvec.x)
+ found[1] = found[0];
+ else
+ found[1] = ir_builder_imm_float(self, vec->constval.vvec.y, true);
+ }
+ if (!found[2]) {
+ if (vec->constval.vvec.z == vec->constval.vvec.x)
+ found[2] = found[0];
+ else if (vec->constval.vvec.z == vec->constval.vvec.y)
+ found[2] = found[1];
+ else
+ found[2] = ir_builder_imm_float(self, vec->constval.vvec.z, true);
+ }
+
+ /* the .members array should be safe to use here. */
+ vec->members[0] = found[0];
+ vec->members[1] = found[1];
+ vec->members[2] = found[2];
+
+ /* register the readers for these floats */
+ count = vec_size(vec->reads);
+ for (i = 0; i != count; ++i) {
+ vec_push(found[0]->reads, vec->reads[i]);
+ vec_push(found[1]->reads, vec->reads[i]);
+ vec_push(found[2]->reads, vec->reads[i]);
+ }
+}
+
+static void ir_builder_split_vectors(ir_builder *self) {
+ size_t i, count = vec_size(self->globals);
+ for (i = 0; i != count; ++i) {
+ ir_value *v = self->globals[i];
+ if (v->vtype != TYPE_VECTOR || !v->name || v->name[0] != '#')
+ continue;
+ ir_builder_split_vector(self, self->globals[i]);
+ }
+}
+
+bool ir_builder_generate(ir_builder *self, const char *filename)
+{
+ prog_section_statement_t stmt;
+ size_t i;
+ char *lnofile = NULL;
+
+ if (OPTS_FLAG(SPLIT_VECTOR_PARAMETERS)) {
+ ir_builder_collect_reusables(self);
+ if (vec_size(self->const_floats) > 0)
+ ir_builder_split_vectors(self);
+ }
+
+ for (i = 0; i < vec_size(self->fields); ++i)
+ {
+ ir_builder_prepare_field(self->code, self->fields[i]);
+ }
+
+ for (i = 0; i < vec_size(self->globals); ++i)
+ {
+ if (!ir_builder_gen_global(self, self->globals[i], false)) {
+ return false;
+ }
+ if (self->globals[i]->vtype == TYPE_FUNCTION) {
+ ir_function *func = self->globals[i]->constval.vfunc;
+ if (func && self->max_locals < func->allocated_locals &&
+ !(func->flags & IR_FLAG_MASK_NO_OVERLAP))
+ {
+ self->max_locals = func->allocated_locals;
+ }
+ if (func && self->max_globaltemps < func->globaltemps)
+ self->max_globaltemps = func->globaltemps;
+ }
+ }
+
+ for (i = 0; i < vec_size(self->fields); ++i)
+ {
+ if (!ir_builder_gen_field(self, self->fields[i])) {
+ return false;
+ }
+ }
+
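+ /* reserved area: nil (3 words), the virtual-instruction temps, then the
+ * shared global-temp and common-local regions sized by the per-function
+ * maxima collected above
+ */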
+ /* generate nil */
+ ir_value_code_setaddr(self->nil, vec_size(self->code->globals));
+ vec_push(self->code->globals, 0);
+ vec_push(self->code->globals, 0);
+ vec_push(self->code->globals, 0);
+
+ /* generate virtual-instruction temps */
+ for (i = 0; i < IR_MAX_VINSTR_TEMPS; ++i) {
+ ir_value_code_setaddr(self->vinstr_temp[i], vec_size(self->code->globals));
+ vec_push(self->code->globals, 0);
+ vec_push(self->code->globals, 0);
+ vec_push(self->code->globals, 0);
+ }
+
+ /* generate global temps */
+ self->first_common_globaltemp = vec_size(self->code->globals);
+ for (i = 0; i < self->max_globaltemps; ++i) {
+ vec_push(self->code->globals, 0);
+ }
+ /* generate common locals */
+ self->first_common_local = vec_size(self->code->globals);
+ for (i = 0; i < self->max_locals; ++i) {
+ vec_push(self->code->globals, 0);
+ }
+
+ /* generate function code */
+ for (i = 0; i < vec_size(self->globals); ++i)
+ {
+ if (self->globals[i]->vtype == TYPE_FUNCTION) {
+ if (!gen_global_function_code(self, self->globals[i])) {
+ return false;
+ }
+ }
+ }
+
+ if (vec_size(self->code->globals) >= 65536) {
+ irerror(vec_last(self->globals)->context, "This progs file would require more globals than the metadata can handle (%u). Bailing out.", (unsigned int)vec_size(self->code->globals));
+ return false;
+ }
+
+ /* DarkPlaces (DP) errors out if the last instruction is not an INSTR_DONE. */
+ if (vec_last(self->code->statements).opcode != INSTR_DONE)
+ {
+ lex_ctx_t last;
+
+ stmt.opcode = INSTR_DONE;
+ stmt.o1.u1 = 0;
+ stmt.o2.u1 = 0;
+ stmt.o3.u1 = 0;
+ last.line = vec_last(self->code->linenums);
+ last.column = vec_last(self->code->columnnums);
+
+ code_push_statement(self->code, &stmt, last);
+ }
+
+ if (OPTS_OPTION_BOOL(OPTION_PP_ONLY))
+ return true;
+
+ if (vec_size(self->code->statements) != vec_size(self->code->linenums)) {
+ con_err("Linecounter wrong: %lu != %lu\n",
+ (unsigned long)vec_size(self->code->statements),
+ (unsigned long)vec_size(self->code->linenums));
+ } else if (OPTS_FLAG(LNO)) {
+ char *dot;
+ size_t filelen = strlen(filename);
+
+ memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
+ dot = strrchr(lnofile, '.');
+ if (!dot) {
+ vec_pop(lnofile);
+ } else {
+ vec_shrinkto(lnofile, dot - lnofile);
+ }
+ memcpy(vec_add(lnofile, 5), ".lno", 5);
+ }
+
+ if (!code_write(self->code, filename, lnofile)) {
+ vec_free(lnofile);
+ return false;
+ }
+
+ vec_free(lnofile);
+ return true;