+}
+
/***********************************************************************
 * IR Code-Generation
 *
 * Since the IR has the convention of putting 'write' operands
 * at the beginning, we have to rotate the operands of instructions
 * properly in order to generate valid QCVM code.
 *
 * Having destinations at a fixed position is more convenient. In QC
 * this is *mostly* OPC, but FTE adds at least 2 instructions which
 * read from OPA, and store to OPB rather than OPC. Which is
 * partially the reason why the implementation of these instructions
 * in darkplaces has been delayed for so long.
 *
 * Breaking conventions is annoying...
 */
+static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool islocal);
+
+static bool gen_global_field(ir_value *global)
+{
+ if (global->hasvalue)
+ {
+ ir_value *fld = global->constval.vpointer;
+ if (!fld) {
+ irerror(global->context, "Invalid field constant with no field: %s", global->name);
+ return false;
+ }
+
+ /* copy the field's value */
+ ir_value_code_setaddr(global, vec_size(code_globals));
+ vec_push(code_globals, fld->code.fieldaddr);
+ if (global->fieldtype == TYPE_VECTOR) {
+ vec_push(code_globals, fld->code.fieldaddr+1);
+ vec_push(code_globals, fld->code.fieldaddr+2);
+ }
+ }
+ else
+ {
+ ir_value_code_setaddr(global, vec_size(code_globals));
+ vec_push(code_globals, 0);
+ if (global->fieldtype == TYPE_VECTOR) {
+ vec_push(code_globals, 0);
+ vec_push(code_globals, 0);
+ }
+ }
+ if (global->code.globaladdr < 0)
+ return false;
+ return true;
+}
+
+static bool gen_global_pointer(ir_value *global)
+{
+ if (global->hasvalue)
+ {
+ ir_value *target = global->constval.vpointer;
+ if (!target) {
+ irerror(global->context, "Invalid pointer constant: %s", global->name);
+ /* NULL pointers are pointing to the NULL constant, which also
+ * sits at address 0, but still has an ir_value for itself.
+ */
+ return false;
+ }
+
+ /* Here, relocations ARE possible - in fteqcc-enhanced-qc:
+ * void() foo; <- proto
+ * void() *fooptr = &foo;
+ * void() foo = { code }
+ */
+ if (!target->code.globaladdr) {
+ /* FIXME: Check for the constant nullptr ir_value!
+ * because then code.globaladdr being 0 is valid.
+ */
+ irerror(global->context, "FIXME: Relocation support");
+ return false;
+ }
+
+ ir_value_code_setaddr(global, vec_size(code_globals));
+ vec_push(code_globals, target->code.globaladdr);
+ }
+ else
+ {
+ ir_value_code_setaddr(global, vec_size(code_globals));
+ vec_push(code_globals, 0);
+ }
+ if (global->code.globaladdr < 0)
+ return false;
+ return true;
+}
+
+static bool gen_blocks_recursive(ir_function *func, ir_block *block)
+{
+ prog_section_statement stmt;
+ ir_instr *instr;
+ ir_block *target;
+ ir_block *ontrue;
+ ir_block *onfalse;
+ size_t stidx;
+ size_t i;
+
+tailcall:
+ block->generated = true;
+ block->code_start = vec_size(code_statements);
+ for (i = 0; i < vec_size(block->instr); ++i)
+ {
+ instr = block->instr[i];
+
+ if (instr->opcode == VINSTR_PHI) {
+ irerror(block->context, "cannot generate virtual instruction (phi)");
+ return false;
+ }
+
+ if (instr->opcode == VINSTR_JUMP) {
+ target = instr->bops[0];
+ /* for uncoditional jumps, if the target hasn't been generated
+ * yet, we generate them right here.
+ */
+ if (!target->generated) {
+ block = target;
+ goto tailcall;
+ }
+
+ /* otherwise we generate a jump instruction */
+ stmt.opcode = INSTR_GOTO;
+ stmt.o1.s1 = (target->code_start) - vec_size(code_statements);
+ stmt.o2.s1 = 0;
+ stmt.o3.s1 = 0;
+ code_push_statement(&stmt, instr->context.line);
+
+ /* no further instructions can be in this block */
+ return true;
+ }
+
+ if (instr->opcode == VINSTR_COND) {
+ ontrue = instr->bops[0];
+ onfalse = instr->bops[1];
+ /* TODO: have the AST signal which block should
+ * come first: eg. optimize IFs without ELSE...
+ */
+
+ stmt.o1.u1 = ir_value_code_addr(instr->_ops[0]);
+ stmt.o2.u1 = 0;
+ stmt.o3.s1 = 0;
+
+ if (ontrue->generated) {
+ stmt.opcode = INSTR_IF;
+ stmt.o2.s1 = (ontrue->code_start) - vec_size(code_statements);
+ code_push_statement(&stmt, instr->context.line);
+ }
+ if (onfalse->generated) {
+ stmt.opcode = INSTR_IFNOT;
+ stmt.o2.s1 = (onfalse->code_start) - vec_size(code_statements);
+ code_push_statement(&stmt, instr->context.line);
+ }
+ if (!ontrue->generated) {
+ if (onfalse->generated) {
+ block = ontrue;
+ goto tailcall;
+ }
+ }
+ if (!onfalse->generated) {
+ if (ontrue->generated) {
+ block = onfalse;
+ goto tailcall;
+ }
+ }
+ /* neither ontrue nor onfalse exist */
+ stmt.opcode = INSTR_IFNOT;
+ if (!instr->likely) {
+ /* Honor the likelyhood hint */
+ ir_block *tmp = onfalse;
+ stmt.opcode = INSTR_IF;
+ onfalse = ontrue;
+ ontrue = tmp;
+ }
+ stidx = vec_size(code_statements);
+ code_push_statement(&stmt, instr->context.line);
+ /* on false we jump, so add ontrue-path */
+ if (!gen_blocks_recursive(func, ontrue))
+ return false;
+ /* fixup the jump address */
+ code_statements[stidx].o2.s1 = vec_size(code_statements) - stidx;
+ /* generate onfalse path */
+ if (onfalse->generated) {
+ /* fixup the jump address */
+ code_statements[stidx].o2.s1 = (onfalse->code_start) - (stidx);
+ stmt.opcode = vec_last(code_statements).opcode;
+ if (stmt.opcode == INSTR_GOTO ||
+ stmt.opcode == INSTR_IF ||
+ stmt.opcode == INSTR_IFNOT ||
+ stmt.opcode == INSTR_RETURN ||
+ stmt.opcode == INSTR_DONE)
+ {
+ /* no use jumping from here */
+ return true;
+ }
+ /* may have been generated in the previous recursive call */
+ stmt.opcode = INSTR_GOTO;
+ stmt.o1.s1 = (onfalse->code_start) - vec_size(code_statements);
+ stmt.o2.s1 = 0;
+ stmt.o3.s1 = 0;
+ code_push_statement(&stmt, instr->context.line);
+ return true;
+ }
+ /* if not, generate now */
+ block = onfalse;
+ goto tailcall;
+ }
+
+ if (instr->opcode >= INSTR_CALL0 && instr->opcode <= INSTR_CALL8) {
+ /* Trivial call translation:
+ * copy all params to OFS_PARM*
+ * if the output's storetype is not store_return,
+ * add append a STORE instruction!
+ *
+ * NOTES on how to do it better without much trouble:
+ * -) The liferanges!
+ * Simply check the liferange of all parameters for
+ * other CALLs. For each param with no CALL in its
+ * liferange, we can store it in an OFS_PARM at
+ * generation already. This would even include later
+ * reuse.... probably... :)
+ */
+ size_t p, first;
+ ir_value *retvalue;
+
+ first = vec_size(instr->params);
+ if (first > 8)
+ first = 8;
+ for (p = 0; p < first; ++p)
+ {
+ ir_value *param = instr->params[p];
+
+ stmt.opcode = INSTR_STORE_F;
+ stmt.o3.u1 = 0;
+
+ if (param->vtype == TYPE_FIELD)
+ stmt.opcode = field_store_instr[param->fieldtype];
+ else
+ stmt.opcode = type_store_instr[param->vtype];
+ stmt.o1.u1 = ir_value_code_addr(param);
+ stmt.o2.u1 = OFS_PARM0 + 3 * p;
+ code_push_statement(&stmt, instr->context.line);
+ }
+ /* Now handle extparams */
+ first = vec_size(instr->params);
+ for (; p < first; ++p)
+ {
+ ir_builder *ir = func->owner;
+ ir_value *param = instr->params[p];
+ ir_value *targetparam;
+
+ if (p-8 >= vec_size(ir->extparams)) {
+ irerror(instr->context, "Not enough extparam-globals have been created");
+ return false;
+ }
+
+ targetparam = ir->extparams[p-8];
+
+ stmt.opcode = INSTR_STORE_F;
+ stmt.o3.u1 = 0;
+
+ if (param->vtype == TYPE_FIELD)
+ stmt.opcode = field_store_instr[param->fieldtype];
+ else
+ stmt.opcode = type_store_instr[param->vtype];
+ stmt.o1.u1 = ir_value_code_addr(param);
+ stmt.o2.u1 = ir_value_code_addr(targetparam);
+ code_push_statement(&stmt, instr->context.line);
+ }
+
+ stmt.opcode = INSTR_CALL0 + vec_size(instr->params);
+ if (stmt.opcode > INSTR_CALL8)
+ stmt.opcode = INSTR_CALL8;
+ stmt.o1.u1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o2.u1 = 0;
+ stmt.o3.u1 = 0;
+ code_push_statement(&stmt, instr->context.line);
+
+ retvalue = instr->_ops[0];
+ if (retvalue && retvalue->store != store_return && vec_size(retvalue->life))
+ {
+ /* not to be kept in OFS_RETURN */
+ if (retvalue->vtype == TYPE_FIELD)
+ stmt.opcode = field_store_instr[retvalue->vtype];
+ else
+ stmt.opcode = type_store_instr[retvalue->vtype];
+ stmt.o1.u1 = OFS_RETURN;
+ stmt.o2.u1 = ir_value_code_addr(retvalue);
+ stmt.o3.u1 = 0;
+ code_push_statement(&stmt, instr->context.line);
+ }
+ continue;
+ }
+
+ if (instr->opcode == INSTR_STATE) {
+ irerror(block->context, "TODO: state instruction");
+ return false;
+ }
+
+ stmt.opcode = instr->opcode;
+ stmt.o1.u1 = 0;
+ stmt.o2.u1 = 0;
+ stmt.o3.u1 = 0;
+
+ /* This is the general order of operands */
+ if (instr->_ops[0])
+ stmt.o3.u1 = ir_value_code_addr(instr->_ops[0]);
+
+ if (instr->_ops[1])
+ stmt.o1.u1 = ir_value_code_addr(instr->_ops[1]);
+
+ if (instr->_ops[2])
+ stmt.o2.u1 = ir_value_code_addr(instr->_ops[2]);
+
+ if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
+ {
+ stmt.o1.u1 = stmt.o3.u1;
+ stmt.o3.u1 = 0;
+ }
+ else if ((stmt.opcode >= INSTR_STORE_F &&
+ stmt.opcode <= INSTR_STORE_FNC) ||
+ (stmt.opcode >= INSTR_STOREP_F &&
+ stmt.opcode <= INSTR_STOREP_FNC))
+ {
+ /* 2-operand instructions with A -> B */
+ stmt.o2.u1 = stmt.o3.u1;
+ stmt.o3.u1 = 0;
+ }
+
+ code_push_statement(&stmt, instr->context.line);
+ }
+ return true;
+}
+
+static bool gen_function_code(ir_function *self)
+{
+ ir_block *block;
+ prog_section_statement stmt;
+
+ /* Starting from entry point, we generate blocks "as they come"
+ * for now. Dead blocks will not be translated obviously.
+ */
+ if (!vec_size(self->blocks)) {
+ irerror(self->context, "Function '%s' declared without body.", self->name);
+ return false;
+ }
+
+ block = self->blocks[0];
+ if (block->generated)
+ return true;
+
+ if (!gen_blocks_recursive(self, block)) {
+ irerror(self->context, "failed to generate blocks for '%s'", self->name);
+ return false;
+ }
+
+ /* otherwise code_write crashes since it debug-prints functions until AINSTR_END */
+ stmt.opcode = AINSTR_END;
+ stmt.o1.u1 = 0;
+ stmt.o2.u1 = 0;
+ stmt.o3.u1 = 0;
+ code_push_statement(&stmt, vec_last(code_linenums));
+ return true;
+}
+
+static qcint ir_builder_filestring(ir_builder *ir, const char *filename)
+{
+ /* NOTE: filename pointers are copied, we never strdup them,
+ * thus we can use pointer-comparison to find the string.
+ */
+ size_t i;
+ qcint str;
+
+ for (i = 0; i < vec_size(ir->filenames); ++i) {
+ if (ir->filenames[i] == filename)
+ return ir->filestrings[i];
+ }
+
+ str = code_genstring(filename);
+ vec_push(ir->filenames, filename);
+ vec_push(ir->filestrings, str);
+ return str;
+}
+
/* Emit the prog_section_function entry for a function-global and lay
 * out its locals (and SSA temporaries) in the global area.
 *
 * The function's entry point is either its builtin number or the
 * current statement count (patched again later by
 * gen_global_function_code once the body is actually generated).
 * Returns false if the global carries no function constant or a local
 * fails to generate.
 */
static bool gen_global_function(ir_builder *ir, ir_value *global)
{
    prog_section_function fun;
    ir_function *irfun;

    size_t i;
    size_t local_var_end;

    if (!global->hasvalue || (!global->constval.vfunc))
    {
        irerror(global->context, "Invalid state of function-global: not constant: %s", global->name);
        return false;
    }

    irfun = global->constval.vfunc;

    fun.name    = global->code.name;
    fun.file    = ir_builder_filestring(ir, global->context.file);
    fun.profile = 0; /* always 0 */
    fun.nargs   = vec_size(irfun->params);
    /* the progs format caps declared args at 8; extras go via extparams */
    if (fun.nargs > 8)
        fun.nargs = 8;

    for (i = 0;i < 8; ++i) {
        if ((int32_t)i >= fun.nargs)
            fun.argsize[i] = 0;
        else
            /* params[i] appears to hold a type tag usable as a
             * type_sizeof index - TODO confirm (not an ir_value here) */
            fun.argsize[i] = type_sizeof[irfun->params[i]];
    }

    /* locals start where the global area currently ends */
    fun.firstlocal = vec_size(code_globals);

    local_var_end = fun.firstlocal;
    for (i = 0; i < vec_size(irfun->locals); ++i) {
        if (!ir_builder_gen_global(ir, irfun->locals[i], true)) {
            irerror(irfun->locals[i]->context, "Failed to generate local %s", irfun->locals[i]->name);
            return false;
        }
    }
    /* SSA values are placed directly after the last named local */
    if (vec_size(irfun->locals)) {
        ir_value *last = vec_last(irfun->locals);
        local_var_end = last->code.globaladdr;
        local_var_end += type_sizeof[last->vtype];
    }
    for (i = 0; i < vec_size(irfun->values); ++i)
    {
        /* generate code.globaladdr for ssa values */
        ir_value *v = irfun->values[i];
        ir_value_code_setaddr(v, local_var_end + v->code.local);
    }
    for (i = 0; i < irfun->allocated_locals; ++i) {
        /* fill the locals with zeros */
        vec_push(code_globals, 0);
    }

    fun.locals = vec_size(code_globals) - fun.firstlocal;

    if (irfun->builtin)
        fun.entry = irfun->builtin;
    else {
        /* remember the def index so the entry can be patched later */
        irfun->code_function_def = vec_size(code_functions);
        fun.entry = vec_size(code_statements);
    }

    vec_push(code_functions, fun);
    return true;
}
+
+static void ir_gen_extparam(ir_builder *ir)
+{
+ prog_section_def def;
+ ir_value *global;
+ char name[128];
+
+ snprintf(name, sizeof(name), "EXTPARM#%i", (int)(vec_size(ir->extparams)+8));
+ global = ir_value_var(name, store_global, TYPE_VECTOR);
+
+ def.name = code_genstring(name);
+ def.type = TYPE_VECTOR;
+ def.offset = vec_size(code_globals);
+
+ vec_push(code_defs, def);
+ ir_value_code_setaddr(global, def.offset);
+ vec_push(code_globals, 0);
+ vec_push(code_globals, 0);
+ vec_push(code_globals, 0);
+
+ vec_push(ir->extparams, global);
+}
+
+static bool gen_function_extparam_copy(ir_function *self)
+{
+ size_t i, ext, numparams;
+
+ ir_builder *ir = self->owner;
+ ir_value *ep;
+ prog_section_statement stmt;
+
+ numparams = vec_size(self->params);
+ if (!numparams)
+ return true;
+
+ stmt.opcode = INSTR_STORE_F;
+ stmt.o3.s1 = 0;
+ for (i = 8; i < numparams; ++i) {
+ ext = i - 8;
+ if (ext >= vec_size(ir->extparams))
+ ir_gen_extparam(ir);
+
+ ep = ir->extparams[ext];
+
+ stmt.opcode = type_store_instr[self->locals[i]->vtype];
+ if (self->locals[i]->vtype == TYPE_FIELD &&
+ self->locals[i]->fieldtype == TYPE_VECTOR)
+ {
+ stmt.opcode = INSTR_STORE_V;
+ }
+ stmt.o1.u1 = ir_value_code_addr(ep);
+ stmt.o2.u1 = ir_value_code_addr(self->locals[i]);
+ code_push_statement(&stmt, self->context.line);
+ }
+
+ return true;
+}
+
+static bool gen_global_function_code(ir_builder *ir, ir_value *global)
+{
+ prog_section_function *fundef;
+ ir_function *irfun;
+
+ (void)ir;
+
+ irfun = global->constval.vfunc;
+ if (!irfun) {
+ if (global->cvq == CV_NONE) {
+ irwarning(global->context, WARN_IMPLICIT_FUNCTION_POINTER,
+ "function `%s` has no body and in QC implicitly becomes a function-pointer", global->name);
+ }
+ /* this was a function pointer, don't generate code for those */
+ return true;
+ }
+
+ if (irfun->builtin)
+ return true;
+
+ if (irfun->code_function_def < 0) {
+ irerror(irfun->context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->name);
+ return false;
+ }
+ fundef = &code_functions[irfun->code_function_def];
+
+ fundef->entry = vec_size(code_statements);
+ if (!gen_function_extparam_copy(irfun)) {
+ irerror(irfun->context, "Failed to generate extparam-copy code for function %s", irfun->name);
+ return false;
+ }
+ if (!gen_function_code(irfun)) {
+ irerror(irfun->context, "Failed to generate code for function %s", irfun->name);
+ return false;
+ }
+ return true;
+}
+
/* Generate the def entry and global data word(s) for one global value.
 * `islocal` marks function-locals, which never get the DEF_SAVEGLOBAL
 * flag (only non-constant, non-local globals are savegame-persisted).
 * Returns false on invalid types or a bad assigned address.
 */
static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool islocal)
{
    size_t i;
    int32_t *iptr;
    prog_section_def def;

    def.type = global->vtype;
    def.offset = vec_size(code_globals);

    if (global->name) {
        /* '#'-prefixed names are immediates; they all share one
         * interned "IMMEDIATE" string-table entry */
        if (global->name[0] == '#') {
            if (!self->str_immediate)
                self->str_immediate = code_genstring("IMMEDIATE");
            def.name = global->code.name = self->str_immediate;
        }
        else
            def.name = global->code.name = code_genstring(global->name);
    }
    else
        def.name = 0;

    switch (global->vtype)
    {
    case TYPE_VOID:
        /* NOTE(review): global->name may be NULL here (the guard above
         * only protects def.name) - confirm callers always name void
         * globals before relying on this strcmp. */
        if (!strcmp(global->name, "end_sys_globals")) {
            /* TODO: remember this point... all the defs before this one
             * should be checksummed and added to progdefs.h when we generate it.
             */
        }
        else if (!strcmp(global->name, "end_sys_fields")) {
            /* TODO: same as above but for entity-fields rather than globals
             */
        }
        else
            irwarning(global->context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`",
                      global->name);
        /* I'd argue setting it to 0 is sufficient, but maybe some depend on knowing how far
         * the system fields actually go? Though the engine knows this anyway...
         * Maybe this could be an -foption
         * fteqcc creates data for end_sys_* - of size 1, so let's do the same
         */
        ir_value_code_setaddr(global, vec_size(code_globals));
        vec_push(code_globals, 0);
        /* Add the def */
        vec_push(code_defs, def);
        return true;
    case TYPE_POINTER:
        vec_push(code_defs, def);
        return gen_global_pointer(global);
    case TYPE_FIELD:
        vec_push(code_defs, def);
        return gen_global_field(global);
    case TYPE_ENTITY:
        /* fall through */
    case TYPE_FLOAT:
    {
        /* one global word; constants copy their raw bit pattern */
        ir_value_code_setaddr(global, vec_size(code_globals));
        if (global->hasvalue) {
            iptr = (int32_t*)&global->constval.ivec[0];
            vec_push(code_globals, *iptr);
        } else {
            vec_push(code_globals, 0);
            if (!islocal)
                def.type |= DEF_SAVEGLOBAL;
        }
        vec_push(code_defs, def);

        return global->code.globaladdr >= 0;
    }
    case TYPE_STRING:
    {
        /* string constants store a string-table offset */
        ir_value_code_setaddr(global, vec_size(code_globals));
        if (global->hasvalue) {
            vec_push(code_globals, code_genstring(global->constval.vstring));
        } else {
            vec_push(code_globals, 0);
            if (!islocal)
                def.type |= DEF_SAVEGLOBAL;
        }
        vec_push(code_defs, def);
        return global->code.globaladdr >= 0;
    }
    case TYPE_VECTOR:
    {
        size_t d;
        ir_value_code_setaddr(global, vec_size(code_globals));
        if (global->hasvalue) {
            iptr = (int32_t*)&global->constval.ivec[0];
            vec_push(code_globals, iptr[0]);
            if (global->code.globaladdr < 0)
                return false;
            /* remaining component words */
            for (d = 1; d < type_sizeof[global->vtype]; ++d)
            {
                vec_push(code_globals, iptr[d]);
            }
        } else {
            vec_push(code_globals, 0);
            if (global->code.globaladdr < 0)
                return false;
            for (d = 1; d < type_sizeof[global->vtype]; ++d)
            {
                vec_push(code_globals, 0);
            }
            if (!islocal)
                def.type |= DEF_SAVEGLOBAL;
        }

        vec_push(code_defs, def);
        return global->code.globaladdr >= 0;
    }
    case TYPE_FUNCTION:
        ir_value_code_setaddr(global, vec_size(code_globals));
        if (!global->hasvalue) {
            vec_push(code_globals, 0);
            if (global->code.globaladdr < 0)
                return false;
        } else {
            /* the global word holds the function-table index */
            vec_push(code_globals, vec_size(code_functions));
            if (!gen_global_function(self, global))
                return false;
            if (!islocal)
                def.type |= DEF_SAVEGLOBAL;
        }
        vec_push(code_defs, def);
        return true;
    case TYPE_VARIANT:
        /* assume biggest type */
        ir_value_code_setaddr(global, vec_size(code_globals));
        vec_push(code_globals, 0);
        for (i = 1; i < type_sizeof[TYPE_VARIANT]; ++i)
            vec_push(code_globals, 0);
        /* NOTE(review): unlike the other cases no def is pushed here -
         * confirm variants are meant to stay undefined in code_defs. */
        return true;
    default:
        /* refuse to create 'void' type or any other fancy business. */
        irerror(global->context, "Invalid type for global variable `%s`: %s",
                global->name, type_name[global->vtype]);
        return false;
    }
}
+
/* Reserve this field's entity-field slot(s) - vectors take 3 words -
 * and record the offset for ir_builder_gen_field to emit later. */
static void ir_builder_prepare_field(ir_value *field)
{
    field->code.fieldaddr = code_alloc_field(type_sizeof[field->fieldtype]);
}
+
/* Emit the def, field entry, and global word(s) for an entity field.
 * The global holds the field's offset (plus the two follow-up offsets
 * for vector fields).  In the GMQCC dialect the companion global is
 * named with a '.' prefix, sharing the field name's string-table bytes.
 * Returns false for untyped (void) fields or a bad address.
 */
static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
{
    prog_section_def def;
    prog_section_field fld;

    (void)self;

    def.type   = (uint16_t)field->vtype;
    def.offset = (uint16_t)vec_size(code_globals);

    /* create a global named the same as the field */
    if (opts_standard == COMPILER_GMQCC) {
        /* in our standard, the global gets a dot prefix */
        size_t len = strlen(field->name);
        char name[1024];

        /* we really don't want to have to allocate this, and 1024
         * bytes is more than enough for a variable/field name
         */
        if (len+2 >= sizeof(name)) {
            irerror(field->context, "invalid field name size: %u", (unsigned int)len);
            return false;
        }

        name[0] = '.';
        memcpy(name+1, field->name, len); /* no strncpy - we used strlen above */
        name[len+1] = 0;

        def.name = code_genstring(name);
        fld.name = def.name + 1; /* we reuse that string table entry */
    } else {
        /* in plain QC, there cannot be a global with the same name,
         * and so we also name the global the same.
         * FIXME: fteqcc should create a global as well
         * check if it actually uses the same name. Probably does
         */
        def.name = code_genstring(field->name);
        fld.name = def.name;
    }

    field->code.name = def.name;

    vec_push(code_defs, def);

    fld.type = field->fieldtype;

    if (fld.type == TYPE_VOID) {
        irerror(field->context, "field is missing a type: %s - don't know its size", field->name);
        return false;
    }

    /* offset was allocated earlier by ir_builder_prepare_field */
    fld.offset = field->code.fieldaddr;

    vec_push(code_fields, fld);

    ir_value_code_setaddr(field, vec_size(code_globals));
    vec_push(code_globals, fld.offset);
    if (fld.type == TYPE_VECTOR) {
        vec_push(code_globals, fld.offset+1);
        vec_push(code_globals, fld.offset+2);
    }

    return field->code.globaladdr >= 0;
}
+
+bool ir_builder_generate(ir_builder *self, const char *filename)
+{
+ prog_section_statement stmt;
+ size_t i;
+ char *lnofile = NULL;
+
+ code_init();
+
+ for (i = 0; i < vec_size(self->fields); ++i)
+ {
+ ir_builder_prepare_field(self->fields[i]);
+ }
+
+ for (i = 0; i < vec_size(self->globals); ++i)
+ {
+ if (!ir_builder_gen_global(self, self->globals[i], false)) {
+ return false;
+ }
+ }
+
+ for (i = 0; i < vec_size(self->fields); ++i)
+ {
+ if (!ir_builder_gen_field(self, self->fields[i])) {
+ return false;
+ }
+ }
+
+ /* generate function code */
+ for (i = 0; i < vec_size(self->globals); ++i)
+ {
+ if (self->globals[i]->vtype == TYPE_FUNCTION) {
+ if (!gen_global_function_code(self, self->globals[i])) {
+ return false;
+ }
+ }
+ }
+
+ if (vec_size(code_globals) >= 65536) {
+ irerror(vec_last(self->globals)->context, "This progs file would require more globals than the metadata can handle. Bailing out.");
+ return false;
+ }
+
+ /* DP errors if the last instruction is not an INSTR_DONE
+ * and for debugging purposes we add an additional AINSTR_END
+ * to the end of functions, so here it goes:
+ */
+ stmt.opcode = INSTR_DONE;
+ stmt.o1.u1 = 0;
+ stmt.o2.u1 = 0;
+ stmt.o3.u1 = 0;
+ code_push_statement(&stmt, vec_last(code_linenums));
+
+ if (opts_pp_only)
+ return true;
+
+ if (vec_size(code_statements) != vec_size(code_linenums)) {
+ con_err("Linecounter wrong: %lu != %lu\n",
+ (unsigned long)vec_size(code_statements),
+ (unsigned long)vec_size(code_linenums));
+ } else if (OPTS_FLAG(LNO)) {
+ char *dot;
+ size_t filelen = strlen(filename);
+
+ memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
+ dot = strrchr(lnofile, '.');
+ if (!dot) {
+ vec_pop(lnofile);
+ } else {
+ vec_shrinkto(lnofile, dot - lnofile);
+ }
+ memcpy(vec_add(lnofile, 5), ".lno", 5);
+ }
+
+ if (lnofile)
+ con_out("writing '%s' and '%s'...\n", filename, lnofile);
+ else
+ con_out("writing '%s'\n", filename);
+ if (!code_write(filename, lnofile)) {
+ vec_free(lnofile);
+ return false;
+ }
+ vec_free(lnofile);
+ return true;