X-Git-Url: https://git.xonotic.org/?a=blobdiff_plain;f=ir.c;h=ccf9da548e734da3cc46f2db1b90bd2d616284ed;hb=21b70055415a67d80494fecbad900060af09e5aa;hp=7e6645979e43ad4372c26b6207b8bd1fbf0f892a;hpb=340daeabc43b7ef290797ed3c791ef2d08dd2a12;p=xonotic%2Fgmqcc.git diff --git a/ir.c b/ir.c index 7e66459..ccf9da5 100644 --- a/ir.c +++ b/ir.c @@ -34,6 +34,9 @@ ir_builder* ir_builder_new(const char *modulename) ir_builder* self; self = (ir_builder*)mem_a(sizeof(*self)); + if (!self) + return NULL; + MEM_VECTOR_INIT(self, functions); MEM_VECTOR_INIT(self, globals); self->name = NULL; @@ -135,11 +138,16 @@ ir_value* ir_builder_create_global(ir_builder *self, const char *name, int vtype bool ir_function_naive_phi(ir_function*); void ir_function_enumerate(ir_function*); bool ir_function_calculate_liferanges(ir_function*); +bool ir_function_allocate_locals(ir_function*); ir_function* ir_function_new(ir_builder* owner) { ir_function *self; self = (ir_function*)mem_a(sizeof(*self)); + + if (!self) + return NULL; + self->name = NULL; if (!ir_function_set_name(self, "<@unnamed>")) { mem_d(self); @@ -216,6 +224,9 @@ bool ir_function_finalize(ir_function *self) if (!ir_function_calculate_liferanges(self)) return false; + + if (!ir_function_allocate_locals(self)) + return false; return true; } @@ -252,6 +263,11 @@ ir_block* ir_block_new(ir_function* owner, const char *name) { ir_block *self; self = (ir_block*)mem_a(sizeof(*self)); + if (!self) + return NULL; + + memset(self, 0, sizeof(*self)); + self->label = NULL; if (!ir_block_set_label(self, name)) { mem_d(self); @@ -269,6 +285,9 @@ ir_block* ir_block_new(ir_function* owner, const char *name) self->is_return = false; self->run_id = 0; MEM_VECTOR_INIT(self, living); + + self->generated = false; + return self; } MEM_VEC_FUNCTIONS(ir_block, ir_instr*, instr) @@ -305,6 +324,9 @@ ir_instr* ir_instr_new(ir_block* owner, int op) { ir_instr *self; self = (ir_instr*)mem_a(sizeof(*self)); + if (!self) + return NULL; + self->owner = owner; self->context.file = "<@no context>"; self->context.line = 0; @@ -381,6 +403,7 @@ ir_value* ir_value_var(const char *name, int storetype, int vtype) ir_value *self; self = (ir_value*)mem_a(sizeof(*self)); self->vtype = vtype; + self->fieldtype = TYPE_VOID; self->store = storetype; MEM_VECTOR_INIT(self, reads); MEM_VECTOR_INIT(self, writes); @@ -390,6 +413,9 @@ ir_value* ir_value_var(const char *name, int storetype, int vtype) self->name = NULL; ir_value_set_name(self, name); + memset(&self->constval, 0, sizeof(self->constval)); + memset(&self->code, 0, sizeof(self->code)); + MEM_VECTOR_INIT(self, life); return self; } @@ -412,7 +438,8 @@ ir_value* ir_value_out(ir_function *owner, const char *name, int storetype, int void ir_value_delete(ir_value* self) { - mem_d((void*)self->name); + if (self->name) + mem_d((void*)self->name); if (self->isconst) { if (self->vtype == TYPE_STRING) @@ -558,6 +585,142 @@ bool ir_value_life_merge(ir_value *self, size_t s) return ir_value_life_insert(self, i, new_entry); } +bool ir_value_life_merge_into(ir_value *self, const ir_value *other) +{ + size_t i, myi; + + if (!other->life_count) + return true; + + if (!self->life_count) { + for (i = 0; i < other->life_count; ++i) { + if (!ir_value_life_add(self, other->life[i])) + return false; + } + return true; + } + + myi = 0; + for (i = 0; i < other->life_count; ++i) + { + const ir_life_entry_t *life = &other->life[i]; + while (true) + { + ir_life_entry_t *entry = &self->life[myi]; + + if (life->end+1 < entry->start) + { + /* adding an interval before entry */ 
+ if (!ir_value_life_insert(self, myi, *life)) + return false; + ++myi; + break; + } + + if (life->start < entry->start && + life->end >= entry->start) + { + /* starts earlier and overlaps */ + entry->start = life->start; + } + + if (life->end > entry->end && + life->start-1 <= entry->end) + { + /* ends later and overlaps */ + entry->end = life->end; + } + + /* see if our change combines it with the next ranges */ + while (myi+1 < self->life_count && + entry->end+1 >= self->life[1+myi].start) + { + /* overlaps with (myi+1) */ + if (entry->end < self->life[1+myi].end) + entry->end = self->life[1+myi].end; + if (!ir_value_life_remove(self, myi+1)) + return false; + entry = &self->life[myi]; + } + + /* see if we're after the entry */ + if (life->start > entry->end) + { + ++myi; + /* append if we're at the end */ + if (myi >= self->life_count) { + if (!ir_value_life_add(self, *life)) + return false; + break; + } + /* otherweise check the next range */ + continue; + } + break; + } + } + return true; +} + +bool ir_values_overlap(const ir_value *a, const ir_value *b) +{ + /* For any life entry in A see if it overlaps with + * any life entry in B. + * Note that the life entries are orderes, so we can make a + * more efficient algorithm there than naively translating the + * statement above. + */ + + ir_life_entry_t *la, *lb, *enda, *endb; + + /* first of all, if either has no life range, they cannot clash */ + if (!a->life_count || !b->life_count) + return false; + + la = a->life; + lb = b->life; + enda = la + a->life_count; + endb = lb + b->life_count; + while (true) + { + /* check if the entries overlap, for that, + * both must start before the other one ends. + */ +#if defined(LIFE_RANGE_WITHOUT_LAST_READ) + if (la->start <= lb->end && + lb->start <= la->end) +#else + if (la->start < lb->end && + lb->start < la->end) +#endif + { + return true; + } + + /* entries are ordered + * one entry is earlier than the other + * that earlier entry will be moved forward + */ + if (la->start < lb->start) + { + /* order: A B, move A forward + * check if we hit the end with A + */ + if (++la == enda) + break; + } + else if (lb->start < la->start) + { + /* order: B A, move B forward + * check if we hit the end with B + */ + if (++lb == endb) + break; + } + } + return false; +} + /*********************************************************************** *IR main operations */ @@ -566,6 +729,7 @@ bool ir_block_create_store_op(ir_block *self, int op, ir_value *target, ir_value { if (target->store == store_value) { fprintf(stderr, "cannot store to an SSA value\n"); + fprintf(stderr, "trying to store: %s <- %s\n", target->name, what->name); return false; } else { ir_instr *in = ir_instr_new(self, op); @@ -608,6 +772,9 @@ bool ir_block_create_store(ir_block *self, ir_value *target, ir_value *what) case TYPE_STRING: op = INSTR_STORE_S; break; + case TYPE_FIELD: + op = INSTR_STORE_FLD; + break; #if 0 case TYPE_INTEGER: if (what->vtype == TYPE_INTEGER) @@ -634,16 +801,14 @@ bool ir_block_create_storep(ir_block *self, ir_value *target, ir_value *what) { int op = 0; int vtype; - if (target->vtype == TYPE_VARIANT) - vtype = what->vtype; - else - vtype = target->vtype; - if (vtype != what->vtype) - { - /* Cannot implicitly convert when storing through a pointer */ + if (target->vtype != TYPE_POINTER) return false; - } + + /* storing using pointer - target is a pointer, type must be + * inferred from source + */ + vtype = what->vtype; switch (vtype) { case TYPE_FLOAT: @@ -658,6 +823,9 @@ bool ir_block_create_storep(ir_block 
*self, ir_value *target, ir_value *what) case TYPE_STRING: op = INSTR_STOREP_S; break; + case TYPE_FIELD: + op = INSTR_STOREP_FLD; + break; #if 0 case TYPE_INTEGER: op = INSTR_STOREP_I; @@ -788,7 +956,7 @@ ir_instr* ir_block_create_phi(ir_block *self, const char *label, int ot) in = ir_instr_new(self, VINSTR_PHI); if (!in) return NULL; - out = ir_value_out(self->owner, label, store_local, ot); + out = ir_value_out(self->owner, label, store_value, ot); if (!out) { ir_instr_delete(in); return NULL; @@ -836,9 +1004,6 @@ ir_value* ir_block_create_binop(ir_block *self, const char *label, int opcode, ir_value *left, ir_value *right) { - ir_value *out = NULL; - ir_instr *in = NULL; - int ot = TYPE_VOID; switch (opcode) { case INSTR_ADD_F: @@ -924,33 +1089,82 @@ ir_value* ir_block_create_binop(ir_block *self, return NULL; } - out = ir_value_out(self->owner, label, store_local, ot); + return ir_block_create_general_instr(self, label, opcode, left, right, ot); +} + +ir_value* ir_block_create_general_instr(ir_block *self, const char *label, + int op, ir_value *a, ir_value *b, int outype) +{ + ir_instr *instr; + ir_value *out; + + out = ir_value_out(self->owner, label, store_value, outype); if (!out) return NULL; - in = ir_instr_new(self, opcode); - if (!in) { + instr = ir_instr_new(self, op); + if (!instr) { ir_value_delete(out); return NULL; } - if (!ir_instr_op(in, 0, out, true) || - !ir_instr_op(in, 1, left, false) || - !ir_instr_op(in, 2, right, false) ) + if (!ir_instr_op(instr, 0, out, true) || + !ir_instr_op(instr, 1, a, false) || + !ir_instr_op(instr, 2, b, false) ) { goto on_error; } - if (!ir_block_instr_add(self, in)) + if (!ir_block_instr_add(self, instr)) goto on_error; return out; on_error: + ir_instr_delete(instr); ir_value_delete(out); - ir_instr_delete(in); return NULL; } +ir_value* ir_block_create_fieldaddress(ir_block *self, const char *label, ir_value *ent, ir_value *field) +{ + /* Support for various pointer types todo if so desired */ + if (ent->vtype != TYPE_ENTITY) + return NULL; + + if (field->vtype != TYPE_FIELD) + return NULL; + + return ir_block_create_general_instr(self, label, INSTR_ADDRESS, ent, field, TYPE_POINTER); +} + +ir_value* ir_block_create_load_from_ent(ir_block *self, const char *label, ir_value *ent, ir_value *field, int outype) +{ + int op; + if (ent->vtype != TYPE_ENTITY) + return NULL; + + /* at some point we could redirect for TYPE_POINTER... 
but that could lead to carelessness */ + if (field->vtype != TYPE_FIELD) + return NULL; + + switch (outype) + { + case TYPE_FLOAT: op = INSTR_LOAD_F; break; + case TYPE_VECTOR: op = INSTR_LOAD_V; break; + case TYPE_STRING: op = INSTR_LOAD_S; break; + case TYPE_FIELD: op = INSTR_LOAD_FLD; break; + case TYPE_ENTITY: op = INSTR_LOAD_ENT; break; +#if 0 + case TYPE_POINTER: op = INSTR_LOAD_I; break; + case TYPE_INTEGER: op = INSTR_LOAD_I; break; +#endif + default: + return NULL; + } + + return ir_block_create_general_instr(self, label, op, ent, field, outype); +} + ir_value* ir_block_create_add(ir_block *self, const char *label, ir_value *left, ir_value *right) @@ -1173,7 +1387,7 @@ static bool ir_block_naive_phi(ir_block *self) if (v->writes[w]->_ops[0] == v) v->writes[w]->_ops[0] = instr->_ops[0]; - if (old->store != store_local) + if (old->store != store_value && old->store != store_local) { /* If it originally wrote to a global we need to store the value * there as welli @@ -1282,6 +1496,131 @@ bool ir_function_calculate_liferanges(ir_function *self) return true; } +/* Local-value allocator + * After finishing creating the liferange of all values used in a function + * we can allocate their global-positions. + * This is the counterpart to register-allocation in register machines. + */ +typedef struct { + MEM_VECTOR_MAKE(ir_value*, locals); + MEM_VECTOR_MAKE(size_t, sizes); + MEM_VECTOR_MAKE(size_t, positions); +} function_allocator; +MEM_VEC_FUNCTIONS(function_allocator, ir_value*, locals) +MEM_VEC_FUNCTIONS(function_allocator, size_t, sizes) +MEM_VEC_FUNCTIONS(function_allocator, size_t, positions) + +static bool function_allocator_alloc(function_allocator *alloc, const ir_value *var) +{ + ir_value *slot; + size_t vsize = 1; + + slot = ir_value_var("reg", store_global, var->vtype); + if (!slot) + return false; + + if (slot->vtype == TYPE_VECTOR || slot->vtype == TYPE_VARIANT) + vsize = 3; + + if (!ir_value_life_merge_into(slot, var)) + goto localerror; + + if (!function_allocator_locals_add(alloc, slot)) + goto localerror; + + if (!function_allocator_sizes_add(alloc, vsize)) + goto localerror; + + return true; + +localerror: + ir_value_delete(slot); + return false; +} + +bool ir_function_allocate_locals(ir_function *self) +{ + size_t i, a; + bool retval = true; + size_t pos; + + ir_value *slot; + const ir_value *v; + + function_allocator alloc; + + MEM_VECTOR_INIT(&alloc, locals); + MEM_VECTOR_INIT(&alloc, sizes); + MEM_VECTOR_INIT(&alloc, positions); + + for (i = 0; i < self->locals_count; ++i) + { + if (!function_allocator_alloc(&alloc, self->locals[i])) + goto error; + } + + /* Allocate a slot for any value that still exists */ + for (i = 0; i < self->values_count; ++i) + { + v = self->values[i]; + + if (!v->life_count) + continue; + + for (a = 0; a < alloc.locals_count; ++a) + { + slot = alloc.locals[a]; + + if (ir_values_overlap(v, slot)) + continue; + + if (!ir_value_life_merge_into(slot, v)) + goto error; + + /* adjust size for this slot */ + if (v->vtype == TYPE_VECTOR || v->vtype == TYPE_VARIANT) + alloc.sizes[a] = 3; + + self->values[i]->code.local = a; + break; + } + if (a >= alloc.locals_count) { + self->values[i]->code.local = alloc.locals_count; + if (!function_allocator_alloc(&alloc, v)) + goto error; + } + } + + /* Adjust slot positions based on sizes */ + if (!function_allocator_positions_add(&alloc, 0)) + goto error; + + for (i = 1; i < alloc.sizes_count; ++i) + { + pos = alloc.positions[i-1] + alloc.sizes[i-1]; + if (!function_allocator_positions_add(&alloc, pos)) + goto 
error; + } + + self->allocated_locals = pos + alloc.sizes[alloc.sizes_count-1]; + + /* Take over the actual slot positions */ + for (i = 0; i < self->values_count; ++i) + self->values[i]->code.local = alloc.positions[self->values[i]->code.local]; + + goto cleanup; + +error: + retval = false; +cleanup: + for (i = 0; i < alloc.locals_count; ++i) + ir_value_delete(alloc.locals[i]); + MEM_VECTOR_CLEAR(&alloc, locals); + MEM_VECTOR_CLEAR(&alloc, sizes); + MEM_VECTOR_CLEAR(&alloc, positions); + return retval; +} + /* Get information about which operand * is read from, or written to. */ @@ -1368,14 +1707,19 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change ir_instr *instr; ir_value *value; bool tempbool; - size_t i, o, p, rd; + size_t i, o, p; /* bitmasks which operands are read from or written to */ size_t read, write; +#if defined(LIFE_RANGE_WITHOUT_LAST_READ) + size_t rd; new_reads_t new_reads; +#endif char dbg_ind[16] = { '#', '0' }; (void)dbg_ind; +#if defined(LIFE_RANGE_WITHOUT_LAST_READ) MEM_VECTOR_INIT(&new_reads, v); +#endif if (prev) { @@ -1392,16 +1736,19 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change for (p = 0; p < instr->phi_count; ++p) { value = instr->phi[p].value; - /* used this before new_reads - puts the last read into the life range as well - if (!ir_block_living_find(self, value, NULL)) - ir_block_living_add(self, value); - */ - /* fprintf(stderr, "read: %s\n", value->_name); */ +#if ! defined(LIFE_RANGE_WITHOUT_LAST_READ) + if (!ir_block_living_find(self, value, NULL) && + !ir_block_living_add(self, value)) + { + goto on_error; + } +#else if (!new_reads_t_v_find(&new_reads, value, NULL)) { if (!new_reads_t_v_add(&new_reads, value)) goto on_error; } +#endif } /* See which operands are read and write operands */ @@ -1423,16 +1770,20 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change /* read operands */ if (read & (1<_name); */ if (!new_reads_t_v_find(&new_reads, value, NULL)) { if (!new_reads_t_v_add(&new_reads, value)) goto on_error; } +#endif } /* write operands */ @@ -1442,10 +1793,15 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change */ if (write & (1<run_id == self->owner->run_id) @@ -1517,10 +1878,447 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change return true; on_error: +#if defined(LIFE_RANGE_WITHOUT_LAST_READ) MEM_VECTOR_CLEAR(&new_reads, v); +#endif return false; } +/*********************************************************************** + *IR Code-Generation + * + * Since the IR has the convention of putting 'write' operands + * at the beginning, we have to rotate the operands of instructions + * properly in order to generate valid QCVM code. + * + * Having destinations at a fixed position is more convenient. In QC + * this is *mostly* OPC, but FTE adds at least 2 instructions which + * read from from OPA, and store to OPB rather than OPC. Which is + * partially the reason why the implementation of these instructions + * in darkplaces has been delayed for so long. + * + * Breaking conventions is annoying... 
+ */ +static bool ir_builder_gen_global(ir_builder *self, ir_value *global); + +static bool gen_global_field(ir_value *global) +{ + if (global->isconst) + { + ir_value *fld = global->constval.vpointer; + if (!fld) { + printf("Invalid field constant with no field: %s\n", global->name); + return false; + } + + /* Now, in this case, a relocation would be impossible to code + * since it looks like this: + * .vector v = origin; <- parse error, wtf is 'origin'? + * .vector origin; + * + * But we will need a general relocation support later anyway + * for functions... might as well support that here. + */ + if (!fld->code.globaladdr) { + printf("FIXME: Relocation support\n"); + return false; + } + + /* copy the field's value */ + global->code.globaladdr = code_globals_add(code_globals_data[fld->code.globaladdr]); + } + else + { + prog_section_field fld; + + fld.name = global->code.name; + fld.offset = code_fields_elements; + fld.type = global->fieldtype; + + if (fld.type == TYPE_VOID) { + printf("Field is missing a type: %s\n", global->name); + return false; + } + + if (code_fields_add(fld) < 0) + return false; + + global->code.globaladdr = code_globals_add(fld.offset); + } + if (global->code.globaladdr < 0) + return false; + return true; +} + +static bool gen_global_pointer(ir_value *global) +{ + if (global->isconst) + { + ir_value *target = global->constval.vpointer; + if (!target) { + printf("Invalid pointer constant: %s\n", global->name); + /* NULL pointers are pointing to the NULL constant, which also + * sits at address 0, but still has an ir_value for itself. + */ + return false; + } + + /* Here, relocations ARE possible - in fteqcc-enhanced-qc: + * void() foo; <- proto + * void() *fooptr = &foo; + * void() foo = { code } + */ + if (!target->code.globaladdr) { + /* FIXME: Check for the constant nullptr ir_value! + * because then code.globaladdr being 0 is valid. + */ + printf("FIXME: Relocation support\n"); + return false; + } + + global->code.globaladdr = code_globals_add(target->code.globaladdr); + } + else + { + global->code.globaladdr = code_globals_add(0); + } + if (global->code.globaladdr < 0) + return false; + return true; +} + +static bool gen_blocks_recursive(ir_function *func, ir_block *block) +{ + prog_section_statement stmt; + prog_section_statement *stptr; + ir_instr *instr; + ir_block *target; + ir_block *ontrue; + ir_block *onfalse; + size_t stidx; + size_t i; + +tailcall: + block->generated = true; + block->code_start = code_statements_elements; + for (i = 0; i < block->instr_count; ++i) + { + instr = block->instr[i]; + + if (instr->opcode == VINSTR_PHI) { + printf("cannot generate virtual instruction (phi)\n"); + return false; + } + + if (instr->opcode == VINSTR_JUMP) { + target = instr->bops[0]; + /* for uncoditional jumps, if the target hasn't been generated + * yet, we generate them right here. + */ + if (!target->generated) { + block = target; + goto tailcall; + } + + /* otherwise we generate a jump instruction */ + stmt.opcode = INSTR_GOTO; + stmt.o1.s1 = (target->code_start-1) - code_statements_elements; + stmt.o2.s1 = 0; + stmt.o3.s1 = 0; + if (code_statements_add(stmt) < 0) + return false; + + /* no further instructions can be in this block */ + return true; + } + + if (instr->opcode == VINSTR_COND) { + ontrue = instr->bops[0]; + onfalse = instr->bops[1]; + /* TODO: have the AST signal which block should + * come first: eg. optimize IFs without ELSE... 
+ */ + + stmt.o1.u1 = instr->_ops[0]->code.globaladdr; + + stmt.o3.s1 = 0; + if (ontrue->generated) { + stmt.opcode = INSTR_IF; + stmt.o2.s1 = (ontrue->code_start-1) - code_statements_elements; + if (code_statements_add(stmt) < 0) + return false; + } + if (onfalse->generated) { + stmt.opcode = INSTR_IFNOT; + stmt.o2.s1 = (onfalse->code_start-1) - code_statements_elements; + if (code_statements_add(stmt) < 0) + return false; + } + if (!ontrue->generated) { + if (onfalse->generated) { + block = ontrue; + goto tailcall; + } + } + if (!onfalse->generated) { + if (ontrue->generated) { + block = onfalse; + goto tailcall; + } + } + /* neither ontrue nor onfalse exist */ + stmt.opcode = INSTR_IFNOT; + stidx = code_statements_elements - 1; + if (code_statements_add(stmt) < 0) + return false; + stptr = &code_statements_data[stidx]; + /* on false we jump, so add ontrue-path */ + if (!gen_blocks_recursive(func, ontrue)) + return false; + /* fixup the jump address */ + stptr->o2.s1 = (ontrue->code_start-1) - (stidx+1); + /* generate onfalse path */ + if (onfalse->generated) { + /* may have been generated in the previous recursive call */ + stmt.opcode = INSTR_GOTO; + stmt.o2.s1 = 0; + stmt.o3.s1 = 0; + stmt.o1.s1 = (onfalse->code_start-1) - code_statements_elements; + return (code_statements_add(stmt) >= 0); + } + /* if not, generate now */ + block = onfalse; + goto tailcall; + } + + if (instr->opcode >= INSTR_CALL0 && instr->opcode <= INSTR_CALL8) { + printf("TODO: call instruction\n"); + return false; + } + + if (instr->opcode == INSTR_STATE) { + printf("TODO: state instruction\n"); + return false; + } + + stmt.opcode = instr->opcode; + stmt.o1.u1 = 0; + stmt.o2.u1 = 0; + stmt.o3.u1 = 0; + + /* This is the general order of operands */ + if (instr->_ops[0]) + stmt.o3.u1 = instr->_ops[0]->code.globaladdr; + + if (instr->_ops[1]) + stmt.o1.u1 = instr->_ops[1]->code.globaladdr; + + if (instr->_ops[2]) + stmt.o2.u1 = instr->_ops[2]->code.globaladdr; + + if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE) + { + stmt.o1.u1 = stmt.o3.u1; + stmt.o3.u1 = 0; + } + else if ((stmt.opcode >= INSTR_STORE_F && + stmt.opcode <= INSTR_STORE_FNC) || + (stmt.opcode >= INSTR_NOT_F && + stmt.opcode <= INSTR_NOT_FNC)) + { + /* 2-operand instructions with A -> B */ + stmt.o2.u1 = stmt.o3.u1; + stmt.o3.u1 = 0; + } + + if (code_statements_add(stmt) < 0) + return false; + } + return true; +} + +static bool gen_function_code(ir_function *self) +{ + ir_block *block; + + /* Starting from entry point, we generate blocks "as they come" + * for now. Dead blocks will not be translated obviously. 
+ */ + if (!self->blocks_count) { + printf("Function '%s' declared without body.\n", self->name); + return false; + } + + block = self->blocks[0]; + if (block->generated) + return true; + + if (!gen_blocks_recursive(self, block)) { + printf("failed to generate blocks for '%s'\n", self->name); + return false; + } + return true; +} + +static bool gen_global_function(ir_builder *ir, ir_value *global) +{ + prog_section_function fun; + ir_function *irfun; + + size_t i; + + if (!global->isconst || + !global->constval.vfunc) + { + printf("Invalid state of function-global: not constant: %s\n", global->name); + return false; + } + + irfun = global->constval.vfunc; + + fun.name = global->code.name; + fun.file = code_cachedstring(global->context.file); + fun.profile = 0; /* always 0 */ + fun.nargs = irfun->params_count; + + for (i = 0;i < 8; ++i) { + if (i >= fun.nargs) + fun.argsize[i] = 0; + else if (irfun->params[i] == TYPE_VECTOR) + fun.argsize[i] = 3; + else + fun.argsize[i] = 1; + } + + fun.firstlocal = code_globals_elements; + fun.locals = irfun->locals_count; + for (i = 0; i < irfun->locals_count; ++i) { + if (!ir_builder_gen_global(ir, irfun->locals[i])) { + printf("Failed to generate global %s\n", irfun->locals[i]->name); + return false; + } + } + + fun.entry = code_statements_elements; + if (!gen_function_code(irfun)) { + printf("Failed to generate code for function %s\n", irfun->name); + return false; + } + + return (code_functions_add(fun) >= 0); +} + +static bool ir_builder_gen_global(ir_builder *self, ir_value *global) +{ + int32_t *iptr; + prog_section_def def; + + def.type = global->vtype; + def.offset = code_globals_elements; + def.name = global->code.name = code_genstring(global->name); + + switch (global->vtype) + { + case TYPE_POINTER: + if (code_defs_add(def) < 0) + return false; + return gen_global_pointer(global); + case TYPE_FIELD: + if (code_defs_add(def) < 0) + return false; + return gen_global_field(global); + case TYPE_ENTITY: + /* fall through */ + case TYPE_FLOAT: + { + if (code_defs_add(def) < 0) + return false; + + if (global->isconst) { + iptr = (int32_t*)&global->constval.vfloat; + global->code.globaladdr = code_globals_add(*iptr); + } else + global->code.globaladdr = code_globals_add(0); + + return global->code.globaladdr >= 0; + } + case TYPE_STRING: + { + if (code_defs_add(def) < 0) + return false; + if (global->isconst) + global->code.globaladdr = code_globals_add(code_cachedstring(global->constval.vstring)); + else + global->code.globaladdr = code_globals_add(0); + return global->code.globaladdr >= 0; + } + case TYPE_VECTOR: + { + if (code_defs_add(def) < 0) + return false; + + if (global->isconst) { + iptr = (int32_t*)&global->constval.vvec; + global->code.globaladdr = code_globals_add(iptr[0]); + if (code_globals_add(iptr[1]) < 0 || code_globals_add(iptr[2]) < 0) + return false; + } else { + global->code.globaladdr = code_globals_add(0); + if (code_globals_add(0) < 0 || code_globals_add(0) < 0) + return false; + } + return global->code.globaladdr >= 0; + } + case TYPE_FUNCTION: + if (code_defs_add(def) < 0) + return false; + code_globals_add(code_functions_elements); + return gen_global_function(self, global); + case TYPE_VARIANT: + /* assume biggest type */ + global->code.globaladdr = code_globals_add(0); + code_globals_add(0); + code_globals_add(0); + return true; + default: + /* refuse to create 'void' type or any other fancy business. 
*/ + printf("Invalid type for global variable %s\n", global->name); + return false; + } +} + +bool ir_builder_generate(ir_builder *self, const char *filename) +{ + size_t i; + + code_init(); + + /* FIXME: generate TYPE_FUNCTION globals and link them + * to their ir_function. + */ + + for (i = 0; i < self->functions_count; ++i) + { + ir_value *funval; + ir_function *fun = self->functions[i]; + + funval = ir_builder_create_global(self, fun->name, TYPE_FUNCTION); + funval->isconst = true; + funval->constval.vfunc = fun; + funval->context = fun->context; + } + + for (i = 0; i < self->globals_count; ++i) + { + if (!ir_builder_gen_global(self, self->globals[i])) { + return false; + } + } + + printf("writing '%s'...\n", filename); + return code_write(filename); +} + /*********************************************************************** *IR DEBUG Dump functions... */ @@ -1578,10 +2376,13 @@ void ir_function_dump(ir_function *f, char *ind, } if (f->blocks_count) { - - oprintf("%slife passes: %i\n", ind, (int)f->blocks[0]->run_id); - for (i = 0; i < f->blocks_count; ++i) + oprintf("%slife passes (check): %i\n", ind, (int)f->run_id); + for (i = 0; i < f->blocks_count; ++i) { + if (f->blocks[i]->run_id != f->run_id) { + oprintf("%slife pass check fail! %i != %i\n", ind, (int)f->blocks[i]->run_id, (int)f->run_id); + } ir_block_dump(f->blocks[i], ind, oprintf); + } } ind[strlen(ind)-1] = 0;
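
The overlap test introduced in ir_values_overlap() relies on each value's life ranges being kept sorted and disjoint (which ir_value_life_merge and ir_value_life_merge_into maintain), so a single forward walk over both lists is sufficient. Below is a standalone sketch of that walk; the types and names are illustrative, not the gmqcc ones, and the inclusive comparison corresponds to the LIFE_RANGE_WITHOUT_LAST_READ variant in the patch.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct { size_t start, end; } range_t;

    /* Two sorted, non-overlapping range lists clash iff some pair of
     * entries overlaps; since both lists are ordered we only ever
     * advance the list whose current entry starts earlier.
     */
    static bool ranges_overlap(const range_t *a, size_t na,
                               const range_t *b, size_t nb)
    {
        const range_t *enda = a + na;
        const range_t *endb = b + nb;

        if (!na || !nb)
            return false;

        for (;;) {
            /* both must start before (or where) the other ends */
            if (a->start <= b->end && b->start <= a->end)
                return true;

            /* advance the earlier entry; stop when either list runs out */
            if (a->start < b->start) {
                if (++a == enda)
                    return false;
            } else {
                if (++b == endb)
                    return false;
            }
        }
    }

    int main(void)
    {
        range_t va[] = { {0, 4}, {10, 12} };
        range_t vb[] = { {5, 9}, {13, 20} };
        range_t vc[] = { {3, 6} };

        printf("a/b clash: %d\n", ranges_overlap(va, 2, vb, 2)); /* 0 */
        printf("a/c clash: %d\n", ranges_overlap(va, 2, vc, 1)); /* 1 */
        return 0;
    }

ir_function_allocate_locals() uses exactly this kind of test to decide whether two values may share a local slot.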
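
The local-value allocator itself is the counterpart to register allocation: every declared local and every temporary that still has a life range is folded into the first slot whose accumulated life range it does not clash with, a new slot is opened otherwise, and the final offsets are the prefix sums of the slot sizes (3 for vectors and variants, 1 otherwise). The sketch below follows that greedy strategy but uses a naive pairwise clash test and fixed-size arrays for brevity; the real code keeps each slot's ranges merged and sorted, so treat this purely as an illustration.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MAX_RANGES 32
    #define MAX_SLOTS  16

    typedef struct { size_t start, end; } range_t;

    typedef struct {
        range_t ranges[MAX_RANGES];   /* all life ranges packed into this slot */
        size_t  range_count;
        size_t  size;                 /* 1 for scalars, 3 for vectors */
    } slot_t;

    /* naive pairwise clash test; bounds checks omitted for brevity */
    static bool slot_clashes(const slot_t *s, const range_t *r, size_t n)
    {
        size_t i, j;
        for (i = 0; i < s->range_count; ++i)
            for (j = 0; j < n; ++j)
                if (s->ranges[i].start <= r[j].end &&
                    r[j].start <= s->ranges[i].end)
                    return true;
        return false;
    }

    /* place a value into the first non-clashing slot, or open a new one */
    static size_t allocate(slot_t *slots, size_t *slot_count,
                           const range_t *life, size_t n, size_t size)
    {
        size_t a, i;
        for (a = 0; a < *slot_count; ++a) {
            if (slot_clashes(&slots[a], life, n))
                continue;
            for (i = 0; i < n; ++i)
                slots[a].ranges[slots[a].range_count++] = life[i];
            if (size > slots[a].size)
                slots[a].size = size;
            return a;
        }
        for (i = 0; i < n; ++i)
            slots[*slot_count].ranges[i] = life[i];
        slots[*slot_count].range_count = n;
        slots[*slot_count].size = size;
        return (*slot_count)++;
    }

    int main(void)
    {
        slot_t slots[MAX_SLOTS];
        size_t nslots = 0;

        range_t a[] = { {0, 3} };   /* float, dies early          */
        range_t b[] = { {4, 9} };   /* float, born after a's death */
        range_t c[] = { {2, 7} };   /* vector, overlaps a and b    */

        printf("a -> slot %zu\n", allocate(slots, &nslots, a, 1, 1)); /* 0 */
        printf("b -> slot %zu\n", allocate(slots, &nslots, b, 1, 1)); /* 0 */
        printf("c -> slot %zu\n", allocate(slots, &nslots, c, 1, 3)); /* 1 */
        return 0;
    }

With the ranges in main(), a and b share slot 0 because their lifetimes are disjoint, while the vector c clashes with both and opens slot 1 of size 3; prefix-summing the sizes would give positions 0 and 1 and a total of 1 + 3 = 4 allocated locals.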
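
As the code-generation comment notes, the IR keeps the written operand in _ops[0], whereas QCVM statements expect the destination in OPC for ordinary three-operand instructions, in OPB for the STORE and NOT families, and in OPA for RETURN/DONE. A minimal sketch of that rotation follows; the statement layout and opcode names are stand-ins, not the real prog_section_statement.

    #include <stdio.h>

    typedef struct { unsigned o1, o2, o3; } stmt_t;   /* OPA, OPB, OPC */

    enum { OP_ADD_F, OP_STORE_F };                     /* illustrative opcodes */

    static stmt_t rotate(int opcode, unsigned dst, unsigned a, unsigned b)
    {
        stmt_t s = { a, b, dst };      /* general case: sources in A/B, result in C */
        if (opcode == OP_STORE_F) {    /* store family: value in A, destination in B */
            s.o2 = s.o3;
            s.o3 = 0;
        }
        return s;
    }

    int main(void)
    {
        stmt_t add = rotate(OP_ADD_F,   7, 1, 2);   /* ADD_F   1 2 -> 7 */
        stmt_t st  = rotate(OP_STORE_F, 7, 1, 0);   /* STORE_F 1   -> 7 */
        printf("ADD_F:   A=%u B=%u C=%u\n", add.o1, add.o2, add.o3);
        printf("STORE_F: A=%u B=%u C=%u\n", st.o1,  st.o2,  st.o3);
        return 0;
    }

FTE's additional instructions that read OPA and write OPB, mentioned in the same comment, would need yet another case in such a mapping.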