X-Git-Url: https://git.xonotic.org/?p=xonotic%2Fgmqcc.git;a=blobdiff_plain;f=ir.c;h=42999c53564770bb53e3910993b630d57568f7ae;hp=8e92d4da6b368c9f2577146562b8f88c2b8a4018;hb=9c2e6a2334132a6c22a1cf48263bc3df420da22f;hpb=50f00fb0a75533c467461590c405c20360df0747 diff --git a/ir.c b/ir.c index 8e92d4d..42999c5 100644 --- a/ir.c +++ b/ir.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 + * Copyright (C) 2012 * Wolfgang Bumiller * * Permission is hereby granted, free of charge, to any person obtaining a copy of @@ -25,6 +25,57 @@ #include "gmqcc.h" #include "ir.h" +/*********************************************************************** + * Type sizes used at multiple points in the IR codegen + */ + +size_t type_sizeof[TYPE_COUNT] = { + 1, /* TYPE_VOID */ + 1, /* TYPE_STRING */ + 1, /* TYPE_FLOAT */ + 3, /* TYPE_VECTOR */ + 1, /* TYPE_ENTITY */ + 1, /* TYPE_FIELD */ + 1, /* TYPE_FUNCTION */ + 1, /* TYPE_POINTER */ +#if 0 + 1, /* TYPE_INTEGER */ +#endif + 3, /* TYPE_VARIANT */ +}; + +uint16_t type_store_instr[TYPE_COUNT] = { + INSTR_STORE_F, /* should use I when having integer support */ + INSTR_STORE_S, + INSTR_STORE_F, + INSTR_STORE_V, + INSTR_STORE_ENT, + INSTR_STORE_FLD, + INSTR_STORE_FNC, + INSTR_STORE_ENT, /* should use I */ +#if 0 + INSTR_STORE_ENT, /* integer type */ +#endif + INSTR_STORE_V, /* variant, should never be accessed */ +}; + +uint16_t type_storep_instr[TYPE_COUNT] = { + INSTR_STOREP_F, /* should use I when having integer support */ + INSTR_STOREP_S, + INSTR_STOREP_F, + INSTR_STOREP_V, + INSTR_STOREP_ENT, + INSTR_STOREP_FLD, + INSTR_STOREP_FNC, + INSTR_STOREP_ENT, /* should use I */ +#if 0 + INSTR_STOREP_ENT, /* integer type */ +#endif + INSTR_STOREP_V, /* variant, should never be accessed */ +}; + +MEM_VEC_FUNCTIONS(ir_value_vector, ir_value*, v) + /*********************************************************************** *IR Builder */ @@ -34,8 +85,12 @@ ir_builder* ir_builder_new(const char *modulename) ir_builder* self; self = (ir_builder*)mem_a(sizeof(*self)); + if (!self) + return NULL; + MEM_VECTOR_INIT(self, functions); MEM_VECTOR_INIT(self, globals); + MEM_VECTOR_INIT(self, fields); self->name = NULL; if (!ir_builder_set_name(self, modulename)) { mem_d(self); @@ -51,6 +106,7 @@ ir_builder* ir_builder_new(const char *modulename) } MEM_VEC_FUNCTIONS(ir_builder, ir_value*, globals) +MEM_VEC_FUNCTIONS(ir_builder, ir_value*, fields) MEM_VEC_FUNCTIONS(ir_builder, ir_function*, functions) void ir_builder_delete(ir_builder* self) @@ -64,7 +120,11 @@ void ir_builder_delete(ir_builder* self) for (i = 0; i != self->globals_count; ++i) { ir_value_delete(self->globals[i]); } - MEM_VECTOR_CLEAR(self, globals); + MEM_VECTOR_CLEAR(self, fields); + for (i = 0; i != self->fields_count; ++i) { + ir_value_delete(self->fields[i]); + } + MEM_VECTOR_CLEAR(self, fields); mem_d(self); } @@ -86,20 +146,32 @@ ir_function* ir_builder_get_function(ir_builder *self, const char *name) return NULL; } -ir_function* ir_builder_create_function(ir_builder *self, const char *name) +ir_function* ir_builder_create_function(ir_builder *self, const char *name, int outtype) { ir_function *fn = ir_builder_get_function(self, name); if (fn) { return NULL; } - fn = ir_function_new(self); + fn = ir_function_new(self, outtype); if (!ir_function_set_name(fn, name) || !ir_builder_functions_add(self, fn) ) { ir_function_delete(fn); return NULL; } + + fn->value = ir_builder_create_global(self, fn->name, TYPE_FUNCTION); + if (!fn->value) { + ir_function_delete(fn); + return NULL; + } + + fn->value->isconst = true; + 
fn->value->outtype = outtype; + fn->value->constval.vfunc = fn; + fn->value->context = fn->context; + return fn; } @@ -128,6 +200,33 @@ ir_value* ir_builder_create_global(ir_builder *self, const char *name, int vtype return ve; } +ir_value* ir_builder_get_field(ir_builder *self, const char *name) +{ + size_t i; + for (i = 0; i < self->fields_count; ++i) { + if (!strcmp(self->fields[i]->name, name)) + return self->fields[i]; + } + return NULL; +} + + +ir_value* ir_builder_create_field(ir_builder *self, const char *name, int vtype) +{ + ir_value *ve = ir_builder_get_field(self, name); + if (ve) { + return NULL; + } + + ve = ir_value_var(name, store_global, TYPE_FIELD); + ve->fieldtype = vtype; + if (!ir_builder_fields_add(self, ve)) { + ir_value_delete(ve); + return NULL; + } + return ve; +} + /*********************************************************************** *IR Function */ @@ -135,11 +234,16 @@ ir_value* ir_builder_create_global(ir_builder *self, const char *name, int vtype bool ir_function_naive_phi(ir_function*); void ir_function_enumerate(ir_function*); bool ir_function_calculate_liferanges(ir_function*); +bool ir_function_allocate_locals(ir_function*); -ir_function* ir_function_new(ir_builder* owner) +ir_function* ir_function_new(ir_builder* owner, int outtype) { ir_function *self; self = (ir_function*)mem_a(sizeof(*self)); + + if (!self) + return NULL; + self->name = NULL; if (!ir_function_set_name(self, "<@unnamed>")) { mem_d(self); @@ -148,7 +252,9 @@ ir_function* ir_function_new(ir_builder* owner) self->owner = owner; self->context.file = "<@no context>"; self->context.line = 0; - self->retype = TYPE_VOID; + self->outtype = outtype; + self->value = NULL; + self->builtin = 0; MEM_VECTOR_INIT(self, params); MEM_VECTOR_INIT(self, blocks); MEM_VECTOR_INIT(self, values); @@ -160,6 +266,7 @@ ir_function* ir_function_new(ir_builder* owner) MEM_VEC_FUNCTIONS(ir_function, ir_value*, values) MEM_VEC_FUNCTIONS(ir_function, ir_block*, blocks) MEM_VEC_FUNCTIONS(ir_function, ir_value*, locals) +MEM_VEC_FUNCTIONS(ir_function, int, params) bool ir_function_set_name(ir_function *self, const char *name) { @@ -188,6 +295,8 @@ void ir_function_delete(ir_function *self) ir_value_delete(self->locals[i]); MEM_VECTOR_CLEAR(self, locals); + /* self->value is deleted by the builder */ + mem_d(self); } @@ -209,6 +318,9 @@ ir_block* ir_function_create_block(ir_function *self, const char *label) bool ir_function_finalize(ir_function *self) { + if (self->builtin) + return true; + if (!ir_function_naive_phi(self)) return false; @@ -216,6 +328,9 @@ bool ir_function_finalize(ir_function *self) if (!ir_function_calculate_liferanges(self)) return false; + + if (!ir_function_allocate_locals(self)) + return false; return true; } @@ -229,14 +344,21 @@ ir_value* ir_function_get_local(ir_function *self, const char *name) return NULL; } -ir_value* ir_function_create_local(ir_function *self, const char *name, int vtype) +ir_value* ir_function_create_local(ir_function *self, const char *name, int vtype, bool param) { ir_value *ve = ir_function_get_local(self, name); if (ve) { return NULL; } - ve = ir_value_var(name, store_local, vtype); + if (param && + self->locals_count && + self->locals[self->locals_count-1]->store != store_param) { + printf("cannot add parameters after adding locals\n"); + return NULL; + } + + ve = ir_value_var(name, (param ? 
store_param : store_local), vtype); if (!ir_function_locals_add(self, ve)) { ir_value_delete(ve); return NULL; @@ -252,6 +374,11 @@ ir_block* ir_block_new(ir_function* owner, const char *name) { ir_block *self; self = (ir_block*)mem_a(sizeof(*self)); + if (!self) + return NULL; + + memset(self, 0, sizeof(*self)); + self->label = NULL; if (!ir_block_set_label(self, name)) { mem_d(self); @@ -269,6 +396,9 @@ ir_block* ir_block_new(ir_function* owner, const char *name) self->is_return = false; self->run_id = 0; MEM_VECTOR_INIT(self, living); + + self->generated = false; + return self; } MEM_VEC_FUNCTIONS(ir_block, ir_instr*, instr) @@ -305,6 +435,9 @@ ir_instr* ir_instr_new(ir_block* owner, int op) { ir_instr *self; self = (ir_instr*)mem_a(sizeof(*self)); + if (!self) + return NULL; + self->owner = owner; self->context.file = "<@no context>"; self->context.line = 0; @@ -315,11 +448,13 @@ ir_instr* ir_instr_new(ir_block* owner, int op) self->bops[0] = NULL; self->bops[1] = NULL; MEM_VECTOR_INIT(self, phi); + MEM_VECTOR_INIT(self, params); self->eid = 0; return self; } MEM_VEC_FUNCTIONS(ir_instr, ir_phi_entry_t, phi) +MEM_VEC_FUNCTIONS(ir_instr, ir_value*, params) void ir_instr_delete(ir_instr *self) { @@ -333,14 +468,22 @@ void ir_instr_delete(ir_instr *self) for (i = 0; i < self->phi_count; ++i) { size_t idx; if (ir_value_writes_find(self->phi[i].value, self, &idx)) - if (ir_value_writes_remove(self->phi[i].value, idx)); + if (ir_value_writes_remove(self->phi[i].value, idx)) GMQCC_SUPPRESS_EMPTY_BODY; if (ir_value_reads_find(self->phi[i].value, self, &idx)) - if (ir_value_reads_remove(self->phi[i].value, idx)); + if (ir_value_reads_remove (self->phi[i].value, idx)) GMQCC_SUPPRESS_EMPTY_BODY; } MEM_VECTOR_CLEAR(self, phi); - if (ir_instr_op(self, 0, NULL, false)); - if (ir_instr_op(self, 1, NULL, false)); - if (ir_instr_op(self, 2, NULL, false)); + for (i = 0; i < self->params_count; ++i) { + size_t idx; + if (ir_value_writes_find(self->params[i], self, &idx)) + if (ir_value_writes_remove(self->params[i], idx)) GMQCC_SUPPRESS_EMPTY_BODY; + if (ir_value_reads_find(self->params[i], self, &idx)) + if (ir_value_reads_remove (self->params[i], idx)) GMQCC_SUPPRESS_EMPTY_BODY; + } + MEM_VECTOR_CLEAR(self, params); + if (ir_instr_op(self, 0, NULL, false)) GMQCC_SUPPRESS_EMPTY_BODY; + if (ir_instr_op(self, 1, NULL, false)) GMQCC_SUPPRESS_EMPTY_BODY; + if (ir_instr_op(self, 2, NULL, false)) GMQCC_SUPPRESS_EMPTY_BODY; mem_d(self); } @@ -376,11 +519,18 @@ bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing) *IR Value */ +int32_t ir_value_code_addr(const ir_value *self) +{ + return self->code.globaladdr + self->code.addroffset; +} + ir_value* ir_value_var(const char *name, int storetype, int vtype) { ir_value *self; self = (ir_value*)mem_a(sizeof(*self)); self->vtype = vtype; + self->fieldtype = TYPE_VOID; + self->outtype = TYPE_VOID; self->store = storetype; MEM_VECTOR_INIT(self, reads); MEM_VECTOR_INIT(self, writes); @@ -390,9 +540,33 @@ ir_value* ir_value_var(const char *name, int storetype, int vtype) self->name = NULL; ir_value_set_name(self, name); + memset(&self->constval, 0, sizeof(self->constval)); + memset(&self->code, 0, sizeof(self->code)); + MEM_VECTOR_INIT(self, life); return self; } + +ir_value* ir_value_vector_member(ir_value *self, unsigned int member) +{ + ir_value *m; + if (member >= 3) + return NULL; + + if (self->members[member]) + return self->members[member]; + + m = ir_value_var(self->name, self->store, TYPE_FLOAT); + if (!m) + return NULL; + m->context = 
self->context; + + self->members[member] = m; + m->code.addroffset = member; + + return m; +} + MEM_VEC_FUNCTIONS(ir_value, ir_life_entry_t, life) MEM_VEC_FUNCTIONS_ALL(ir_value, ir_instr*, reads) MEM_VEC_FUNCTIONS_ALL(ir_value, ir_instr*, writes) @@ -412,12 +586,18 @@ ir_value* ir_value_out(ir_function *owner, const char *name, int storetype, int void ir_value_delete(ir_value* self) { - mem_d((void*)self->name); + size_t i; + if (self->name) + mem_d((void*)self->name); if (self->isconst) { if (self->vtype == TYPE_STRING) mem_d((void*)self->constval.vstring); } + for (i = 0; i < 3; ++i) { + if (self->members[i]) + ir_value_delete(self->members[i]); + } MEM_VECTOR_CLEAR(self, reads); MEM_VECTOR_CLEAR(self, writes); MEM_VECTOR_CLEAR(self, life); @@ -440,6 +620,15 @@ bool ir_value_set_float(ir_value *self, float f) return true; } +bool ir_value_set_func(ir_value *self, int f) +{ + if (self->vtype != TYPE_FUNCTION) + return false; + self->constval.vint = f; + self->isconst = true; + return true; +} + bool ir_value_set_vector(ir_value *self, vector v) { if (self->vtype != TYPE_VECTOR) @@ -511,6 +700,7 @@ bool ir_value_life_merge(ir_value *self, size_t s) } /* nothing found? append */ if (i == self->life_count) { + ir_life_entry_t e; if (life && life->end+1 == s) { /* previous life range can be merged in */ @@ -519,7 +709,6 @@ bool ir_value_life_merge(ir_value *self, size_t s) } if (life && life->end >= s) return false; - ir_life_entry_t e; e.start = e.end = s; if (!ir_value_life_add(self, e)) return false; /* failing */ @@ -558,6 +747,142 @@ bool ir_value_life_merge(ir_value *self, size_t s) return ir_value_life_insert(self, i, new_entry); } +bool ir_value_life_merge_into(ir_value *self, const ir_value *other) +{ + size_t i, myi; + + if (!other->life_count) + return true; + + if (!self->life_count) { + for (i = 0; i < other->life_count; ++i) { + if (!ir_value_life_add(self, other->life[i])) + return false; + } + return true; + } + + myi = 0; + for (i = 0; i < other->life_count; ++i) + { + const ir_life_entry_t *life = &other->life[i]; + while (true) + { + ir_life_entry_t *entry = &self->life[myi]; + + if (life->end+1 < entry->start) + { + /* adding an interval before entry */ + if (!ir_value_life_insert(self, myi, *life)) + return false; + ++myi; + break; + } + + if (life->start < entry->start && + life->end >= entry->start) + { + /* starts earlier and overlaps */ + entry->start = life->start; + } + + if (life->end > entry->end && + life->start-1 <= entry->end) + { + /* ends later and overlaps */ + entry->end = life->end; + } + + /* see if our change combines it with the next ranges */ + while (myi+1 < self->life_count && + entry->end+1 >= self->life[1+myi].start) + { + /* overlaps with (myi+1) */ + if (entry->end < self->life[1+myi].end) + entry->end = self->life[1+myi].end; + if (!ir_value_life_remove(self, myi+1)) + return false; + entry = &self->life[myi]; + } + + /* see if we're after the entry */ + if (life->start > entry->end) + { + ++myi; + /* append if we're at the end */ + if (myi >= self->life_count) { + if (!ir_value_life_add(self, *life)) + return false; + break; + } + /* otherweise check the next range */ + continue; + } + break; + } + } + return true; +} + +bool ir_values_overlap(const ir_value *a, const ir_value *b) +{ + /* For any life entry in A see if it overlaps with + * any life entry in B. + * Note that the life entries are orderes, so we can make a + * more efficient algorithm there than naively translating the + * statement above. 
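+ * Since both life lists are sorted by start, the loop below just walks
+ * them with two cursors and advances whichever entry starts earlier,
+ * so the check stays roughly linear in the number of life entries.
+ * A small made-up example: with A = { [2,5], [9,12] } and B = { [6,8] }
+ * no pair has both entries starting before the other one ends, so the
+ * two values never clash and could share a storage slot.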
+ */ + + ir_life_entry_t *la, *lb, *enda, *endb; + + /* first of all, if either has no life range, they cannot clash */ + if (!a->life_count || !b->life_count) + return false; + + la = a->life; + lb = b->life; + enda = la + a->life_count; + endb = lb + b->life_count; + while (true) + { + /* check if the entries overlap, for that, + * both must start before the other one ends. + */ +#if defined(LIFE_RANGE_WITHOUT_LAST_READ) + if (la->start <= lb->end && + lb->start <= la->end) +#else + if (la->start < lb->end && + lb->start < la->end) +#endif + { + return true; + } + + /* entries are ordered + * one entry is earlier than the other + * that earlier entry will be moved forward + */ + if (la->start < lb->start) + { + /* order: A B, move A forward + * check if we hit the end with A + */ + if (++la == enda) + break; + } + else if (lb->start < la->start) + { + /* order: B A, move B forward + * check if we hit the end with B + */ + if (++lb == endb) + break; + } + } + return false; +} + /*********************************************************************** *IR main operations */ @@ -566,6 +891,7 @@ bool ir_block_create_store_op(ir_block *self, int op, ir_value *target, ir_value { if (target->store == store_value) { fprintf(stderr, "cannot store to an SSA value\n"); + fprintf(stderr, "trying to store: %s <- %s\n", target->name, what->name); return false; } else { ir_instr *in = ir_instr_new(self, op); @@ -590,40 +916,32 @@ bool ir_block_create_store(ir_block *self, ir_value *target, ir_value *what) else vtype = target->vtype; - switch (vtype) { - case TYPE_FLOAT: -#if 0 - if (what->vtype == TYPE_INTEGER) - op = INSTR_CONV_ITOF; - else -#endif - op = INSTR_STORE_F; - break; - case TYPE_VECTOR: - op = INSTR_STORE_V; - break; - case TYPE_ENTITY: - op = INSTR_STORE_ENT; - break; - case TYPE_STRING: - op = INSTR_STORE_S; - break; -#if 0 - case TYPE_INTEGER: - if (what->vtype == TYPE_INTEGER) - op = INSTR_CONV_FTOI; - else - op = INSTR_STORE_I; - break; -#endif - case TYPE_POINTER: #if 0 - op = INSTR_STORE_I; -#else - op = INSTR_STORE_ENT; + if (vtype == TYPE_FLOAT && what->vtype == TYPE_INTEGER) + op = INSTR_CONV_ITOF; + else if (vtype == TYPE_INTEGER && what->vtype == TYPE_FLOAT) + op = INSTR_CONV_FTOI; #endif - break; - } + op = type_store_instr[vtype]; + + return ir_block_create_store_op(self, op, target, what); +} + +bool ir_block_create_storep(ir_block *self, ir_value *target, ir_value *what) +{ + int op = 0; + int vtype; + + if (target->vtype != TYPE_POINTER) + return false; + + /* storing using pointer - target is a pointer, type must be + * inferred from source + */ + vtype = what->vtype; + + op = type_storep_instr[vtype]; + return ir_block_create_store_op(self, op, target, what); } @@ -657,7 +975,7 @@ bool ir_block_create_if(ir_block *self, ir_value *v, return false; } self->final = true; - //in = ir_instr_new(self, (v->vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F)); + /*in = ir_instr_new(self, (v->vtype == TYPE_STRING ? 
INSTR_IF_S : INSTR_IF_F));*/ in = ir_instr_new(self, VINSTR_COND); if (!in) return false; @@ -738,7 +1056,7 @@ ir_instr* ir_block_create_phi(ir_block *self, const char *label, int ot) in = ir_instr_new(self, VINSTR_PHI); if (!in) return NULL; - out = ir_value_out(self->owner, label, store_local, ot); + out = ir_value_out(self->owner, label, store_value, ot); if (!out) { ir_instr_delete(in); return NULL; @@ -780,6 +1098,47 @@ bool ir_phi_add(ir_instr* self, ir_block *b, ir_value *v) return ir_instr_phi_add(self, pe); } +/* call related code */ +ir_instr* ir_block_create_call(ir_block *self, const char *label, ir_value *func) +{ + ir_value *out; + ir_instr *in; + in = ir_instr_new(self, INSTR_CALL0); + if (!in) + return NULL; + out = ir_value_out(self->owner, label, store_return, func->outtype); + if (!out) { + ir_instr_delete(in); + return NULL; + } + if (!ir_instr_op(in, 0, out, true) || + !ir_instr_op(in, 1, func, false) || + !ir_block_instr_add(self, in)) + { + ir_instr_delete(in); + ir_value_delete(out); + return NULL; + } + return in; +} + +ir_value* ir_call_value(ir_instr *self) +{ + return self->_ops[0]; +} + +bool ir_call_param(ir_instr* self, ir_value *v) +{ + if (!ir_instr_params_add(self, v)) + return false; + if (!ir_value_reads_add(v, self)) { + if (!ir_instr_params_remove(self, self->params_count-1)) + GMQCC_SUPPRESS_EMPTY_BODY; + return false; + } + return true; +} + /* binary op related code */ ir_value* ir_block_create_binop(ir_block *self, @@ -854,7 +1213,7 @@ ir_value* ir_block_create_binop(ir_block *self, break; #endif default: - // ranges: + /* ranges: */ /* boolean operations result in floats */ if (opcode >= INSTR_EQ_F && opcode <= INSTR_GT) ot = TYPE_FLOAT; @@ -871,33 +1230,119 @@ ir_value* ir_block_create_binop(ir_block *self, return NULL; } - ir_value *out = ir_value_out(self->owner, label, store_local, ot); + return ir_block_create_general_instr(self, label, opcode, left, right, ot); +} + +ir_value* ir_block_create_unary(ir_block *self, + const char *label, int opcode, + ir_value *operand) +{ + int ot = TYPE_FLOAT; + switch (opcode) { + case INSTR_NOT_F: + case INSTR_NOT_V: + case INSTR_NOT_S: + case INSTR_NOT_ENT: + case INSTR_NOT_FNC: +#if 0 + case INSTR_NOT_I: +#endif + ot = TYPE_FLOAT; + break; + /* QC doesn't have other unary operations. We expect extensions to fill + * the above list, otherwise we assume out-type = in-type, eg for an + * unary minus + */ + default: + ot = operand->vtype; + break; + }; + if (ot == TYPE_VOID) { + /* The AST or parser were supposed to check this! 
*/ + return NULL; + } + + /* let's use the general instruction creator and pass NULL for OPB */ + return ir_block_create_general_instr(self, label, opcode, operand, NULL, ot); +} + +ir_value* ir_block_create_general_instr(ir_block *self, const char *label, + int op, ir_value *a, ir_value *b, int outype) +{ + ir_instr *instr; + ir_value *out; + + out = ir_value_out(self->owner, label, store_value, outype); if (!out) return NULL; - ir_instr *in = ir_instr_new(self, opcode); - if (!in) { + instr = ir_instr_new(self, op); + if (!instr) { ir_value_delete(out); return NULL; } - if (!ir_instr_op(in, 0, out, true) || - !ir_instr_op(in, 1, left, false) || - !ir_instr_op(in, 2, right, false) ) + if (!ir_instr_op(instr, 0, out, true) || + !ir_instr_op(instr, 1, a, false) || + !ir_instr_op(instr, 2, b, false) ) { goto on_error; } - if (!ir_block_instr_add(self, in)) + if (!ir_block_instr_add(self, instr)) goto on_error; return out; on_error: + ir_instr_delete(instr); ir_value_delete(out); - ir_instr_delete(in); return NULL; } +ir_value* ir_block_create_fieldaddress(ir_block *self, const char *label, ir_value *ent, ir_value *field) +{ + ir_value *v; + + /* Support for various pointer types todo if so desired */ + if (ent->vtype != TYPE_ENTITY) + return NULL; + + if (field->vtype != TYPE_FIELD) + return NULL; + + v = ir_block_create_general_instr(self, label, INSTR_ADDRESS, ent, field, TYPE_POINTER); + v->fieldtype = field->fieldtype; + return v; +} + +ir_value* ir_block_create_load_from_ent(ir_block *self, const char *label, ir_value *ent, ir_value *field, int outype) +{ + int op; + if (ent->vtype != TYPE_ENTITY) + return NULL; + + /* at some point we could redirect for TYPE_POINTER... but that could lead to carelessness */ + if (field->vtype != TYPE_FIELD) + return NULL; + + switch (outype) + { + case TYPE_FLOAT: op = INSTR_LOAD_F; break; + case TYPE_VECTOR: op = INSTR_LOAD_V; break; + case TYPE_STRING: op = INSTR_LOAD_S; break; + case TYPE_FIELD: op = INSTR_LOAD_FLD; break; + case TYPE_ENTITY: op = INSTR_LOAD_ENT; break; +#if 0 + case TYPE_POINTER: op = INSTR_LOAD_I; break; + case TYPE_INTEGER: op = INSTR_LOAD_I; break; +#endif + default: + return NULL; + } + + return ir_block_create_general_instr(self, label, op, ent, field, outype); +} + ir_value* ir_block_create_add(ir_block *self, const char *label, ir_value *left, ir_value *right) @@ -1120,7 +1565,7 @@ static bool ir_block_naive_phi(ir_block *self) if (v->writes[w]->_ops[0] == v) v->writes[w]->_ops[0] = instr->_ops[0]; - if (old->store != store_local) + if (old->store != store_value && old->store != store_local && old->store != store_param) { /* If it originally wrote to a global we need to store the value * there as welli @@ -1229,7 +1674,136 @@ bool ir_function_calculate_liferanges(ir_function *self) return true; } -/* Get information about which operand +/* Local-value allocator + * After finishing creating the liferange of all values used in a function + * we can allocate their global-positions. + * This is the counterpart to register-allocation in register machines. 
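+ * Rough outline of the scheme implemented below: every declared local
+ * gets a slot of its own first; every SSA value then tries to reuse an
+ * existing slot whose merged life ranges do not overlap its own
+ * (ir_values_overlap), and gets a fresh slot appended otherwise.  A
+ * slot's size is the largest type_sizeof[] of anything placed in it,
+ * and the 'positions' vector turns slot indices into global offsets:
+ * e.g. slot sizes 3, 1, 1 give offsets 0, 3, 4 and 5 allocated globals.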
+ */ +typedef struct { + MEM_VECTOR_MAKE(ir_value*, locals); + MEM_VECTOR_MAKE(size_t, sizes); + MEM_VECTOR_MAKE(size_t, positions); +} function_allocator; +MEM_VEC_FUNCTIONS(function_allocator, ir_value*, locals) +MEM_VEC_FUNCTIONS(function_allocator, size_t, sizes) +MEM_VEC_FUNCTIONS(function_allocator, size_t, positions) + +static bool function_allocator_alloc(function_allocator *alloc, const ir_value *var) +{ + ir_value *slot; + size_t vsize = type_sizeof[var->vtype]; + + slot = ir_value_var("reg", store_global, var->vtype); + if (!slot) + return false; + + if (!ir_value_life_merge_into(slot, var)) + goto localerror; + + if (!function_allocator_locals_add(alloc, slot)) + goto localerror; + + if (!function_allocator_sizes_add(alloc, vsize)) + goto localerror; + + return true; + +localerror: + ir_value_delete(slot); + return false; +} + +bool ir_function_allocate_locals(ir_function *self) +{ + size_t i, a; + bool retval = true; + size_t pos; + + ir_value *slot; + const ir_value *v; + + function_allocator alloc; + + if (!self->locals_count) + return true; + + MEM_VECTOR_INIT(&alloc, locals); + MEM_VECTOR_INIT(&alloc, sizes); + MEM_VECTOR_INIT(&alloc, positions); + + for (i = 0; i < self->locals_count; ++i) + { + if (!function_allocator_alloc(&alloc, self->locals[i])) + goto error; + } + + /* Allocate a slot for any value that still exists */ + for (i = 0; i < self->values_count; ++i) + { + v = self->values[i]; + + if (!v->life_count) + continue; + + for (a = 0; a < alloc.locals_count; ++a) + { + slot = alloc.locals[a]; + + if (ir_values_overlap(v, slot)) + continue; + + if (!ir_value_life_merge_into(slot, v)) + goto error; + + /* adjust size for this slot */ + if (alloc.sizes[a] < type_sizeof[v->vtype]) + alloc.sizes[a] = type_sizeof[v->vtype]; + + self->values[i]->code.local = a; + break; + } + if (a >= alloc.locals_count) { + self->values[i]->code.local = alloc.locals_count; + if (!function_allocator_alloc(&alloc, v)) + goto error; + } + } + + /* Adjust slot positions based on sizes */ + if (!function_allocator_positions_add(&alloc, 0)) + goto error; + + if (alloc.sizes_count) + pos = alloc.positions[0] + alloc.sizes[0]; + else + pos = 0; + for (i = 1; i < alloc.sizes_count; ++i) + { + pos = alloc.positions[i-1] + alloc.sizes[i-1]; + if (!function_allocator_positions_add(&alloc, pos)) + goto error; + } + + self->allocated_locals = pos + alloc.sizes[alloc.sizes_count-1]; + + /* Take over the actual slot positions */ + for (i = 0; i < self->values_count; ++i) + self->values[i]->code.local = alloc.positions[self->values[i]->code.local]; + + goto cleanup; + +error: + retval = false; +cleanup: + for (i = 0; i < alloc.locals_count; ++i) + ir_value_delete(alloc.locals[i]); + MEM_VECTOR_CLEAR(&alloc, locals); + MEM_VECTOR_CLEAR(&alloc, sizes); + MEM_VECTOR_CLEAR(&alloc, positions); + return retval; +} + +/* Get information about which operand * is read from, or written to. 
*/ static void ir_op_read_write(int op, size_t *read, size_t *write) @@ -1315,14 +1889,19 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change ir_instr *instr; ir_value *value; bool tempbool; - size_t i, o, p, rd; + size_t i, o, p; /* bitmasks which operands are read from or written to */ size_t read, write; +#if defined(LIFE_RANGE_WITHOUT_LAST_READ) + size_t rd; new_reads_t new_reads; +#endif char dbg_ind[16] = { '#', '0' }; (void)dbg_ind; +#if defined(LIFE_RANGE_WITHOUT_LAST_READ) MEM_VECTOR_INIT(&new_reads, v); +#endif if (prev) { @@ -1339,16 +1918,19 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change for (p = 0; p < instr->phi_count; ++p) { value = instr->phi[p].value; - /* used this before new_reads - puts the last read into the life range as well - if (!ir_block_living_find(self, value, NULL)) - ir_block_living_add(self, value); - */ - /* fprintf(stderr, "read: %s\n", value->_name); */ +#if ! defined(LIFE_RANGE_WITHOUT_LAST_READ) + if (!ir_block_living_find(self, value, NULL) && + !ir_block_living_add(self, value)) + { + goto on_error; + } +#else if (!new_reads_t_v_find(&new_reads, value, NULL)) { if (!new_reads_t_v_add(&new_reads, value)) goto on_error; } +#endif } /* See which operands are read and write operands */ @@ -1363,23 +1945,30 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change value = instr->_ops[o]; /* We only care about locals */ + /* we also calculate parameter liferanges so that locals + * can take up parameter slots */ if (value->store != store_value && - value->store != store_local) + value->store != store_local && + value->store != store_param) continue; /* read operands */ if (read & (1<_name); */ if (!new_reads_t_v_find(&new_reads, value, NULL)) { if (!new_reads_t_v_add(&new_reads, value)) goto on_error; } +#endif } /* write operands */ @@ -1389,10 +1978,15 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change */ if (write & (1<eid); - //fprintf(stderr, "living added values\n"); + /*fprintf(stderr, "living added values\n");*/ *changed = *changed || tempbool; +#if defined(LIFE_RANGE_WITHOUT_LAST_READ) /* new reads: */ for (rd = 0; rd < new_reads.v_count; ++rd) { @@ -1449,6 +2047,7 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change } } MEM_VECTOR_CLEAR(&new_reads, v); +#endif } if (self->run_id == self->owner->run_id) @@ -1464,10 +2063,544 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change return true; on_error: +#if defined(LIFE_RANGE_WITHOUT_LAST_READ) MEM_VECTOR_CLEAR(&new_reads, v); +#endif return false; } +/*********************************************************************** + *IR Code-Generation + * + * Since the IR has the convention of putting 'write' operands + * at the beginning, we have to rotate the operands of instructions + * properly in order to generate valid QCVM code. + * + * Having destinations at a fixed position is more convenient. In QC + * this is *mostly* OPC, but FTE adds at least 2 instructions which + * read from from OPA, and store to OPB rather than OPC. Which is + * partially the reason why the implementation of these instructions + * in darkplaces has been delayed for so long. + * + * Breaking conventions is annoying... 
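+ * For orientation, the general mapping used in gen_blocks_recursive is:
+ *
+ *   IR (dest, a, b)        ->  OPA = a, OPB = b, OPC = dest
+ *   RETURN / DONE          ->  the value is moved into OPA instead
+ *   STORE_F .. STORE_FNC   ->  the destination is moved into OPB (A -> B)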
+ */ +static bool ir_builder_gen_global(ir_builder *self, ir_value *global); + +static bool gen_global_field(ir_value *global) +{ + if (global->isconst) + { + ir_value *fld = global->constval.vpointer; + if (!fld) { + printf("Invalid field constant with no field: %s\n", global->name); + return false; + } + + /* Now, in this case, a relocation would be impossible to code + * since it looks like this: + * .vector v = origin; <- parse error, wtf is 'origin'? + * .vector origin; + * + * But we will need a general relocation support later anyway + * for functions... might as well support that here. + */ + if (!fld->code.globaladdr) { + printf("FIXME: Relocation support\n"); + return false; + } + + /* copy the field's value */ + global->code.globaladdr = code_globals_add(code_globals_data[fld->code.globaladdr]); + } + else + { + global->code.globaladdr = code_globals_add(0); + } + if (global->code.globaladdr < 0) + return false; + return true; +} + +static bool gen_global_pointer(ir_value *global) +{ + if (global->isconst) + { + ir_value *target = global->constval.vpointer; + if (!target) { + printf("Invalid pointer constant: %s\n", global->name); + /* NULL pointers are pointing to the NULL constant, which also + * sits at address 0, but still has an ir_value for itself. + */ + return false; + } + + /* Here, relocations ARE possible - in fteqcc-enhanced-qc: + * void() foo; <- proto + * void() *fooptr = &foo; + * void() foo = { code } + */ + if (!target->code.globaladdr) { + /* FIXME: Check for the constant nullptr ir_value! + * because then code.globaladdr being 0 is valid. + */ + printf("FIXME: Relocation support\n"); + return false; + } + + global->code.globaladdr = code_globals_add(target->code.globaladdr); + } + else + { + global->code.globaladdr = code_globals_add(0); + } + if (global->code.globaladdr < 0) + return false; + return true; +} + +static bool gen_blocks_recursive(ir_function *func, ir_block *block) +{ + prog_section_statement stmt; + ir_instr *instr; + ir_block *target; + ir_block *ontrue; + ir_block *onfalse; + size_t stidx; + size_t i; + +tailcall: + block->generated = true; + block->code_start = code_statements_elements; + for (i = 0; i < block->instr_count; ++i) + { + instr = block->instr[i]; + + if (instr->opcode == VINSTR_PHI) { + printf("cannot generate virtual instruction (phi)\n"); + return false; + } + + if (instr->opcode == VINSTR_JUMP) { + target = instr->bops[0]; + /* for uncoditional jumps, if the target hasn't been generated + * yet, we generate them right here. + */ + if (!target->generated) { + block = target; + goto tailcall; + } + + /* otherwise we generate a jump instruction */ + stmt.opcode = INSTR_GOTO; + stmt.o1.s1 = (target->code_start) - code_statements_elements; + stmt.o2.s1 = 0; + stmt.o3.s1 = 0; + if (code_statements_add(stmt) < 0) + return false; + + /* no further instructions can be in this block */ + return true; + } + + if (instr->opcode == VINSTR_COND) { + ontrue = instr->bops[0]; + onfalse = instr->bops[1]; + /* TODO: have the AST signal which block should + * come first: eg. optimize IFs without ELSE... 
+ */ + + stmt.o1.u1 = ir_value_code_addr(instr->_ops[0]); + stmt.o2.u1 = 0; + stmt.o3.s1 = 0; + + if (ontrue->generated) { + stmt.opcode = INSTR_IF; + stmt.o2.s1 = (ontrue->code_start-1) - code_statements_elements; + if (code_statements_add(stmt) < 0) + return false; + } + if (onfalse->generated) { + stmt.opcode = INSTR_IFNOT; + stmt.o2.s1 = (onfalse->code_start-1) - code_statements_elements; + if (code_statements_add(stmt) < 0) + return false; + } + if (!ontrue->generated) { + if (onfalse->generated) { + block = ontrue; + goto tailcall; + } + } + if (!onfalse->generated) { + if (ontrue->generated) { + block = onfalse; + goto tailcall; + } + } + /* neither ontrue nor onfalse exist */ + stmt.opcode = INSTR_IFNOT; + stidx = code_statements_elements; + if (code_statements_add(stmt) < 0) + return false; + /* on false we jump, so add ontrue-path */ + if (!gen_blocks_recursive(func, ontrue)) + return false; + /* fixup the jump address */ + code_statements_data[stidx].o2.s1 = code_statements_elements - stidx; + /* generate onfalse path */ + if (onfalse->generated) { + /* fixup the jump address */ + code_statements_data[stidx].o2.s1 = (onfalse->code_start) - (stidx); + /* may have been generated in the previous recursive call */ + stmt.opcode = INSTR_GOTO; + stmt.o1.s1 = (onfalse->code_start) - code_statements_elements; + stmt.o2.s1 = 0; + stmt.o3.s1 = 0; + return (code_statements_add(stmt) >= 0); + } + /* if not, generate now */ + block = onfalse; + goto tailcall; + } + + if (instr->opcode >= INSTR_CALL0 && instr->opcode <= INSTR_CALL8) { + /* Trivial call translation: + * copy all params to OFS_PARM* + * if the output's storetype is not store_return, + * add append a STORE instruction! + * + * NOTES on how to do it better without much trouble: + * -) The liferanges! + * Simply check the liferange of all parameters for + * other CALLs. For each param with no CALL in its + * liferange, we can store it in an OFS_PARM at + * generation already. This would even include later + * reuse.... probably... 
:) + */ + size_t p; + ir_value *retvalue; + + for (p = 0; p < instr->params_count; ++p) + { + ir_value *param = instr->params[p]; + + stmt.opcode = INSTR_STORE_F; + stmt.o3.u1 = 0; + + stmt.opcode = type_store_instr[param->vtype]; + stmt.o1.u1 = ir_value_code_addr(param); + stmt.o2.u1 = OFS_PARM0 + 3 * p; + if (code_statements_add(stmt) < 0) + return false; + } + stmt.opcode = INSTR_CALL0 + instr->params_count; + if (stmt.opcode > INSTR_CALL8) + stmt.opcode = INSTR_CALL8; + stmt.o1.u1 = ir_value_code_addr(instr->_ops[1]); + stmt.o2.u1 = 0; + stmt.o3.u1 = 0; + if (code_statements_add(stmt) < 0) + return false; + + retvalue = instr->_ops[0]; + if (retvalue && retvalue->store != store_return && retvalue->life_count) + { + /* not to be kept in OFS_RETURN */ + stmt.opcode = type_store_instr[retvalue->vtype]; + stmt.o1.u1 = OFS_RETURN; + stmt.o2.u1 = ir_value_code_addr(retvalue); + stmt.o3.u1 = 0; + if (code_statements_add(stmt) < 0) + return false; + } + continue; + } + + if (instr->opcode == INSTR_STATE) { + printf("TODO: state instruction\n"); + return false; + } + + stmt.opcode = instr->opcode; + stmt.o1.u1 = 0; + stmt.o2.u1 = 0; + stmt.o3.u1 = 0; + + /* This is the general order of operands */ + if (instr->_ops[0]) + stmt.o3.u1 = ir_value_code_addr(instr->_ops[0]); + + if (instr->_ops[1]) + stmt.o1.u1 = ir_value_code_addr(instr->_ops[1]); + + if (instr->_ops[2]) + stmt.o2.u1 = ir_value_code_addr(instr->_ops[2]); + + if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE) + { + stmt.o1.u1 = stmt.o3.u1; + stmt.o3.u1 = 0; + } + else if (stmt.opcode >= INSTR_STORE_F && + stmt.opcode <= INSTR_STORE_FNC) + { + /* 2-operand instructions with A -> B */ + stmt.o2.u1 = stmt.o3.u1; + stmt.o3.u1 = 0; + } + + if (code_statements_add(stmt) < 0) + return false; + } + return true; +} + +static bool gen_function_code(ir_function *self) +{ + ir_block *block; + prog_section_statement stmt; + + /* Starting from entry point, we generate blocks "as they come" + * for now. Dead blocks will not be translated obviously. 
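+ * ("dead" simply meaning that no emitted block ever jumps to them, so
+ * gen_blocks_recursive never reaches them from the entry block and
+ * their 'generated' flag stays false)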
+ */ + if (!self->blocks_count) { + printf("Function '%s' declared without body.\n", self->name); + return false; + } + + block = self->blocks[0]; + if (block->generated) + return true; + + if (!gen_blocks_recursive(self, block)) { + printf("failed to generate blocks for '%s'\n", self->name); + return false; + } + + /* otherwise code_write crashes since it debug-prints functions until AINSTR_END */ + stmt.opcode = AINSTR_END; + stmt.o1.u1 = 0; + stmt.o2.u1 = 0; + stmt.o3.u1 = 0; + if (code_statements_add(stmt) < 0) + return false; + return true; +} + +static bool gen_global_function(ir_builder *ir, ir_value *global) +{ + prog_section_function fun; + ir_function *irfun; + + size_t i; + size_t local_var_end; + + if (!global->isconst || (!global->constval.vfunc)) + { + printf("Invalid state of function-global: not constant: %s\n", global->name); + return false; + } + + irfun = global->constval.vfunc; + + fun.name = global->code.name; + fun.file = code_cachedstring(global->context.file); + fun.profile = 0; /* always 0 */ + fun.nargs = irfun->params_count; + + for (i = 0;i < 8; ++i) { + if (i >= fun.nargs) + fun.argsize[i] = 0; + else + fun.argsize[i] = type_sizeof[irfun->params[i]]; + } + + fun.firstlocal = code_globals_elements; + fun.locals = irfun->allocated_locals + irfun->locals_count; + + local_var_end = 0; + for (i = 0; i < irfun->locals_count; ++i) { + if (!ir_builder_gen_global(ir, irfun->locals[i])) { + printf("Failed to generate global %s\n", irfun->locals[i]->name); + return false; + } + } + if (irfun->locals_count) { + ir_value *last = irfun->locals[irfun->locals_count-1]; + local_var_end = last->code.globaladdr; + local_var_end += type_sizeof[last->vtype]; + } + for (i = 0; i < irfun->values_count; ++i) + { + /* generate code.globaladdr for ssa values */ + ir_value *v = irfun->values[i]; + v->code.globaladdr = local_var_end + v->code.local; + } + for (i = 0; i < irfun->locals_count; ++i) { + /* fill the locals with zeros */ + code_globals_add(0); + } + + if (irfun->builtin) + fun.entry = irfun->builtin; + else { + fun.entry = code_statements_elements; + if (!gen_function_code(irfun)) { + printf("Failed to generate code for function %s\n", irfun->name); + return false; + } + } + + return (code_functions_add(fun) >= 0); +} + +static bool ir_builder_gen_global(ir_builder *self, ir_value *global) +{ + size_t i; + int32_t *iptr; + prog_section_def def; + + def.type = global->vtype; + def.offset = code_globals_elements; + def.name = global->code.name = code_genstring(global->name); + + switch (global->vtype) + { + case TYPE_POINTER: + if (code_defs_add(def) < 0) + return false; + return gen_global_pointer(global); + case TYPE_FIELD: + if (code_defs_add(def) < 0) + return false; + return gen_global_field(global); + case TYPE_ENTITY: + /* fall through */ + case TYPE_FLOAT: + { + if (code_defs_add(def) < 0) + return false; + + if (global->isconst) { + iptr = (int32_t*)&global->constval.vfloat; + global->code.globaladdr = code_globals_add(*iptr); + } else + global->code.globaladdr = code_globals_add(0); + + return global->code.globaladdr >= 0; + } + case TYPE_STRING: + { + if (code_defs_add(def) < 0) + return false; + if (global->isconst) + global->code.globaladdr = code_globals_add(code_cachedstring(global->constval.vstring)); + else + global->code.globaladdr = code_globals_add(0); + return global->code.globaladdr >= 0; + } + case TYPE_VECTOR: + { + size_t d; + if (code_defs_add(def) < 0) + return false; + + if (global->isconst) { + iptr = (int32_t*)&global->constval.vvec; + 
global->code.globaladdr = code_globals_add(iptr[0]); + if (global->code.globaladdr < 0) + return false; + for (d = 1; d < type_sizeof[global->vtype]; ++d) + { + if (code_globals_add(iptr[d]) < 0) + return false; + } + } else { + global->code.globaladdr = code_globals_add(0); + if (global->code.globaladdr < 0) + return false; + for (d = 1; d < type_sizeof[global->vtype]; ++d) + { + if (code_globals_add(0) < 0) + return false; + } + } + return global->code.globaladdr >= 0; + } + case TYPE_FUNCTION: + if (code_defs_add(def) < 0) + return false; + global->code.globaladdr = code_globals_elements; + code_globals_add(code_functions_elements); + return gen_global_function(self, global); + case TYPE_VARIANT: + /* assume biggest type */ + global->code.globaladdr = code_globals_add(0); + for (i = 1; i < type_sizeof[TYPE_VARIANT]; ++i) + code_globals_add(0); + return true; + default: + /* refuse to create 'void' type or any other fancy business. */ + printf("Invalid type for global variable %s\n", global->name); + return false; + } +} + +static bool ir_builder_gen_field(ir_builder *self, ir_value *field) +{ + prog_section_def def; + prog_section_field fld; + + def.type = field->vtype; + def.offset = code_globals_elements; + def.name = field->code.name = code_genstring(field->name); + + if (code_defs_add(def) < 0) + return false; + + fld.name = def.name; + fld.offset = code_fields_elements; + fld.type = field->fieldtype; + + if (fld.type == TYPE_VOID) { + printf("field is missing a type: %s - don't know its size\n", field->name); + return false; + } + + if (code_fields_add(fld) < 0) + return false; + + if (!code_globals_add(code_alloc_field(type_sizeof[field->fieldtype]))) + return false; + + field->code.globaladdr = code_globals_add(fld.offset); + return field->code.globaladdr >= 0; +} + +bool ir_builder_generate(ir_builder *self, const char *filename) +{ + size_t i; + + code_init(); + + for (i = 0; i < self->fields_count; ++i) + { + if (!ir_builder_gen_field(self, self->fields[i])) { + return false; + } + } + + for (i = 0; i < self->globals_count; ++i) + { + if (!ir_builder_gen_global(self, self->globals[i])) { + return false; + } + } + + printf("writing '%s'...\n", filename); + return code_write(filename); +} + /*********************************************************************** *IR DEBUG Dump functions... */ @@ -1512,6 +2645,10 @@ void ir_function_dump(ir_function *f, char *ind, int (*oprintf)(const char*, ...)) { size_t i; + if (f->builtin != 0) { + oprintf("%sfunction %s = builtin %i\n", ind, f->name, -f->builtin); + return; + } oprintf("%sfunction %s\n", ind, f->name); strncat(ind, "\t", IND_BUFSZ); if (f->locals_count) @@ -1525,10 +2662,13 @@ void ir_function_dump(ir_function *f, char *ind, } if (f->blocks_count) { - - oprintf("%slife passes: %i\n", ind, (int)f->blocks[0]->run_id); - for (i = 0; i < f->blocks_count; ++i) + oprintf("%slife passes (check): %i\n", ind, (int)f->run_id); + for (i = 0; i < f->blocks_count; ++i) { + if (f->blocks[i]->run_id != f->run_id) { + oprintf("%slife pass check fail! %i != %i\n", ind, (int)f->blocks[i]->run_id, (int)f->run_id); + } ir_block_dump(f->blocks[i], ind, oprintf); + } } ind[strlen(ind)-1] = 0;