X-Git-Url: https://git.xonotic.org/?p=xonotic%2Fgmqcc.git;a=blobdiff_plain;f=ir.c;h=bd8f12fb20f79a1d2a72ec4ec6b6d9aea981fb13;hp=b2bdeb0bcc3b44c0d54e32567dde30316488c382;hb=6024e377ba11dcbcf24577100d95355536e0c9db;hpb=600ecda8602a6fa173c7e0afaebee81f2ab82864

diff --git a/ir.c b/ir.c
index b2bdeb0..73cbc9a 100644
--- a/ir.c
+++ b/ir.c
@@ -1,6 +1,7 @@
 /*
- * Copyright (C) 2012
+ * Copyright (C) 2012, 2013, 2014, 2015
  * Wolfgang Bumiller
+ * Dale Weiler
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy of
  * this software and associated documentation files (the "Software"), to deal in
@@ -22,6 +23,7 @@
  */
 #include
 #include
+
 #include "gmqcc.h"
 #include "ir.h"
@@ -42,10 +44,13 @@ const char *type_name[TYPE_COUNT] = {
     "variant",
     "struct",
     "union",
-    "array"
+    "array",
+
+    "nil",
+    ""
 };

-size_t type_sizeof[TYPE_COUNT] = {
+static size_t type_sizeof_[TYPE_COUNT] = {
     1, /* TYPE_VOID */
     1, /* TYPE_STRING */
     1, /* TYPE_FLOAT */
@@ -59,9 +64,11 @@ size_t type_sizeof[TYPE_COUNT] = {
     0, /* TYPE_STRUCT */
     0, /* TYPE_UNION */
     0, /* TYPE_ARRAY */
+    0, /* TYPE_NIL */
+    0, /* TYPE_NOESPR */
 };

-uint16_t type_store_instr[TYPE_COUNT] = {
+const uint16_t type_store_instr[TYPE_COUNT] = {
     INSTR_STORE_F, /* should use I when having integer support */
     INSTR_STORE_S,
     INSTR_STORE_F,
@@ -78,12 +85,14 @@ uint16_t type_store_instr[TYPE_COUNT] = {

     INSTR_STORE_V, /* variant, should never be accessed */

-    AINSTR_END, /* struct */
-    AINSTR_END, /* union */
-    AINSTR_END, /* array */
+    VINSTR_END, /* struct */
+    VINSTR_END, /* union */
+    VINSTR_END, /* array */
+    VINSTR_END, /* nil */
+    VINSTR_END, /* noexpr */
 };

-uint16_t field_store_instr[TYPE_COUNT] = {
+const uint16_t field_store_instr[TYPE_COUNT] = {
     INSTR_STORE_FLD,
     INSTR_STORE_FLD,
     INSTR_STORE_FLD,
@@ -100,12 +109,14 @@ uint16_t field_store_instr[TYPE_COUNT] = {

     INSTR_STORE_V, /* variant, should never be accessed */

-    AINSTR_END, /* struct */
-    AINSTR_END, /* union */
-    AINSTR_END, /* array */
+    VINSTR_END, /* struct */
+    VINSTR_END, /* union */
+    VINSTR_END, /* array */
+    VINSTR_END, /* nil */
+    VINSTR_END, /* noexpr */
 };

-uint16_t type_storep_instr[TYPE_COUNT] = {
+const uint16_t type_storep_instr[TYPE_COUNT] = {
     INSTR_STOREP_F, /* should use I when having integer support */
     INSTR_STOREP_S,
     INSTR_STOREP_F,
@@ -122,12 +133,14 @@ uint16_t type_storep_instr[TYPE_COUNT] = {

     INSTR_STOREP_V, /* variant, should never be accessed */

-    AINSTR_END, /* struct */
-    AINSTR_END, /* union */
-    AINSTR_END, /* array */
+    VINSTR_END, /* struct */
+    VINSTR_END, /* union */
+    VINSTR_END, /* array */
+    VINSTR_END, /* nil */
+    VINSTR_END, /* noexpr */
 };

-uint16_t type_eq_instr[TYPE_COUNT] = {
+const uint16_t type_eq_instr[TYPE_COUNT] = {
     INSTR_EQ_F, /* should use I when having integer support */
     INSTR_EQ_S,
     INSTR_EQ_F,
@@ -144,12 +157,14 @@ uint16_t type_eq_instr[TYPE_COUNT] = {

     INSTR_EQ_V, /* variant, should never be accessed */

-    AINSTR_END, /* struct */
-    AINSTR_END, /* union */
-    AINSTR_END, /* array */
+    VINSTR_END, /* struct */
+    VINSTR_END, /* union */
+    VINSTR_END, /* array */
+    VINSTR_END, /* nil */
+    VINSTR_END, /* noexpr */
 };

-uint16_t type_ne_instr[TYPE_COUNT] = {
+const uint16_t type_ne_instr[TYPE_COUNT] = {
     INSTR_NE_F, /* should use I when having integer support */
     INSTR_NE_S,
     INSTR_NE_F,
@@ -166,14 +181,16 @@ uint16_t type_ne_instr[TYPE_COUNT] = {

     INSTR_NE_V, /* variant, should never be accessed */

-    AINSTR_END, /* struct */
-    AINSTR_END, /* union */
-    AINSTR_END, /* array */
+    VINSTR_END, /* struct */
+    VINSTR_END, /* union */
+    VINSTR_END, /* array */
+    VINSTR_END, /* nil */
+    VINSTR_END, /* noexpr */
 };

-uint16_t type_not_instr[TYPE_COUNT] = {
+const uint16_t type_not_instr[TYPE_COUNT] = {
     INSTR_NOT_F, /* should use I when having integer support */
-    INSTR_NOT_S,
+    VINSTR_END, /* not to be used, depends on string related -f flags */
     INSTR_NOT_F,
     INSTR_NOT_V,
     INSTR_NOT_ENT,
@@ -188,42 +205,64 @@ uint16_t type_not_instr[TYPE_COUNT] = {

     INSTR_NOT_V, /* variant, should never be accessed */

-    AINSTR_END, /* struct */
-    AINSTR_END, /* union */
-    AINSTR_END, /* array */
+    VINSTR_END, /* struct */
+    VINSTR_END, /* union */
+    VINSTR_END, /* array */
+    VINSTR_END, /* nil */
+    VINSTR_END, /* noexpr */
 };

-static void irerror(lex_ctx ctx, const char *msg, ...)
+/* protos */
+static ir_value* ir_value_var(const char *name, int st, int vtype);
+static bool ir_value_set_name(ir_value*, const char *name);
+static void ir_value_dump(ir_value*, int (*oprintf)(const char*,...));
+
+static ir_value* ir_gen_extparam_proto(ir_builder *ir);
+static void ir_gen_extparam (ir_builder *ir);
+
+static bool ir_builder_set_name(ir_builder *self, const char *name);
+
+static ir_function* ir_function_new(struct ir_builder_s *owner, int returntype);
+static bool ir_function_set_name(ir_function*, const char *name);
+static void ir_function_delete(ir_function*);
+static void ir_function_dump(ir_function*, char *ind, int (*oprintf)(const char*,...));
+
+static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t, const char *label,
+                                               int op, ir_value *a, ir_value *b, int outype);
+static void ir_block_delete(ir_block*);
+static ir_block* ir_block_new(struct ir_function_s *owner, const char *label);
+static bool GMQCC_WARN ir_block_create_store(ir_block*, lex_ctx_t, ir_value *target, ir_value *what);
+static bool ir_block_set_label(ir_block*, const char *label);
+static void ir_block_dump(ir_block*, char *ind, int (*oprintf)(const char*,...));
+
+static bool ir_instr_op(ir_instr*, int op, ir_value *value, bool writing);
+static void ir_instr_delete(ir_instr*);
+static void ir_instr_dump(ir_instr* in, char *ind, int (*oprintf)(const char*,...));
+/* error functions */
+
+static void irerror(lex_ctx_t ctx, const char *msg, ...)
 {
     va_list ap;
     va_start(ap, msg);
-    con_cvprintmsg((void*)&ctx, LVL_ERROR, "internal error", msg, ap);
+    con_cvprintmsg(ctx, LVL_ERROR, "internal error", msg, ap);
     va_end(ap);
 }

-static bool irwarning(lex_ctx ctx, int warntype, const char *fmt, ...)
+static bool GMQCC_WARN irwarning(lex_ctx_t ctx, int warntype, const char *fmt, ...)
{ - va_list ap; - int lvl = LVL_WARNING; - - if (warntype && !OPTS_WARN(warntype)) - return false; - - if (opts_werror) - lvl = LVL_ERROR; - - va_start(ap, fmt); - con_vprintmsg(lvl, ctx.file, ctx.line, "warning", fmt, ap); - va_end(ap); - - return opts_werror; + bool r; + va_list ap; + va_start(ap, fmt); + r = vcompile_warning(ctx, warntype, fmt, ap); + va_end(ap); + return r; } /*********************************************************************** * Vector utility functions */ -bool GMQCC_WARN vec_ir_value_find(ir_value **vec, ir_value *what, size_t *idx) +static bool GMQCC_WARN vec_ir_value_find(ir_value **vec, const ir_value *what, size_t *idx) { size_t i; size_t len = vec_size(vec); @@ -236,7 +275,7 @@ bool GMQCC_WARN vec_ir_value_find(ir_value **vec, ir_value *what, size_t *idx) return false; } -bool GMQCC_WARN vec_ir_block_find(ir_block **vec, ir_block *what, size_t *idx) +static bool GMQCC_WARN vec_ir_block_find(ir_block **vec, ir_block *what, size_t *idx) { size_t i; size_t len = vec_size(vec); @@ -249,7 +288,7 @@ bool GMQCC_WARN vec_ir_block_find(ir_block **vec, ir_block *what, size_t *idx) return false; } -bool GMQCC_WARN vec_ir_instr_find(ir_instr **vec, ir_instr *what, size_t *idx) +static bool GMQCC_WARN vec_ir_instr_find(ir_instr **vec, ir_instr *what, size_t *idx) { size_t i; size_t len = vec_size(vec); @@ -273,6 +312,7 @@ static void ir_function_delete_quick(ir_function *self); ir_builder* ir_builder_new(const char *modulename) { ir_builder* self; + size_t i; self = (ir_builder*)mem_a(sizeof(*self)); if (!self) @@ -281,13 +321,20 @@ ir_builder* ir_builder_new(const char *modulename) self->functions = NULL; self->globals = NULL; self->fields = NULL; - self->extparams = NULL; self->filenames = NULL; self->filestrings = NULL; self->htglobals = util_htnew(IR_HT_SIZE); self->htfields = util_htnew(IR_HT_SIZE); self->htfunctions = util_htnew(IR_HT_SIZE); + self->extparams = NULL; + self->extparam_protos = NULL; + + self->first_common_globaltemp = 0; + self->max_globaltemps = 0; + self->first_common_local = 0; + self->max_locals = 0; + self->str_immediate = 0; self->name = NULL; if (!ir_builder_set_name(self, modulename)) { @@ -295,6 +342,23 @@ ir_builder* ir_builder_new(const char *modulename) return NULL; } + self->nil = ir_value_var("nil", store_value, TYPE_NIL); + self->nil->cvq = CV_CONST; + + for (i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) { + /* we write to them, but they're not supposed to be used outside the IR, so + * let's not allow the generation of ir_instrs which use these. + * So it's a constant noexpr. 
+ */ + self->vinstr_temp[i] = ir_value_var("vinstr_temp", store_value, TYPE_NOEXPR); + self->vinstr_temp[i]->cvq = CV_CONST; + } + + self->reserved_va_count = NULL; + self->coverage_func = NULL; + + self->code = code_init(); + return self; } @@ -313,6 +377,7 @@ void ir_builder_delete(ir_builder* self) ir_value_delete(self->extparams[i]); } vec_free(self->extparams); + vec_free(self->extparam_protos); for (i = 0; i != vec_size(self->globals); ++i) { ir_value_delete(self->globals[i]); } @@ -320,9 +385,15 @@ void ir_builder_delete(ir_builder* self) for (i = 0; i != vec_size(self->fields); ++i) { ir_value_delete(self->fields[i]); } + ir_value_delete(self->nil); + for (i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) { + ir_value_delete(self->vinstr_temp[i]); + } vec_free(self->fields); vec_free(self->filenames); vec_free(self->filestrings); + + code_cleanup(self->code); mem_d(self); } @@ -334,7 +405,7 @@ bool ir_builder_set_name(ir_builder *self, const char *name) return !!self->name; } -ir_function* ir_builder_get_function(ir_builder *self, const char *name) +static ir_function* ir_builder_get_function(ir_builder *self, const char *name) { return (ir_function*)util_htget(self->htfunctions, name); } @@ -369,7 +440,7 @@ ir_function* ir_builder_create_function(ir_builder *self, const char *name, int return fn; } -ir_value* ir_builder_get_global(ir_builder *self, const char *name) +static ir_value* ir_builder_get_global(ir_builder *self, const char *name) { return (ir_value*)util_htget(self->htglobals, name); } @@ -378,7 +449,7 @@ ir_value* ir_builder_create_global(ir_builder *self, const char *name, int vtype { ir_value *ve; - if (name && name[0] != '#') + if (name[0] != '#') { ve = ir_builder_get_global(self, name); if (ve) { @@ -392,7 +463,14 @@ ir_value* ir_builder_create_global(ir_builder *self, const char *name, int vtype return ve; } -ir_value* ir_builder_get_field(ir_builder *self, const char *name) +ir_value* ir_builder_get_va_count(ir_builder *self) +{ + if (self->reserved_va_count) + return self->reserved_va_count; + return (self->reserved_va_count = ir_builder_create_global(self, "reserved:va_count", TYPE_FLOAT)); +} + +static ir_value* ir_builder_get_field(ir_builder *self, const char *name) { return (ir_value*)util_htget(self->htfields, name); } @@ -416,10 +494,10 @@ ir_value* ir_builder_create_field(ir_builder *self, const char *name, int vtype) *IR Function */ -bool ir_function_naive_phi(ir_function*); -void ir_function_enumerate(ir_function*); -bool ir_function_calculate_liferanges(ir_function*); -bool ir_function_allocate_locals(ir_function*); +static bool ir_function_naive_phi(ir_function*); +static void ir_function_enumerate(ir_function*); +static bool ir_function_calculate_liferanges(ir_function*); +static bool ir_function_allocate_locals(ir_function*); ir_function* ir_function_new(ir_builder* owner, int outtype) { @@ -436,6 +514,8 @@ ir_function* ir_function_new(ir_builder* owner, int outtype) mem_d(self); return NULL; } + self->flags = 0; + self->owner = owner; self->context.file = "<@no context>"; self->context.line = 0; @@ -448,8 +528,11 @@ ir_function* ir_function_new(ir_builder* owner, int outtype) self->values = NULL; self->locals = NULL; + self->max_varargs = 0; + self->code_function_def = -1; self->allocated_locals = 0; + self->globaltemps = 0; self->run_id = 0; return self; @@ -511,20 +594,152 @@ void ir_function_delete(ir_function *self) mem_d(self); } -void ir_function_collect_value(ir_function *self, ir_value *v) +static void ir_function_collect_value(ir_function *self, 
ir_value *v) { vec_push(self->values, v); } -ir_block* ir_function_create_block(lex_ctx ctx, ir_function *self, const char *label) +ir_block* ir_function_create_block(lex_ctx_t ctx, ir_function *self, const char *label) { ir_block* bn = ir_block_new(self, label); bn->context = ctx; vec_push(self->blocks, bn); + + if ((self->flags & IR_FLAG_BLOCK_COVERAGE) && self->owner->coverage_func) + (void)ir_block_create_call(bn, ctx, NULL, self->owner->coverage_func, false); + return bn; } -bool ir_function_pass_tailcall(ir_function *self) +static bool instr_is_operation(uint16_t op) +{ + return ( (op >= INSTR_MUL_F && op <= INSTR_GT) || + (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) || + (op == INSTR_ADDRESS) || + (op >= INSTR_NOT_F && op <= INSTR_NOT_FNC) || + (op >= INSTR_AND && op <= INSTR_BITOR) || + (op >= INSTR_CALL0 && op <= INSTR_CALL8) || + (op >= VINSTR_BITAND_V && op <= VINSTR_NEG_V) ); +} + +static bool ir_function_pass_peephole(ir_function *self) +{ + size_t b; + + for (b = 0; b < vec_size(self->blocks); ++b) { + size_t i; + ir_block *block = self->blocks[b]; + + for (i = 0; i < vec_size(block->instr); ++i) { + ir_instr *inst; + inst = block->instr[i]; + + if (i >= 1 && + (inst->opcode >= INSTR_STORE_F && + inst->opcode <= INSTR_STORE_FNC)) + { + ir_instr *store; + ir_instr *oper; + ir_value *value; + + store = inst; + + oper = block->instr[i-1]; + if (!instr_is_operation(oper->opcode)) + continue; + + /* Don't change semantics of MUL_VF in engines where these may not alias. */ + if (OPTS_FLAG(LEGACY_VECTOR_MATHS)) { + if (oper->opcode == INSTR_MUL_VF && oper->_ops[2]->memberof == oper->_ops[1]) + continue; + if (oper->opcode == INSTR_MUL_FV && oper->_ops[1]->memberof == oper->_ops[2]) + continue; + } + + value = oper->_ops[0]; + + /* only do it for SSA values */ + if (value->store != store_value) + continue; + + /* don't optimize out the temp if it's used later again */ + if (vec_size(value->reads) != 1) + continue; + + /* The very next store must use this value */ + if (value->reads[0] != store) + continue; + + /* And of course the store must _read_ from it, so it's in + * OP 1 */ + if (store->_ops[1] != value) + continue; + + ++opts_optimizationcount[OPTIM_PEEPHOLE]; + (void)!ir_instr_op(oper, 0, store->_ops[0], true); + + vec_remove(block->instr, i, 1); + ir_instr_delete(store); + } + else if (inst->opcode == VINSTR_COND) + { + /* COND on a value resulting from a NOT could + * remove the NOT and swap its operands + */ + while (true) { + ir_block *tmp; + size_t inotid; + ir_instr *inot; + ir_value *value; + value = inst->_ops[0]; + + if (value->store != store_value || + vec_size(value->reads) != 1 || + value->reads[0] != inst) + { + break; + } + + inot = value->writes[0]; + if (inot->_ops[0] != value || + inot->opcode < INSTR_NOT_F || + inot->opcode > INSTR_NOT_FNC || + inot->opcode == INSTR_NOT_V || /* can't do these */ + inot->opcode == INSTR_NOT_S) + { + break; + } + + /* count */ + ++opts_optimizationcount[OPTIM_PEEPHOLE]; + /* change operand */ + (void)!ir_instr_op(inst, 0, inot->_ops[1], false); + /* remove NOT */ + tmp = inot->owner; + for (inotid = 0; inotid < vec_size(tmp->instr); ++inotid) { + if (tmp->instr[inotid] == inot) + break; + } + if (inotid >= vec_size(tmp->instr)) { + compile_error(inst->context, "sanity-check failed: failed to find instruction to optimize out"); + return false; + } + vec_remove(tmp->instr, inotid, 1); + ir_instr_delete(inot); + /* swap ontrue/onfalse */ + tmp = inst->bops[0]; + inst->bops[0] = inst->bops[1]; + inst->bops[1] = tmp; + } + 
continue; + } + } + } + + return true; +} + +static bool ir_function_pass_tailrecursion(ir_function *self) { size_t b, p; @@ -564,7 +779,7 @@ bool ir_function_pass_tailcall(ir_function *self) ret->_ops[0] == store->_ops[0] && store->_ops[1] == call->_ops[0]) { - ++optimization_count[OPTIM_MINOR]; + ++opts_optimizationcount[OPTIM_PEEPHOLE]; call->_ops[0] = store->_ops[0]; vec_remove(block->instr, vec_size(block->instr) - 2, 1); ir_instr_delete(store); @@ -586,7 +801,7 @@ bool ir_function_pass_tailcall(ir_function *self) if (ret->_ops[0] && call->_ops[0] != ret->_ops[0]) continue; - ++optimization_count[OPTIM_TAIL_RECURSION]; + ++opts_optimizationcount[OPTIM_TAIL_RECURSION]; vec_shrinkby(block->instr, 2); block->final = false; /* open it back up */ @@ -594,12 +809,12 @@ bool ir_function_pass_tailcall(ir_function *self) /* emite parameter-stores */ for (p = 0; p < vec_size(call->params); ++p) { /* assert(call->params_count <= self->locals_count); */ - if (!ir_block_create_store(block, self->locals[p], call->params[p])) { + if (!ir_block_create_store(block, call->context, self->locals[p], call->params[p])) { irerror(call->context, "failed to create tailcall store instruction for parameter %i", (int)p); return false; } } - if (!ir_block_create_jump(block, self->blocks[0])) { + if (!ir_block_create_jump(block, call->context, self->blocks[0])) { irerror(call->context, "failed to create tailcall jump"); return false; } @@ -613,18 +828,50 @@ bool ir_function_pass_tailcall(ir_function *self) bool ir_function_finalize(ir_function *self) { + size_t i; + if (self->builtin) return true; + if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) { + if (!ir_function_pass_peephole(self)) { + irerror(self->context, "generic optimization pass broke something in `%s`", self->name); + return false; + } + } + if (OPTS_OPTIMIZATION(OPTIM_TAIL_RECURSION)) { - if (!ir_function_pass_tailcall(self)) { - irerror(self->context, "tailcall optimization pass broke something in `%s`", self->name); + if (!ir_function_pass_tailrecursion(self)) { + irerror(self->context, "tail-recursion optimization pass broke something in `%s`", self->name); return false; } } - if (!ir_function_naive_phi(self)) + if (!ir_function_naive_phi(self)) { + irerror(self->context, "internal error: ir_function_naive_phi failed"); return false; + } + + for (i = 0; i < vec_size(self->locals); ++i) { + ir_value *v = self->locals[i]; + if (v->vtype == TYPE_VECTOR || + (v->vtype == TYPE_FIELD && v->outtype == TYPE_VECTOR)) + { + ir_value_vector_member(v, 0); + ir_value_vector_member(v, 1); + ir_value_vector_member(v, 2); + } + } + for (i = 0; i < vec_size(self->values); ++i) { + ir_value *v = self->values[i]; + if (v->vtype == TYPE_VECTOR || + (v->vtype == TYPE_FIELD && v->outtype == TYPE_VECTOR)) + { + ir_value_vector_member(v, 0); + ir_value_vector_member(v, 1); + ir_value_vector_member(v, 2); + } + } ir_function_enumerate(self); @@ -647,6 +894,8 @@ ir_value* ir_function_create_local(ir_function *self, const char *name, int vtyp } ve = ir_value_var(name, (param ? 
store_param : store_local), vtype); + if (param) + ve->locked = true; vec_push(self->locals, ve); return ve; } @@ -680,7 +929,6 @@ ir_block* ir_block_new(ir_function* owner, const char *name) self->eid = 0; self->is_return = false; - self->run_id = 0; self->living = NULL; @@ -727,7 +975,7 @@ bool ir_block_set_label(ir_block *self, const char *name) *IR Instructions */ -ir_instr* ir_instr_new(ir_block* owner, int op) +static ir_instr* ir_instr_new(lex_ctx_t ctx, ir_block* owner, int op) { ir_instr *self; self = (ir_instr*)mem_a(sizeof(*self)); @@ -735,8 +983,7 @@ ir_instr* ir_instr_new(ir_block* owner, int op) return NULL; self->owner = owner; - self->context.file = "<@no context>"; - self->context.line = 0; + self->context = ctx; self->opcode = op; self->_ops[0] = NULL; self->_ops[1] = NULL; @@ -760,7 +1007,7 @@ static void ir_instr_delete_quick(ir_instr *self) mem_d(self); } -void ir_instr_delete(ir_instr *self) +static void ir_instr_delete(ir_instr *self) { size_t i; /* The following calls can only delete from @@ -791,8 +1038,13 @@ void ir_instr_delete(ir_instr *self) mem_d(self); } -bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing) +static bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing) { + if (v && v->vtype == TYPE_NOEXPR) { + irerror(self->context, "tried to use a NOEXPR value"); + return false; + } + if (self->_ops[op]) { size_t idx; if (writing && vec_ir_instr_find(self->_ops[op]->writes, self, &idx)) @@ -814,7 +1066,7 @@ bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing) *IR Value */ -void ir_value_code_setaddr(ir_value *self, int32_t gaddr) +static void ir_value_code_setaddr(ir_value *self, int32_t gaddr) { self->code.globaladdr = gaddr; if (self->members[0]) self->members[0]->code.globaladdr = gaddr; @@ -822,7 +1074,7 @@ void ir_value_code_setaddr(ir_value *self, int32_t gaddr) if (self->members[2]) self->members[2]->code.globaladdr = gaddr; } -int32_t ir_value_code_addr(const ir_value *self) +static int32_t ir_value_code_addr(const ir_value *self) { if (self->store == store_return) return OFS_RETURN + self->code.addroffset; @@ -837,6 +1089,7 @@ ir_value* ir_value_var(const char *name, int storetype, int vtype) self->fieldtype = TYPE_VOID; self->outtype = TYPE_VOID; self->store = storetype; + self->flags = 0; self->reads = NULL; self->writes = NULL; @@ -860,12 +1113,32 @@ ir_value* ir_value_var(const char *name, int storetype, int vtype) self->members[2] = NULL; self->memberof = NULL; + self->unique_life = false; + self->locked = false; + self->callparam = false; + self->life = NULL; return self; } +/* helper function */ +static ir_value* ir_builder_imm_float(ir_builder *self, float value, bool add_to_list) { + ir_value *v = ir_value_var("#IMMEDIATE", store_global, TYPE_FLOAT); + v->flags |= IR_FLAG_ERASABLE; + v->hasvalue = true; + v->cvq = CV_CONST; + v->constval.vfloat = value; + + vec_push(self->globals, v); + if (add_to_list) + vec_push(self->const_floats, v); + return v; +} + ir_value* ir_value_vector_member(ir_value *self, unsigned int member) { + char *name; + size_t len; ir_value *m; if (member >= 3) return NULL; @@ -873,9 +1146,22 @@ ir_value* ir_value_vector_member(ir_value *self, unsigned int member) if (self->members[member]) return self->members[member]; + if (self->name) { + len = strlen(self->name); + name = (char*)mem_a(len + 3); + memcpy(name, self->name, len); + name[len+0] = '_'; + name[len+1] = 'x' + member; + name[len+2] = '\0'; + } + else + name = NULL; + if (self->vtype == TYPE_VECTOR) { - m = 
ir_value_var(self->name, self->store, TYPE_FLOAT); + m = ir_value_var(name, self->store, TYPE_FLOAT); + if (name) + mem_d(name); if (!m) return NULL; m->context = self->context; @@ -887,7 +1173,9 @@ ir_value* ir_value_vector_member(ir_value *self, unsigned int member) { if (self->fieldtype != TYPE_VECTOR) return NULL; - m = ir_value_var(self->name, self->store, TYPE_FIELD); + m = ir_value_var(name, self->store, TYPE_FIELD); + if (name) + mem_d(name); if (!m) return NULL; m->fieldtype = TYPE_FLOAT; @@ -906,7 +1194,14 @@ ir_value* ir_value_vector_member(ir_value *self, unsigned int member) return m; } -ir_value* ir_value_out(ir_function *owner, const char *name, int storetype, int vtype) +static GMQCC_INLINE size_t ir_value_sizeof(const ir_value *self) +{ + if (self->vtype == TYPE_FIELD && self->fieldtype == TYPE_VECTOR) + return type_sizeof_[TYPE_VECTOR]; + return type_sizeof_[self->vtype]; +} + +static ir_value* ir_value_out(ir_function *owner, const char *name, int storetype, int vtype) { ir_value *v = ir_value_var(name, storetype, vtype); if (!v) @@ -925,9 +1220,11 @@ void ir_value_delete(ir_value* self) if (self->vtype == TYPE_STRING) mem_d((void*)self->constval.vstring); } - for (i = 0; i < 3; ++i) { - if (self->members[i]) - ir_value_delete(self->members[i]); + if (!(self->flags & IR_FLAG_SPLIT_VECTOR)) { + for (i = 0; i < 3; ++i) { + if (self->members[i]) + ir_value_delete(self->members[i]); + } } vec_free(self->reads); vec_free(self->writes); @@ -961,7 +1258,7 @@ bool ir_value_set_func(ir_value *self, int f) return true; } -bool ir_value_set_vector(ir_value *self, vector v) +bool ir_value_set_vector(ir_value *self, vec3_t v) { if (self->vtype != TYPE_VECTOR) return false; @@ -979,22 +1276,11 @@ bool ir_value_set_field(ir_value *self, ir_value *fld) return true; } -static char *ir_strdup(const char *str) -{ - if (str && !*str) { - /* actually dup empty strings */ - char *out = mem_a(1); - *out = 0; - return out; - } - return util_strdup(str); -} - bool ir_value_set_string(ir_value *self, const char *str) { if (self->vtype != TYPE_STRING) return false; - self->constval.vstring = ir_strdup(str); + self->constval.vstring = util_strdupe(str); self->hasvalue = true; return true; } @@ -1024,7 +1310,7 @@ bool ir_value_lives(ir_value *self, size_t at) return false; } -bool ir_value_life_insert(ir_value *self, size_t idx, ir_life_entry_t e) +static bool ir_value_life_insert(ir_value *self, size_t idx, ir_life_entry_t e) { size_t k; vec_push(self->life, e); @@ -1034,15 +1320,16 @@ bool ir_value_life_insert(ir_value *self, size_t idx, ir_life_entry_t e) return true; } -bool ir_value_life_merge(ir_value *self, size_t s) +static bool ir_value_life_merge(ir_value *self, size_t s) { size_t i; + const size_t vs = vec_size(self->life); ir_life_entry_t *life = NULL; ir_life_entry_t *before = NULL; ir_life_entry_t new_entry; /* Find the first range >= s */ - for (i = 0; i < vec_size(self->life); ++i) + for (i = 0; i < vs; ++i) { before = life; life = &self->life[i]; @@ -1050,7 +1337,7 @@ bool ir_value_life_merge(ir_value *self, size_t s) break; } /* nothing found? 
append */ - if (i == vec_size(self->life)) { + if (i == vs) { ir_life_entry_t e; if (life && life->end+1 == s) { @@ -1096,7 +1383,7 @@ bool ir_value_life_merge(ir_value *self, size_t s) return ir_value_life_insert(self, i, new_entry); } -bool ir_value_life_merge_into(ir_value *self, const ir_value *other) +static bool ir_value_life_merge_into(ir_value *self, const ir_value *other) { size_t i, myi; @@ -1170,7 +1457,7 @@ bool ir_value_life_merge_into(ir_value *self, const ir_value *other) return true; } -bool ir_values_overlap(const ir_value *a, const ir_value *b) +static bool ir_values_overlap(const ir_value *a, const ir_value *b) { /* For any life entry in A see if it overlaps with * any life entry in B. @@ -1228,15 +1515,19 @@ bool ir_values_overlap(const ir_value *a, const ir_value *b) *IR main operations */ -bool ir_block_create_store_op(ir_block *self, int op, ir_value *target, ir_value *what) +static bool ir_check_unreachable(ir_block *self) +{ + /* The IR should never have to deal with unreachable code */ + if (!self->final/* || OPTS_FLAG(ALLOW_UNREACHABLE_CODE)*/) + return true; + irerror(self->context, "unreachable statement (%s)", self->label); + return false; +} + +bool ir_block_create_store_op(ir_block *self, lex_ctx_t ctx, int op, ir_value *target, ir_value *what) { ir_instr *in; - if (self->final) { - irerror(self->context, "unreachable statement (%s)", self->label); - return false; - } - in = ir_instr_new(self, op); - if (!in) + if (!ir_check_unreachable(self)) return false; if (target->store == store_value && @@ -1244,20 +1535,45 @@ bool ir_block_create_store_op(ir_block *self, int op, ir_value *target, ir_value { irerror(self->context, "cannot store to an SSA value"); irerror(self->context, "trying to store: %s <- %s", target->name, what->name); - irerror(self->context, "instruction: %s", asm_instr[op].m); + irerror(self->context, "instruction: %s", util_instr_str[op]); return false; } - if (!ir_instr_op(in, 0, target, true) || + in = ir_instr_new(ctx, self, op); + if (!in) + return false; + + if (!ir_instr_op(in, 0, target, (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC)) || !ir_instr_op(in, 1, what, false)) { + ir_instr_delete(in); + return false; + } + vec_push(self->instr, in); + return true; +} + +bool ir_block_create_state_op(ir_block *self, lex_ctx_t ctx, ir_value *frame, ir_value *think) +{ + ir_instr *in; + if (!ir_check_unreachable(self)) + return false; + + in = ir_instr_new(ctx, self, INSTR_STATE); + if (!in) + return false; + + if (!ir_instr_op(in, 0, frame, false) || + !ir_instr_op(in, 1, think, false)) + { + ir_instr_delete(in); return false; } vec_push(self->instr, in); return true; } -bool ir_block_create_store(ir_block *self, ir_value *target, ir_value *what) +static bool ir_block_create_store(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what) { int op = 0; int vtype; @@ -1279,10 +1595,10 @@ bool ir_block_create_store(ir_block *self, ir_value *target, ir_value *what) op = INSTR_STORE_V; } - return ir_block_create_store_op(self, op, target, what); + return ir_block_create_store_op(self, ctx, op, target, what); } -bool ir_block_create_storep(ir_block *self, ir_value *target, ir_value *what) +bool ir_block_create_storep(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what) { int op = 0; int vtype; @@ -1301,40 +1617,40 @@ bool ir_block_create_storep(ir_block *self, ir_value *target, ir_value *what) op = INSTR_STOREP_V; } - return ir_block_create_store_op(self, op, target, what); + return ir_block_create_store_op(self, ctx, op, target, 
what); } -bool ir_block_create_return(ir_block *self, ir_value *v) +bool ir_block_create_return(ir_block *self, lex_ctx_t ctx, ir_value *v) { ir_instr *in; - if (self->final) { - irerror(self->context, "unreachable statement (%s)", self->label); + if (!ir_check_unreachable(self)) return false; - } + self->final = true; + self->is_return = true; - in = ir_instr_new(self, INSTR_RETURN); + in = ir_instr_new(ctx, self, INSTR_RETURN); if (!in) return false; - if (v && !ir_instr_op(in, 0, v, false)) + if (v && !ir_instr_op(in, 0, v, false)) { + ir_instr_delete(in); return false; + } vec_push(self->instr, in); return true; } -bool ir_block_create_if(ir_block *self, ir_value *v, +bool ir_block_create_if(ir_block *self, lex_ctx_t ctx, ir_value *v, ir_block *ontrue, ir_block *onfalse) { ir_instr *in; - if (self->final) { - irerror(self->context, "unreachable statement (%s)", self->label); + if (!ir_check_unreachable(self)) return false; - } self->final = true; - /*in = ir_instr_new(self, (v->vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/ - in = ir_instr_new(self, VINSTR_COND); + /*in = ir_instr_new(ctx, self, (v->vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/ + in = ir_instr_new(ctx, self, VINSTR_COND); if (!in) return false; @@ -1355,15 +1671,13 @@ bool ir_block_create_if(ir_block *self, ir_value *v, return true; } -bool ir_block_create_jump(ir_block *self, ir_block *to) +bool ir_block_create_jump(ir_block *self, lex_ctx_t ctx, ir_block *to) { ir_instr *in; - if (self->final) { - irerror(self->context, "unreachable statement (%s)", self->label); + if (!ir_check_unreachable(self)) return false; - } self->final = true; - in = ir_instr_new(self, VINSTR_JUMP); + in = ir_instr_new(ctx, self, VINSTR_JUMP); if (!in) return false; @@ -1375,31 +1689,19 @@ bool ir_block_create_jump(ir_block *self, ir_block *to) return true; } -bool ir_block_create_goto(ir_block *self, ir_block *to) +bool ir_block_create_goto(ir_block *self, lex_ctx_t ctx, ir_block *to) { - ir_instr *in; - if (self->final) { - irerror(self->context, "unreachable statement (%s)", self->label); - return false; - } - self->final = true; - in = ir_instr_new(self, INSTR_GOTO); - if (!in) - return false; - - in->bops[0] = to; - vec_push(self->instr, in); - - vec_push(self->exits, to); - vec_push(to->entries, self); - return true; + self->owner->flags |= IR_FLAG_HAS_GOTO; + return ir_block_create_jump(self, ctx, to); } -ir_instr* ir_block_create_phi(ir_block *self, const char *label, int ot) +ir_instr* ir_block_create_phi(ir_block *self, lex_ctx_t ctx, const char *label, int ot) { ir_value *out; ir_instr *in; - in = ir_instr_new(self, VINSTR_PHI); + if (!ir_check_unreachable(self)) + return NULL; + in = ir_instr_new(ctx, self, VINSTR_PHI); if (!in) return NULL; out = ir_value_out(self->owner, label, store_value, ot); @@ -1430,7 +1732,7 @@ void ir_phi_add(ir_instr* self, ir_block *b, ir_value *v) * is doing something wrong. */ irerror(self->context, "Invalid entry block for PHI"); - abort(); + exit(EXIT_FAILURE); } pe.value = v; @@ -1440,13 +1742,19 @@ void ir_phi_add(ir_instr* self, ir_block *b, ir_value *v) } /* call related code */ -ir_instr* ir_block_create_call(ir_block *self, const char *label, ir_value *func) +ir_instr* ir_block_create_call(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *func, bool noreturn) { ir_value *out; ir_instr *in; - in = ir_instr_new(self, INSTR_CALL0); + if (!ir_check_unreachable(self)) + return NULL; + in = ir_instr_new(ctx, self, (noreturn ? 
VINSTR_NRCALL : INSTR_CALL0)); if (!in) return NULL; + if (noreturn) { + self->final = true; + self->is_return = true; + } out = ir_value_out(self->owner, label, (func->outtype == TYPE_VOID) ? store_return : store_value, func->outtype); if (!out) { ir_instr_delete(in); @@ -1460,6 +1768,15 @@ ir_instr* ir_block_create_call(ir_block *self, const char *label, ir_value *func return NULL; } vec_push(self->instr, in); + /* + if (noreturn) { + if (!ir_block_create_return(self, ctx, NULL)) { + compile_error(ctx, "internal error: failed to generate dummy-return instruction"); + ir_instr_delete(in); + return NULL; + } + } + */ return in; } @@ -1476,7 +1793,7 @@ void ir_call_param(ir_instr* self, ir_value *v) /* binary op related code */ -ir_value* ir_block_create_binop(ir_block *self, +ir_value* ir_block_create_binop(ir_block *self, lex_ctx_t ctx, const char *label, int opcode, ir_value *left, ir_value *right) { @@ -1499,6 +1816,7 @@ ir_value* ir_block_create_binop(ir_block *self, #endif case INSTR_BITAND: case INSTR_BITOR: + case VINSTR_BITXOR: #if 0 case INSTR_SUB_S: /* -- offset of string as float */ case INSTR_MUL_IF: @@ -1535,6 +1853,13 @@ ir_value* ir_block_create_binop(ir_block *self, case INSTR_SUB_V: case INSTR_MUL_VF: case INSTR_MUL_FV: + case VINSTR_BITAND_V: + case VINSTR_BITOR_V: + case VINSTR_BITXOR_V: + case VINSTR_BITAND_VF: + case VINSTR_BITOR_VF: + case VINSTR_BITXOR_VF: + case VINSTR_CROSS: #if 0 case INSTR_DIV_VF: case INSTR_MUL_IV: @@ -1547,14 +1872,33 @@ ir_value* ir_block_create_binop(ir_block *self, ot = TYPE_POINTER; break; #endif + /* + * after the following default case, the value of opcode can never + * be 1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65 + */ default: /* ranges: */ /* boolean operations result in floats */ + + /* + * opcode >= 10 takes true branch opcode is at least 10 + * opcode <= 23 takes false branch opcode is at least 24 + */ if (opcode >= INSTR_EQ_F && opcode <= INSTR_GT) ot = TYPE_FLOAT; + + /* + * At condition "opcode <= 23", the value of "opcode" must be + * at least 24. + * At condition "opcode <= 23", the value of "opcode" cannot be + * equal to any of {1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65}. + * The condition "opcode <= 23" cannot be true. + * + * Thus ot=2 (TYPE_FLOAT) can never be true + */ +#if 0 else if (opcode >= INSTR_LE && opcode <= INSTR_GT) ot = TYPE_FLOAT; -#if 0 else if (opcode >= INSTR_LE_I && opcode <= INSTR_EQ_FI) ot = TYPE_FLOAT; #endif @@ -1565,29 +1909,32 @@ ir_value* ir_block_create_binop(ir_block *self, return NULL; } - return ir_block_create_general_instr(self, label, opcode, left, right, ot); + return ir_block_create_general_instr(self, ctx, label, opcode, left, right, ot); } -ir_value* ir_block_create_unary(ir_block *self, +ir_value* ir_block_create_unary(ir_block *self, lex_ctx_t ctx, const char *label, int opcode, ir_value *operand) { int ot = TYPE_FLOAT; + ir_value *minus_1 = NULL; + if (opcode == VINSTR_NEG_F || opcode == VINSTR_NEG_V) + minus_1 = ir_builder_imm_float(self->owner->owner, -1.0f, false); switch (opcode) { case INSTR_NOT_F: case INSTR_NOT_V: case INSTR_NOT_S: case INSTR_NOT_ENT: - case INSTR_NOT_FNC: -#if 0 - case INSTR_NOT_I: -#endif + case INSTR_NOT_FNC: /* + case INSTR_NOT_I: */ ot = TYPE_FLOAT; break; - /* QC doesn't have other unary operations. 
We expect extensions to fill - * the above list, otherwise we assume out-type = in-type, eg for an - * unary minus - */ + /* Negation is implemented as -1 * */ + case VINSTR_NEG_F: + return ir_block_create_general_instr(self, ctx, label, INSTR_MUL_F, minus_1, operand, TYPE_FLOAT); + case VINSTR_NEG_V: + return ir_block_create_general_instr(self, ctx, label, INSTR_MUL_FV, minus_1, operand, TYPE_VECTOR); + default: ot = operand->vtype; break; @@ -1598,10 +1945,10 @@ ir_value* ir_block_create_unary(ir_block *self, } /* let's use the general instruction creator and pass NULL for OPB */ - return ir_block_create_general_instr(self, label, opcode, operand, NULL, ot); + return ir_block_create_general_instr(self, ctx, label, opcode, operand, NULL, ot); } -ir_value* ir_block_create_general_instr(ir_block *self, const char *label, +static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t ctx, const char *label, int op, ir_value *a, ir_value *b, int outype) { ir_instr *instr; @@ -1611,7 +1958,7 @@ ir_value* ir_block_create_general_instr(ir_block *self, const char *label, if (!out) return NULL; - instr = ir_instr_new(self, op); + instr = ir_instr_new(ctx, self, op); if (!instr) { ir_value_delete(out); return NULL; @@ -1633,7 +1980,7 @@ on_error: return NULL; } -ir_value* ir_block_create_fieldaddress(ir_block *self, const char *label, ir_value *ent, ir_value *field) +ir_value* ir_block_create_fieldaddress(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field) { ir_value *v; @@ -1644,12 +1991,12 @@ ir_value* ir_block_create_fieldaddress(ir_block *self, const char *label, ir_val if (field->vtype != TYPE_FIELD) return NULL; - v = ir_block_create_general_instr(self, label, INSTR_ADDRESS, ent, field, TYPE_POINTER); + v = ir_block_create_general_instr(self, ctx, label, INSTR_ADDRESS, ent, field, TYPE_POINTER); v->fieldtype = field->fieldtype; return v; } -ir_value* ir_block_create_load_from_ent(ir_block *self, const char *label, ir_value *ent, ir_value *field, int outype) +ir_value* ir_block_create_load_from_ent(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field, int outype) { int op; if (ent->vtype != TYPE_ENTITY) @@ -1676,227 +2023,38 @@ ir_value* ir_block_create_load_from_ent(ir_block *self, const char *label, ir_va return NULL; } - return ir_block_create_general_instr(self, label, op, ent, field, outype); + return ir_block_create_general_instr(self, ctx, label, op, ent, field, outype); } -ir_value* ir_block_create_add(ir_block *self, - const char *label, - ir_value *left, ir_value *right) -{ - int op = 0; - int l = left->vtype; - int r = right->vtype; - if (l == r) { - switch (l) { - default: - irerror(self->context, "invalid type for ir_block_create_add: %s", type_name[l]); - return NULL; - case TYPE_FLOAT: - op = INSTR_ADD_F; - break; -#if 0 - case TYPE_INTEGER: - op = INSTR_ADD_I; - break; -#endif - case TYPE_VECTOR: - op = INSTR_ADD_V; - break; - } - } else { -#if 0 - if ( (l == TYPE_FLOAT && r == TYPE_INTEGER) ) - op = INSTR_ADD_FI; - else if ( (l == TYPE_INTEGER && r == TYPE_FLOAT) ) - op = INSTR_ADD_IF; - else -#endif - { - irerror(self->context, "invalid type for ir_block_create_add: %s", type_name[l]); - return NULL; - } - } - return ir_block_create_binop(self, label, op, left, right); -} +/* PHI resolving breaks the SSA, and must thus be the last + * step before life-range calculation. 
+ */ -ir_value* ir_block_create_sub(ir_block *self, - const char *label, - ir_value *left, ir_value *right) +static bool ir_block_naive_phi(ir_block *self); +bool ir_function_naive_phi(ir_function *self) { - int op = 0; - int l = left->vtype; - int r = right->vtype; - if (l == r) { + size_t i; - switch (l) { - default: - irerror(self->context, "invalid type for ir_block_create_sub: %s", type_name[l]); - return NULL; - case TYPE_FLOAT: - op = INSTR_SUB_F; - break; -#if 0 - case TYPE_INTEGER: - op = INSTR_SUB_I; - break; -#endif - case TYPE_VECTOR: - op = INSTR_SUB_V; - break; - } - } else { -#if 0 - if ( (l == TYPE_FLOAT && r == TYPE_INTEGER) ) - op = INSTR_SUB_FI; - else if ( (l == TYPE_INTEGER && r == TYPE_FLOAT) ) - op = INSTR_SUB_IF; - else -#endif - { - irerror(self->context, "invalid type for ir_block_create_sub: %s", type_name[l]); - return NULL; - } + for (i = 0; i < vec_size(self->blocks); ++i) + { + if (!ir_block_naive_phi(self->blocks[i])) + return false; } - return ir_block_create_binop(self, label, op, left, right); + return true; } -ir_value* ir_block_create_mul(ir_block *self, - const char *label, - ir_value *left, ir_value *right) +static bool ir_block_naive_phi(ir_block *self) { - int op = 0; - int l = left->vtype; - int r = right->vtype; - if (l == r) { - - switch (l) { - default: - irerror(self->context, "invalid type for ir_block_create_mul: %s", type_name[l]); - return NULL; - case TYPE_FLOAT: - op = INSTR_MUL_F; - break; -#if 0 - case TYPE_INTEGER: - op = INSTR_MUL_I; - break; -#endif - case TYPE_VECTOR: - op = INSTR_MUL_V; - break; - } - } else { - if ( (l == TYPE_VECTOR && r == TYPE_FLOAT) ) - op = INSTR_MUL_VF; - else if ( (l == TYPE_FLOAT && r == TYPE_VECTOR) ) - op = INSTR_MUL_FV; -#if 0 - else if ( (l == TYPE_VECTOR && r == TYPE_INTEGER) ) - op = INSTR_MUL_VI; - else if ( (l == TYPE_INTEGER && r == TYPE_VECTOR) ) - op = INSTR_MUL_IV; - else if ( (l == TYPE_FLOAT && r == TYPE_INTEGER) ) - op = INSTR_MUL_FI; - else if ( (l == TYPE_INTEGER && r == TYPE_FLOAT) ) - op = INSTR_MUL_IF; -#endif - else { - irerror(self->context, "invalid type for ir_block_create_mul: %s", type_name[l]); - return NULL; - } - } - return ir_block_create_binop(self, label, op, left, right); -} - -ir_value* ir_block_create_div(ir_block *self, - const char *label, - ir_value *left, ir_value *right) -{ - int op = 0; - int l = left->vtype; - int r = right->vtype; - if (l == r) { - - switch (l) { - default: - irerror(self->context, "invalid type for ir_block_create_div: %s", type_name[l]); - return NULL; - case TYPE_FLOAT: - op = INSTR_DIV_F; - break; -#if 0 - case TYPE_INTEGER: - op = INSTR_DIV_I; - break; -#endif - } - } else { -#if 0 - if ( (l == TYPE_VECTOR && r == TYPE_FLOAT) ) - op = INSTR_DIV_VF; - else if ( (l == TYPE_FLOAT && r == TYPE_INTEGER) ) - op = INSTR_DIV_FI; - else if ( (l == TYPE_INTEGER && r == TYPE_FLOAT) ) - op = INSTR_DIV_IF; - else -#endif - { - irerror(self->context, "invalid type for ir_block_create_div: %s", type_name[l]); - return NULL; - } - } - return ir_block_create_binop(self, label, op, left, right); -} - -/* PHI resolving breaks the SSA, and must thus be the last - * step before life-range calculation. 
- */ - -static bool ir_block_naive_phi(ir_block *self); -bool ir_function_naive_phi(ir_function *self) -{ - size_t i; - - for (i = 0; i < vec_size(self->blocks); ++i) - { - if (!ir_block_naive_phi(self->blocks[i])) - return false; - } - return true; -} - -#if 0 -static bool ir_naive_phi_emit_store(ir_block *block, size_t iid, ir_value *old, ir_value *what) -{ - ir_instr *instr; - size_t i; - - /* create a store */ - if (!ir_block_create_store(block, old, what)) - return false; - - /* we now move it up */ - instr = vec_last(block->instr); - for (i = vec_size(block->instr)-1; i > iid; --i) - block->instr[i] = block->instr[i-1]; - block->instr[i] = instr; - - return true; -} -#endif - -static bool ir_block_naive_phi(ir_block *self) -{ - size_t i, p; /*, w;*/ - /* FIXME: optionally, create_phi can add the phis - * to a list so we don't need to loop through blocks - * - anyway: "don't optimize YET" - */ - for (i = 0; i < vec_size(self->instr); ++i) - { - ir_instr *instr = self->instr[i]; - if (instr->opcode != VINSTR_PHI) - continue; + size_t i, p; /*, w;*/ + /* FIXME: optionally, create_phi can add the phis + * to a list so we don't need to loop through blocks + * - anyway: "don't optimize YET" + */ + for (i = 0; i < vec_size(self->instr); ++i) + { + ir_instr *instr = self->instr[i]; + if (instr->opcode != VINSTR_PHI) + continue; vec_remove(self->instr, i, 1); --i; /* NOTE: i+1 below */ @@ -1921,64 +2079,12 @@ static bool ir_block_naive_phi(ir_block *self) vec_pop(b->instr); b->final = false; instr->_ops[0]->store = store_global; - if (!ir_block_create_store(b, instr->_ops[0], v)) + if (!ir_block_create_store(b, instr->context, instr->_ops[0], v)) return false; instr->_ops[0]->store = store_value; vec_push(b->instr, prevjump); b->final = true; } - -#if 0 - ir_value *v = instr->phi[p].value; - for (w = 0; w < vec_size(v->writes); ++w) { - ir_value *old; - - if (!v->writes[w]->_ops[0]) - continue; - - /* When the write was to a global, we have to emit a mov */ - old = v->writes[w]->_ops[0]; - - /* The original instruction now writes to the PHI target local */ - if (v->writes[w]->_ops[0] == v) - v->writes[w]->_ops[0] = instr->_ops[0]; - - if (old->store != store_value && old->store != store_local && old->store != store_param) - { - /* If it originally wrote to a global we need to store the value - * there as welli - */ - if (!ir_naive_phi_emit_store(self, i+1, old, v)) - return false; - if (i+1 < vec_size(self->instr)) - instr = self->instr[i+1]; - else - instr = NULL; - /* In case I forget and access instr later, it'll be NULL - * when it's a problem, to make sure we crash, rather than accessing - * invalid data. - */ - } - else - { - /* If it didn't, we can replace all reads by the phi target now. 
*/ - size_t r; - for (r = 0; r < vec_size(old->reads); ++r) - { - size_t op; - ir_instr *ri = old->reads[r]; - for (op = 0; op < vec_size(ri->phi); ++op) { - if (ri->phi[op].value == old) - ri->phi[op].value = v; - } - for (op = 0; op < 3; ++op) { - if (ri->_ops[op] == old) - ri->_ops[op] = v; - } - } - } - } -#endif } ir_instr_delete(instr); } @@ -2018,47 +2124,17 @@ void ir_function_enumerate(ir_function *self) size_t instruction_id = 0; for (i = 0; i < vec_size(self->blocks); ++i) { + /* each block now gets an additional "entry" instruction id + * we can use to avoid point-life issues + */ + self->blocks[i]->entry_id = instruction_id; + ++instruction_id; + self->blocks[i]->eid = i; - self->blocks[i]->run_id = 0; ir_block_enumerate(self->blocks[i], &instruction_id); } } -static bool ir_block_life_propagate(ir_block *b, ir_block *prev, bool *changed); -bool ir_function_calculate_liferanges(ir_function *self) -{ - size_t i; - bool changed; - - do { - self->run_id++; - changed = false; - for (i = 0; i != vec_size(self->blocks); ++i) - { - if (self->blocks[i]->is_return) - { - vec_free(self->blocks[i]->living); - if (!ir_block_life_propagate(self->blocks[i], NULL, &changed)) - return false; - } - } - } while (changed); - if (vec_size(self->blocks)) { - ir_block *block = self->blocks[0]; - for (i = 0; i < vec_size(block->living); ++i) { - ir_value *v = block->living[i]; - if (v->memberof || v->store != store_local) - continue; - if (irwarning(v->context, WARN_USED_UNINITIALIZED, - "variable `%s` may be used uninitialized in this function", v->name)) - { - return false; - } - } - } - return true; -} - /* Local-value allocator * After finishing creating the liferange of all values used in a function * we can allocate their global-positions. @@ -2068,12 +2144,15 @@ typedef struct { ir_value **locals; size_t *sizes; size_t *positions; + bool *unique; } function_allocator; -static bool function_allocator_alloc(function_allocator *alloc, const ir_value *var) +static bool function_allocator_alloc(function_allocator *alloc, ir_value *var) { ir_value *slot; - size_t vsize = type_sizeof[var->vtype]; + size_t vsize = ir_value_sizeof(var); + + var->code.local = vec_size(alloc->locals); slot = ir_value_var("reg", store_global, var->vtype); if (!slot) @@ -2084,6 +2163,7 @@ static bool function_allocator_alloc(function_allocator *alloc, const ir_value * vec_push(alloc->locals, slot); vec_push(alloc->sizes, vsize); + vec_push(alloc->unique, var->unique_life); return true; @@ -2092,27 +2172,94 @@ localerror: return false; } +static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v) +{ + size_t a; + ir_value *slot; + + if (v->unique_life) + return function_allocator_alloc(alloc, v); + + for (a = 0; a < vec_size(alloc->locals); ++a) + { + /* if it's reserved for a unique liferange: skip */ + if (alloc->unique[a]) + continue; + + slot = alloc->locals[a]; + + /* never resize parameters + * will be required later when overlapping temps + locals + */ + if (a < vec_size(self->params) && + alloc->sizes[a] < ir_value_sizeof(v)) + { + continue; + } + + if (ir_values_overlap(v, slot)) + continue; + + if (!ir_value_life_merge_into(slot, v)) + return false; + + /* adjust size for this slot */ + if (alloc->sizes[a] < ir_value_sizeof(v)) + alloc->sizes[a] = ir_value_sizeof(v); + + v->code.local = a; + return true; + } + if (a >= vec_size(alloc->locals)) { + if (!function_allocator_alloc(alloc, v)) + return false; + } + return true; +} + bool ir_function_allocate_locals(ir_function 
*self) { - size_t i, a; + size_t i; bool retval = true; size_t pos; + bool opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS); - ir_value *slot; - const ir_value *v; + ir_value *v; - function_allocator alloc; + function_allocator lockalloc, globalloc; if (!vec_size(self->locals) && !vec_size(self->values)) return true; - alloc.locals = NULL; - alloc.sizes = NULL; - alloc.positions = NULL; + globalloc.locals = NULL; + globalloc.sizes = NULL; + globalloc.positions = NULL; + globalloc.unique = NULL; + lockalloc.locals = NULL; + lockalloc.sizes = NULL; + lockalloc.positions = NULL; + lockalloc.unique = NULL; for (i = 0; i < vec_size(self->locals); ++i) { - if (!function_allocator_alloc(&alloc, self->locals[i])) + v = self->locals[i]; + if ((self->flags & IR_FLAG_MASK_NO_LOCAL_TEMPS) || !OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) { + v->locked = true; + v->unique_life = true; + } + else if (i >= vec_size(self->params)) + break; + else + v->locked = true; /* lock parameters locals */ + if (!function_allocator_alloc((v->locked || !opt_gt ? &lockalloc : &globalloc), v)) + goto error; + } + for (; i < vec_size(self->locals); ++i) + { + v = self->locals[i]; + if (!vec_size(v->life)) + continue; + if (!ir_function_allocator_assign(self, (v->locked || !opt_gt ? &lockalloc : &globalloc), v)) goto error; } @@ -2124,52 +2271,100 @@ bool ir_function_allocate_locals(ir_function *self) if (!vec_size(v->life)) continue; - for (a = 0; a < vec_size(alloc.locals); ++a) - { - slot = alloc.locals[a]; - - if (ir_values_overlap(v, slot)) + /* CALL optimization: + * If the value is a parameter-temp: 1 write, 1 read from a CALL + * and it's not "locked", write it to the OFS_PARM directly. + */ + if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->locked && !v->unique_life) { + if (vec_size(v->reads) == 1 && vec_size(v->writes) == 1 && + (v->reads[0]->opcode == VINSTR_NRCALL || + (v->reads[0]->opcode >= INSTR_CALL0 && v->reads[0]->opcode <= INSTR_CALL8) + ) + ) + { + size_t param; + ir_instr *call = v->reads[0]; + if (!vec_ir_value_find(call->params, v, ¶m)) { + irerror(call->context, "internal error: unlocked parameter %s not found", v->name); + goto error; + } + ++opts_optimizationcount[OPTIM_CALL_STORES]; + v->callparam = true; + if (param < 8) + ir_value_code_setaddr(v, OFS_PARM0 + 3*param); + else { + size_t nprotos = vec_size(self->owner->extparam_protos); + ir_value *ep; + param -= 8; + if (nprotos > param) + ep = self->owner->extparam_protos[param]; + else + { + ep = ir_gen_extparam_proto(self->owner); + while (++nprotos <= param) + ep = ir_gen_extparam_proto(self->owner); + } + ir_instr_op(v->writes[0], 0, ep, true); + call->params[param+8] = ep; + } continue; - - if (!ir_value_life_merge_into(slot, v)) - goto error; - - /* adjust size for this slot */ - if (alloc.sizes[a] < type_sizeof[v->vtype]) - alloc.sizes[a] = type_sizeof[v->vtype]; - - self->values[i]->code.local = a; - break; - } - if (a >= vec_size(alloc.locals)) { - self->values[i]->code.local = vec_size(alloc.locals); - if (!function_allocator_alloc(&alloc, v)) - goto error; + } + if (vec_size(v->writes) == 1 && v->writes[0]->opcode == INSTR_CALL0) + { + v->store = store_return; + if (v->members[0]) v->members[0]->store = store_return; + if (v->members[1]) v->members[1]->store = store_return; + if (v->members[2]) v->members[2]->store = store_return; + ++opts_optimizationcount[OPTIM_CALL_STORES]; + continue; + } } + + if (!ir_function_allocator_assign(self, (v->locked || !opt_gt ? 
&lockalloc : &globalloc), v)) + goto error; } - if (!alloc.sizes) { + if (!lockalloc.sizes && !globalloc.sizes) { goto cleanup; } + vec_push(lockalloc.positions, 0); + vec_push(globalloc.positions, 0); /* Adjust slot positions based on sizes */ - vec_push(alloc.positions, 0); - - if (vec_size(alloc.sizes)) - pos = alloc.positions[0] + alloc.sizes[0]; - else - pos = 0; - for (i = 1; i < vec_size(alloc.sizes); ++i) - { - pos = alloc.positions[i-1] + alloc.sizes[i-1]; - vec_push(alloc.positions, pos); + if (lockalloc.sizes) { + pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0); + for (i = 1; i < vec_size(lockalloc.sizes); ++i) + { + pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1]; + vec_push(lockalloc.positions, pos); + } + self->allocated_locals = pos + vec_last(lockalloc.sizes); + } + if (globalloc.sizes) { + pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0); + for (i = 1; i < vec_size(globalloc.sizes); ++i) + { + pos = globalloc.positions[i-1] + globalloc.sizes[i-1]; + vec_push(globalloc.positions, pos); + } + self->globaltemps = pos + vec_last(globalloc.sizes); } - self->allocated_locals = pos + vec_last(alloc.sizes); - - /* Take over the actual slot positions */ + /* Locals need to know their new position */ + for (i = 0; i < vec_size(self->locals); ++i) { + v = self->locals[i]; + if (v->locked || !opt_gt) + v->code.local = lockalloc.positions[v->code.local]; + else + v->code.local = globalloc.positions[v->code.local]; + } + /* Take over the actual slot positions on values */ for (i = 0; i < vec_size(self->values); ++i) { - self->values[i]->code.local = alloc.positions[self->values[i]->code.local]; + v = self->values[i]; + if (v->locked || !opt_gt) + v->code.local = lockalloc.positions[v->code.local]; + else + v->code.local = globalloc.positions[v->code.local]; } goto cleanup; @@ -2177,11 +2372,18 @@ bool ir_function_allocate_locals(ir_function *self) error: retval = false; cleanup: - for (i = 0; i < vec_size(alloc.locals); ++i) - ir_value_delete(alloc.locals[i]); - vec_free(alloc.locals); - vec_free(alloc.sizes); - vec_free(alloc.positions); + for (i = 0; i < vec_size(lockalloc.locals); ++i) + ir_value_delete(lockalloc.locals[i]); + for (i = 0; i < vec_size(globalloc.locals); ++i) + ir_value_delete(globalloc.locals[i]); + vec_free(globalloc.unique); + vec_free(globalloc.locals); + vec_free(globalloc.sizes); + vec_free(globalloc.positions); + vec_free(lockalloc.unique); + vec_free(lockalloc.locals); + vec_free(lockalloc.sizes); + vec_free(lockalloc.positions); return retval; } @@ -2226,71 +2428,53 @@ static void ir_op_read_write(int op, size_t *read, size_t *write) static bool ir_block_living_add_instr(ir_block *self, size_t eid) { - size_t i; - bool changed = false; - bool tempbool; - for (i = 0; i != vec_size(self->living); ++i) + size_t i; + const size_t vs = vec_size(self->living); + bool changed = false; + for (i = 0; i != vs; ++i) { - tempbool = ir_value_life_merge(self->living[i], eid); - /* debug - if (tempbool) - irerror(self->context, "block_living_add_instr() value instruction added %s: %i", self->living[i]->_name, (int)eid); - */ - changed = changed || tempbool; + if (ir_value_life_merge(self->living[i], eid)) + changed = true; } return changed; } -static bool ir_block_life_prop_previous(ir_block* self, ir_block *prev, bool *changed) +static bool ir_block_living_lock(ir_block *self) { size_t i; - - (void)changed; - - /* values which have been read in a previous iteration are now - * in the "living" array even if the previous block doesn't use 
them. - * So we have to remove whatever does not exist in the previous block. - * They will be re-added on-read, but the liferange merge won't cause - * a change. - */ - for (i = 0; i < vec_size(self->living); ++i) + bool changed = false; + for (i = 0; i != vec_size(self->living); ++i) { - if (!vec_ir_value_find(prev->living, self->living[i], NULL)) { - vec_remove(self->living, i, 1); - --i; + if (!self->living[i]->locked) { + self->living[i]->locked = true; + changed = true; } } - - /* Whatever the previous block still has in its living set - * must now be added to ours as well. - */ - for (i = 0; i < vec_size(prev->living); ++i) - { - if (vec_ir_value_find(self->living, prev->living[i], NULL)) - continue; - vec_push(self->living, prev->living[i]); - /* - irerror(self->contextt from prev: %s", self->label, prev->living[i]->_name); - */ - } - return true; + return changed; } -static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *changed) +static bool ir_block_life_propagate(ir_block *self, bool *changed) { ir_instr *instr; ir_value *value; - bool tempbool; - size_t i, o, p; + size_t i, o, p, mem, cnt; /* bitmasks which operands are read from or written to */ size_t read, write; - char dbg_ind[16] = { '#', '0' }; + char dbg_ind[16]; + dbg_ind[0] = '#'; + dbg_ind[1] = '0'; (void)dbg_ind; - if (prev) - { - if (!ir_block_life_prop_previous(self, prev, changed)) - return false; + vec_free(self->living); + + p = vec_size(self->exits); + for (i = 0; i < p; ++i) { + ir_block *prev = self->exits[i]; + cnt = vec_size(prev->living); + for (o = 0; o < cnt; ++o) { + if (!vec_ir_value_find(self->living, prev->living[o], NULL)) + vec_push(self->living, prev->living[o]); + } } i = vec_size(self->instr); @@ -2298,51 +2482,18 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change { --i; instr = self->instr[i]; - /* PHI operands are always read operands */ - for (p = 0; p < vec_size(instr->phi); ++p) - { - value = instr->phi[p].value; - if (value->memberof) - value = value->memberof; - if (!vec_ir_value_find(self->living, value, NULL)) - vec_push(self->living, value); - } - - /* call params are read operands too */ - for (p = 0; p < vec_size(instr->params); ++p) - { - value = instr->params[p]; - if (value->memberof) - value = value->memberof; - if (!vec_ir_value_find(self->living, value, NULL)) - vec_push(self->living, value); - } - /* See which operands are read and write operands */ ir_op_read_write(instr->opcode, &read, &write); - if (instr->opcode == INSTR_MUL_VF) - { - /* the float source will get an additional lifetime */ - tempbool = ir_value_life_merge(instr->_ops[2], instr->eid+1); - *changed = *changed || tempbool; - } - else if (instr->opcode == INSTR_MUL_FV) - { - /* the float source will get an additional lifetime */ - tempbool = ir_value_life_merge(instr->_ops[1], instr->eid+1); - *changed = *changed || tempbool; - } - - /* Go through the 3 main operands */ + /* Go through the 3 main operands + * writes first, then reads + */ for (o = 0; o < 3; ++o) { if (!instr->_ops[o]) /* no such operand */ continue; value = instr->_ops[o]; - if (value->memberof) - value = value->memberof; /* We only care about locals */ /* we also calculate parameter liferanges so that locals @@ -2352,13 +2503,6 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change value->store != store_param) continue; - /* read operands */ - if (read & (1<living, value, NULL)) - vec_push(self->living, value); - } - /* write operands */ /* When we write to a 
local, we consider it "dead" for the * remaining upper part of the function, since in SSA a value @@ -2377,51 +2521,226 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change * and make sure it's only printed once * since this function is run multiple times. */ - /* For now: debug info: */ /* con_err( "Value only written %s\n", value->name); */ - tempbool = ir_value_life_merge(value, instr->eid); - *changed = *changed || tempbool; - /* - ir_instr_dump(instr, dbg_ind, printf); - abort(); - */ + if (ir_value_life_merge(value, instr->eid)) + *changed = true; } else { /* since 'living' won't contain it * anymore, merge the value, since * (A) doesn't. */ - tempbool = ir_value_life_merge(value, instr->eid); - /* - if (tempbool) - con_err( "value added id %s %i\n", value->name, (int)instr->eid); - */ - *changed = *changed || tempbool; + if (ir_value_life_merge(value, instr->eid)) + *changed = true; /* Then remove */ vec_remove(self->living, idx, 1); } + /* Removing a vector removes all members */ + for (mem = 0; mem < 3; ++mem) { + if (value->members[mem] && vec_ir_value_find(self->living, value->members[mem], &idx)) { + if (ir_value_life_merge(value->members[mem], instr->eid)) + *changed = true; + vec_remove(self->living, idx, 1); + } + } + /* Removing the last member removes the vector */ + if (value->memberof) { + value = value->memberof; + for (mem = 0; mem < 3; ++mem) { + if (value->members[mem] && vec_ir_value_find(self->living, value->members[mem], NULL)) + break; + } + if (mem == 3 && vec_ir_value_find(self->living, value, &idx)) { + if (ir_value_life_merge(value, instr->eid)) + *changed = true; + vec_remove(self->living, idx, 1); + } + } } } - /* (A) */ - tempbool = ir_block_living_add_instr(self, instr->eid); - /*con_err( "living added values\n");*/ - *changed = *changed || tempbool; - } + /* These operations need a special case as they can break when using + * same source and destination operand otherwise, as the engine may + * read the source multiple times. 
*/ + if (instr->opcode == INSTR_MUL_VF || + instr->opcode == VINSTR_BITAND_VF || + instr->opcode == VINSTR_BITOR_VF || + instr->opcode == VINSTR_BITXOR || + instr->opcode == VINSTR_BITXOR_VF || + instr->opcode == VINSTR_BITXOR_V || + instr->opcode == VINSTR_CROSS) + { + value = instr->_ops[2]; + /* the float source will get an additional lifetime */ + if (ir_value_life_merge(value, instr->eid+1)) + *changed = true; + if (value->memberof && ir_value_life_merge(value->memberof, instr->eid+1)) + *changed = true; + } - if (self->run_id == self->owner->run_id) - return true; + if (instr->opcode == INSTR_MUL_FV || + instr->opcode == INSTR_LOAD_V || + instr->opcode == VINSTR_BITXOR || + instr->opcode == VINSTR_BITXOR_VF || + instr->opcode == VINSTR_BITXOR_V || + instr->opcode == VINSTR_CROSS) + { + value = instr->_ops[1]; + /* the float source will get an additional lifetime */ + if (ir_value_life_merge(value, instr->eid+1)) + *changed = true; + if (value->memberof && ir_value_life_merge(value->memberof, instr->eid+1)) + *changed = true; + } - self->run_id = self->owner->run_id; + for (o = 0; o < 3; ++o) + { + if (!instr->_ops[o]) /* no such operand */ + continue; - for (i = 0; i < vec_size(self->entries); ++i) - { - ir_block *entry = self->entries[i]; - ir_block_life_propagate(entry, self, changed); + value = instr->_ops[o]; + + /* We only care about locals */ + /* we also calculate parameter liferanges so that locals + * can take up parameter slots */ + if (value->store != store_value && + value->store != store_local && + value->store != store_param) + continue; + + /* read operands */ + if (read & (1<living, value, NULL)) + vec_push(self->living, value); + /* reading adds the full vector */ + if (value->memberof && !vec_ir_value_find(self->living, value->memberof, NULL)) + vec_push(self->living, value->memberof); + for (mem = 0; mem < 3; ++mem) { + if (value->members[mem] && !vec_ir_value_find(self->living, value->members[mem], NULL)) + vec_push(self->living, value->members[mem]); + } + } + } + /* PHI operands are always read operands */ + for (p = 0; p < vec_size(instr->phi); ++p) + { + value = instr->phi[p].value; + if (!vec_ir_value_find(self->living, value, NULL)) + vec_push(self->living, value); + /* reading adds the full vector */ + if (value->memberof && !vec_ir_value_find(self->living, value->memberof, NULL)) + vec_push(self->living, value->memberof); + for (mem = 0; mem < 3; ++mem) { + if (value->members[mem] && !vec_ir_value_find(self->living, value->members[mem], NULL)) + vec_push(self->living, value->members[mem]); + } + } + + /* on a call, all these values must be "locked" */ + if (instr->opcode >= INSTR_CALL0 && instr->opcode <= INSTR_CALL8) { + if (ir_block_living_lock(self)) + *changed = true; + } + /* call params are read operands too */ + for (p = 0; p < vec_size(instr->params); ++p) + { + value = instr->params[p]; + if (!vec_ir_value_find(self->living, value, NULL)) + vec_push(self->living, value); + /* reading adds the full vector */ + if (value->memberof && !vec_ir_value_find(self->living, value->memberof, NULL)) + vec_push(self->living, value->memberof); + for (mem = 0; mem < 3; ++mem) { + if (value->members[mem] && !vec_ir_value_find(self->living, value->members[mem], NULL)) + vec_push(self->living, value->members[mem]); + } + } + + /* (A) */ + if (ir_block_living_add_instr(self, instr->eid)) + *changed = true; } + /* the "entry" instruction ID */ + if (ir_block_living_add_instr(self, self->entry_id)) + *changed = true; return true; } +bool 
ir_function_calculate_liferanges(ir_function *self) +{ + size_t i, s; + bool changed; + + /* parameters live at 0 */ + for (i = 0; i < vec_size(self->params); ++i) + if (!ir_value_life_merge(self->locals[i], 0)) + compile_error(self->context, "internal error: failed value-life merging"); + + do { + self->run_id++; + changed = false; + i = vec_size(self->blocks); + while (i--) { + ir_block_life_propagate(self->blocks[i], &changed); + } + } while (changed); + + if (vec_size(self->blocks)) { + ir_block *block = self->blocks[0]; + for (i = 0; i < vec_size(block->living); ++i) { + ir_value *v = block->living[i]; + if (v->store != store_local) + continue; + if (v->vtype == TYPE_VECTOR) + continue; + self->flags |= IR_FLAG_HAS_UNINITIALIZED; + /* find the instruction reading from it */ + for (s = 0; s < vec_size(v->reads); ++s) { + if (v->reads[s]->eid == v->life[0].end) + break; + } + if (s < vec_size(v->reads)) { + if (irwarning(v->context, WARN_USED_UNINITIALIZED, + "variable `%s` may be used uninitialized in this function\n" + " -> %s:%i", + v->name, + v->reads[s]->context.file, v->reads[s]->context.line) + ) + { + return false; + } + continue; + } + if (v->memberof) { + ir_value *vec = v->memberof; + for (s = 0; s < vec_size(vec->reads); ++s) { + if (vec->reads[s]->eid == v->life[0].end) + break; + } + if (s < vec_size(vec->reads)) { + if (irwarning(v->context, WARN_USED_UNINITIALIZED, + "variable `%s` may be used uninitialized in this function\n" + " -> %s:%i", + v->name, + vec->reads[s]->context.file, vec->reads[s]->context.line) + ) + { + return false; + } + continue; + } + } + if (irwarning(v->context, WARN_USED_UNINITIALIZED, + "variable `%s` may be used uninitialized in this function", v->name)) + { + return false; + } + } + } + return true; +} + /*********************************************************************** *IR Code-Generation * @@ -2439,7 +2758,7 @@ static bool ir_block_life_propagate(ir_block *self, ir_block *prev, bool *change */ static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool islocal); -static bool gen_global_field(ir_value *global) +static bool gen_global_field(code_t *code, ir_value *global) { if (global->hasvalue) { @@ -2449,34 +2768,21 @@ static bool gen_global_field(ir_value *global) return false; } - /* Now, in this case, a relocation would be impossible to code - * since it looks like this: - * .vector v = origin; <- parse error, wtf is 'origin'? - * .vector origin; - * - * But we will need a general relocation support later anyway - * for functions... might as well support that here. 
- */ - if (!fld->code.globaladdr) { - irerror(global->context, "FIXME: Relocation support"); - return false; - } - /* copy the field's value */ - ir_value_code_setaddr(global, vec_size(code_globals)); - vec_push(code_globals, code_globals[fld->code.globaladdr]); + ir_value_code_setaddr(global, vec_size(code->globals)); + vec_push(code->globals, fld->code.fieldaddr); if (global->fieldtype == TYPE_VECTOR) { - vec_push(code_globals, code_globals[fld->code.globaladdr]+1); - vec_push(code_globals, code_globals[fld->code.globaladdr]+2); + vec_push(code->globals, fld->code.fieldaddr+1); + vec_push(code->globals, fld->code.fieldaddr+2); } } else { - ir_value_code_setaddr(global, vec_size(code_globals)); - vec_push(code_globals, 0); + ir_value_code_setaddr(global, vec_size(code->globals)); + vec_push(code->globals, 0); if (global->fieldtype == TYPE_VECTOR) { - vec_push(code_globals, 0); - vec_push(code_globals, 0); + vec_push(code->globals, 0); + vec_push(code->globals, 0); } } if (global->code.globaladdr < 0) @@ -2484,7 +2790,7 @@ static bool gen_global_field(ir_value *global) return true; } -static bool gen_global_pointer(ir_value *global) +static bool gen_global_pointer(code_t *code, ir_value *global) { if (global->hasvalue) { @@ -2510,32 +2816,32 @@ static bool gen_global_pointer(ir_value *global) return false; } - ir_value_code_setaddr(global, vec_size(code_globals)); - vec_push(code_globals, target->code.globaladdr); + ir_value_code_setaddr(global, vec_size(code->globals)); + vec_push(code->globals, target->code.globaladdr); } else { - ir_value_code_setaddr(global, vec_size(code_globals)); - vec_push(code_globals, 0); + ir_value_code_setaddr(global, vec_size(code->globals)); + vec_push(code->globals, 0); } if (global->code.globaladdr < 0) return false; return true; } -static bool gen_blocks_recursive(ir_function *func, ir_block *block) +static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *block) { - prog_section_statement stmt; + prog_section_statement_t stmt; ir_instr *instr; ir_block *target; ir_block *ontrue; ir_block *onfalse; size_t stidx; size_t i; + int j; -tailcall: block->generated = true; - block->code_start = vec_size(code_statements); + block->code_start = vec_size(code->statements); for (i = 0; i < vec_size(block->instr); ++i) { instr = block->instr[i]; @@ -2545,25 +2851,185 @@ tailcall: return false; } - if (instr->opcode == VINSTR_JUMP) { - target = instr->bops[0]; - /* for uncoditional jumps, if the target hasn't been generated - * yet, we generate them right here. - */ - if (!target->generated) { - block = target; - goto tailcall; + if (instr->opcode == VINSTR_JUMP) { + target = instr->bops[0]; + /* for uncoditional jumps, if the target hasn't been generated + * yet, we generate them right here. 
+ */ + if (!target->generated) + return gen_blocks_recursive(code, func, target); + + /* otherwise we generate a jump instruction */ + stmt.opcode = INSTR_GOTO; + stmt.o1.s1 = (target->code_start) - vec_size(code->statements); + stmt.o2.s1 = 0; + stmt.o3.s1 = 0; + if (stmt.o1.s1 != 1) + code_push_statement(code, &stmt, instr->context); + + /* no further instructions can be in this block */ + return true; + } + + if (instr->opcode == VINSTR_BITXOR) { + stmt.opcode = INSTR_BITOR; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]); + stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]); + stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]); + code_push_statement(code, &stmt, instr->context); + stmt.opcode = INSTR_BITAND; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]); + stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]); + stmt.o3.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]); + code_push_statement(code, &stmt, instr->context); + stmt.opcode = INSTR_SUB_F; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[0]); + stmt.o2.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]); + stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]); + code_push_statement(code, &stmt, instr->context); + + /* instruction generated */ + continue; + } + + if (instr->opcode == VINSTR_BITAND_V) { + stmt.opcode = INSTR_BITAND; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]); + stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]); + stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]); + code_push_statement(code, &stmt, instr->context); + ++stmt.o1.s1; + ++stmt.o2.s1; + ++stmt.o3.s1; + code_push_statement(code, &stmt, instr->context); + ++stmt.o1.s1; + ++stmt.o2.s1; + ++stmt.o3.s1; + code_push_statement(code, &stmt, instr->context); + + /* instruction generated */ + continue; + } + + if (instr->opcode == VINSTR_BITOR_V) { + stmt.opcode = INSTR_BITOR; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]); + stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]); + stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]); + code_push_statement(code, &stmt, instr->context); + ++stmt.o1.s1; + ++stmt.o2.s1; + ++stmt.o3.s1; + code_push_statement(code, &stmt, instr->context); + ++stmt.o1.s1; + ++stmt.o2.s1; + ++stmt.o3.s1; + code_push_statement(code, &stmt, instr->context); + + /* instruction generated */ + continue; + } + + if (instr->opcode == VINSTR_BITXOR_V) { + for (j = 0; j < 3; ++j) { + stmt.opcode = INSTR_BITOR; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]) + j; + stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]) + j; + stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]) + j; + code_push_statement(code, &stmt, instr->context); + stmt.opcode = INSTR_BITAND; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]) + j; + stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]) + j; + stmt.o3.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]) + j; + code_push_statement(code, &stmt, instr->context); + } + stmt.opcode = INSTR_SUB_V; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[0]); + stmt.o2.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]); + stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]); + code_push_statement(code, &stmt, instr->context); + + /* instruction generated */ + continue; + } + + if (instr->opcode == VINSTR_BITAND_VF) { + stmt.opcode = INSTR_BITAND; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]); + stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]); + stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]); + code_push_statement(code, &stmt, instr->context); + ++stmt.o1.s1; + ++stmt.o3.s1; + code_push_statement(code, &stmt, instr->context); + ++stmt.o1.s1; + 
++stmt.o3.s1; + code_push_statement(code, &stmt, instr->context); + + /* instruction generated */ + continue; + } + + if (instr->opcode == VINSTR_BITOR_VF) { + stmt.opcode = INSTR_BITOR; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]); + stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]); + stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]); + code_push_statement(code, &stmt, instr->context); + ++stmt.o1.s1; + ++stmt.o3.s1; + code_push_statement(code, &stmt, instr->context); + ++stmt.o1.s1; + ++stmt.o3.s1; + code_push_statement(code, &stmt, instr->context); + + /* instruction generated */ + continue; + } + + if (instr->opcode == VINSTR_BITXOR_VF) { + for (j = 0; j < 3; ++j) { + stmt.opcode = INSTR_BITOR; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]) + j; + stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]); + stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]) + j; + code_push_statement(code, &stmt, instr->context); + stmt.opcode = INSTR_BITAND; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]) + j; + stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]); + stmt.o3.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]) + j; + code_push_statement(code, &stmt, instr->context); } + stmt.opcode = INSTR_SUB_V; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[0]); + stmt.o2.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]); + stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]); + code_push_statement(code, &stmt, instr->context); - /* otherwise we generate a jump instruction */ - stmt.opcode = INSTR_GOTO; - stmt.o1.s1 = (target->code_start) - vec_size(code_statements); - stmt.o2.s1 = 0; - stmt.o3.s1 = 0; - vec_push(code_statements, stmt); + /* instruction generated */ + continue; + } - /* no further instructions can be in this block */ - return true; + if (instr->opcode == VINSTR_CROSS) { + stmt.opcode = INSTR_MUL_F; + for (j = 0; j < 3; ++j) { + stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]) + (j + 1) % 3; + stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]) + (j + 2) % 3; + stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]) + j; + code_push_statement(code, &stmt, instr->context); + stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]) + (j + 2) % 3; + stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]) + (j + 1) % 3; + stmt.o3.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]) + j; + code_push_statement(code, &stmt, instr->context); + } + stmt.opcode = INSTR_SUB_V; + stmt.o1.s1 = ir_value_code_addr(instr->_ops[0]); + stmt.o2.s1 = ir_value_code_addr(func->owner->vinstr_temp[0]); + stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]); + code_push_statement(code, &stmt, instr->context); + + /* instruction generated */ + continue; } if (instr->opcode == VINSTR_COND) { @@ -2579,25 +3045,23 @@ tailcall: if (ontrue->generated) { stmt.opcode = INSTR_IF; - stmt.o2.s1 = (ontrue->code_start) - vec_size(code_statements); - vec_push(code_statements, stmt); + stmt.o2.s1 = (ontrue->code_start) - vec_size(code->statements); + if (stmt.o2.s1 != 1) + code_push_statement(code, &stmt, instr->context); } if (onfalse->generated) { stmt.opcode = INSTR_IFNOT; - stmt.o2.s1 = (onfalse->code_start) - vec_size(code_statements); - vec_push(code_statements, stmt); + stmt.o2.s1 = (onfalse->code_start) - vec_size(code->statements); + if (stmt.o2.s1 != 1) + code_push_statement(code, &stmt, instr->context); } if (!ontrue->generated) { - if (onfalse->generated) { - block = ontrue; - goto tailcall; - } + if (onfalse->generated) + return gen_blocks_recursive(code, func, ontrue); } if (!onfalse->generated) { - if (ontrue->generated) { - block = onfalse; - goto 
tailcall; - } + if (ontrue->generated) + return gen_blocks_recursive(code, func, onfalse); } /* neither ontrue nor onfalse exist */ stmt.opcode = INSTR_IFNOT; @@ -2608,18 +3072,24 @@ tailcall: onfalse = ontrue; ontrue = tmp; } - stidx = vec_size(code_statements); - vec_push(code_statements, stmt); + stidx = vec_size(code->statements); + code_push_statement(code, &stmt, instr->context); /* on false we jump, so add ontrue-path */ - if (!gen_blocks_recursive(func, ontrue)) + if (!gen_blocks_recursive(code, func, ontrue)) return false; /* fixup the jump address */ - code_statements[stidx].o2.s1 = vec_size(code_statements) - stidx; + code->statements[stidx].o2.s1 = vec_size(code->statements) - stidx; /* generate onfalse path */ if (onfalse->generated) { /* fixup the jump address */ - code_statements[stidx].o2.s1 = (onfalse->code_start) - (stidx); - stmt.opcode = vec_last(code_statements).opcode; + code->statements[stidx].o2.s1 = (onfalse->code_start) - (stidx); + if (stidx+2 == vec_size(code->statements) && code->statements[stidx].o2.s1 == 1) { + code->statements[stidx] = code->statements[stidx+1]; + if (code->statements[stidx].o1.s1 < 0) + code->statements[stidx].o1.s1++; + code_pop_statement(code); + } + stmt.opcode = vec_last(code->statements).opcode; if (stmt.opcode == INSTR_GOTO || stmt.opcode == INSTR_IF || stmt.opcode == INSTR_IFNOT || @@ -2631,31 +3101,26 @@ tailcall: } /* may have been generated in the previous recursive call */ stmt.opcode = INSTR_GOTO; - stmt.o1.s1 = (onfalse->code_start) - vec_size(code_statements); + stmt.o1.s1 = (onfalse->code_start) - vec_size(code->statements); stmt.o2.s1 = 0; stmt.o3.s1 = 0; - vec_push(code_statements, stmt); + if (stmt.o1.s1 != 1) + code_push_statement(code, &stmt, instr->context); return true; } + else if (stidx+2 == vec_size(code->statements) && code->statements[stidx].o2.s1 == 1) { + code->statements[stidx] = code->statements[stidx+1]; + if (code->statements[stidx].o1.s1 < 0) + code->statements[stidx].o1.s1++; + code_pop_statement(code); + } /* if not, generate now */ - block = onfalse; - goto tailcall; + return gen_blocks_recursive(code, func, onfalse); } - if (instr->opcode >= INSTR_CALL0 && instr->opcode <= INSTR_CALL8) { - /* Trivial call translation: - * copy all params to OFS_PARM* - * if the output's storetype is not store_return, - * add append a STORE instruction! - * - * NOTES on how to do it better without much trouble: - * -) The liferanges! - * Simply check the liferange of all parameters for - * other CALLs. For each param with no CALL in its - * liferange, we can store it in an OFS_PARM at - * generation already. This would even include later - * reuse.... probably... 
:) - */ + if ( (instr->opcode >= INSTR_CALL0 && instr->opcode <= INSTR_CALL8) + || instr->opcode == VINSTR_NRCALL) + { size_t p, first; ir_value *retvalue; @@ -2665,17 +3130,35 @@ tailcall: for (p = 0; p < first; ++p) { ir_value *param = instr->params[p]; + if (param->callparam) + continue; stmt.opcode = INSTR_STORE_F; stmt.o3.u1 = 0; if (param->vtype == TYPE_FIELD) stmt.opcode = field_store_instr[param->fieldtype]; + else if (param->vtype == TYPE_NIL) + stmt.opcode = INSTR_STORE_V; else stmt.opcode = type_store_instr[param->vtype]; stmt.o1.u1 = ir_value_code_addr(param); stmt.o2.u1 = OFS_PARM0 + 3 * p; - vec_push(code_statements, stmt); + + if (param->vtype == TYPE_VECTOR && (param->flags & IR_FLAG_SPLIT_VECTOR)) { + /* fetch 3 separate floats */ + stmt.opcode = INSTR_STORE_F; + stmt.o1.u1 = ir_value_code_addr(param->members[0]); + code_push_statement(code, &stmt, instr->context); + stmt.o2.u1++; + stmt.o1.u1 = ir_value_code_addr(param->members[1]); + code_push_statement(code, &stmt, instr->context); + stmt.o2.u1++; + stmt.o1.u1 = ir_value_code_addr(param->members[2]); + code_push_statement(code, &stmt, instr->context); + } + else + code_push_statement(code, &stmt, instr->context); } /* Now handle extparams */ first = vec_size(instr->params); @@ -2685,10 +3168,11 @@ tailcall: ir_value *param = instr->params[p]; ir_value *targetparam; - if (p-8 >= vec_size(ir->extparams)) { - irerror(instr->context, "Not enough extparam-globals have been created"); - return false; - } + if (param->callparam) + continue; + + if (p-8 >= vec_size(ir->extparams)) + ir_gen_extparam(ir); targetparam = ir->extparams[p-8]; @@ -2697,11 +3181,26 @@ tailcall: if (param->vtype == TYPE_FIELD) stmt.opcode = field_store_instr[param->fieldtype]; + else if (param->vtype == TYPE_NIL) + stmt.opcode = INSTR_STORE_V; else stmt.opcode = type_store_instr[param->vtype]; stmt.o1.u1 = ir_value_code_addr(param); stmt.o2.u1 = ir_value_code_addr(targetparam); - vec_push(code_statements, stmt); + if (param->vtype == TYPE_VECTOR && (param->flags & IR_FLAG_SPLIT_VECTOR)) { + /* fetch 3 separate floats */ + stmt.opcode = INSTR_STORE_F; + stmt.o1.u1 = ir_value_code_addr(param->members[0]); + code_push_statement(code, &stmt, instr->context); + stmt.o2.u1++; + stmt.o1.u1 = ir_value_code_addr(param->members[1]); + code_push_statement(code, &stmt, instr->context); + stmt.o2.u1++; + stmt.o1.u1 = ir_value_code_addr(param->members[2]); + code_push_statement(code, &stmt, instr->context); + } + else + code_push_statement(code, &stmt, instr->context); } stmt.opcode = INSTR_CALL0 + vec_size(instr->params); @@ -2710,27 +3209,34 @@ tailcall: stmt.o1.u1 = ir_value_code_addr(instr->_ops[1]); stmt.o2.u1 = 0; stmt.o3.u1 = 0; - vec_push(code_statements, stmt); + code_push_statement(code, &stmt, instr->context); retvalue = instr->_ops[0]; - if (retvalue && retvalue->store != store_return && vec_size(retvalue->life)) + if (retvalue && retvalue->store != store_return && + (retvalue->store == store_global || vec_size(retvalue->life))) { /* not to be kept in OFS_RETURN */ - if (retvalue->vtype == TYPE_FIELD) - stmt.opcode = field_store_instr[retvalue->vtype]; + if (retvalue->vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS)) + stmt.opcode = field_store_instr[retvalue->fieldtype]; else stmt.opcode = type_store_instr[retvalue->vtype]; stmt.o1.u1 = OFS_RETURN; stmt.o2.u1 = ir_value_code_addr(retvalue); stmt.o3.u1 = 0; - vec_push(code_statements, stmt); + code_push_statement(code, &stmt, instr->context); } continue; } if (instr->opcode == INSTR_STATE) { - 
irerror(block->context, "TODO: state instruction"); - return false; + stmt.opcode = instr->opcode; + if (instr->_ops[0]) + stmt.o1.u1 = ir_value_code_addr(instr->_ops[0]); + if (instr->_ops[1]) + stmt.o2.u1 = ir_value_code_addr(instr->_ops[1]); + stmt.o3.u1 = 0; + code_push_statement(code, &stmt, instr->context); + continue; } stmt.opcode = instr->opcode; @@ -2761,17 +3267,26 @@ tailcall: /* 2-operand instructions with A -> B */ stmt.o2.u1 = stmt.o3.u1; stmt.o3.u1 = 0; - } - vec_push(code_statements, stmt); + /* tiny optimization, don't output + * STORE a, a + */ + if (stmt.o2.u1 == stmt.o1.u1 && + OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) + { + ++opts_optimizationcount[OPTIM_PEEPHOLE]; + continue; + } + } + code_push_statement(code, &stmt, instr->context); } return true; } -static bool gen_function_code(ir_function *self) +static bool gen_function_code(code_t *code, ir_function *self) { ir_block *block; - prog_section_statement stmt; + prog_section_statement_t stmt, *retst; /* Starting from entry point, we generate blocks "as they come" * for now. Dead blocks will not be translated obviously. @@ -2785,34 +3300,49 @@ static bool gen_function_code(ir_function *self) if (block->generated) return true; - if (!gen_blocks_recursive(self, block)) { + if (!gen_blocks_recursive(code, self, block)) { irerror(self->context, "failed to generate blocks for '%s'", self->name); return false; } - /* otherwise code_write crashes since it debug-prints functions until AINSTR_END */ - stmt.opcode = AINSTR_END; - stmt.o1.u1 = 0; - stmt.o2.u1 = 0; - stmt.o3.u1 = 0; - vec_push(code_statements, stmt); + /* code_write and qcvm -disasm need to know that the function ends here */ + retst = &vec_last(code->statements); + if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) && + self->outtype == TYPE_VOID && + retst->opcode == INSTR_RETURN && + !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1) + { + retst->opcode = INSTR_DONE; + ++opts_optimizationcount[OPTIM_VOID_RETURN]; + } else { + lex_ctx_t last; + + stmt.opcode = INSTR_DONE; + stmt.o1.u1 = 0; + stmt.o2.u1 = 0; + stmt.o3.u1 = 0; + last.line = vec_last(code->linenums); + last.column = vec_last(code->columnnums); + + code_push_statement(code, &stmt, last); + } return true; } -static qcint ir_builder_filestring(ir_builder *ir, const char *filename) +static qcint_t ir_builder_filestring(ir_builder *ir, const char *filename) { /* NOTE: filename pointers are copied, we never strdup them, * thus we can use pointer-comparison to find the string. 
*/ size_t i; - qcint str; + qcint_t str; for (i = 0; i < vec_size(ir->filenames); ++i) { if (ir->filenames[i] == filename) return ir->filestrings[i]; } - str = code_genstring(filename); + str = code_genstring(ir->code, filename); vec_push(ir->filenames, filename); vec_push(ir->filestrings, str); return str; @@ -2820,11 +3350,10 @@ static qcint ir_builder_filestring(ir_builder *ir, const char *filename) static bool gen_global_function(ir_builder *ir, ir_value *global) { - prog_section_function fun; - ir_function *irfun; + prog_section_function_t fun; + ir_function *irfun; size_t i; - size_t local_var_end; if (!global->hasvalue || (!global->constval.vfunc)) { @@ -2842,79 +3371,70 @@ static bool gen_global_function(ir_builder *ir, ir_value *global) fun.nargs = 8; for (i = 0;i < 8; ++i) { - if (i >= fun.nargs) + if ((int32_t)i >= fun.nargs) fun.argsize[i] = 0; else - fun.argsize[i] = type_sizeof[irfun->params[i]]; + fun.argsize[i] = type_sizeof_[irfun->params[i]]; } - fun.firstlocal = vec_size(code_globals); - - local_var_end = fun.firstlocal; - for (i = 0; i < vec_size(irfun->locals); ++i) { - if (!ir_builder_gen_global(ir, irfun->locals[i], true)) { - irerror(irfun->locals[i]->context, "Failed to generate local %s", irfun->locals[i]->name); - return false; - } - } - if (vec_size(irfun->locals)) { - ir_value *last = vec_last(irfun->locals); - local_var_end = last->code.globaladdr; - local_var_end += type_sizeof[last->vtype]; - } - for (i = 0; i < vec_size(irfun->values); ++i) - { - /* generate code.globaladdr for ssa values */ - ir_value *v = irfun->values[i]; - ir_value_code_setaddr(v, local_var_end + v->code.local); - } - for (i = 0; i < irfun->allocated_locals; ++i) { - /* fill the locals with zeros */ - vec_push(code_globals, 0); - } - - fun.locals = vec_size(code_globals) - fun.firstlocal; + fun.firstlocal = 0; + fun.locals = irfun->allocated_locals; if (irfun->builtin) - fun.entry = irfun->builtin; + fun.entry = irfun->builtin+1; else { - irfun->code_function_def = vec_size(code_functions); - fun.entry = vec_size(code_statements); + irfun->code_function_def = vec_size(ir->code->functions); + fun.entry = vec_size(ir->code->statements); } - vec_push(code_functions, fun); + vec_push(ir->code->functions, fun); return true; } -static void ir_gen_extparam(ir_builder *ir) +static ir_value* ir_gen_extparam_proto(ir_builder *ir) { - prog_section_def def; - ir_value *global; - char name[128]; + ir_value *global; + char name[128]; - snprintf(name, sizeof(name), "EXTPARM#%i", (int)(vec_size(ir->extparams)+8)); + util_snprintf(name, sizeof(name), "EXTPARM#%i", (int)(vec_size(ir->extparam_protos))); global = ir_value_var(name, store_global, TYPE_VECTOR); - def.name = code_genstring(name); - def.type = TYPE_VECTOR; - def.offset = vec_size(code_globals); + vec_push(ir->extparam_protos, global); + return global; +} + +static void ir_gen_extparam(ir_builder *ir) +{ + prog_section_def_t def; + ir_value *global; + + if (vec_size(ir->extparam_protos) < vec_size(ir->extparams)+1) + global = ir_gen_extparam_proto(ir); + else + global = ir->extparam_protos[vec_size(ir->extparams)]; + + def.name = code_genstring(ir->code, global->name); + def.type = TYPE_VECTOR; + def.offset = vec_size(ir->code->globals); + + vec_push(ir->code->defs, def); - vec_push(code_defs, def); ir_value_code_setaddr(global, def.offset); - vec_push(code_globals, 0); - vec_push(code_globals, 0); - vec_push(code_globals, 0); + + vec_push(ir->code->globals, 0); + vec_push(ir->code->globals, 0); + vec_push(ir->code->globals, 0); 
vec_push(ir->extparams, global); } -static bool gen_function_extparam_copy(ir_function *self) +static bool gen_function_extparam_copy(code_t *code, ir_function *self) { size_t i, ext, numparams; ir_builder *ir = self->owner; ir_value *ep; - prog_section_statement stmt; + prog_section_statement_t stmt; numparams = vec_size(self->params); if (!numparams) @@ -2937,24 +3457,117 @@ static bool gen_function_extparam_copy(ir_function *self) } stmt.o1.u1 = ir_value_code_addr(ep); stmt.o2.u1 = ir_value_code_addr(self->locals[i]); - vec_push(code_statements, stmt); + code_push_statement(code, &stmt, self->context); + } + + return true; +} + +static bool gen_function_varargs_copy(code_t *code, ir_function *self) +{ + size_t i, ext, numparams, maxparams; + + ir_builder *ir = self->owner; + ir_value *ep; + prog_section_statement_t stmt; + + numparams = vec_size(self->params); + if (!numparams) + return true; + + stmt.opcode = INSTR_STORE_V; + stmt.o3.s1 = 0; + maxparams = numparams + self->max_varargs; + for (i = numparams; i < maxparams; ++i) { + if (i < 8) { + stmt.o1.u1 = OFS_PARM0 + 3*i; + stmt.o2.u1 = ir_value_code_addr(self->locals[i]); + code_push_statement(code, &stmt, self->context); + continue; + } + ext = i - 8; + while (ext >= vec_size(ir->extparams)) + ir_gen_extparam(ir); + + ep = ir->extparams[ext]; + + stmt.o1.u1 = ir_value_code_addr(ep); + stmt.o2.u1 = ir_value_code_addr(self->locals[i]); + code_push_statement(code, &stmt, self->context); + } + + return true; +} + +static bool gen_function_locals(ir_builder *ir, ir_value *global) +{ + prog_section_function_t *def; + ir_function *irfun; + size_t i; + uint32_t firstlocal, firstglobal; + + irfun = global->constval.vfunc; + def = ir->code->functions + irfun->code_function_def; + + if (OPTS_OPTION_BOOL(OPTION_G) || + !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS) || + (irfun->flags & IR_FLAG_MASK_NO_OVERLAP)) + { + firstlocal = def->firstlocal = vec_size(ir->code->globals); + } else { + firstlocal = def->firstlocal = ir->first_common_local; + ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS]; } + firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? 
ir->first_common_globaltemp : firstlocal); + + for (i = vec_size(ir->code->globals); i < firstlocal + irfun->allocated_locals; ++i) + vec_push(ir->code->globals, 0); + for (i = 0; i < vec_size(irfun->locals); ++i) { + ir_value *v = irfun->locals[i]; + if (v->locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) { + ir_value_code_setaddr(v, firstlocal + v->code.local); + if (!ir_builder_gen_global(ir, irfun->locals[i], true)) { + irerror(irfun->locals[i]->context, "failed to generate local %s", irfun->locals[i]->name); + return false; + } + } + else + ir_value_code_setaddr(v, firstglobal + v->code.local); + } + for (i = 0; i < vec_size(irfun->values); ++i) + { + ir_value *v = irfun->values[i]; + if (v->callparam) + continue; + if (v->locked) + ir_value_code_setaddr(v, firstlocal + v->code.local); + else + ir_value_code_setaddr(v, firstglobal + v->code.local); + } return true; } static bool gen_global_function_code(ir_builder *ir, ir_value *global) { - prog_section_function *fundef; - ir_function *irfun; + prog_section_function_t *fundef; + ir_function *irfun; (void)ir; irfun = global->constval.vfunc; if (!irfun) { if (global->cvq == CV_NONE) { - irwarning(global->context, WARN_IMPLICIT_FUNCTION_POINTER, - "function `%s` has no body and in QC implicitly becomes a function-pointer", global->name); + if (irwarning(global->context, WARN_IMPLICIT_FUNCTION_POINTER, + "function `%s` has no body and in QC implicitly becomes a function-pointer", + global->name)) + { + /* Not bailing out just now. If this happens a lot you don't want to have + * to rerun gmqcc for each such function. + */ + + /* return false; */ + } } /* this was a function pointer, don't generate code for those */ return true; @@ -2963,44 +3576,156 @@ static bool gen_global_function_code(ir_builder *ir, ir_value *global) if (irfun->builtin) return true; + /* + * If there is no definition and the thing is eraseable, we can ignore + * outputting the function to begin with. 
+ */ + if (global->flags & IR_FLAG_ERASABLE && irfun->code_function_def < 0) { + return true; + } + if (irfun->code_function_def < 0) { irerror(irfun->context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->name); return false; } - fundef = &code_functions[irfun->code_function_def]; + fundef = &ir->code->functions[irfun->code_function_def]; - fundef->entry = vec_size(code_statements); - if (!gen_function_extparam_copy(irfun)) { + fundef->entry = vec_size(ir->code->statements); + if (!gen_function_locals(ir, global)) { + irerror(irfun->context, "Failed to generate locals for function %s", irfun->name); + return false; + } + if (!gen_function_extparam_copy(ir->code, irfun)) { irerror(irfun->context, "Failed to generate extparam-copy code for function %s", irfun->name); return false; } - if (!gen_function_code(irfun)) { + if (irfun->max_varargs && !gen_function_varargs_copy(ir->code, irfun)) { + irerror(irfun->context, "Failed to generate vararg-copy code for function %s", irfun->name); + return false; + } + if (!gen_function_code(ir->code, irfun)) { irerror(irfun->context, "Failed to generate code for function %s", irfun->name); return false; } return true; } +static void gen_vector_defs(code_t *code, prog_section_def_t def, const char *name) +{ + char *component; + size_t len, i; + + if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS)) + return; + + def.type = TYPE_FLOAT; + + len = strlen(name); + + component = (char*)mem_a(len+3); + memcpy(component, name, len); + len += 2; + component[len-0] = 0; + component[len-2] = '_'; + + component[len-1] = 'x'; + + for (i = 0; i < 3; ++i) { + def.name = code_genstring(code, component); + vec_push(code->defs, def); + def.offset++; + component[len-1]++; + } + + mem_d(component); +} + +static void gen_vector_fields(code_t *code, prog_section_field_t fld, const char *name) +{ + char *component; + size_t len, i; + + if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS)) + return; + + fld.type = TYPE_FLOAT; + + len = strlen(name); + + component = (char*)mem_a(len+3); + memcpy(component, name, len); + len += 2; + component[len-0] = 0; + component[len-2] = '_'; + + component[len-1] = 'x'; + + for (i = 0; i < 3; ++i) { + fld.name = code_genstring(code, component); + vec_push(code->fields, fld); + fld.offset++; + component[len-1]++; + } + + mem_d(component); +} + static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool islocal) { - size_t i; - int32_t *iptr; - prog_section_def def; + size_t i; + int32_t *iptr; + prog_section_def_t def; + bool pushdef = opts.optimizeoff; + + /* we don't generate split-vectors */ + if (global->vtype == TYPE_VECTOR && (global->flags & IR_FLAG_SPLIT_VECTOR)) + return true; def.type = global->vtype; - def.offset = vec_size(code_globals); + def.offset = vec_size(self->code->globals); + def.name = 0; + if (OPTS_OPTION_BOOL(OPTION_G) || !islocal) + { + pushdef = true; + + /* + * if we're eraseable and the function isn't referenced ignore outputting + * the function. 
+ */ + if (global->flags & IR_FLAG_ERASABLE && vec_size(global->reads) == 0) { + return true; + } + + if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) && + !(global->flags & IR_FLAG_INCLUDE_DEF) && + (global->name[0] == '#' || global->cvq == CV_CONST)) + { + pushdef = false; + } - if (global->name) { - if (global->name[0] == '#') { - if (!self->str_immediate) - self->str_immediate = code_genstring("IMMEDIATE"); - def.name = global->code.name = self->str_immediate; + if (pushdef) { + if (global->name[0] == '#') { + if (!self->str_immediate) + self->str_immediate = code_genstring(self->code, "IMMEDIATE"); + def.name = global->code.name = self->str_immediate; + } + else + def.name = global->code.name = code_genstring(self->code, global->name); } else - def.name = global->code.name = code_genstring(global->name); + def.name = 0; + if (islocal) { + def.offset = ir_value_code_addr(global); + vec_push(self->code->defs, def); + if (global->vtype == TYPE_VECTOR) + gen_vector_defs(self->code, def, global->name); + else if (global->vtype == TYPE_FIELD && global->fieldtype == TYPE_VECTOR) + gen_vector_defs(self->code, def, global->name); + return true; + } } - else - def.name = 0; + if (islocal) + return true; switch (global->vtype) { @@ -3014,104 +3739,119 @@ static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool isloc /* TODO: same as above but for entity-fields rather than globsl */ } - else - irwarning(global->context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`", - global->name); + else if(irwarning(global->context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`", + global->name)) + { + /* Not bailing out */ + /* return false; */ + } /* I'd argue setting it to 0 is sufficient, but maybe some depend on knowing how far * the system fields actually go? Though the engine knows this anyway... 
* Maybe this could be an -foption * fteqcc creates data for end_sys_* - of size 1, so let's do the same */ - ir_value_code_setaddr(global, vec_size(code_globals)); - vec_push(code_globals, 0); + ir_value_code_setaddr(global, vec_size(self->code->globals)); + vec_push(self->code->globals, 0); /* Add the def */ - vec_push(code_defs, def); + if (pushdef) vec_push(self->code->defs, def); return true; case TYPE_POINTER: - vec_push(code_defs, def); - return gen_global_pointer(global); + if (pushdef) vec_push(self->code->defs, def); + return gen_global_pointer(self->code, global); case TYPE_FIELD: - vec_push(code_defs, def); - return gen_global_field(global); + if (pushdef) { + vec_push(self->code->defs, def); + if (global->fieldtype == TYPE_VECTOR) + gen_vector_defs(self->code, def, global->name); + } + return gen_global_field(self->code, global); case TYPE_ENTITY: /* fall through */ case TYPE_FLOAT: { - ir_value_code_setaddr(global, vec_size(code_globals)); + ir_value_code_setaddr(global, vec_size(self->code->globals)); if (global->hasvalue) { + if (global->cvq == CV_CONST && !vec_size(global->reads)) + return true; iptr = (int32_t*)&global->constval.ivec[0]; - vec_push(code_globals, *iptr); + vec_push(self->code->globals, *iptr); } else { - vec_push(code_globals, 0); - if (!islocal) - def.type |= DEF_SAVEGLOBAL; + vec_push(self->code->globals, 0); } - vec_push(code_defs, def); + if (!islocal && global->cvq != CV_CONST) + def.type |= DEF_SAVEGLOBAL; + if (pushdef) vec_push(self->code->defs, def); return global->code.globaladdr >= 0; } case TYPE_STRING: { - ir_value_code_setaddr(global, vec_size(code_globals)); + ir_value_code_setaddr(global, vec_size(self->code->globals)); if (global->hasvalue) { - vec_push(code_globals, code_genstring(global->constval.vstring)); + uint32_t load; + if (global->cvq == CV_CONST && !vec_size(global->reads)) + return true; + load = code_genstring(self->code, global->constval.vstring); + vec_push(self->code->globals, load); } else { - vec_push(code_globals, 0); - if (!islocal) - def.type |= DEF_SAVEGLOBAL; + vec_push(self->code->globals, 0); } - vec_push(code_defs, def); + if (!islocal && global->cvq != CV_CONST) + def.type |= DEF_SAVEGLOBAL; + if (pushdef) vec_push(self->code->defs, def); return global->code.globaladdr >= 0; } case TYPE_VECTOR: { size_t d; - ir_value_code_setaddr(global, vec_size(code_globals)); + ir_value_code_setaddr(global, vec_size(self->code->globals)); if (global->hasvalue) { iptr = (int32_t*)&global->constval.ivec[0]; - vec_push(code_globals, iptr[0]); + vec_push(self->code->globals, iptr[0]); if (global->code.globaladdr < 0) return false; - for (d = 1; d < type_sizeof[global->vtype]; ++d) - { - vec_push(code_globals, iptr[d]); + for (d = 1; d < type_sizeof_[global->vtype]; ++d) { + vec_push(self->code->globals, iptr[d]); } } else { - vec_push(code_globals, 0); + vec_push(self->code->globals, 0); if (global->code.globaladdr < 0) return false; - for (d = 1; d < type_sizeof[global->vtype]; ++d) - { - vec_push(code_globals, 0); + for (d = 1; d < type_sizeof_[global->vtype]; ++d) { + vec_push(self->code->globals, 0); } - if (!islocal) - def.type |= DEF_SAVEGLOBAL; } + if (!islocal && global->cvq != CV_CONST) + def.type |= DEF_SAVEGLOBAL; - vec_push(code_defs, def); + if (pushdef) { + vec_push(self->code->defs, def); + def.type &= ~DEF_SAVEGLOBAL; + gen_vector_defs(self->code, def, global->name); + } return global->code.globaladdr >= 0; } case TYPE_FUNCTION: - ir_value_code_setaddr(global, vec_size(code_globals)); + ir_value_code_setaddr(global, 
vec_size(self->code->globals)); if (!global->hasvalue) { - vec_push(code_globals, 0); + vec_push(self->code->globals, 0); if (global->code.globaladdr < 0) return false; } else { - vec_push(code_globals, vec_size(code_functions)); + vec_push(self->code->globals, vec_size(self->code->functions)); if (!gen_global_function(self, global)) return false; - if (!islocal) - def.type |= DEF_SAVEGLOBAL; } - vec_push(code_defs, def); + if (!islocal && global->cvq != CV_CONST) + def.type |= DEF_SAVEGLOBAL; + if (pushdef) vec_push(self->code->defs, def); return true; case TYPE_VARIANT: /* assume biggest type */ - ir_value_code_setaddr(global, vec_size(code_globals)); - vec_push(code_globals, 0); - for (i = 1; i < type_sizeof[TYPE_VARIANT]; ++i) - vec_push(code_globals, 0); + ir_value_code_setaddr(global, vec_size(self->code->globals)); + vec_push(self->code->globals, 0); + for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i) + vec_push(self->code->globals, 0); return true; default: /* refuse to create 'void' type or any other fancy business. */ @@ -3121,18 +3861,23 @@ static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool isloc } } +static GMQCC_INLINE void ir_builder_prepare_field(code_t *code, ir_value *field) +{ + field->code.fieldaddr = code_alloc_field(code, type_sizeof_[field->fieldtype]); +} + static bool ir_builder_gen_field(ir_builder *self, ir_value *field) { - prog_section_def def; - prog_section_field fld; + prog_section_def_t def; + prog_section_field_t fld; (void)self; def.type = (uint16_t)field->vtype; - def.offset = (uint16_t)vec_size(code_globals); + def.offset = (uint16_t)vec_size(self->code->globals); /* create a global named the same as the field */ - if (opts_standard == COMPILER_GMQCC) { + if (OPTS_OPTION_U32(OPTION_STANDARD) == COMPILER_GMQCC) { /* in our standard, the global gets a dot prefix */ size_t len = strlen(field->name); char name[1024]; @@ -3149,7 +3894,7 @@ static bool ir_builder_gen_field(ir_builder *self, ir_value *field) memcpy(name+1, field->name, len); /* no strncpy - we used strlen above */ name[len+1] = 0; - def.name = code_genstring(name); + def.name = code_genstring(self->code, name); fld.name = def.name + 1; /* we reuse that string table entry */ } else { /* in plain QC, there cannot be a global with the same name, @@ -3157,13 +3902,13 @@ static bool ir_builder_gen_field(ir_builder *self, ir_value *field) * FIXME: fteqcc should create a global as well * check if it actually uses the same name. 
Probably does */ - def.name = code_genstring(field->name); + def.name = code_genstring(self->code, field->name); fld.name = def.name; } field->code.name = def.name; - vec_push(code_defs, def); + vec_push(self->code->defs, def); fld.type = field->fieldtype; @@ -3172,32 +3917,152 @@ static bool ir_builder_gen_field(ir_builder *self, ir_value *field) return false; } - fld.offset = code_alloc_field(type_sizeof[field->fieldtype]); + fld.offset = field->code.fieldaddr; - vec_push(code_fields, fld); + vec_push(self->code->fields, fld); - ir_value_code_setaddr(field, vec_size(code_globals)); - vec_push(code_globals, fld.offset); + ir_value_code_setaddr(field, vec_size(self->code->globals)); + vec_push(self->code->globals, fld.offset); if (fld.type == TYPE_VECTOR) { - vec_push(code_globals, fld.offset+1); - vec_push(code_globals, fld.offset+2); + vec_push(self->code->globals, fld.offset+1); + vec_push(self->code->globals, fld.offset+2); + } + + if (field->fieldtype == TYPE_VECTOR) { + gen_vector_defs (self->code, def, field->name); + gen_vector_fields(self->code, fld, field->name); } return field->code.globaladdr >= 0; } +static void ir_builder_collect_reusables(ir_builder *builder) { + size_t i; + ir_value **reusables = NULL; + for (i = 0; i < vec_size(builder->globals); ++i) { + ir_value *value = builder->globals[i]; + if (value->vtype != TYPE_FLOAT || !value->hasvalue) + continue; + if (value->cvq == CV_CONST || (value->name && value->name[0] == '#')) { + vec_push(reusables, value); + } + } + builder->const_floats = reusables; +} + +static void ir_builder_split_vector(ir_builder *self, ir_value *vec) { + size_t i, count; + ir_value* found[3] = { NULL, NULL, NULL }; + + /* must not be written to */ + if (vec_size(vec->writes)) + return; + /* must not be trying to access individual members */ + if (vec->members[0] || vec->members[1] || vec->members[2]) + return; + /* should be actually used otherwise it won't be generated anyway */ + count = vec_size(vec->reads); + if (!count) + return; + + /* may only be used directly as function parameters, so if we find some other instruction cancel */ + for (i = 0; i != count; ++i) { + /* we only split vectors if they're used directly as parameter to a call only! */ + ir_instr *user = vec->reads[i]; + if ((user->opcode < INSTR_CALL0 || user->opcode > INSTR_CALL8) && user->opcode != VINSTR_NRCALL) + return; + } + + vec->flags |= IR_FLAG_SPLIT_VECTOR; + + /* find existing floats making up the split */ + count = vec_size(self->const_floats); + for (i = 0; i != count; ++i) { + ir_value *c = self->const_floats[i]; + if (!found[0] && c->constval.vfloat == vec->constval.vvec.x) + found[0] = c; + if (!found[1] && c->constval.vfloat == vec->constval.vvec.y) + found[1] = c; + if (!found[2] && c->constval.vfloat == vec->constval.vvec.z) + found[2] = c; + if (found[0] && found[1] && found[2]) + break; + } + + /* generate floats for not yet found components */ + if (!found[0]) + found[0] = ir_builder_imm_float(self, vec->constval.vvec.x, true); + if (!found[1]) { + if (vec->constval.vvec.y == vec->constval.vvec.x) + found[1] = found[0]; + else + found[1] = ir_builder_imm_float(self, vec->constval.vvec.y, true); + } + if (!found[2]) { + if (vec->constval.vvec.z == vec->constval.vvec.x) + found[2] = found[0]; + else if (vec->constval.vvec.z == vec->constval.vvec.y) + found[2] = found[1]; + else + found[2] = ir_builder_imm_float(self, vec->constval.vvec.z, true); + } + + /* the .members array should be safe to use here. 
*/ + vec->members[0] = found[0]; + vec->members[1] = found[1]; + vec->members[2] = found[2]; + + /* register the readers for these floats */ + count = vec_size(vec->reads); + for (i = 0; i != count; ++i) { + vec_push(found[0]->reads, vec->reads[i]); + vec_push(found[1]->reads, vec->reads[i]); + vec_push(found[2]->reads, vec->reads[i]); + } +} + +static void ir_builder_split_vectors(ir_builder *self) { + size_t i, count = vec_size(self->globals); + for (i = 0; i != count; ++i) { + ir_value *v = self->globals[i]; + if (v->vtype != TYPE_VECTOR || !v->name || v->name[0] != '#') + continue; + ir_builder_split_vector(self, self->globals[i]); + } +} + bool ir_builder_generate(ir_builder *self, const char *filename) { - prog_section_statement stmt; + prog_section_statement_t stmt; size_t i; + char *lnofile = NULL; + + if (OPTS_FLAG(SPLIT_VECTOR_PARAMETERS)) { + ir_builder_collect_reusables(self); + if (vec_size(self->const_floats) > 0) + ir_builder_split_vectors(self); + } - code_init(); + for (i = 0; i < vec_size(self->fields); ++i) + { + ir_builder_prepare_field(self->code, self->fields[i]); + } for (i = 0; i < vec_size(self->globals); ++i) { if (!ir_builder_gen_global(self, self->globals[i], false)) { return false; } + if (self->globals[i]->vtype == TYPE_FUNCTION) { + ir_function *func = self->globals[i]->constval.vfunc; + if (func && self->max_locals < func->allocated_locals && + !(func->flags & IR_FLAG_MASK_NO_OVERLAP)) + { + self->max_locals = func->allocated_locals; + } + if (func && self->max_globaltemps < func->globaltemps) + self->max_globaltemps = func->globaltemps; + } } for (i = 0; i < vec_size(self->fields); ++i) @@ -3207,6 +4072,31 @@ bool ir_builder_generate(ir_builder *self, const char *filename) } } + /* generate nil */ + ir_value_code_setaddr(self->nil, vec_size(self->code->globals)); + vec_push(self->code->globals, 0); + vec_push(self->code->globals, 0); + vec_push(self->code->globals, 0); + + /* generate virtual-instruction temps */ + for (i = 0; i < IR_MAX_VINSTR_TEMPS; ++i) { + ir_value_code_setaddr(self->vinstr_temp[i], vec_size(self->code->globals)); + vec_push(self->code->globals, 0); + vec_push(self->code->globals, 0); + vec_push(self->code->globals, 0); + } + + /* generate global temps */ + self->first_common_globaltemp = vec_size(self->code->globals); + for (i = 0; i < self->max_globaltemps; ++i) { + vec_push(self->code->globals, 0); + } + /* generate common locals */ + self->first_common_local = vec_size(self->code->globals); + for (i = 0; i < self->max_locals; ++i) { + vec_push(self->code->globals, 0); + } + /* generate function code */ for (i = 0; i < vec_size(self->globals); ++i) { @@ -3217,24 +4107,54 @@ bool ir_builder_generate(ir_builder *self, const char *filename) } } - if (vec_size(code_globals) >= 65536) { - irerror(vec_last(self->globals)->context, "This progs file would require more globals than the metadata can handle. Bailing out."); + if (vec_size(self->code->globals) >= 65536) { + irerror(vec_last(self->globals)->context, "This progs file would require more globals than the metadata can handle (%u). Bailing out.", (unsigned int)vec_size(self->code->globals)); return false; } - /* DP errors if the last instruction is not an INSTR_DONE - * and for debugging purposes we add an additional AINSTR_END - * to the end of functions, so here it goes: - */ - stmt.opcode = INSTR_DONE; - stmt.o1.u1 = 0; - stmt.o2.u1 = 0; - stmt.o3.u1 = 0; - vec_push(code_statements, stmt); + /* DP errors if the last instruction is not an INSTR_DONE. 
*/ + if (vec_last(self->code->statements).opcode != INSTR_DONE) + { + lex_ctx_t last; + + stmt.opcode = INSTR_DONE; + stmt.o1.u1 = 0; + stmt.o2.u1 = 0; + stmt.o3.u1 = 0; + last.line = vec_last(self->code->linenums); + last.column = vec_last(self->code->columnnums); + + code_push_statement(self->code, &stmt, last); + } + + if (OPTS_OPTION_BOOL(OPTION_PP_ONLY)) + return true; + + if (vec_size(self->code->statements) != vec_size(self->code->linenums)) { + con_err("Linecounter wrong: %lu != %lu\n", + (unsigned long)vec_size(self->code->statements), + (unsigned long)vec_size(self->code->linenums)); + } else if (OPTS_FLAG(LNO)) { + char *dot; + size_t filelen = strlen(filename); + + memcpy(vec_add(lnofile, filelen+1), filename, filelen+1); + dot = strrchr(lnofile, '.'); + if (!dot) { + vec_pop(lnofile); + } else { + vec_shrinkto(lnofile, dot - lnofile); + } + memcpy(vec_add(lnofile, 5), ".lno", 5); + } + + if (!code_write(self->code, filename, lnofile)) { + vec_free(lnofile); + return false; + } - if (!opts_pp_only) - con_out("writing '%s'...\n", filename); - return code_write(filename); + vec_free(lnofile); + return true; } /*********************************************************************** @@ -3243,20 +4163,27 @@ bool ir_builder_generate(ir_builder *self, const char *filename) #define IND_BUFSZ 1024 -#ifdef WIN32 -# define strncat(dst, src, sz) strncat_s(dst, sz, src, _TRUNCATE) -#endif - -const char *qc_opname(int op) +static const char *qc_opname(int op) { if (op < 0) return ""; - if (op < (int)( sizeof(asm_instr) / sizeof(asm_instr[0]) )) - return asm_instr[op].m; + if (op < VINSTR_END) + return util_instr_str[op]; switch (op) { - case VINSTR_PHI: return "PHI"; - case VINSTR_JUMP: return "JUMP"; - case VINSTR_COND: return "COND"; - default: return ""; + case VINSTR_END: return "END"; + case VINSTR_PHI: return "PHI"; + case VINSTR_JUMP: return "JUMP"; + case VINSTR_COND: return "COND"; + case VINSTR_BITXOR: return "BITXOR"; + case VINSTR_BITAND_V: return "BITAND_V"; + case VINSTR_BITOR_V: return "BITOR_V"; + case VINSTR_BITXOR_V: return "BITXOR_V"; + case VINSTR_BITAND_VF: return "BITAND_VF"; + case VINSTR_BITOR_VF: return "BITOR_VF"; + case VINSTR_BITXOR_VF: return "BITXOR_VF"; + case VINSTR_CROSS: return "CROSS"; + case VINSTR_NEG_F: return "NEG_F"; + case VINSTR_NEG_V: return "NEG_V"; + default: return ""; } } @@ -3281,6 +4208,10 @@ void ir_builder_dump(ir_builder *b, int (*oprintf)(const char*, ...)) oprintf("endmodule %s\n", b->name); } +static const char *storenames[] = { + "[global]", "[local]", "[param]", "[value]", "[return]" +}; + void ir_function_dump(ir_function *f, char *ind, int (*oprintf)(const char*, ...)) { @@ -3290,7 +4221,7 @@ void ir_function_dump(ir_function *f, char *ind, return; } oprintf("%sfunction %s\n", ind, f->name); - strncat(ind, "\t", IND_BUFSZ); + util_strncat(ind, "\t", IND_BUFSZ-1); if (vec_size(f->locals)) { oprintf("%s%i locals:\n", ind, (int)vec_size(f->locals)); @@ -3302,30 +4233,77 @@ void ir_function_dump(ir_function *f, char *ind, } oprintf("%sliferanges:\n", ind); for (i = 0; i < vec_size(f->locals); ++i) { - size_t l; + const char *attr = ""; + size_t l, m; ir_value *v = f->locals[i]; - oprintf("%s\t%s: unique ", ind, v->name); + if (v->unique_life && v->locked) + attr = "unique,locked "; + else if (v->unique_life) + attr = "unique "; + else if (v->locked) + attr = "locked "; + oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->name, type_name[v->vtype], + storenames[v->store], + attr, (v->callparam ? 
"callparam " : ""), + (int)v->code.local); + if (!v->life) + oprintf("[null]"); for (l = 0; l < vec_size(v->life); ++l) { oprintf("[%i,%i] ", v->life[l].start, v->life[l].end); } oprintf("\n"); + for (m = 0; m < 3; ++m) { + ir_value *vm = v->members[m]; + if (!vm) + continue; + oprintf("%s\t%s: @%i ", ind, vm->name, (int)vm->code.local); + for (l = 0; l < vec_size(vm->life); ++l) { + oprintf("[%i,%i] ", vm->life[l].start, vm->life[l].end); + } + oprintf("\n"); + } } for (i = 0; i < vec_size(f->values); ++i) { - size_t l; + const char *attr = ""; + size_t l, m; ir_value *v = f->values[i]; - oprintf("%s\t%s: @%i ", ind, v->name, (int)v->code.local); + if (v->unique_life && v->locked) + attr = "unique,locked "; + else if (v->unique_life) + attr = "unique "; + else if (v->locked) + attr = "locked "; + oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->name, type_name[v->vtype], + storenames[v->store], + attr, (v->callparam ? "callparam " : ""), + (int)v->code.local); + if (!v->life) + oprintf("[null]"); for (l = 0; l < vec_size(v->life); ++l) { oprintf("[%i,%i] ", v->life[l].start, v->life[l].end); } oprintf("\n"); + for (m = 0; m < 3; ++m) { + ir_value *vm = v->members[m]; + if (!vm) + continue; + if (vm->unique_life && vm->locked) + attr = "unique,locked "; + else if (vm->unique_life) + attr = "unique "; + else if (vm->locked) + attr = "locked "; + oprintf("%s\t%s: %s@%i ", ind, vm->name, attr, (int)vm->code.local); + for (l = 0; l < vec_size(vm->life); ++l) { + oprintf("[%i,%i] ", vm->life[l].start, vm->life[l].end); + } + oprintf("\n"); + } } if (vec_size(f->blocks)) { - oprintf("%slife passes (check): %i\n", ind, (int)f->run_id); + oprintf("%slife passes: %i\n", ind, (int)f->run_id); for (i = 0; i < vec_size(f->blocks); ++i) { - if (f->blocks[i]->run_id != f->run_id) { - oprintf("%slife pass check fail! %i != %i\n", ind, (int)f->blocks[i]->run_id, (int)f->run_id); - } ir_block_dump(f->blocks[i], ind, oprintf); } @@ -3339,14 +4317,16 @@ void ir_block_dump(ir_block* b, char *ind, { size_t i; oprintf("%s:%s\n", ind, b->label); - strncat(ind, "\t", IND_BUFSZ); + util_strncat(ind, "\t", IND_BUFSZ-1); + if (b->instr && b->instr[0]) + oprintf("%s (%i) [entry]\n", ind, (int)(b->instr[0]->eid-1)); for (i = 0; i < vec_size(b->instr); ++i) ir_instr_dump(b->instr[i], ind, oprintf); ind[strlen(ind)-1] = 0; } -void dump_phi(ir_instr *in, int (*oprintf)(const char*, ...)) +static void dump_phi(ir_instr *in, int (*oprintf)(const char*, ...)) { size_t i; oprintf("%s <- phi ", in->_ops[0]->name); @@ -3371,14 +4351,14 @@ void ir_instr_dump(ir_instr *in, char *ind, return; } - strncat(ind, "\t", IND_BUFSZ); + util_strncat(ind, "\t", IND_BUFSZ-1); if (in->_ops[0] && (in->_ops[1] || in->_ops[2])) { ir_value_dump(in->_ops[0], oprintf); if (in->_ops[1] || in->_ops[2]) oprintf(" <- "); } - if (in->opcode == INSTR_CALL0) { + if (in->opcode == INSTR_CALL0 || in->opcode == VINSTR_NRCALL) { oprintf("CALL%i\t", vec_size(in->params)); } else oprintf("%s\t", qc_opname(in->opcode)); @@ -3416,7 +4396,7 @@ void ir_instr_dump(ir_instr *in, char *ind, ind[strlen(ind)-1] = 0; } -void ir_value_dump_string(const char *str, int (*oprintf)(const char*, ...)) +static void ir_value_dump_string(const char *str, int (*oprintf)(const char*, ...)) { oprintf("\""); for (; *str; ++str) {