/***********************************************************************
 * Type sizes used at multiple points in the IR codegen
 */
const char *type_name[TYPE_COUNT] = {
static size_t type_sizeof_[TYPE_COUNT] = {
    1, /* TYPE_FUNCTION */
const uint16_t type_store_instr[TYPE_COUNT] = {
    INSTR_STORE_F, /* should use I once we have integer support */
    INSTR_STORE_ENT, /* should use I */
    INSTR_STORE_I, /* integer type */
    INSTR_STORE_V, /* variant, should never be accessed */
    VINSTR_END, /* struct */
    VINSTR_END, /* union */
    VINSTR_END, /* array */
    VINSTR_END, /* noexpr */
const uint16_t field_store_instr[TYPE_COUNT] = {
    INSTR_STORE_FLD, /* integer type */
    INSTR_STORE_V, /* variant, should never be accessed */
    VINSTR_END, /* struct */
    VINSTR_END, /* union */
    VINSTR_END, /* array */
    VINSTR_END, /* noexpr */
const uint16_t type_storep_instr[TYPE_COUNT] = {
    INSTR_STOREP_F, /* should use I once we have integer support */
    INSTR_STOREP_ENT, /* should use I */
    INSTR_STOREP_ENT, /* integer type */
    INSTR_STOREP_V, /* variant, should never be accessed */
    VINSTR_END, /* struct */
    VINSTR_END, /* union */
    VINSTR_END, /* array */
    VINSTR_END, /* nil */
    VINSTR_END, /* noexpr */
const uint16_t type_eq_instr[TYPE_COUNT] = {
    INSTR_EQ_F, /* should use I once we have integer support */
    INSTR_EQ_E, /* FLD has no comparison */
    INSTR_EQ_E, /* should use I */
    INSTR_EQ_V, /* variant, should never be accessed */
    VINSTR_END, /* struct */
    VINSTR_END, /* union */
    VINSTR_END, /* array */
    VINSTR_END, /* nil */
    VINSTR_END, /* noexpr */
const uint16_t type_ne_instr[TYPE_COUNT] = {
    INSTR_NE_F, /* should use I once we have integer support */
    INSTR_NE_E, /* FLD has no comparison */
    INSTR_NE_E, /* should use I */
    INSTR_NE_V, /* variant, should never be accessed */
    VINSTR_END, /* struct */
    VINSTR_END, /* union */
    VINSTR_END, /* array */
    VINSTR_END, /* nil */
    VINSTR_END, /* noexpr */
const uint16_t type_not_instr[TYPE_COUNT] = {
    INSTR_NOT_F, /* should use I once we have integer support */
    VINSTR_END, /* not to be used, depends on string-related -f flags */
    INSTR_NOT_ENT, /* should use I */
    INSTR_NOT_I, /* integer type */
    INSTR_NOT_V, /* variant, should never be accessed */
    VINSTR_END, /* struct */
    VINSTR_END, /* union */
    VINSTR_END, /* array */
    VINSTR_END, /* nil */
    VINSTR_END, /* noexpr */
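/* These tables are indexed by qc_type (hence their TYPE_COUNT entries),
 * e.g. type_store_instr[TYPE_VECTOR] yields INSTR_STORE_V. VINSTR_END
 * doubles as a "no such instruction" sentinel for types (struct, union,
 * array, nil, noexpr) that can never be stored or compared directly. */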
static void ir_function_dump(ir_function*, char *ind, int (*oprintf)(const char*,...));
static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t, const char *label,
                                               int op, ir_value *a, ir_value *b, qc_type outype);
static bool GMQCC_WARN ir_block_create_store(ir_block*, lex_ctx_t, ir_value *target, ir_value *what);
static void ir_block_dump(ir_block*, char *ind, int (*oprintf)(const char*,...));
static bool ir_instr_op(ir_instr*, int op, ir_value *value, bool writing);
static void ir_instr_dump(ir_instr* in, char *ind, int (*oprintf)(const char*,...));
/* error functions */
static void irerror(lex_ctx_t ctx, const char *msg, ...)
    con_cvprintmsg(ctx, LVL_ERROR, "internal error", msg, ap);
static bool GMQCC_WARN irwarning(lex_ctx_t ctx, int warntype, const char *fmt, ...)
    r = vcompile_warning(ctx, warntype, fmt, ap);
/***********************************************************************
 * Vector utility functions
 */
static bool GMQCC_WARN vec_ir_value_find(std::vector<ir_value *> &vec, const ir_value *what, size_t *idx)
    for (auto &it : vec) {
            *idx = &it - &vec[0];
static bool GMQCC_WARN vec_ir_block_find(ir_block **vec, ir_block *what, size_t *idx)
    size_t len = vec_size(vec);
    for (i = 0; i < len; ++i) {
        if (vec[i] == what) {
static bool GMQCC_WARN vec_ir_instr_find(std::vector<ir_instr *> &vec, ir_instr *what, size_t *idx)
    for (auto &it : vec) {
            *idx = &it - &vec[0];
/***********************************************************************
 * IR Builder
 */
static void ir_block_delete_quick(ir_block* self);
static void ir_instr_delete_quick(ir_instr *self);
static void ir_function_delete_quick(ir_function *self);
ir_builder::ir_builder(const std::string& modulename)
    : m_name(modulename),
    m_htglobals = util_htnew(IR_HT_SIZE);
    m_htfields = util_htnew(IR_HT_SIZE);
    m_htfunctions = util_htnew(IR_HT_SIZE);
    m_nil = new ir_value("nil", store_value, TYPE_NIL);
    m_nil->m_cvq = CV_CONST;
    for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
        /* We write to these, but they're not supposed to be used outside
         * the IR, so don't allow the generation of ir_instrs which use
         * them: make each one a constant noexpr.
         */
        m_vinstr_temp[i] = new ir_value("vinstr_temp", store_value, TYPE_NOEXPR);
        m_vinstr_temp[i]->m_cvq = CV_CONST;
ir_builder::~ir_builder()
    util_htdel(m_htglobals);
    util_htdel(m_htfields);
    util_htdel(m_htfunctions);
    for (auto& f : m_functions)
        ir_function_delete_quick(f.release());
    m_functions.clear(); // delete them now, before deleting the rest:
    for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
        delete m_vinstr_temp[i];
    m_extparam_protos.clear();
ir_function* ir_builder::createFunction(const std::string& name, qc_type outtype)
    ir_function *fn = (ir_function*)util_htget(m_htfunctions, name.c_str());
    fn = new ir_function(this, outtype);
    m_functions.emplace_back(fn);
    util_htset(m_htfunctions, name.c_str(), fn);
    fn->m_value = createGlobal(fn->m_name, TYPE_FUNCTION);
    fn->m_value->m_hasvalue = true;
    fn->m_value->m_outtype = outtype;
    fn->m_value->m_constval.vfunc = fn;
    fn->m_value->m_context = fn->m_context;
ir_value* ir_builder::createGlobal(const std::string& name, qc_type vtype)
    ve = (ir_value*)util_htget(m_htglobals, name.c_str());
    ve = new ir_value(std::string(name), store_global, vtype);
    m_globals.emplace_back(ve);
    util_htset(m_htglobals, name.c_str(), ve);
ir_value* ir_builder::get_va_count()
    if (m_reserved_va_count)
        return m_reserved_va_count;
    return (m_reserved_va_count = createGlobal("reserved:va_count", TYPE_FLOAT));
ir_value* ir_builder::createField(const std::string& name, qc_type vtype)
    ir_value *ve = (ir_value*)util_htget(m_htfields, name.c_str());
    ve = new ir_value(std::string(name), store_global, TYPE_FIELD);
    ve->m_fieldtype = vtype;
    m_fields.emplace_back(ve);
    util_htset(m_htfields, name.c_str(), ve);
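/* Note: createFunction, createGlobal and createField behave like
 * interning constructors: the util_htget lookup returns the existing
 * object when the name is already known, so each distinct name maps to
 * exactly one ir_function/ir_value. */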
/***********************************************************************
 * IR Function
 */
static bool ir_function_naive_phi(ir_function*);
static void ir_function_enumerate(ir_function*);
static bool ir_function_calculate_liferanges(ir_function*);
static bool ir_function_allocate_locals(ir_function*);
ir_function::ir_function(ir_builder* owner_, qc_type outtype_)
    m_name("<@unnamed>"),
    m_context.file = "<@no context>";
ir_function::~ir_function()
static void ir_function_delete_quick(ir_function *self)
    for (auto& b : self->m_blocks)
        ir_block_delete_quick(b.release());
static void ir_function_collect_value(ir_function *self, ir_value *v)
    self->m_values.emplace_back(v);
ir_block* ir_function_create_block(lex_ctx_t ctx, ir_function *self, const char *label)
    ir_block* bn = new ir_block(self, label ? std::string(label) : std::string());
    self->m_blocks.emplace_back(bn);
    if ((self->m_flags & IR_FLAG_BLOCK_COVERAGE) && self->m_owner->m_coverage_func)
        (void)ir_block_create_call(bn, ctx, nullptr, self->m_owner->m_coverage_func, false);
static bool instr_is_operation(uint16_t op)
    return ( (op >= INSTR_MUL_F && op <= INSTR_GT) ||
             (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) ||
             (op == INSTR_ADDRESS) ||
             (op >= INSTR_NOT_F && op <= INSTR_NOT_FNC) ||
             (op >= INSTR_AND && op <= INSTR_BITOR) ||
             (op >= INSTR_CALL0 && op <= INSTR_CALL8) ||
             (op >= VINSTR_BITAND_V && op <= VINSTR_NEG_V) );
static bool ir_function_pass_peephole(ir_function *self)
    for (auto& bp : self->m_blocks) {
        ir_block *block = bp.get();
        for (size_t i = 0; i < vec_size(block->m_instr); ++i) {
            inst = block->m_instr[i];
                (inst->m_opcode >= INSTR_STORE_F &&
                 inst->m_opcode <= INSTR_STORE_FNC))
                oper = block->m_instr[i-1];
                if (!instr_is_operation(oper->m_opcode))
                /* Don't change the semantics of MUL_VF in engines where
                 * the operands may not alias. */
                if (OPTS_FLAG(LEGACY_VECTOR_MATHS)) {
                    if (oper->m_opcode == INSTR_MUL_VF && oper->_m_ops[2]->m_memberof == oper->_m_ops[1])
                    if (oper->m_opcode == INSTR_MUL_FV && oper->_m_ops[1]->m_memberof == oper->_m_ops[2])
                value = oper->_m_ops[0];
                /* only do it for SSA values */
                if (value->m_store != store_value)
                /* don't optimize out the temp if it's used again later */
                if (value->m_reads.size() != 1)
                /* The very next store must use this value */
                if (value->m_reads[0] != store)
                /* And of course the store must _read_ from it, so it's in
                 * its list of reads */
                if (store->_m_ops[1] != value)
                ++opts_optimizationcount[OPTIM_PEEPHOLE];
                (void)!ir_instr_op(oper, 0, store->_m_ops[0], true);
                vec_remove(block->m_instr, i, 1);
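                /* Illustration of the fold above, on IR of the assumed shape:
                 *   MUL_F %a, %b -> %tmp
                 *   STORE %tmp   -> %x
                 * becomes
                 *   MUL_F %a, %b -> %x
                 * operand 0 of the operation is retargeted at the store's
                 * destination and the now-redundant STORE is removed. */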
            else if (inst->m_opcode == VINSTR_COND)
                /* A COND operating on a value that results from a NOT can
                 * drop the NOT and swap the COND's branch targets instead.
                 */
                value = inst->_m_ops[0];
                if (value->m_store != store_value || value->m_reads.size() != 1 || value->m_reads[0] != inst)
                inot = value->m_writes[0];
                if (inot->_m_ops[0] != value ||
                    inot->m_opcode < INSTR_NOT_F ||
                    inot->m_opcode > INSTR_NOT_FNC ||
                    inot->m_opcode == INSTR_NOT_V || /* can't do these */
                    inot->m_opcode == INSTR_NOT_S)
                ++opts_optimizationcount[OPTIM_PEEPHOLE];
                (void)!ir_instr_op(inst, 0, inot->_m_ops[1], false);
                for (inotid = 0; inotid < vec_size(tmp->m_instr); ++inotid) {
                    if (tmp->m_instr[inotid] == inot)
                if (inotid >= vec_size(tmp->m_instr)) {
                    compile_error(inst->m_context, "sanity-check failed: failed to find instruction to optimize out");
                vec_remove(tmp->m_instr, inotid, 1);
                /* swap ontrue/onfalse */
                tmp = inst->m_bops[0];
                inst->m_bops[0] = inst->m_bops[1];
                inst->m_bops[1] = tmp;
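                /* Illustration:
                 *   NOT_F %x -> %t
                 *   COND  %t ? B_true : B_false
                 * becomes
                 *   COND  %x ? B_false : B_true
                 * (the NOT is deleted from its block and the branch
                 * targets trade places). */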
static bool ir_function_pass_tailrecursion(ir_function *self)
    for (auto& bp : self->m_blocks) {
        ir_block *block = bp.get();
        ir_instr *ret, *call, *store = nullptr;
        if (!block->m_final || vec_size(block->m_instr) < 2)
        ret = block->m_instr[vec_size(block->m_instr)-1];
        if (ret->m_opcode != INSTR_DONE && ret->m_opcode != INSTR_RETURN)
        call = block->m_instr[vec_size(block->m_instr)-2];
        if (call->m_opcode >= INSTR_STORE_F && call->m_opcode <= INSTR_STORE_FNC) {
            /* account for the unoptimized sequence
             *   STORE %return, %tmp
             */
            if (vec_size(block->m_instr) < 3)
            call = block->m_instr[vec_size(block->m_instr)-3];
        if (call->m_opcode < INSTR_CALL0 || call->m_opcode > INSTR_CALL8)
        /* optimize out the STORE */
        if (ret->_m_ops[0] &&
            ret->_m_ops[0] == store->_m_ops[0] &&
            store->_m_ops[1] == call->_m_ops[0])
            ++opts_optimizationcount[OPTIM_PEEPHOLE];
            call->_m_ops[0] = store->_m_ops[0];
            vec_remove(block->m_instr, vec_size(block->m_instr) - 2, 1);
        if (!call->_m_ops[0])
        funcval = call->_m_ops[1];
        if (funcval->m_vtype != TYPE_FUNCTION || funcval->m_constval.vfunc != self)
        /* now we have a CALL and a RET; check whether it's a tailcall */
        if (ret->_m_ops[0] && call->_m_ops[0] != ret->_m_ops[0])
        ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
        vec_shrinkby(block->m_instr, 2);
        block->m_final = false; /* open it back up */
        /* emit parameter stores */
        for (p = 0; p < call->m_params.size(); ++p) {
            /* assert(call->params_count <= self->locals_count); */
            if (!ir_block_create_store(block, call->m_context, self->m_locals[p].get(), call->m_params[p])) {
                irerror(call->m_context, "failed to create tailcall store instruction for parameter %i", (int)p);
        if (!ir_block_create_jump(block, call->m_context, self->m_blocks[0].get())) {
            irerror(call->m_context, "failed to create tailcall jump");
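/* Net effect of this pass, sketched on assumed IR:
 *   CALL   self(%a0, %a1) -> %r
 *   RETURN %r
 * becomes
 *   STORE  %a0 -> param0
 *   STORE  %a1 -> param1
 *   JUMP   entry
 * i.e. the self-call is replaced by stores into the function's own
 * parameter locals plus a jump back to the entry block (m_blocks[0]),
 * turning tail recursion into a loop. */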
bool ir_function_finalize(ir_function *self)
    for (auto& lp : self->m_locals) {
        ir_value *v = lp.get();
        if (v->m_reads.empty() && v->m_writes.size()
            && irwarning(v->m_context, WARN_UNUSED_VARIABLE,
                         "unused variable: `%s`", v->m_name.c_str())) return false;
    if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
        if (!ir_function_pass_peephole(self)) {
            irerror(self->m_context, "generic optimization pass broke something in `%s`", self->m_name.c_str());
    if (OPTS_OPTIMIZATION(OPTIM_TAIL_RECURSION)) {
        if (!ir_function_pass_tailrecursion(self)) {
            irerror(self->m_context, "tail-recursion optimization pass broke something in `%s`", self->m_name.c_str());
    if (!ir_function_naive_phi(self)) {
        irerror(self->m_context, "internal error: ir_function_naive_phi failed");
    for (auto& lp : self->m_locals) {
        ir_value *v = lp.get();
        if (v->m_vtype == TYPE_VECTOR ||
            (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
    for (auto& vp : self->m_values) {
        ir_value *v = vp.get();
        if (v->m_vtype == TYPE_VECTOR ||
            (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
    ir_function_enumerate(self);
    if (!ir_function_calculate_liferanges(self))
    if (!ir_function_allocate_locals(self))
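    /* The pass ordering here is load-bearing: naive-phi destroys SSA
     * form, so it must run before enumeration, liferange calculation and
     * local allocation, which all operate on the de-SSA'd instruction
     * stream. */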
ir_value* ir_function_create_local(ir_function *self, const std::string& name, qc_type vtype, bool param)
        !self->m_locals.empty() &&
        self->m_locals.back()->m_store != store_param)
        irerror(self->m_context, "cannot add parameters after adding locals");
    ve = new ir_value(std::string(name), (param ? store_param : store_local), vtype);
    self->m_locals.emplace_back(ve);
/***********************************************************************
 * IR Block
 */
ir_block::ir_block(ir_function* owner, const std::string& name)
    m_context.file = "<@no context>";
ir_block::~ir_block()
    for (size_t i = 0; i != vec_size(m_instr); ++i)
static void ir_block_delete_quick(ir_block* self)
    for (i = 0; i != vec_size(self->m_instr); ++i)
        ir_instr_delete_quick(self->m_instr[i]);
    vec_free(self->m_instr);
/***********************************************************************
 * IR Instructions
 */
ir_instr::ir_instr(lex_ctx_t ctx, ir_block* owner_, int op)
ir_instr::~ir_instr()
    // The following calls can only delete from vectors, and we still want
    // to delete this instruction, so ignore the return values. Since gcc's
    // warn_unused_result attribute isn't silenced by an explicit
    // (void)foo(); cast, we improvise with (void)!foo() instead.
    for (auto &it : m_phi) {
        if (vec_ir_instr_find(it.value->m_writes, this, &idx))
            it.value->m_writes.erase(it.value->m_writes.begin() + idx);
        if (vec_ir_instr_find(it.value->m_reads, this, &idx))
            it.value->m_reads.erase(it.value->m_reads.begin() + idx);
    for (auto &it : m_params) {
        if (vec_ir_instr_find(it->m_writes, this, &idx))
            it->m_writes.erase(it->m_writes.begin() + idx);
        if (vec_ir_instr_find(it->m_reads, this, &idx))
            it->m_reads.erase(it->m_reads.begin() + idx);
    (void)!ir_instr_op(this, 0, nullptr, false);
    (void)!ir_instr_op(this, 1, nullptr, false);
    (void)!ir_instr_op(this, 2, nullptr, false);
static void ir_instr_delete_quick(ir_instr *self)
    self->m_params.clear();
    self->_m_ops[0] = nullptr;
    self->_m_ops[1] = nullptr;
    self->_m_ops[2] = nullptr;
static bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing)
    if (v && v->m_vtype == TYPE_NOEXPR) {
        irerror(self->m_context, "tried to use a NOEXPR value");
    if (self->_m_ops[op]) {
        if (writing && vec_ir_instr_find(self->_m_ops[op]->m_writes, self, &idx))
            self->_m_ops[op]->m_writes.erase(self->_m_ops[op]->m_writes.begin() + idx);
        else if (vec_ir_instr_find(self->_m_ops[op]->m_reads, self, &idx))
            self->_m_ops[op]->m_reads.erase(self->_m_ops[op]->m_reads.begin() + idx);
        v->m_writes.push_back(self);
        v->m_reads.push_back(self);
    self->_m_ops[op] = v;
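    /* ir_instr_op is the single point that keeps def-use bookkeeping
     * consistent: the old operand is unhooked from its m_reads/m_writes
     * list and the new value hooked in, so the liferange analysis below
     * can trust those lists. */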
/***********************************************************************
 * IR Values
 */
void ir_value::setCodeAddress(int32_t gaddr)
    m_code.globaladdr = gaddr;
    if (m_members[0]) m_members[0]->m_code.globaladdr = gaddr;
    if (m_members[1]) m_members[1]->m_code.globaladdr = gaddr;
    if (m_members[2]) m_members[2]->m_code.globaladdr = gaddr;
int32_t ir_value::codeAddress() const
    if (m_store == store_return)
        return OFS_RETURN + m_code.addroffset;
    return m_code.globaladdr + m_code.addroffset;
ir_value::ir_value(std::string&& name_, store_type store_, qc_type vtype_)
    : m_name(move(name_))
    m_fieldtype = TYPE_VOID;
    m_outtype = TYPE_VOID;
    m_context.file = "<@no context>";
    memset(&m_constval, 0, sizeof(m_constval));
    memset(&m_code, 0, sizeof(m_code));
    m_members[0] = nullptr;
    m_members[1] = nullptr;
    m_members[2] = nullptr;
    m_memberof = nullptr;
    m_unique_life = false;
ir_value::ir_value(ir_function *owner, std::string&& name, store_type storetype, qc_type vtype)
    : ir_value(move(name), storetype, vtype)
    ir_function_collect_value(owner, this);
ir_value::~ir_value()
    if (m_vtype == TYPE_STRING)
        mem_d((void*)m_constval.vstring);
    if (!(m_flags & IR_FLAG_SPLIT_VECTOR)) {
        for (i = 0; i < 3; ++i) {
/* helper function */
ir_value* ir_builder::literalFloat(float value, bool add_to_list) {
    ir_value *v = new ir_value("#IMMEDIATE", store_global, TYPE_FLOAT);
    v->m_flags |= IR_FLAG_ERASABLE;
    v->m_hasvalue = true;
    v->m_constval.vfloat = value;
    m_globals.emplace_back(v);
    m_const_floats.emplace_back(v);
ir_value* ir_value::vectorMember(unsigned int member)
    if (m_members[member])
        return m_members[member];
    if (!m_name.empty()) {
        char member_name[3] = { '_', char('x' + member), 0 };
        name = m_name + member_name;
    if (m_vtype == TYPE_VECTOR)
        m = new ir_value(move(name), m_store, TYPE_FLOAT);
        m->m_context = m_context;
        m_members[member] = m;
        m->m_code.addroffset = member;
    else if (m_vtype == TYPE_FIELD)
        if (m_fieldtype != TYPE_VECTOR)
        m = new ir_value(move(name), m_store, TYPE_FIELD);
        m->m_fieldtype = TYPE_FLOAT;
        m->m_context = m_context;
        m_members[member] = m;
        m->m_code.addroffset = member;
        irerror(m_context, "invalid member access on %s", m_name.c_str());
    m->m_memberof = this;
size_t ir_value::size() const {
    if (m_vtype == TYPE_FIELD && m_fieldtype == TYPE_VECTOR)
        return type_sizeof_[TYPE_VECTOR];
    return type_sizeof_[m_vtype];
bool ir_value::setFloat(float f)
    if (m_vtype != TYPE_FLOAT)
    m_constval.vfloat = f;
bool ir_value::setFunc(int f)
    if (m_vtype != TYPE_FUNCTION)
bool ir_value::setVector(vec3_t v)
    if (m_vtype != TYPE_VECTOR)
bool ir_value::setField(ir_value *fld)
    if (m_vtype != TYPE_FIELD)
    m_constval.vpointer = fld;
bool ir_value::setString(const char *str)
    if (m_vtype != TYPE_STRING)
    m_constval.vstring = util_strdupe(str);
bool ir_value::setInt(int i)
    if (m_vtype != TYPE_INTEGER)
bool ir_value::lives(size_t at)
    for (auto& l : m_life) {
        if (l.start <= at && at <= l.end)
        if (l.start > at) /* since the ranges are ordered */
bool ir_value::insertLife(size_t idx, ir_life_entry_t e)
    m_life.insert(m_life.begin() + idx, e);
bool ir_value::setAlive(size_t s)
    const size_t vs = m_life.size();
    ir_life_entry_t *life_found = nullptr;
    ir_life_entry_t *before = nullptr;
    ir_life_entry_t new_entry;
    /* Find the first range >= s */
    for (i = 0; i < vs; ++i)
        before = life_found;
        life_found = &m_life[i];
        if (life_found->start > s)
    /* nothing found? append */
    if (life_found && life_found->end+1 == s)
        /* previous life range can be merged in */
    if (life_found && life_found->end >= s)
        e.start = e.end = s;
        m_life.emplace_back(e);
    if (before->end + 1 == s &&
        life_found->start - 1 == s)
        before->end = life_found->end;
        m_life.erase(m_life.begin()+i);
    if (before->end + 1 == s)
    /* already contained */
    if (before->end >= s)
    if (life_found->start - 1 == s)
        life_found->start--;
    /* insert a new entry */
    new_entry.start = new_entry.end = s;
    return insertLife(i, new_entry);
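/* Examples of the cases above, assuming m_life currently holds [3..5]:
 *   setAlive(6) merges into the range     -> [3..6]
 *   setAlive(4) is already contained      -> [3..5]
 *   setAlive(8) inserts a new point range -> [3..5], [8..8]
 * and setAlive(6) on [3..5], [7..9] bridges the two into [3..9]. */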
bool ir_value::mergeLife(const ir_value *other)
    if (other->m_life.empty())
    if (m_life.empty()) {
        m_life = other->m_life;
    for (i = 0; i < other->m_life.size(); ++i)
        const ir_life_entry_t &otherlife = other->m_life[i];
        ir_life_entry_t *entry = &m_life[myi];
        if (otherlife.end+1 < entry->start)
            /* adding an interval before entry */
            if (!insertLife(myi, otherlife))
        if (otherlife.start < entry->start &&
            otherlife.end+1 >= entry->start)
            /* starts earlier and overlaps */
            entry->start = otherlife.start;
        if (otherlife.end > entry->end &&
            otherlife.start <= entry->end+1)
            /* ends later and overlaps */
            entry->end = otherlife.end;
        /* see if our change merges it with the following ranges */
        while (myi+1 < m_life.size() &&
               entry->end+1 >= m_life[1+myi].start)
            /* overlaps with (myi+1) */
            if (entry->end < m_life[1+myi].end)
                entry->end = m_life[1+myi].end;
            m_life.erase(m_life.begin() + (myi + 1));
            entry = &m_life[myi];
        /* see if we're past the entry */
        if (otherlife.start > entry->end)
        /* append if we're at the end */
        if (myi >= m_life.size()) {
            m_life.emplace_back(otherlife);
        /* otherwise check the next range */
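/* Worked example: merging other = [1..3], [9..9] into m_life = [4..6]:
 *   [1..3] touches the start of [4..6] (end+1 >= start) -> [1..6]
 *   [9..9] lies past the last entry                     -> appended
 * result: [1..6], [9..9]. */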
static bool ir_values_overlap(const ir_value *a, const ir_value *b)
    /* For any life entry in A, see if it overlaps with any life entry
     * in B. The life entries are ordered, which allows for a more
     * efficient algorithm than naively comparing every pair.
     */
    const ir_life_entry_t *la, *lb, *enda, *endb;
    /* first of all, if either has no life range, they cannot clash */
    if (a->m_life.empty() || b->m_life.empty())
    la = &a->m_life.front();
    lb = &b->m_life.front();
    enda = &a->m_life.back() + 1;
    endb = &b->m_life.back() + 1;
    /* check whether the entries overlap; for that,
     * each must start before the other one ends.
     */
    if (la->start < lb->end &&
        lb->start < la->end)
    /* The entries are ordered: whichever entry starts earlier
     * is the one moved forward.
     */
    if (la->start < lb->start)
        /* order: A B, move A forward
         * check if we hit the end with A
         */
    else /* if (lb->start < la->start) actually <= */
        /* order: B A, move B forward
         * check if we hit the end with B
         */
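/* This is the standard two-pointer sweep over two sorted interval lists:
 * each step advances whichever entry starts earlier, so the overlap test
 * costs O(|A| + |B|) instead of the naive O(|A| * |B|). */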
/***********************************************************************
 * IR main operations
 */
static bool ir_check_unreachable(ir_block *self)
    /* The IR should never have to deal with unreachable code */
    if (!self->m_final/* || OPTS_FLAG(ALLOW_UNREACHABLE_CODE)*/)
    irerror(self->m_context, "unreachable statement (%s)", self->m_label.c_str());
bool ir_block_create_store_op(ir_block *self, lex_ctx_t ctx, int op, ir_value *target, ir_value *what)
    if (!ir_check_unreachable(self))
    if (target->m_store == store_value &&
        (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC))
        irerror(self->m_context, "cannot store to an SSA value");
        irerror(self->m_context, "trying to store: %s <- %s", target->m_name.c_str(), what->m_name.c_str());
        irerror(self->m_context, "instruction: %s", util_instr_str[op]);
    in = new ir_instr(ctx, self, op);
    if (!ir_instr_op(in, 0, target, (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC)) ||
        !ir_instr_op(in, 1, what, false))
    vec_push(self->m_instr, in);
bool ir_block_create_state_op(ir_block *self, lex_ctx_t ctx, ir_value *frame, ir_value *think)
    if (!ir_check_unreachable(self))
    in = new ir_instr(ctx, self, INSTR_STATE);
    if (!ir_instr_op(in, 0, frame, false) ||
        !ir_instr_op(in, 1, think, false))
    vec_push(self->m_instr, in);
static bool ir_block_create_store(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
    if (target->m_vtype == TYPE_VARIANT)
        vtype = what->m_vtype;
        vtype = target->m_vtype;
    if (vtype == TYPE_FLOAT && what->m_vtype == TYPE_INTEGER)
        op = INSTR_CONV_ITOF;
    else if (vtype == TYPE_INTEGER && what->m_vtype == TYPE_FLOAT)
        op = INSTR_CONV_FTOI;
    op = type_store_instr[vtype];
    if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
        if (op == INSTR_STORE_FLD && what->m_fieldtype == TYPE_VECTOR)
    return ir_block_create_store_op(self, ctx, op, target, what);
bool ir_block_create_storep(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
    if (target->m_vtype != TYPE_POINTER)
    /* storing via a pointer: the target is a pointer, so the type must
     * be inferred from the source
     */
    vtype = what->m_vtype;
    op = type_storep_instr[vtype];
    if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
        if (op == INSTR_STOREP_FLD && what->m_fieldtype == TYPE_VECTOR)
            op = INSTR_STOREP_V;
    return ir_block_create_store_op(self, ctx, op, target, what);
bool ir_block_create_return(ir_block *self, lex_ctx_t ctx, ir_value *v)
    if (!ir_check_unreachable(self))
    self->m_final = true;
    self->m_is_return = true;
    in = new ir_instr(ctx, self, INSTR_RETURN);
    if (v && !ir_instr_op(in, 0, v, false)) {
    vec_push(self->m_instr, in);
bool ir_block_create_if(ir_block *self, lex_ctx_t ctx, ir_value *v,
                        ir_block *ontrue, ir_block *onfalse)
    if (!ir_check_unreachable(self))
    self->m_final = true;
    /*in = new ir_instr(ctx, self, (v->m_vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/
    in = new ir_instr(ctx, self, VINSTR_COND);
    if (!ir_instr_op(in, 0, v, false)) {
    in->m_bops[0] = ontrue;
    in->m_bops[1] = onfalse;
    vec_push(self->m_instr, in);
    vec_push(self->m_exits, ontrue);
    vec_push(self->m_exits, onfalse);
    vec_push(ontrue->m_entries, self);
    vec_push(onfalse->m_entries, self);
bool ir_block_create_jump(ir_block *self, lex_ctx_t ctx, ir_block *to)
    if (!ir_check_unreachable(self))
    self->m_final = true;
    in = new ir_instr(ctx, self, VINSTR_JUMP);
    vec_push(self->m_instr, in);
    vec_push(self->m_exits, to);
    vec_push(to->m_entries, self);
bool ir_block_create_goto(ir_block *self, lex_ctx_t ctx, ir_block *to)
    self->m_owner->m_flags |= IR_FLAG_HAS_GOTO;
    return ir_block_create_jump(self, ctx, to);
ir_instr* ir_block_create_phi(ir_block *self, lex_ctx_t ctx, const char *label, qc_type ot)
    if (!ir_check_unreachable(self))
    in = new ir_instr(ctx, self, VINSTR_PHI);
    out = new ir_value(self->m_owner, label ? label : "", store_value, ot);
    if (!ir_instr_op(in, 0, out, true)) {
    vec_push(self->m_instr, in);
ir_value* ir_phi_value(ir_instr *self)
    return self->_m_ops[0];
void ir_phi_add(ir_instr* self, ir_block *b, ir_value *v)
    if (!vec_ir_block_find(self->m_owner->m_entries, b, nullptr)) {
        // It must not be possible to cause this; otherwise the AST
        // is doing something wrong.
        irerror(self->m_context, "Invalid entry block for PHI");
    v->m_reads.push_back(self);
    self->m_phi.push_back(pe);
/* call related code */
ir_instr* ir_block_create_call(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *func, bool noreturn)
    if (!ir_check_unreachable(self))
    in = new ir_instr(ctx, self, (noreturn ? VINSTR_NRCALL : INSTR_CALL0));
    self->m_final = true;
    self->m_is_return = true;
    out = new ir_value(self->m_owner, label ? label : "", (func->m_outtype == TYPE_VOID) ? store_return : store_value, func->m_outtype);
    if (!ir_instr_op(in, 0, out, true) ||
        !ir_instr_op(in, 1, func, false))
    vec_push(self->m_instr, in);
    if (!ir_block_create_return(self, ctx, nullptr)) {
        compile_error(ctx, "internal error: failed to generate dummy-return instruction");
ir_value* ir_call_value(ir_instr *self)
    return self->_m_ops[0];
void ir_call_param(ir_instr* self, ir_value *v)
    self->m_params.push_back(v);
    v->m_reads.push_back(self);
/* binary op related code */
ir_value* ir_block_create_binop(ir_block *self, lex_ctx_t ctx,
                                const char *label, int opcode,
                                ir_value *left, ir_value *right)
    qc_type ot = TYPE_VOID;
        case INSTR_SUB_S: /* -- offset of string as float */
        case INSTR_BITOR_IF:
        case INSTR_BITOR_FI:
        case INSTR_BITAND_FI:
        case INSTR_BITAND_IF:
        case INSTR_BITAND_I:
        case INSTR_RSHIFT_I:
        case INSTR_LSHIFT_I:
        case VINSTR_BITAND_V:
        case VINSTR_BITOR_V:
        case VINSTR_BITXOR_V:
        case VINSTR_BITAND_VF:
        case VINSTR_BITOR_VF:
        case VINSTR_BITXOR_VF:
    /*
     * after the following default case, the value of opcode can never
     * be 1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65
     */
    /* boolean operations result in floats */
    /*
     * opcode >= 10 takes the true branch: opcode is at least 10
     * opcode <= 23 takes the false branch: opcode is at least 24
     */
    if (opcode >= INSTR_EQ_F && opcode <= INSTR_GT)
    /*
     * At condition "opcode <= 23", the value of "opcode" must be
     * At condition "opcode <= 23", the value of "opcode" cannot be
     * equal to any of {1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65}.
     * The condition "opcode <= 23" cannot be true.
     * Thus ot=2 (TYPE_FLOAT) can never be true here.
     */
    else if (opcode >= INSTR_LE && opcode <= INSTR_GT)
    else if (opcode >= INSTR_LE_I && opcode <= INSTR_EQ_FI)
    if (ot == TYPE_VOID) {
        /* The AST or parser were supposed to check this! */
    return ir_block_create_general_instr(self, ctx, label, opcode, left, right, ot);
ir_value* ir_block_create_unary(ir_block *self, lex_ctx_t ctx,
                                const char *label, int opcode,
    qc_type ot = TYPE_FLOAT;
        case INSTR_NOT_FNC: /*
        case INSTR_NOT_I: */
        /*
         * Negation for virtual instructions is emulated with a 0 value.
         * Thankfully the operand for 0 already exists, so we just source
         * it from here.
         */
            return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_F, nullptr, operand, ot);
            return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_V, nullptr, operand, TYPE_VECTOR);
        ot = operand->m_vtype;
    if (ot == TYPE_VOID) {
        /* The AST or parser were supposed to check this! */
    /* let's use the general instruction creator and pass nullptr for OPB */
    return ir_block_create_general_instr(self, ctx, label, opcode, operand, nullptr, ot);
static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t ctx, const char *label,
                                               int op, ir_value *a, ir_value *b, qc_type outype)
    out = new ir_value(self->m_owner, label ? label : "", store_value, outype);
    instr = new ir_instr(ctx, self, op);
    if (!ir_instr_op(instr, 0, out, true) ||
        !ir_instr_op(instr, 1, a, false) ||
        !ir_instr_op(instr, 2, b, false) )
    vec_push(self->m_instr, instr);
ir_value* ir_block_create_fieldaddress(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field)
    /* Support for various pointer types could be added if so desired */
    if (ent->m_vtype != TYPE_ENTITY)
    if (field->m_vtype != TYPE_FIELD)
    v = ir_block_create_general_instr(self, ctx, label, INSTR_ADDRESS, ent, field, TYPE_POINTER);
    v->m_fieldtype = field->m_fieldtype;
ir_value* ir_block_create_load_from_ent(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field, qc_type outype)
    if (ent->m_vtype != TYPE_ENTITY)
    /* at some point we could redirect for TYPE_POINTER... but that could lead to carelessness */
    if (field->m_vtype != TYPE_FIELD)
        case TYPE_FLOAT:    op = INSTR_LOAD_F;   break;
        case TYPE_VECTOR:   op = INSTR_LOAD_V;   break;
        case TYPE_STRING:   op = INSTR_LOAD_S;   break;
        case TYPE_FIELD:    op = INSTR_LOAD_FLD; break;
        case TYPE_ENTITY:   op = INSTR_LOAD_ENT; break;
        case TYPE_FUNCTION: op = INSTR_LOAD_FNC; break;
        case TYPE_POINTER:  op = INSTR_LOAD_I;   break;
        case TYPE_INTEGER:  op = INSTR_LOAD_I;   break;
            irerror(self->m_context, "invalid type for ir_block_create_load_from_ent: %s", type_name[outype]);
    return ir_block_create_general_instr(self, ctx, label, op, ent, field, outype);
/* PHI resolving breaks the SSA form, and must thus be the last
 * step before life-range calculation.
 */
static bool ir_block_naive_phi(ir_block *self);
bool ir_function_naive_phi(ir_function *self)
    for (auto& b : self->m_blocks)
        if (!ir_block_naive_phi(b.get()))
static bool ir_block_naive_phi(ir_block *self)
    /* FIXME: optionally, create_phi could add the phis to a list so we
     * don't need to loop through blocks
     * - anyway: "don't optimize YET"
     */
    for (i = 0; i < vec_size(self->m_instr); ++i)
        ir_instr *instr = self->m_instr[i];
        if (instr->m_opcode != VINSTR_PHI)
        vec_remove(self->m_instr, i, 1);
        --i; /* NOTE: i+1 below */
        for (auto &it : instr->m_phi) {
            ir_value *v = it.value;
            ir_block *b = it.from;
            if (v->m_store == store_value && v->m_reads.size() == 1 && v->m_writes.size() == 1) {
                /* replace the value */
                if (!ir_instr_op(v->m_writes[0], 0, instr->_m_ops[0], true))
                /* force a move instruction */
                ir_instr *prevjump = vec_last(b->m_instr);
                vec_pop(b->m_instr);
                instr->_m_ops[0]->m_store = store_global;
                if (!ir_block_create_store(b, instr->m_context, instr->_m_ops[0], v))
                instr->_m_ops[0]->m_store = store_value;
                vec_push(b->m_instr, prevjump);
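                /* In effect, each PHI is lowered to a plain store in every
                 * predecessor block, sketched:
                 *   B1: ... STORE %v1 -> %out; JUMP B3
                 *   B2: ... STORE %v2 -> %out; JUMP B3
                 * The store has to land before the predecessor's final
                 * jump, which is why that jump is popped and re-pushed
                 * around the store above. */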
/***********************************************************************
 * IR Temp allocation code
 * Propagating value life ranges by walking through the function
 * backwards until no more changes are made.
 * In theory this should run one extra time for every level of loop
 * nesting, though this implementation might run an additional time
 * for `if` nests.
 */
/* Enumerate instructions used by values' life-ranges
 */
static void ir_block_enumerate(ir_block *self, size_t *_eid)
    for (i = 0; i < vec_size(self->m_instr); ++i)
        self->m_instr[i]->m_eid = eid++;
/* Enumerate blocks and instructions.
 * The block enumeration is unordered!
 * We do not really use the block enumeration, however
 * the instruction enumeration is important for life-ranges.
 */
void ir_function_enumerate(ir_function *self)
    size_t instruction_id = 0;
    size_t block_eid = 0;
    for (auto& block : self->m_blocks)
        /* each block now gets an additional "entry" instruction id
         * we can use to avoid point-life issues
         */
        block->m_entry_id = instruction_id;
        block->m_eid = block_eid;
        ir_block_enumerate(block.get(), &instruction_id);
/* Local-value allocator
 * After finishing creating the liferanges of all values used in a
 * function we can allocate their global positions.
 * This is the counterpart to register allocation on register machines.
 */
struct function_allocator {
static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
    size_t vsize = var->size();
    var->m_code.local = vec_size(alloc->locals);
    slot = new ir_value("reg", store_global, var->m_vtype);
    if (!slot->mergeLife(var))
    vec_push(alloc->locals, slot);
    vec_push(alloc->sizes, vsize);
    vec_push(alloc->unique, var->m_unique_life);
static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
    if (v->m_unique_life)
        return function_allocator_alloc(alloc, v);
    for (a = 0; a < vec_size(alloc->locals); ++a)
        /* if it's reserved for a unique liferange: skip */
        if (alloc->unique[a])
        slot = alloc->locals[a];
        /* never resize parameters
         * will be required later when overlapping temps + locals
         */
        if (a < vec_size(self->m_params) &&
            alloc->sizes[a] < v->size())
        if (ir_values_overlap(v, slot))
        if (!slot->mergeLife(v))
        /* adjust the size of this slot */
        if (alloc->sizes[a] < v->size())
            alloc->sizes[a] = v->size();
        v->m_code.local = a;
    if (a >= vec_size(alloc->locals)) {
        if (!function_allocator_alloc(alloc, v))
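/* Slot sharing in a nutshell: a value may reuse an existing slot only if
 * the slot is not reserved for a unique liferange, is large enough (for
 * parameter slots), and none of the liferanges already merged into the
 * slot overlap the value's own. Otherwise a fresh slot is appended. */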
bool ir_function_allocate_locals(ir_function *self)
    bool opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
    function_allocator lockalloc, globalloc;
    if (self->m_locals.empty() && self->m_values.empty())
    globalloc.locals = nullptr;
    globalloc.sizes = nullptr;
    globalloc.positions = nullptr;
    globalloc.unique = nullptr;
    lockalloc.locals = nullptr;
    lockalloc.sizes = nullptr;
    lockalloc.positions = nullptr;
    lockalloc.unique = nullptr;
    for (i = 0; i < self->m_locals.size(); ++i)
        ir_value *v = self->m_locals[i].get();
        if ((self->m_flags & IR_FLAG_MASK_NO_LOCAL_TEMPS) || !OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) {
            v->m_unique_life = true;
        else if (i >= vec_size(self->m_params))
            v->m_locked = true; /* lock parameter locals */
        if (!function_allocator_alloc((v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
    for (; i < self->m_locals.size(); ++i)
        ir_value *v = self->m_locals[i].get();
        if (v->m_life.empty())
        if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
    /* Allocate a slot for any value that still exists */
    for (i = 0; i < self->m_values.size(); ++i)
        ir_value *v = self->m_values[i].get();
        if (v->m_life.empty())
        /* CALL optimization:
         * If the value is a parameter-temp: 1 write, 1 read from a CALL,
         * and it's not "locked", write it to the OFS_PARM directly.
         */
        if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->m_locked && !v->m_unique_life) {
            if (v->m_reads.size() == 1 && v->m_writes.size() == 1 &&
                (v->m_reads[0]->m_opcode == VINSTR_NRCALL ||
                 (v->m_reads[0]->m_opcode >= INSTR_CALL0 && v->m_reads[0]->m_opcode <= INSTR_CALL8)
                ir_instr *call = v->m_reads[0];
                if (!vec_ir_value_find(call->m_params, v, &param)) {
                    irerror(call->m_context, "internal error: unlocked parameter %s not found", v->m_name.c_str());
                ++opts_optimizationcount[OPTIM_CALL_STORES];
                v->m_callparam = true;
                    v->setCodeAddress(OFS_PARM0 + 3*param);
                    size_t nprotos = self->m_owner->m_extparam_protos.size();
                    if (nprotos > param)
                        ep = self->m_owner->m_extparam_protos[param].get();
                        ep = self->m_owner->generateExtparamProto();
                        while (++nprotos <= param)
                            ep = self->m_owner->generateExtparamProto();
                    ir_instr_op(v->m_writes[0], 0, ep, true);
                    call->m_params[param+8] = ep;
        if (v->m_writes.size() == 1 && v->m_writes[0]->m_opcode == INSTR_CALL0) {
            v->m_store = store_return;
            if (v->m_members[0]) v->m_members[0]->m_store = store_return;
            if (v->m_members[1]) v->m_members[1]->m_store = store_return;
            if (v->m_members[2]) v->m_members[2]->m_store = store_return;
            ++opts_optimizationcount[OPTIM_CALL_STORES];
        if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
    if (!lockalloc.sizes && !globalloc.sizes) {
        vec_push(lockalloc.positions, 0);
        vec_push(globalloc.positions, 0);
    /* Adjust slot positions based on sizes */
    if (lockalloc.sizes) {
        pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
        for (i = 1; i < vec_size(lockalloc.sizes); ++i)
            pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
            vec_push(lockalloc.positions, pos);
        self->m_allocated_locals = pos + vec_last(lockalloc.sizes);
    if (globalloc.sizes) {
        pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
        for (i = 1; i < vec_size(globalloc.sizes); ++i)
            pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
            vec_push(globalloc.positions, pos);
        self->m_globaltemps = pos + vec_last(globalloc.sizes);
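    /* positions[] ends up as a running prefix sum of sizes[]: e.g. slot
     * sizes {3, 1, 3} lay out at offsets {0, 3, 4} for 7 globals total. */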
    /* Locals need to know their new position */
    for (auto& local : self->m_locals) {
        if (local->m_locked || !opt_gt)
            local->m_code.local = lockalloc.positions[local->m_code.local];
            local->m_code.local = globalloc.positions[local->m_code.local];
    /* Take over the actual slot positions on values */
    for (auto& value : self->m_values) {
        if (value->m_locked || !opt_gt)
            value->m_code.local = lockalloc.positions[value->m_code.local];
            value->m_code.local = globalloc.positions[value->m_code.local];
    for (i = 0; i < vec_size(lockalloc.locals); ++i)
        delete lockalloc.locals[i];
    for (i = 0; i < vec_size(globalloc.locals); ++i)
        delete globalloc.locals[i];
    vec_free(globalloc.unique);
    vec_free(globalloc.locals);
    vec_free(globalloc.sizes);
    vec_free(globalloc.positions);
    vec_free(lockalloc.unique);
    vec_free(lockalloc.locals);
    vec_free(lockalloc.sizes);
    vec_free(lockalloc.positions);
/* Get information about which operand
 * is read from, or written to.
 */
static void ir_op_read_write(int op, size_t *read, size_t *write)
        case INSTR_STOREP_F:
        case INSTR_STOREP_V:
        case INSTR_STOREP_S:
        case INSTR_STOREP_ENT:
        case INSTR_STOREP_FLD:
        case INSTR_STOREP_FNC:
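            /* STOREP_* writes through a pointer held in an operand rather
             * than to an operand slot itself, so for liferange purposes
             * its operands count as reads, not writes. */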
static bool ir_block_living_add_instr(ir_block *self, size_t eid) {
    bool changed = false;
    for (auto &it : self->m_living)
        if (it->setAlive(eid))
static bool ir_block_living_lock(ir_block *self) {
    bool changed = false;
    for (auto &it : self->m_living) {
        it->m_locked = true;
static bool ir_block_life_propagate(ir_block *self, bool *changed)
    size_t i, o, p, mem;
    // bitmasks of which operands are read from or written to
    self->m_living.clear();
    p = vec_size(self->m_exits);
    for (i = 0; i < p; ++i) {
        ir_block *prev = self->m_exits[i];
        for (auto &it : prev->m_living)
            if (!vec_ir_value_find(self->m_living, it, nullptr))
                self->m_living.push_back(it);
    i = vec_size(self->m_instr);
        instr = self->m_instr[i];
        /* See which operands are read and write operands */
        ir_op_read_write(instr->m_opcode, &read, &write);
        /* Go through the 3 main operands:
         * writes first, then reads
         */
        for (o = 0; o < 3; ++o)
            if (!instr->_m_ops[o]) /* no such operand */
            value = instr->_m_ops[o];
            /* We only care about locals.
             * We also calculate parameter liferanges so that locals
             * can take up parameter slots. */
            if (value->m_store != store_value &&
                value->m_store != store_local &&
                value->m_store != store_param)
            /* write operands */
            /* When we write to a local, we consider it "dead" for the
             * remaining upper part of the function, since in SSA a value
             * can only be written once (== created)
             */
                bool in_living = vec_ir_value_find(self->m_living, value, &idx);
                /* If the value isn't alive, it hasn't been read before... */
                /* TODO: See if the warning can be emitted during parsing or
                 * AST processing; otherwise have the warning printed here.
                 * If printing a warning here: include filecontext_t,
                 * and make sure it's only printed once,
                 * since this function is run multiple times.
                 */
                /* con_err( "Value only written %s\n", value->m_name); */
                if (value->setAlive(instr->m_eid))
                /* since 'living' won't contain it anymore,
                 * merge the value */
                if (value->setAlive(instr->m_eid))
                self->m_living.erase(self->m_living.begin() + idx);
            /* Removing a vector removes all members */
            for (mem = 0; mem < 3; ++mem) {
                if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], &idx)) {
                    if (value->m_members[mem]->setAlive(instr->m_eid))
                    self->m_living.erase(self->m_living.begin() + idx);
            /* Removing the last member removes the vector */
            if (value->m_memberof) {
                value = value->m_memberof;
                for (mem = 0; mem < 3; ++mem) {
                    if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
                if (mem == 3 && vec_ir_value_find(self->m_living, value, &idx)) {
                    if (value->setAlive(instr->m_eid))
                    self->m_living.erase(self->m_living.begin() + idx);
        /* These operations need a special case, as they can break when
         * using the same source and destination operand, since the engine
         * may read the source multiple times. */
        if (instr->m_opcode == INSTR_MUL_VF ||
            instr->m_opcode == VINSTR_BITAND_VF ||
            instr->m_opcode == VINSTR_BITOR_VF ||
            instr->m_opcode == VINSTR_BITXOR ||
            instr->m_opcode == VINSTR_BITXOR_VF ||
            instr->m_opcode == VINSTR_BITXOR_V ||
            instr->m_opcode == VINSTR_CROSS)
            value = instr->_m_ops[2];
            /* the float source will get an additional lifetime */
            if (value->setAlive(instr->m_eid+1))
            if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
        if (instr->m_opcode == INSTR_MUL_FV ||
            instr->m_opcode == INSTR_LOAD_V ||
            instr->m_opcode == VINSTR_BITXOR ||
            instr->m_opcode == VINSTR_BITXOR_VF ||
            instr->m_opcode == VINSTR_BITXOR_V ||
            instr->m_opcode == VINSTR_CROSS)
            value = instr->_m_ops[1];
            /* the float source will get an additional lifetime */
            if (value->setAlive(instr->m_eid+1))
            if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
        for (o = 0; o < 3; ++o)
            if (!instr->_m_ops[o]) /* no such operand */
            value = instr->_m_ops[o];
            /* We only care about locals.
             * We also calculate parameter liferanges so that locals
             * can take up parameter slots. */
            if (value->m_store != store_value &&
                value->m_store != store_local &&
                value->m_store != store_param)
            if (!vec_ir_value_find(self->m_living, value, nullptr))
                self->m_living.push_back(value);
            /* reading adds the full vector */
            if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
                self->m_living.push_back(value->m_memberof);
            for (mem = 0; mem < 3; ++mem) {
                if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
                    self->m_living.push_back(value->m_members[mem]);
        /* PHI operands are always read operands */
        for (auto &it : instr->m_phi) {
            if (!vec_ir_value_find(self->m_living, value, nullptr))
                self->m_living.push_back(value);
            /* reading adds the full vector */
            if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
                self->m_living.push_back(value->m_memberof);
            for (mem = 0; mem < 3; ++mem) {
                if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
                    self->m_living.push_back(value->m_members[mem]);
        /* on a call, all these values must be "locked" */
        if (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8) {
            if (ir_block_living_lock(self))
        /* call params are read operands too */
        for (auto &it : instr->m_params) {
            if (!vec_ir_value_find(self->m_living, value, nullptr))
                self->m_living.push_back(value);
            /* reading adds the full vector */
            if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
                self->m_living.push_back(value->m_memberof);
            for (mem = 0; mem < 3; ++mem) {
                if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
                    self->m_living.push_back(value->m_members[mem]);
        if (ir_block_living_add_instr(self, instr->m_eid))
    /* the "entry" instruction ID */
    if (ir_block_living_add_instr(self, self->m_entry_id))
bool ir_function_calculate_liferanges(ir_function *self)
    /* parameters live at 0 */
    for (size_t i = 0; i < vec_size(self->m_params); ++i)
        if (!self->m_locals[i].get()->setAlive(0))
            compile_error(self->m_context, "internal error: failed value-life merging");
        for (auto i = self->m_blocks.rbegin(); i != self->m_blocks.rend(); ++i)
            ir_block_life_propagate(i->get(), &changed);
    if (self->m_blocks.size()) {
        ir_block *block = self->m_blocks[0].get();
        for (auto &it : block->m_living) {
            if (v->m_store != store_local)
            if (v->m_vtype == TYPE_VECTOR)
            self->m_flags |= IR_FLAG_HAS_UNINITIALIZED;
            /* find the instruction reading from it */
            for (; s < v->m_reads.size(); ++s) {
                if (v->m_reads[s]->m_eid == v->m_life[0].end)
            if (s < v->m_reads.size()) {
                if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
                              "variable `%s` may be used uninitialized in this function\n"
                              v->m_reads[s]->m_context.file, v->m_reads[s]->m_context.line)
            if (v->m_memberof) {
                ir_value *vec = v->m_memberof;
                for (s = 0; s < vec->m_reads.size(); ++s) {
                    if (vec->m_reads[s]->m_eid == v->m_life[0].end)
                if (s < vec->m_reads.size()) {
                    if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
                                  "variable `%s` may be used uninitialized in this function\n"
                                  vec->m_reads[s]->m_context.file, vec->m_reads[s]->m_context.line)
            if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
                          "variable `%s` may be used uninitialized in this function", v->m_name.c_str()))
/***********************************************************************
 * IR Code-Generation
 *
 * Since the IR has the convention of putting 'write' operands
 * at the beginning, we have to rotate the operands of instructions
 * properly in order to generate valid QCVM code.
 *
 * Having destinations at a fixed position is more convenient. In QC
 * this is *mostly* OPC, but FTE adds at least 2 instructions which
 * read from OPA and store to OPB rather than OPC, which is partially
 * why the implementation of these instructions in darkplaces has been
 * delayed for so long.
 *
 * Breaking conventions is annoying...
 */
static bool gen_global_field(code_t *code, ir_value *global)
    if (global->m_hasvalue)
        ir_value *fld = global->m_constval.vpointer;
        irerror(global->m_context, "Invalid field constant with no field: %s", global->m_name.c_str());
        /* copy the field's value */
        global->setCodeAddress(code->globals.size());
        code->globals.push_back(fld->m_code.fieldaddr);
        if (global->m_fieldtype == TYPE_VECTOR) {
            code->globals.push_back(fld->m_code.fieldaddr+1);
            code->globals.push_back(fld->m_code.fieldaddr+2);
        global->setCodeAddress(code->globals.size());
        code->globals.push_back(0);
        if (global->m_fieldtype == TYPE_VECTOR) {
            code->globals.push_back(0);
            code->globals.push_back(0);
    if (global->m_code.globaladdr < 0)
static bool gen_global_pointer(code_t *code, ir_value *global)
    if (global->m_hasvalue)
        ir_value *target = global->m_constval.vpointer;
        irerror(global->m_context, "Invalid pointer constant: %s", global->m_name.c_str());
        /* nullptr pointers point to the nullptr constant, which also
         * sits at address 0, but still has an ir_value for itself.
         */
        /* Here, relocations ARE possible - in fteqcc-enhanced-qc:
         *   void() foo;            <- proto
         *   void() *fooptr = &foo;
         *   void() foo = { code }
         */
        if (!target->m_code.globaladdr) {
            /* FIXME: Check for the constant nullptr ir_value,
             * because then code.globaladdr being 0 is valid.
             */
            irerror(global->m_context, "FIXME: Relocation support");
        global->setCodeAddress(code->globals.size());
        code->globals.push_back(target->m_code.globaladdr);
        global->setCodeAddress(code->globals.size());
        code->globals.push_back(0);
    if (global->m_code.globaladdr < 0)
static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *block)
    prog_section_statement_t stmt;
    block->m_generated = true;
    block->m_code_start = code->statements.size();
    for (i = 0; i < vec_size(block->m_instr); ++i)
        instr = block->m_instr[i];
        if (instr->m_opcode == VINSTR_PHI) {
            irerror(block->m_context, "cannot generate virtual instruction (phi)");
        if (instr->m_opcode == VINSTR_JUMP) {
            target = instr->m_bops[0];
            /* for unconditional jumps, if the target hasn't been generated
             * yet, we generate it right here.
             */
            if (!target->m_generated)
                return gen_blocks_recursive(code, func, target);
            /* otherwise we generate a jump instruction */
            stmt.opcode = INSTR_GOTO;
            stmt.o1.s1 = target->m_code_start - code->statements.size();
            if (stmt.o1.s1 != 1)
                code_push_statement(code, &stmt, instr->m_context);
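            /* A branch offset of 1 would jump to the statement that
             * follows anyway, so such GOTOs are simply not emitted. */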
            /* no further instructions can be in this block */
        if (instr->m_opcode == VINSTR_BITXOR) {
            stmt.opcode = INSTR_BITOR;
            stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
            stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
            stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
            code_push_statement(code, &stmt, instr->m_context);
            stmt.opcode = INSTR_BITAND;
            stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
            stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
            stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
            code_push_statement(code, &stmt, instr->m_context);
            stmt.opcode = INSTR_SUB_F;
            stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
            stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
            stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
            code_push_statement(code, &stmt, instr->m_context);
            /* instruction generated */
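            /* XOR is emulated via the integer identity
             *   a ^ b == (a | b) - (a & b)
             * the OR goes straight into the destination, the AND is
             * staged in vinstr_temp[0], and the SUB combines them. */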
2549 if (instr->m_opcode == VINSTR_BITAND_V) {
2550 stmt.opcode = INSTR_BITAND;
2551 stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2552 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2553 stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2554 code_push_statement(code, &stmt, instr->m_context);
2558 code_push_statement(code, &stmt, instr->m_context);
2562 code_push_statement(code, &stmt, instr->m_context);
2564 /* instruction generated */
2568 if (instr->m_opcode == VINSTR_BITOR_V) {
2569 stmt.opcode = INSTR_BITOR;
2570 stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2571 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2572 stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2573 code_push_statement(code, &stmt, instr->m_context);
2577 code_push_statement(code, &stmt, instr->m_context);
2581 code_push_statement(code, &stmt, instr->m_context);
2583 /* instruction generated */
2587 if (instr->m_opcode == VINSTR_BITXOR_V) {
2588 for (j = 0; j < 3; ++j) {
2589 stmt.opcode = INSTR_BITOR;
2590 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2591 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2592 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2593 code_push_statement(code, &stmt, instr->m_context);
2594 stmt.opcode = INSTR_BITAND;
2595 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2596 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2597 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2598 code_push_statement(code, &stmt, instr->m_context);
2600 stmt.opcode = INSTR_SUB_V;
2601 stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2602 stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2603 stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2604 code_push_statement(code, &stmt, instr->m_context);
2606 /* instruction generated */
2610 if (instr->m_opcode == VINSTR_BITAND_VF) {
2611 stmt.opcode = INSTR_BITAND;
2612 stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2613 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2614 stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2615 code_push_statement(code, &stmt, instr->m_context);
2618 code_push_statement(code, &stmt, instr->m_context);
2621 code_push_statement(code, &stmt, instr->m_context);
2623 /* instruction generated */
2627 if (instr->m_opcode == VINSTR_BITOR_VF) {
2628 stmt.opcode = INSTR_BITOR;
2629 stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2630 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2631 stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2632 code_push_statement(code, &stmt, instr->m_context);
2635 code_push_statement(code, &stmt, instr->m_context);
2638 code_push_statement(code, &stmt, instr->m_context);
2640 /* instruction generated */
2644 if (instr->m_opcode == VINSTR_BITXOR_VF) {
2645 for (j = 0; j < 3; ++j) {
2646 stmt.opcode = INSTR_BITOR;
2647 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2648 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2649 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2650 code_push_statement(code, &stmt, instr->m_context);
2651 stmt.opcode = INSTR_BITAND;
2652 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2653 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2654 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2655 code_push_statement(code, &stmt, instr->m_context);
2657 stmt.opcode = INSTR_SUB_V;
2658 stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2659 stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2660 stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2661 code_push_statement(code, &stmt, instr->m_context);
2663 /* instruction generated */
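/* Cross product via the usual determinant expansion:
 *   out[j] = a[(j+1)%3] * b[(j+2)%3] - a[(j+2)%3] * b[(j+1)%3]
 * The first MUL_F of each pair targets the result, the second a vinstr
 * temp, and one SUB_V performs all three subtractions at once. */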
2667 if (instr->m_opcode == VINSTR_CROSS) {
2668 stmt.opcode = INSTR_MUL_F;
2669 for (j = 0; j < 3; ++j) {
2670 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 1) % 3;
2671 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 2) % 3;
2672 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2673 code_push_statement(code, &stmt, instr->m_context);
2674 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 2) % 3;
2675 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 1) % 3;
2676 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2677 code_push_statement(code, &stmt, instr->m_context);
2679 stmt.opcode = INSTR_SUB_V;
2680 stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2681 stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2682 stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2683 code_push_statement(code, &stmt, instr->m_context);
2685 /* instruction generated */
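/* Conditional jumps: IF/IFNOT take a signed statement offset relative to
 * the branch itself. Already-generated targets can be jumped to directly;
 * otherwise the branch is emitted with a placeholder offset (see stidx
 * below) and patched once the target block has been generated. */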
2689 if (instr->m_opcode == VINSTR_COND) {
2690 ontrue = instr->m_bops[0];
2691 onfalse = instr->m_bops[1];
/* TODO: have the AST signal which block should
 * come first: e.g. optimize IFs without ELSE...
2696 stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2700 if (ontrue->m_generated) {
2701 stmt.opcode = INSTR_IF;
2702 stmt.o2.s1 = ontrue->m_code_start - code->statements.size();
2703 if (stmt.o2.s1 != 1)
2704 code_push_statement(code, &stmt, instr->m_context);
2706 if (onfalse->m_generated) {
2707 stmt.opcode = INSTR_IFNOT;
2708 stmt.o2.s1 = onfalse->m_code_start - code->statements.size();
2709 if (stmt.o2.s1 != 1)
2710 code_push_statement(code, &stmt, instr->m_context);
2712 if (!ontrue->m_generated) {
2713 if (onfalse->m_generated)
2714 return gen_blocks_recursive(code, func, ontrue);
2716 if (!onfalse->m_generated) {
2717 if (ontrue->m_generated)
2718 return gen_blocks_recursive(code, func, onfalse);
/* neither ontrue nor onfalse has been generated yet */
2721 stmt.opcode = INSTR_IFNOT;
2722 if (!instr->m_likely) {
/* Honor the likelihood hint: swap the targets so the likely path falls through */
ir_block *tmp = onfalse;
stmt.opcode = INSTR_IF;
onfalse = ontrue;
ontrue = tmp;
2729 stidx = code->statements.size();
2730 code_push_statement(code, &stmt, instr->m_context);
2731 /* on false we jump, so add ontrue-path */
if (!gen_blocks_recursive(code, func, ontrue))
return false;
2734 /* fixup the jump address */
2735 code->statements[stidx].o2.s1 = code->statements.size() - stidx;
2736 /* generate onfalse path */
2737 if (onfalse->m_generated) {
2738 /* fixup the jump address */
2739 code->statements[stidx].o2.s1 = onfalse->m_code_start - stidx;
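/* Peephole: an offset of 1 means the branch falls through to the very
 * next statement, so the conditional jump at stidx is useless; move the
 * following statement into its slot (adjusting a backwards-relative jump
 * target by one) and pop the duplicate. */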
2740 if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2741 code->statements[stidx] = code->statements[stidx+1];
2742 if (code->statements[stidx].o1.s1 < 0)
2743 code->statements[stidx].o1.s1++;
2744 code_pop_statement(code);
2746 stmt.opcode = code->statements.back().opcode;
2747 if (stmt.opcode == INSTR_GOTO ||
2748 stmt.opcode == INSTR_IF ||
2749 stmt.opcode == INSTR_IFNOT ||
2750 stmt.opcode == INSTR_RETURN ||
2751 stmt.opcode == INSTR_DONE)
2753 /* no use jumping from here */
2756 /* may have been generated in the previous recursive call */
2757 stmt.opcode = INSTR_GOTO;
2758 stmt.o1.s1 = onfalse->m_code_start - code->statements.size();
2761 if (stmt.o1.s1 != 1)
2762 code_push_statement(code, &stmt, instr->m_context);
2765 else if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2766 code->statements[stidx] = code->statements[stidx+1];
2767 if (code->statements[stidx].o1.s1 < 0)
2768 code->statements[stidx].o1.s1++;
2769 code_pop_statement(code);
2771 /* if not, generate now */
2772 return gen_blocks_recursive(code, func, onfalse);
2775 if ( (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8)
2776 || instr->m_opcode == VINSTR_NRCALL)
first = instr->m_params.size();
if (first > 8)
first = 8;
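/* The first 8 arguments are passed in the fixed registers OFS_PARM0 to
 * OFS_PARM7; every parameter slot is 3 globals wide (vector-sized), hence
 * the stride of 3 * p below. */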
2784 for (p = 0; p < first; ++p)
2786 ir_value *param = instr->m_params[p];
if (param->m_callparam)
continue;
2790 stmt.opcode = INSTR_STORE_F;
2793 if (param->m_vtype == TYPE_FIELD)
2794 stmt.opcode = field_store_instr[param->m_fieldtype];
2795 else if (param->m_vtype == TYPE_NIL)
2796 stmt.opcode = INSTR_STORE_V;
2798 stmt.opcode = type_store_instr[param->m_vtype];
2799 stmt.o1.u1 = param->codeAddress();
2800 stmt.o2.u1 = OFS_PARM0 + 3 * p;
2802 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2803 /* fetch 3 separate floats */
2804 stmt.opcode = INSTR_STORE_F;
2805 stmt.o1.u1 = param->m_members[0]->codeAddress();
code_push_statement(code, &stmt, instr->m_context);
stmt.o2.u1++; /* next component of the target parameter slot */
stmt.o1.u1 = param->m_members[1]->codeAddress();
code_push_statement(code, &stmt, instr->m_context);
stmt.o2.u1++;
stmt.o1.u1 = param->m_members[2]->codeAddress();
code_push_statement(code, &stmt, instr->m_context);
}
else
code_push_statement(code, &stmt, instr->m_context);
2817 /* Now handle extparams */
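/* Arguments beyond the eighth don't fit into the OFS_PARM registers; they
 * are passed through EXTPARM globals, compiler-generated vector-sized
 * globals which the callee copies into its own locals (see
 * gen_function_extparam_copy below). */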
2818 first = instr->m_params.size();
2819 for (; p < first; ++p)
2821 ir_builder *ir = func->m_owner;
2822 ir_value *param = instr->m_params[p];
2823 ir_value *targetparam;
if (param->m_callparam)
continue;
2828 if (p-8 >= ir->m_extparams.size())
2829 ir->generateExtparam();
2831 targetparam = ir->m_extparams[p-8];
2833 stmt.opcode = INSTR_STORE_F;
2836 if (param->m_vtype == TYPE_FIELD)
2837 stmt.opcode = field_store_instr[param->m_fieldtype];
2838 else if (param->m_vtype == TYPE_NIL)
2839 stmt.opcode = INSTR_STORE_V;
2841 stmt.opcode = type_store_instr[param->m_vtype];
2842 stmt.o1.u1 = param->codeAddress();
2843 stmt.o2.u1 = targetparam->codeAddress();
2844 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2845 /* fetch 3 separate floats */
2846 stmt.opcode = INSTR_STORE_F;
2847 stmt.o1.u1 = param->m_members[0]->codeAddress();
code_push_statement(code, &stmt, instr->m_context);
stmt.o2.u1++; /* next component of the target extparam */
stmt.o1.u1 = param->m_members[1]->codeAddress();
code_push_statement(code, &stmt, instr->m_context);
stmt.o2.u1++;
stmt.o1.u1 = param->m_members[2]->codeAddress();
code_push_statement(code, &stmt, instr->m_context);
}
else
code_push_statement(code, &stmt, instr->m_context);
2860 stmt.opcode = INSTR_CALL0 + instr->m_params.size();
2861 if (stmt.opcode > INSTR_CALL8)
2862 stmt.opcode = INSTR_CALL8;
2863 stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2866 code_push_statement(code, &stmt, instr->m_context);
ir_value *retvalue = instr->_m_ops[0];
2869 if (retvalue && retvalue->m_store != store_return &&
2870 (retvalue->m_store == store_global || retvalue->m_life.size()))
2872 /* not to be kept in OFS_RETURN */
2873 if (retvalue->m_vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS))
2874 stmt.opcode = field_store_instr[retvalue->m_fieldtype];
2876 stmt.opcode = type_store_instr[retvalue->m_vtype];
2877 stmt.o1.u1 = OFS_RETURN;
2878 stmt.o2.u1 = retvalue->codeAddress();
2880 code_push_statement(code, &stmt, instr->m_context);
2885 if (instr->m_opcode == INSTR_STATE) {
2886 stmt.opcode = instr->m_opcode;
2887 if (instr->_m_ops[0])
2888 stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2889 if (instr->_m_ops[1])
2890 stmt.o2.u1 = instr->_m_ops[1]->codeAddress();
2892 code_push_statement(code, &stmt, instr->m_context);
2896 stmt.opcode = instr->m_opcode;
2901 /* This is the general order of operands */
2902 if (instr->_m_ops[0])
2903 stmt.o3.u1 = instr->_m_ops[0]->codeAddress();
2905 if (instr->_m_ops[1])
2906 stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2908 if (instr->_m_ops[2])
2909 stmt.o2.u1 = instr->_m_ops[2]->codeAddress();
2911 if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
2913 stmt.o1.u1 = stmt.o3.u1;
2916 else if ((stmt.opcode >= INSTR_STORE_F &&
2917 stmt.opcode <= INSTR_STORE_FNC) ||
2918 (stmt.opcode >= INSTR_STOREP_F &&
2919 stmt.opcode <= INSTR_STOREP_FNC))
2921 /* 2-operand instructions with A -> B */
2922 stmt.o2.u1 = stmt.o3.u1;
/* tiny optimization: don't output a store whose source and
 * destination are the same operand
 */
2928 if (stmt.o2.u1 == stmt.o1.u1 &&
2929 OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
{
++opts_optimizationcount[OPTIM_PEEPHOLE];
continue;
}
2935 code_push_statement(code, &stmt, instr->m_context);
2940 static bool gen_function_code(code_t *code, ir_function *self)
prog_section_statement_t stmt, *retst;
ir_block *block;
/* Starting from the entry point, we generate blocks "as they come"
 * for now. Dead blocks will obviously not be translated.
2948 if (self->m_blocks.empty()) {
2949 irerror(self->m_context, "Function '%s' declared without body.", self->m_name.c_str());
2953 block = self->m_blocks[0].get();
if (block->m_generated)
return true;
2957 if (!gen_blocks_recursive(code, self, block)) {
2958 irerror(self->m_context, "failed to generate blocks for '%s'", self->m_name.c_str());
2962 /* code_write and qcvm -disasm need to know that the function ends here */
2963 retst = &code->statements.back();
2964 if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) &&
2965 self->m_outtype == TYPE_VOID &&
2966 retst->opcode == INSTR_RETURN &&
2967 !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1)
2969 retst->opcode = INSTR_DONE;
2970 ++opts_optimizationcount[OPTIM_VOID_RETURN];
stmt.opcode = INSTR_DONE;
stmt.o1.u1 = 0;
stmt.o2.u1 = 0;
stmt.o3.u1 = 0;
lex_ctx_t last;
last.line = code->linenums.back();
2979 last.column = code->columnnums.back();
2981 code_push_statement(code, &stmt, last);
2986 qcint_t ir_builder::filestring(const char *filename)
/* NOTE: filename pointers are stored as-is and never strdup'd;
 * we compare the stored strings' contents to find an existing entry.
2993 for (size_t i = 0; i != m_filenames.size(); ++i) {
if (!strcmp(m_filenames[i], filename))
return m_filestrings[i];
qcint_t str = code_genstring(m_code.get(), filename);
2999 m_filenames.push_back(filename);
m_filestrings.push_back(str);
return str;
3004 bool ir_builder::generateGlobalFunction(ir_value *global)
prog_section_function_t fun;
ir_function *irfun;
size_t i;
3011 if (!global->m_hasvalue || (!global->m_constval.vfunc)) {
3012 irerror(global->m_context, "Invalid state of function-global: not constant: %s", global->m_name.c_str());
3016 irfun = global->m_constval.vfunc;
3017 fun.name = global->m_code.name;
3018 fun.file = filestring(global->m_context.file);
3019 fun.profile = 0; /* always 0 */
3020 fun.nargs = vec_size(irfun->m_params);
3024 for (i = 0; i < 8; ++i) {
if ((int32_t)i >= fun.nargs)
fun.argsize[i] = 0;
else
fun.argsize[i] = type_sizeof_[irfun->m_params[i]];
3032 fun.locals = irfun->m_allocated_locals;
3034 if (irfun->m_builtin)
3035 fun.entry = irfun->m_builtin+1;
3037 irfun->m_code_function_def = m_code->functions.size();
3038 fun.entry = m_code->statements.size();
3041 m_code->functions.push_back(fun);
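/* Extparam protos are created on demand while calls are generated; the
 * backing globals get their addresses and defs later, in
 * generateExtparam(). */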
3045 ir_value* ir_builder::generateExtparamProto()
char name[128];
util_snprintf(name, sizeof(name), "EXTPARM#%i", (int)(m_extparam_protos.size()));
3050 ir_value *global = new ir_value(name, store_global, TYPE_VECTOR);
m_extparam_protos.emplace_back(global);
return global;
3056 void ir_builder::generateExtparam()
prog_section_def_t def;
ir_value *global;
3061 if (m_extparam_protos.size() < m_extparams.size()+1)
global = generateExtparamProto();
else
global = m_extparam_protos[m_extparams.size()].get();
3066 def.name = code_genstring(m_code.get(), global->m_name.c_str());
3067 def.type = TYPE_VECTOR;
3068 def.offset = m_code->globals.size();
3070 m_code->defs.push_back(def);
3072 global->setCodeAddress(def.offset);
3074 m_code->globals.push_back(0);
3075 m_code->globals.push_back(0);
3076 m_code->globals.push_back(0);
3078 m_extparams.emplace_back(global);
3081 static bool gen_function_extparam_copy(code_t *code, ir_function *self)
3083 ir_builder *ir = self->m_owner;
3085 size_t numparams = vec_size(self->m_params);
3089 prog_section_statement_t stmt;
3090 stmt.opcode = INSTR_STORE_F;
3092 for (size_t i = 8; i < numparams; ++i) {
size_t ext = i - 8;
if (ext >= ir->m_extparams.size())
3095 ir->generateExtparam();
3097 ir_value *ep = ir->m_extparams[ext];
3099 stmt.opcode = type_store_instr[self->m_locals[i]->m_vtype];
3100 if (self->m_locals[i]->m_vtype == TYPE_FIELD &&
3101 self->m_locals[i]->m_fieldtype == TYPE_VECTOR)
3103 stmt.opcode = INSTR_STORE_V;
3105 stmt.o1.u1 = ep->codeAddress();
3106 stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3107 code_push_statement(code, &stmt, self->m_context);
3113 static bool gen_function_varargs_copy(code_t *code, ir_function *self)
size_t i, ext, numparams, maxparams;
ir_value *ep;
3117 ir_builder *ir = self->m_owner;
3119 prog_section_statement_t stmt;
3121 numparams = vec_size(self->m_params);
3125 stmt.opcode = INSTR_STORE_V;
3127 maxparams = numparams + self->m_max_varargs;
for (i = numparams; i < maxparams; ++i) {
if (i < 8) {
stmt.o1.u1 = OFS_PARM0 + 3*i;
stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
code_push_statement(code, &stmt, self->m_context);
continue;
}
ext = i - 8;
while (ext >= ir->m_extparams.size())
ir->generateExtparam();
ep = ir->m_extparams[ext];
3141 stmt.o1.u1 = ep->codeAddress();
3142 stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3143 code_push_statement(code, &stmt, self->m_context);
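/* Lay out a function's locals and temporaries. Unless overlapping is
 * disabled (debug info via -g, or IR_FLAG_MASK_NO_OVERLAP), all functions
 * share one common region starting at m_first_common_local; with
 * OPTIM_GLOBAL_TEMPS, unlocked temporaries additionally share the
 * m_first_common_globaltemp region. */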
3149 bool ir_builder::generateFunctionLocals(ir_value *global)
prog_section_function_t *def;
ir_function *irfun;
3153 uint32_t firstlocal, firstglobal;
3155 irfun = global->m_constval.vfunc;
3156 def = &m_code->functions[0] + irfun->m_code_function_def;
3158 if (OPTS_OPTION_BOOL(OPTION_G) ||
3159 !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS) ||
3160 (irfun->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3162 firstlocal = def->firstlocal = m_code->globals.size();
3164 firstlocal = def->firstlocal = m_first_common_local;
3165 ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
3168 firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? m_first_common_globaltemp : firstlocal);
3170 for (size_t i = m_code->globals.size(); i < firstlocal + irfun->m_allocated_locals; ++i)
3171 m_code->globals.push_back(0);
3173 for (auto& lp : irfun->m_locals) {
3174 ir_value *v = lp.get();
3175 if (v->m_locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
3176 v->setCodeAddress(firstlocal + v->m_code.local);
3177 if (!generateGlobal(v, true)) {
3178 irerror(v->m_context, "failed to generate local %s", v->m_name.c_str());
3183 v->setCodeAddress(firstglobal + v->m_code.local);
3185 for (auto& vp : irfun->m_values) {
3186 ir_value *v = vp.get();
if (v->m_locked)
v->setCodeAddress(firstlocal + v->m_code.local);
else
v->setCodeAddress(firstglobal + v->m_code.local);
3197 bool ir_builder::generateGlobalFunctionCode(ir_value *global)
prog_section_function_t *fundef;
ir_function *irfun;
3202 irfun = global->m_constval.vfunc;
3204 if (global->m_cvq == CV_NONE) {
3205 if (irwarning(global->m_context, WARN_IMPLICIT_FUNCTION_POINTER,
3206 "function `%s` has no body and in QC implicitly becomes a function-pointer",
3207 global->m_name.c_str()))
3209 /* Not bailing out just now. If this happens a lot you don't want to have
3210 * to rerun gmqcc for each such function.
/* this was a function pointer, don't generate code for those */
return true;
}
if (irfun->m_builtin)
return true;
* If there is no definition and the thing is erasable, we can skip
* outputting the function altogether.
if (global->m_flags & IR_FLAG_ERASABLE && irfun->m_code_function_def < 0) {
return true;
}
3231 if (irfun->m_code_function_def < 0) {
3232 irerror(irfun->m_context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->m_name.c_str());
3235 fundef = &m_code->functions[irfun->m_code_function_def];
3237 fundef->entry = m_code->statements.size();
3238 if (!generateFunctionLocals(global)) {
3239 irerror(irfun->m_context, "Failed to generate locals for function %s", irfun->m_name.c_str());
3242 if (!gen_function_extparam_copy(m_code.get(), irfun)) {
3243 irerror(irfun->m_context, "Failed to generate extparam-copy code for function %s", irfun->m_name.c_str());
3246 if (irfun->m_max_varargs && !gen_function_varargs_copy(m_code.get(), irfun)) {
3247 irerror(irfun->m_context, "Failed to generate vararg-copy code for function %s", irfun->m_name.c_str());
3250 if (!gen_function_code(m_code.get(), irfun)) {
3251 irerror(irfun->m_context, "Failed to generate code for function %s", irfun->m_name.c_str());
3257 static void gen_vector_defs(code_t *code, prog_section_def_t def, const char *name)
3262 if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3265 def.type = TYPE_FLOAT;
size_t len = strlen(name), i;
char *component = (char*)mem_a(len+3);
3270 memcpy(component, name, len);
len += 2;
component[len-0] = 0;
component[len-2] = '_';
component[len-1] = 'x';
for (i = 0; i < 3; ++i) {
def.name = code_genstring(code, component);
code->defs.push_back(def);
def.offset++;       /* next component's slot */
component[len-1]++; /* advance the suffix: 'x' -> 'y' -> 'z' */
}
mem_d(component);
3287 static void gen_vector_fields(code_t *code, prog_section_field_t fld, const char *name)
3292 if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3295 fld.type = TYPE_FLOAT;
size_t len = strlen(name), i;
char *component = (char*)mem_a(len+3);
3300 memcpy(component, name, len);
len += 2;
component[len-0] = 0;
component[len-2] = '_';
component[len-1] = 'x';
for (i = 0; i < 3; ++i) {
fld.name = code_genstring(code, component);
code->fields.push_back(fld);
fld.offset++;       /* next component's slot */
component[len-1]++; /* advance the suffix: 'x' -> 'y' -> 'z' */
}
mem_d(component);
3317 bool ir_builder::generateGlobal(ir_value *global, bool islocal)
size_t i, d;
int32_t *iptr;
prog_section_def_t def;
3322 bool pushdef = opts.optimizeoff;
3324 /* we don't generate split-vectors */
if (global->m_vtype == TYPE_VECTOR && (global->m_flags & IR_FLAG_SPLIT_VECTOR))
return true;
3328 def.type = global->m_vtype;
3329 def.offset = m_code->globals.size();
3331 if (OPTS_OPTION_BOOL(OPTION_G) || !islocal)
* if we're erasable and the global isn't referenced, skip outputting it
if (global->m_flags & IR_FLAG_ERASABLE && global->m_reads.empty()) {
return true;
}
3343 if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) &&
3344 !(global->m_flags & IR_FLAG_INCLUDE_DEF) &&
(global->m_name[0] == '#' || global->m_cvq == CV_CONST))
{
pushdef = false;
}
3351 if (global->m_name[0] == '#') {
3352 if (!m_str_immediate)
3353 m_str_immediate = code_genstring(m_code.get(), "IMMEDIATE");
3354 def.name = global->m_code.name = m_str_immediate;
3357 def.name = global->m_code.name = code_genstring(m_code.get(), global->m_name.c_str());
3362 def.offset = global->codeAddress();
3363 m_code->defs.push_back(def);
3364 if (global->m_vtype == TYPE_VECTOR)
3365 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3366 else if (global->m_vtype == TYPE_FIELD && global->m_fieldtype == TYPE_VECTOR)
3367 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3374 switch (global->m_vtype)
3377 if (0 == global->m_name.compare("end_sys_globals")) {
3378 // TODO: remember this point... all the defs before this one
3379 // should be checksummed and added to progdefs.h when we generate it.
3381 else if (0 == global->m_name.compare("end_sys_fields")) {
// TODO: same as above but for entity-fields rather than globals
else if (irwarning(global->m_context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`",
3385 global->m_name.c_str()))
3387 /* Not bailing out */
/* I'd argue setting it to 0 is sufficient, but maybe some depend on knowing how far
 * the system fields actually go? Though the engine knows this anyway...
 * Maybe this could become an -f option.
 * fteqcc creates data of size 1 for end_sys_*, so let's do the same.
3395 global->setCodeAddress(m_code->globals.size());
3396 m_code->globals.push_back(0);
3399 m_code->defs.push_back(def);
3403 m_code->defs.push_back(def);
3404 return gen_global_pointer(m_code.get(), global);
if (pushdef) {
m_code->defs.push_back(def);
if (global->m_fieldtype == TYPE_VECTOR)
gen_vector_defs(m_code.get(), def, global->m_name.c_str());
}
3411 return gen_global_field(m_code.get(), global);
3416 global->setCodeAddress(m_code->globals.size());
3417 if (global->m_hasvalue) {
if (global->m_cvq == CV_CONST && global->m_reads.empty())
return true;
3420 iptr = (int32_t*)&global->m_constval.ivec[0];
3421 m_code->globals.push_back(*iptr);
3423 m_code->globals.push_back(0);
3425 if (!islocal && global->m_cvq != CV_CONST)
3426 def.type |= DEF_SAVEGLOBAL;
if (pushdef)
m_code->defs.push_back(def);
3430 return global->m_code.globaladdr >= 0;
3434 global->setCodeAddress(m_code->globals.size());
3435 if (global->m_hasvalue) {
if (global->m_cvq == CV_CONST && global->m_reads.empty())
return true;
3438 uint32_t load = code_genstring(m_code.get(), global->m_constval.vstring);
3439 m_code->globals.push_back(load);
3441 m_code->globals.push_back(0);
3443 if (!islocal && global->m_cvq != CV_CONST)
3444 def.type |= DEF_SAVEGLOBAL;
if (pushdef)
m_code->defs.push_back(def);
3447 return global->m_code.globaladdr >= 0;
3452 global->setCodeAddress(m_code->globals.size());
3453 if (global->m_hasvalue) {
3454 iptr = (int32_t*)&global->m_constval.ivec[0];
3455 m_code->globals.push_back(iptr[0]);
if (global->m_code.globaladdr < 0)
return false;
3458 for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3459 m_code->globals.push_back(iptr[d]);
3462 m_code->globals.push_back(0);
if (global->m_code.globaladdr < 0)
return false;
3465 for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3466 m_code->globals.push_back(0);
3469 if (!islocal && global->m_cvq != CV_CONST)
3470 def.type |= DEF_SAVEGLOBAL;
if (pushdef) {
m_code->defs.push_back(def);
def.type &= ~DEF_SAVEGLOBAL;
gen_vector_defs(m_code.get(), def, global->m_name.c_str());
}
3477 return global->m_code.globaladdr >= 0;
3480 global->setCodeAddress(m_code->globals.size());
3481 if (!global->m_hasvalue) {
3482 m_code->globals.push_back(0);
if (global->m_code.globaladdr < 0)
return false;
3486 m_code->globals.push_back(m_code->functions.size());
3487 if (!generateGlobalFunction(global))
3490 if (!islocal && global->m_cvq != CV_CONST)
3491 def.type |= DEF_SAVEGLOBAL;
if (pushdef)
m_code->defs.push_back(def);
3496 /* assume biggest type */
3497 global->setCodeAddress(m_code->globals.size());
3498 m_code->globals.push_back(0);
3499 for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i)
3500 m_code->globals.push_back(0);
3503 /* refuse to create 'void' type or any other fancy business. */
3504 irerror(global->m_context, "Invalid type for global variable `%s`: %s",
3505 global->m_name.c_str(), type_name[global->m_vtype]);
3510 static GMQCC_INLINE void ir_builder_prepare_field(code_t *code, ir_value *field)
3512 field->m_code.fieldaddr = code_alloc_field(code, type_sizeof_[field->m_fieldtype]);
3515 static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
3517 prog_section_def_t def;
3518 prog_section_field_t fld;
3522 def.type = (uint16_t)field->m_vtype;
3523 def.offset = (uint16_t)self->m_code->globals.size();
3525 /* create a global named the same as the field */
3526 if (OPTS_OPTION_U32(OPTION_STANDARD) == COMPILER_GMQCC) {
3527 /* in our standard, the global gets a dot prefix */
size_t len = field->m_name.length();
char name[1024];
3531 /* we really don't want to have to allocate this, and 1024
3532 * bytes is more than enough for a variable/field name
3534 if (len+2 >= sizeof(name)) {
3535 irerror(field->m_context, "invalid field name size: %u", (unsigned int)len);
name[0] = '.';
memcpy(name+1, field->m_name.c_str(), len); // no strncpy - we used strlen above
name[len+1] = 0;
3543 def.name = code_genstring(self->m_code.get(), name);
3544 fld.name = def.name + 1; /* we reuse that string table entry */
3546 /* in plain QC, there cannot be a global with the same name,
3547 * and so we also name the global the same.
* FIXME: fteqcc probably creates a global as well;
* check whether it actually uses the same name.
3551 def.name = code_genstring(self->m_code.get(), field->m_name.c_str());
3552 fld.name = def.name;
3555 field->m_code.name = def.name;
3557 self->m_code->defs.push_back(def);
3559 fld.type = field->m_fieldtype;
3561 if (fld.type == TYPE_VOID) {
3562 irerror(field->m_context, "field is missing a type: %s - don't know its size", field->m_name.c_str());
3566 fld.offset = field->m_code.fieldaddr;
3568 self->m_code->fields.push_back(fld);
3570 field->setCodeAddress(self->m_code->globals.size());
3571 self->m_code->globals.push_back(fld.offset);
3572 if (fld.type == TYPE_VECTOR) {
3573 self->m_code->globals.push_back(fld.offset+1);
3574 self->m_code->globals.push_back(fld.offset+2);
3577 if (field->m_fieldtype == TYPE_VECTOR) {
3578 gen_vector_defs (self->m_code.get(), def, field->m_name.c_str());
3579 gen_vector_fields(self->m_code.get(), fld, field->m_name.c_str());
3582 return field->m_code.globaladdr >= 0;
3585 static void ir_builder_collect_reusables(ir_builder *builder) {
3586 std::vector<ir_value*> reusables;
3588 for (auto& gp : builder->m_globals) {
3589 ir_value *value = gp.get();
if (value->m_vtype != TYPE_FLOAT || !value->m_hasvalue)
continue;
3592 if (value->m_cvq == CV_CONST || (value->m_name.length() >= 1 && value->m_name[0] == '#'))
3593 reusables.emplace_back(value);
builder->m_const_floats = std::move(reusables);
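/* Split a constant vector that is only ever passed directly to calls into
 * three constant floats, reusing matching floats collected above where
 * possible; the vector itself then never needs a slot in the globals
 * section. */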
3598 static void ir_builder_split_vector(ir_builder *self, ir_value *vec) {
3599 ir_value* found[3] = { nullptr, nullptr, nullptr };
// must not be written to
if (vec->m_writes.size())
return;
// must not be trying to access individual members
if (vec->m_members[0] || vec->m_members[1] || vec->m_members[2])
return;
// it should actually be used, otherwise it won't be generated anyway
if (vec->m_reads.empty())
return;
// it may only be used directly as a function parameter;
// if we find any other kind of use, cancel the split
for (ir_instr *user : vec->m_reads) {
if ((user->m_opcode < INSTR_CALL0 || user->m_opcode > INSTR_CALL8) && user->m_opcode != VINSTR_NRCALL)
return;
}
vec->m_flags |= IR_FLAG_SPLIT_VECTOR;
3623 // find existing floats making up the split
for (ir_value *c : self->m_const_floats) {
if (!found[0] && c->m_constval.vfloat == vec->m_constval.vvec.x)
found[0] = c;
if (!found[1] && c->m_constval.vfloat == vec->m_constval.vvec.y)
found[1] = c;
if (!found[2] && c->m_constval.vfloat == vec->m_constval.vvec.z)
found[2] = c;
if (found[0] && found[1] && found[2])
break;
}
// generate floats for components not yet found
if (!found[0])
found[0] = self->literalFloat(vec->m_constval.vvec.x, true);
if (!found[1]) {
if (vec->m_constval.vvec.y == vec->m_constval.vvec.x)
found[1] = found[0];
else
found[1] = self->literalFloat(vec->m_constval.vvec.y, true);
}
if (!found[2]) {
if (vec->m_constval.vvec.z == vec->m_constval.vvec.x)
found[2] = found[0];
else if (vec->m_constval.vvec.z == vec->m_constval.vvec.y)
found[2] = found[1];
else
found[2] = self->literalFloat(vec->m_constval.vvec.z, true);
}
3653 // the .members array should be safe to use here
3654 vec->m_members[0] = found[0];
3655 vec->m_members[1] = found[1];
3656 vec->m_members[2] = found[2];
3658 // register the readers for these floats
3659 found[0]->m_reads.insert(found[0]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3660 found[1]->m_reads.insert(found[1]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3661 found[2]->m_reads.insert(found[2]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3664 static void ir_builder_split_vectors(ir_builder *self) {
// member values may be added to self->m_globals during this operation,
// but no new vectors will be added; we iterate by index because C++
// iterators would be invalidated
3668 const size_t count = self->m_globals.size();
3669 for (size_t i = 0; i != count; ++i) {
3670 ir_value *v = self->m_globals[i].get();
3671 if (v->m_vtype != TYPE_VECTOR || !v->m_name.length() || v->m_name[0] != '#')
3673 ir_builder_split_vector(self, v);
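/* Top-level code generation: prepare fields, emit globals (generating each
 * function's metadata along the way), lay out the nil / vinstr-temp /
 * global-temp / common-local regions, generate the statements of every
 * function, and finally write the output file. */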
3677 bool ir_builder::generate(const char *filename)
3679 prog_section_statement_t stmt;
3680 char *lnofile = nullptr;
3682 if (OPTS_FLAG(SPLIT_VECTOR_PARAMETERS)) {
3683 ir_builder_collect_reusables(this);
3684 if (!m_const_floats.empty())
3685 ir_builder_split_vectors(this);
3688 for (auto& fp : m_fields)
3689 ir_builder_prepare_field(m_code.get(), fp.get());
3691 for (auto& gp : m_globals) {
3692 ir_value *global = gp.get();
3693 if (!generateGlobal(global, false)) {
3696 if (global->m_vtype == TYPE_FUNCTION) {
3697 ir_function *func = global->m_constval.vfunc;
3698 if (func && m_max_locals < func->m_allocated_locals &&
3699 !(func->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3701 m_max_locals = func->m_allocated_locals;
3703 if (func && m_max_globaltemps < func->m_globaltemps)
3704 m_max_globaltemps = func->m_globaltemps;
3708 for (auto& fp : m_fields) {
3709 if (!ir_builder_gen_field(this, fp.get()))
3714 m_nil->setCodeAddress(m_code->globals.size());
3715 m_code->globals.push_back(0);
3716 m_code->globals.push_back(0);
3717 m_code->globals.push_back(0);
3719 // generate virtual-instruction temps
3720 for (size_t i = 0; i < IR_MAX_VINSTR_TEMPS; ++i) {
3721 m_vinstr_temp[i]->setCodeAddress(m_code->globals.size());
3722 m_code->globals.push_back(0);
3723 m_code->globals.push_back(0);
3724 m_code->globals.push_back(0);
3727 // generate global temps
3728 m_first_common_globaltemp = m_code->globals.size();
3729 m_code->globals.insert(m_code->globals.end(), m_max_globaltemps, 0);
3734 // generate common locals
3735 m_first_common_local = m_code->globals.size();
3736 m_code->globals.insert(m_code->globals.end(), m_max_locals, 0);
3742 // generate function code
3744 for (auto& gp : m_globals) {
3745 ir_value *global = gp.get();
3746 if (global->m_vtype == TYPE_FUNCTION) {
3747 if (!this->generateGlobalFunctionCode(global))
3752 if (m_code->globals.size() >= 65536) {
3753 irerror(m_globals.back()->m_context,
3754 "This progs file would require more globals than the metadata can handle (%zu). Bailing out.",
3755 m_code->globals.size());
3759 /* DP errors if the last instruction is not an INSTR_DONE. */
3760 if (m_code->statements.back().opcode != INSTR_DONE)
stmt.opcode = INSTR_DONE;
stmt.o1.u1 = 0;
stmt.o2.u1 = 0;
stmt.o3.u1 = 0;
lex_ctx_t last;
last.line = m_code->linenums.back();
3769 last.column = m_code->columnnums.back();
3771 code_push_statement(m_code.get(), &stmt, last);
3774 if (OPTS_OPTION_BOOL(OPTION_PP_ONLY))
3777 if (m_code->statements.size() != m_code->linenums.size()) {
con_err("Linecounter wrong: %zu != %zu\n",
3779 m_code->statements.size(),
3780 m_code->linenums.size());
3781 } else if (OPTS_FLAG(LNO)) {
3783 size_t filelen = strlen(filename);
3785 memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
char *dot = strrchr(lnofile, '.');
if (!dot) {
vec_pop(lnofile); /* drop the trailing NUL so ".lno" is appended cleanly */
} else {
vec_shrinkto(lnofile, dot - lnofile);
}
3792 memcpy(vec_add(lnofile, 5), ".lno", 5);
3795 if (!code_write(m_code.get(), filename, lnofile)) {
3804 /***********************************************************************
* IR DEBUG Dump functions...
3808 #define IND_BUFSZ 1024
3810 static const char *qc_opname(int op)
3812 if (op < 0) return "<INVALID>";
3813 if (op < VINSTR_END)
3814 return util_instr_str[op];
switch (op) {
case VINSTR_END: return "END";
3817 case VINSTR_PHI: return "PHI";
3818 case VINSTR_JUMP: return "JUMP";
3819 case VINSTR_COND: return "COND";
3820 case VINSTR_BITXOR: return "BITXOR";
3821 case VINSTR_BITAND_V: return "BITAND_V";
3822 case VINSTR_BITOR_V: return "BITOR_V";
3823 case VINSTR_BITXOR_V: return "BITXOR_V";
3824 case VINSTR_BITAND_VF: return "BITAND_VF";
3825 case VINSTR_BITOR_VF: return "BITOR_VF";
3826 case VINSTR_BITXOR_VF: return "BITXOR_VF";
3827 case VINSTR_CROSS: return "CROSS";
3828 case VINSTR_NEG_F: return "NEG_F";
3829 case VINSTR_NEG_V: return "NEG_V";
3830 default: return "<UNK>";
3834 void ir_builder::dump(int (*oprintf)(const char*, ...)) const
size_t i;
char indent[IND_BUFSZ];
indent[0] = '\t';
indent[1] = 0;
3841 oprintf("module %s\n", m_name.c_str());
3842 for (i = 0; i < m_globals.size(); ++i)
3845 if (m_globals[i]->m_hasvalue)
3846 oprintf("%s = ", m_globals[i]->m_name.c_str());
3847 m_globals[i].get()->dump(oprintf);
3850 for (i = 0; i < m_functions.size(); ++i)
3851 ir_function_dump(m_functions[i].get(), indent, oprintf);
3852 oprintf("endmodule %s\n", m_name.c_str());
3855 static const char *storenames[] = {
3856 "[global]", "[local]", "[param]", "[value]", "[return]"
3859 void ir_function_dump(ir_function *f, char *ind,
3860 int (*oprintf)(const char*, ...))
size_t i, l, m;
if (f->m_builtin != 0) {
oprintf("%sfunction %s = builtin %i\n", ind, f->m_name.c_str(), -f->m_builtin);
return;
}
3867 oprintf("%sfunction %s\n", ind, f->m_name.c_str());
3868 util_strncat(ind, "\t", IND_BUFSZ-1);
3869 if (f->m_locals.size())
3871 oprintf("%s%i locals:\n", ind, (int)f->m_locals.size());
3872 for (i = 0; i < f->m_locals.size(); ++i) {
3873 oprintf("%s\t", ind);
3874 f->m_locals[i].get()->dump(oprintf);
3878 oprintf("%sliferanges:\n", ind);
3879 for (i = 0; i < f->m_locals.size(); ++i) {
3880 const char *attr = "";
3882 ir_value *v = f->m_locals[i].get();
3883 if (v->m_unique_life && v->m_locked)
3884 attr = "unique,locked ";
else if (v->m_unique_life)
attr = "unique ";
else if (v->m_locked)
attr = "locked ";
3889 oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3890 storenames[v->m_store],
3891 attr, (v->m_callparam ? "callparam " : ""),
3892 (int)v->m_code.local);
3893 if (v->m_life.empty())
3895 for (l = 0; l < v->m_life.size(); ++l) {
3896 oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3899 for (m = 0; m < 3; ++m) {
3900 ir_value *vm = v->m_members[m];
3903 oprintf("%s\t%s: @%i ", ind, vm->m_name.c_str(), (int)vm->m_code.local);
3904 for (l = 0; l < vm->m_life.size(); ++l) {
3905 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3910 for (i = 0; i < f->m_values.size(); ++i) {
3911 const char *attr = "";
3913 ir_value *v = f->m_values[i].get();
3914 if (v->m_unique_life && v->m_locked)
3915 attr = "unique,locked ";
else if (v->m_unique_life)
attr = "unique ";
else if (v->m_locked)
attr = "locked ";
3920 oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3921 storenames[v->m_store],
3922 attr, (v->m_callparam ? "callparam " : ""),
3923 (int)v->m_code.local);
3924 if (v->m_life.empty())
3926 for (l = 0; l < v->m_life.size(); ++l) {
3927 oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3930 for (m = 0; m < 3; ++m) {
3931 ir_value *vm = v->m_members[m];
3934 if (vm->m_unique_life && vm->m_locked)
3935 attr = "unique,locked ";
else if (vm->m_unique_life)
attr = "unique ";
else if (vm->m_locked)
attr = "locked ";
3940 oprintf("%s\t%s: %s@%i ", ind, vm->m_name.c_str(), attr, (int)vm->m_code.local);
3941 for (l = 0; l < vm->m_life.size(); ++l) {
3942 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3947 if (f->m_blocks.size())
3949 oprintf("%slife passes: %i\n", ind, (int)f->m_run_id);
3950 for (i = 0; i < f->m_blocks.size(); ++i) {
3951 ir_block_dump(f->m_blocks[i].get(), ind, oprintf);
3955 ind[strlen(ind)-1] = 0;
3956 oprintf("%sendfunction %s\n", ind, f->m_name.c_str());
3959 void ir_block_dump(ir_block* b, char *ind,
3960 int (*oprintf)(const char*, ...))
size_t i;
oprintf("%s:%s\n", ind, b->m_label.c_str());
3964 util_strncat(ind, "\t", IND_BUFSZ-1);
3966 if (b->m_instr && b->m_instr[0])
3967 oprintf("%s (%i) [entry]\n", ind, (int)(b->m_instr[0]->m_eid-1));
3968 for (i = 0; i < vec_size(b->m_instr); ++i)
3969 ir_instr_dump(b->m_instr[i], ind, oprintf);
3970 ind[strlen(ind)-1] = 0;
3973 static void dump_phi(ir_instr *in, int (*oprintf)(const char*, ...))
3975 oprintf("%s <- phi ", in->_m_ops[0]->m_name.c_str());
3976 for (auto &it : in->m_phi) {
3977 oprintf("([%s] : %s) ", it.from->m_label.c_str(),
3978 it.value->m_name.c_str());
3983 void ir_instr_dump(ir_instr *in, char *ind,
3984 int (*oprintf)(const char*, ...))
size_t i;
const char *comma = nullptr;
3989 oprintf("%s (%i) ", ind, (int)in->m_eid);
3991 if (in->m_opcode == VINSTR_PHI) {
3992 dump_phi(in, oprintf);
3996 util_strncat(ind, "\t", IND_BUFSZ-1);
3998 if (in->_m_ops[0] && (in->_m_ops[1] || in->_m_ops[2])) {
3999 in->_m_ops[0]->dump(oprintf);
4000 if (in->_m_ops[1] || in->_m_ops[2])
4003 if (in->m_opcode == INSTR_CALL0 || in->m_opcode == VINSTR_NRCALL) {
oprintf("CALL%i\t", (int)in->m_params.size());
4006 oprintf("%s\t", qc_opname(in->m_opcode));
4008 if (in->_m_ops[0] && !(in->_m_ops[1] || in->_m_ops[2])) {
4009 in->_m_ops[0]->dump(oprintf);
4014 for (i = 1; i != 3; ++i) {
4015 if (in->_m_ops[i]) {
4018 in->_m_ops[i]->dump(oprintf);
4023 if (in->m_bops[0]) {
4026 oprintf("[%s]", in->m_bops[0]->m_label.c_str());
4030 oprintf("%s[%s]", comma, in->m_bops[1]->m_label.c_str());
4031 if (in->m_params.size()) {
4032 oprintf("\tparams: ");
4033 for (auto &it : in->m_params)
4034 oprintf("%s, ", it->m_name.c_str());
4037 ind[strlen(ind)-1] = 0;
4040 static void ir_value_dump_string(const char *str, int (*oprintf)(const char*, ...))
4043 for (; *str; ++str) {
switch (*str) {
case '\n': oprintf("\\n"); break;
4046 case '\r': oprintf("\\r"); break;
4047 case '\t': oprintf("\\t"); break;
4048 case '\v': oprintf("\\v"); break;
4049 case '\f': oprintf("\\f"); break;
4050 case '\b': oprintf("\\b"); break;
4051 case '\a': oprintf("\\a"); break;
4052 case '\\': oprintf("\\\\"); break;
4053 case '"': oprintf("\\\""); break;
4054 default: oprintf("%c", *str); break;
4060 void ir_value::dump(int (*oprintf)(const char*, ...)) const
4069 oprintf("fn:%s", m_name.c_str());
4072 oprintf("%g", m_constval.vfloat);
4075 oprintf("'%g %g %g'",
4081 oprintf("(entity)");
4084 ir_value_dump_string(m_constval.vstring, oprintf);
4088 oprintf("%i", m_constval.vint);
4093 m_constval.vpointer->m_name.c_str());
4097 oprintf("%s", m_name.c_str());
4101 void ir_value::dumpLife(int (*oprintf)(const char*,...)) const
4103 oprintf("Life of %12s:", m_name.c_str());
4104 for (size_t i = 0; i < m_life.size(); ++i)
4106 oprintf(" + [%i, %i]\n", m_life[i].start, m_life[i].end);