1 #include <stdlib.h>
2 #include <string.h>
3
4 #include "gmqcc.h"
5 #include "ir.h"
6
7 /***********************************************************************
8  * Type sizes used at multiple points in the IR codegen
9  */
10
11 const char *type_name[TYPE_COUNT] = {
12     "void",
13     "string",
14     "float",
15     "vector",
16     "entity",
17     "field",
18     "function",
19     "pointer",
20     "integer",
21     "variant",
22     "struct",
23     "union",
24     "array",
25
26     "nil",
27     "<no-expression>"
28 };
29
30 static size_t type_sizeof_[TYPE_COUNT] = {
31     1, /* TYPE_VOID     */
32     1, /* TYPE_STRING   */
33     1, /* TYPE_FLOAT    */
34     3, /* TYPE_VECTOR   */
35     1, /* TYPE_ENTITY   */
36     1, /* TYPE_FIELD    */
37     1, /* TYPE_FUNCTION */
38     1, /* TYPE_POINTER  */
39     1, /* TYPE_INTEGER  */
40     3, /* TYPE_VARIANT  */
41     0, /* TYPE_STRUCT   */
42     0, /* TYPE_UNION    */
43     0, /* TYPE_ARRAY    */
44     0, /* TYPE_NIL      */
45     0, /* TYPE_NOEXPR   */
46 };
47
48 const uint16_t type_store_instr[TYPE_COUNT] = {
49     INSTR_STORE_F, /* should use I when having integer support */
50     INSTR_STORE_S,
51     INSTR_STORE_F,
52     INSTR_STORE_V,
53     INSTR_STORE_ENT,
54     INSTR_STORE_FLD,
55     INSTR_STORE_FNC,
56     INSTR_STORE_ENT, /* should use I */
57 #if 0
58     INSTR_STORE_I, /* integer type */
59 #else
60     INSTR_STORE_F,
61 #endif
62
63     INSTR_STORE_V, /* variant, should never be accessed */
64
65     VINSTR_END, /* struct */
66     VINSTR_END, /* union  */
67     VINSTR_END, /* array  */
68     VINSTR_END, /* nil    */
69     VINSTR_END, /* noexpr */
70 };
71
72 const uint16_t field_store_instr[TYPE_COUNT] = {
73     INSTR_STORE_FLD,
74     INSTR_STORE_FLD,
75     INSTR_STORE_FLD,
76     INSTR_STORE_V,
77     INSTR_STORE_FLD,
78     INSTR_STORE_FLD,
79     INSTR_STORE_FLD,
80     INSTR_STORE_FLD,
81 #if 0
82     INSTR_STORE_FLD, /* integer type */
83 #else
84     INSTR_STORE_FLD,
85 #endif
86
87     INSTR_STORE_V, /* variant, should never be accessed */
88
89     VINSTR_END, /* struct */
90     VINSTR_END, /* union  */
91     VINSTR_END, /* array  */
92     VINSTR_END, /* nil    */
93     VINSTR_END, /* noexpr */
94 };
95
96 const uint16_t type_storep_instr[TYPE_COUNT] = {
97     INSTR_STOREP_F, /* should use I when having integer support */
98     INSTR_STOREP_S,
99     INSTR_STOREP_F,
100     INSTR_STOREP_V,
101     INSTR_STOREP_ENT,
102     INSTR_STOREP_FLD,
103     INSTR_STOREP_FNC,
104     INSTR_STOREP_ENT, /* should use I */
105 #if 0
106     INSTR_STOREP_ENT, /* integer type */
107 #else
108     INSTR_STOREP_F,
109 #endif
110
111     INSTR_STOREP_V, /* variant, should never be accessed */
112
113     VINSTR_END, /* struct */
114     VINSTR_END, /* union  */
115     VINSTR_END, /* array  */
116     VINSTR_END, /* nil    */
117     VINSTR_END, /* noexpr */
118 };
119
120 const uint16_t type_eq_instr[TYPE_COUNT] = {
121     INSTR_EQ_F, /* should use I when having integer support */
122     INSTR_EQ_S,
123     INSTR_EQ_F,
124     INSTR_EQ_V,
125     INSTR_EQ_E,
126     INSTR_EQ_E, /* FLD has no comparison */
127     INSTR_EQ_FNC,
128     INSTR_EQ_E, /* should use I */
129 #if 0
130     INSTR_EQ_I,
131 #else
132     INSTR_EQ_F,
133 #endif
134
135     INSTR_EQ_V, /* variant, should never be accessed */
136
137     VINSTR_END, /* struct */
138     VINSTR_END, /* union  */
139     VINSTR_END, /* array  */
140     VINSTR_END, /* nil    */
141     VINSTR_END, /* noexpr */
142 };
143
144 const uint16_t type_ne_instr[TYPE_COUNT] = {
145     INSTR_NE_F, /* should use I when having integer support */
146     INSTR_NE_S,
147     INSTR_NE_F,
148     INSTR_NE_V,
149     INSTR_NE_E,
150     INSTR_NE_E, /* FLD has no comparison */
151     INSTR_NE_FNC,
152     INSTR_NE_E, /* should use I */
153 #if 0
154     INSTR_NE_I,
155 #else
156     INSTR_NE_F,
157 #endif
158
159     INSTR_NE_V, /* variant, should never be accessed */
160
161     VINSTR_END, /* struct */
162     VINSTR_END, /* union  */
163     VINSTR_END, /* array  */
164     VINSTR_END, /* nil    */
165     VINSTR_END, /* noexpr */
166 };
167
168 const uint16_t type_not_instr[TYPE_COUNT] = {
169     INSTR_NOT_F, /* should use I when having integer support */
170     VINSTR_END,  /* not to be used, depends on string related -f flags */
171     INSTR_NOT_F,
172     INSTR_NOT_V,
173     INSTR_NOT_ENT,
174     INSTR_NOT_ENT,
175     INSTR_NOT_FNC,
176     INSTR_NOT_ENT, /* should use I */
177 #if 0
178     INSTR_NOT_I, /* integer type */
179 #else
180     INSTR_NOT_F,
181 #endif
182
183     INSTR_NOT_V, /* variant, should never be accessed */
184
185     VINSTR_END, /* struct */
186     VINSTR_END, /* union  */
187     VINSTR_END, /* array  */
188     VINSTR_END, /* nil    */
189     VINSTR_END, /* noexpr */
190 };
191
192 /* protos */
193 static void            ir_function_dump(ir_function*, char *ind, int (*oprintf)(const char*,...));
194
195 static ir_value*       ir_block_create_general_instr(ir_block *self, lex_ctx_t, const char *label,
196                                                      int op, ir_value *a, ir_value *b, qc_type outype);
197 static bool GMQCC_WARN ir_block_create_store(ir_block*, lex_ctx_t, ir_value *target, ir_value *what);
198 static void            ir_block_dump(ir_block*, char *ind, int (*oprintf)(const char*,...));
199
200 static bool            ir_instr_op(ir_instr*, int op, ir_value *value, bool writing);
201 static void            ir_instr_dump(ir_instr* in, char *ind, int (*oprintf)(const char*,...));
202 /* error functions */
203
204 static void irerror(lex_ctx_t ctx, const char *msg, ...)
205 {
206     va_list ap;
207     va_start(ap, msg);
208     con_cvprintmsg(ctx, LVL_ERROR, "internal error", msg, ap);
209     va_end(ap);
210 }
211
212 static bool GMQCC_WARN irwarning(lex_ctx_t ctx, int warntype, const char *fmt, ...)
213 {
214     bool    r;
215     va_list ap;
216     va_start(ap, fmt);
217     r = vcompile_warning(ctx, warntype, fmt, ap);
218     va_end(ap);
219     return r;
220 }
221
222 /***********************************************************************
223  * Vector utility functions
224  */
225
226 static bool GMQCC_WARN vec_ir_value_find(std::vector<ir_value *> &vec, const ir_value *what, size_t *idx)
227 {
228     for (auto &it : vec) {
229         if (it != what)
230             continue;
231         if (idx)
232             *idx = &it - &vec[0];
233         return true;
234     }
235     return false;
236 }
237
238 static bool GMQCC_WARN vec_ir_block_find(std::vector<ir_block *> &vec, ir_block *what, size_t *idx)
239 {
240     for (auto &it : vec) {
241         if (it != what)
242             continue;
243         if (idx)
244             *idx = &it - &vec[0];
245         return true;
246     }
247     return false;
248 }
249
250 static bool GMQCC_WARN vec_ir_instr_find(std::vector<ir_instr *> &vec, ir_instr *what, size_t *idx)
251 {
252     for (auto &it : vec) {
253         if (it != what)
254             continue;
255         if (idx)
256             *idx = &it - &vec[0];
257         return true;
258     }
259     return false;
260 }
261
262 /***********************************************************************
263  * IR Builder
264  */
265
266 static void ir_block_delete_quick(ir_block* self);
267 static void ir_instr_delete_quick(ir_instr *self);
268 static void ir_function_delete_quick(ir_function *self);
269
270 ir_builder::ir_builder(const std::string& modulename)
271 : m_name(modulename),
272   m_code(new code_t)
273 {
274     m_htglobals   = util_htnew(IR_HT_SIZE);
275     m_htfields    = util_htnew(IR_HT_SIZE);
276     m_htfunctions = util_htnew(IR_HT_SIZE);
277
278     m_nil = new ir_value("nil", store_value, TYPE_NIL);
279     m_nil->m_cvq = CV_CONST;
280
281     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
282         /* we write to them, but they're not supposed to be used outside the IR, so
283          * let's not allow the generation of ir_instrs which use these.
284          * So it's a constant noexpr.
285          */
286         m_vinstr_temp[i] = new ir_value("vinstr_temp", store_value, TYPE_NOEXPR);
287         m_vinstr_temp[i]->m_cvq = CV_CONST;
288     }
289 }
290
291 ir_builder::~ir_builder()
292 {
293     util_htdel(m_htglobals);
294     util_htdel(m_htfields);
295     util_htdel(m_htfunctions);
296     for (auto& f : m_functions)
297         ir_function_delete_quick(f.release());
298     m_functions.clear(); // delete them now before deleting the rest:
299
300     delete m_nil;
301
302     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
303         delete m_vinstr_temp[i];
304     }
305
306     m_extparams.clear();
307     m_extparam_protos.clear();
308 }
309
310 ir_function* ir_builder::createFunction(const std::string& name, qc_type outtype)
311 {
312     ir_function *fn = (ir_function*)util_htget(m_htfunctions, name.c_str());
313     if (fn)
314         return nullptr;
315
316     fn = new ir_function(this, outtype);
317     fn->m_name = name;
318     m_functions.emplace_back(fn);
319     util_htset(m_htfunctions, name.c_str(), fn);
320
321     fn->m_value = createGlobal(fn->m_name, TYPE_FUNCTION);
322     if (!fn->m_value) {
323         delete fn;
324         return nullptr;
325     }
326
327     fn->m_value->m_hasvalue = true;
328     fn->m_value->m_outtype = outtype;
329     fn->m_value->m_constval.vfunc = fn;
330     fn->m_value->m_context = fn->m_context;
331
332     return fn;
333 }
334
335 ir_value* ir_builder::createGlobal(const std::string& name, qc_type vtype)
336 {
337     ir_value *ve;
338
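    /* Names beginning with '#' skip the duplicate check below; presumably so
     * that immediates (cf. the "#IMMEDIATE" values created for float literals
     * elsewhere in this file) can share a name without being rejected as
     * duplicates.
     */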
339     if (name[0] != '#')
340     {
341         ve = (ir_value*)util_htget(m_htglobals, name.c_str());
342         if (ve) {
343             return nullptr;
344         }
345     }
346
347     ve = new ir_value(std::string(name), store_global, vtype);
348     m_globals.emplace_back(ve);
349     util_htset(m_htglobals, name.c_str(), ve);
350     return ve;
351 }
352
353 ir_value* ir_builder::get_va_count()
354 {
355     if (m_reserved_va_count)
356         return m_reserved_va_count;
357     return (m_reserved_va_count = createGlobal("reserved:va_count", TYPE_FLOAT));
358 }
359
360 ir_value* ir_builder::createField(const std::string& name, qc_type vtype)
361 {
362     ir_value *ve = (ir_value*)util_htget(m_htfields, name.c_str());
363     if (ve) {
364         return nullptr;
365     }
366
367     ve = new ir_value(std::string(name), store_global, TYPE_FIELD);
368     ve->m_fieldtype = vtype;
369     m_fields.emplace_back(ve);
370     util_htset(m_htfields, name.c_str(), ve);
371     return ve;
372 }
373
374 /***********************************************************************
375  * IR Function
376  */
377
378 static bool ir_function_naive_phi(ir_function*);
379 static void ir_function_enumerate(ir_function*);
380 static bool ir_function_calculate_liferanges(ir_function*);
381 static bool ir_function_allocate_locals(ir_function*);
382
383 ir_function::ir_function(ir_builder* owner_, qc_type outtype_)
384 : m_owner(owner_),
385   m_name("<@unnamed>"),
386   m_outtype(outtype_)
387 {
388     m_context.file = "<@no context>";
389     m_context.line = 0;
390 }
391
392 ir_function::~ir_function()
393 {
394 }
395
396 static void ir_function_delete_quick(ir_function *self)
397 {
398     for (auto& b : self->m_blocks)
399         ir_block_delete_quick(b.release());
400     delete self;
401 }
402
403 static void ir_function_collect_value(ir_function *self, ir_value *v)
404 {
405     self->m_values.emplace_back(v);
406 }
407
408 ir_block* ir_function_create_block(lex_ctx_t ctx, ir_function *self, const char *label)
409 {
410     ir_block* bn = new ir_block(self, label ? std::string(label) : std::string());
411     bn->m_context = ctx;
412     self->m_blocks.emplace_back(bn);
413
414     if ((self->m_flags & IR_FLAG_BLOCK_COVERAGE) && self->m_owner->m_coverage_func)
415         (void)ir_block_create_call(bn, ctx, nullptr, self->m_owner->m_coverage_func, false);
416
417     return bn;
418 }
419
420 static bool instr_is_operation(uint16_t op)
421 {
422     return ( (op >= INSTR_MUL_F  && op <= INSTR_GT) ||
423              (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) ||
424              (op == INSTR_ADDRESS) ||
425              (op >= INSTR_NOT_F  && op <= INSTR_NOT_FNC) ||
426              (op >= INSTR_AND    && op <= INSTR_BITOR) ||
427              (op >= INSTR_CALL0  && op <= INSTR_CALL8) ||
428              (op >= VINSTR_BITAND_V && op <= VINSTR_NEG_V) );
429 }
430
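/* Rough sketch of the store-collapse peephole below, in illustrative IR
 * notation (ops[0] is the output operand):
 *
 *     MUL_F   %tmp, a, b      ; SSA temp, written once
 *     STORE_F dst,  %tmp      ; %tmp read exactly once, by this store
 *
 * becomes
 *
 *     MUL_F   dst, a, b
 *
 * i.e. the operation's output is redirected to the store's target and the
 * STORE is removed. The second peephole folds a NOT feeding a COND by
 * dropping the NOT and swapping the COND's branch targets.
 */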
431 static bool ir_function_pass_peephole(ir_function *self)
432 {
433     for (auto& bp : self->m_blocks) {
434         ir_block *block = bp.get();
435         for (size_t i = 0; i < vec_size(block->m_instr); ++i) {
436             ir_instr *inst;
437             inst = block->m_instr[i];
438
439             if (i >= 1 &&
440                 (inst->m_opcode >= INSTR_STORE_F &&
441                  inst->m_opcode <= INSTR_STORE_FNC))
442             {
443                 ir_instr *store;
444                 ir_instr *oper;
445                 ir_value *value;
446
447                 store = inst;
448
449                 oper  = block->m_instr[i-1];
450                 if (!instr_is_operation(oper->m_opcode))
451                     continue;
452
453                 /* Don't change semantics of MUL_VF in engines where these may not alias. */
454                 if (OPTS_FLAG(LEGACY_VECTOR_MATHS)) {
455                     if (oper->m_opcode == INSTR_MUL_VF && oper->_m_ops[2]->m_memberof == oper->_m_ops[1])
456                         continue;
457                     if (oper->m_opcode == INSTR_MUL_FV && oper->_m_ops[1]->m_memberof == oper->_m_ops[2])
458                         continue;
459                 }
460
461                 value = oper->_m_ops[0];
462
463                 /* only do it for SSA values */
464                 if (value->m_store != store_value)
465                     continue;
466
467                 /* don't optimize out the temp if it's used later again */
468                 if (value->m_reads.size() != 1)
469                     continue;
470
471                 /* The very next store must use this value */
472                 if (value->m_reads[0] != store)
473                     continue;
474
475                 /* And of course the store must _read_ from it, so it's in
476                  * OP 1 */
477                 if (store->_m_ops[1] != value)
478                     continue;
479
480                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
481                 (void)!ir_instr_op(oper, 0, store->_m_ops[0], true);
482
483                 vec_remove(block->m_instr, i, 1);
484                 delete store;
485             }
486             else if (inst->m_opcode == VINSTR_COND)
487             {
488                 /* A COND on a value produced by a NOT can drop the NOT
489                  * and swap the COND's branch targets instead.
490                  */
491                 while (true) {
492                     ir_block *tmp;
493                     size_t    inotid;
494                     ir_instr *inot;
495                     ir_value *value;
496                     value = inst->_m_ops[0];
497
498                     if (value->m_store != store_value || value->m_reads.size() != 1 || value->m_reads[0] != inst)
499                         break;
500
501                     inot = value->m_writes[0];
502                     if (inot->_m_ops[0] != value ||
503                         inot->m_opcode < INSTR_NOT_F ||
504                         inot->m_opcode > INSTR_NOT_FNC ||
505                         inot->m_opcode == INSTR_NOT_V || /* can't do these */
506                         inot->m_opcode == INSTR_NOT_S)
507                     {
508                         break;
509                     }
510
511                     /* count */
512                     ++opts_optimizationcount[OPTIM_PEEPHOLE];
513                     /* change operand */
514                     (void)!ir_instr_op(inst, 0, inot->_m_ops[1], false);
515                     /* remove NOT */
516                     tmp = inot->m_owner;
517                     for (inotid = 0; inotid < vec_size(tmp->m_instr); ++inotid) {
518                         if (tmp->m_instr[inotid] == inot)
519                             break;
520                     }
521                     if (inotid >= vec_size(tmp->m_instr)) {
522                         compile_error(inst->m_context, "sanity-check failed: failed to find instruction to optimize out");
523                         return false;
524                     }
525                     vec_remove(tmp->m_instr, inotid, 1);
526                     delete inot;
527                     /* swap ontrue/onfalse */
528                     tmp = inst->m_bops[0];
529                     inst->m_bops[0] = inst->m_bops[1];
530                     inst->m_bops[1] = tmp;
531                 }
532                 continue;
533             }
534         }
535     }
536
537     return true;
538 }
539
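/* Rough sketch of the tail-recursion rewrite below (illustrative only):
 *
 *     %r = CALLn self, ...
 *     RETURN %r
 *
 * at the end of a block becomes stores of the call's parameters into the
 * function's locals followed by a JUMP back to the entry block
 * (m_blocks[0]), turning the recursion into a loop. The intermediate STORE
 * of the unoptimized CALL/STORE/RETURN form is folded away first.
 */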
540 static bool ir_function_pass_tailrecursion(ir_function *self)
541 {
542     size_t p;
543
544     for (auto& bp : self->m_blocks) {
545         ir_block *block = bp.get();
546
547         ir_value *funcval;
548         ir_instr *ret, *call, *store = nullptr;
549
550         if (!block->m_final || vec_size(block->m_instr) < 2)
551             continue;
552
553         ret = block->m_instr[vec_size(block->m_instr)-1];
554         if (ret->m_opcode != INSTR_DONE && ret->m_opcode != INSTR_RETURN)
555             continue;
556
557         call = block->m_instr[vec_size(block->m_instr)-2];
558         if (call->m_opcode >= INSTR_STORE_F && call->m_opcode <= INSTR_STORE_FNC) {
559             /* account for the unoptimized
560              * CALL
561              * STORE %return, %tmp
562              * RETURN %tmp
563              * version
564              */
565             if (vec_size(block->m_instr) < 3)
566                 continue;
567
568             store = call;
569             call = block->m_instr[vec_size(block->m_instr)-3];
570         }
571
572         if (call->m_opcode < INSTR_CALL0 || call->m_opcode > INSTR_CALL8)
573             continue;
574
575         if (store) {
576             /* optimize out the STORE */
577             if (ret->_m_ops[0]   &&
578                 ret->_m_ops[0]   == store->_m_ops[0] &&
579                 store->_m_ops[1] == call->_m_ops[0])
580             {
581                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
582                 call->_m_ops[0] = store->_m_ops[0];
583                 vec_remove(block->m_instr, vec_size(block->m_instr) - 2, 1);
584                 delete store;
585             }
586             else
587                 continue;
588         }
589
590         if (!call->_m_ops[0])
591             continue;
592
593         funcval = call->_m_ops[1];
594         if (!funcval)
595             continue;
596         if (funcval->m_vtype != TYPE_FUNCTION || funcval->m_constval.vfunc != self)
597             continue;
598
599         /* now we have a CALL and a RET, check if it's a tailcall */
600         if (ret->_m_ops[0] && call->_m_ops[0] != ret->_m_ops[0])
601             continue;
602
603         ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
604         vec_shrinkby(block->m_instr, 2);
605
606         block->m_final = false; /* open it back up */
607
608         /* emit parameter-stores */
609         for (p = 0; p < call->m_params.size(); ++p) {
610             /* assert(call->params_count <= self->locals_count); */
611             if (!ir_block_create_store(block, call->m_context, self->m_locals[p].get(), call->m_params[p])) {
612                 irerror(call->m_context, "failed to create tailcall store instruction for parameter %i", (int)p);
613                 return false;
614             }
615         }
616         if (!ir_block_create_jump(block, call->m_context, self->m_blocks[0].get())) {
617             irerror(call->m_context, "failed to create tailcall jump");
618             return false;
619         }
620
621         delete call;
622         delete ret;
623     }
624
625     return true;
626 }
627
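/* Finalization pipeline, roughly: warn about unused locals, run the peephole
 * and tail-recursion passes (when those optimizations are enabled), lower PHI
 * nodes via ir_function_naive_phi, materialize vector members, enumerate
 * instructions, compute life ranges, and finally allocate local slots.
 */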
628 bool ir_function_finalize(ir_function *self)
629 {
630     if (self->m_builtin)
631         return true;
632
633     for (auto& lp : self->m_locals) {
634         ir_value *v = lp.get();
635         if (v->m_reads.empty() && v->m_writes.size() && !(v->m_flags & IR_FLAG_NOREF)) {
636             // if it's a vector, check that all its members are unused before
637             // claiming it's unused; otherwise skip the vector entirely
638             if (v->m_vtype == TYPE_VECTOR)
639             {
640                 size_t mask = (1 << 3) - 1, bits = 0;
641                 for (size_t i = 0; i < 3; i++)
642                     if (!v->m_members[i] || (v->m_members[i]->m_reads.empty()
643                         && v->m_members[i]->m_writes.size()))
644                         bits |= (1 << i);
645                 // all components are unused so just report the vector
646                 if (bits == mask && irwarning(v->m_context, WARN_UNUSED_VARIABLE,
647                     "unused variable: `%s`", v->m_name.c_str()))
648                     return false;
649                 else if (bits != mask)
650                     // individual components are unused so mention them
651                     for (size_t i = 0; i < 3; i++)
652                         if ((bits & (1 << i))
653                             && irwarning(v->m_context, WARN_UNUSED_COMPONENT,
654                                 "unused vector component: `%s.%c`", v->m_name.c_str(), "xyz"[i]))
655                             return false;
656             }
657             // just a standard variable
658             else if (irwarning(v->m_context, WARN_UNUSED_VARIABLE,
659                     "unused variable: `%s`", v->m_name.c_str())) return false;
660         }
661     }
662
663     if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
664         if (!ir_function_pass_peephole(self)) {
665             irerror(self->m_context, "generic optimization pass broke something in `%s`", self->m_name.c_str());
666             return false;
667         }
668     }
669
670     if (OPTS_OPTIMIZATION(OPTIM_TAIL_RECURSION)) {
671         if (!ir_function_pass_tailrecursion(self)) {
672             irerror(self->m_context, "tail-recursion optimization pass broke something in `%s`", self->m_name.c_str());
673             return false;
674         }
675     }
676
677     if (!ir_function_naive_phi(self)) {
678         irerror(self->m_context, "internal error: ir_function_naive_phi failed");
679         return false;
680     }
681
682     for (auto& lp : self->m_locals) {
683         ir_value *v = lp.get();
684         if (v->m_vtype == TYPE_VECTOR ||
685             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
686         {
687             v->vectorMember(0);
688             v->vectorMember(1);
689             v->vectorMember(2);
690         }
691     }
692     for (auto& vp : self->m_values) {
693         ir_value *v = vp.get();
694         if (v->m_vtype == TYPE_VECTOR ||
695             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
696         {
697             v->vectorMember(0);
698             v->vectorMember(1);
699             v->vectorMember(2);
700         }
701     }
702
703     ir_function_enumerate(self);
704
705     if (!ir_function_calculate_liferanges(self))
706         return false;
707     if (!ir_function_allocate_locals(self))
708         return false;
709     return true;
710 }
711
712 ir_value* ir_function_create_local(ir_function *self, const std::string& name, qc_type vtype, bool param)
713 {
714     ir_value *ve;
715
716     if (param &&
717         !self->m_locals.empty() &&
718         self->m_locals.back()->m_store != store_param)
719     {
720         irerror(self->m_context, "cannot add parameters after adding locals");
721         return nullptr;
722     }
723
724     ve = new ir_value(std::string(name), (param ? store_param : store_local), vtype);
725     if (param)
726         ve->m_locked = true;
727     self->m_locals.emplace_back(ve);
728     return ve;
729 }
730
731 /***********************************************************************
732  * IR Block
733  */
734
735 ir_block::ir_block(ir_function* owner, const std::string& name)
736 : m_owner(owner),
737   m_label(name)
738 {
739     m_context.file = "<@no context>";
740     m_context.line = 0;
741 }
742
743 ir_block::~ir_block()
744 {
745     for (size_t i = 0; i != vec_size(m_instr); ++i)
746         delete m_instr[i];
747     vec_free(m_instr);
748     vec_free(m_exits);
749 }
750
751 static void ir_block_delete_quick(ir_block* self)
752 {
753     size_t i;
754     for (i = 0; i != vec_size(self->m_instr); ++i)
755         ir_instr_delete_quick(self->m_instr[i]);
756     vec_free(self->m_instr);
757     delete self;
758 }
759
760 /***********************************************************************
761  * IR Instructions
762  */
763
764 ir_instr::ir_instr(lex_ctx_t ctx, ir_block* owner_, int op)
765 : m_opcode(op),
766   m_context(ctx),
767   m_owner(owner_)
768 {
769 }
770
771 ir_instr::~ir_instr()
772 {
773     // The following calls can only delete from
774     // vectors; we still want to delete this instruction,
775     // so ignore the return value. Since with the warn_unused_result attribute
776     // gcc does not accept a plain (void)foo(); to silence the warning,
777     // we improvise and use (void)!foo(); below.
778     for (auto &it : m_phi) {
779         size_t idx;
780         if (vec_ir_instr_find(it.value->m_writes, this, &idx))
781             it.value->m_writes.erase(it.value->m_writes.begin() + idx);
782         if (vec_ir_instr_find(it.value->m_reads, this, &idx))
783             it.value->m_reads.erase(it.value->m_reads.begin() + idx);
784     }
785     for (auto &it : m_params) {
786         size_t idx;
787         if (vec_ir_instr_find(it->m_writes, this, &idx))
788             it->m_writes.erase(it->m_writes.begin() + idx);
789         if (vec_ir_instr_find(it->m_reads, this, &idx))
790             it->m_reads.erase(it->m_reads.begin() + idx);
791     }
792     (void)!ir_instr_op(this, 0, nullptr, false);
793     (void)!ir_instr_op(this, 1, nullptr, false);
794     (void)!ir_instr_op(this, 2, nullptr, false);
795 }
796
797 static void ir_instr_delete_quick(ir_instr *self)
798 {
799     self->m_phi.clear();
800     self->m_params.clear();
801     self->_m_ops[0] = nullptr;
802     self->_m_ops[1] = nullptr;
803     self->_m_ops[2] = nullptr;
804     delete self;
805 }
806
807 static bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing)
808 {
809     if (v && v->m_vtype == TYPE_NOEXPR) {
810         irerror(self->m_context, "tried to use a NOEXPR value");
811         return false;
812     }
813
814     if (self->_m_ops[op]) {
815         size_t idx;
816         if (writing && vec_ir_instr_find(self->_m_ops[op]->m_writes, self, &idx))
817             self->_m_ops[op]->m_writes.erase(self->_m_ops[op]->m_writes.begin() + idx);
818         else if (vec_ir_instr_find(self->_m_ops[op]->m_reads, self, &idx))
819             self->_m_ops[op]->m_reads.erase(self->_m_ops[op]->m_reads.begin() + idx);
820     }
821     if (v) {
822         if (writing)
823             v->m_writes.push_back(self);
824         else
825             v->m_reads.push_back(self);
826     }
827     self->_m_ops[op] = v;
828     return true;
829 }
830
831 /***********************************************************************
832  * IR Value
833  */
834
835 void ir_value::setCodeAddress(int32_t gaddr)
836 {
837     m_code.globaladdr = gaddr;
838     if (m_members[0]) m_members[0]->m_code.globaladdr = gaddr;
839     if (m_members[1]) m_members[1]->m_code.globaladdr = gaddr;
840     if (m_members[2]) m_members[2]->m_code.globaladdr = gaddr;
841 }
842
843 int32_t ir_value::codeAddress() const
844 {
845     if (m_store == store_return)
846         return OFS_RETURN + m_code.addroffset;
847     return m_code.globaladdr + m_code.addroffset;
848 }
849
850 ir_value::ir_value(std::string&& name_, store_type store_, qc_type vtype_)
851     : m_name(move(name_))
852     , m_vtype(vtype_)
853     , m_store(store_)
854 {
855     m_fieldtype = TYPE_VOID;
856     m_outtype = TYPE_VOID;
857     m_flags = 0;
858
859     m_cvq          = CV_NONE;
860     m_hasvalue     = false;
861     m_context.file = "<@no context>";
862     m_context.line = 0;
863
864     memset(&m_constval, 0, sizeof(m_constval));
865     memset(&m_code,     0, sizeof(m_code));
866
867     m_members[0] = nullptr;
868     m_members[1] = nullptr;
869     m_members[2] = nullptr;
870     m_memberof = nullptr;
871
872     m_unique_life = false;
873     m_locked = false;
874     m_callparam  = false;
875 }
876
877 ir_value::ir_value(ir_function *owner, std::string&& name, store_type storetype, qc_type vtype)
878     : ir_value(move(name), storetype, vtype)
879 {
880     ir_function_collect_value(owner, this);
881 }
882
883 ir_value::~ir_value()
884 {
885     size_t i;
886     if (m_hasvalue) {
887         if (m_vtype == TYPE_STRING)
888             mem_d((void*)m_constval.vstring);
889     }
890     if (!(m_flags & IR_FLAG_SPLIT_VECTOR)) {
891         for (i = 0; i < 3; ++i) {
892             if (m_members[i])
893                 delete m_members[i];
894         }
895     }
896 }
897
898
899 /*  helper function */
900 ir_value* ir_builder::literalFloat(float value, bool add_to_list) {
901     ir_value *v = new ir_value("#IMMEDIATE", store_global, TYPE_FLOAT);
902     v->m_flags |= IR_FLAG_ERASABLE;
903     v->m_hasvalue = true;
904     v->m_cvq = CV_CONST;
905     v->m_constval.vfloat = value;
906
907     m_globals.emplace_back(v);
908     if (add_to_list)
909         m_const_floats.emplace_back(v);
910     return v;
911 }
912
913 ir_value* ir_value::vectorMember(unsigned int member)
914 {
915     std::string name;
916     ir_value *m;
917     if (member >= 3)
918         return nullptr;
919
920     if (m_members[member])
921         return m_members[member];
922
923     if (!m_name.empty()) {
924         char member_name[3] = { '_', char('x' + member), 0 };
925         name = m_name + member_name;
926     }
927
928     if (m_vtype == TYPE_VECTOR)
929     {
930         m = new ir_value(move(name), m_store, TYPE_FLOAT);
931         if (!m)
932             return nullptr;
933         m->m_context = m_context;
934
935         m_members[member] = m;
936         m->m_code.addroffset = member;
937     }
938     else if (m_vtype == TYPE_FIELD)
939     {
940         if (m_fieldtype != TYPE_VECTOR)
941             return nullptr;
942         m = new ir_value(move(name), m_store, TYPE_FIELD);
943         if (!m)
944             return nullptr;
945         m->m_fieldtype = TYPE_FLOAT;
946         m->m_context = m_context;
947
948         m_members[member] = m;
949         m->m_code.addroffset = member;
950     }
951     else
952     {
953         irerror(m_context, "invalid member access on %s", m_name.c_str());
954         return nullptr;
955     }
956
957     m->m_memberof = this;
958     return m;
959 }
960
961 size_t ir_value::size() const {
962     if (m_vtype == TYPE_FIELD && m_fieldtype == TYPE_VECTOR)
963         return type_sizeof_[TYPE_VECTOR];
964     return type_sizeof_[m_vtype];
965 }
966
967 bool ir_value::setFloat(float f)
968 {
969     if (m_vtype != TYPE_FLOAT)
970         return false;
971     m_constval.vfloat = f;
972     m_hasvalue = true;
973     return true;
974 }
975
976 bool ir_value::setFunc(int f)
977 {
978     if (m_vtype != TYPE_FUNCTION)
979         return false;
980     m_constval.vint = f;
981     m_hasvalue = true;
982     return true;
983 }
984
985 bool ir_value::setVector(vec3_t v)
986 {
987     if (m_vtype != TYPE_VECTOR)
988         return false;
989     m_constval.vvec = v;
990     m_hasvalue = true;
991     return true;
992 }
993
994 bool ir_value::setField(ir_value *fld)
995 {
996     if (m_vtype != TYPE_FIELD)
997         return false;
998     m_constval.vpointer = fld;
999     m_hasvalue = true;
1000     return true;
1001 }
1002
1003 bool ir_value::setString(const char *str)
1004 {
1005     if (m_vtype != TYPE_STRING)
1006         return false;
1007     m_constval.vstring = util_strdupe(str);
1008     m_hasvalue = true;
1009     return true;
1010 }
1011
1012 #if 0
1013 bool ir_value::setInt(int i)
1014 {
1015     if (m_vtype != TYPE_INTEGER)
1016         return false;
1017     m_constval.vint = i;
1018     m_hasvalue = true;
1019     return true;
1020 }
1021 #endif
1022
1023 bool ir_value::lives(size_t at)
1024 {
1025     for (auto& l : m_life) {
1026         if (l.start <= at && at <= l.end)
1027             return true;
1028         if (l.start > at) /* since it's ordered */
1029             return false;
1030     }
1031     return false;
1032 }
1033
1034 bool ir_value::insertLife(size_t idx, ir_life_entry_t e)
1035 {
1036     m_life.insert(m_life.begin() + idx, e);
1037     return true;
1038 }
1039
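/* Life ranges are an ordered list of [start, end] intervals. setAlive(s),
 * roughly: return false if s is already covered, extend an adjacent range
 * (merging two ranges when the gap between them closes), or insert a new
 * single-point range [s, s] at the right position.
 * Illustrative example: {[2,4], [8,9]}; setAlive(5) extends the first range
 * to [2,5]; setAlive(7) then setAlive(6) would merge everything into [2,9].
 */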
1040 bool ir_value::setAlive(size_t s)
1041 {
1042     size_t i;
1043     const size_t vs = m_life.size();
1044     ir_life_entry_t *life_found = nullptr;
1045     ir_life_entry_t *before = nullptr;
1046     ir_life_entry_t new_entry;
1047
1048     /* Find the first range >= s */
1049     for (i = 0; i < vs; ++i)
1050     {
1051         before = life_found;
1052         life_found = &m_life[i];
1053         if (life_found->start > s)
1054             break;
1055     }
1056     /* nothing found? append */
1057     if (i == vs) {
1058         ir_life_entry_t e;
1059         if (life_found && life_found->end+1 == s)
1060         {
1061             /* previous life range can be merged in */
1062             life_found->end++;
1063             return true;
1064         }
1065         if (life_found && life_found->end >= s)
1066             return false;
1067         e.start = e.end = s;
1068         m_life.emplace_back(e);
1069         return true;
1070     }
1071     /* found */
1072     if (before)
1073     {
1074         if (before->end + 1 == s &&
1075             life_found->start - 1 == s)
1076         {
1077             /* merge */
1078             before->end = life_found->end;
1079             m_life.erase(m_life.begin()+i);
1080             return true;
1081         }
1082         if (before->end + 1 == s)
1083         {
1084             /* extend before */
1085             before->end++;
1086             return true;
1087         }
1088         /* already contained */
1089         if (before->end >= s)
1090             return false;
1091     }
1092     /* extend */
1093     if (life_found->start - 1 == s)
1094     {
1095         life_found->start--;
1096         return true;
1097     }
1098     /* insert a new entry */
1099     new_entry.start = new_entry.end = s;
1100     return insertLife(i, new_entry);
1101 }
1102
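/* mergeLife unions another value's (ordered, disjoint) interval list into
 * this one, extending and coalescing overlapping or adjacent entries.
 * Illustrative example: {[1,3], [10,12]} merged with {[4,6], [11,20]} yields
 * {[1,6], [10,20]}: [4,6] is adjacent to [1,3], and [11,20] overlaps [10,12].
 */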
1103 bool ir_value::mergeLife(const ir_value *other)
1104 {
1105     size_t i, myi;
1106
1107     if (other->m_life.empty())
1108         return true;
1109
1110     if (m_life.empty()) {
1111         m_life = other->m_life;
1112         return true;
1113     }
1114
1115     myi = 0;
1116     for (i = 0; i < other->m_life.size(); ++i)
1117     {
1118         const ir_life_entry_t &otherlife = other->m_life[i];
1119         while (true)
1120         {
1121             ir_life_entry_t *entry = &m_life[myi];
1122
1123             if (otherlife.end+1 < entry->start)
1124             {
1125                 /* adding an interval before entry */
1126                 if (!insertLife(myi, otherlife))
1127                     return false;
1128                 ++myi;
1129                 break;
1130             }
1131
1132             if (otherlife.start <  entry->start &&
1133                 otherlife.end+1 >= entry->start)
1134             {
1135                 /* starts earlier and overlaps */
1136                 entry->start = otherlife.start;
1137             }
1138
1139             if (otherlife.end   >  entry->end &&
1140                 otherlife.start <= entry->end+1)
1141             {
1142                 /* ends later and overlaps */
1143                 entry->end = otherlife.end;
1144             }
1145
1146             /* see if our change combines it with the next ranges */
1147             while (myi+1 < m_life.size() &&
1148                    entry->end+1 >= m_life[1+myi].start)
1149             {
1150                 /* overlaps with (myi+1) */
1151                 if (entry->end < m_life[1+myi].end)
1152                     entry->end = m_life[1+myi].end;
1153                 m_life.erase(m_life.begin() + (myi + 1));
1154                 entry = &m_life[myi];
1155             }
1156
1157             /* see if we're after the entry */
1158             if (otherlife.start > entry->end)
1159             {
1160                 ++myi;
1161                 /* append if we're at the end */
1162                 if (myi >= m_life.size()) {
1163                     m_life.emplace_back(otherlife);
1164                     break;
1165                 }
1166                 /* otherwise check the next range */
1167                 continue;
1168             }
1169             break;
1170         }
1171     }
1172     return true;
1173 }
1174
1175 static bool ir_values_overlap(const ir_value *a, const ir_value *b)
1176 {
1177     /* For any life entry in A see if it overlaps with
1178      * any life entry in B.
1179      * Note that the life entries are ordered, so we can use a
1180      * more efficient algorithm here than naively translating the
1181      * statement above.
1182      */
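    /* Illustrative walk-through: A = {[1,5], [20,25]}, B = {[6,10], [24,30]}.
     * [1,5] vs [6,10] don't overlap, so A advances; [20,25] vs [6,10] don't
     * overlap, so B advances; [20,25] vs [24,30] satisfy the condition below
     * and we return true. Note the strict '<': ranges that merely touch at an
     * endpoint do not count as overlapping.
     */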
1183
1184     const ir_life_entry_t *la, *lb, *enda, *endb;
1185
1186     /* first of all, if either has no life range, they cannot clash */
1187     if (a->m_life.empty() || b->m_life.empty())
1188         return false;
1189
1190     la = &a->m_life.front();
1191     lb = &b->m_life.front();
1192     enda = &a->m_life.back() + 1;
1193     endb = &b->m_life.back() + 1;
1194     while (true)
1195     {
1196         /* check if the entries overlap, for that,
1197          * both must start before the other one ends.
1198          */
1199         if (la->start < lb->end &&
1200             lb->start < la->end)
1201         {
1202             return true;
1203         }
1204
1205         /* entries are ordered
1206          * one entry is earlier than the other
1207          * that earlier entry will be moved forward
1208          */
1209         if (la->start < lb->start)
1210         {
1211             /* order: A B, move A forward
1212              * check if we hit the end with A
1213              */
1214             if (++la == enda)
1215                 break;
1216         }
1217         else /* if (lb->start < la->start)  actually <= */
1218         {
1219             /* order: B A, move B forward
1220              * check if we hit the end with B
1221              */
1222             if (++lb == endb)
1223                 break;
1224         }
1225     }
1226     return false;
1227 }
1228
1229 /***********************************************************************
1230  * IR main operations
1231  */
1232
1233 static bool ir_check_unreachable(ir_block *self)
1234 {
1235     /* The IR should never have to deal with unreachable code */
1236     if (!self->m_final/* || OPTS_FLAG(ALLOW_UNREACHABLE_CODE)*/)
1237         return true;
1238     irerror(self->m_context, "unreachable statement (%s)", self->m_label.c_str());
1239     return false;
1240 }
1241
1242 bool ir_block_create_store_op(ir_block *self, lex_ctx_t ctx, int op, ir_value *target, ir_value *what)
1243 {
1244     ir_instr *in;
1245     if (!ir_check_unreachable(self))
1246         return false;
1247
1248     if (target->m_store == store_value &&
1249         (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC))
1250     {
1251         irerror(self->m_context, "cannot store to an SSA value");
1252         irerror(self->m_context, "trying to store: %s <- %s", target->m_name.c_str(), what->m_name.c_str());
1253         irerror(self->m_context, "instruction: %s", util_instr_str[op]);
1254         return false;
1255     }
1256
1257     in = new ir_instr(ctx, self, op);
1258     if (!in)
1259         return false;
1260
1261     if (!ir_instr_op(in, 0, target, (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC)) ||
1262         !ir_instr_op(in, 1, what, false))
1263     {
1264         delete in;
1265         return false;
1266     }
1267     vec_push(self->m_instr, in);
1268     return true;
1269 }
1270
1271 bool ir_block_create_state_op(ir_block *self, lex_ctx_t ctx, ir_value *frame, ir_value *think)
1272 {
1273     ir_instr *in;
1274     if (!ir_check_unreachable(self))
1275         return false;
1276
1277     in = new ir_instr(ctx, self, INSTR_STATE);
1278     if (!in)
1279         return false;
1280
1281     if (!ir_instr_op(in, 0, frame, false) ||
1282         !ir_instr_op(in, 1, think, false))
1283     {
1284         delete in;
1285         return false;
1286     }
1287     vec_push(self->m_instr, in);
1288     return true;
1289 }
1290
1291 static bool ir_block_create_store(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1292 {
1293     int op = 0;
1294     qc_type vtype;
1295     if (target->m_vtype == TYPE_VARIANT)
1296         vtype = what->m_vtype;
1297     else
1298         vtype = target->m_vtype;
1299
1300 #if 0
1301     if      (vtype == TYPE_FLOAT   && what->m_vtype == TYPE_INTEGER)
1302         op = INSTR_CONV_ITOF;
1303     else if (vtype == TYPE_INTEGER && what->m_vtype == TYPE_FLOAT)
1304         op = INSTR_CONV_FTOI;
1305 #endif
1306         op = type_store_instr[vtype];
1307
1308     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1309         if (op == INSTR_STORE_FLD && what->m_fieldtype == TYPE_VECTOR)
1310             op = INSTR_STORE_V;
1311     }
1312
1313     return ir_block_create_store_op(self, ctx, op, target, what);
1314 }
1315
1316 bool ir_block_create_storep(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1317 {
1318     int op = 0;
1319     qc_type vtype;
1320
1321     if (target->m_vtype != TYPE_POINTER)
1322         return false;
1323
1324     /* storing through a pointer - target is a pointer, the type must be
1325      * inferred from source
1326      */
1327     vtype = what->m_vtype;
1328
1329     op = type_storep_instr[vtype];
1330     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1331         if (op == INSTR_STOREP_FLD && what->m_fieldtype == TYPE_VECTOR)
1332             op = INSTR_STOREP_V;
1333     }
1334
1335     return ir_block_create_store_op(self, ctx, op, target, what);
1336 }
1337
1338 bool ir_block_create_return(ir_block *self, lex_ctx_t ctx, ir_value *v)
1339 {
1340     ir_instr *in;
1341     if (!ir_check_unreachable(self))
1342         return false;
1343
1344     self->m_final = true;
1345
1346     self->m_is_return = true;
1347     in = new ir_instr(ctx, self, INSTR_RETURN);
1348     if (!in)
1349         return false;
1350
1351     if (v && !ir_instr_op(in, 0, v, false)) {
1352         delete in;
1353         return false;
1354     }
1355
1356     vec_push(self->m_instr, in);
1357     return true;
1358 }
1359
1360 bool ir_block_create_if(ir_block *self, lex_ctx_t ctx, ir_value *v,
1361                         ir_block *ontrue, ir_block *onfalse)
1362 {
1363     ir_instr *in;
1364     if (!ir_check_unreachable(self))
1365         return false;
1366     self->m_final = true;
1367     /*in = new ir_instr(ctx, self, (v->m_vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/
1368     in = new ir_instr(ctx, self, VINSTR_COND);
1369     if (!in)
1370         return false;
1371
1372     if (!ir_instr_op(in, 0, v, false)) {
1373         delete in;
1374         return false;
1375     }
1376
1377     in->m_bops[0] = ontrue;
1378     in->m_bops[1] = onfalse;
1379
1380     vec_push(self->m_instr, in);
1381
1382     vec_push(self->m_exits, ontrue);
1383     vec_push(self->m_exits, onfalse);
1384     ontrue->m_entries.push_back(self);
1385     onfalse->m_entries.push_back(self);
1386     return true;
1387 }
1388
1389 bool ir_block_create_jump(ir_block *self, lex_ctx_t ctx, ir_block *to)
1390 {
1391     ir_instr *in;
1392     if (!ir_check_unreachable(self))
1393         return false;
1394     self->m_final = true;
1395     in = new ir_instr(ctx, self, VINSTR_JUMP);
1396     if (!in)
1397         return false;
1398
1399     in->m_bops[0] = to;
1400     vec_push(self->m_instr, in);
1401
1402     vec_push(self->m_exits, to);
1403     to->m_entries.push_back(self);
1404     return true;
1405 }
1406
1407 bool ir_block_create_goto(ir_block *self, lex_ctx_t ctx, ir_block *to)
1408 {
1409     self->m_owner->m_flags |= IR_FLAG_HAS_GOTO;
1410     return ir_block_create_jump(self, ctx, to);
1411 }
1412
1413 ir_instr* ir_block_create_phi(ir_block *self, lex_ctx_t ctx, const char *label, qc_type ot)
1414 {
1415     ir_value *out;
1416     ir_instr *in;
1417     if (!ir_check_unreachable(self))
1418         return nullptr;
1419     in = new ir_instr(ctx, self, VINSTR_PHI);
1420     if (!in)
1421         return nullptr;
1422     out = new ir_value(self->m_owner, label ? label : "", store_value, ot);
1423     if (!out) {
1424         delete in;
1425         return nullptr;
1426     }
1427     if (!ir_instr_op(in, 0, out, true)) {
1428         delete in;
1429         return nullptr;
1430     }
1431     vec_push(self->m_instr, in);
1432     return in;
1433 }
1434
1435 ir_value* ir_phi_value(ir_instr *self)
1436 {
1437     return self->_m_ops[0];
1438 }
1439
1440 void ir_phi_add(ir_instr* self, ir_block *b, ir_value *v)
1441 {
1442     ir_phi_entry_t pe;
1443
1444     if (!vec_ir_block_find(self->m_owner->m_entries, b, nullptr)) {
1445         // Must not be possible to cause this, otherwise the AST
1446         // is doing something wrong.
1447         irerror(self->m_context, "Invalid entry block for PHI");
1448         exit(EXIT_FAILURE);
1449     }
1450
1451     pe.value = v;
1452     pe.from = b;
1453     v->m_reads.push_back(self);
1454     self->m_phi.push_back(pe);
1455 }
1456
1457 /* call related code */
1458 ir_instr* ir_block_create_call(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *func, bool noreturn)
1459 {
1460     ir_value *out;
1461     ir_instr *in;
1462     if (!ir_check_unreachable(self))
1463         return nullptr;
1464     in = new ir_instr(ctx, self, (noreturn ? VINSTR_NRCALL : INSTR_CALL0));
1465     if (!in)
1466         return nullptr;
1467     if (noreturn) {
1468         self->m_final = true;
1469         self->m_is_return = true;
1470     }
1471     out = new ir_value(self->m_owner, label ? label : "", (func->m_outtype == TYPE_VOID) ? store_return : store_value, func->m_outtype);
1472     if (!out) {
1473         delete in;
1474         return nullptr;
1475     }
1476     if (!ir_instr_op(in, 0, out, true) ||
1477         !ir_instr_op(in, 1, func, false))
1478     {
1479         delete in;
1480         return nullptr;
1481     }
1482     vec_push(self->m_instr, in);
1483     /*
1484     if (noreturn) {
1485         if (!ir_block_create_return(self, ctx, nullptr)) {
1486             compile_error(ctx, "internal error: failed to generate dummy-return instruction");
1487             delete in;
1488             return nullptr;
1489         }
1490     }
1491     */
1492     return in;
1493 }
1494
1495 ir_value* ir_call_value(ir_instr *self)
1496 {
1497     return self->_m_ops[0];
1498 }
1499
1500 void ir_call_param(ir_instr* self, ir_value *v)
1501 {
1502     self->m_params.push_back(v);
1503     v->m_reads.push_back(self);
1504 }
1505
1506 /* binary op related code */
1507
1508 ir_value* ir_block_create_binop(ir_block *self, lex_ctx_t ctx,
1509                                 const char *label, int opcode,
1510                                 ir_value *left, ir_value *right)
1511 {
1512     qc_type ot = TYPE_VOID;
1513     switch (opcode) {
1514         case INSTR_ADD_F:
1515         case INSTR_SUB_F:
1516         case INSTR_DIV_F:
1517         case INSTR_MUL_F:
1518         case INSTR_MUL_V:
1519         case INSTR_AND:
1520         case INSTR_OR:
1521 #if 0
1522         case INSTR_AND_I:
1523         case INSTR_AND_IF:
1524         case INSTR_AND_FI:
1525         case INSTR_OR_I:
1526         case INSTR_OR_IF:
1527         case INSTR_OR_FI:
1528 #endif
1529         case INSTR_BITAND:
1530         case INSTR_BITOR:
1531         case VINSTR_BITXOR:
1532 #if 0
1533         case INSTR_SUB_S: /* -- offset of string as float */
1534         case INSTR_MUL_IF:
1535         case INSTR_MUL_FI:
1536         case INSTR_DIV_IF:
1537         case INSTR_DIV_FI:
1538         case INSTR_BITOR_IF:
1539         case INSTR_BITOR_FI:
1540         case INSTR_BITAND_FI:
1541         case INSTR_BITAND_IF:
1542         case INSTR_EQ_I:
1543         case INSTR_NE_I:
1544 #endif
1545             ot = TYPE_FLOAT;
1546             break;
1547 #if 0
1548         case INSTR_ADD_I:
1549         case INSTR_ADD_IF:
1550         case INSTR_ADD_FI:
1551         case INSTR_SUB_I:
1552         case INSTR_SUB_FI:
1553         case INSTR_SUB_IF:
1554         case INSTR_MUL_I:
1555         case INSTR_DIV_I:
1556         case INSTR_BITAND_I:
1557         case INSTR_BITOR_I:
1558         case INSTR_XOR_I:
1559         case INSTR_RSHIFT_I:
1560         case INSTR_LSHIFT_I:
1561             ot = TYPE_INTEGER;
1562             break;
1563 #endif
1564         case INSTR_ADD_V:
1565         case INSTR_SUB_V:
1566         case INSTR_MUL_VF:
1567         case INSTR_MUL_FV:
1568         case VINSTR_BITAND_V:
1569         case VINSTR_BITOR_V:
1570         case VINSTR_BITXOR_V:
1571         case VINSTR_BITAND_VF:
1572         case VINSTR_BITOR_VF:
1573         case VINSTR_BITXOR_VF:
1574         case VINSTR_CROSS:
1575 #if 0
1576         case INSTR_DIV_VF:
1577         case INSTR_MUL_IV:
1578         case INSTR_MUL_VI:
1579 #endif
1580             ot = TYPE_VECTOR;
1581             break;
1582 #if 0
1583         case INSTR_ADD_SF:
1584             ot = TYPE_POINTER;
1585             break;
1586 #endif
1587     /*
1588      * after the following default case, the value of opcode can never
1589      * be 1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65
1590      */
1591         default:
1592             /* ranges: */
1593             /* boolean operations result in floats */
1594
1595             /*
1596              * if "opcode >= 10" takes the true branch, opcode is at least 10;
1597              * if "opcode <= 23" takes the false branch, opcode is at least 24
1598              */
1599             if (opcode >= INSTR_EQ_F && opcode <= INSTR_GT)
1600                 ot = TYPE_FLOAT;
1601
1602             /*
1603              * At condition "opcode <= 23", the value of "opcode" must be
1604              * at least 24.
1605              * At condition "opcode <= 23", the value of "opcode" cannot be
1606              * equal to any of {1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65}.
1607              * The condition "opcode <= 23" cannot be true.
1608              *
1609              * Thus ot=2 (TYPE_FLOAT) can never be true
1610              */
1611 #if 0
1612             else if (opcode >= INSTR_LE && opcode <= INSTR_GT)
1613                 ot = TYPE_FLOAT;
1614             else if (opcode >= INSTR_LE_I && opcode <= INSTR_EQ_FI)
1615                 ot = TYPE_FLOAT;
1616 #endif
1617             break;
1618     };
1619     if (ot == TYPE_VOID) {
1620         /* The AST or parser were supposed to check this! */
1621         return nullptr;
1622     }
1623
1624     return ir_block_create_general_instr(self, ctx, label, opcode, left, right, ot);
1625 }
1626
1627 ir_value* ir_block_create_unary(ir_block *self, lex_ctx_t ctx,
1628                                 const char *label, int opcode,
1629                                 ir_value *operand)
1630 {
1631     qc_type ot = TYPE_FLOAT;
1632     switch (opcode) {
1633         case INSTR_NOT_F:
1634         case INSTR_NOT_V:
1635         case INSTR_NOT_S:
1636         case INSTR_NOT_ENT:
1637         case INSTR_NOT_FNC: /*
1638         case INSTR_NOT_I:   */
1639             ot = TYPE_FLOAT;
1640             break;
1641
1642         /*
1643          * Negation for virtual instructions is emulated with 0-value. Thankfully
1644          * the operand for 0 already exists so we just source it from here.
1645          */
1646         case VINSTR_NEG_F:
1647             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_F, nullptr, operand, ot);
1648         case VINSTR_NEG_V:
1649             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_V, nullptr, operand, TYPE_VECTOR);
1650
1651         default:
1652             ot = operand->m_vtype;
1653             break;
1654     };
1655     if (ot == TYPE_VOID) {
1656         /* The AST or parser were supposed to check this! */
1657         return nullptr;
1658     }
1659
1660     /* let's use the general instruction creator and pass nullptr for OPB */
1661     return ir_block_create_general_instr(self, ctx, label, opcode, operand, nullptr, ot);
1662 }
1663
1664 static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t ctx, const char *label,
1665                                         int op, ir_value *a, ir_value *b, qc_type outype)
1666 {
1667     ir_instr *instr;
1668     ir_value *out;
1669
1670     out = new ir_value(self->m_owner, label ? label : "", store_value, outype);
1671     if (!out)
1672         return nullptr;
1673
1674     instr = new ir_instr(ctx, self, op);
1675     if (!instr) {
1676         return nullptr;
1677     }
1678
1679     if (!ir_instr_op(instr, 0, out, true) ||
1680         !ir_instr_op(instr, 1, a, false) ||
1681         !ir_instr_op(instr, 2, b, false) )
1682     {
1683         goto on_error;
1684     }
1685
1686     vec_push(self->m_instr, instr);
1687
1688     return out;
1689 on_error:
1690     delete instr;
1691     return nullptr;
1692 }
1693
1694 ir_value* ir_block_create_fieldaddress(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field)
1695 {
1696     ir_value *v;
1697
1698     /* TODO: support for various pointer types, if so desired */
1699     if (ent->m_vtype != TYPE_ENTITY)
1700         return nullptr;
1701
1702     if (field->m_vtype != TYPE_FIELD)
1703         return nullptr;
1704
1705     v = ir_block_create_general_instr(self, ctx, label, INSTR_ADDRESS, ent, field, TYPE_POINTER);
1706     v->m_fieldtype = field->m_fieldtype;
1707     return v;
1708 }
1709
1710 ir_value* ir_block_create_load_from_ent(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field, qc_type outype)
1711 {
1712     int op;
1713     if (ent->m_vtype != TYPE_ENTITY)
1714         return nullptr;
1715
1716     /* at some point we could redirect for TYPE_POINTER... but that could lead to carelessness */
1717     if (field->m_vtype != TYPE_FIELD)
1718         return nullptr;
1719
1720     switch (outype)
1721     {
1722         case TYPE_FLOAT:    op = INSTR_LOAD_F;   break;
1723         case TYPE_VECTOR:   op = INSTR_LOAD_V;   break;
1724         case TYPE_STRING:   op = INSTR_LOAD_S;   break;
1725         case TYPE_FIELD:    op = INSTR_LOAD_FLD; break;
1726         case TYPE_ENTITY:   op = INSTR_LOAD_ENT; break;
1727         case TYPE_FUNCTION: op = INSTR_LOAD_FNC; break;
1728 #if 0
1729         case TYPE_POINTER: op = INSTR_LOAD_I;   break;
1730         case TYPE_INTEGER: op = INSTR_LOAD_I;   break;
1731 #endif
1732         default:
1733             irerror(self->m_context, "invalid type for ir_block_create_load_from_ent: %s", type_name[outype]);
1734             return nullptr;
1735     }
1736
1737     return ir_block_create_general_instr(self, ctx, label, op, ent, field, outype);
1738 }
1739
1740 /* PHI resolving breaks the SSA, and must thus be the last
1741  * step before life-range calculation.
1742  */
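/* Rough sketch of the naive lowering (illustrative notation):
 *
 *     %out = PHI [%a from B1], [%b from B2]
 *
 * is removed, and each predecessor block gets a plain STORE of its incoming
 * value into %out, inserted just before that block's final jump/branch.
 * Single-use SSA temps are instead rewritten in place by redirecting the
 * instruction that produced them to write %out directly.
 */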
1743
1744 static bool ir_block_naive_phi(ir_block *self);
1745 bool ir_function_naive_phi(ir_function *self)
1746 {
1747     for (auto& b : self->m_blocks)
1748         if (!ir_block_naive_phi(b.get()))
1749             return false;
1750     return true;
1751 }
1752
1753 static bool ir_block_naive_phi(ir_block *self)
1754 {
1755     size_t i;
1756     /* FIXME: optionally, create_phi can add the phis
1757      * to a list so we don't need to loop through blocks
1758      * - anyway: "don't optimize YET"
1759      */
1760     for (i = 0; i < vec_size(self->m_instr); ++i)
1761     {
1762         ir_instr *instr = self->m_instr[i];
1763         if (instr->m_opcode != VINSTR_PHI)
1764             continue;
1765
1766         vec_remove(self->m_instr, i, 1);
1767         --i; /* NOTE: i+1 below */
1768
1769         for (auto &it : instr->m_phi) {
1770             ir_value *v = it.value;
1771             ir_block *b = it.from;
1772             if (v->m_store == store_value && v->m_reads.size() == 1 && v->m_writes.size() == 1) {
1773                 /* replace the value */
1774                 if (!ir_instr_op(v->m_writes[0], 0, instr->_m_ops[0], true))
1775                     return false;
1776             } else {
1777                 /* force a move instruction */
1778                 ir_instr *prevjump = vec_last(b->m_instr);
1779                 vec_pop(b->m_instr);
1780                 b->m_final = false;
1781                 instr->_m_ops[0]->m_store = store_global;
1782                 if (!ir_block_create_store(b, instr->m_context, instr->_m_ops[0], v))
1783                     return false;
1784                 instr->_m_ops[0]->m_store = store_value;
1785                 vec_push(b->m_instr, prevjump);
1786                 b->m_final = true;
1787             }
1788         }
1789         delete instr;
1790     }
1791     return true;
1792 }
1793
1794 /***********************************************************************
1795  * IR Temp allocation code
1796  * Propagates value life-ranges by walking through the function backwards
1797  * until no more changes are made.
1798  * In theory this should need one pass more than the deepest loop nesting
1799  * level, though this implementation may take an additional pass for
1800  * nested ifs.
1801  */
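/* Illustrative example (hypothetical instruction ids): a temp written at
 * eid 4 and read at eid 9 ends up with the life-range [4,9]. If that read
 * sits inside a loop, the value also has to stay alive around the back
 * edge, which only becomes visible on a later pass; hence the walk is
 * repeated until a full pass changes nothing.
 */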
1802
1803 /* Enumerate instructions; their ids are what the values' life-ranges refer to.
1804  */
1805 static void ir_block_enumerate(ir_block *self, size_t *_eid)
1806 {
1807     size_t i;
1808     size_t eid = *_eid;
1809     for (i = 0; i < vec_size(self->m_instr); ++i)
1810     {
1811         self->m_instr[i]->m_eid = eid++;
1812     }
1813     *_eid = eid;
1814 }
1815
1816 /* Enumerate blocks and instructions.
1817  * The block enumeration is unordered!
1818  * We do not really use the block enumeration, however
1819  * the instruction enumeration is important for life-ranges.
1820  */
1821 void ir_function_enumerate(ir_function *self)
1822 {
1823     size_t instruction_id = 0;
1824     size_t block_eid = 0;
1825     for (auto& block : self->m_blocks)
1826     {
1827         /* each block now gets an additional "entry" instruction id
1828          * we can use to avoid point-life issues
1829          */
1830         block->m_entry_id = instruction_id;
1831         block->m_eid      = block_eid;
1832         ++instruction_id;
1833         ++block_eid;
1834
1835         ir_block_enumerate(block.get(), &instruction_id);
1836     }
1837 }
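/* Illustrative numbering (hypothetical shapes): for two blocks with 2 and 3
 * instructions the walk above yields
 *
 *   block 0: entry_id 0, instruction eids 1, 2
 *   block 1: entry_id 3, instruction eids 4, 5, 6
 *
 * the entry id being the extra slot reserved before each block's first
 * real instruction.
 */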
1838
1839 /* Local-value allocator
1840  * Once the life-ranges of all values used in a function have been computed,
1841  * we can allocate their global positions.
1842  * This is the counterpart to register allocation on register machines.
1843  */
1844 struct function_allocator {
1845     ir_value **locals;
1846     size_t *sizes;
1847     size_t *positions;
1848     bool *unique;
1849 };
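/* Illustrative example (hypothetical life-ranges): two float temps alive
 * over [2,5] and [7,9] do not overlap, so the assignment pass below merges
 * the second into the slot created for the first and both receive the same
 * m_code.local index; a third temp alive over [4,8] overlaps both and gets
 * a slot of its own.
 */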
1850
1851 static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
1852 {
1853     ir_value *slot;
1854     size_t vsize = var->size();
1855
1856     var->m_code.local = vec_size(alloc->locals);
1857
1858     slot = new ir_value("reg", store_global, var->m_vtype);
1859     if (!slot)
1860         return false;
1861
1862     if (!slot->mergeLife(var))
1863         goto localerror;
1864
1865     vec_push(alloc->locals, slot);
1866     vec_push(alloc->sizes, vsize);
1867     vec_push(alloc->unique, var->m_unique_life);
1868
1869     return true;
1870
1871 localerror:
1872     delete slot;
1873     return false;
1874 }
1875
1876 static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
1877 {
1878     size_t a;
1879     ir_value *slot;
1880
1881     if (v->m_unique_life)
1882         return function_allocator_alloc(alloc, v);
1883
1884     for (a = 0; a < vec_size(alloc->locals); ++a)
1885     {
1886         /* if it's reserved for a unique liferange: skip */
1887         if (alloc->unique[a])
1888             continue;
1889
1890         slot = alloc->locals[a];
1891
1892         /* never resize parameter slots;
1893          * this will matter later when overlapping temps + locals
1894          */
1895         if (a < vec_size(self->m_params) &&
1896             alloc->sizes[a] < v->size())
1897         {
1898             continue;
1899         }
1900
1901         if (ir_values_overlap(v, slot))
1902             continue;
1903
1904         if (!slot->mergeLife(v))
1905             return false;
1906
1907         /* adjust size for this slot */
1908         if (alloc->sizes[a] < v->size())
1909             alloc->sizes[a] = v->size();
1910
1911         v->m_code.local = a;
1912         return true;
1913     }
1914     if (a >= vec_size(alloc->locals)) {
1915         if (!function_allocator_alloc(alloc, v))
1916             return false;
1917     }
1918     return true;
1919 }
1920
1921 bool ir_function_allocate_locals(ir_function *self)
1922 {
1923     bool   retval = true;
1924     size_t pos;
1925     bool   opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
1926
1927     function_allocator lockalloc, globalloc;
1928
1929     if (self->m_locals.empty() && self->m_values.empty())
1930         return true;
1931
1932     globalloc.locals    = nullptr;
1933     globalloc.sizes     = nullptr;
1934     globalloc.positions = nullptr;
1935     globalloc.unique    = nullptr;
1936     lockalloc.locals    = nullptr;
1937     lockalloc.sizes     = nullptr;
1938     lockalloc.positions = nullptr;
1939     lockalloc.unique    = nullptr;
1940
1941     size_t i;
1942     for (i = 0; i < self->m_locals.size(); ++i)
1943     {
1944         ir_value *v = self->m_locals[i].get();
1945         if ((self->m_flags & IR_FLAG_MASK_NO_LOCAL_TEMPS) || !OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) {
1946             v->m_locked      = true;
1947             v->m_unique_life = true;
1948         }
1949         else if (i >= vec_size(self->m_params))
1950             break;
1951         else
1952             v->m_locked = true; /* lock parameter locals */
1953         if (!function_allocator_alloc((v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1954             goto error;
1955     }
1956     for (; i < self->m_locals.size(); ++i)
1957     {
1958         ir_value *v = self->m_locals[i].get();
1959         if (v->m_life.empty())
1960             continue;
1961         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1962             goto error;
1963     }
1964
1965     /* Allocate a slot for any value that still exists */
1966     for (i = 0; i < self->m_values.size(); ++i)
1967     {
1968         ir_value *v = self->m_values[i].get();
1969
1970         if (v->m_life.empty())
1971             continue;
1972
1973         /* CALL optimization:
1974          * If the value is a parameter-temp: 1 write, 1 read from a CALL
1975          * and it's not "locked", write it to the OFS_PARM directly.
1976          */
1977         if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->m_locked && !v->m_unique_life) {
1978             if (v->m_reads.size() == 1 && v->m_writes.size() == 1 &&
1979                 (v->m_reads[0]->m_opcode == VINSTR_NRCALL ||
1980                  (v->m_reads[0]->m_opcode >= INSTR_CALL0 && v->m_reads[0]->m_opcode <= INSTR_CALL8)
1981                 )
1982                )
1983             {
1984                 size_t param;
1985                 ir_instr *call = v->m_reads[0];
1986                 if (!vec_ir_value_find(call->m_params, v, &param)) {
1987                     irerror(call->m_context, "internal error: unlocked parameter %s not found", v->m_name.c_str());
1988                     goto error;
1989                 }
1990                 ++opts_optimizationcount[OPTIM_CALL_STORES];
1991                 v->m_callparam = true;
1992                 if (param < 8)
1993                     v->setCodeAddress(OFS_PARM0 + 3*param);
1994                 else {
1995                     size_t nprotos = self->m_owner->m_extparam_protos.size();
1996                     ir_value *ep;
1997                     param -= 8;
1998                     if (nprotos > param)
1999                         ep = self->m_owner->m_extparam_protos[param].get();
2000                     else
2001                     {
2002                         ep = self->m_owner->generateExtparamProto();
2003                         while (++nprotos <= param)
2004                             ep = self->m_owner->generateExtparamProto();
2005                     }
2006                     ir_instr_op(v->m_writes[0], 0, ep, true);
2007                     call->m_params[param+8] = ep;
2008                 }
2009                 continue;
2010             }
2011             if (v->m_writes.size() == 1 && v->m_writes[0]->m_opcode == INSTR_CALL0) {
2012                 v->m_store = store_return;
2013                 if (v->m_members[0]) v->m_members[0]->m_store = store_return;
2014                 if (v->m_members[1]) v->m_members[1]->m_store = store_return;
2015                 if (v->m_members[2]) v->m_members[2]->m_store = store_return;
2016                 ++opts_optimizationcount[OPTIM_CALL_STORES];
2017                 continue;
2018             }
2019         }
2020
2021         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
2022             goto error;
2023     }
2024
2025     if (!lockalloc.sizes && !globalloc.sizes) {
2026         goto cleanup;
2027     }
2028     vec_push(lockalloc.positions, 0);
2029     vec_push(globalloc.positions, 0);
2030
2031     /* Adjust slot positions based on sizes */
2032     if (lockalloc.sizes) {
2033         pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
2034         for (i = 1; i < vec_size(lockalloc.sizes); ++i)
2035         {
2036             pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
2037             vec_push(lockalloc.positions, pos);
2038         }
2039         self->m_allocated_locals = pos + vec_last(lockalloc.sizes);
2040     }
2041     if (globalloc.sizes) {
2042         pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
2043         for (i = 1; i < vec_size(globalloc.sizes); ++i)
2044         {
2045             pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
2046             vec_push(globalloc.positions, pos);
2047         }
2048         self->m_globaltemps = pos + vec_last(globalloc.sizes);
2049     }
2050
2051     /* Locals need to know their new position */
2052     for (auto& local : self->m_locals) {
2053         if (local->m_locked || !opt_gt)
2054             local->m_code.local = lockalloc.positions[local->m_code.local];
2055         else
2056             local->m_code.local = globalloc.positions[local->m_code.local];
2057     }
2058     /* Take over the actual slot positions on values */
2059     for (auto& value : self->m_values) {
2060         if (value->m_locked || !opt_gt)
2061             value->m_code.local = lockalloc.positions[value->m_code.local];
2062         else
2063             value->m_code.local = globalloc.positions[value->m_code.local];
2064     }
2065
2066     goto cleanup;
2067
2068 error:
2069     retval = false;
2070 cleanup:
2071     for (i = 0; i < vec_size(lockalloc.locals); ++i)
2072         delete lockalloc.locals[i];
2073     for (i = 0; i < vec_size(globalloc.locals); ++i)
2074         delete globalloc.locals[i];
2075     vec_free(globalloc.unique);
2076     vec_free(globalloc.locals);
2077     vec_free(globalloc.sizes);
2078     vec_free(globalloc.positions);
2079     vec_free(lockalloc.unique);
2080     vec_free(lockalloc.locals);
2081     vec_free(lockalloc.sizes);
2082     vec_free(lockalloc.positions);
2083     return retval;
2084 }
2085
2086 /* Get information about which operand
2087  * is read from, or written to.
2088  */
2089 static void ir_op_read_write(int op, size_t *read, size_t *write)
2090 {
2091     switch (op)
2092     {
2093     case VINSTR_JUMP:
2094     case INSTR_GOTO:
2095         *write = 0;
2096         *read = 0;
2097         break;
2098     case INSTR_IF:
2099     case INSTR_IFNOT:
2100 #if 0
2101     case INSTR_IF_S:
2102     case INSTR_IFNOT_S:
2103 #endif
2104     case INSTR_RETURN:
2105     case VINSTR_COND:
2106         *write = 0;
2107         *read = 1;
2108         break;
2109     case INSTR_STOREP_F:
2110     case INSTR_STOREP_V:
2111     case INSTR_STOREP_S:
2112     case INSTR_STOREP_ENT:
2113     case INSTR_STOREP_FLD:
2114     case INSTR_STOREP_FNC:
2115         *write = 0;
2116         *read  = 7;
2117         break;
2118     default:
2119         *write = 1;
2120         *read = 6;
2121         break;
2122     }
2123 }
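/* Illustrative reading of the masks above: bit o set in *read / *write
 * means _m_ops[o] is a read / write operand. The default case
 * (write = 1, read = 6) thus describes the usual "op0 = op1 <op> op2"
 * shape, while the STOREP family (write = 0, read = 7) treats all three
 * operands as reads, since the real destination lives behind the pointer
 * rather than in one of the operands themselves.
 */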
2124
2125 static bool ir_block_living_add_instr(ir_block *self, size_t eid) {
2126     bool changed = false;
2127     for (auto &it : self->m_living)
2128         if (it->setAlive(eid))
2129             changed = true;
2130     return changed;
2131 }
2132
2133 static bool ir_block_living_lock(ir_block *self) {
2134     bool changed = false;
2135     for (auto &it : self->m_living) {
2136         if (it->m_locked)
2137             continue;
2138         it->m_locked = true;
2139         changed = true;
2140     }
2141     return changed;
2142 }
2143
2144 static bool ir_block_life_propagate(ir_block *self, bool *changed)
2145 {
2146     ir_instr *instr;
2147     ir_value *value;
2148     size_t i, o, p, mem;
2149     // bitmasks of which operands are read from or written to
2150     size_t read, write;
2151
2152     self->m_living.clear();
2153
2154     p = vec_size(self->m_exits);
2155     for (i = 0; i < p; ++i) {
2156         ir_block *prev = self->m_exits[i];
2157         for (auto &it : prev->m_living)
2158             if (!vec_ir_value_find(self->m_living, it, nullptr))
2159                 self->m_living.push_back(it);
2160     }
2161
2162     i = vec_size(self->m_instr);
2163     while (i)
2164     { --i;
2165         instr = self->m_instr[i];
2166
2167         /* See which operands are read and write operands */
2168         ir_op_read_write(instr->m_opcode, &read, &write);
2169
2170         /* Go through the 3 main operands
2171          * writes first, then reads
2172          */
2173         for (o = 0; o < 3; ++o)
2174         {
2175             if (!instr->_m_ops[o]) /* no such operand */
2176                 continue;
2177
2178             value = instr->_m_ops[o];
2179
2180             /* We only care about locals */
2181             /* we also calculate parameter liferanges so that locals
2182              * can take up parameter slots */
2183             if (value->m_store != store_value &&
2184                 value->m_store != store_local &&
2185                 value->m_store != store_param)
2186                 continue;
2187
2188             /* write operands */
2189             /* When we write to a local, we consider it "dead" for the
2190              * remaining upper part of the function, since in SSA a value
2191              * can only be written once (== created)
2192              */
2193             if (write & (1<<o))
2194             {
2195                 size_t idx;
2196                 bool in_living = vec_ir_value_find(self->m_living, value, &idx);
2197                 if (!in_living)
2198                 {
2199                     /* If the value isn't alive it hasn't been read before... */
2200                     /* TODO: See if the warning can be emitted during parsing or AST processing
2201                      * otherwise have warning printed here.
2202                      * IF printing a warning here: include filecontext_t,
2203                      * and make sure it's only printed once
2204                      * since this function is run multiple times.
2205                      */
2206                     /* con_err( "Value only written %s\n", value->m_name); */
2207                     if (value->setAlive(instr->m_eid))
2208                         *changed = true;
2209                 } else {
2210                     /* 'living' won't contain it anymore after this,
2211                      * so merge its life-range here, because (A) below
2212                      * won't do it for us.
2213                      */
2214                     if (value->setAlive(instr->m_eid))
2215                         *changed = true;
2216                     // Then remove
2217                     self->m_living.erase(self->m_living.begin() + idx);
2218                 }
2219                 /* Removing a vector removes all members */
2220                 for (mem = 0; mem < 3; ++mem) {
2221                     if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], &idx)) {
2222                         if (value->m_members[mem]->setAlive(instr->m_eid))
2223                             *changed = true;
2224                         self->m_living.erase(self->m_living.begin() + idx);
2225                     }
2226                 }
2227                 /* Removing the last member removes the vector */
2228                 if (value->m_memberof) {
2229                     value = value->m_memberof;
2230                     for (mem = 0; mem < 3; ++mem) {
2231                         if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2232                             break;
2233                     }
2234                     if (mem == 3 && vec_ir_value_find(self->m_living, value, &idx)) {
2235                         if (value->setAlive(instr->m_eid))
2236                             *changed = true;
2237                         self->m_living.erase(self->m_living.begin() + idx);
2238                     }
2239                 }
2240             }
2241         }
2242
2243         /* These operations need a special case as they can break when using
2244          * same source and destination operand otherwise, as the engine may
2245          * read the source multiple times. */
2246         if (instr->m_opcode == INSTR_MUL_VF ||
2247             instr->m_opcode == VINSTR_BITAND_VF ||
2248             instr->m_opcode == VINSTR_BITOR_VF ||
2249             instr->m_opcode == VINSTR_BITXOR ||
2250             instr->m_opcode == VINSTR_BITXOR_VF ||
2251             instr->m_opcode == VINSTR_BITXOR_V ||
2252             instr->m_opcode == VINSTR_CROSS)
2253         {
2254             value = instr->_m_ops[2];
2255             /* the float source will get an additional lifetime */
2256             if (value->setAlive(instr->m_eid+1))
2257                 *changed = true;
2258             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2259                 *changed = true;
2260         }
2261
2262         if (instr->m_opcode == INSTR_MUL_FV ||
2263             instr->m_opcode == INSTR_LOAD_V ||
2264             instr->m_opcode == VINSTR_BITXOR ||
2265             instr->m_opcode == VINSTR_BITXOR_VF ||
2266             instr->m_opcode == VINSTR_BITXOR_V ||
2267             instr->m_opcode == VINSTR_CROSS)
2268         {
2269             value = instr->_m_ops[1];
2270             /* the float source will get an additional lifetime */
2271             if (value->setAlive(instr->m_eid+1))
2272                 *changed = true;
2273             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2274                 *changed = true;
2275         }
2276
2277         for (o = 0; o < 3; ++o)
2278         {
2279             if (!instr->_m_ops[o]) /* no such operand */
2280                 continue;
2281
2282             value = instr->_m_ops[o];
2283
2284             /* We only care about locals */
2285             /* we also calculate parameter liferanges so that locals
2286              * can take up parameter slots */
2287             if (value->m_store != store_value &&
2288                 value->m_store != store_local &&
2289                 value->m_store != store_param)
2290                 continue;
2291
2292             /* read operands */
2293             if (read & (1<<o))
2294             {
2295                 if (!vec_ir_value_find(self->m_living, value, nullptr))
2296                     self->m_living.push_back(value);
2297                 /* reading adds the full vector */
2298                 if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2299                     self->m_living.push_back(value->m_memberof);
2300                 for (mem = 0; mem < 3; ++mem) {
2301                     if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2302                         self->m_living.push_back(value->m_members[mem]);
2303                 }
2304             }
2305         }
2306         /* PHI operands are always read operands */
2307         for (auto &it : instr->m_phi) {
2308             value = it.value;
2309             if (!vec_ir_value_find(self->m_living, value, nullptr))
2310                 self->m_living.push_back(value);
2311             /* reading adds the full vector */
2312             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2313                 self->m_living.push_back(value->m_memberof);
2314             for (mem = 0; mem < 3; ++mem) {
2315                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2316                     self->m_living.push_back(value->m_members[mem]);
2317             }
2318         }
2319
2320         /* on a call, all these values must be "locked" */
2321         if (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8) {
2322             if (ir_block_living_lock(self))
2323                 *changed = true;
2324         }
2325         /* call params are read operands too */
2326         for (auto &it : instr->m_params) {
2327             value = it;
2328             if (!vec_ir_value_find(self->m_living, value, nullptr))
2329                 self->m_living.push_back(value);
2330             /* reading adds the full vector */
2331             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2332                 self->m_living.push_back(value->m_memberof);
2333             for (mem = 0; mem < 3; ++mem) {
2334                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2335                     self->m_living.push_back(value->m_members[mem]);
2336             }
2337         }
2338
2339         /* (A) */
2340         if (ir_block_living_add_instr(self, instr->m_eid))
2341             *changed = true;
2342     }
2343     /* the "entry" instruction ID */
2344     if (ir_block_living_add_instr(self, self->m_entry_id))
2345         *changed = true;
2346
2347     return true;
2348 }
2349
2350 bool ir_function_calculate_liferanges(ir_function *self)
2351 {
2352     /* parameters live at 0 */
2353     for (size_t i = 0; i < vec_size(self->m_params); ++i)
2354         if (!self->m_locals[i].get()->setAlive(0))
2355             compile_error(self->m_context, "internal error: failed value-life merging");
2356
2357     bool changed;
2358     do {
2359         self->m_run_id++;
2360         changed = false;
2361         for (auto i = self->m_blocks.rbegin(); i != self->m_blocks.rend(); ++i)
2362             ir_block_life_propagate(i->get(), &changed);
2363     } while (changed);
2364
2365     if (self->m_blocks.size()) {
2366         ir_block *block = self->m_blocks[0].get();
2367         for (auto &it : block->m_living) {
2368             ir_value *v = it;
2369             if (v->m_store != store_local)
2370                 continue;
2371             if (v->m_vtype == TYPE_VECTOR)
2372                 continue;
2373             self->m_flags |= IR_FLAG_HAS_UNINITIALIZED;
2374             /* find the instruction reading from it */
2375             size_t s = 0;
2376             for (; s < v->m_reads.size(); ++s) {
2377                 if (v->m_reads[s]->m_eid == v->m_life[0].end)
2378                     break;
2379             }
2380             if (s < v->m_reads.size()) {
2381                 if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2382                               "variable `%s` may be used uninitialized in this function\n"
2383                               " -> %s:%i",
2384                               v->m_name.c_str(),
2385                               v->m_reads[s]->m_context.file, v->m_reads[s]->m_context.line)
2386                    )
2387                 {
2388                     return false;
2389                 }
2390                 continue;
2391             }
2392             if (v->m_memberof) {
2393                 ir_value *vec = v->m_memberof;
2394                 for (s = 0; s < vec->m_reads.size(); ++s) {
2395                     if (vec->m_reads[s]->m_eid == v->m_life[0].end)
2396                         break;
2397                 }
2398                 if (s < vec->m_reads.size()) {
2399                     if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2400                                   "variable `%s` may be used uninitialized in this function\n"
2401                                   " -> %s:%i",
2402                                   v->m_name.c_str(),
2403                                   vec->m_reads[s]->m_context.file, vec->m_reads[s]->m_context.line)
2404                        )
2405                     {
2406                         return false;
2407                     }
2408                     continue;
2409                 }
2410             }
2411             if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2412                           "variable `%s` may be used uninitialized in this function", v->m_name.c_str()))
2413             {
2414                 return false;
2415             }
2416         }
2417     }
2418     return true;
2419 }
2420
2421 /***********************************************************************
2422  * IR Code-Generation
2423  *
2424  * Since the IR has the convention of putting 'write' operands
2425  * at the beginning, we have to rotate the operands of instructions
2426  * properly in order to generate valid QCVM code.
2427  *
2428  * Having destinations at a fixed position is more convenient. In QC
2429  * this is *mostly* OPC, but FTE adds at least 2 instructions which
2430  * read from OPA and store to OPB rather than OPC, which is partially
2431  * the reason why the implementation of these instructions in
2432  * darkplaces has been delayed for so long.
2433  *
2434  * Breaking conventions is annoying...
2435  */
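/* Illustrative mapping (this is the generic case at the end of
 * gen_blocks_recursive below): an IR instruction with
 *
 *   _m_ops[0] = destination, _m_ops[1] = left, _m_ops[2] = right
 *
 * is emitted as a statement with o1 = left, o2 = right and the destination
 * rotated into o3; RETURN/DONE and the STORE/STOREP families then shuffle
 * those fields again as special cases.
 */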
2436 static bool gen_global_field(code_t *code, ir_value *global)
2437 {
2438     if (global->m_hasvalue)
2439     {
2440         ir_value *fld = global->m_constval.vpointer;
2441         if (!fld) {
2442             irerror(global->m_context, "Invalid field constant with no field: %s", global->m_name.c_str());
2443             return false;
2444         }
2445
2446         /* copy the field's value */
2447         global->setCodeAddress(code->globals.size());
2448         code->globals.push_back(fld->m_code.fieldaddr);
2449         if (global->m_fieldtype == TYPE_VECTOR) {
2450             code->globals.push_back(fld->m_code.fieldaddr+1);
2451             code->globals.push_back(fld->m_code.fieldaddr+2);
2452         }
2453     }
2454     else
2455     {
2456         global->setCodeAddress(code->globals.size());
2457         code->globals.push_back(0);
2458         if (global->m_fieldtype == TYPE_VECTOR) {
2459             code->globals.push_back(0);
2460             code->globals.push_back(0);
2461         }
2462     }
2463     if (global->m_code.globaladdr < 0)
2464         return false;
2465     return true;
2466 }
2467
2468 static bool gen_global_pointer(code_t *code, ir_value *global)
2469 {
2470     if (global->m_hasvalue)
2471     {
2472         ir_value *target = global->m_constval.vpointer;
2473         if (!target) {
2474             irerror(global->m_context, "Invalid pointer constant: %s", global->m_name.c_str());
2475             /* nullptr pointers point to the nullptr constant, which also
2476              * sits at address 0, but still has an ir_value of its own.
2477              */
2478             return false;
2479         }
2480
2481         /* Here, relocations ARE possible - in fteqcc-enhanced-qc:
2482          * void() foo; <- proto
2483          * void() *fooptr = &foo;
2484          * void() foo = { code }
2485          */
2486         if (!target->m_code.globaladdr) {
2487             /* FIXME: Check for the constant nullptr ir_value!
2488              * because then code.globaladdr being 0 is valid.
2489              */
2490             irerror(global->m_context, "FIXME: Relocation support");
2491             return false;
2492         }
2493
2494         global->setCodeAddress(code->globals.size());
2495         code->globals.push_back(target->m_code.globaladdr);
2496     }
2497     else
2498     {
2499         global->setCodeAddress(code->globals.size());
2500         code->globals.push_back(0);
2501     }
2502     if (global->m_code.globaladdr < 0)
2503         return false;
2504     return true;
2505 }
2506
2507 static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *block)
2508 {
2509     prog_section_statement_t stmt;
2510     ir_instr *instr;
2511     ir_block *target;
2512     ir_block *ontrue;
2513     ir_block *onfalse;
2514     size_t    stidx;
2515     size_t    i;
2516     int       j;
2517
2518     block->m_generated = true;
2519     block->m_code_start = code->statements.size();
2520     for (i = 0; i < vec_size(block->m_instr); ++i)
2521     {
2522         instr = block->m_instr[i];
2523
2524         if (instr->m_opcode == VINSTR_PHI) {
2525             irerror(block->m_context, "cannot generate virtual instruction (phi)");
2526             return false;
2527         }
2528
2529         if (instr->m_opcode == VINSTR_JUMP) {
2530             target = instr->m_bops[0];
2531             /* for unconditional jumps, if the target hasn't been generated
2532              * yet, we generate it right here.
2533              */
2534             if (!target->m_generated)
2535                 return gen_blocks_recursive(code, func, target);
2536
2537             /* otherwise we generate a jump instruction */
2538             stmt.opcode = INSTR_GOTO;
2539             stmt.o1.s1 = target->m_code_start - code->statements.size();
2540             stmt.o2.s1 = 0;
2541             stmt.o3.s1 = 0;
2542             if (stmt.o1.s1 != 1)
2543                 code_push_statement(code, &stmt, instr->m_context);
2544
2545             /* no further instructions can be in this block */
2546             return true;
2547         }
2548
2549         if (instr->m_opcode == VINSTR_BITXOR) {
2550             stmt.opcode = INSTR_BITOR;
2551             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2552             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2553             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2554             code_push_statement(code, &stmt, instr->m_context);
2555             stmt.opcode = INSTR_BITAND;
2556             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2557             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2558             stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2559             code_push_statement(code, &stmt, instr->m_context);
2560             stmt.opcode = INSTR_SUB_F;
2561             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2562             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2563             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2564             code_push_statement(code, &stmt, instr->m_context);
2565
2566             /* instruction generated */
2567             continue;
2568         }
2569
2570         if (instr->m_opcode == VINSTR_BITAND_V) {
2571             stmt.opcode = INSTR_BITAND;
2572             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2573             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2574             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2575             code_push_statement(code, &stmt, instr->m_context);
2576             ++stmt.o1.s1;
2577             ++stmt.o2.s1;
2578             ++stmt.o3.s1;
2579             code_push_statement(code, &stmt, instr->m_context);
2580             ++stmt.o1.s1;
2581             ++stmt.o2.s1;
2582             ++stmt.o3.s1;
2583             code_push_statement(code, &stmt, instr->m_context);
2584
2585             /* instruction generated */
2586             continue;
2587         }
2588
2589         if (instr->m_opcode == VINSTR_BITOR_V) {
2590             stmt.opcode = INSTR_BITOR;
2591             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2592             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2593             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2594             code_push_statement(code, &stmt, instr->m_context);
2595             ++stmt.o1.s1;
2596             ++stmt.o2.s1;
2597             ++stmt.o3.s1;
2598             code_push_statement(code, &stmt, instr->m_context);
2599             ++stmt.o1.s1;
2600             ++stmt.o2.s1;
2601             ++stmt.o3.s1;
2602             code_push_statement(code, &stmt, instr->m_context);
2603
2604             /* instruction generated */
2605             continue;
2606         }
2607
2608         if (instr->m_opcode == VINSTR_BITXOR_V) {
2609             for (j = 0; j < 3; ++j) {
2610                 stmt.opcode = INSTR_BITOR;
2611                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2612                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2613                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2614                 code_push_statement(code, &stmt, instr->m_context);
2615                 stmt.opcode = INSTR_BITAND;
2616                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2617                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2618                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2619                 code_push_statement(code, &stmt, instr->m_context);
2620             }
2621             stmt.opcode = INSTR_SUB_V;
2622             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2623             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2624             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2625             code_push_statement(code, &stmt, instr->m_context);
2626
2627             /* instruction generated */
2628             continue;
2629         }
2630
2631         if (instr->m_opcode == VINSTR_BITAND_VF) {
2632             stmt.opcode = INSTR_BITAND;
2633             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2634             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2635             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2636             code_push_statement(code, &stmt, instr->m_context);
2637             ++stmt.o1.s1;
2638             ++stmt.o3.s1;
2639             code_push_statement(code, &stmt, instr->m_context);
2640             ++stmt.o1.s1;
2641             ++stmt.o3.s1;
2642             code_push_statement(code, &stmt, instr->m_context);
2643
2644             /* instruction generated */
2645             continue;
2646         }
2647
2648         if (instr->m_opcode == VINSTR_BITOR_VF) {
2649             stmt.opcode = INSTR_BITOR;
2650             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2651             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2652             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2653             code_push_statement(code, &stmt, instr->m_context);
2654             ++stmt.o1.s1;
2655             ++stmt.o3.s1;
2656             code_push_statement(code, &stmt, instr->m_context);
2657             ++stmt.o1.s1;
2658             ++stmt.o3.s1;
2659             code_push_statement(code, &stmt, instr->m_context);
2660
2661             /* instruction generated */
2662             continue;
2663         }
2664
2665         if (instr->m_opcode == VINSTR_BITXOR_VF) {
2666             for (j = 0; j < 3; ++j) {
2667                 stmt.opcode = INSTR_BITOR;
2668                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2669                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2670                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2671                 code_push_statement(code, &stmt, instr->m_context);
2672                 stmt.opcode = INSTR_BITAND;
2673                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2674                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2675                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2676                 code_push_statement(code, &stmt, instr->m_context);
2677             }
2678             stmt.opcode = INSTR_SUB_V;
2679             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2680             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2681             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2682             code_push_statement(code, &stmt, instr->m_context);
2683
2684             /* instruction generated */
2685             continue;
2686         }
2687
2688         if (instr->m_opcode == VINSTR_CROSS) {
2689             stmt.opcode = INSTR_MUL_F;
2690             for (j = 0; j < 3; ++j) {
2691                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 1) % 3;
2692                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 2) % 3;
2693                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2694                 code_push_statement(code, &stmt, instr->m_context);
2695                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 2) % 3;
2696                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 1) % 3;
2697                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2698                 code_push_statement(code, &stmt, instr->m_context);
2699             }
2700             stmt.opcode = INSTR_SUB_V;
2701             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2702             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2703             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2704             code_push_statement(code, &stmt, instr->m_context);
2705
2706             /* instruction generated */
2707             continue;
2708         }
2709
2710         if (instr->m_opcode == VINSTR_COND) {
2711             ontrue  = instr->m_bops[0];
2712             onfalse = instr->m_bops[1];
2713             /* TODO: have the AST signal which block should
2714              * come first: eg. optimize IFs without ELSE...
2715              */
2716
2717             stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2718             stmt.o2.u1 = 0;
2719             stmt.o3.s1 = 0;
2720
2721             if (ontrue->m_generated) {
2722                 stmt.opcode = INSTR_IF;
2723                 stmt.o2.s1 = ontrue->m_code_start - code->statements.size();
2724                 if (stmt.o2.s1 != 1)
2725                     code_push_statement(code, &stmt, instr->m_context);
2726             }
2727             if (onfalse->m_generated) {
2728                 stmt.opcode = INSTR_IFNOT;
2729                 stmt.o2.s1 = onfalse->m_code_start - code->statements.size();
2730                 if (stmt.o2.s1 != 1)
2731                     code_push_statement(code, &stmt, instr->m_context);
2732             }
2733             if (!ontrue->m_generated) {
2734                 if (onfalse->m_generated)
2735                     return gen_blocks_recursive(code, func, ontrue);
2736             }
2737             if (!onfalse->m_generated) {
2738                 if (ontrue->m_generated)
2739                     return gen_blocks_recursive(code, func, onfalse);
2740             }
2741             /* neither ontrue nor onfalse has been generated yet */
2742             stmt.opcode = INSTR_IFNOT;
2743             if (!instr->m_likely) {
2744                 /* Honor the likelihood hint */
2745                 ir_block *tmp = onfalse;
2746                 stmt.opcode = INSTR_IF;
2747                 onfalse = ontrue;
2748                 ontrue = tmp;
2749             }
2750             stidx = code->statements.size();
2751             code_push_statement(code, &stmt, instr->m_context);
2752             /* on false we jump, so add ontrue-path */
2753             if (!gen_blocks_recursive(code, func, ontrue))
2754                 return false;
2755             /* fixup the jump address */
2756             code->statements[stidx].o2.s1 = code->statements.size() - stidx;
2757             /* generate onfalse path */
2758             if (onfalse->m_generated) {
2759                 /* fixup the jump address */
2760                 code->statements[stidx].o2.s1 = onfalse->m_code_start - stidx;
2761                 if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2762                     code->statements[stidx] = code->statements[stidx+1];
2763                     if (code->statements[stidx].o1.s1 < 0)
2764                         code->statements[stidx].o1.s1++;
2765                     code_pop_statement(code);
2766                 }
2767                 stmt.opcode = code->statements.back().opcode;
2768                 if (stmt.opcode == INSTR_GOTO ||
2769                     stmt.opcode == INSTR_IF ||
2770                     stmt.opcode == INSTR_IFNOT ||
2771                     stmt.opcode == INSTR_RETURN ||
2772                     stmt.opcode == INSTR_DONE)
2773                 {
2774                     /* no use jumping from here */
2775                     return true;
2776                 }
2777                 /* may have been generated in the previous recursive call */
2778                 stmt.opcode = INSTR_GOTO;
2779                 stmt.o1.s1 = onfalse->m_code_start - code->statements.size();
2780                 stmt.o2.s1 = 0;
2781                 stmt.o3.s1 = 0;
2782                 if (stmt.o1.s1 != 1)
2783                     code_push_statement(code, &stmt, instr->m_context);
2784                 return true;
2785             }
2786             else if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2787                 code->statements[stidx] = code->statements[stidx+1];
2788                 if (code->statements[stidx].o1.s1 < 0)
2789                     code->statements[stidx].o1.s1++;
2790                 code_pop_statement(code);
2791             }
2792             /* if not, generate now */
2793             return gen_blocks_recursive(code, func, onfalse);
2794         }
2795
2796         if ( (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8)
2797            || instr->m_opcode == VINSTR_NRCALL)
2798         {
2799             size_t p, first;
2800             ir_value *retvalue;
2801
2802             first = instr->m_params.size();
2803             if (first > 8)
2804                 first = 8;
2805             for (p = 0; p < first; ++p)
2806             {
2807                 ir_value *param = instr->m_params[p];
2808                 if (param->m_callparam)
2809                     continue;
2810
2811                 stmt.opcode = INSTR_STORE_F;
2812                 stmt.o3.u1 = 0;
2813
2814                 if (param->m_vtype == TYPE_FIELD)
2815                     stmt.opcode = field_store_instr[param->m_fieldtype];
2816                 else if (param->m_vtype == TYPE_NIL)
2817                     stmt.opcode = INSTR_STORE_V;
2818                 else
2819                     stmt.opcode = type_store_instr[param->m_vtype];
2820                 stmt.o1.u1 = param->codeAddress();
2821                 stmt.o2.u1 = OFS_PARM0 + 3 * p;
2822
2823                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2824                     /* fetch 3 separate floats */
2825                     stmt.opcode = INSTR_STORE_F;
2826                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2827                     code_push_statement(code, &stmt, instr->m_context);
2828                     stmt.o2.u1++;
2829                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2830                     code_push_statement(code, &stmt, instr->m_context);
2831                     stmt.o2.u1++;
2832                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2833                     code_push_statement(code, &stmt, instr->m_context);
2834                 }
2835                 else
2836                     code_push_statement(code, &stmt, instr->m_context);
2837             }
2838             /* Now handle extparams */
2839             first = instr->m_params.size();
2840             for (; p < first; ++p)
2841             {
2842                 ir_builder *ir = func->m_owner;
2843                 ir_value *param = instr->m_params[p];
2844                 ir_value *targetparam;
2845
2846                 if (param->m_callparam)
2847                     continue;
2848
2849                 if (p-8 >= ir->m_extparams.size())
2850                     ir->generateExtparam();
2851
2852                 targetparam = ir->m_extparams[p-8];
2853
2854                 stmt.opcode = INSTR_STORE_F;
2855                 stmt.o3.u1 = 0;
2856
2857                 if (param->m_vtype == TYPE_FIELD)
2858                     stmt.opcode = field_store_instr[param->m_fieldtype];
2859                 else if (param->m_vtype == TYPE_NIL)
2860                     stmt.opcode = INSTR_STORE_V;
2861                 else
2862                     stmt.opcode = type_store_instr[param->m_vtype];
2863                 stmt.o1.u1 = param->codeAddress();
2864                 stmt.o2.u1 = targetparam->codeAddress();
2865                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2866                     /* fetch 3 separate floats */
2867                     stmt.opcode = INSTR_STORE_F;
2868                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2869                     code_push_statement(code, &stmt, instr->m_context);
2870                     stmt.o2.u1++;
2871                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2872                     code_push_statement(code, &stmt, instr->m_context);
2873                     stmt.o2.u1++;
2874                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2875                     code_push_statement(code, &stmt, instr->m_context);
2876                 }
2877                 else
2878                     code_push_statement(code, &stmt, instr->m_context);
2879             }
2880
2881             stmt.opcode = INSTR_CALL0 + instr->m_params.size();
2882             if (stmt.opcode > INSTR_CALL8)
2883                 stmt.opcode = INSTR_CALL8;
2884             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2885             stmt.o2.u1 = 0;
2886             stmt.o3.u1 = 0;
2887             code_push_statement(code, &stmt, instr->m_context);
2888
2889             retvalue = instr->_m_ops[0];
2890             if (retvalue && retvalue->m_store != store_return &&
2891                 (retvalue->m_store == store_global || retvalue->m_life.size()))
2892             {
2893                 /* not to be kept in OFS_RETURN */
2894                 if (retvalue->m_vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS))
2895                     stmt.opcode = field_store_instr[retvalue->m_fieldtype];
2896                 else
2897                     stmt.opcode = type_store_instr[retvalue->m_vtype];
2898                 stmt.o1.u1 = OFS_RETURN;
2899                 stmt.o2.u1 = retvalue->codeAddress();
2900                 stmt.o3.u1 = 0;
2901                 code_push_statement(code, &stmt, instr->m_context);
2902             }
2903             continue;
2904         }
2905
2906         if (instr->m_opcode == INSTR_STATE) {
2907             stmt.opcode = instr->m_opcode;
2908             if (instr->_m_ops[0])
2909                 stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2910             if (instr->_m_ops[1])
2911                 stmt.o2.u1 = instr->_m_ops[1]->codeAddress();
2912             stmt.o3.u1 = 0;
2913             code_push_statement(code, &stmt, instr->m_context);
2914             continue;
2915         }
2916
2917         stmt.opcode = instr->m_opcode;
2918         stmt.o1.u1 = 0;
2919         stmt.o2.u1 = 0;
2920         stmt.o3.u1 = 0;
2921
2922         /* This is the general order of operands */
2923         if (instr->_m_ops[0])
2924             stmt.o3.u1 = instr->_m_ops[0]->codeAddress();
2925
2926         if (instr->_m_ops[1])
2927             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2928
2929         if (instr->_m_ops[2])
2930             stmt.o2.u1 = instr->_m_ops[2]->codeAddress();
2931
2932         if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
2933         {
2934             stmt.o1.u1 = stmt.o3.u1;
2935             stmt.o3.u1 = 0;
2936         }
2937         else if ((stmt.opcode >= INSTR_STORE_F &&
2938                   stmt.opcode <= INSTR_STORE_FNC) ||
2939                  (stmt.opcode >= INSTR_STOREP_F &&
2940                   stmt.opcode <= INSTR_STOREP_FNC))
2941         {
2942             /* 2-operand instructions with A -> B */
2943             stmt.o2.u1 = stmt.o3.u1;
2944             stmt.o3.u1 = 0;
2945
2946             /* tiny optimization, don't output
2947              * STORE a, a
2948              */
2949             if (stmt.o2.u1 == stmt.o1.u1 &&
2950                 OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
2951             {
2952                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
2953                 continue;
2954             }
2955         }
2956         code_push_statement(code, &stmt, instr->m_context);
2957     }
2958     return true;
2959 }
2960
2961 static bool gen_function_code(code_t *code, ir_function *self)
2962 {
2963     ir_block *block;
2964     prog_section_statement_t stmt, *retst;
2965
2966     /* Starting from the entry point, we generate blocks "as they come"
2967      * for now. Dead blocks will, obviously, not be translated.
2968      */
2969     if (self->m_blocks.empty()) {
2970         irerror(self->m_context, "Function '%s' declared without body.", self->m_name.c_str());
2971         return false;
2972     }
2973
2974     block = self->m_blocks[0].get();
2975     if (block->m_generated)
2976         return true;
2977
2978     if (!gen_blocks_recursive(code, self, block)) {
2979         irerror(self->m_context, "failed to generate blocks for '%s'", self->m_name.c_str());
2980         return false;
2981     }
2982
2983     /* code_write and qcvm -disasm need to know that the function ends here */
2984     retst = &code->statements.back();
2985     if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) &&
2986         self->m_outtype == TYPE_VOID &&
2987         retst->opcode == INSTR_RETURN &&
2988         !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1)
2989     {
2990         retst->opcode = INSTR_DONE;
2991         ++opts_optimizationcount[OPTIM_VOID_RETURN];
2992     } else {
2993         lex_ctx_t last;
2994
2995         stmt.opcode = INSTR_DONE;
2996         stmt.o1.u1  = 0;
2997         stmt.o2.u1  = 0;
2998         stmt.o3.u1  = 0;
2999         last.line   = code->linenums.back();
3000         last.column = code->columnnums.back();
3001
3002         code_push_statement(code, &stmt, last);
3003     }
3004     return true;
3005 }
3006
3007 qcint_t ir_builder::filestring(const char *filename)
3008 {
3009     /* NOTE: filename pointers are copied, we never strdup them,
3010      * thus we can use pointer-comparison to find the string.
3011      */
3012     qcint_t  str;
3013
3014     for (size_t i = 0; i != m_filenames.size(); ++i) {
3015         if (!strcmp(m_filenames[i], filename))
3016             return i;
3017     }
3018
3019     str = code_genstring(m_code.get(), filename);
3020     m_filenames.push_back(filename);
3021     m_filestrings.push_back(str);
3022     return str;
3023 }
3024
3025 bool ir_builder::generateGlobalFunction(ir_value *global)
3026 {
3027     prog_section_function_t fun;
3028     ir_function            *irfun;
3029
3030     size_t i;
3031
3032     if (!global->m_hasvalue || (!global->m_constval.vfunc)) {
3033         irerror(global->m_context, "Invalid state of function-global: not constant: %s", global->m_name.c_str());
3034         return false;
3035     }
3036
3037     irfun = global->m_constval.vfunc;
3038     fun.name = global->m_code.name;
3039     fun.file = filestring(global->m_context.file);
3040     fun.profile = 0; /* always 0 */
3041     fun.nargs = vec_size(irfun->m_params);
3042     if (fun.nargs > 8)
3043         fun.nargs = 8;
3044
3045     for (i = 0; i < 8; ++i) {
3046         if ((int32_t)i >= fun.nargs)
3047             fun.argsize[i] = 0;
3048         else
3049             fun.argsize[i] = type_sizeof_[irfun->m_params[i]];
3050     }
3051
3052     fun.firstlocal = 0;
3053     fun.locals = irfun->m_allocated_locals;
3054
3055     if (irfun->m_builtin)
3056         fun.entry = irfun->m_builtin+1;
3057     else {
3058         irfun->m_code_function_def = m_code->functions.size();
3059         fun.entry = m_code->statements.size();
3060     }
3061
3062     m_code->functions.push_back(fun);
3063     return true;
3064 }
3065
3066 ir_value* ir_builder::generateExtparamProto()
3067 {
3068     char      name[128];
3069
3070     util_snprintf(name, sizeof(name), "EXTPARM#%i", (int)(m_extparam_protos.size()));
3071     ir_value *global = new ir_value(name, store_global, TYPE_VECTOR);
3072     m_extparam_protos.emplace_back(global);
3073
3074     return global;
3075 }
3076
3077 void ir_builder::generateExtparam()
3078 {
3079     prog_section_def_t def;
3080     ir_value          *global;
3081
3082     if (m_extparam_protos.size() < m_extparams.size()+1)
3083         global = generateExtparamProto();
3084     else
3085         global = m_extparam_protos[m_extparams.size()].get();
3086
3087     def.name = code_genstring(m_code.get(), global->m_name.c_str());
3088     def.type = TYPE_VECTOR;
3089     def.offset = m_code->globals.size();
3090
3091     m_code->defs.push_back(def);
3092
3093     global->setCodeAddress(def.offset);
3094
3095     m_code->globals.push_back(0);
3096     m_code->globals.push_back(0);
3097     m_code->globals.push_back(0);
3098
3099     m_extparams.emplace_back(global);
3100 }
3101
3102 static bool gen_function_extparam_copy(code_t *code, ir_function *self)
3103 {
3104     ir_builder *ir = self->m_owner;
3105
3106     size_t numparams = vec_size(self->m_params);
3107     if (!numparams)
3108         return true;
3109
3110     prog_section_statement_t stmt;
3111     stmt.opcode = INSTR_STORE_F;
3112     stmt.o3.s1 = 0;
3113     for (size_t i = 8; i < numparams; ++i) {
3114         size_t ext = i - 8;
3115         if (ext >= ir->m_extparams.size())
3116             ir->generateExtparam();
3117
3118         ir_value *ep = ir->m_extparams[ext];
3119
3120         stmt.opcode = type_store_instr[self->m_locals[i]->m_vtype];
3121         if (self->m_locals[i]->m_vtype == TYPE_FIELD &&
3122             self->m_locals[i]->m_fieldtype == TYPE_VECTOR)
3123         {
3124             stmt.opcode = INSTR_STORE_V;
3125         }
3126         stmt.o1.u1 = ep->codeAddress();
3127         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3128         code_push_statement(code, &stmt, self->m_context);
3129     }
3130
3131     return true;
3132 }
3133
3134 static bool gen_function_varargs_copy(code_t *code, ir_function *self)
3135 {
3136     size_t i, ext, numparams, maxparams;
3137
3138     ir_builder *ir = self->m_owner;
3139     ir_value   *ep;
3140     prog_section_statement_t stmt;
3141
3142     numparams = vec_size(self->m_params);
3143     if (!numparams)
3144         return true;
3145
3146     stmt.opcode = INSTR_STORE_V;
3147     stmt.o3.s1 = 0;
3148     maxparams = numparams + self->m_max_varargs;
3149     for (i = numparams; i < maxparams; ++i) {
3150         if (i < 8) {
3151             stmt.o1.u1 = OFS_PARM0 + 3*i;
3152             stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3153             code_push_statement(code, &stmt, self->m_context);
3154             continue;
3155         }
3156         ext = i - 8;
3157         while (ext >= ir->m_extparams.size())
3158             ir->generateExtparam();
3159
3160         ep = ir->m_extparams[ext];
3161
3162         stmt.o1.u1 = ep->codeAddress();
3163         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3164         code_push_statement(code, &stmt, self->m_context);
3165     }
3166
3167     return true;
3168 }
3169
3170 bool ir_builder::generateFunctionLocals(ir_value *global)
3171 {
3172     prog_section_function_t *def;
3173     ir_function             *irfun;
3174     uint32_t                 firstlocal, firstglobal;
3175
3176     irfun = global->m_constval.vfunc;
3177     def   = &m_code->functions[0] + irfun->m_code_function_def;
3178
3179     if (OPTS_OPTION_BOOL(OPTION_G) ||
3180         !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS)        ||
3181         (irfun->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3182     {
3183         firstlocal = def->firstlocal = m_code->globals.size();
3184     } else {
3185         firstlocal = def->firstlocal = m_first_common_local;
3186         ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
3187     }
3188
3189     firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? m_first_common_globaltemp : firstlocal);
3190
3191     for (size_t i = m_code->globals.size(); i < firstlocal + irfun->m_allocated_locals; ++i)
3192         m_code->globals.push_back(0);
3193
3194     for (auto& lp : irfun->m_locals) {
3195         ir_value *v = lp.get();
3196         if (v->m_locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
3197             v->setCodeAddress(firstlocal + v->m_code.local);
3198             if (!generateGlobal(v, true)) {
3199                 irerror(v->m_context, "failed to generate local %s", v->m_name.c_str());
3200                 return false;
3201             }
3202         }
3203         else
3204             v->setCodeAddress(firstglobal + v->m_code.local);
3205     }
3206     for (auto& vp : irfun->m_values) {
3207         ir_value *v = vp.get();
3208         if (v->m_callparam)
3209             continue;
3210         if (v->m_locked)
3211             v->setCodeAddress(firstlocal + v->m_code.local);
3212         else
3213             v->setCodeAddress(firstglobal + v->m_code.local);
3214     }
3215     return true;
3216 }
3217
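/* Emit the code for one function: record its entry point, place its locals,
 * emit the extparam / vararg entry copies, then generate the body via
 * gen_function_code().
 */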
3218 bool ir_builder::generateGlobalFunctionCode(ir_value *global)
3219 {
3220     prog_section_function_t *fundef;
3221     ir_function             *irfun;
3222
3223     irfun = global->m_constval.vfunc;
3224     if (!irfun) {
3225         if (global->m_cvq == CV_NONE) {
3226             if (irwarning(global->m_context, WARN_IMPLICIT_FUNCTION_POINTER,
3227                           "function `%s` has no body and in QC implicitly becomes a function-pointer",
3228                           global->m_name.c_str()))
3229             {
3230                 /* Not bailing out just now. If this happens a lot you don't want to have
3231                  * to rerun gmqcc for each such function.
3232                  */
3233
3234                 /* return false; */
3235             }
3236         }
3237         /* this was a function pointer, don't generate code for those */
3238         return true;
3239     }
3240
3241     if (irfun->m_builtin)
3242         return true;
3243
3244     /*
3245      * If there is no definition and the function is erasable, we can skip
3246      * emitting it entirely.
3247      */
3248     if (global->m_flags & IR_FLAG_ERASABLE && irfun->m_code_function_def < 0) {
3249         return true;
3250     }
3251
3252     if (irfun->m_code_function_def < 0) {
3253         irerror(irfun->m_context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->m_name.c_str());
3254         return false;
3255     }
3256     fundef = &m_code->functions[irfun->m_code_function_def];
3257
3258     fundef->entry = m_code->statements.size();
3259     if (!generateFunctionLocals(global)) {
3260         irerror(irfun->m_context, "Failed to generate locals for function %s", irfun->m_name.c_str());
3261         return false;
3262     }
3263     if (!gen_function_extparam_copy(m_code.get(), irfun)) {
3264         irerror(irfun->m_context, "Failed to generate extparam-copy code for function %s", irfun->m_name.c_str());
3265         return false;
3266     }
3267     if (irfun->m_max_varargs && !gen_function_varargs_copy(m_code.get(), irfun)) {
3268         irerror(irfun->m_context, "Failed to generate vararg-copy code for function %s", irfun->m_name.c_str());
3269         return false;
3270     }
3271     if (!gen_function_code(m_code.get(), irfun)) {
3272         irerror(irfun->m_context, "Failed to generate code for function %s", irfun->m_name.c_str());
3273         return false;
3274     }
3275     return true;
3276 }
3277
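/* For a vector def, also emit three float defs "<name>_x/_y/_z" at consecutive
 * offsets so the components are addressable by name; e.g. a hypothetical
 * vector "org" at offset N yields org_x@N, org_y@N+1, org_z@N+2.  Skipped for
 * immediates ("#...") and when single-vector-defs is requested.
 */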
3278 static void gen_vector_defs(code_t *code, prog_section_def_t def, const char *name)
3279 {
3280     char  *component;
3281     size_t len, i;
3282
3283     if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3284         return;
3285
3286     def.type = TYPE_FLOAT;
3287
3288     len = strlen(name);
3289
3290     component = (char*)mem_a(len+3);
3291     memcpy(component, name, len);
3292     len += 2;
3293     component[len-0] = 0;
3294     component[len-2] = '_';
3295
3296     component[len-1] = 'x';
3297
3298     for (i = 0; i < 3; ++i) {
3299         def.name = code_genstring(code, component);
3300         code->defs.push_back(def);
3301         def.offset++;
3302         component[len-1]++;
3303     }
3304
3305     mem_d(component);
3306 }
3307
3308 static void gen_vector_fields(code_t *code, prog_section_field_t fld, const char *name)
3309 {
3310     char  *component;
3311     size_t len, i;
3312
3313     if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3314         return;
3315
3316     fld.type = TYPE_FLOAT;
3317
3318     len = strlen(name);
3319
3320     component = (char*)mem_a(len+3);
3321     memcpy(component, name, len);
3322     len += 2;
3323     component[len-0] = 0;
3324     component[len-2] = '_';
3325
3326     component[len-1] = 'x';
3327
3328     for (i = 0; i < 3; ++i) {
3329         fld.name = code_genstring(code, component);
3330         code->fields.push_back(fld);
3331         fld.offset++;
3332         component[len-1]++;
3333     }
3334
3335     mem_d(component);
3336 }
3337
3338 bool ir_builder::generateGlobal(ir_value *global, bool islocal)
3339 {
3340     size_t             i;
3341     int32_t           *iptr;
3342     prog_section_def_t def;
3343     bool               pushdef = opts.optimizeoff;
3344
3345     /* we don't generate split-vectors */
3346     if (global->m_vtype == TYPE_VECTOR && (global->m_flags & IR_FLAG_SPLIT_VECTOR))
3347         return true;
3348
3349     def.type = global->m_vtype;
3350     def.offset = m_code->globals.size();
3351     def.name = 0;
3352     if (OPTS_OPTION_BOOL(OPTION_G) || !islocal)
3353     {
3354         pushdef = true;
3355
3356         /*
3357          * if the global is erasable and never referenced, skip emitting
3358          * it altogether.
3359          */
3360         if (global->m_flags & IR_FLAG_ERASABLE && global->m_reads.empty()) {
3361             return true;
3362         }
3363
3364         if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) &&
3365             !(global->m_flags & IR_FLAG_INCLUDE_DEF) &&
3366             (global->m_name[0] == '#' || global->m_cvq == CV_CONST))
3367         {
3368             pushdef = false;
3369         }
3370
3371         if (pushdef) {
3372             if (global->m_name[0] == '#') {
3373                 if (!m_str_immediate)
3374                     m_str_immediate = code_genstring(m_code.get(), "IMMEDIATE");
3375                 def.name = global->m_code.name = m_str_immediate;
3376             }
3377             else
3378                 def.name = global->m_code.name = code_genstring(m_code.get(), global->m_name.c_str());
3379         }
3380         else
3381             def.name   = 0;
3382         if (islocal) {
3383             def.offset = global->codeAddress();
3384             m_code->defs.push_back(def);
3385             if (global->m_vtype == TYPE_VECTOR)
3386                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3387             else if (global->m_vtype == TYPE_FIELD && global->m_fieldtype == TYPE_VECTOR)
3388                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3389             return true;
3390         }
3391     }
3392     if (islocal)
3393         return true;
3394
3395     switch (global->m_vtype)
3396     {
3397     case TYPE_VOID:
3398         if (0 == global->m_name.compare("end_sys_globals")) {
3399             // TODO: remember this point... all the defs before this one
3400             // should be checksummed and added to progdefs.h when we generate it.
3401         }
3402         else if (0 == global->m_name.compare("end_sys_fields")) {
3403             // TODO: same as above, but for entity fields rather than globals
3404         }
3405         else if(irwarning(global->m_context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`",
3406                           global->m_name.c_str()))
3407         {
3408             /* Not bailing out */
3409             /* return false; */
3410         }
3411         /* I'd argue setting it to 0 is sufficient, but maybe something depends on
3412          * knowing how far the system fields actually go? Though the engine knows this anyway.
3413          * Maybe this could become an -f option.
3414          * fteqcc creates data of size 1 for end_sys_*, so let's do the same.
3415          */
3416         global->setCodeAddress(m_code->globals.size());
3417         m_code->globals.push_back(0);
3418         /* Add the def */
3419         if (pushdef)
3420             m_code->defs.push_back(def);
3421         return true;
3422     case TYPE_POINTER:
3423         if (pushdef)
3424             m_code->defs.push_back(def);
3425         return gen_global_pointer(m_code.get(), global);
3426     case TYPE_FIELD:
3427         if (pushdef) {
3428             m_code->defs.push_back(def);
3429             if (global->m_fieldtype == TYPE_VECTOR)
3430                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3431         }
3432         return gen_global_field(m_code.get(), global);
3433     case TYPE_ENTITY:
3434         /* fall through */
3435     case TYPE_FLOAT:
3436     {
3437         global->setCodeAddress(m_code->globals.size());
3438         if (global->m_hasvalue) {
3439             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3440                 return true;
3441             iptr = (int32_t*)&global->m_constval.ivec[0];
3442             m_code->globals.push_back(*iptr);
3443         } else {
3444             m_code->globals.push_back(0);
3445         }
3446         if (!islocal && global->m_cvq != CV_CONST)
3447             def.type |= DEF_SAVEGLOBAL;
3448         if (pushdef)
3449             m_code->defs.push_back(def);
3450
3451         return global->m_code.globaladdr >= 0;
3452     }
3453     case TYPE_STRING:
3454     {
3455         global->setCodeAddress(m_code->globals.size());
3456         if (global->m_hasvalue) {
3457             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3458                 return true;
3459             uint32_t load = code_genstring(m_code.get(), global->m_constval.vstring);
3460             m_code->globals.push_back(load);
3461         } else {
3462             m_code->globals.push_back(0);
3463         }
3464         if (!islocal && global->m_cvq != CV_CONST)
3465             def.type |= DEF_SAVEGLOBAL;
3466         if (pushdef)
3467             m_code->defs.push_back(def);
3468         return global->m_code.globaladdr >= 0;
3469     }
3470     case TYPE_VECTOR:
3471     {
3472         size_t d;
3473         global->setCodeAddress(m_code->globals.size());
3474         if (global->m_hasvalue) {
3475             iptr = (int32_t*)&global->m_constval.ivec[0];
3476             m_code->globals.push_back(iptr[0]);
3477             if (global->m_code.globaladdr < 0)
3478                 return false;
3479             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3480                 m_code->globals.push_back(iptr[d]);
3481             }
3482         } else {
3483             m_code->globals.push_back(0);
3484             if (global->m_code.globaladdr < 0)
3485                 return false;
3486             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3487                 m_code->globals.push_back(0);
3488             }
3489         }
3490         if (!islocal && global->m_cvq != CV_CONST)
3491             def.type |= DEF_SAVEGLOBAL;
3492
3493         if (pushdef) {
3494             m_code->defs.push_back(def);
3495             def.type &= ~DEF_SAVEGLOBAL;
3496             gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3497         }
3498         return global->m_code.globaladdr >= 0;
3499     }
3500     case TYPE_FUNCTION:
3501         global->setCodeAddress(m_code->globals.size());
3502         if (!global->m_hasvalue) {
3503             m_code->globals.push_back(0);
3504             if (global->m_code.globaladdr < 0)
3505                 return false;
3506         } else {
3507             m_code->globals.push_back(m_code->functions.size());
3508             if (!generateGlobalFunction(global))
3509                 return false;
3510         }
3511         if (!islocal && global->m_cvq != CV_CONST)
3512             def.type |= DEF_SAVEGLOBAL;
3513         if (pushdef)
3514             m_code->defs.push_back(def);
3515         return true;
3516     case TYPE_VARIANT:
3517         /* assume biggest type */
3518         global->setCodeAddress(m_code->globals.size());
3519         m_code->globals.push_back(0);
3520         for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i)
3521             m_code->globals.push_back(0);
3522         return true;
3523     default:
3524         /* refuse to create globals of any remaining type (struct, union, array, ...). */
3525         irerror(global->m_context, "Invalid type for global variable `%s`: %s",
3526                 global->m_name.c_str(), type_name[global->m_vtype]);
3527         return false;
3528     }
3529 }
3530
3531 static GMQCC_INLINE void ir_builder_prepare_field(code_t *code, ir_value *field)
3532 {
3533     field->m_code.fieldaddr = code_alloc_field(code, type_sizeof_[field->m_fieldtype]);
3534 }
3535
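/* Generate one entity field: a def entry, a field entry and a global holding
 * the field's offset.  Under -std=gmqcc the def gets a leading dot (e.g.
 * ".velocity" for a hypothetical ".vector velocity;") while the field entry
 * reuses the same string without the dot; vector fields additionally store
 * their three component offsets and get _x/_y/_z defs.
 */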
3536 static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
3537 {
3538     prog_section_def_t def;
3539     prog_section_field_t fld;
3540
3541     (void)self;
3542
3543     def.type   = (uint16_t)field->m_vtype;
3544     def.offset = (uint16_t)self->m_code->globals.size();
3545
3546     /* create a global named the same as the field */
3547     if (OPTS_OPTION_U32(OPTION_STANDARD) == COMPILER_GMQCC) {
3548         /* in our standard, the global gets a dot prefix */
3549         size_t len = field->m_name.length();
3550         char name[1024];
3551
3552         /* we really don't want to have to allocate this, and 1024
3553          * bytes is more than enough for a variable/field name
3554          */
3555         if (len+2 >= sizeof(name)) {
3556             irerror(field->m_context, "invalid field name size: %u", (unsigned int)len);
3557             return false;
3558         }
3559
3560         name[0] = '.';
3561         memcpy(name+1, field->m_name.c_str(), len); // no strncpy - we used strlen above
3562         name[len+1] = 0;
3563
3564         def.name = code_genstring(self->m_code.get(), name);
3565         fld.name = def.name + 1; /* we reuse that string table entry */
3566     } else {
3567         /* in plain QC a regular global cannot share the field's name anyway,
3568          * so we simply give the generated global the same name as the field.
3569          * FIXME: fteqcc should create such a global as well; check whether it
3570          * actually uses the same name. It probably does.
3571          */
3572         def.name = code_genstring(self->m_code.get(), field->m_name.c_str());
3573         fld.name = def.name;
3574     }
3575
3576     field->m_code.name = def.name;
3577
3578     self->m_code->defs.push_back(def);
3579
3580     fld.type = field->m_fieldtype;
3581
3582     if (fld.type == TYPE_VOID) {
3583         irerror(field->m_context, "field is missing a type: %s - don't know its size", field->m_name.c_str());
3584         return false;
3585     }
3586
3587     fld.offset = field->m_code.fieldaddr;
3588
3589     self->m_code->fields.push_back(fld);
3590
3591     field->setCodeAddress(self->m_code->globals.size());
3592     self->m_code->globals.push_back(fld.offset);
3593     if (fld.type == TYPE_VECTOR) {
3594         self->m_code->globals.push_back(fld.offset+1);
3595         self->m_code->globals.push_back(fld.offset+2);
3596     }
3597
3598     if (field->m_fieldtype == TYPE_VECTOR) {
3599         gen_vector_defs  (self->m_code.get(), def, field->m_name.c_str());
3600         gen_vector_fields(self->m_code.get(), fld, field->m_name.c_str());
3601     }
3602
3603     return field->m_code.globaladdr >= 0;
3604 }
3605
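/* Collect every float constant that already has a value (named constants and
 * "#" immediates); ir_builder_split_vector() below reuses these as components
 * when breaking constant vectors apart.
 */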
3606 static void ir_builder_collect_reusables(ir_builder *builder) {
3607     std::vector<ir_value*> reusables;
3608
3609     for (auto& gp : builder->m_globals) {
3610         ir_value *value = gp.get();
3611         if (value->m_vtype != TYPE_FLOAT || !value->m_hasvalue)
3612             continue;
3613         if (value->m_cvq == CV_CONST || (value->m_name.length() >= 1 && value->m_name[0] == '#'))
3614             reusables.emplace_back(value);
3615     }
3616     builder->m_const_floats = std::move(reusables);
3617 }
3618
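/* Split a constant vector that is only ever read as a direct call argument
 * into three float components, e.g. a hypothetical call
 *     makevectors('0 90 0');
 * can then fill the parameter from three (possibly shared) float constants
 * instead of requiring a 3-word vector global.
 */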
3619 static void ir_builder_split_vector(ir_builder *self, ir_value *vec) {
3620     ir_value* found[3] = { nullptr, nullptr, nullptr };
3621
3622     // must not be written to
3623     if (vec->m_writes.size())
3624         return;
3625     // must not be trying to access individual members
3626     if (vec->m_members[0] || vec->m_members[1] || vec->m_members[2])
3627         return;
3628     // it must actually be used, otherwise it won't be generated anyway
3629     if (vec->m_reads.empty())
3630         return;
3631     //size_t count = vec->m_reads.size();
3632     //if (!count)
3633     //    return;
3634
3635     // it may only be used directly as a function parameter; if we find any other use, cancel
3636     for (ir_instr *user : vec->m_reads) {
3637         // we only split vectors that are used directly as call parameters
3638         if ((user->m_opcode < INSTR_CALL0 || user->m_opcode > INSTR_CALL8) && user->m_opcode != VINSTR_NRCALL)
3639             return;
3640     }
3641
3642     vec->m_flags |= IR_FLAG_SPLIT_VECTOR;
3643
3644     // find existing floats making up the split
3645     for (ir_value *c : self->m_const_floats) {
3646         if (!found[0] && c->m_constval.vfloat == vec->m_constval.vvec.x)
3647             found[0] = c;
3648         if (!found[1] && c->m_constval.vfloat == vec->m_constval.vvec.y)
3649             found[1] = c;
3650         if (!found[2] && c->m_constval.vfloat == vec->m_constval.vvec.z)
3651             found[2] = c;
3652         if (found[0] && found[1] && found[2])
3653             break;
3654     }
3655
3656     // generate floats for not yet found components
3657     if (!found[0])
3658         found[0] = self->literalFloat(vec->m_constval.vvec.x, true);
3659     if (!found[1]) {
3660         if (vec->m_constval.vvec.y == vec->m_constval.vvec.x)
3661             found[1] = found[0];
3662         else
3663             found[1] = self->literalFloat(vec->m_constval.vvec.y, true);
3664     }
3665     if (!found[2]) {
3666         if (vec->m_constval.vvec.z == vec->m_constval.vvec.x)
3667             found[2] = found[0];
3668         else if (vec->m_constval.vvec.z == vec->m_constval.vvec.y)
3669             found[2] = found[1];
3670         else
3671             found[2] = self->literalFloat(vec->m_constval.vvec.z, true);
3672     }
3673
3674     // the .members array should be safe to use here
3675     vec->m_members[0] = found[0];
3676     vec->m_members[1] = found[1];
3677     vec->m_members[2] = found[2];
3678
3679     // register the readers for these floats
3680     found[0]->m_reads.insert(found[0]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3681     found[1]->m_reads.insert(found[1]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3682     found[2]->m_reads.insert(found[2]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3683 }
3684
3685 static void ir_builder_split_vectors(ir_builder *self) {
3686     // member values may be added to self->m_globals during this operation, but
3687     // no new vectors will be added; iterate by index, since C++ iterators
3688     // could be invalidated by those insertions
3689     const size_t count = self->m_globals.size();
3690     for (size_t i = 0; i != count; ++i) {
3691         ir_value *v = self->m_globals[i].get();
3692         if (v->m_vtype != TYPE_VECTOR || !v->m_name.length() || v->m_name[0] != '#')
3693             continue;
3694         ir_builder_split_vector(self, v);
3695     }
3696 }
3697
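/* Top-level code generation: optionally split constant vector parameters,
 * lay out fields and globals, reserve the shared nil / vinstr-temp /
 * global-temp / common-local blocks, emit every function's code, append a
 * trailing DONE if needed and finally write the output (plus an optional
 * .lno line-number file).
 */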
3698 bool ir_builder::generate(const char *filename)
3699 {
3700     prog_section_statement_t stmt;
3701     char  *lnofile = nullptr;
3702
3703     if (OPTS_FLAG(SPLIT_VECTOR_PARAMETERS)) {
3704         ir_builder_collect_reusables(this);
3705         if (!m_const_floats.empty())
3706             ir_builder_split_vectors(this);
3707     }
3708
3709     for (auto& fp : m_fields)
3710         ir_builder_prepare_field(m_code.get(), fp.get());
3711
3712     for (auto& gp : m_globals) {
3713         ir_value *global = gp.get();
3714         if (!generateGlobal(global, false)) {
3715             return false;
3716         }
3717         if (global->m_vtype == TYPE_FUNCTION) {
3718             ir_function *func = global->m_constval.vfunc;
3719             if (func && m_max_locals < func->m_allocated_locals &&
3720                 !(func->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3721             {
3722                 m_max_locals = func->m_allocated_locals;
3723             }
3724             if (func && m_max_globaltemps < func->m_globaltemps)
3725                 m_max_globaltemps = func->m_globaltemps;
3726         }
3727     }
3728
3729     for (auto& fp : m_fields) {
3730         if (!ir_builder_gen_field(this, fp.get()))
3731             return false;
3732     }
3733
3734     // generate nil
3735     m_nil->setCodeAddress(m_code->globals.size());
3736     m_code->globals.push_back(0);
3737     m_code->globals.push_back(0);
3738     m_code->globals.push_back(0);
3739
3740     // generate virtual-instruction temps
3741     for (size_t i = 0; i < IR_MAX_VINSTR_TEMPS; ++i) {
3742         m_vinstr_temp[i]->setCodeAddress(m_code->globals.size());
3743         m_code->globals.push_back(0);
3744         m_code->globals.push_back(0);
3745         m_code->globals.push_back(0);
3746     }
3747
3748     // generate global temps
3749     m_first_common_globaltemp = m_code->globals.size();
3750     m_code->globals.insert(m_code->globals.end(), m_max_globaltemps, 0);
3751     // FIXME:DELME:
3752     //for (size_t i = 0; i < m_max_globaltemps; ++i) {
3753     //    m_code->globals.push_back(0);
3754     //}
3755     // generate common locals
3756     m_first_common_local = m_code->globals.size();
3757     m_code->globals.insert(m_code->globals.end(), m_max_locals, 0);
3758     // FIXME:DELME:
3759     //for (i = 0; i < m_max_locals; ++i) {
3760     //    m_code->globals.push_back(0);
3761     //}
3762
3763     // generate function code
3764
3765     for (auto& gp : m_globals) {
3766         ir_value *global = gp.get();
3767         if (global->m_vtype == TYPE_FUNCTION) {
3768             if (!this->generateGlobalFunctionCode(global))
3769                 return false;
3770         }
3771     }
3772
3773     if (m_code->globals.size() >= 65536) {
3774         irerror(m_globals.back()->m_context,
3775             "This progs file would require more globals than the metadata can handle (%zu). Bailing out.",
3776             m_code->globals.size());
3777         return false;
3778     }
3779
3780     /* DarkPlaces (DP) errors out if the last instruction is not an INSTR_DONE. */
3781     if (m_code->statements.back().opcode != INSTR_DONE)
3782     {
3783         lex_ctx_t last;
3784
3785         stmt.opcode = INSTR_DONE;
3786         stmt.o1.u1  = 0;
3787         stmt.o2.u1  = 0;
3788         stmt.o3.u1  = 0;
3789         last.line   = m_code->linenums.back();
3790         last.column = m_code->columnnums.back();
3791
3792         code_push_statement(m_code.get(), &stmt, last);
3793     }
3794
3795     if (OPTS_OPTION_BOOL(OPTION_PP_ONLY))
3796         return true;
3797
3798     if (m_code->statements.size() != m_code->linenums.size()) {
3799         con_err("Linecounter wrong: %zu != %zu\n",
3800                 m_code->statements.size(),
3801                 m_code->linenums.size());
3802     } else if (OPTS_FLAG(LNO)) {
3803         char  *dot;
3804         size_t filelen = strlen(filename);
3805
3806         memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
3807         dot = strrchr(lnofile, '.');
3808         if (!dot) {
3809             vec_pop(lnofile);
3810         } else {
3811             vec_shrinkto(lnofile, dot - lnofile);
3812         }
3813         memcpy(vec_add(lnofile, 5), ".lno", 5);
3814     }
3815
3816     if (!code_write(m_code.get(), filename, lnofile)) {
3817         vec_free(lnofile);
3818         return false;
3819     }
3820
3821     vec_free(lnofile);
3822     return true;
3823 }
3824
3825 /***********************************************************************
3826  * IR DEBUG Dump functions...
3827  */
3828
3829 #define IND_BUFSZ 1024
3830
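/* Map an opcode to a printable name: anything below VINSTR_END is a real
 * progs instruction looked up in util_instr_str[], the rest are gmqcc's
 * internal virtual instructions.
 */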
3831 static const char *qc_opname(int op)
3832 {
3833     if (op < 0) return "<INVALID>";
3834     if (op < VINSTR_END)
3835         return util_instr_str[op];
3836     switch (op) {
3837         case VINSTR_END:       return "END";
3838         case VINSTR_PHI:       return "PHI";
3839         case VINSTR_JUMP:      return "JUMP";
3840         case VINSTR_COND:      return "COND";
3841         case VINSTR_BITXOR:    return "BITXOR";
3842         case VINSTR_BITAND_V:  return "BITAND_V";
3843         case VINSTR_BITOR_V:   return "BITOR_V";
3844         case VINSTR_BITXOR_V:  return "BITXOR_V";
3845         case VINSTR_BITAND_VF: return "BITAND_VF";
3846         case VINSTR_BITOR_VF:  return "BITOR_VF";
3847         case VINSTR_BITXOR_VF: return "BITXOR_VF";
3848         case VINSTR_CROSS:     return "CROSS";
3849         case VINSTR_NEG_F:     return "NEG_F";
3850         case VINSTR_NEG_V:     return "NEG_V";
3851         default:               return "<UNK>";
3852     }
3853 }
3854
3855 void ir_builder::dump(int (*oprintf)(const char*, ...)) const
3856 {
3857     size_t i;
3858     char indent[IND_BUFSZ];
3859     indent[0] = '\t';
3860     indent[1] = 0;
3861
3862     oprintf("module %s\n", m_name.c_str());
3863     for (i = 0; i < m_globals.size(); ++i)
3864     {
3865         oprintf("global ");
3866         if (m_globals[i]->m_hasvalue)
3867             oprintf("%s = ", m_globals[i]->m_name.c_str());
3868         m_globals[i].get()->dump(oprintf);
3869         oprintf("\n");
3870     }
3871     for (i = 0; i < m_functions.size(); ++i)
3872         ir_function_dump(m_functions[i].get(), indent, oprintf);
3873     oprintf("endmodule %s\n", m_name.c_str());
3874 }
3875
3876 static const char *storenames[] = {
3877     "[global]", "[local]", "[param]", "[value]", "[return]"
3878 };
3879
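/* Dump one function: its locals, the computed life ranges of locals and
 * temporaries as [start,end] intervals, and then every basic block.
 */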
3880 void ir_function_dump(ir_function *f, char *ind,
3881                       int (*oprintf)(const char*, ...))
3882 {
3883     size_t i;
3884     if (f->m_builtin != 0) {
3885         oprintf("%sfunction %s = builtin %i\n", ind, f->m_name.c_str(), -f->m_builtin);
3886         return;
3887     }
3888     oprintf("%sfunction %s\n", ind, f->m_name.c_str());
3889     util_strncat(ind, "\t", IND_BUFSZ-1);
3890     if (f->m_locals.size())
3891     {
3892         oprintf("%s%i locals:\n", ind, (int)f->m_locals.size());
3893         for (i = 0; i < f->m_locals.size(); ++i) {
3894             oprintf("%s\t", ind);
3895             f->m_locals[i].get()->dump(oprintf);
3896             oprintf("\n");
3897         }
3898     }
3899     oprintf("%sliferanges:\n", ind);
3900     for (i = 0; i < f->m_locals.size(); ++i) {
3901         const char *attr = "";
3902         size_t l, m;
3903         ir_value *v = f->m_locals[i].get();
3904         if (v->m_unique_life && v->m_locked)
3905             attr = "unique,locked ";
3906         else if (v->m_unique_life)
3907             attr = "unique ";
3908         else if (v->m_locked)
3909             attr = "locked ";
3910         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3911                 storenames[v->m_store],
3912                 attr, (v->m_callparam ? "callparam " : ""),
3913                 (int)v->m_code.local);
3914         if (v->m_life.empty())
3915             oprintf("[null]");
3916         for (l = 0; l < v->m_life.size(); ++l) {
3917             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3918         }
3919         oprintf("\n");
3920         for (m = 0; m < 3; ++m) {
3921             ir_value *vm = v->m_members[m];
3922             if (!vm)
3923                 continue;
3924             oprintf("%s\t%s: @%i ", ind, vm->m_name.c_str(), (int)vm->m_code.local);
3925             for (l = 0; l < vm->m_life.size(); ++l) {
3926                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3927             }
3928             oprintf("\n");
3929         }
3930     }
3931     for (i = 0; i < f->m_values.size(); ++i) {
3932         const char *attr = "";
3933         size_t l, m;
3934         ir_value *v = f->m_values[i].get();
3935         if (v->m_unique_life && v->m_locked)
3936             attr = "unique,locked ";
3937         else if (v->m_unique_life)
3938             attr = "unique ";
3939         else if (v->m_locked)
3940             attr = "locked ";
3941         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3942                 storenames[v->m_store],
3943                 attr, (v->m_callparam ? "callparam " : ""),
3944                 (int)v->m_code.local);
3945         if (v->m_life.empty())
3946             oprintf("[null]");
3947         for (l = 0; l < v->m_life.size(); ++l) {
3948             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3949         }
3950         oprintf("\n");
3951         for (m = 0; m < 3; ++m) {
3952             ir_value *vm = v->m_members[m];
3953             if (!vm)
3954                 continue;
3955             if (vm->m_unique_life && vm->m_locked)
3956                 attr = "unique,locked ";
3957             else if (vm->m_unique_life)
3958                 attr = "unique ";
3959             else if (vm->m_locked)
3960                 attr = "locked ";
3961             oprintf("%s\t%s: %s@%i ", ind, vm->m_name.c_str(), attr, (int)vm->m_code.local);
3962             for (l = 0; l < vm->m_life.size(); ++l) {
3963                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3964             }
3965             oprintf("\n");
3966         }
3967     }
3968     if (f->m_blocks.size())
3969     {
3970         oprintf("%slife passes: %i\n", ind, (int)f->m_run_id);
3971         for (i = 0; i < f->m_blocks.size(); ++i) {
3972             ir_block_dump(f->m_blocks[i].get(), ind, oprintf);
3973         }
3974
3975     }
3976     ind[strlen(ind)-1] = 0;
3977     oprintf("%sendfunction %s\n", ind, f->m_name.c_str());
3978 }
3979
3980 void ir_block_dump(ir_block* b, char *ind,
3981                    int (*oprintf)(const char*, ...))
3982 {
3983     size_t i;
3984     oprintf("%s:%s\n", ind, b->m_label.c_str());
3985     util_strncat(ind, "\t", IND_BUFSZ-1);
3986
3987     if (b->m_instr && b->m_instr[0])
3988         oprintf("%s (%i) [entry]\n", ind, (int)(b->m_instr[0]->m_eid-1));
3989     for (i = 0; i < vec_size(b->m_instr); ++i)
3990         ir_instr_dump(b->m_instr[i], ind, oprintf);
3991     ind[strlen(ind)-1] = 0;
3992 }
3993
3994 static void dump_phi(ir_instr *in, int (*oprintf)(const char*, ...))
3995 {
3996     oprintf("%s <- phi ", in->_m_ops[0]->m_name.c_str());
3997     for (auto &it : in->m_phi) {
3998         oprintf("([%s] : %s) ", it.from->m_label.c_str(),
3999                                 it.value->m_name.c_str());
4000     }
4001     oprintf("\n");
4002 }
4003
4004 void ir_instr_dump(ir_instr *in, char *ind,
4005                        int (*oprintf)(const char*, ...))
4006 {
4007     size_t i;
4008     const char *comma = nullptr;
4009
4010     oprintf("%s (%i) ", ind, (int)in->m_eid);
4011
4012     if (in->m_opcode == VINSTR_PHI) {
4013         dump_phi(in, oprintf);
4014         return;
4015     }
4016
4017     util_strncat(ind, "\t", IND_BUFSZ-1);
4018
4019     if (in->_m_ops[0] && (in->_m_ops[1] || in->_m_ops[2])) {
4020         in->_m_ops[0]->dump(oprintf);
4021         if (in->_m_ops[1] || in->_m_ops[2])
4022             oprintf(" <- ");
4023     }
4024     if (in->m_opcode == INSTR_CALL0 || in->m_opcode == VINSTR_NRCALL) {
4025         oprintf("CALL%i\t", in->m_params.size());
4026     } else
4027         oprintf("%s\t", qc_opname(in->m_opcode));
4028
4029     if (in->_m_ops[0] && !(in->_m_ops[1] || in->_m_ops[2])) {
4030         in->_m_ops[0]->dump(oprintf);
4031         comma = ",\t";
4032     }
4033     else
4034     {
4035         for (i = 1; i != 3; ++i) {
4036             if (in->_m_ops[i]) {
4037                 if (comma)
4038                     oprintf(comma);
4039                 in->_m_ops[i]->dump(oprintf);
4040                 comma = ",\t";
4041             }
4042         }
4043     }
4044     if (in->m_bops[0]) {
4045         if (comma)
4046             oprintf(comma);
4047         oprintf("[%s]", in->m_bops[0]->m_label.c_str());
4048         comma = ",\t";
4049     }
4050     if (in->m_bops[1])
4051         oprintf("%s[%s]", comma, in->m_bops[1]->m_label.c_str());
4052     if (in->m_params.size()) {
4053         oprintf("\tparams: ");
4054         for (auto &it : in->m_params)
4055             oprintf("%s, ", it->m_name.c_str());
4056     }
4057     oprintf("\n");
4058     ind[strlen(ind)-1] = 0;
4059 }
4060
4061 static void ir_value_dump_string(const char *str, int (*oprintf)(const char*, ...))
4062 {
4063     oprintf("\"");
4064     for (; *str; ++str) {
4065         switch (*str) {
4066             case '\n': oprintf("\\n"); break;
4067             case '\r': oprintf("\\r"); break;
4068             case '\t': oprintf("\\t"); break;
4069             case '\v': oprintf("\\v"); break;
4070             case '\f': oprintf("\\f"); break;
4071             case '\b': oprintf("\\b"); break;
4072             case '\a': oprintf("\\a"); break;
4073             case '\\': oprintf("\\\\"); break;
4074             case '"': oprintf("\\\""); break;
4075             default: oprintf("%c", *str); break;
4076         }
4077     }
4078     oprintf("\"");
4079 }
4080
4081 void ir_value::dump(int (*oprintf)(const char*, ...)) const
4082 {
4083     if (m_hasvalue) {
4084         switch (m_vtype) {
4085             default:
4086             case TYPE_VOID:
4087                 oprintf("(void)");
4088                 break;
4089             case TYPE_FUNCTION:
4090                 oprintf("fn:%s", m_name.c_str());
4091                 break;
4092             case TYPE_FLOAT:
4093                 oprintf("%g", m_constval.vfloat);
4094                 break;
4095             case TYPE_VECTOR:
4096                 oprintf("'%g %g %g'",
4097                         m_constval.vvec.x,
4098                         m_constval.vvec.y,
4099                         m_constval.vvec.z);
4100                 break;
4101             case TYPE_ENTITY:
4102                 oprintf("(entity)");
4103                 break;
4104             case TYPE_STRING:
4105                 ir_value_dump_string(m_constval.vstring, oprintf);
4106                 break;
4107 #if 0
4108             case TYPE_INTEGER:
4109                 oprintf("%i", m_constval.vint);
4110                 break;
4111 #endif
4112             case TYPE_POINTER:
4113                 oprintf("&%s",
4114                     m_constval.vpointer->m_name.c_str());
4115                 break;
4116         }
4117     } else {
4118         oprintf("%s", m_name.c_str());
4119     }
4120 }
4121
4122 void ir_value::dumpLife(int (*oprintf)(const char*,...)) const
4123 {
4124     oprintf("Life of %12s:", m_name.c_str());
4125     for (size_t i = 0; i < m_life.size(); ++i)
4126     {
4127         oprintf(" + [%i, %i]\n", m_life[i].start, m_life[i].end);
4128     }
4129 }