xonotic/gmqcc: ir.cpp (blob 17bd997cc83faf03d30cb84e6924c8bfa8b0b4de)
1 #include <stdlib.h>
2 #include <string.h>
3
4 #include "gmqcc.h"
5 #include "ir.h"
6
7 /***********************************************************************
8  * Type sizes used at multiple points in the IR codegen
9  */
10
11 const char *type_name[TYPE_COUNT] = {
12     "void",
13     "string",
14     "float",
15     "vector",
16     "entity",
17     "field",
18     "function",
19     "pointer",
20     "integer",
21     "variant",
22     "struct",
23     "union",
24     "array",
25
26     "nil",
27     "<no-expression>"
28 };
29
30 static size_t type_sizeof_[TYPE_COUNT] = {
31     1, /* TYPE_VOID     */
32     1, /* TYPE_STRING   */
33     1, /* TYPE_FLOAT    */
34     3, /* TYPE_VECTOR   */
35     1, /* TYPE_ENTITY   */
36     1, /* TYPE_FIELD    */
37     1, /* TYPE_FUNCTION */
38     1, /* TYPE_POINTER  */
39     1, /* TYPE_INTEGER  */
40     3, /* TYPE_VARIANT  */
41     0, /* TYPE_STRUCT   */
42     0, /* TYPE_UNION    */
43     0, /* TYPE_ARRAY    */
44     0, /* TYPE_NIL      */
45     0, /* TYPE_NOEXPR   */
46 };
47
48 const uint16_t type_store_instr[TYPE_COUNT] = {
49     INSTR_STORE_F, /* should use I when having integer support */
50     INSTR_STORE_S,
51     INSTR_STORE_F,
52     INSTR_STORE_V,
53     INSTR_STORE_ENT,
54     INSTR_STORE_FLD,
55     INSTR_STORE_FNC,
56     INSTR_STORE_ENT, /* should use I */
57 #if 0
58     INSTR_STORE_I, /* integer type */
59 #else
60     INSTR_STORE_F,
61 #endif
62
63     INSTR_STORE_V, /* variant, should never be accessed */
64
65     VINSTR_END, /* struct */
66     VINSTR_END, /* union  */
67     VINSTR_END, /* array  */
68     VINSTR_END, /* nil    */
69     VINSTR_END, /* noexpr */
70 };
71
72 const uint16_t field_store_instr[TYPE_COUNT] = {
73     INSTR_STORE_FLD,
74     INSTR_STORE_FLD,
75     INSTR_STORE_FLD,
76     INSTR_STORE_V,
77     INSTR_STORE_FLD,
78     INSTR_STORE_FLD,
79     INSTR_STORE_FLD,
80     INSTR_STORE_FLD,
81 #if 0
82     INSTR_STORE_FLD, /* integer type */
83 #else
84     INSTR_STORE_FLD,
85 #endif
86
87     INSTR_STORE_V, /* variant, should never be accessed */
88
89     VINSTR_END, /* struct */
90     VINSTR_END, /* union  */
91     VINSTR_END, /* array  */
92     VINSTR_END, /* nil    */
93     VINSTR_END, /* noexpr */
94 };
95
96 const uint16_t type_storep_instr[TYPE_COUNT] = {
97     INSTR_STOREP_F, /* should use I when having integer support */
98     INSTR_STOREP_S,
99     INSTR_STOREP_F,
100     INSTR_STOREP_V,
101     INSTR_STOREP_ENT,
102     INSTR_STOREP_FLD,
103     INSTR_STOREP_FNC,
104     INSTR_STOREP_ENT, /* should use I */
105 #if 0
106     INSTR_STOREP_ENT, /* integer type */
107 #else
108     INSTR_STOREP_F,
109 #endif
110
111     INSTR_STOREP_V, /* variant, should never be accessed */
112
113     VINSTR_END, /* struct */
114     VINSTR_END, /* union  */
115     VINSTR_END, /* array  */
116     VINSTR_END, /* nil    */
117     VINSTR_END, /* noexpr */
118 };
119
120 const uint16_t type_eq_instr[TYPE_COUNT] = {
121     INSTR_EQ_F, /* should use I when having integer support */
122     INSTR_EQ_S,
123     INSTR_EQ_F,
124     INSTR_EQ_V,
125     INSTR_EQ_E,
126     INSTR_EQ_E, /* FLD has no comparison */
127     INSTR_EQ_FNC,
128     INSTR_EQ_E, /* should use I */
129 #if 0
130     INSTR_EQ_I,
131 #else
132     INSTR_EQ_F,
133 #endif
134
135     INSTR_EQ_V, /* variant, should never be accessed */
136
137     VINSTR_END, /* struct */
138     VINSTR_END, /* union  */
139     VINSTR_END, /* array  */
140     VINSTR_END, /* nil    */
141     VINSTR_END, /* noexpr */
142 };
143
144 const uint16_t type_ne_instr[TYPE_COUNT] = {
145     INSTR_NE_F, /* should use I when having integer support */
146     INSTR_NE_S,
147     INSTR_NE_F,
148     INSTR_NE_V,
149     INSTR_NE_E,
150     INSTR_NE_E, /* FLD has no comparison */
151     INSTR_NE_FNC,
152     INSTR_NE_E, /* should use I */
153 #if 0
154     INSTR_NE_I,
155 #else
156     INSTR_NE_F,
157 #endif
158
159     INSTR_NE_V, /* variant, should never be accessed */
160
161     VINSTR_END, /* struct */
162     VINSTR_END, /* union  */
163     VINSTR_END, /* array  */
164     VINSTR_END, /* nil    */
165     VINSTR_END, /* noexpr */
166 };
167
168 const uint16_t type_not_instr[TYPE_COUNT] = {
169     INSTR_NOT_F, /* should use I when having integer support */
170     VINSTR_END,  /* not to be used, depends on string related -f flags */
171     INSTR_NOT_F,
172     INSTR_NOT_V,
173     INSTR_NOT_ENT,
174     INSTR_NOT_ENT,
175     INSTR_NOT_FNC,
176     INSTR_NOT_ENT, /* should use I */
177 #if 0
178     INSTR_NOT_I, /* integer type */
179 #else
180     INSTR_NOT_F,
181 #endif
182
183     INSTR_NOT_V, /* variant, should never be accessed */
184
185     VINSTR_END, /* struct */
186     VINSTR_END, /* union  */
187     VINSTR_END, /* array  */
188     VINSTR_END, /* nil    */
189     VINSTR_END, /* noexpr */
190 };
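/* Illustration: the tables above are indexed by qc_type. For example,
 * type_store_instr[TYPE_VECTOR] yields INSTR_STORE_V and
 * type_storep_instr[TYPE_STRING] yields INSTR_STOREP_S; the VINSTR_END
 * entries mark types (struct, union, array, nil, noexpr) that must never
 * reach instruction selection.
 */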
191
192 /* protos */
193 static void            ir_function_dump(ir_function*, char *ind, int (*oprintf)(const char*,...));
194
195 static ir_value*       ir_block_create_general_instr(ir_block *self, lex_ctx_t, const char *label,
196                                                      int op, ir_value *a, ir_value *b, qc_type outype);
197 static bool GMQCC_WARN ir_block_create_store(ir_block*, lex_ctx_t, ir_value *target, ir_value *what);
198 static void            ir_block_dump(ir_block*, char *ind, int (*oprintf)(const char*,...));
199
200 static bool            ir_instr_op(ir_instr*, int op, ir_value *value, bool writing);
201 static void            ir_instr_dump(ir_instr* in, char *ind, int (*oprintf)(const char*,...));
202 /* error functions */
203
204 static void irerror(lex_ctx_t ctx, const char *msg, ...)
205 {
206     va_list ap;
207     va_start(ap, msg);
208     con_cvprintmsg(ctx, LVL_ERROR, "internal error", msg, ap);
209     va_end(ap);
210 }
211
212 static bool GMQCC_WARN irwarning(lex_ctx_t ctx, int warntype, const char *fmt, ...)
213 {
214     bool    r;
215     va_list ap;
216     va_start(ap, fmt);
217     r = vcompile_warning(ctx, warntype, fmt, ap);
218     va_end(ap);
219     return r;
220 }
221
222 /***********************************************************************
223  * Vector utility functions
224  */
225
226 static bool GMQCC_WARN vec_ir_value_find(std::vector<ir_value *> &vec, const ir_value *what, size_t *idx)
227 {
228     for (auto &it : vec) {
229         if (it != what)
230             continue;
231         if (idx)
232             *idx = &it - &vec[0];
233         return true;
234     }
235     return false;
236 }
237
238 static bool GMQCC_WARN vec_ir_block_find(std::vector<ir_block *> &vec, ir_block *what, size_t *idx)
239 {
240     for (auto &it : vec) {
241         if (it != what)
242             continue;
243         if (idx)
244             *idx = &it - &vec[0];
245         return true;
246     }
247     return false;
248 }
249
250 static bool GMQCC_WARN vec_ir_instr_find(std::vector<ir_instr *> &vec, ir_instr *what, size_t *idx)
251 {
252     for (auto &it : vec) {
253         if (it != what)
254             continue;
255         if (idx)
256             *idx = &it - &vec[0];
257         return true;
258     }
259     return false;
260 }
261
262 /***********************************************************************
263  * IR Builder
264  */
265
266 static void ir_block_delete_quick(ir_block* self);
267 static void ir_instr_delete_quick(ir_instr *self);
268 static void ir_function_delete_quick(ir_function *self);
269
270 ir_builder::ir_builder(const std::string& modulename)
271 : m_name(modulename),
272   m_code(new code_t)
273 {
274     m_htglobals   = util_htnew(IR_HT_SIZE);
275     m_htfields    = util_htnew(IR_HT_SIZE);
276     m_htfunctions = util_htnew(IR_HT_SIZE);
277
278     m_nil = new ir_value("nil", store_value, TYPE_NIL);
279     m_nil->m_cvq = CV_CONST;
280
281     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
282         /* we write to them, but they're not supposed to be used outside the IR, so
283          * let's not allow the generation of ir_instrs which use these.
284          * So it's a constant noexpr.
285          */
286         m_vinstr_temp[i] = new ir_value("vinstr_temp", store_value, TYPE_NOEXPR);
287         m_vinstr_temp[i]->m_cvq = CV_CONST;
288     }
289 }
290
291 ir_builder::~ir_builder()
292 {
293     util_htdel(m_htglobals);
294     util_htdel(m_htfields);
295     util_htdel(m_htfunctions);
296     for (auto& f : m_functions)
297         ir_function_delete_quick(f.release());
298     m_functions.clear(); // delete them now before deleting the rest:
299
300     delete m_nil;
301
302     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
303         delete m_vinstr_temp[i];
304     }
305
306     m_extparams.clear();
307     m_extparam_protos.clear();
308 }
309
310 ir_function* ir_builder::createFunction(const std::string& name, qc_type outtype)
311 {
312     ir_function *fn = (ir_function*)util_htget(m_htfunctions, name.c_str());
313     if (fn)
314         return nullptr;
315
316     fn = new ir_function(this, outtype);
317     fn->m_name = name;
318     m_functions.emplace_back(fn);
319     util_htset(m_htfunctions, name.c_str(), fn);
320
321     fn->m_value = createGlobal(fn->m_name, TYPE_FUNCTION);
322     if (!fn->m_value) {
323         delete fn;
324         return nullptr;
325     }
326
327     fn->m_value->m_hasvalue = true;
328     fn->m_value->m_outtype = outtype;
329     fn->m_value->m_constval.vfunc = fn;
330     fn->m_value->m_context = fn->m_context;
331
332     return fn;
333 }
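/* Usage sketch (hypothetical caller): creating a function also creates the
 * backing TYPE_FUNCTION global and links the two:
 *
 *     ir_function *fn = builder->createFunction("main", TYPE_VOID);
 *     // fn is nullptr if a function of that name already exists
 */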
334
335 ir_value* ir_builder::createGlobal(const std::string& name, qc_type vtype)
336 {
337     ir_value *ve;
338
339     if (name[0] != '#')
340     {
341         ve = (ir_value*)util_htget(m_htglobals, name.c_str());
342         if (ve) {
343             return nullptr;
344         }
345     }
346
347     ve = new ir_value(std::string(name), store_global, vtype);
348     m_globals.emplace_back(ve);
349     util_htset(m_htglobals, name.c_str(), ve);
350     return ve;
351 }
352
353 ir_value* ir_builder::get_va_count()
354 {
355     if (m_reserved_va_count)
356         return m_reserved_va_count;
357     return (m_reserved_va_count = createGlobal("reserved:va_count", TYPE_FLOAT));
358 }
359
360 ir_value* ir_builder::createField(const std::string& name, qc_type vtype)
361 {
362     ir_value *ve = (ir_value*)util_htget(m_htfields, name.c_str());
363     if (ve) {
364         return nullptr;
365     }
366
367     ve = new ir_value(std::string(name), store_global, TYPE_FIELD);
368     ve->m_fieldtype = vtype;
369     m_fields.emplace_back(ve);
370     util_htset(m_htfields, name.c_str(), ve);
371     return ve;
372 }
373
374 /***********************************************************************
375  * IR Function
376  */
377
378 static bool ir_function_naive_phi(ir_function*);
379 static void ir_function_enumerate(ir_function*);
380 static bool ir_function_calculate_liferanges(ir_function*);
381 static bool ir_function_allocate_locals(ir_function*);
382
383 ir_function::ir_function(ir_builder* owner_, qc_type outtype_)
384 : m_owner(owner_),
385   m_name("<@unnamed>"),
386   m_outtype(outtype_)
387 {
388     m_context.file = "<@no context>";
389     m_context.line = 0;
390 }
391
392 ir_function::~ir_function()
393 {
394 }
395
396 static void ir_function_delete_quick(ir_function *self)
397 {
398     for (auto& b : self->m_blocks)
399         ir_block_delete_quick(b.release());
400     delete self;
401 }
402
403 static void ir_function_collect_value(ir_function *self, ir_value *v)
404 {
405     self->m_values.emplace_back(v);
406 }
407
408 ir_block* ir_function_create_block(lex_ctx_t ctx, ir_function *self, const char *label)
409 {
410     ir_block* bn = new ir_block(self, label ? std::string(label) : std::string());
411     bn->m_context = ctx;
412     self->m_blocks.emplace_back(bn);
413
414     if ((self->m_flags & IR_FLAG_BLOCK_COVERAGE) && self->m_owner->m_coverage_func)
415         (void)ir_block_create_call(bn, ctx, nullptr, self->m_owner->m_coverage_func, false);
416
417     return bn;
418 }
419
420 static bool instr_is_operation(uint16_t op)
421 {
422     return ( (op >= INSTR_MUL_F  && op <= INSTR_GT) ||
423              (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) ||
424              (op == INSTR_ADDRESS) ||
425              (op >= INSTR_NOT_F  && op <= INSTR_NOT_FNC) ||
426              (op >= INSTR_AND    && op <= INSTR_BITOR) ||
427              (op >= INSTR_CALL0  && op <= INSTR_CALL8) ||
428              (op >= VINSTR_BITAND_V && op <= VINSTR_NEG_V) );
429 }
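/* "Operation" here means any instruction that computes a result into operand 0:
 * arithmetic/comparison ops, loads, ADDRESS, NOT, AND/OR and bit ops, calls,
 * and the vector virtual instructions. The peephole pass below relies on this
 * to know that _m_ops[0] of the preceding instruction is the written temp.
 */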
430
431 static bool ir_function_pass_peephole(ir_function *self)
432 {
433     for (auto& bp : self->m_blocks) {
434         ir_block *block = bp.get();
435         for (size_t i = 0; i < vec_size(block->m_instr); ++i) {
436             ir_instr *inst;
437             inst = block->m_instr[i];
438
439             if (i >= 1 &&
440                 (inst->m_opcode >= INSTR_STORE_F &&
441                  inst->m_opcode <= INSTR_STORE_FNC))
442             {
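                /* Pattern being matched here (sketch):
                 *     OP    ...  -> %tmp      (previous instruction)
                 *     STORE %tmp -> %dst      (this instruction)
                 * becomes
                 *     OP    ...  -> %dst
                 * when %tmp is an SSA temp whose only read is this store.
                 */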
443                 ir_instr *store;
444                 ir_instr *oper;
445                 ir_value *value;
446
447                 store = inst;
448
449                 oper  = block->m_instr[i-1];
450                 if (!instr_is_operation(oper->m_opcode))
451                     continue;
452
453                 /* Don't change semantics of MUL_VF in engines where these may not alias. */
454                 if (OPTS_FLAG(LEGACY_VECTOR_MATHS)) {
455                     if (oper->m_opcode == INSTR_MUL_VF && oper->_m_ops[2]->m_memberof == oper->_m_ops[1])
456                         continue;
457                     if (oper->m_opcode == INSTR_MUL_FV && oper->_m_ops[1]->m_memberof == oper->_m_ops[2])
458                         continue;
459                 }
460
461                 value = oper->_m_ops[0];
462
463                 /* only do it for SSA values */
464                 if (value->m_store != store_value)
465                     continue;
466
467                 /* don't optimize out the temp if it's used later again */
468                 if (value->m_reads.size() != 1)
469                     continue;
470
471                 /* The very next store must use this value */
472                 if (value->m_reads[0] != store)
473                     continue;
474
475                 /* And of course the store must _read_ from it, so it's in
476                  * OP 1 */
477                 if (store->_m_ops[1] != value)
478                     continue;
479
480                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
481                 (void)!ir_instr_op(oper, 0, store->_m_ops[0], true);
482
483                 vec_remove(block->m_instr, i, 1);
484                 delete store;
485             }
486             else if (inst->m_opcode == VINSTR_COND)
487             {
488                 /* If the COND's operand is the result of a NOT we can
489                  * remove the NOT and swap the branch targets instead
490                  */
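                /* Sketch of the rewrite performed below:
                 *     NOT  %v   -> %tmp
                 *     COND %tmp ? ontrue : onfalse
                 * becomes
                 *     COND %v   ? onfalse : ontrue
                 */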
491                 while (true) {
492                     ir_block *tmp;
493                     size_t    inotid;
494                     ir_instr *inot;
495                     ir_value *value;
496                     value = inst->_m_ops[0];
497
498                     if (value->m_store != store_value || value->m_reads.size() != 1 || value->m_reads[0] != inst)
499                         break;
500
501                     inot = value->m_writes[0];
502                     if (inot->_m_ops[0] != value ||
503                         inot->m_opcode < INSTR_NOT_F ||
504                         inot->m_opcode > INSTR_NOT_FNC ||
505                         inot->m_opcode == INSTR_NOT_V || /* can't do these */
506                         inot->m_opcode == INSTR_NOT_S)
507                     {
508                         break;
509                     }
510
511                     /* count */
512                     ++opts_optimizationcount[OPTIM_PEEPHOLE];
513                     /* change operand */
514                     (void)!ir_instr_op(inst, 0, inot->_m_ops[1], false);
515                     /* remove NOT */
516                     tmp = inot->m_owner;
517                     for (inotid = 0; inotid < vec_size(tmp->m_instr); ++inotid) {
518                         if (tmp->m_instr[inotid] == inot)
519                             break;
520                     }
521                     if (inotid >= vec_size(tmp->m_instr)) {
522                         compile_error(inst->m_context, "sanity-check failed: failed to find instruction to optimize out");
523                         return false;
524                     }
525                     vec_remove(tmp->m_instr, inotid, 1);
526                     delete inot;
527                     /* swap ontrue/onfalse */
528                     tmp = inst->m_bops[0];
529                     inst->m_bops[0] = inst->m_bops[1];
530                     inst->m_bops[1] = tmp;
531                 }
532                 continue;
533             }
534         }
535     }
536
537     return true;
538 }
539
540 static bool ir_function_pass_tailrecursion(ir_function *self)
541 {
542     size_t p;
543
544     for (auto& bp : self->m_blocks) {
545         ir_block *block = bp.get();
546
547         ir_value *funcval;
548         ir_instr *ret, *call, *store = nullptr;
549
550         if (!block->m_final || vec_size(block->m_instr) < 2)
551             continue;
552
553         ret = block->m_instr[vec_size(block->m_instr)-1];
554         if (ret->m_opcode != INSTR_DONE && ret->m_opcode != INSTR_RETURN)
555             continue;
556
557         call = block->m_instr[vec_size(block->m_instr)-2];
558         if (call->m_opcode >= INSTR_STORE_F && call->m_opcode <= INSTR_STORE_FNC) {
559             /* account for the unoptimized
560              * CALL
561              * STORE %return, %tmp
562              * RETURN %tmp
563              * version
564              */
565             if (vec_size(block->m_instr) < 3)
566                 continue;
567
568             store = call;
569             call = block->m_instr[vec_size(block->m_instr)-3];
570         }
571
572         if (call->m_opcode < INSTR_CALL0 || call->m_opcode > INSTR_CALL8)
573             continue;
574
575         if (store) {
576             /* optimize out the STORE */
577             if (ret->_m_ops[0]   &&
578                 ret->_m_ops[0]   == store->_m_ops[0] &&
579                 store->_m_ops[1] == call->_m_ops[0])
580             {
581                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
582                 call->_m_ops[0] = store->_m_ops[0];
583                 vec_remove(block->m_instr, vec_size(block->m_instr) - 2, 1);
584                 delete store;
585             }
586             else
587                 continue;
588         }
589
590         if (!call->_m_ops[0])
591             continue;
592
593         funcval = call->_m_ops[1];
594         if (!funcval)
595             continue;
596         if (funcval->m_vtype != TYPE_FUNCTION || funcval->m_constval.vfunc != self)
597             continue;
598
599         /* now we have a CALL and a RET, check if it's a tailcall */
600         if (ret->_m_ops[0] && call->_m_ops[0] != ret->_m_ops[0])
601             continue;
602
603         ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
604         vec_shrinkby(block->m_instr, 2);
605
606         block->m_final = false; /* open it back up */
607
608         /* emit parameter-stores */
609         for (p = 0; p < call->m_params.size(); ++p) {
610             /* assert(call->params_count <= self->locals_count); */
611             if (!ir_block_create_store(block, call->m_context, self->m_locals[p].get(), call->m_params[p])) {
612                 irerror(call->m_context, "failed to create tailcall store instruction for parameter %i", (int)p);
613                 return false;
614             }
615         }
616         if (!ir_block_create_jump(block, call->m_context, self->m_blocks[0].get())) {
617             irerror(call->m_context, "failed to create tailcall jump");
618             return false;
619         }
620
621         delete call;
622         delete ret;
623     }
624
625     return true;
626 }
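/* Tail-recursion sketch: a final block ending in
 *     CALLn  self -> %r
 *     RETURN %r
 * is rewritten into stores of the call arguments into the function's
 * parameter locals followed by a JUMP back to the entry block.
 */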
627
628 bool ir_function_finalize(ir_function *self)
629 {
630     if (self->m_builtin)
631         return true;
632
633     for (auto& lp : self->m_locals) {
634         ir_value *v = lp.get();
635         if (v->m_reads.empty() && v->m_writes.size() && !(v->m_flags & IR_FLAG_NOREF)) {
636             // if it's a vector, check that all of its members are unused before
637             // claiming it's unused; otherwise skip the vector entirely
638             if (v->m_vtype == TYPE_VECTOR)
639             {
640                 size_t mask = (1 << 3) - 1, bits = 0;
641                 for (size_t i = 0; i < 3; i++)
642                     if (!v->m_members[i] || (v->m_members[i]->m_reads.empty()
643                         && v->m_members[i]->m_writes.size()))
644                         bits |= (1 << i);
645                 // all components are unused so just report the vector
646                 if (bits == mask && irwarning(v->m_context, WARN_UNUSED_VARIABLE,
647                     "unused variable: `%s`", v->m_name.c_str()))
648                     return false;
649                 else if (bits != mask)
650                     // individual components are unused so mention them
651                     for (size_t i = 0; i < 3; i++)
652                         if ((bits & (1 << i))
653                             && irwarning(v->m_context, WARN_UNUSED_COMPONENT,
654                                 "unused vector component: `%s.%c`", v->m_name.c_str(), "xyz"[i]))
655                             return false;
656             }
657             // just a standard variable
658             else if (irwarning(v->m_context, WARN_UNUSED_VARIABLE,
659                     "unused variable: `%s`", v->m_name.c_str())) return false;
660         }
661     }
662
663     if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
664         if (!ir_function_pass_peephole(self)) {
665             irerror(self->m_context, "generic optimization pass broke something in `%s`", self->m_name.c_str());
666             return false;
667         }
668     }
669
670     if (OPTS_OPTIMIZATION(OPTIM_TAIL_RECURSION)) {
671         if (!ir_function_pass_tailrecursion(self)) {
672             irerror(self->m_context, "tail-recursion optimization pass broke something in `%s`", self->m_name.c_str());
673             return false;
674         }
675     }
676
677     if (!ir_function_naive_phi(self)) {
678         irerror(self->m_context, "internal error: ir_function_naive_phi failed");
679         return false;
680     }
681
682     for (auto& lp : self->m_locals) {
683         ir_value *v = lp.get();
684         if (v->m_vtype == TYPE_VECTOR ||
685             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
686         {
687             v->vectorMember(0);
688             v->vectorMember(1);
689             v->vectorMember(2);
690         }
691     }
692     for (auto& vp : self->m_values) {
693         ir_value *v = vp.get();
694         if (v->m_vtype == TYPE_VECTOR ||
695             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
696         {
697             v->vectorMember(0);
698             v->vectorMember(1);
699             v->vectorMember(2);
700         }
701     }
702
703     ir_function_enumerate(self);
704
705     if (!ir_function_calculate_liferanges(self))
706         return false;
707     if (!ir_function_allocate_locals(self))
708         return false;
709     return true;
710 }
711
712 ir_value* ir_function_create_local(ir_function *self, const std::string& name, qc_type vtype, bool param)
713 {
714     ir_value *ve;
715
716     if (param &&
717         !self->m_locals.empty() &&
718         self->m_locals.back()->m_store != store_param)
719     {
720         irerror(self->m_context, "cannot add parameters after adding locals");
721         return nullptr;
722     }
723
724     ve = new ir_value(std::string(name), (param ? store_param : store_local), vtype);
725     if (param)
726         ve->m_locked = true;
727     self->m_locals.emplace_back(ve);
728     return ve;
729 }
730
731 /***********************************************************************
732  * IR Block
733  */
734
735 ir_block::ir_block(ir_function* owner, const std::string& name)
736 : m_owner(owner),
737   m_label(name)
738 {
739     m_context.file = "<@no context>";
740     m_context.line = 0;
741 }
742
743 ir_block::~ir_block()
744 {
745     for (size_t i = 0; i != vec_size(m_instr); ++i)
746         delete m_instr[i];
747     vec_free(m_instr);
748 }
749
750 static void ir_block_delete_quick(ir_block* self)
751 {
752     size_t i;
753     for (i = 0; i != vec_size(self->m_instr); ++i)
754         ir_instr_delete_quick(self->m_instr[i]);
755     vec_free(self->m_instr);
756     delete self;
757 }
758
759 /***********************************************************************
760  * IR Instructions
761  */
762
763 ir_instr::ir_instr(lex_ctx_t ctx, ir_block* owner_, int op)
764 : m_opcode(op),
765   m_context(ctx),
766   m_owner(owner_)
767 {
768 }
769
770 ir_instr::~ir_instr()
771 {
772     // The following calls can only delete from
773     // vectors; we still want to delete this instruction,
774     // so ignore the return values. Since gcc's warn_unused_result attribute
775     // is not silenced by an explicit (void)foo(); cast,
776     // we use (void)!foo(); instead.
777     for (auto &it : m_phi) {
778         size_t idx;
779         if (vec_ir_instr_find(it.value->m_writes, this, &idx))
780             it.value->m_writes.erase(it.value->m_writes.begin() + idx);
781         if (vec_ir_instr_find(it.value->m_reads, this, &idx))
782             it.value->m_reads.erase(it.value->m_reads.begin() + idx);
783     }
784     for (auto &it : m_params) {
785         size_t idx;
786         if (vec_ir_instr_find(it->m_writes, this, &idx))
787             it->m_writes.erase(it->m_writes.begin() + idx);
788         if (vec_ir_instr_find(it->m_reads, this, &idx))
789             it->m_reads.erase(it->m_reads.begin() + idx);
790     }
791     (void)!ir_instr_op(this, 0, nullptr, false);
792     (void)!ir_instr_op(this, 1, nullptr, false);
793     (void)!ir_instr_op(this, 2, nullptr, false);
794 }
795
796 static void ir_instr_delete_quick(ir_instr *self)
797 {
798     self->m_phi.clear();
799     self->m_params.clear();
800     self->_m_ops[0] = nullptr;
801     self->_m_ops[1] = nullptr;
802     self->_m_ops[2] = nullptr;
803     delete self;
804 }
805
806 static bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing)
807 {
808     if (v && v->m_vtype == TYPE_NOEXPR) {
809         irerror(self->m_context, "tried to use a NOEXPR value");
810         return false;
811     }
812
813     if (self->_m_ops[op]) {
814         size_t idx;
815         if (writing && vec_ir_instr_find(self->_m_ops[op]->m_writes, self, &idx))
816             self->_m_ops[op]->m_writes.erase(self->_m_ops[op]->m_writes.begin() + idx);
817         else if (vec_ir_instr_find(self->_m_ops[op]->m_reads, self, &idx))
818             self->_m_ops[op]->m_reads.erase(self->_m_ops[op]->m_reads.begin() + idx);
819     }
820     if (v) {
821         if (writing)
822             v->m_writes.push_back(self);
823         else
824             v->m_reads.push_back(self);
825     }
826     self->_m_ops[op] = v;
827     return true;
828 }
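/* ir_instr_op keeps the use/def bookkeeping consistent: whatever value was in
 * this operand slot is unlinked from its m_writes/m_reads list, and the new
 * value (if any) is registered as a write or a read depending on 'writing'.
 */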
829
830 /***********************************************************************
831  * IR Value
832  */
833
834 void ir_value::setCodeAddress(int32_t gaddr)
835 {
836     m_code.globaladdr = gaddr;
837     if (m_members[0]) m_members[0]->m_code.globaladdr = gaddr;
838     if (m_members[1]) m_members[1]->m_code.globaladdr = gaddr;
839     if (m_members[2]) m_members[2]->m_code.globaladdr = gaddr;
840 }
841
842 int32_t ir_value::codeAddress() const
843 {
844     if (m_store == store_return)
845         return OFS_RETURN + m_code.addroffset;
846     return m_code.globaladdr + m_code.addroffset;
847 }
848
849 ir_value::ir_value(std::string&& name_, store_type store_, qc_type vtype_)
850     : m_name(move(name_))
851     , m_vtype(vtype_)
852     , m_store(store_)
853 {
854     m_fieldtype = TYPE_VOID;
855     m_outtype = TYPE_VOID;
856     m_flags = 0;
857
858     m_cvq          = CV_NONE;
859     m_hasvalue     = false;
860     m_context.file = "<@no context>";
861     m_context.line = 0;
862
863     memset(&m_constval, 0, sizeof(m_constval));
864     memset(&m_code,     0, sizeof(m_code));
865
866     m_members[0] = nullptr;
867     m_members[1] = nullptr;
868     m_members[2] = nullptr;
869     m_memberof = nullptr;
870
871     m_unique_life = false;
872     m_locked = false;
873     m_callparam  = false;
874 }
875
876 ir_value::ir_value(ir_function *owner, std::string&& name, store_type storetype, qc_type vtype)
877     : ir_value(move(name), storetype, vtype)
878 {
879     ir_function_collect_value(owner, this);
880 }
881
882 ir_value::~ir_value()
883 {
884     size_t i;
885     if (m_hasvalue) {
886         if (m_vtype == TYPE_STRING)
887             mem_d((void*)m_constval.vstring);
888     }
889     if (!(m_flags & IR_FLAG_SPLIT_VECTOR)) {
890         for (i = 0; i < 3; ++i) {
891             if (m_members[i])
892                 delete m_members[i];
893         }
894     }
895 }
896
897
898 /*  helper function */
899 ir_value* ir_builder::literalFloat(float value, bool add_to_list) {
900     ir_value *v = new ir_value("#IMMEDIATE", store_global, TYPE_FLOAT);
901     v->m_flags |= IR_FLAG_ERASABLE;
902     v->m_hasvalue = true;
903     v->m_cvq = CV_CONST;
904     v->m_constval.vfloat = value;
905
906     m_globals.emplace_back(v);
907     if (add_to_list)
908         m_const_floats.emplace_back(v);
909     return v;
910 }
911
912 ir_value* ir_value::vectorMember(unsigned int member)
913 {
914     std::string name;
915     ir_value *m;
916     if (member >= 3)
917         return nullptr;
918
919     if (m_members[member])
920         return m_members[member];
921
922     if (!m_name.empty()) {
923         char member_name[3] = { '_', char('x' + member), 0 };
924         name = m_name + member_name;
925     }
926
927     if (m_vtype == TYPE_VECTOR)
928     {
929         m = new ir_value(move(name), m_store, TYPE_FLOAT);
930         if (!m)
931             return nullptr;
932         m->m_context = m_context;
933
934         m_members[member] = m;
935         m->m_code.addroffset = member;
936     }
937     else if (m_vtype == TYPE_FIELD)
938     {
939         if (m_fieldtype != TYPE_VECTOR)
940             return nullptr;
941         m = new ir_value(move(name), m_store, TYPE_FIELD);
942         if (!m)
943             return nullptr;
944         m->m_fieldtype = TYPE_FLOAT;
945         m->m_context = m_context;
946
947         m_members[member] = m;
948         m->m_code.addroffset = member;
949     }
950     else
951     {
952         irerror(m_context, "invalid member access on %s", m_name.c_str());
953         return nullptr;
954     }
955
956     m->m_memberof = this;
957     return m;
958 }
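/* Note: the _x/_y/_z members created here do not get storage of their own;
 * setCodeAddress() copies the parent's globaladdr into them, and
 * m_code.addroffset (0/1/2) selects the component in codeAddress().
 */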
959
960 size_t ir_value::size() const {
961     if (m_vtype == TYPE_FIELD && m_fieldtype == TYPE_VECTOR)
962         return type_sizeof_[TYPE_VECTOR];
963     return type_sizeof_[m_vtype];
964 }
965
966 bool ir_value::setFloat(float f)
967 {
968     if (m_vtype != TYPE_FLOAT)
969         return false;
970     m_constval.vfloat = f;
971     m_hasvalue = true;
972     return true;
973 }
974
975 bool ir_value::setFunc(int f)
976 {
977     if (m_vtype != TYPE_FUNCTION)
978         return false;
979     m_constval.vint = f;
980     m_hasvalue = true;
981     return true;
982 }
983
984 bool ir_value::setVector(vec3_t v)
985 {
986     if (m_vtype != TYPE_VECTOR)
987         return false;
988     m_constval.vvec = v;
989     m_hasvalue = true;
990     return true;
991 }
992
993 bool ir_value::setField(ir_value *fld)
994 {
995     if (m_vtype != TYPE_FIELD)
996         return false;
997     m_constval.vpointer = fld;
998     m_hasvalue = true;
999     return true;
1000 }
1001
1002 bool ir_value::setString(const char *str)
1003 {
1004     if (m_vtype != TYPE_STRING)
1005         return false;
1006     m_constval.vstring = util_strdupe(str);
1007     m_hasvalue = true;
1008     return true;
1009 }
1010
1011 #if 0
1012 bool ir_value::setInt(int i)
1013 {
1014     if (m_vtype != TYPE_INTEGER)
1015         return false;
1016     m_constval.vint = i;
1017     m_hasvalue = true;
1018     return true;
1019 }
1020 #endif
1021
1022 bool ir_value::lives(size_t at)
1023 {
1024     for (auto& l : m_life) {
1025         if (l.start <= at && at <= l.end)
1026             return true;
1027         if (l.start > at) /* since it's ordered */
1028             return false;
1029     }
1030     return false;
1031 }
1032
1033 bool ir_value::insertLife(size_t idx, ir_life_entry_t e)
1034 {
1035     m_life.insert(m_life.begin() + idx, e);
1036     return true;
1037 }
1038
1039 bool ir_value::setAlive(size_t s)
1040 {
1041     size_t i;
1042     const size_t vs = m_life.size();
1043     ir_life_entry_t *life_found = nullptr;
1044     ir_life_entry_t *before = nullptr;
1045     ir_life_entry_t new_entry;
1046
1047     /* Find the first range >= s */
1048     for (i = 0; i < vs; ++i)
1049     {
1050         before = life_found;
1051         life_found = &m_life[i];
1052         if (life_found->start > s)
1053             break;
1054     }
1055     /* nothing found? append */
1056     if (i == vs) {
1057         ir_life_entry_t e;
1058         if (life_found && life_found->end+1 == s)
1059         {
1060             /* previous life range can be merged in */
1061             life_found->end++;
1062             return true;
1063         }
1064         if (life_found && life_found->end >= s)
1065             return false;
1066         e.start = e.end = s;
1067         m_life.emplace_back(e);
1068         return true;
1069     }
1070     /* found */
1071     if (before)
1072     {
1073         if (before->end + 1 == s &&
1074             life_found->start - 1 == s)
1075         {
1076             /* merge */
1077             before->end = life_found->end;
1078             m_life.erase(m_life.begin()+i);
1079             return true;
1080         }
1081         if (before->end + 1 == s)
1082         {
1083             /* extend before */
1084             before->end++;
1085             return true;
1086         }
1087         /* already contained */
1088         if (before->end >= s)
1089             return false;
1090     }
1091     /* extend */
1092     if (life_found->start - 1 == s)
1093     {
1094         life_found->start--;
1095         return true;
1096     }
1097     /* insert a new entry */
1098     new_entry.start = new_entry.end = s;
1099     return insertLife(i, new_entry);
1100 }
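/* Worked example (assuming ordered ranges, as maintained above): with
 * m_life = [3,5], [9,9], setAlive(6) extends the first range to [3,6],
 * setAlive(7) to [3,7], and setAlive(8) then merges the two into [3,9].
 */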
1101
1102 bool ir_value::mergeLife(const ir_value *other)
1103 {
1104     size_t i, myi;
1105
1106     if (other->m_life.empty())
1107         return true;
1108
1109     if (m_life.empty()) {
1110         m_life = other->m_life;
1111         return true;
1112     }
1113
1114     myi = 0;
1115     for (i = 0; i < other->m_life.size(); ++i)
1116     {
1117         const ir_life_entry_t &otherlife = other->m_life[i];
1118         while (true)
1119         {
1120             ir_life_entry_t *entry = &m_life[myi];
1121
1122             if (otherlife.end+1 < entry->start)
1123             {
1124                 /* adding an interval before entry */
1125                 if (!insertLife(myi, otherlife))
1126                     return false;
1127                 ++myi;
1128                 break;
1129             }
1130
1131             if (otherlife.start <  entry->start &&
1132                 otherlife.end+1 >= entry->start)
1133             {
1134                 /* starts earlier and overlaps */
1135                 entry->start = otherlife.start;
1136             }
1137
1138             if (otherlife.end   >  entry->end &&
1139                 otherlife.start <= entry->end+1)
1140             {
1141                 /* ends later and overlaps */
1142                 entry->end = otherlife.end;
1143             }
1144
1145             /* see if our change combines it with the next ranges */
1146             while (myi+1 < m_life.size() &&
1147                    entry->end+1 >= m_life[1+myi].start)
1148             {
1149                 /* overlaps with (myi+1) */
1150                 if (entry->end < m_life[1+myi].end)
1151                     entry->end = m_life[1+myi].end;
1152                 m_life.erase(m_life.begin() + (myi + 1));
1153                 entry = &m_life[myi];
1154             }
1155
1156             /* see if we're after the entry */
1157             if (otherlife.start > entry->end)
1158             {
1159                 ++myi;
1160                 /* append if we're at the end */
1161                 if (myi >= m_life.size()) {
1162                     m_life.emplace_back(otherlife);
1163                     break;
1164                 }
1165                 /* otherwise check the next range */
1166                 continue;
1167             }
1168             break;
1169         }
1170     }
1171     return true;
1172 }
1173
1174 static bool ir_values_overlap(const ir_value *a, const ir_value *b)
1175 {
1176     /* For any life entry in A see if it overlaps with
1177      * any life entry in B.
1178      * Note that the life entries are ordered, so we can use a
1179      * more efficient algorithm than naively translating the
1180      * statement above.
1181      */
1182
1183     const ir_life_entry_t *la, *lb, *enda, *endb;
1184
1185     /* first of all, if either has no life range, they cannot clash */
1186     if (a->m_life.empty() || b->m_life.empty())
1187         return false;
1188
1189     la = &a->m_life.front();
1190     lb = &b->m_life.front();
1191     enda = &a->m_life.back() + 1;
1192     endb = &b->m_life.back() + 1;
1193     while (true)
1194     {
1195         /* check if the entries overlap, for that,
1196          * both must start before the other one ends.
1197          */
1198         if (la->start < lb->end &&
1199             lb->start < la->end)
1200         {
1201             return true;
1202         }
1203
1204         /* entries are ordered
1205          * one entry is earlier than the other
1206          * that earlier entry will be moved forward
1207          */
1208         if (la->start < lb->start)
1209         {
1210             /* order: A B, move A forward
1211              * check if we hit the end with A
1212              */
1213             if (++la == enda)
1214                 break;
1215         }
1216         else /* if (lb->start < la->start)  actually <= */
1217         {
1218             /* order: B A, move B forward
1219              * check if we hit the end with B
1220              */
1221             if (++lb == endb)
1222                 break;
1223         }
1224     }
1225     return false;
1226 }
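/* This is a two-pointer sweep over two ordered, disjoint range lists.
 * Note the strict '<' comparisons: ranges that merely touch (one ends
 * exactly where the other starts) are not treated as overlapping.
 */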
1227
1228 /***********************************************************************
1229  * IR main operations
1230  */
1231
1232 static bool ir_check_unreachable(ir_block *self)
1233 {
1234     /* The IR should never have to deal with unreachable code */
1235     if (!self->m_final/* || OPTS_FLAG(ALLOW_UNREACHABLE_CODE)*/)
1236         return true;
1237     irerror(self->m_context, "unreachable statement (%s)", self->m_label.c_str());
1238     return false;
1239 }
1240
1241 bool ir_block_create_store_op(ir_block *self, lex_ctx_t ctx, int op, ir_value *target, ir_value *what)
1242 {
1243     ir_instr *in;
1244     if (!ir_check_unreachable(self))
1245         return false;
1246
1247     if (target->m_store == store_value &&
1248         (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC))
1249     {
1250         irerror(self->m_context, "cannot store to an SSA value");
1251         irerror(self->m_context, "trying to store: %s <- %s", target->m_name.c_str(), what->m_name.c_str());
1252         irerror(self->m_context, "instruction: %s", util_instr_str[op]);
1253         return false;
1254     }
1255
1256     in = new ir_instr(ctx, self, op);
1257     if (!in)
1258         return false;
1259
1260     if (!ir_instr_op(in, 0, target, (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC)) ||
1261         !ir_instr_op(in, 1, what, false))
1262     {
1263         delete in;
1264         return false;
1265     }
1266     vec_push(self->m_instr, in);
1267     return true;
1268 }
1269
1270 bool ir_block_create_state_op(ir_block *self, lex_ctx_t ctx, ir_value *frame, ir_value *think)
1271 {
1272     ir_instr *in;
1273     if (!ir_check_unreachable(self))
1274         return false;
1275
1276     in = new ir_instr(ctx, self, INSTR_STATE);
1277     if (!in)
1278         return false;
1279
1280     if (!ir_instr_op(in, 0, frame, false) ||
1281         !ir_instr_op(in, 1, think, false))
1282     {
1283         delete in;
1284         return false;
1285     }
1286     vec_push(self->m_instr, in);
1287     return true;
1288 }
1289
1290 static bool ir_block_create_store(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1291 {
1292     int op = 0;
1293     qc_type vtype;
1294     if (target->m_vtype == TYPE_VARIANT)
1295         vtype = what->m_vtype;
1296     else
1297         vtype = target->m_vtype;
1298
1299 #if 0
1300     if      (vtype == TYPE_FLOAT   && what->m_vtype == TYPE_INTEGER)
1301         op = INSTR_CONV_ITOF;
1302     else if (vtype == TYPE_INTEGER && what->m_vtype == TYPE_FLOAT)
1303         op = INSTR_CONV_FTOI;
1304 #endif
1305         op = type_store_instr[vtype];
1306
1307     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1308         if (op == INSTR_STORE_FLD && what->m_fieldtype == TYPE_VECTOR)
1309             op = INSTR_STORE_V;
1310     }
1311
1312     return ir_block_create_store_op(self, ctx, op, target, what);
1313 }
1314
1315 bool ir_block_create_storep(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1316 {
1317     int op = 0;
1318     qc_type vtype;
1319
1320     if (target->m_vtype != TYPE_POINTER)
1321         return false;
1322
1323     /* storing using pointer - target is a pointer, type must be
1324      * inferred from source
1325      */
1326     vtype = what->m_vtype;
1327
1328     op = type_storep_instr[vtype];
1329     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1330         if (op == INSTR_STOREP_FLD && what->m_fieldtype == TYPE_VECTOR)
1331             op = INSTR_STOREP_V;
1332     }
1333
1334     return ir_block_create_store_op(self, ctx, op, target, what);
1335 }
1336
1337 bool ir_block_create_return(ir_block *self, lex_ctx_t ctx, ir_value *v)
1338 {
1339     ir_instr *in;
1340     if (!ir_check_unreachable(self))
1341         return false;
1342
1343     self->m_final = true;
1344
1345     self->m_is_return = true;
1346     in = new ir_instr(ctx, self, INSTR_RETURN);
1347     if (!in)
1348         return false;
1349
1350     if (v && !ir_instr_op(in, 0, v, false)) {
1351         delete in;
1352         return false;
1353     }
1354
1355     vec_push(self->m_instr, in);
1356     return true;
1357 }
1358
1359 bool ir_block_create_if(ir_block *self, lex_ctx_t ctx, ir_value *v,
1360                         ir_block *ontrue, ir_block *onfalse)
1361 {
1362     ir_instr *in;
1363     if (!ir_check_unreachable(self))
1364         return false;
1365     self->m_final = true;
1366     /*in = new ir_instr(ctx, self, (v->m_vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/
1367     in = new ir_instr(ctx, self, VINSTR_COND);
1368     if (!in)
1369         return false;
1370
1371     if (!ir_instr_op(in, 0, v, false)) {
1372         delete in;
1373         return false;
1374     }
1375
1376     in->m_bops[0] = ontrue;
1377     in->m_bops[1] = onfalse;
1378
1379     vec_push(self->m_instr, in);
1380
1381     self->m_exits.push_back(ontrue);
1382     self->m_exits.push_back(onfalse);
1383     ontrue->m_entries.push_back(self);
1384     onfalse->m_entries.push_back(self);
1385     return true;
1386 }
1387
1388 bool ir_block_create_jump(ir_block *self, lex_ctx_t ctx, ir_block *to)
1389 {
1390     ir_instr *in;
1391     if (!ir_check_unreachable(self))
1392         return false;
1393     self->m_final = true;
1394     in = new ir_instr(ctx, self, VINSTR_JUMP);
1395     if (!in)
1396         return false;
1397
1398     in->m_bops[0] = to;
1399     vec_push(self->m_instr, in);
1400
1401     self->m_exits.push_back(to);
1402     to->m_entries.push_back(self);
1403     return true;
1404 }
1405
1406 bool ir_block_create_goto(ir_block *self, lex_ctx_t ctx, ir_block *to)
1407 {
1408     self->m_owner->m_flags |= IR_FLAG_HAS_GOTO;
1409     return ir_block_create_jump(self, ctx, to);
1410 }
1411
1412 ir_instr* ir_block_create_phi(ir_block *self, lex_ctx_t ctx, const char *label, qc_type ot)
1413 {
1414     ir_value *out;
1415     ir_instr *in;
1416     if (!ir_check_unreachable(self))
1417         return nullptr;
1418     in = new ir_instr(ctx, self, VINSTR_PHI);
1419     if (!in)
1420         return nullptr;
1421     out = new ir_value(self->m_owner, label ? label : "", store_value, ot);
1422     if (!out) {
1423         delete in;
1424         return nullptr;
1425     }
1426     if (!ir_instr_op(in, 0, out, true)) {
1427         delete in;
1428         return nullptr;
1429     }
1430     vec_push(self->m_instr, in);
1431     return in;
1432 }
1433
1434 ir_value* ir_phi_value(ir_instr *self)
1435 {
1436     return self->_m_ops[0];
1437 }
1438
1439 void ir_phi_add(ir_instr* self, ir_block *b, ir_value *v)
1440 {
1441     ir_phi_entry_t pe;
1442
1443     if (!vec_ir_block_find(self->m_owner->m_entries, b, nullptr)) {
1444         // It must not be possible to cause this; otherwise the AST
1445         // is doing something wrong.
1446         irerror(self->m_context, "Invalid entry block for PHI");
1447         exit(EXIT_FAILURE);
1448     }
1449
1450     pe.value = v;
1451     pe.from = b;
1452     v->m_reads.push_back(self);
1453     self->m_phi.push_back(pe);
1454 }
1455
1456 /* call related code */
1457 ir_instr* ir_block_create_call(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *func, bool noreturn)
1458 {
1459     ir_value *out;
1460     ir_instr *in;
1461     if (!ir_check_unreachable(self))
1462         return nullptr;
1463     in = new ir_instr(ctx, self, (noreturn ? VINSTR_NRCALL : INSTR_CALL0));
1464     if (!in)
1465         return nullptr;
1466     if (noreturn) {
1467         self->m_final = true;
1468         self->m_is_return = true;
1469     }
1470     out = new ir_value(self->m_owner, label ? label : "", (func->m_outtype == TYPE_VOID) ? store_return : store_value, func->m_outtype);
1471     if (!out) {
1472         delete in;
1473         return nullptr;
1474     }
1475     if (!ir_instr_op(in, 0, out, true) ||
1476         !ir_instr_op(in, 1, func, false))
1477     {
1478         delete in;
1479         return nullptr;
1480     }
1481     vec_push(self->m_instr, in);
1482     /*
1483     if (noreturn) {
1484         if (!ir_block_create_return(self, ctx, nullptr)) {
1485             compile_error(ctx, "internal error: failed to generate dummy-return instruction");
1486             delete in;
1487             return nullptr;
1488         }
1489     }
1490     */
1491     return in;
1492 }
1493
1494 ir_value* ir_call_value(ir_instr *self)
1495 {
1496     return self->_m_ops[0];
1497 }
1498
1499 void ir_call_param(ir_instr* self, ir_value *v)
1500 {
1501     self->m_params.push_back(v);
1502     v->m_reads.push_back(self);
1503 }
1504
1505 /* binary op related code */
1506
1507 ir_value* ir_block_create_binop(ir_block *self, lex_ctx_t ctx,
1508                                 const char *label, int opcode,
1509                                 ir_value *left, ir_value *right)
1510 {
1511     qc_type ot = TYPE_VOID;
1512     switch (opcode) {
1513         case INSTR_ADD_F:
1514         case INSTR_SUB_F:
1515         case INSTR_DIV_F:
1516         case INSTR_MUL_F:
1517         case INSTR_MUL_V:
1518         case INSTR_AND:
1519         case INSTR_OR:
1520 #if 0
1521         case INSTR_AND_I:
1522         case INSTR_AND_IF:
1523         case INSTR_AND_FI:
1524         case INSTR_OR_I:
1525         case INSTR_OR_IF:
1526         case INSTR_OR_FI:
1527 #endif
1528         case INSTR_BITAND:
1529         case INSTR_BITOR:
1530         case VINSTR_BITXOR:
1531 #if 0
1532         case INSTR_SUB_S: /* -- offset of string as float */
1533         case INSTR_MUL_IF:
1534         case INSTR_MUL_FI:
1535         case INSTR_DIV_IF:
1536         case INSTR_DIV_FI:
1537         case INSTR_BITOR_IF:
1538         case INSTR_BITOR_FI:
1539         case INSTR_BITAND_FI:
1540         case INSTR_BITAND_IF:
1541         case INSTR_EQ_I:
1542         case INSTR_NE_I:
1543 #endif
1544             ot = TYPE_FLOAT;
1545             break;
1546 #if 0
1547         case INSTR_ADD_I:
1548         case INSTR_ADD_IF:
1549         case INSTR_ADD_FI:
1550         case INSTR_SUB_I:
1551         case INSTR_SUB_FI:
1552         case INSTR_SUB_IF:
1553         case INSTR_MUL_I:
1554         case INSTR_DIV_I:
1555         case INSTR_BITAND_I:
1556         case INSTR_BITOR_I:
1557         case INSTR_XOR_I:
1558         case INSTR_RSHIFT_I:
1559         case INSTR_LSHIFT_I:
1560             ot = TYPE_INTEGER;
1561             break;
1562 #endif
1563         case INSTR_ADD_V:
1564         case INSTR_SUB_V:
1565         case INSTR_MUL_VF:
1566         case INSTR_MUL_FV:
1567         case VINSTR_BITAND_V:
1568         case VINSTR_BITOR_V:
1569         case VINSTR_BITXOR_V:
1570         case VINSTR_BITAND_VF:
1571         case VINSTR_BITOR_VF:
1572         case VINSTR_BITXOR_VF:
1573         case VINSTR_CROSS:
1574 #if 0
1575         case INSTR_DIV_VF:
1576         case INSTR_MUL_IV:
1577         case INSTR_MUL_VI:
1578 #endif
1579             ot = TYPE_VECTOR;
1580             break;
1581 #if 0
1582         case INSTR_ADD_SF:
1583             ot = TYPE_POINTER;
1584             break;
1585 #endif
1586     /*
1587      * after the following default case, the value of opcode can never
1588      * be 1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65
1589      */
1590         default:
1591             /* ranges: */
1592             /* boolean operations result in floats */
1593
1594             /*
1595              * opcode >= 10 takes true branch opcode is at least 10
1596              * opcode <= 23 takes false branch opcode is at least 24
1597              */
1598             if (opcode >= INSTR_EQ_F && opcode <= INSTR_GT)
1599                 ot = TYPE_FLOAT;
1600
1601             /*
1602              * At condition "opcode <= 23", the value of "opcode" must be
1603              * at least 24.
1604              * At condition "opcode <= 23", the value of "opcode" cannot be
1605              * equal to any of {1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65}.
1606              * The condition "opcode <= 23" cannot be true.
1607              *
1608              * Thus ot=2 (TYPE_FLOAT) can never be true
1609              */
1610 #if 0
1611             else if (opcode >= INSTR_LE && opcode <= INSTR_GT)
1612                 ot = TYPE_FLOAT;
1613             else if (opcode >= INSTR_LE_I && opcode <= INSTR_EQ_FI)
1614                 ot = TYPE_FLOAT;
1615 #endif
1616             break;
1617     };
1618     if (ot == TYPE_VOID) {
1619         /* The AST or parser were supposed to check this! */
1620         return nullptr;
1621     }
1622
1623     return ir_block_create_general_instr(self, ctx, label, opcode, left, right, ot);
1624 }
1625
1626 ir_value* ir_block_create_unary(ir_block *self, lex_ctx_t ctx,
1627                                 const char *label, int opcode,
1628                                 ir_value *operand)
1629 {
1630     qc_type ot = TYPE_FLOAT;
1631     switch (opcode) {
1632         case INSTR_NOT_F:
1633         case INSTR_NOT_V:
1634         case INSTR_NOT_S:
1635         case INSTR_NOT_ENT:
1636         case INSTR_NOT_FNC: /*
1637         case INSTR_NOT_I:   */
1638             ot = TYPE_FLOAT;
1639             break;
1640
1641         /*
1642          * Negation for virtual instructions is emulated with 0-value. Thankfully
1643          * the operand for 0 already exists so we just source it from here.
1644          */
1645         case VINSTR_NEG_F:
1646             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_F, nullptr, operand, ot);
1647         case VINSTR_NEG_V:
1648             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_V, nullptr, operand, TYPE_VECTOR);
1649
1650         default:
1651             ot = operand->m_vtype;
1652             break;
1653     };
1654     if (ot == TYPE_VOID) {
1655         /* The AST or parser were supposed to check this! */
1656         return nullptr;
1657     }
1658
1659     /* let's use the general instruction creator and pass nullptr for OPB */
1660     return ir_block_create_general_instr(self, ctx, label, opcode, operand, nullptr, ot);
1661 }
1662
1663 static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t ctx, const char *label,
1664                                         int op, ir_value *a, ir_value *b, qc_type outype)
1665 {
1666     ir_instr *instr;
1667     ir_value *out;
1668
1669     out = new ir_value(self->m_owner, label ? label : "", store_value, outype);
1670     if (!out)
1671         return nullptr;
1672
1673     instr = new ir_instr(ctx, self, op);
1674     if (!instr) {
1675         return nullptr;
1676     }
1677
1678     if (!ir_instr_op(instr, 0, out, true) ||
1679         !ir_instr_op(instr, 1, a, false) ||
1680         !ir_instr_op(instr, 2, b, false) )
1681     {
1682         goto on_error;
1683     }
1684
1685     vec_push(self->m_instr, instr);
1686
1687     return out;
1688 on_error:
1689     delete instr;
1690     return nullptr;
1691 }
1692
1693 ir_value* ir_block_create_fieldaddress(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field)
1694 {
1695     ir_value *v;
1696
1697     /* Support for various pointer types: TODO if so desired */
1698     if (ent->m_vtype != TYPE_ENTITY)
1699         return nullptr;
1700
1701     if (field->m_vtype != TYPE_FIELD)
1702         return nullptr;
1703
1704     v = ir_block_create_general_instr(self, ctx, label, INSTR_ADDRESS, ent, field, TYPE_POINTER);
1705     v->m_fieldtype = field->m_fieldtype;
1706     return v;
1707 }
1708
1709 ir_value* ir_block_create_load_from_ent(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field, qc_type outype)
1710 {
1711     int op;
1712     if (ent->m_vtype != TYPE_ENTITY)
1713         return nullptr;
1714
1715     /* at some point we could redirect for TYPE_POINTER... but that could lead to carelessness */
1716     if (field->m_vtype != TYPE_FIELD)
1717         return nullptr;
1718
1719     switch (outype)
1720     {
1721         case TYPE_FLOAT:    op = INSTR_LOAD_F;   break;
1722         case TYPE_VECTOR:   op = INSTR_LOAD_V;   break;
1723         case TYPE_STRING:   op = INSTR_LOAD_S;   break;
1724         case TYPE_FIELD:    op = INSTR_LOAD_FLD; break;
1725         case TYPE_ENTITY:   op = INSTR_LOAD_ENT; break;
1726         case TYPE_FUNCTION: op = INSTR_LOAD_FNC; break;
1727 #if 0
1728         case TYPE_POINTER: op = INSTR_LOAD_I;   break;
1729         case TYPE_INTEGER: op = INSTR_LOAD_I;   break;
1730 #endif
1731         default:
1732             irerror(self->m_context, "invalid type for ir_block_create_load_from_ent: %s", type_name[outype]);
1733             return nullptr;
1734     }
1735
1736     return ir_block_create_general_instr(self, ctx, label, op, ent, field, outype);
1737 }
1738
1739 /* PHI resolving breaks the SSA, and must thus be the last
1740  * step before life-range calculation.
1741  */
1742
1743 static bool ir_block_naive_phi(ir_block *self);
1744 bool ir_function_naive_phi(ir_function *self)
1745 {
1746     for (auto& b : self->m_blocks)
1747         if (!ir_block_naive_phi(b.get()))
1748             return false;
1749     return true;
1750 }
1751
1752 static bool ir_block_naive_phi(ir_block *self)
1753 {
1754     size_t i;
1755     /* FIXME: optionally, create_phi can add the phis
1756      * to a list so we don't need to loop through blocks
1757      * - anyway: "don't optimize YET"
1758      */
1759     for (i = 0; i < vec_size(self->m_instr); ++i)
1760     {
1761         ir_instr *instr = self->m_instr[i];
1762         if (instr->m_opcode != VINSTR_PHI)
1763             continue;
1764
1765         vec_remove(self->m_instr, i, 1);
1766         --i; /* NOTE: i+1 below */
1767
1768         for (auto &it : instr->m_phi) {
1769             ir_value *v = it.value;
1770             ir_block *b = it.from;
1771             if (v->m_store == store_value && v->m_reads.size() == 1 && v->m_writes.size() == 1) {
1772                 /* replace the value */
1773                 if (!ir_instr_op(v->m_writes[0], 0, instr->_m_ops[0], true))
1774                     return false;
1775             } else {
1776                 /* force a move instruction */
1777                 ir_instr *prevjump = vec_last(b->m_instr);
1778                 vec_pop(b->m_instr);
1779                 b->m_final = false;
1780                 instr->_m_ops[0]->m_store = store_global;
1781                 if (!ir_block_create_store(b, instr->m_context, instr->_m_ops[0], v))
1782                     return false;
1783                 instr->_m_ops[0]->m_store = store_value;
1784                 vec_push(b->m_instr, prevjump);
1785                 b->m_final = true;
1786             }
1787         }
1788         delete instr;
1789     }
1790     return true;
1791 }
1792
1793 /***********************************************************************
1794  * IR Temp allocation code
1795  * Propagating value life ranges by walking through the function backwards
1796  * until no more changes are made.
1797  * In theory this needs one additional pass for every nested loop
1798  * level, though this implementation might run an extra time for
1799  * nested ifs.
1800  */
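/* Sketch of the fixpoint used below: every block starts from the union
 * of the 'living' sets of its successors (m_exits), walks its own
 * instructions backwards, kills values at their (single, SSA) write and
 * revives them at reads, recording each visited eid in the value's life
 * range.  The whole pass repeats while anything still changes:
 *     do { changed = false; propagate all blocks in reverse; }
 *     while (changed);
 */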
1801
1802 /* Enumerate instructions used by value's life-ranges
1803  */
1804 static void ir_block_enumerate(ir_block *self, size_t *_eid)
1805 {
1806     size_t i;
1807     size_t eid = *_eid;
1808     for (i = 0; i < vec_size(self->m_instr); ++i)
1809     {
1810         self->m_instr[i]->m_eid = eid++;
1811     }
1812     *_eid = eid;
1813 }
1814
1815 /* Enumerate blocks and instructions.
1816  * The block-enumeration is unordered!
1817  * We do not really use the block enumeration, however
1818  * the instruction enumeration is important for life-ranges.
1819  */
1820 void ir_function_enumerate(ir_function *self)
1821 {
1822     size_t instruction_id = 0;
1823     size_t block_eid = 0;
1824     for (auto& block : self->m_blocks)
1825     {
1826         /* each block now gets an additional "entry" instruction id
1827          * we can use to avoid point-life issues
1828          */
1829         block->m_entry_id = instruction_id;
1830         block->m_eid      = block_eid;
1831         ++instruction_id;
1832         ++block_eid;
1833
1834         ir_block_enumerate(block.get(), &instruction_id);
1835     }
1836 }
1837
1838 /* Local-value allocator
1839  * After finishing creating the liferange of all values used in a function
1840  * we can allocate their global-positions.
1841  * This is the counterpart to register-allocation in register machines.
1842  */
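/* Rough illustration of the strategy: values whose life ranges do not
 * overlap may share a slot.  E.g. with ranges
 *     a: [2,5]   b: [6,9]   c: [4,8]
 * a and b can share slot 0 while c needs its own slot 1; slot sizes are
 * widened to the largest value placed in them, and the final positions
 * are summed up later in ir_function_allocate_locals().
 */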
1843 struct function_allocator {
1844     ir_value **locals;
1845     size_t *sizes;
1846     size_t *positions;
1847     bool *unique;
1848 };
1849
1850 static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
1851 {
1852     ir_value *slot;
1853     size_t vsize = var->size();
1854
1855     var->m_code.local = vec_size(alloc->locals);
1856
1857     slot = new ir_value("reg", store_global, var->m_vtype);
1858     if (!slot)
1859         return false;
1860
1861     if (!slot->mergeLife(var))
1862         goto localerror;
1863
1864     vec_push(alloc->locals, slot);
1865     vec_push(alloc->sizes, vsize);
1866     vec_push(alloc->unique, var->m_unique_life);
1867
1868     return true;
1869
1870 localerror:
1871     delete slot;
1872     return false;
1873 }
1874
1875 static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
1876 {
1877     size_t a;
1878     ir_value *slot;
1879
1880     if (v->m_unique_life)
1881         return function_allocator_alloc(alloc, v);
1882
1883     for (a = 0; a < vec_size(alloc->locals); ++a)
1884     {
1885         /* if it's reserved for a unique liferange: skip */
1886         if (alloc->unique[a])
1887             continue;
1888
1889         slot = alloc->locals[a];
1890
1891         /* never resize parameter slots;
1892          * this restriction will be required later when overlapping temps + locals
1893          */
1894         if (a < vec_size(self->m_params) &&
1895             alloc->sizes[a] < v->size())
1896         {
1897             continue;
1898         }
1899
1900         if (ir_values_overlap(v, slot))
1901             continue;
1902
1903         if (!slot->mergeLife(v))
1904             return false;
1905
1906         /* adjust size for this slot */
1907         if (alloc->sizes[a] < v->size())
1908             alloc->sizes[a] = v->size();
1909
1910         v->m_code.local = a;
1911         return true;
1912     }
1913     if (a >= vec_size(alloc->locals)) {
1914         if (!function_allocator_alloc(alloc, v))
1915             return false;
1916     }
1917     return true;
1918 }
1919
1920 bool ir_function_allocate_locals(ir_function *self)
1921 {
1922     bool   retval = true;
1923     size_t pos;
1924     bool   opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
1925
1926     function_allocator lockalloc, globalloc;
1927
1928     if (self->m_locals.empty() && self->m_values.empty())
1929         return true;
1930
1931     globalloc.locals    = nullptr;
1932     globalloc.sizes     = nullptr;
1933     globalloc.positions = nullptr;
1934     globalloc.unique    = nullptr;
1935     lockalloc.locals    = nullptr;
1936     lockalloc.sizes     = nullptr;
1937     lockalloc.positions = nullptr;
1938     lockalloc.unique    = nullptr;
1939
1940     size_t i;
1941     for (i = 0; i < self->m_locals.size(); ++i)
1942     {
1943         ir_value *v = self->m_locals[i].get();
1944         if ((self->m_flags & IR_FLAG_MASK_NO_LOCAL_TEMPS) || !OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) {
1945             v->m_locked      = true;
1946             v->m_unique_life = true;
1947         }
1948         else if (i >= vec_size(self->m_params))
1949             break;
1950         else
1951             v->m_locked = true; /* lock parameter locals */
1952         if (!function_allocator_alloc((v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1953             goto error;
1954     }
1955     for (; i < self->m_locals.size(); ++i)
1956     {
1957         ir_value *v = self->m_locals[i].get();
1958         if (v->m_life.empty())
1959             continue;
1960         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1961             goto error;
1962     }
1963
1964     /* Allocate a slot for any value that still exists */
1965     for (i = 0; i < self->m_values.size(); ++i)
1966     {
1967         ir_value *v = self->m_values[i].get();
1968
1969         if (v->m_life.empty())
1970             continue;
1971
1972         /* CALL optimization:
1973          * If the value is a parameter-temp: 1 write, 1 read from a CALL
1974          * and it's not "locked", write it to the OFS_PARM directly.
1975          */
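        /* Hedged example: for a call like  foo(x + 1)  the temp holding
         * x + 1 has one write and one read (by the CALL), so instead of
         *     ADD -> temp;  STORE temp -> PARM0
         * the ADD can target OFS_PARM0 (or an EXTPARM proto beyond the
         * 8th argument) directly, saving the store. */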
1976         if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->m_locked && !v->m_unique_life) {
1977             if (v->m_reads.size() == 1 && v->m_writes.size() == 1 &&
1978                 (v->m_reads[0]->m_opcode == VINSTR_NRCALL ||
1979                  (v->m_reads[0]->m_opcode >= INSTR_CALL0 && v->m_reads[0]->m_opcode <= INSTR_CALL8)
1980                 )
1981                )
1982             {
1983                 size_t param;
1984                 ir_instr *call = v->m_reads[0];
1985                 if (!vec_ir_value_find(call->m_params, v, &param)) {
1986                     irerror(call->m_context, "internal error: unlocked parameter %s not found", v->m_name.c_str());
1987                     goto error;
1988                 }
1989                 ++opts_optimizationcount[OPTIM_CALL_STORES];
1990                 v->m_callparam = true;
1991                 if (param < 8)
1992                     v->setCodeAddress(OFS_PARM0 + 3*param);
1993                 else {
1994                     size_t nprotos = self->m_owner->m_extparam_protos.size();
1995                     ir_value *ep;
1996                     param -= 8;
1997                     if (nprotos > param)
1998                         ep = self->m_owner->m_extparam_protos[param].get();
1999                     else
2000                     {
2001                         ep = self->m_owner->generateExtparamProto();
2002                         while (++nprotos <= param)
2003                             ep = self->m_owner->generateExtparamProto();
2004                     }
2005                     ir_instr_op(v->m_writes[0], 0, ep, true);
2006                     call->m_params[param+8] = ep;
2007                 }
2008                 continue;
2009             }
2010             if (v->m_writes.size() == 1 && v->m_writes[0]->m_opcode == INSTR_CALL0) {
2011                 v->m_store = store_return;
2012                 if (v->m_members[0]) v->m_members[0]->m_store = store_return;
2013                 if (v->m_members[1]) v->m_members[1]->m_store = store_return;
2014                 if (v->m_members[2]) v->m_members[2]->m_store = store_return;
2015                 ++opts_optimizationcount[OPTIM_CALL_STORES];
2016                 continue;
2017             }
2018         }
2019
2020         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
2021             goto error;
2022     }
2023
2024     if (!lockalloc.sizes && !globalloc.sizes) {
2025         goto cleanup;
2026     }
2027     vec_push(lockalloc.positions, 0);
2028     vec_push(globalloc.positions, 0);
2029
2030     /* Adjust slot positions based on sizes */
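    /* Sketch: positions are a running prefix sum of the slot sizes, e.g.
     * sizes {3, 1, 3} yield positions {0, 3, 4} and 7 allocated globals. */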
2031     if (lockalloc.sizes) {
2032         pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
2033         for (i = 1; i < vec_size(lockalloc.sizes); ++i)
2034         {
2035             pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
2036             vec_push(lockalloc.positions, pos);
2037         }
2038         self->m_allocated_locals = pos + vec_last(lockalloc.sizes);
2039     }
2040     if (globalloc.sizes) {
2041         pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
2042         for (i = 1; i < vec_size(globalloc.sizes); ++i)
2043         {
2044             pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
2045             vec_push(globalloc.positions, pos);
2046         }
2047         self->m_globaltemps = pos + vec_last(globalloc.sizes);
2048     }
2049
2050     /* Locals need to know their new position */
2051     for (auto& local : self->m_locals) {
2052         if (local->m_locked || !opt_gt)
2053             local->m_code.local = lockalloc.positions[local->m_code.local];
2054         else
2055             local->m_code.local = globalloc.positions[local->m_code.local];
2056     }
2057     /* Apply the actual slot positions to the values */
2058     for (auto& value : self->m_values) {
2059         if (value->m_locked || !opt_gt)
2060             value->m_code.local = lockalloc.positions[value->m_code.local];
2061         else
2062             value->m_code.local = globalloc.positions[value->m_code.local];
2063     }
2064
2065     goto cleanup;
2066
2067 error:
2068     retval = false;
2069 cleanup:
2070     for (i = 0; i < vec_size(lockalloc.locals); ++i)
2071         delete lockalloc.locals[i];
2072     for (i = 0; i < vec_size(globalloc.locals); ++i)
2073         delete globalloc.locals[i];
2074     vec_free(globalloc.unique);
2075     vec_free(globalloc.locals);
2076     vec_free(globalloc.sizes);
2077     vec_free(globalloc.positions);
2078     vec_free(lockalloc.unique);
2079     vec_free(lockalloc.locals);
2080     vec_free(lockalloc.sizes);
2081     vec_free(lockalloc.positions);
2082     return retval;
2083 }
2084
2085 /* Get information about which operand
2086  * is read from, or written to.
2087  */
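/* The two out-parameters are bitmasks over the three operand slots:
 * bit (1<<o) set in *read / *write means _m_ops[o] is read / written.
 * E.g. the default case (write=1, read=6) is the usual "OPC = OPA op OPB"
 * layout: operand 0 written, operands 1 and 2 read; the STOREP family
 * reads all three (read=7) and writes none.
 */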
2088 static void ir_op_read_write(int op, size_t *read, size_t *write)
2089 {
2090     switch (op)
2091     {
2092     case VINSTR_JUMP:
2093     case INSTR_GOTO:
2094         *write = 0;
2095         *read = 0;
2096         break;
2097     case INSTR_IF:
2098     case INSTR_IFNOT:
2099 #if 0
2100     case INSTR_IF_S:
2101     case INSTR_IFNOT_S:
2102 #endif
2103     case INSTR_RETURN:
2104     case VINSTR_COND:
2105         *write = 0;
2106         *read = 1;
2107         break;
2108     case INSTR_STOREP_F:
2109     case INSTR_STOREP_V:
2110     case INSTR_STOREP_S:
2111     case INSTR_STOREP_ENT:
2112     case INSTR_STOREP_FLD:
2113     case INSTR_STOREP_FNC:
2114         *write = 0;
2115         *read  = 7;
2116         break;
2117     default:
2118         *write = 1;
2119         *read = 6;
2120         break;
2121     };
2122 }
2123
2124 static bool ir_block_living_add_instr(ir_block *self, size_t eid) {
2125     bool changed = false;
2126     for (auto &it : self->m_living)
2127         if (it->setAlive(eid))
2128             changed = true;
2129     return changed;
2130 }
2131
2132 static bool ir_block_living_lock(ir_block *self) {
2133     bool changed = false;
2134     for (auto &it : self->m_living) {
2135         if (it->m_locked)
2136             continue;
2137         it->m_locked = true;
2138         changed = true;
2139     }
2140     return changed;
2141 }
2142
2143 static bool ir_block_life_propagate(ir_block *self, bool *changed)
2144 {
2145     ir_instr *instr;
2146     ir_value *value;
2147     size_t i, o, mem;
2148     // bitmasks which operands are read from or written to
2149     size_t read, write;
2150
2151     self->m_living.clear();
2152
2153     for (auto &prev : self->m_exits) {
2154         for (auto &it : prev->m_living)
2155             if (!vec_ir_value_find(self->m_living, it, nullptr))
2156                 self->m_living.push_back(it);
2157     }
2158
2159     i = vec_size(self->m_instr);
2160     while (i)
2161     { --i;
2162         instr = self->m_instr[i];
2163
2164         /* See which operands are read and write operands */
2165         ir_op_read_write(instr->m_opcode, &read, &write);
2166
2167         /* Go through the 3 main operands
2168          * writes first, then reads
2169          */
2170         for (o = 0; o < 3; ++o)
2171         {
2172             if (!instr->_m_ops[o]) /* no such operand */
2173                 continue;
2174
2175             value = instr->_m_ops[o];
2176
2177             /* We only care about locals */
2178             /* we also calculate parameter liferanges so that locals
2179              * can take up parameter slots */
2180             if (value->m_store != store_value &&
2181                 value->m_store != store_local &&
2182                 value->m_store != store_param)
2183                 continue;
2184
2185             /* write operands */
2186             /* When we write to a local, we consider it "dead" for the
2187              * remaining upper part of the function, since in SSA a value
2188              * can only be written once (== created)
2189              */
2190             if (write & (1<<o))
2191             {
2192                 size_t idx;
2193                 bool in_living = vec_ir_value_find(self->m_living, value, &idx);
2194                 if (!in_living)
2195                 {
2196                     /* If the value isn't alive it hasn't been read before... */
2197                     /* TODO: See if the warning can be emitted during parsing or AST processing
2198                      * otherwise have the warning printed here.
2199                      * IF printing a warning here: include filecontext_t,
2200                      * and make sure it's only printed once
2201                      * since this function is run multiple times.
2202                      */
2203                     /* con_err( "Value only written %s\n", value->m_name); */
2204                     if (value->setAlive(instr->m_eid))
2205                         *changed = true;
2206                 } else {
2207                     /* 'living' will no longer contain this value,
2208                      * so extend its life range here, since step
2209                      * (A) below won't reach it anymore.
2210                      */
2211                     if (value->setAlive(instr->m_eid))
2212                         *changed = true;
2213                     // Then remove
2214                     self->m_living.erase(self->m_living.begin() + idx);
2215                 }
2216                 /* Removing a vector removes all members */
2217                 for (mem = 0; mem < 3; ++mem) {
2218                     if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], &idx)) {
2219                         if (value->m_members[mem]->setAlive(instr->m_eid))
2220                             *changed = true;
2221                         self->m_living.erase(self->m_living.begin() + idx);
2222                     }
2223                 }
2224                 /* Removing the last member removes the vector */
2225                 if (value->m_memberof) {
2226                     value = value->m_memberof;
2227                     for (mem = 0; mem < 3; ++mem) {
2228                         if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2229                             break;
2230                     }
2231                     if (mem == 3 && vec_ir_value_find(self->m_living, value, &idx)) {
2232                         if (value->setAlive(instr->m_eid))
2233                             *changed = true;
2234                         self->m_living.erase(self->m_living.begin() + idx);
2235                     }
2236                 }
2237             }
2238         }
2239
2240         /* These operations need a special case: they can otherwise break
2241          * when source and destination operands are the same, because the
2242          * engine may read the source multiple times. */
2243         if (instr->m_opcode == INSTR_MUL_VF ||
2244             instr->m_opcode == VINSTR_BITAND_VF ||
2245             instr->m_opcode == VINSTR_BITOR_VF ||
2246             instr->m_opcode == VINSTR_BITXOR ||
2247             instr->m_opcode == VINSTR_BITXOR_VF ||
2248             instr->m_opcode == VINSTR_BITXOR_V ||
2249             instr->m_opcode == VINSTR_CROSS)
2250         {
2251             value = instr->_m_ops[2];
2252             /* the float source will get an additional lifetime */
2253             if (value->setAlive(instr->m_eid+1))
2254                 *changed = true;
2255             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2256                 *changed = true;
2257         }
2258
2259         if (instr->m_opcode == INSTR_MUL_FV ||
2260             instr->m_opcode == INSTR_LOAD_V ||
2261             instr->m_opcode == VINSTR_BITXOR ||
2262             instr->m_opcode == VINSTR_BITXOR_VF ||
2263             instr->m_opcode == VINSTR_BITXOR_V ||
2264             instr->m_opcode == VINSTR_CROSS)
2265         {
2266             value = instr->_m_ops[1];
2267             /* the float source will get an additional lifetime */
2268             if (value->setAlive(instr->m_eid+1))
2269                 *changed = true;
2270             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2271                 *changed = true;
2272         }
2273
2274         for (o = 0; o < 3; ++o)
2275         {
2276             if (!instr->_m_ops[o]) /* no such operand */
2277                 continue;
2278
2279             value = instr->_m_ops[o];
2280
2281             /* We only care about locals */
2282             /* we also calculate parameter liferanges so that locals
2283              * can take up parameter slots */
2284             if (value->m_store != store_value &&
2285                 value->m_store != store_local &&
2286                 value->m_store != store_param)
2287                 continue;
2288
2289             /* read operands */
2290             if (read & (1<<o))
2291             {
2292                 if (!vec_ir_value_find(self->m_living, value, nullptr))
2293                     self->m_living.push_back(value);
2294                 /* reading adds the full vector */
2295                 if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2296                     self->m_living.push_back(value->m_memberof);
2297                 for (mem = 0; mem < 3; ++mem) {
2298                     if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2299                         self->m_living.push_back(value->m_members[mem]);
2300                 }
2301             }
2302         }
2303         /* PHI operands are always read operands */
2304         for (auto &it : instr->m_phi) {
2305             value = it.value;
2306             if (!vec_ir_value_find(self->m_living, value, nullptr))
2307                 self->m_living.push_back(value);
2308             /* reading adds the full vector */
2309             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2310                 self->m_living.push_back(value->m_memberof);
2311             for (mem = 0; mem < 3; ++mem) {
2312                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2313                     self->m_living.push_back(value->m_members[mem]);
2314             }
2315         }
2316
2317         /* on a call, all these values must be "locked" */
2318         if (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8) {
2319             if (ir_block_living_lock(self))
2320                 *changed = true;
2321         }
2322         /* call params are read operands too */
2323         for (auto &it : instr->m_params) {
2324             value = it;
2325             if (!vec_ir_value_find(self->m_living, value, nullptr))
2326                 self->m_living.push_back(value);
2327             /* reading adds the full vector */
2328             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2329                 self->m_living.push_back(value->m_memberof);
2330             for (mem = 0; mem < 3; ++mem) {
2331                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2332                     self->m_living.push_back(value->m_members[mem]);
2333             }
2334         }
2335
2336         /* (A) */
2337         if (ir_block_living_add_instr(self, instr->m_eid))
2338             *changed = true;
2339     }
2340     /* the "entry" instruction ID */
2341     if (ir_block_living_add_instr(self, self->m_entry_id))
2342         *changed = true;
2343
2344     return true;
2345 }
2346
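/* Hedged summary: once the fixpoint below converges, any store_local
 * value (vectors are skipped) still listed as 'living' at the entry
 * block has a read that is not preceded by a write on some path, which
 * is what triggers the WARN_USED_UNINITIALIZED diagnostics further down.
 */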
2347 bool ir_function_calculate_liferanges(ir_function *self)
2348 {
2349     /* parameters live at 0 */
2350     for (size_t i = 0; i < vec_size(self->m_params); ++i)
2351         if (!self->m_locals[i].get()->setAlive(0))
2352             compile_error(self->m_context, "internal error: failed value-life merging");
2353
2354     bool changed;
2355     do {
2356         self->m_run_id++;
2357         changed = false;
2358         for (auto i = self->m_blocks.rbegin(); i != self->m_blocks.rend(); ++i)
2359             ir_block_life_propagate(i->get(), &changed);
2360     } while (changed);
2361
2362     if (self->m_blocks.size()) {
2363         ir_block *block = self->m_blocks[0].get();
2364         for (auto &it : block->m_living) {
2365             ir_value *v = it;
2366             if (v->m_store != store_local)
2367                 continue;
2368             if (v->m_vtype == TYPE_VECTOR)
2369                 continue;
2370             self->m_flags |= IR_FLAG_HAS_UNINITIALIZED;
2371             /* find the instruction reading from it */
2372             size_t s = 0;
2373             for (; s < v->m_reads.size(); ++s) {
2374                 if (v->m_reads[s]->m_eid == v->m_life[0].end)
2375                     break;
2376             }
2377             if (s < v->m_reads.size()) {
2378                 if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2379                               "variable `%s` may be used uninitialized in this function\n"
2380                               " -> %s:%i",
2381                               v->m_name.c_str(),
2382                               v->m_reads[s]->m_context.file, v->m_reads[s]->m_context.line)
2383                    )
2384                 {
2385                     return false;
2386                 }
2387                 continue;
2388             }
2389             if (v->m_memberof) {
2390                 ir_value *vec = v->m_memberof;
2391                 for (s = 0; s < vec->m_reads.size(); ++s) {
2392                     if (vec->m_reads[s]->m_eid == v->m_life[0].end)
2393                         break;
2394                 }
2395                 if (s < vec->m_reads.size()) {
2396                     if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2397                                   "variable `%s` may be used uninitialized in this function\n"
2398                                   " -> %s:%i",
2399                                   v->m_name.c_str(),
2400                                   vec->m_reads[s]->m_context.file, vec->m_reads[s]->m_context.line)
2401                        )
2402                     {
2403                         return false;
2404                     }
2405                     continue;
2406                 }
2407             }
2408             if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2409                           "variable `%s` may be used uninitialized in this function", v->m_name.c_str()))
2410             {
2411                 return false;
2412             }
2413         }
2414     }
2415     return true;
2416 }
2417
2418 /***********************************************************************
2419  * IR Code-Generation
2420  *
2421  * Since the IR has the convention of putting 'write' operands
2422  * at the beginning, we have to rotate the operands of instructions
2423  * properly in order to generate valid QCVM code.
2424  *
2425  * Having destinations at a fixed position is more convenient. In QC
2426  * this is *mostly* OPC, but FTE adds at least 2 instructions which
2427  * read from OPA and store to OPB rather than OPC, which is
2428  * partially the reason why the implementation of these instructions
2429  * in darkplaces has been delayed for so long.
2430  *
2431  * Breaking conventions is annoying...
2432  */
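/* Hedged example of the rotation performed in gen_blocks_recursive():
 * an IR instruction with operands (dest, a, b), e.g.
 *     out = ADD_F a, b
 * is emitted as the QCVM statement
 *     ADD_F  o1 = a, o2 = b, o3 = out
 * i.e. _m_ops[0] moves into o3 while _m_ops[1]/_m_ops[2] become o1/o2;
 * RETURN/DONE and the STORE/STOREP families are then reshuffled again
 * as special cases.
 */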
2433 static bool gen_global_field(code_t *code, ir_value *global)
2434 {
2435     if (global->m_hasvalue)
2436     {
2437         ir_value *fld = global->m_constval.vpointer;
2438         if (!fld) {
2439             irerror(global->m_context, "Invalid field constant with no field: %s", global->m_name.c_str());
2440             return false;
2441         }
2442
2443         /* copy the field's value */
2444         global->setCodeAddress(code->globals.size());
2445         code->globals.push_back(fld->m_code.fieldaddr);
2446         if (global->m_fieldtype == TYPE_VECTOR) {
2447             code->globals.push_back(fld->m_code.fieldaddr+1);
2448             code->globals.push_back(fld->m_code.fieldaddr+2);
2449         }
2450     }
2451     else
2452     {
2453         global->setCodeAddress(code->globals.size());
2454         code->globals.push_back(0);
2455         if (global->m_fieldtype == TYPE_VECTOR) {
2456             code->globals.push_back(0);
2457             code->globals.push_back(0);
2458         }
2459     }
2460     if (global->m_code.globaladdr < 0)
2461         return false;
2462     return true;
2463 }
2464
2465 static bool gen_global_pointer(code_t *code, ir_value *global)
2466 {
2467     if (global->m_hasvalue)
2468     {
2469         ir_value *target = global->m_constval.vpointer;
2470         if (!target) {
2471             irerror(global->m_context, "Invalid pointer constant: %s", global->m_name.c_str());
2472             /* nullptr pointers are pointing to the nullptr constant, which also
2473              * sits at address 0, but still has an ir_value for itself.
2474              */
2475             return false;
2476         }
2477
2478         /* Here, relocations ARE possible - in fteqcc-enhanced-qc:
2479          * void() foo; <- proto
2480          * void() *fooptr = &foo;
2481          * void() foo = { code }
2482          */
2483         if (!target->m_code.globaladdr) {
2484             /* FIXME: Check for the constant nullptr ir_value!
2485              * because then code.globaladdr being 0 is valid.
2486              */
2487             irerror(global->m_context, "FIXME: Relocation support");
2488             return false;
2489         }
2490
2491         global->setCodeAddress(code->globals.size());
2492         code->globals.push_back(target->m_code.globaladdr);
2493     }
2494     else
2495     {
2496         global->setCodeAddress(code->globals.size());
2497         code->globals.push_back(0);
2498     }
2499     if (global->m_code.globaladdr < 0)
2500         return false;
2501     return true;
2502 }
2503
2504 static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *block)
2505 {
2506     prog_section_statement_t stmt;
2507     ir_instr *instr;
2508     ir_block *target;
2509     ir_block *ontrue;
2510     ir_block *onfalse;
2511     size_t    stidx;
2512     size_t    i;
2513     int       j;
2514
2515     block->m_generated = true;
2516     block->m_code_start = code->statements.size();
2517     for (i = 0; i < vec_size(block->m_instr); ++i)
2518     {
2519         instr = block->m_instr[i];
2520
2521         if (instr->m_opcode == VINSTR_PHI) {
2522             irerror(block->m_context, "cannot generate virtual instruction (phi)");
2523             return false;
2524         }
2525
2526         if (instr->m_opcode == VINSTR_JUMP) {
2527             target = instr->m_bops[0];
2528             /* for unconditional jumps, if the target hasn't been generated
2529              * yet, we generate it right here.
2530              */
2531             if (!target->m_generated)
2532                 return gen_blocks_recursive(code, func, target);
2533
2534             /* otherwise we generate a jump instruction */
2535             stmt.opcode = INSTR_GOTO;
2536             stmt.o1.s1 = target->m_code_start - code->statements.size();
2537             stmt.o2.s1 = 0;
2538             stmt.o3.s1 = 0;
2539             if (stmt.o1.s1 != 1)
2540                 code_push_statement(code, &stmt, instr->m_context);
2541
2542             /* no further instructions can be in this block */
2543             return true;
2544         }
2545
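        /* Hedged note: the QCVM has no XOR opcode, so the virtual BITXOR
         * below is lowered via the identity  a ^ b == (a | b) - (a & b),
         * which holds for the integer-valued floats these bit operations
         * work on; the AND result goes through the builder's vinstr_temp. */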
2546         if (instr->m_opcode == VINSTR_BITXOR) {
2547             stmt.opcode = INSTR_BITOR;
2548             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2549             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2550             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2551             code_push_statement(code, &stmt, instr->m_context);
2552             stmt.opcode = INSTR_BITAND;
2553             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2554             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2555             stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2556             code_push_statement(code, &stmt, instr->m_context);
2557             stmt.opcode = INSTR_SUB_F;
2558             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2559             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2560             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2561             code_push_statement(code, &stmt, instr->m_context);
2562
2563             /* instruction generated */
2564             continue;
2565         }
2566
2567         if (instr->m_opcode == VINSTR_BITAND_V) {
2568             stmt.opcode = INSTR_BITAND;
2569             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2570             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2571             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2572             code_push_statement(code, &stmt, instr->m_context);
2573             ++stmt.o1.s1;
2574             ++stmt.o2.s1;
2575             ++stmt.o3.s1;
2576             code_push_statement(code, &stmt, instr->m_context);
2577             ++stmt.o1.s1;
2578             ++stmt.o2.s1;
2579             ++stmt.o3.s1;
2580             code_push_statement(code, &stmt, instr->m_context);
2581
2582             /* instruction generated */
2583             continue;
2584         }
2585
2586         if (instr->m_opcode == VINSTR_BITOR_V) {
2587             stmt.opcode = INSTR_BITOR;
2588             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2589             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2590             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2591             code_push_statement(code, &stmt, instr->m_context);
2592             ++stmt.o1.s1;
2593             ++stmt.o2.s1;
2594             ++stmt.o3.s1;
2595             code_push_statement(code, &stmt, instr->m_context);
2596             ++stmt.o1.s1;
2597             ++stmt.o2.s1;
2598             ++stmt.o3.s1;
2599             code_push_statement(code, &stmt, instr->m_context);
2600
2601             /* instruction generated */
2602             continue;
2603         }
2604
2605         if (instr->m_opcode == VINSTR_BITXOR_V) {
2606             for (j = 0; j < 3; ++j) {
2607                 stmt.opcode = INSTR_BITOR;
2608                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2609                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2610                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2611                 code_push_statement(code, &stmt, instr->m_context);
2612                 stmt.opcode = INSTR_BITAND;
2613                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2614                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2615                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2616                 code_push_statement(code, &stmt, instr->m_context);
2617             }
2618             stmt.opcode = INSTR_SUB_V;
2619             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2620             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2621             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2622             code_push_statement(code, &stmt, instr->m_context);
2623
2624             /* instruction generated */
2625             continue;
2626         }
2627
2628         if (instr->m_opcode == VINSTR_BITAND_VF) {
2629             stmt.opcode = INSTR_BITAND;
2630             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2631             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2632             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2633             code_push_statement(code, &stmt, instr->m_context);
2634             ++stmt.o1.s1;
2635             ++stmt.o3.s1;
2636             code_push_statement(code, &stmt, instr->m_context);
2637             ++stmt.o1.s1;
2638             ++stmt.o3.s1;
2639             code_push_statement(code, &stmt, instr->m_context);
2640
2641             /* instruction generated */
2642             continue;
2643         }
2644
2645         if (instr->m_opcode == VINSTR_BITOR_VF) {
2646             stmt.opcode = INSTR_BITOR;
2647             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2648             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2649             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2650             code_push_statement(code, &stmt, instr->m_context);
2651             ++stmt.o1.s1;
2652             ++stmt.o3.s1;
2653             code_push_statement(code, &stmt, instr->m_context);
2654             ++stmt.o1.s1;
2655             ++stmt.o3.s1;
2656             code_push_statement(code, &stmt, instr->m_context);
2657
2658             /* instruction generated */
2659             continue;
2660         }
2661
2662         if (instr->m_opcode == VINSTR_BITXOR_VF) {
2663             for (j = 0; j < 3; ++j) {
2664                 stmt.opcode = INSTR_BITOR;
2665                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2666                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2667                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2668                 code_push_statement(code, &stmt, instr->m_context);
2669                 stmt.opcode = INSTR_BITAND;
2670                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2671                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2672                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2673                 code_push_statement(code, &stmt, instr->m_context);
2674             }
2675             stmt.opcode = INSTR_SUB_V;
2676             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2677             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2678             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2679             code_push_statement(code, &stmt, instr->m_context);
2680
2681             /* instruction generated */
2682             continue;
2683         }
2684
2685         if (instr->m_opcode == VINSTR_CROSS) {
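        /* Illustrative expansion of the cross product, per component j:
         *     out[j] = a[(j+1)%3] * b[(j+2)%3]
         *     tmp[j] = a[(j+2)%3] * b[(j+1)%3]
         *     out    = out - tmp            (one SUB_V at the end)
         * spelled out with MUL_F since the QCVM has no cross instruction. */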
2686             stmt.opcode = INSTR_MUL_F;
2687             for (j = 0; j < 3; ++j) {
2688                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 1) % 3;
2689                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 2) % 3;
2690                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2691                 code_push_statement(code, &stmt, instr->m_context);
2692                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 2) % 3;
2693                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 1) % 3;
2694                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2695                 code_push_statement(code, &stmt, instr->m_context);
2696             }
2697             stmt.opcode = INSTR_SUB_V;
2698             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2699             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2700             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2701             code_push_statement(code, &stmt, instr->m_context);
2702
2703             /* instruction generated */
2704             continue;
2705         }
2706
2707         if (instr->m_opcode == VINSTR_COND) {
2708             ontrue  = instr->m_bops[0];
2709             onfalse = instr->m_bops[1];
2710             /* TODO: have the AST signal which block should
2711              * come first: eg. optimize IFs without ELSE...
2712              */
2713
2714             stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2715             stmt.o2.u1 = 0;
2716             stmt.o3.s1 = 0;
2717
2718             if (ontrue->m_generated) {
2719                 stmt.opcode = INSTR_IF;
2720                 stmt.o2.s1 = ontrue->m_code_start - code->statements.size();
2721                 if (stmt.o2.s1 != 1)
2722                     code_push_statement(code, &stmt, instr->m_context);
2723             }
2724             if (onfalse->m_generated) {
2725                 stmt.opcode = INSTR_IFNOT;
2726                 stmt.o2.s1 = onfalse->m_code_start - code->statements.size();
2727                 if (stmt.o2.s1 != 1)
2728                     code_push_statement(code, &stmt, instr->m_context);
2729             }
2730             if (!ontrue->m_generated) {
2731                 if (onfalse->m_generated)
2732                     return gen_blocks_recursive(code, func, ontrue);
2733             }
2734             if (!onfalse->m_generated) {
2735                 if (ontrue->m_generated)
2736                     return gen_blocks_recursive(code, func, onfalse);
2737             }
2738             /* neither ontrue nor onfalse exist */
2739             stmt.opcode = INSTR_IFNOT;
2740             if (!instr->m_likely) {
2741                 /* Honor the likelihood hint */
2742                 ir_block *tmp = onfalse;
2743                 stmt.opcode = INSTR_IF;
2744                 onfalse = ontrue;
2745                 ontrue = tmp;
2746             }
2747             stidx = code->statements.size();
2748             code_push_statement(code, &stmt, instr->m_context);
2749             /* on false we jump, so add ontrue-path */
2750             if (!gen_blocks_recursive(code, func, ontrue))
2751                 return false;
2752             /* fixup the jump address */
2753             code->statements[stidx].o2.s1 = code->statements.size() - stidx;
2754             /* generate onfalse path */
2755             if (onfalse->m_generated) {
2756                 /* fixup the jump address */
2757                 code->statements[stidx].o2.s1 = onfalse->m_code_start - stidx;
2758                 if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2759                     code->statements[stidx] = code->statements[stidx+1];
2760                     if (code->statements[stidx].o1.s1 < 0)
2761                         code->statements[stidx].o1.s1++;
2762                     code_pop_statement(code);
2763                 }
2764                 stmt.opcode = code->statements.back().opcode;
2765                 if (stmt.opcode == INSTR_GOTO ||
2766                     stmt.opcode == INSTR_IF ||
2767                     stmt.opcode == INSTR_IFNOT ||
2768                     stmt.opcode == INSTR_RETURN ||
2769                     stmt.opcode == INSTR_DONE)
2770                 {
2771                     /* no use jumping from here */
2772                     return true;
2773                 }
2774                 /* may have been generated in the previous recursive call */
2775                 stmt.opcode = INSTR_GOTO;
2776                 stmt.o1.s1 = onfalse->m_code_start - code->statements.size();
2777                 stmt.o2.s1 = 0;
2778                 stmt.o3.s1 = 0;
2779                 if (stmt.o1.s1 != 1)
2780                     code_push_statement(code, &stmt, instr->m_context);
2781                 return true;
2782             }
2783             else if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2784                 code->statements[stidx] = code->statements[stidx+1];
2785                 if (code->statements[stidx].o1.s1 < 0)
2786                     code->statements[stidx].o1.s1++;
2787                 code_pop_statement(code);
2788             }
2789             /* if not, generate now */
2790             return gen_blocks_recursive(code, func, onfalse);
2791         }
2792
2793         if ( (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8)
2794            || instr->m_opcode == VINSTR_NRCALL)
2795         {
2796             size_t p, first;
2797             ir_value *retvalue;
2798
2799             first = instr->m_params.size();
2800             if (first > 8)
2801                 first = 8;
2802             for (p = 0; p < first; ++p)
2803             {
2804                 ir_value *param = instr->m_params[p];
2805                 if (param->m_callparam)
2806                     continue;
2807
2808                 stmt.opcode = INSTR_STORE_F;
2809                 stmt.o3.u1 = 0;
2810
2811                 if (param->m_vtype == TYPE_FIELD)
2812                     stmt.opcode = field_store_instr[param->m_fieldtype];
2813                 else if (param->m_vtype == TYPE_NIL)
2814                     stmt.opcode = INSTR_STORE_V;
2815                 else
2816                     stmt.opcode = type_store_instr[param->m_vtype];
2817                 stmt.o1.u1 = param->codeAddress();
2818                 stmt.o2.u1 = OFS_PARM0 + 3 * p;
2819
2820                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2821                     /* fetch 3 separate floats */
2822                     stmt.opcode = INSTR_STORE_F;
2823                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2824                     code_push_statement(code, &stmt, instr->m_context);
2825                     stmt.o2.u1++;
2826                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2827                     code_push_statement(code, &stmt, instr->m_context);
2828                     stmt.o2.u1++;
2829                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2830                     code_push_statement(code, &stmt, instr->m_context);
2831                 }
2832                 else
2833                     code_push_statement(code, &stmt, instr->m_context);
2834             }
2835             /* Now handle extparams */
2836             first = instr->m_params.size();
2837             for (; p < first; ++p)
2838             {
2839                 ir_builder *ir = func->m_owner;
2840                 ir_value *param = instr->m_params[p];
2841                 ir_value *targetparam;
2842
2843                 if (param->m_callparam)
2844                     continue;
2845
2846                 if (p-8 >= ir->m_extparams.size())
2847                     ir->generateExtparam();
2848
2849                 targetparam = ir->m_extparams[p-8];
2850
2851                 stmt.opcode = INSTR_STORE_F;
2852                 stmt.o3.u1 = 0;
2853
2854                 if (param->m_vtype == TYPE_FIELD)
2855                     stmt.opcode = field_store_instr[param->m_fieldtype];
2856                 else if (param->m_vtype == TYPE_NIL)
2857                     stmt.opcode = INSTR_STORE_V;
2858                 else
2859                     stmt.opcode = type_store_instr[param->m_vtype];
2860                 stmt.o1.u1 = param->codeAddress();
2861                 stmt.o2.u1 = targetparam->codeAddress();
2862                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2863                     /* fetch 3 separate floats */
2864                     stmt.opcode = INSTR_STORE_F;
2865                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2866                     code_push_statement(code, &stmt, instr->m_context);
2867                     stmt.o2.u1++;
2868                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2869                     code_push_statement(code, &stmt, instr->m_context);
2870                     stmt.o2.u1++;
2871                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2872                     code_push_statement(code, &stmt, instr->m_context);
2873                 }
2874                 else
2875                     code_push_statement(code, &stmt, instr->m_context);
2876             }
2877
2878             stmt.opcode = INSTR_CALL0 + instr->m_params.size();
2879             if (stmt.opcode > INSTR_CALL8)
2880                 stmt.opcode = INSTR_CALL8;
2881             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2882             stmt.o2.u1 = 0;
2883             stmt.o3.u1 = 0;
2884             code_push_statement(code, &stmt, instr->m_context);
2885
2886             retvalue = instr->_m_ops[0];
2887             if (retvalue && retvalue->m_store != store_return &&
2888                 (retvalue->m_store == store_global || retvalue->m_life.size()))
2889             {
2890                 /* not to be kept in OFS_RETURN */
2891                 if (retvalue->m_vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS))
2892                     stmt.opcode = field_store_instr[retvalue->m_fieldtype];
2893                 else
2894                     stmt.opcode = type_store_instr[retvalue->m_vtype];
2895                 stmt.o1.u1 = OFS_RETURN;
2896                 stmt.o2.u1 = retvalue->codeAddress();
2897                 stmt.o3.u1 = 0;
2898                 code_push_statement(code, &stmt, instr->m_context);
2899             }
2900             continue;
2901         }
2902
2903         if (instr->m_opcode == INSTR_STATE) {
2904             stmt.opcode = instr->m_opcode;
2905             if (instr->_m_ops[0])
2906                 stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2907             if (instr->_m_ops[1])
2908                 stmt.o2.u1 = instr->_m_ops[1]->codeAddress();
2909             stmt.o3.u1 = 0;
2910             code_push_statement(code, &stmt, instr->m_context);
2911             continue;
2912         }
2913
2914         stmt.opcode = instr->m_opcode;
2915         stmt.o1.u1 = 0;
2916         stmt.o2.u1 = 0;
2917         stmt.o3.u1 = 0;
2918
2919         /* This is the general order of operands */
2920         if (instr->_m_ops[0])
2921             stmt.o3.u1 = instr->_m_ops[0]->codeAddress();
2922
2923         if (instr->_m_ops[1])
2924             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2925
2926         if (instr->_m_ops[2])
2927             stmt.o2.u1 = instr->_m_ops[2]->codeAddress();
2928
2929         if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
2930         {
2931             stmt.o1.u1 = stmt.o3.u1;
2932             stmt.o3.u1 = 0;
2933         }
2934         else if ((stmt.opcode >= INSTR_STORE_F &&
2935                   stmt.opcode <= INSTR_STORE_FNC) ||
2936                  (stmt.opcode >= INSTR_STOREP_F &&
2937                   stmt.opcode <= INSTR_STOREP_FNC))
2938         {
2939             /* 2-operand instructions with A -> B */
2940             stmt.o2.u1 = stmt.o3.u1;
2941             stmt.o3.u1 = 0;
2942
2943             /* tiny optimization, don't output
2944              * STORE a, a
2945              */
2946             if (stmt.o2.u1 == stmt.o1.u1 &&
2947                 OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
2948             {
2949                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
2950                 continue;
2951             }
2952         }
2953         code_push_statement(code, &stmt, instr->m_context);
2954     }
2955     return true;
2956 }
2957
2958 static bool gen_function_code(code_t *code, ir_function *self)
2959 {
2960     ir_block *block;
2961     prog_section_statement_t stmt, *retst;
2962
2963     /* Starting from entry point, we generate blocks "as they come"
2964      * for now. Dead blocks obviously will not be translated.
2965      */
2966     if (self->m_blocks.empty()) {
2967         irerror(self->m_context, "Function '%s' declared without body.", self->m_name.c_str());
2968         return false;
2969     }
2970
2971     block = self->m_blocks[0].get();
2972     if (block->m_generated)
2973         return true;
2974
2975     if (!gen_blocks_recursive(code, self, block)) {
2976         irerror(self->m_context, "failed to generate blocks for '%s'", self->m_name.c_str());
2977         return false;
2978     }
2979
2980     /* code_write and qcvm -disasm need to know that the function ends here */
2981     retst = &code->statements.back();
2982     if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) &&
2983         self->m_outtype == TYPE_VOID &&
2984         retst->opcode == INSTR_RETURN &&
2985         !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1)
2986     {
2987         retst->opcode = INSTR_DONE;
2988         ++opts_optimizationcount[OPTIM_VOID_RETURN];
2989     } else {
2990         lex_ctx_t last;
2991
2992         stmt.opcode = INSTR_DONE;
2993         stmt.o1.u1  = 0;
2994         stmt.o2.u1  = 0;
2995         stmt.o3.u1  = 0;
2996         last.line   = code->linenums.back();
2997         last.column = code->columnnums.back();
2998
2999         code_push_statement(code, &stmt, last);
3000     }
3001     return true;
3002 }
3003
3004 qcint_t ir_builder::filestring(const char *filename)
3005 {
3006     /* NOTE: filename pointers are copied, we never strdup them;
3007      * the lookup below compares the stored names to find the string.
3008      */
3009     qcint_t  str;
3010
3011     for (size_t i = 0; i != m_filenames.size(); ++i) {
3012         if (!strcmp(m_filenames[i], filename))
3013             return i;
3014     }
3015
3016     str = code_genstring(m_code.get(), filename);
3017     m_filenames.push_back(filename);
3018     m_filestrings.push_back(str);
3019     return str;
3020 }
3021
3022 bool ir_builder::generateGlobalFunction(ir_value *global)
3023 {
3024     prog_section_function_t fun;
3025     ir_function            *irfun;
3026
3027     size_t i;
3028
3029     if (!global->m_hasvalue || (!global->m_constval.vfunc)) {
3030         irerror(global->m_context, "Invalid state of function-global: not constant: %s", global->m_name.c_str());
3031         return false;
3032     }
3033
3034     irfun = global->m_constval.vfunc;
3035     fun.name = global->m_code.name;
3036     fun.file = filestring(global->m_context.file);
3037     fun.profile = 0; /* always 0 */
3038     fun.nargs = vec_size(irfun->m_params);
3039     if (fun.nargs > 8)
3040         fun.nargs = 8;
3041
3042     for (i = 0; i < 8; ++i) {
3043         if ((int32_t)i >= fun.nargs)
3044             fun.argsize[i] = 0;
3045         else
3046             fun.argsize[i] = type_sizeof_[irfun->m_params[i]];
3047     }
3048
3049     fun.firstlocal = 0;
3050     fun.locals = irfun->m_allocated_locals;
3051
3052     if (irfun->m_builtin)
3053         fun.entry = irfun->m_builtin+1;
3054     else {
3055         irfun->m_code_function_def = m_code->functions.size();
3056         fun.entry = m_code->statements.size();
3057     }
3058
3059     m_code->functions.push_back(fun);
3060     return true;
3061 }
3062
3063 ir_value* ir_builder::generateExtparamProto()
3064 {
3065     char      name[128];
3066
3067     util_snprintf(name, sizeof(name), "EXTPARM#%i", (int)(m_extparam_protos.size()));
3068     ir_value *global = new ir_value(name, store_global, TYPE_VECTOR);
3069     m_extparam_protos.emplace_back(global);
3070
3071     return global;
3072 }
3073
3074 void ir_builder::generateExtparam()
3075 {
3076     prog_section_def_t def;
3077     ir_value          *global;
3078
3079     if (m_extparam_protos.size() < m_extparams.size()+1)
3080         global = generateExtparamProto();
3081     else
3082         global = m_extparam_protos[m_extparams.size()].get();
3083
3084     def.name = code_genstring(m_code.get(), global->m_name.c_str());
3085     def.type = TYPE_VECTOR;
3086     def.offset = m_code->globals.size();
3087
3088     m_code->defs.push_back(def);
3089
3090     global->setCodeAddress(def.offset);
3091
3092     m_code->globals.push_back(0);
3093     m_code->globals.push_back(0);
3094     m_code->globals.push_back(0);
3095
3096     m_extparams.emplace_back(global);
3097 }
3098
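/* Hedged note on extparams: the QCVM only provides OFS_PARM0..OFS_PARM7,
 * so arguments beyond the 8th travel through dedicated EXTPARM#n globals
 * (see generateExtparam() above).  The copy emitted below moves each
 * EXTPARM back into the function's own local at entry, e.g.
 *     STORE_F  EXTPARM#0 -> 9th parameter's local
 */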
3099 static bool gen_function_extparam_copy(code_t *code, ir_function *self)
3100 {
3101     ir_builder *ir = self->m_owner;
3102
3103     size_t numparams = vec_size(self->m_params);
3104     if (!numparams)
3105         return true;
3106
3107     prog_section_statement_t stmt;
3108     stmt.opcode = INSTR_STORE_F;
3109     stmt.o3.s1 = 0;
3110     for (size_t i = 8; i < numparams; ++i) {
3111         size_t ext = i - 8;
3112         if (ext >= ir->m_extparams.size())
3113             ir->generateExtparam();
3114
3115         ir_value *ep = ir->m_extparams[ext];
3116
3117         stmt.opcode = type_store_instr[self->m_locals[i]->m_vtype];
3118         if (self->m_locals[i]->m_vtype == TYPE_FIELD &&
3119             self->m_locals[i]->m_fieldtype == TYPE_VECTOR)
3120         {
3121             stmt.opcode = INSTR_STORE_V;
3122         }
3123         stmt.o1.u1 = ep->codeAddress();
3124         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3125         code_push_statement(code, &stmt, self->m_context);
3126     }
3127
3128     return true;
3129 }
3130
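/* Similar sketch for varargs: each extra slot is copied as a full vector
 * (STORE_V), slots below 8 straight from OFS_PARM0 + 3*i, the rest from
 * the EXTPARM globals, for up to m_max_varargs slots past the declared
 * parameters.
 */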
3131 static bool gen_function_varargs_copy(code_t *code, ir_function *self)
3132 {
3133     size_t i, ext, numparams, maxparams;
3134
3135     ir_builder *ir = self->m_owner;
3136     ir_value   *ep;
3137     prog_section_statement_t stmt;
3138
3139     numparams = vec_size(self->m_params);
3140     if (!numparams)
3141         return true;
3142
3143     stmt.opcode = INSTR_STORE_V;
3144     stmt.o3.s1 = 0;
3145     maxparams = numparams + self->m_max_varargs;
3146     for (i = numparams; i < maxparams; ++i) {
3147         if (i < 8) {
3148             stmt.o1.u1 = OFS_PARM0 + 3*i;
3149             stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3150             code_push_statement(code, &stmt, self->m_context);
3151             continue;
3152         }
3153         ext = i - 8;
3154         while (ext >= ir->m_extparams.size())
3155             ir->generateExtparam();
3156
3157         ep = ir->m_extparams[ext];
3158
3159         stmt.o1.u1 = ep->codeAddress();
3160         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3161         code_push_statement(code, &stmt, self->m_context);
3162     }
3163
3164     return true;
3165 }
3166
3167 bool ir_builder::generateFunctionLocals(ir_value *global)
3168 {
3169     prog_section_function_t *def;
3170     ir_function             *irfun;
3171     uint32_t                 firstlocal, firstglobal;
3172
3173     irfun = global->m_constval.vfunc;
3174     def   = &m_code->functions[0] + irfun->m_code_function_def;
3175
3176     if (OPTS_OPTION_BOOL(OPTION_G) ||
3177         !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS)        ||
3178         (irfun->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3179     {
3180         firstlocal = def->firstlocal = m_code->globals.size();
3181     } else {
3182         firstlocal = def->firstlocal = m_first_common_local;
3183         ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
3184     }
3185
3186     firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? m_first_common_globaltemp : firstlocal);
3187
3188     for (size_t i = m_code->globals.size(); i < firstlocal + irfun->m_allocated_locals; ++i)
3189         m_code->globals.push_back(0);
3190
3191     for (auto& lp : irfun->m_locals) {
3192         ir_value *v = lp.get();
3193         if (v->m_locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
3194             v->setCodeAddress(firstlocal + v->m_code.local);
3195             if (!generateGlobal(v, true)) {
3196                 irerror(v->m_context, "failed to generate local %s", v->m_name.c_str());
3197                 return false;
3198             }
3199         }
3200         else
3201             v->setCodeAddress(firstglobal + v->m_code.local);
3202     }
3203     for (auto& vp : irfun->m_values) {
3204         ir_value *v = vp.get();
3205         if (v->m_callparam)
3206             continue;
3207         if (v->m_locked)
3208             v->setCodeAddress(firstlocal + v->m_code.local);
3209         else
3210             v->setCodeAddress(firstglobal + v->m_code.local);
3211     }
3212     return true;
3213 }
3214
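/* Emit the code for one function global: bodiless functions are treated
 * as function pointers (possibly with a warning), builtins and erasable
 * functions without a def are skipped, everything else gets its entry
 * point recorded followed by locals, extparam/vararg copies and the
 * actual statements.
 */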
3215 bool ir_builder::generateGlobalFunctionCode(ir_value *global)
3216 {
3217     prog_section_function_t *fundef;
3218     ir_function             *irfun;
3219
3220     irfun = global->m_constval.vfunc;
3221     if (!irfun) {
3222         if (global->m_cvq == CV_NONE) {
3223             if (irwarning(global->m_context, WARN_IMPLICIT_FUNCTION_POINTER,
3224                           "function `%s` has no body and in QC implicitly becomes a function-pointer",
3225                           global->m_name.c_str()))
3226             {
3227                 /* Not bailing out just now. If this happens a lot you don't want to have
3228                  * to rerun gmqcc for each such function.
3229                  */
3230
3231                 /* return false; */
3232             }
3233         }
3234         /* this was a function pointer, don't generate code for those */
3235         return true;
3236     }
3237
3238     if (irfun->m_builtin)
3239         return true;
3240
3241     /*
3242      * If there is no definition and the function is erasable, we can skip
3243      * emitting it entirely.
3244      */
3245     if (global->m_flags & IR_FLAG_ERASABLE && irfun->m_code_function_def < 0) {
3246         return true;
3247     }
3248
3249     if (irfun->m_code_function_def < 0) {
3250         irerror(irfun->m_context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->m_name.c_str());
3251         return false;
3252     }
3253     fundef = &m_code->functions[irfun->m_code_function_def];
3254
3255     fundef->entry = m_code->statements.size();
3256     if (!generateFunctionLocals(global)) {
3257         irerror(irfun->m_context, "Failed to generate locals for function %s", irfun->m_name.c_str());
3258         return false;
3259     }
3260     if (!gen_function_extparam_copy(m_code.get(), irfun)) {
3261         irerror(irfun->m_context, "Failed to generate extparam-copy code for function %s", irfun->m_name.c_str());
3262         return false;
3263     }
3264     if (irfun->m_max_varargs && !gen_function_varargs_copy(m_code.get(), irfun)) {
3265         irerror(irfun->m_context, "Failed to generate vararg-copy code for function %s", irfun->m_name.c_str());
3266         return false;
3267     }
3268     if (!gen_function_code(m_code.get(), irfun)) {
3269         irerror(irfun->m_context, "Failed to generate code for function %s", irfun->m_name.c_str());
3270         return false;
3271     }
3272     return true;
3273 }
3274
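/* Emit the component defs for a vector def unless SINGLE_VECTOR_DEFS is
 * requested or the def is an immediate ('#...').  For a vector "origin"
 * at offset N this produces float defs "origin_x" @ N, "origin_y" @ N+1
 * and "origin_z" @ N+2.
 */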
3275 static void gen_vector_defs(code_t *code, prog_section_def_t def, const char *name)
3276 {
3277     char  *component;
3278     size_t len, i;
3279
3280     if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3281         return;
3282
3283     def.type = TYPE_FLOAT;
3284
3285     len = strlen(name);
3286
3287     component = (char*)mem_a(len+3);
3288     memcpy(component, name, len);
3289     len += 2;
3290     component[len]   = 0;
3291     component[len-2] = '_';
3292
3293     component[len-1] = 'x';
3294
3295     for (i = 0; i < 3; ++i) {
3296         def.name = code_genstring(code, component);
3297         code->defs.push_back(def);
3298         def.offset++;
3299         component[len-1]++;
3300     }
3301
3302     mem_d(component);
3303 }
3304
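/* Same as gen_vector_defs, but for the fields section: emit _x/_y/_z
 * float field entries at consecutive offsets.
 */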
3305 static void gen_vector_fields(code_t *code, prog_section_field_t fld, const char *name)
3306 {
3307     char  *component;
3308     size_t len, i;
3309
3310     if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3311         return;
3312
3313     fld.type = TYPE_FLOAT;
3314
3315     len = strlen(name);
3316
3317     component = (char*)mem_a(len+3);
3318     memcpy(component, name, len);
3319     len += 2;
3320     component[len]   = 0;
3321     component[len-2] = '_';
3322
3323     component[len-1] = 'x';
3324
3325     for (i = 0; i < 3; ++i) {
3326         fld.name = code_genstring(code, component);
3327         code->fields.push_back(fld);
3328         fld.offset++;
3329         component[len-1]++;
3330     }
3331
3332     mem_d(component);
3333 }
3334
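/* Allocate storage in the globals section for a single ir_value and,
 * unless its name is stripped, record a def for it.  Split constant
 * vectors are skipped here because only their reused float components
 * get emitted.  For locals (islocal) only the def is written; the
 * storage was already reserved by generateFunctionLocals.
 */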
3335 bool ir_builder::generateGlobal(ir_value *global, bool islocal)
3336 {
3337     size_t             i;
3338     int32_t           *iptr;
3339     prog_section_def_t def;
3340     bool               pushdef = opts.optimizeoff;
3341
3342     /* we don't generate split-vectors */
3343     if (global->m_vtype == TYPE_VECTOR && (global->m_flags & IR_FLAG_SPLIT_VECTOR))
3344         return true;
3345
3346     def.type = global->m_vtype;
3347     def.offset = m_code->globals.size();
3348     def.name = 0;
3349     if (OPTS_OPTION_BOOL(OPTION_G) || !islocal)
3350     {
3351         pushdef = true;
3352
3353         /*
3354          * if the global is erasable and never read, ignore outputting
3355          * it entirely.
3356          */
3357         if (global->m_flags & IR_FLAG_ERASABLE && global->m_reads.empty()) {
3358             return true;
3359         }
3360
3361         if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) &&
3362             !(global->m_flags & IR_FLAG_INCLUDE_DEF) &&
3363             (global->m_name[0] == '#' || global->m_cvq == CV_CONST))
3364         {
3365             pushdef = false;
3366         }
3367
3368         if (pushdef) {
3369             if (global->m_name[0] == '#') {
3370                 if (!m_str_immediate)
3371                     m_str_immediate = code_genstring(m_code.get(), "IMMEDIATE");
3372                 def.name = global->m_code.name = m_str_immediate;
3373             }
3374             else
3375                 def.name = global->m_code.name = code_genstring(m_code.get(), global->m_name.c_str());
3376         }
3377         else
3378             def.name   = 0;
3379         if (islocal) {
3380             def.offset = global->codeAddress();
3381             m_code->defs.push_back(def);
3382             if (global->m_vtype == TYPE_VECTOR)
3383                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3384             else if (global->m_vtype == TYPE_FIELD && global->m_fieldtype == TYPE_VECTOR)
3385                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3386             return true;
3387         }
3388     }
3389     if (islocal)
3390         return true;
3391
3392     switch (global->m_vtype)
3393     {
3394     case TYPE_VOID:
3395         if (0 == global->m_name.compare("end_sys_globals")) {
3396             // TODO: remember this point... all the defs before this one
3397             // should be checksummed and added to progdefs.h when we generate it.
3398         }
3399         else if (0 == global->m_name.compare("end_sys_fields")) {
3400             // TODO: same as above but for entity-fields rather than globals
3401         }
3402         else if (irwarning(global->m_context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`",
3403                           global->m_name.c_str()))
3404         {
3405             /* Not bailing out */
3406             /* return false; */
3407         }
3408         /* I'd argue setting it to 0 is sufficient, but maybe some depend on knowing how far
3409          * the system fields actually go? Though the engine knows this anyway...
3410          * Maybe this could be an -f option.
3411          * fteqcc creates data of size 1 for end_sys_*, so let's do the same.
3412          */
3413         global->setCodeAddress(m_code->globals.size());
3414         m_code->globals.push_back(0);
3415         /* Add the def */
3416         if (pushdef)
3417             m_code->defs.push_back(def);
3418         return true;
3419     case TYPE_POINTER:
3420         if (pushdef)
3421             m_code->defs.push_back(def);
3422         return gen_global_pointer(m_code.get(), global);
3423     case TYPE_FIELD:
3424         if (pushdef) {
3425             m_code->defs.push_back(def);
3426             if (global->m_fieldtype == TYPE_VECTOR)
3427                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3428         }
3429         return gen_global_field(m_code.get(), global);
3430     case TYPE_ENTITY:
3431         /* fall through */
3432     case TYPE_FLOAT:
3433     {
3434         global->setCodeAddress(m_code->globals.size());
3435         if (global->m_hasvalue) {
3436             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3437                 return true;
3438             iptr = (int32_t*)&global->m_constval.ivec[0];
3439             m_code->globals.push_back(*iptr);
3440         } else {
3441             m_code->globals.push_back(0);
3442         }
3443         if (!islocal && global->m_cvq != CV_CONST)
3444             def.type |= DEF_SAVEGLOBAL;
3445         if (pushdef)
3446             m_code->defs.push_back(def);
3447
3448         return global->m_code.globaladdr >= 0;
3449     }
3450     case TYPE_STRING:
3451     {
3452         global->setCodeAddress(m_code->globals.size());
3453         if (global->m_hasvalue) {
3454             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3455                 return true;
3456             uint32_t load = code_genstring(m_code.get(), global->m_constval.vstring);
3457             m_code->globals.push_back(load);
3458         } else {
3459             m_code->globals.push_back(0);
3460         }
3461         if (!islocal && global->m_cvq != CV_CONST)
3462             def.type |= DEF_SAVEGLOBAL;
3463         if (pushdef)
3464             m_code->defs.push_back(def);
3465         return global->m_code.globaladdr >= 0;
3466     }
3467     case TYPE_VECTOR:
3468     {
3469         size_t d;
3470         global->setCodeAddress(m_code->globals.size());
3471         if (global->m_hasvalue) {
3472             iptr = (int32_t*)&global->m_constval.ivec[0];
3473             m_code->globals.push_back(iptr[0]);
3474             if (global->m_code.globaladdr < 0)
3475                 return false;
3476             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3477                 m_code->globals.push_back(iptr[d]);
3478             }
3479         } else {
3480             m_code->globals.push_back(0);
3481             if (global->m_code.globaladdr < 0)
3482                 return false;
3483             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3484                 m_code->globals.push_back(0);
3485             }
3486         }
3487         if (!islocal && global->m_cvq != CV_CONST)
3488             def.type |= DEF_SAVEGLOBAL;
3489
3490         if (pushdef) {
3491             m_code->defs.push_back(def);
3492             def.type &= ~DEF_SAVEGLOBAL;
3493             gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3494         }
3495         return global->m_code.globaladdr >= 0;
3496     }
3497     case TYPE_FUNCTION:
3498         global->setCodeAddress(m_code->globals.size());
3499         if (!global->m_hasvalue) {
3500             m_code->globals.push_back(0);
3501             if (global->m_code.globaladdr < 0)
3502                 return false;
3503         } else {
3504             m_code->globals.push_back(m_code->functions.size());
3505             if (!generateGlobalFunction(global))
3506                 return false;
3507         }
3508         if (!islocal && global->m_cvq != CV_CONST)
3509             def.type |= DEF_SAVEGLOBAL;
3510         if (pushdef)
3511             m_code->defs.push_back(def);
3512         return true;
3513     case TYPE_VARIANT:
3514         /* assume biggest type */
3515         global->setCodeAddress(m_code->globals.size());
3516         m_code->globals.push_back(0);
3517         for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i)
3518             m_code->globals.push_back(0);
3519         return true;
3520     default:
3521         /* refuse to create 'void' type or any other fancy business. */
3522         irerror(global->m_context, "Invalid type for global variable `%s`: %s",
3523                 global->m_name.c_str(), type_name[global->m_vtype]);
3524         return false;
3525     }
3526 }
3527
3528 static GMQCC_INLINE void ir_builder_prepare_field(code_t *code, ir_value *field)
3529 {
3530     field->m_code.fieldaddr = code_alloc_field(code, type_sizeof_[field->m_fieldtype]);
3531 }
3532
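/* Emit the def, the field entry and the offset-holding global for one
 * entity field.  Under -std=gmqcc the global is named ".fieldname" and
 * the field entry reuses that string without the leading dot; vector
 * fields additionally get three consecutive offset globals plus _x/_y/_z
 * defs and field entries.
 */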
3533 static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
3534 {
3535     prog_section_def_t def;
3536     prog_section_field_t fld;
3537
3540     def.type   = (uint16_t)field->m_vtype;
3541     def.offset = (uint16_t)self->m_code->globals.size();
3542
3543     /* create a global named the same as the field */
3544     if (OPTS_OPTION_U32(OPTION_STANDARD) == COMPILER_GMQCC) {
3545         /* in our standard, the global gets a dot prefix */
3546         size_t len = field->m_name.length();
3547         char name[1024];
3548
3549         /* we really don't want to have to allocate this, and 1024
3550          * bytes is more than enough for a variable/field name
3551          */
3552         if (len+2 >= sizeof(name)) {
3553             irerror(field->m_context, "invalid field name size: %u", (unsigned int)len);
3554             return false;
3555         }
3556
3557         name[0] = '.';
3558         memcpy(name+1, field->m_name.c_str(), len); // no strncpy - we used strlen above
3559         name[len+1] = 0;
3560
3561         def.name = code_genstring(self->m_code.get(), name);
3562         fld.name = def.name + 1; /* we reuse that string table entry */
3563     } else {
3564         /* in plain QC there cannot be a global with the same name,
3565          * so we name the global exactly like the field.
3566          * FIXME: fteqcc creates such a global as well; check whether it
3567          * actually uses the same name (it probably does).
3568          */
3569         def.name = code_genstring(self->m_code.get(), field->m_name.c_str());
3570         fld.name = def.name;
3571     }
3572
3573     field->m_code.name = def.name;
3574
3575     self->m_code->defs.push_back(def);
3576
3577     fld.type = field->m_fieldtype;
3578
3579     if (fld.type == TYPE_VOID) {
3580         irerror(field->m_context, "field is missing a type: %s - don't know its size", field->m_name.c_str());
3581         return false;
3582     }
3583
3584     fld.offset = field->m_code.fieldaddr;
3585
3586     self->m_code->fields.push_back(fld);
3587
3588     field->setCodeAddress(self->m_code->globals.size());
3589     self->m_code->globals.push_back(fld.offset);
3590     if (fld.type == TYPE_VECTOR) {
3591         self->m_code->globals.push_back(fld.offset+1);
3592         self->m_code->globals.push_back(fld.offset+2);
3593     }
3594
3595     if (field->m_fieldtype == TYPE_VECTOR) {
3596         gen_vector_defs  (self->m_code.get(), def, field->m_name.c_str());
3597         gen_vector_fields(self->m_code.get(), fld, field->m_name.c_str());
3598     }
3599
3600     return field->m_code.globaladdr >= 0;
3601 }
3602
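// Collect all constant float globals (named constants and '#' immediates)
// so that vector splitting below can reuse them as components.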
3603 static void ir_builder_collect_reusables(ir_builder *builder) {
3604     std::vector<ir_value*> reusables;
3605
3606     for (auto& gp : builder->m_globals) {
3607         ir_value *value = gp.get();
3608         if (value->m_vtype != TYPE_FLOAT || !value->m_hasvalue)
3609             continue;
3610         if (value->m_cvq == CV_CONST || (value->m_name.length() >= 1 && value->m_name[0] == '#'))
3611             reusables.emplace_back(value);
3612     }
3613     builder->m_const_floats = std::move(reusables);
3614 }
3615
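// Split one constant vector immediate: if it is never written, has no
// member accesses and is only ever read as a direct call argument, flag
// it IR_FLAG_SPLIT_VECTOR and bind its members to existing (or newly
// created) float constants.  generateGlobal() then skips the vector
// itself and only the component floats are emitted.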
3616 static void ir_builder_split_vector(ir_builder *self, ir_value *vec) {
3617     ir_value* found[3] = { nullptr, nullptr, nullptr };
3618
3619     // must not be written to
3620     if (vec->m_writes.size())
3621         return;
3622     // must not be trying to access individual members
3623     if (vec->m_members[0] || vec->m_members[1] || vec->m_members[2])
3624         return;
3625     // must actually be read somewhere, otherwise it won't be generated anyway
3626     if (vec->m_reads.empty())
3627         return;
3631
3632     // the vector may only be used directly as a call argument; any other use cancels the split
3633     for (ir_instr *user : vec->m_reads) {
3634         // only CALL0..CALL8 and VINSTR_NRCALL readers are acceptable
3635         if ((user->m_opcode < INSTR_CALL0 || user->m_opcode > INSTR_CALL8) && user->m_opcode != VINSTR_NRCALL)
3636             return;
3637     }
3638
3639     vec->m_flags |= IR_FLAG_SPLIT_VECTOR;
3640
3641     // find existing floats making up the split
3642     for (ir_value *c : self->m_const_floats) {
3643         if (!found[0] && c->m_constval.vfloat == vec->m_constval.vvec.x)
3644             found[0] = c;
3645         if (!found[1] && c->m_constval.vfloat == vec->m_constval.vvec.y)
3646             found[1] = c;
3647         if (!found[2] && c->m_constval.vfloat == vec->m_constval.vvec.z)
3648             found[2] = c;
3649         if (found[0] && found[1] && found[2])
3650             break;
3651     }
3652
3653     // generate floats for not yet found components
3654     if (!found[0])
3655         found[0] = self->literalFloat(vec->m_constval.vvec.x, true);
3656     if (!found[1]) {
3657         if (vec->m_constval.vvec.y == vec->m_constval.vvec.x)
3658             found[1] = found[0];
3659         else
3660             found[1] = self->literalFloat(vec->m_constval.vvec.y, true);
3661     }
3662     if (!found[2]) {
3663         if (vec->m_constval.vvec.z == vec->m_constval.vvec.x)
3664             found[2] = found[0];
3665         else if (vec->m_constval.vvec.z == vec->m_constval.vvec.y)
3666             found[2] = found[1];
3667         else
3668             found[2] = self->literalFloat(vec->m_constval.vvec.z, true);
3669     }
3670
3671     // the .members array should be safe to use here
3672     vec->m_members[0] = found[0];
3673     vec->m_members[1] = found[1];
3674     vec->m_members[2] = found[2];
3675
3676     // register the readers for these floats
3677     found[0]->m_reads.insert(found[0]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3678     found[1]->m_reads.insert(found[1]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3679     found[2]->m_reads.insert(found[2]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3680 }
3681
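// Try to split every constant vector immediate ('#...') in the module.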
3682 static void ir_builder_split_vectors(ir_builder *self) {
3683     // member values may be added to self->m_globals during this operation, but
3684     // no new vectors will be added; iterate by index because C++ iterators
3685     // could be invalidated by the insertions
3686     const size_t count = self->m_globals.size();
3687     for (size_t i = 0; i != count; ++i) {
3688         ir_value *v = self->m_globals[i].get();
3689         if (v->m_vtype != TYPE_VECTOR || !v->m_name.length() || v->m_name[0] != '#')
3690             continue;
3691         ir_builder_split_vector(self, v);
3692     }
3693 }
3694
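/* Top-level code generation: optionally split vector immediates, lay out
 * fields and globals, emit the nil value, the virtual-instruction temps
 * and the shared temp/local regions, generate every function body, make
 * sure the program ends in INSTR_DONE, and finally write the progs file
 * (plus an optional .lno line-number file).
 */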
3695 bool ir_builder::generate(const char *filename)
3696 {
3697     prog_section_statement_t stmt;
3698     char  *lnofile = nullptr;
3699
3700     if (OPTS_FLAG(SPLIT_VECTOR_PARAMETERS)) {
3701         ir_builder_collect_reusables(this);
3702         if (!m_const_floats.empty())
3703             ir_builder_split_vectors(this);
3704     }
3705
3706     for (auto& fp : m_fields)
3707         ir_builder_prepare_field(m_code.get(), fp.get());
3708
3709     for (auto& gp : m_globals) {
3710         ir_value *global = gp.get();
3711         if (!generateGlobal(global, false)) {
3712             return false;
3713         }
3714         if (global->m_vtype == TYPE_FUNCTION) {
3715             ir_function *func = global->m_constval.vfunc;
3716             if (func && m_max_locals < func->m_allocated_locals &&
3717                 !(func->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3718             {
3719                 m_max_locals = func->m_allocated_locals;
3720             }
3721             if (func && m_max_globaltemps < func->m_globaltemps)
3722                 m_max_globaltemps = func->m_globaltemps;
3723         }
3724     }
3725
3726     for (auto& fp : m_fields) {
3727         if (!ir_builder_gen_field(this, fp.get()))
3728             return false;
3729     }
3730
3731     // generate nil
3732     m_nil->setCodeAddress(m_code->globals.size());
3733     m_code->globals.push_back(0);
3734     m_code->globals.push_back(0);
3735     m_code->globals.push_back(0);
3736
3737     // generate virtual-instruction temps
3738     for (size_t i = 0; i < IR_MAX_VINSTR_TEMPS; ++i) {
3739         m_vinstr_temp[i]->setCodeAddress(m_code->globals.size());
3740         m_code->globals.push_back(0);
3741         m_code->globals.push_back(0);
3742         m_code->globals.push_back(0);
3743     }
3744
3745     // generate global temps
3746     m_first_common_globaltemp = m_code->globals.size();
3747     m_code->globals.insert(m_code->globals.end(), m_max_globaltemps, 0);
3752     // generate common locals
3753     m_first_common_local = m_code->globals.size();
3754     m_code->globals.insert(m_code->globals.end(), m_max_locals, 0);
3759
3760     // generate function code
3761
3762     for (auto& gp : m_globals) {
3763         ir_value *global = gp.get();
3764         if (global->m_vtype == TYPE_FUNCTION) {
3765             if (!this->generateGlobalFunctionCode(global))
3766                 return false;
3767         }
3768     }
3769
3770     if (m_code->globals.size() >= 65536) {
3771         irerror(m_globals.back()->m_context,
3772             "This progs file would require more globals than the metadata can handle (%zu). Bailing out.",
3773             m_code->globals.size());
3774         return false;
3775     }
3776
3777     /* DP errors if the last instruction is not an INSTR_DONE. */
3778     if (m_code->statements.back().opcode != INSTR_DONE)
3779     {
3780         lex_ctx_t last;
3781
3782         stmt.opcode = INSTR_DONE;
3783         stmt.o1.u1  = 0;
3784         stmt.o2.u1  = 0;
3785         stmt.o3.u1  = 0;
3786         last.line   = m_code->linenums.back();
3787         last.column = m_code->columnnums.back();
3788
3789         code_push_statement(m_code.get(), &stmt, last);
3790     }
3791
3792     if (OPTS_OPTION_BOOL(OPTION_PP_ONLY))
3793         return true;
3794
3795     if (m_code->statements.size() != m_code->linenums.size()) {
3796         con_err("Linecounter wrong: %zu != %zu\n",
3797                 m_code->statements.size(),
3798                 m_code->linenums.size());
3799     } else if (OPTS_FLAG(LNO)) {
3800         char  *dot;
3801         size_t filelen = strlen(filename);
3802
3803         memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
3804         dot = strrchr(lnofile, '.');
3805         if (!dot) {
3806             vec_pop(lnofile);
3807         } else {
3808             vec_shrinkto(lnofile, dot - lnofile);
3809         }
3810         memcpy(vec_add(lnofile, 5), ".lno", 5);
3811     }
3812
3813     if (!code_write(m_code.get(), filename, lnofile)) {
3814         vec_free(lnofile);
3815         return false;
3816     }
3817
3818     vec_free(lnofile);
3819     return true;
3820 }
3821
3822 /***********************************************************************
3823  * IR DEBUG Dump functions...
3824  */
3825
3826 #define IND_BUFSZ 1024
3827
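/* Map an opcode to a printable mnemonic: real instructions come from
 * util_instr_str, virtual instructions (VINSTR_*) are named here.
 */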
3828 static const char *qc_opname(int op)
3829 {
3830     if (op < 0) return "<INVALID>";
3831     if (op < VINSTR_END)
3832         return util_instr_str[op];
3833     switch (op) {
3834         case VINSTR_END:       return "END";
3835         case VINSTR_PHI:       return "PHI";
3836         case VINSTR_JUMP:      return "JUMP";
3837         case VINSTR_COND:      return "COND";
3838         case VINSTR_BITXOR:    return "BITXOR";
3839         case VINSTR_BITAND_V:  return "BITAND_V";
3840         case VINSTR_BITOR_V:   return "BITOR_V";
3841         case VINSTR_BITXOR_V:  return "BITXOR_V";
3842         case VINSTR_BITAND_VF: return "BITAND_VF";
3843         case VINSTR_BITOR_VF:  return "BITOR_VF";
3844         case VINSTR_BITXOR_VF: return "BITXOR_VF";
3845         case VINSTR_CROSS:     return "CROSS";
3846         case VINSTR_NEG_F:     return "NEG_F";
3847         case VINSTR_NEG_V:     return "NEG_V";
3848         default:               return "<UNK>";
3849     }
3850 }
3851
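/* Debug dump of the whole module: globals first, then every function. */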
3852 void ir_builder::dump(int (*oprintf)(const char*, ...)) const
3853 {
3854     size_t i;
3855     char indent[IND_BUFSZ];
3856     indent[0] = '\t';
3857     indent[1] = 0;
3858
3859     oprintf("module %s\n", m_name.c_str());
3860     for (i = 0; i < m_globals.size(); ++i)
3861     {
3862         oprintf("global ");
3863         if (m_globals[i]->m_hasvalue)
3864             oprintf("%s = ", m_globals[i]->m_name.c_str());
3865         m_globals[i].get()->dump(oprintf);
3866         oprintf("\n");
3867     }
3868     for (i = 0; i < m_functions.size(); ++i)
3869         ir_function_dump(m_functions[i].get(), indent, oprintf);
3870     oprintf("endmodule %s\n", m_name.c_str());
3871 }
3872
3873 static const char *storenames[] = {
3874     "[global]", "[local]", "[param]", "[value]", "[return]"
3875 };
3876
3877 void ir_function_dump(ir_function *f, char *ind,
3878                       int (*oprintf)(const char*, ...))
3879 {
3880     size_t i;
3881     if (f->m_builtin != 0) {
3882         oprintf("%sfunction %s = builtin %i\n", ind, f->m_name.c_str(), -f->m_builtin);
3883         return;
3884     }
3885     oprintf("%sfunction %s\n", ind, f->m_name.c_str());
3886     util_strncat(ind, "\t", IND_BUFSZ-1);
3887     if (f->m_locals.size())
3888     {
3889         oprintf("%s%i locals:\n", ind, (int)f->m_locals.size());
3890         for (i = 0; i < f->m_locals.size(); ++i) {
3891             oprintf("%s\t", ind);
3892             f->m_locals[i].get()->dump(oprintf);
3893             oprintf("\n");
3894         }
3895     }
3896     oprintf("%sliferanges:\n", ind);
3897     for (i = 0; i < f->m_locals.size(); ++i) {
3898         const char *attr = "";
3899         size_t l, m;
3900         ir_value *v = f->m_locals[i].get();
3901         if (v->m_unique_life && v->m_locked)
3902             attr = "unique,locked ";
3903         else if (v->m_unique_life)
3904             attr = "unique ";
3905         else if (v->m_locked)
3906             attr = "locked ";
3907         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3908                 storenames[v->m_store],
3909                 attr, (v->m_callparam ? "callparam " : ""),
3910                 (int)v->m_code.local);
3911         if (v->m_life.empty())
3912             oprintf("[null]");
3913         for (l = 0; l < v->m_life.size(); ++l) {
3914             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3915         }
3916         oprintf("\n");
3917         for (m = 0; m < 3; ++m) {
3918             ir_value *vm = v->m_members[m];
3919             if (!vm)
3920                 continue;
3921             oprintf("%s\t%s: @%i ", ind, vm->m_name.c_str(), (int)vm->m_code.local);
3922             for (l = 0; l < vm->m_life.size(); ++l) {
3923                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3924             }
3925             oprintf("\n");
3926         }
3927     }
3928     for (i = 0; i < f->m_values.size(); ++i) {
3929         const char *attr = "";
3930         size_t l, m;
3931         ir_value *v = f->m_values[i].get();
3932         if (v->m_unique_life && v->m_locked)
3933             attr = "unique,locked ";
3934         else if (v->m_unique_life)
3935             attr = "unique ";
3936         else if (v->m_locked)
3937             attr = "locked ";
3938         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3939                 storenames[v->m_store],
3940                 attr, (v->m_callparam ? "callparam " : ""),
3941                 (int)v->m_code.local);
3942         if (v->m_life.empty())
3943             oprintf("[null]");
3944         for (l = 0; l < v->m_life.size(); ++l) {
3945             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3946         }
3947         oprintf("\n");
3948         for (m = 0; m < 3; ++m) {
3949             ir_value *vm = v->m_members[m];
3950             if (!vm)
3951                 continue;
3952             if (vm->m_unique_life && vm->m_locked)
3953                 attr = "unique,locked ";
3954             else if (vm->m_unique_life)
3955                 attr = "unique ";
3956             else if (vm->m_locked)
3957                 attr = "locked ";
3958             oprintf("%s\t%s: %s@%i ", ind, vm->m_name.c_str(), attr, (int)vm->m_code.local);
3959             for (l = 0; l < vm->m_life.size(); ++l) {
3960                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3961             }
3962             oprintf("\n");
3963         }
3964     }
3965     if (f->m_blocks.size())
3966     {
3967         oprintf("%slife passes: %i\n", ind, (int)f->m_run_id);
3968         for (i = 0; i < f->m_blocks.size(); ++i) {
3969             ir_block_dump(f->m_blocks[i].get(), ind, oprintf);
3970         }
3971
3972     }
3973     ind[strlen(ind)-1] = 0;
3974     oprintf("%sendfunction %s\n", ind, f->m_name.c_str());
3975 }
3976
3977 void ir_block_dump(ir_block* b, char *ind,
3978                    int (*oprintf)(const char*, ...))
3979 {
3980     size_t i;
3981     oprintf("%s:%s\n", ind, b->m_label.c_str());
3982     util_strncat(ind, "\t", IND_BUFSZ-1);
3983
3984     if (b->m_instr && b->m_instr[0])
3985         oprintf("%s (%i) [entry]\n", ind, (int)(b->m_instr[0]->m_eid-1));
3986     for (i = 0; i < vec_size(b->m_instr); ++i)
3987         ir_instr_dump(b->m_instr[i], ind, oprintf);
3988     ind[strlen(ind)-1] = 0;
3989 }
3990
3991 static void dump_phi(ir_instr *in, int (*oprintf)(const char*, ...))
3992 {
3993     oprintf("%s <- phi ", in->_m_ops[0]->m_name.c_str());
3994     for (auto &it : in->m_phi) {
3995         oprintf("([%s] : %s) ", it.from->m_label.c_str(),
3996                                 it.value->m_name.c_str());
3997     }
3998     oprintf("\n");
3999 }
4000
4001 void ir_instr_dump(ir_instr *in, char *ind,
4002                        int (*oprintf)(const char*, ...))
4003 {
4004     size_t i;
4005     const char *comma = nullptr;
4006
4007     oprintf("%s (%i) ", ind, (int)in->m_eid);
4008
4009     if (in->m_opcode == VINSTR_PHI) {
4010         dump_phi(in, oprintf);
4011         return;
4012     }
4013
4014     util_strncat(ind, "\t", IND_BUFSZ-1);
4015
4016     if (in->_m_ops[0] && (in->_m_ops[1] || in->_m_ops[2])) {
4017         in->_m_ops[0]->dump(oprintf);
4018         if (in->_m_ops[1] || in->_m_ops[2])
4019             oprintf(" <- ");
4020     }
4021     if (in->m_opcode == INSTR_CALL0 || in->m_opcode == VINSTR_NRCALL) {
4022         oprintf("CALL%i\t", (int)in->m_params.size());
4023     } else
4024         oprintf("%s\t", qc_opname(in->m_opcode));
4025
4026     if (in->_m_ops[0] && !(in->_m_ops[1] || in->_m_ops[2])) {
4027         in->_m_ops[0]->dump(oprintf);
4028         comma = ",\t";
4029     }
4030     else
4031     {
4032         for (i = 1; i != 3; ++i) {
4033             if (in->_m_ops[i]) {
4034                 if (comma)
4035                     oprintf(comma);
4036                 in->_m_ops[i]->dump(oprintf);
4037                 comma = ",\t";
4038             }
4039         }
4040     }
4041     if (in->m_bops[0]) {
4042         if (comma)
4043             oprintf(comma);
4044         oprintf("[%s]", in->m_bops[0]->m_label.c_str());
4045         comma = ",\t";
4046     }
4047     if (in->m_bops[1])
4048         oprintf("%s[%s]", comma, in->m_bops[1]->m_label.c_str());
4049     if (in->m_params.size()) {
4050         oprintf("\tparams: ");
4051         for (auto &it : in->m_params)
4052             oprintf("%s, ", it->m_name.c_str());
4053     }
4054     oprintf("\n");
4055     ind[strlen(ind)-1] = 0;
4056 }
4057
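/* Print a string constant with C-style escape sequences. */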
4058 static void ir_value_dump_string(const char *str, int (*oprintf)(const char*, ...))
4059 {
4060     oprintf("\"");
4061     for (; *str; ++str) {
4062         switch (*str) {
4063             case '\n': oprintf("\\n"); break;
4064             case '\r': oprintf("\\r"); break;
4065             case '\t': oprintf("\\t"); break;
4066             case '\v': oprintf("\\v"); break;
4067             case '\f': oprintf("\\f"); break;
4068             case '\b': oprintf("\\b"); break;
4069             case '\a': oprintf("\\a"); break;
4070             case '\\': oprintf("\\\\"); break;
4071             case '"': oprintf("\\\""); break;
4072             default: oprintf("%c", *str); break;
4073         }
4074     }
4075     oprintf("\"");
4076 }
4077
4078 void ir_value::dump(int (*oprintf)(const char*, ...)) const
4079 {
4080     if (m_hasvalue) {
4081         switch (m_vtype) {
4082             default:
4083             case TYPE_VOID:
4084                 oprintf("(void)");
4085                 break;
4086             case TYPE_FUNCTION:
4087                 oprintf("fn:%s", m_name.c_str());
4088                 break;
4089             case TYPE_FLOAT:
4090                 oprintf("%g", m_constval.vfloat);
4091                 break;
4092             case TYPE_VECTOR:
4093                 oprintf("'%g %g %g'",
4094                         m_constval.vvec.x,
4095                         m_constval.vvec.y,
4096                         m_constval.vvec.z);
4097                 break;
4098             case TYPE_ENTITY:
4099                 oprintf("(entity)");
4100                 break;
4101             case TYPE_STRING:
4102                 ir_value_dump_string(m_constval.vstring, oprintf);
4103                 break;
4104 #if 0
4105             case TYPE_INTEGER:
4106                 oprintf("%i", m_constval.vint);
4107                 break;
4108 #endif
4109             case TYPE_POINTER:
4110                 oprintf("&%s",
4111                     m_constval.vpointer->m_name.c_str());
4112                 break;
4113         }
4114     } else {
4115         oprintf("%s", m_name.c_str());
4116     }
4117 }
4118
4119 void ir_value::dumpLife(int (*oprintf)(const char*,...)) const
4120 {
4121     oprintf("Life of %12s:", m_name.c_str());
4122     for (size_t i = 0; i < m_life.size(); ++i)
4123     {
4124         oprintf(" + [%i, %i]\n", m_life[i].start, m_life[i].end);
4125     }
4126 }