1 #include <stdlib.h>
2 #include <string.h>
3
4 #include "gmqcc.h"
5 #include "ir.h"
6
7 /***********************************************************************
8  * Type sizes used at multiple points in the IR codegen
9  */
10
11 const char *type_name[TYPE_COUNT] = {
12     "void",
13     "string",
14     "float",
15     "vector",
16     "entity",
17     "field",
18     "function",
19     "pointer",
20     "integer",
21     "variant",
22     "struct",
23     "union",
24     "array",
25
26     "nil",
27     "<no-expression>"
28 };
29
30 static size_t type_sizeof_[TYPE_COUNT] = {
31     1, /* TYPE_VOID     */
32     1, /* TYPE_STRING   */
33     1, /* TYPE_FLOAT    */
34     3, /* TYPE_VECTOR   */
35     1, /* TYPE_ENTITY   */
36     1, /* TYPE_FIELD    */
37     1, /* TYPE_FUNCTION */
38     1, /* TYPE_POINTER  */
39     1, /* TYPE_INTEGER  */
40     3, /* TYPE_VARIANT  */
41     0, /* TYPE_STRUCT   */
42     0, /* TYPE_UNION    */
43     0, /* TYPE_ARRAY    */
44     0, /* TYPE_NIL      */
45     0, /* TYPE_NOEXPR   */
46 };
47
48 const uint16_t type_store_instr[TYPE_COUNT] = {
49     INSTR_STORE_F, /* should use I when having integer support */
50     INSTR_STORE_S,
51     INSTR_STORE_F,
52     INSTR_STORE_V,
53     INSTR_STORE_ENT,
54     INSTR_STORE_FLD,
55     INSTR_STORE_FNC,
56     INSTR_STORE_ENT, /* should use I */
57 #if 0
58     INSTR_STORE_I, /* integer type */
59 #else
60     INSTR_STORE_F,
61 #endif
62
63     INSTR_STORE_V, /* variant, should never be accessed */
64
65     VINSTR_END, /* struct */
66     VINSTR_END, /* union  */
67     VINSTR_END, /* array  */
68     VINSTR_END, /* nil    */
69     VINSTR_END, /* noexpr */
70 };
71
72 const uint16_t field_store_instr[TYPE_COUNT] = {
73     INSTR_STORE_FLD,
74     INSTR_STORE_FLD,
75     INSTR_STORE_FLD,
76     INSTR_STORE_V,
77     INSTR_STORE_FLD,
78     INSTR_STORE_FLD,
79     INSTR_STORE_FLD,
80     INSTR_STORE_FLD,
81 #if 0
82     INSTR_STORE_FLD, /* integer type */
83 #else
84     INSTR_STORE_FLD,
85 #endif
86
87     INSTR_STORE_V, /* variant, should never be accessed */
88
89     VINSTR_END, /* struct */
90     VINSTR_END, /* union  */
91     VINSTR_END, /* array  */
92     VINSTR_END, /* nil    */
93     VINSTR_END, /* noexpr */
94 };
95
96 const uint16_t type_storep_instr[TYPE_COUNT] = {
97     INSTR_STOREP_F, /* should use I when having integer support */
98     INSTR_STOREP_S,
99     INSTR_STOREP_F,
100     INSTR_STOREP_V,
101     INSTR_STOREP_ENT,
102     INSTR_STOREP_FLD,
103     INSTR_STOREP_FNC,
104     INSTR_STOREP_ENT, /* should use I */
105 #if 0
106     INSTR_STOREP_ENT, /* integer type */
107 #else
108     INSTR_STOREP_F,
109 #endif
110
111     INSTR_STOREP_V, /* variant, should never be accessed */
112
113     VINSTR_END, /* struct */
114     VINSTR_END, /* union  */
115     VINSTR_END, /* array  */
116     VINSTR_END, /* nil    */
117     VINSTR_END, /* noexpr */
118 };
119
120 const uint16_t type_eq_instr[TYPE_COUNT] = {
121     INSTR_EQ_F, /* should use I when having integer support */
122     INSTR_EQ_S,
123     INSTR_EQ_F,
124     INSTR_EQ_V,
125     INSTR_EQ_E,
126     INSTR_EQ_E, /* FLD has no comparison */
127     INSTR_EQ_FNC,
128     INSTR_EQ_E, /* should use I */
129 #if 0
130     INSTR_EQ_I,
131 #else
132     INSTR_EQ_F,
133 #endif
134
135     INSTR_EQ_V, /* variant, should never be accessed */
136
137     VINSTR_END, /* struct */
138     VINSTR_END, /* union  */
139     VINSTR_END, /* array  */
140     VINSTR_END, /* nil    */
141     VINSTR_END, /* noexpr */
142 };
143
144 const uint16_t type_ne_instr[TYPE_COUNT] = {
145     INSTR_NE_F, /* should use I when having integer support */
146     INSTR_NE_S,
147     INSTR_NE_F,
148     INSTR_NE_V,
149     INSTR_NE_E,
150     INSTR_NE_E, /* FLD has no comparison */
151     INSTR_NE_FNC,
152     INSTR_NE_E, /* should use I */
153 #if 0
154     INSTR_NE_I,
155 #else
156     INSTR_NE_F,
157 #endif
158
159     INSTR_NE_V, /* variant, should never be accessed */
160
161     VINSTR_END, /* struct */
162     VINSTR_END, /* union  */
163     VINSTR_END, /* array  */
164     VINSTR_END, /* nil    */
165     VINSTR_END, /* noexpr */
166 };
167
168 const uint16_t type_not_instr[TYPE_COUNT] = {
169     INSTR_NOT_F, /* should use I when having integer support */
170     VINSTR_END,  /* not to be used, depends on string related -f flags */
171     INSTR_NOT_F,
172     INSTR_NOT_V,
173     INSTR_NOT_ENT,
174     INSTR_NOT_ENT,
175     INSTR_NOT_FNC,
176     INSTR_NOT_ENT, /* should use I */
177 #if 0
178     INSTR_NOT_I, /* integer type */
179 #else
180     INSTR_NOT_F,
181 #endif
182
183     INSTR_NOT_V, /* variant, should never be accessed */
184
185     VINSTR_END, /* struct */
186     VINSTR_END, /* union  */
187     VINSTR_END, /* array  */
188     VINSTR_END, /* nil    */
189     VINSTR_END, /* noexpr */
190 };
191
192 /* protos */
193 static void            ir_function_dump(ir_function*, char *ind, int (*oprintf)(const char*,...));
194
195 static ir_value*       ir_block_create_general_instr(ir_block *self, lex_ctx_t, const char *label,
196                                                      int op, ir_value *a, ir_value *b, qc_type outype);
197 static bool GMQCC_WARN ir_block_create_store(ir_block*, lex_ctx_t, ir_value *target, ir_value *what);
198 static void            ir_block_dump(ir_block*, char *ind, int (*oprintf)(const char*,...));
199
200 static bool            ir_instr_op(ir_instr*, int op, ir_value *value, bool writing);
201 static void            ir_instr_dump(ir_instr* in, char *ind, int (*oprintf)(const char*,...));
202 /* error functions */
203
204 static void irerror(lex_ctx_t ctx, const char *msg, ...)
205 {
206     va_list ap;
207     va_start(ap, msg);
208     con_cvprintmsg(ctx, LVL_ERROR, "internal error", msg, ap);
209     va_end(ap);
210 }
211
212 static bool GMQCC_WARN irwarning(lex_ctx_t ctx, int warntype, const char *fmt, ...)
213 {
214     bool    r;
215     va_list ap;
216     va_start(ap, fmt);
217     r = vcompile_warning(ctx, warntype, fmt, ap);
218     va_end(ap);
219     return r;
220 }
221
222 /***********************************************************************
223  * Vector utility functions
224  */
225
226 static bool GMQCC_WARN vec_ir_value_find(std::vector<ir_value *> &vec, const ir_value *what, size_t *idx)
227 {
228     for (auto &it : vec) {
229         if (it != what)
230             continue;
231         if (idx)
232             *idx = &it - &vec[0];
233         return true;
234     }
235     return false;
236 }
237
238 static bool GMQCC_WARN vec_ir_block_find(ir_block **vec, ir_block *what, size_t *idx)
239 {
240     size_t i;
241     size_t len = vec_size(vec);
242     for (i = 0; i < len; ++i) {
243         if (vec[i] == what) {
244             if (idx) *idx = i;
245             return true;
246         }
247     }
248     return false;
249 }
250
251 static bool GMQCC_WARN vec_ir_instr_find(std::vector<ir_instr *> &vec, ir_instr *what, size_t *idx)
252 {
253     for (auto &it : vec) {
254         if (it != what)
255             continue;
256         if (idx)
257             *idx = &it - &vec[0];
258         return true;
259     }
260     return false;
261 }
262
263 /***********************************************************************
264  * IR Builder
265  */
266
267 static void ir_block_delete_quick(ir_block* self);
268 static void ir_instr_delete_quick(ir_instr *self);
269 static void ir_function_delete_quick(ir_function *self);
270
271 ir_builder::ir_builder(const std::string& modulename)
272 : m_name(modulename),
273   m_code(new code_t)
274 {
275     m_htglobals   = util_htnew(IR_HT_SIZE);
276     m_htfields    = util_htnew(IR_HT_SIZE);
277     m_htfunctions = util_htnew(IR_HT_SIZE);
278
279     m_nil = new ir_value("nil", store_value, TYPE_NIL);
280     m_nil->m_cvq = CV_CONST;
281
282     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
283         /* We write to these, but they are not supposed to be used outside the IR,
284          * so don't allow the generation of ir_instrs which use them:
285          * mark them as constant noexpr values.
286          */
287         m_vinstr_temp[i] = new ir_value("vinstr_temp", store_value, TYPE_NOEXPR);
288         m_vinstr_temp[i]->m_cvq = CV_CONST;
289     }
290 }
291
292 ir_builder::~ir_builder()
293 {
294     util_htdel(m_htglobals);
295     util_htdel(m_htfields);
296     util_htdel(m_htfunctions);
297     for (auto& f : m_functions)
298         ir_function_delete_quick(f.release());
299     m_functions.clear(); // delete them now before deleting the rest:
300
301     delete m_nil;
302
303     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
304         delete m_vinstr_temp[i];
305     }
306
307     m_extparams.clear();
308     m_extparam_protos.clear();
309 }
310
311 ir_function* ir_builder::createFunction(const std::string& name, qc_type outtype)
312 {
313     ir_function *fn = (ir_function*)util_htget(m_htfunctions, name.c_str());
314     if (fn)
315         return nullptr;
316
317     fn = new ir_function(this, outtype);
318     fn->m_name = name;
319     m_functions.emplace_back(fn);
320     util_htset(m_htfunctions, name.c_str(), fn);
321
322     fn->m_value = createGlobal(fn->m_name, TYPE_FUNCTION);
323     if (!fn->m_value) {
324         delete fn;
325         return nullptr;
326     }
327
328     fn->m_value->m_hasvalue = true;
329     fn->m_value->m_outtype = outtype;
330     fn->m_value->m_constval.vfunc = fn;
331     fn->m_value->m_context = fn->m_context;
332
333     return fn;
334 }
335
336 ir_value* ir_builder::createGlobal(const std::string& name, qc_type vtype)
337 {
338     ir_value *ve;
339
340     if (name[0] != '#')
341     {
342         ve = (ir_value*)util_htget(m_htglobals, name.c_str());
343         if (ve) {
344             return nullptr;
345         }
346     }
347
348     ve = new ir_value(std::string(name), store_global, vtype);
349     m_globals.emplace_back(ve);
350     util_htset(m_htglobals, name.c_str(), ve);
351     return ve;
352 }
353
354 ir_value* ir_builder::get_va_count()
355 {
356     if (m_reserved_va_count)
357         return m_reserved_va_count;
358     return (m_reserved_va_count = createGlobal("reserved:va_count", TYPE_FLOAT));
359 }
360
361 ir_value* ir_builder::createField(const std::string& name, qc_type vtype)
362 {
363     ir_value *ve = (ir_value*)util_htget(m_htfields, name.c_str());
364     if (ve) {
365         return nullptr;
366     }
367
368     ve = new ir_value(std::string(name), store_global, TYPE_FIELD);
369     ve->m_fieldtype = vtype;
370     m_fields.emplace_back(ve);
371     util_htset(m_htfields, name.c_str(), ve);
372     return ve;
373 }
374
375 /***********************************************************************
376  * IR Function
377  */
378
379 static bool ir_function_naive_phi(ir_function*);
380 static void ir_function_enumerate(ir_function*);
381 static bool ir_function_calculate_liferanges(ir_function*);
382 static bool ir_function_allocate_locals(ir_function*);
383
384 ir_function::ir_function(ir_builder* owner_, qc_type outtype_)
385 : m_owner(owner_),
386   m_name("<@unnamed>"),
387   m_outtype(outtype_)
388 {
389     m_context.file = "<@no context>";
390     m_context.line = 0;
391 }
392
393 ir_function::~ir_function()
394 {
395 }
396
397 static void ir_function_delete_quick(ir_function *self)
398 {
399     for (auto& b : self->m_blocks)
400         ir_block_delete_quick(b.release());
401     delete self;
402 }
403
404 static void ir_function_collect_value(ir_function *self, ir_value *v)
405 {
406     self->m_values.emplace_back(v);
407 }
408
409 ir_block* ir_function_create_block(lex_ctx_t ctx, ir_function *self, const char *label)
410 {
411     ir_block* bn = new ir_block(self, label ? std::string(label) : std::string());
412     bn->m_context = ctx;
413     self->m_blocks.emplace_back(bn);
414
415     if ((self->m_flags & IR_FLAG_BLOCK_COVERAGE) && self->m_owner->m_coverage_func)
416         (void)ir_block_create_call(bn, ctx, nullptr, self->m_owner->m_coverage_func, false);
417
418     return bn;
419 }
420
421 static bool instr_is_operation(uint16_t op)
422 {
423     return ( (op >= INSTR_MUL_F  && op <= INSTR_GT) ||
424              (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) ||
425              (op == INSTR_ADDRESS) ||
426              (op >= INSTR_NOT_F  && op <= INSTR_NOT_FNC) ||
427              (op >= INSTR_AND    && op <= INSTR_BITOR) ||
428              (op >= INSTR_CALL0  && op <= INSTR_CALL8) ||
429              (op >= VINSTR_BITAND_V && op <= VINSTR_NEG_V) );
430 }
431
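/* Peephole pass:
 *  - If an operation's single-use SSA result is consumed by the very next
 *    STORE, make the operation write directly to the store's target and
 *    drop the STORE.
 *  - If a COND tests the single-use result of a scalar NOT, replace the
 *    COND's operand with the NOT's operand, remove the NOT and swap the
 *    ontrue/onfalse branch targets.
 */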
432 static bool ir_function_pass_peephole(ir_function *self)
433 {
434     for (auto& bp : self->m_blocks) {
435         ir_block *block = bp.get();
436         for (size_t i = 0; i < vec_size(block->m_instr); ++i) {
437             ir_instr *inst;
438             inst = block->m_instr[i];
439
440             if (i >= 1 &&
441                 (inst->m_opcode >= INSTR_STORE_F &&
442                  inst->m_opcode <= INSTR_STORE_FNC))
443             {
444                 ir_instr *store;
445                 ir_instr *oper;
446                 ir_value *value;
447
448                 store = inst;
449
450                 oper  = block->m_instr[i-1];
451                 if (!instr_is_operation(oper->m_opcode))
452                     continue;
453
454                 /* Don't change semantics of MUL_VF in engines where these may not alias. */
455                 if (OPTS_FLAG(LEGACY_VECTOR_MATHS)) {
456                     if (oper->m_opcode == INSTR_MUL_VF && oper->_m_ops[2]->m_memberof == oper->_m_ops[1])
457                         continue;
458                     if (oper->m_opcode == INSTR_MUL_FV && oper->_m_ops[1]->m_memberof == oper->_m_ops[2])
459                         continue;
460                 }
461
462                 value = oper->_m_ops[0];
463
464                 /* only do it for SSA values */
465                 if (value->m_store != store_value)
466                     continue;
467
468                 /* don't optimize out the temp if it's used later again */
469                 if (value->m_reads.size() != 1)
470                     continue;
471
472                 /* The very next store must use this value */
473                 if (value->m_reads[0] != store)
474                     continue;
475
476                 /* And of course the store must _read_ from it, so it's in
477                  * OP 1 */
478                 if (store->_m_ops[1] != value)
479                     continue;
480
481                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
482                 (void)!ir_instr_op(oper, 0, store->_m_ops[0], true);
483
484                 vec_remove(block->m_instr, i, 1);
485                 delete store;
486             }
487             else if (inst->m_opcode == VINSTR_COND)
488             {
489                 /* A COND on a value resulting from a NOT can drop
490                  * the NOT and swap the COND's branch targets instead.
491                  */
492                 while (true) {
493                     ir_block *tmp;
494                     size_t    inotid;
495                     ir_instr *inot;
496                     ir_value *value;
497                     value = inst->_m_ops[0];
498
499                     if (value->m_store != store_value || value->m_reads.size() != 1 || value->m_reads[0] != inst)
500                         break;
501
502                     inot = value->m_writes[0];
503                     if (inot->_m_ops[0] != value ||
504                         inot->m_opcode < INSTR_NOT_F ||
505                         inot->m_opcode > INSTR_NOT_FNC ||
506                         inot->m_opcode == INSTR_NOT_V || /* can't do these */
507                         inot->m_opcode == INSTR_NOT_S)
508                     {
509                         break;
510                     }
511
512                     /* count */
513                     ++opts_optimizationcount[OPTIM_PEEPHOLE];
514                     /* change operand */
515                     (void)!ir_instr_op(inst, 0, inot->_m_ops[1], false);
516                     /* remove NOT */
517                     tmp = inot->m_owner;
518                     for (inotid = 0; inotid < vec_size(tmp->m_instr); ++inotid) {
519                         if (tmp->m_instr[inotid] == inot)
520                             break;
521                     }
522                     if (inotid >= vec_size(tmp->m_instr)) {
523                         compile_error(inst->m_context, "sanity-check failed: failed to find instruction to optimize out");
524                         return false;
525                     }
526                     vec_remove(tmp->m_instr, inotid, 1);
527                     delete inot;
528                     /* swap ontrue/onfalse */
529                     tmp = inst->m_bops[0];
530                     inst->m_bops[0] = inst->m_bops[1];
531                     inst->m_bops[1] = tmp;
532                 }
533                 continue;
534             }
535         }
536     }
537
538     return true;
539 }
540
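/* Tail-recursion pass: when a block ends in a RETURN immediately preceded
 * by a CALL of this very function (optionally with an intermediate STORE
 * of the return value), remove the CALL/RETURN, store the call's
 * parameters into the function's locals and jump back to the entry block.
 */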
541 static bool ir_function_pass_tailrecursion(ir_function *self)
542 {
543     size_t p;
544
545     for (auto& bp : self->m_blocks) {
546         ir_block *block = bp.get();
547
548         ir_value *funcval;
549         ir_instr *ret, *call, *store = nullptr;
550
551         if (!block->m_final || vec_size(block->m_instr) < 2)
552             continue;
553
554         ret = block->m_instr[vec_size(block->m_instr)-1];
555         if (ret->m_opcode != INSTR_DONE && ret->m_opcode != INSTR_RETURN)
556             continue;
557
558         call = block->m_instr[vec_size(block->m_instr)-2];
559         if (call->m_opcode >= INSTR_STORE_F && call->m_opcode <= INSTR_STORE_FNC) {
560             /* account for the unoptimized
561              * CALL
562              * STORE %return, %tmp
563              * RETURN %tmp
564              * version
565              */
566             if (vec_size(block->m_instr) < 3)
567                 continue;
568
569             store = call;
570             call = block->m_instr[vec_size(block->m_instr)-3];
571         }
572
573         if (call->m_opcode < INSTR_CALL0 || call->m_opcode > INSTR_CALL8)
574             continue;
575
576         if (store) {
577             /* optimize out the STORE */
578             if (ret->_m_ops[0]   &&
579                 ret->_m_ops[0]   == store->_m_ops[0] &&
580                 store->_m_ops[1] == call->_m_ops[0])
581             {
582                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
583                 call->_m_ops[0] = store->_m_ops[0];
584                 vec_remove(block->m_instr, vec_size(block->m_instr) - 2, 1);
585                 delete store;
586             }
587             else
588                 continue;
589         }
590
591         if (!call->_m_ops[0])
592             continue;
593
594         funcval = call->_m_ops[1];
595         if (!funcval)
596             continue;
597         if (funcval->m_vtype != TYPE_FUNCTION || funcval->m_constval.vfunc != self)
598             continue;
599
600         /* now we have a CALL and a RET, check if it's a tailcall */
601         if (ret->_m_ops[0] && call->_m_ops[0] != ret->_m_ops[0])
602             continue;
603
604         ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
605         vec_shrinkby(block->m_instr, 2);
606
607         block->m_final = false; /* open it back up */
608
609         /* emit parameter-stores */
610         for (p = 0; p < call->m_params.size(); ++p) {
611             /* assert(call->params_count <= self->locals_count); */
612             if (!ir_block_create_store(block, call->m_context, self->m_locals[p].get(), call->m_params[p])) {
613                 irerror(call->m_context, "failed to create tailcall store instruction for parameter %i", (int)p);
614                 return false;
615             }
616         }
617         if (!ir_block_create_jump(block, call->m_context, self->m_blocks[0].get())) {
618             irerror(call->m_context, "failed to create tailcall jump");
619             return false;
620         }
621
622         delete call;
623         delete ret;
624     }
625
626     return true;
627 }
628
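/* Finalize a function's IR: warn about unused locals, run the optional
 * peephole and tail-recursion passes, lower PHI nodes, create the vector
 * members of vector-typed locals and values, enumerate the instructions,
 * then compute life-ranges and allocate local slots.
 */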
629 bool ir_function_finalize(ir_function *self)
630 {
631     if (self->m_builtin)
632         return true;
633
634     for (auto& lp : self->m_locals) {
635         ir_value *v = lp.get();
636         if (v->m_reads.empty() && v->m_writes.size()
637             && irwarning(v->m_context, WARN_UNUSED_VARIABLE,
638                         "unused variable: `%s`", v->m_name.c_str())) return false;
639     }
640
641     if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
642         if (!ir_function_pass_peephole(self)) {
643             irerror(self->m_context, "generic optimization pass broke something in `%s`", self->m_name.c_str());
644             return false;
645         }
646     }
647
648     if (OPTS_OPTIMIZATION(OPTIM_TAIL_RECURSION)) {
649         if (!ir_function_pass_tailrecursion(self)) {
650             irerror(self->m_context, "tail-recursion optimization pass broke something in `%s`", self->m_name.c_str());
651             return false;
652         }
653     }
654
655     if (!ir_function_naive_phi(self)) {
656         irerror(self->m_context, "internal error: ir_function_naive_phi failed");
657         return false;
658     }
659
660     for (auto& lp : self->m_locals) {
661         ir_value *v = lp.get();
662         if (v->m_vtype == TYPE_VECTOR ||
663             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
664         {
665             v->vectorMember(0);
666             v->vectorMember(1);
667             v->vectorMember(2);
668         }
669     }
670     for (auto& vp : self->m_values) {
671         ir_value *v = vp.get();
672         if (v->m_vtype == TYPE_VECTOR ||
673             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
674         {
675             v->vectorMember(0);
676             v->vectorMember(1);
677             v->vectorMember(2);
678         }
679     }
680
681     ir_function_enumerate(self);
682
683     if (!ir_function_calculate_liferanges(self))
684         return false;
685     if (!ir_function_allocate_locals(self))
686         return false;
687     return true;
688 }
689
690 ir_value* ir_function_create_local(ir_function *self, const std::string& name, qc_type vtype, bool param)
691 {
692     ir_value *ve;
693
694     if (param &&
695         !self->m_locals.empty() &&
696         self->m_locals.back()->m_store != store_param)
697     {
698         irerror(self->m_context, "cannot add parameters after adding locals");
699         return nullptr;
700     }
701
702     ve = new ir_value(std::string(name), (param ? store_param : store_local), vtype);
703     if (param)
704         ve->m_locked = true;
705     self->m_locals.emplace_back(ve);
706     return ve;
707 }
708
709 /***********************************************************************
710  * IR Block
711  */
712
713 ir_block::ir_block(ir_function* owner, const std::string& name)
714 : m_owner(owner),
715   m_label(name)
716 {
717     m_context.file = "<@no context>";
718     m_context.line = 0;
719 }
720
721 ir_block::~ir_block()
722 {
723     for (size_t i = 0; i != vec_size(m_instr); ++i)
724         delete m_instr[i];
725     vec_free(m_instr);
726     vec_free(m_entries);
727     vec_free(m_exits);
728 }
729
730 static void ir_block_delete_quick(ir_block* self)
731 {
732     size_t i;
733     for (i = 0; i != vec_size(self->m_instr); ++i)
734         ir_instr_delete_quick(self->m_instr[i]);
735     vec_free(self->m_instr);
736     delete self;
737 }
738
739 /***********************************************************************
740  * IR Instructions
741  */
742
743 ir_instr::ir_instr(lex_ctx_t ctx, ir_block* owner_, int op)
744 : m_opcode(op),
745   m_context(ctx),
746   m_owner(owner_)
747 {
748 }
749
750 ir_instr::~ir_instr()
751 {
752     // The following calls can only delete from
753     // vectors; we still want to delete this instruction,
754     // so ignore the return values. Since with the warn_unused_result attribute
755     // gcc does not accept a plain (void)foo(); cast to ignore the result,
756     // we have to improvise here and use (void)!foo(); instead.
757     for (auto &it : m_phi) {
758         size_t idx;
759         if (vec_ir_instr_find(it.value->m_writes, this, &idx))
760             it.value->m_writes.erase(it.value->m_writes.begin() + idx);
761         if (vec_ir_instr_find(it.value->m_reads, this, &idx))
762             it.value->m_reads.erase(it.value->m_reads.begin() + idx);
763     }
764     for (auto &it : m_params) {
765         size_t idx;
766         if (vec_ir_instr_find(it->m_writes, this, &idx))
767             it->m_writes.erase(it->m_writes.begin() + idx);
768         if (vec_ir_instr_find(it->m_reads, this, &idx))
769             it->m_reads.erase(it->m_reads.begin() + idx);
770     }
771     (void)!ir_instr_op(this, 0, nullptr, false);
772     (void)!ir_instr_op(this, 1, nullptr, false);
773     (void)!ir_instr_op(this, 2, nullptr, false);
774 }
775
776 static void ir_instr_delete_quick(ir_instr *self)
777 {
778     self->m_phi.clear();
779     self->m_params.clear();
780     self->_m_ops[0] = nullptr;
781     self->_m_ops[1] = nullptr;
782     self->_m_ops[2] = nullptr;
783     delete self;
784 }
785
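/* Set operand slot `op` of an instruction to `v`, keeping the read/write
 * use-lists of both the previous and the new operand value up to date.
 * NOEXPR values are rejected.
 */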
786 static bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing)
787 {
788     if (v && v->m_vtype == TYPE_NOEXPR) {
789         irerror(self->m_context, "tried to use a NOEXPR value");
790         return false;
791     }
792
793     if (self->_m_ops[op]) {
794         size_t idx;
795         if (writing && vec_ir_instr_find(self->_m_ops[op]->m_writes, self, &idx))
796             self->_m_ops[op]->m_writes.erase(self->_m_ops[op]->m_writes.begin() + idx);
797         else if (vec_ir_instr_find(self->_m_ops[op]->m_reads, self, &idx))
798             self->_m_ops[op]->m_reads.erase(self->_m_ops[op]->m_reads.begin() + idx);
799     }
800     if (v) {
801         if (writing)
802             v->m_writes.push_back(self);
803         else
804             v->m_reads.push_back(self);
805     }
806     self->_m_ops[op] = v;
807     return true;
808 }
809
810 /***********************************************************************
811  * IR Value
812  */
813
814 void ir_value::setCodeAddress(int32_t gaddr)
815 {
816     m_code.globaladdr = gaddr;
817     if (m_members[0]) m_members[0]->m_code.globaladdr = gaddr;
818     if (m_members[1]) m_members[1]->m_code.globaladdr = gaddr;
819     if (m_members[2]) m_members[2]->m_code.globaladdr = gaddr;
820 }
821
822 int32_t ir_value::codeAddress() const
823 {
824     if (m_store == store_return)
825         return OFS_RETURN + m_code.addroffset;
826     return m_code.globaladdr + m_code.addroffset;
827 }
828
829 ir_value::ir_value(std::string&& name_, store_type store_, qc_type vtype_)
830     : m_name(move(name_))
831     , m_vtype(vtype_)
832     , m_store(store_)
833 {
834     m_fieldtype = TYPE_VOID;
835     m_outtype = TYPE_VOID;
836     m_flags = 0;
837
838     m_cvq          = CV_NONE;
839     m_hasvalue     = false;
840     m_context.file = "<@no context>";
841     m_context.line = 0;
842
843     memset(&m_constval, 0, sizeof(m_constval));
844     memset(&m_code,     0, sizeof(m_code));
845
846     m_members[0] = nullptr;
847     m_members[1] = nullptr;
848     m_members[2] = nullptr;
849     m_memberof = nullptr;
850
851     m_unique_life = false;
852     m_locked = false;
853     m_callparam  = false;
854 }
855
856 ir_value::ir_value(ir_function *owner, std::string&& name, store_type storetype, qc_type vtype)
857     : ir_value(move(name), storetype, vtype)
858 {
859     ir_function_collect_value(owner, this);
860 }
861
862 ir_value::~ir_value()
863 {
864     size_t i;
865     if (m_hasvalue) {
866         if (m_vtype == TYPE_STRING)
867             mem_d((void*)m_constval.vstring);
868     }
869     if (!(m_flags & IR_FLAG_SPLIT_VECTOR)) {
870         for (i = 0; i < 3; ++i) {
871             if (m_members[i])
872                 delete m_members[i];
873         }
874     }
875 }
876
877
878 /* helper function */
879 ir_value* ir_builder::literalFloat(float value, bool add_to_list) {
880     ir_value *v = new ir_value("#IMMEDIATE", store_global, TYPE_FLOAT);
881     v->m_flags |= IR_FLAG_ERASABLE;
882     v->m_hasvalue = true;
883     v->m_cvq = CV_CONST;
884     v->m_constval.vfloat = value;
885
886     m_globals.emplace_back(v);
887     if (add_to_list)
888         m_const_floats.emplace_back(v);
889     return v;
890 }
891
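/* Lazily create the _x/_y/_z member value of a vector (or of a field of
 * vector type). The member shares the parent's storage and addresses its
 * component through code.addroffset.
 */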
892 ir_value* ir_value::vectorMember(unsigned int member)
893 {
894     std::string name;
895     ir_value *m;
896     if (member >= 3)
897         return nullptr;
898
899     if (m_members[member])
900         return m_members[member];
901
902     if (!m_name.empty()) {
903         char member_name[3] = { '_', char('x' + member), 0 };
904         name = m_name + member_name;
905     }
906
907     if (m_vtype == TYPE_VECTOR)
908     {
909         m = new ir_value(move(name), m_store, TYPE_FLOAT);
910         if (!m)
911             return nullptr;
912         m->m_context = m_context;
913
914         m_members[member] = m;
915         m->m_code.addroffset = member;
916     }
917     else if (m_vtype == TYPE_FIELD)
918     {
919         if (m_fieldtype != TYPE_VECTOR)
920             return nullptr;
921         m = new ir_value(move(name), m_store, TYPE_FIELD);
922         if (!m)
923             return nullptr;
924         m->m_fieldtype = TYPE_FLOAT;
925         m->m_context = m_context;
926
927         m_members[member] = m;
928         m->m_code.addroffset = member;
929     }
930     else
931     {
932         irerror(m_context, "invalid member access on %s", m_name.c_str());
933         return nullptr;
934     }
935
936     m->m_memberof = this;
937     return m;
938 }
939
940 size_t ir_value::size() const {
941     if (m_vtype == TYPE_FIELD && m_fieldtype == TYPE_VECTOR)
942         return type_sizeof_[TYPE_VECTOR];
943     return type_sizeof_[m_vtype];
944 }
945
946 bool ir_value::setFloat(float f)
947 {
948     if (m_vtype != TYPE_FLOAT)
949         return false;
950     m_constval.vfloat = f;
951     m_hasvalue = true;
952     return true;
953 }
954
955 bool ir_value::setFunc(int f)
956 {
957     if (m_vtype != TYPE_FUNCTION)
958         return false;
959     m_constval.vint = f;
960     m_hasvalue = true;
961     return true;
962 }
963
964 bool ir_value::setVector(vec3_t v)
965 {
966     if (m_vtype != TYPE_VECTOR)
967         return false;
968     m_constval.vvec = v;
969     m_hasvalue = true;
970     return true;
971 }
972
973 bool ir_value::setField(ir_value *fld)
974 {
975     if (m_vtype != TYPE_FIELD)
976         return false;
977     m_constval.vpointer = fld;
978     m_hasvalue = true;
979     return true;
980 }
981
982 bool ir_value::setString(const char *str)
983 {
984     if (m_vtype != TYPE_STRING)
985         return false;
986     m_constval.vstring = util_strdupe(str);
987     m_hasvalue = true;
988     return true;
989 }
990
991 #if 0
992 bool ir_value::setInt(int i)
993 {
994     if (m_vtype != TYPE_INTEGER)
995         return false;
996     m_constval.vint = i;
997     m_hasvalue = true;
998     return true;
999 }
1000 #endif
1001
1002 bool ir_value::lives(size_t at)
1003 {
1004     for (auto& l : m_life) {
1005         if (l.start <= at && at <= l.end)
1006             return true;
1007         if (l.start > at) /* since it's ordered */
1008             return false;
1009     }
1010     return false;
1011 }
1012
1013 bool ir_value::insertLife(size_t idx, ir_life_entry_t e)
1014 {
1015     m_life.insert(m_life.begin() + idx, e);
1016     return true;
1017 }
1018
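/* Mark instruction id `s` as alive for this value by extending or merging
 * the ordered list of life ranges. Returns false if `s` is already covered
 * by an existing range.
 */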
1019 bool ir_value::setAlive(size_t s)
1020 {
1021     size_t i;
1022     const size_t vs = m_life.size();
1023     ir_life_entry_t *life_found = nullptr;
1024     ir_life_entry_t *before = nullptr;
1025     ir_life_entry_t new_entry;
1026
1027     /* Find the first range starting after s */
1028     for (i = 0; i < vs; ++i)
1029     {
1030         before = life_found;
1031         life_found = &m_life[i];
1032         if (life_found->start > s)
1033             break;
1034     }
1035     /* nothing found? append */
1036     if (i == vs) {
1037         ir_life_entry_t e;
1038         if (life_found && life_found->end+1 == s)
1039         {
1040             /* previous life range can be merged in */
1041             life_found->end++;
1042             return true;
1043         }
1044         if (life_found && life_found->end >= s)
1045             return false;
1046         e.start = e.end = s;
1047         m_life.emplace_back(e);
1048         return true;
1049     }
1050     /* found */
1051     if (before)
1052     {
1053         if (before->end + 1 == s &&
1054             life_found->start - 1 == s)
1055         {
1056             /* merge */
1057             before->end = life_found->end;
1058             m_life.erase(m_life.begin()+i);
1059             return true;
1060         }
1061         if (before->end + 1 == s)
1062         {
1063             /* extend before */
1064             before->end++;
1065             return true;
1066         }
1067         /* already contained */
1068         if (before->end >= s)
1069             return false;
1070     }
1071     /* extend */
1072     if (life_found->start - 1 == s)
1073     {
1074         life_found->start--;
1075         return true;
1076     }
1077     /* insert a new entry */
1078     new_entry.start = new_entry.end = s;
1079     return insertLife(i, new_entry);
1080 }
1081
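/* Merge another value's life ranges into this one, keeping the range list
 * ordered and coalescing ranges that touch or overlap.
 */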
1082 bool ir_value::mergeLife(const ir_value *other)
1083 {
1084     size_t i, myi;
1085
1086     if (other->m_life.empty())
1087         return true;
1088
1089     if (m_life.empty()) {
1090         m_life = other->m_life;
1091         return true;
1092     }
1093
1094     myi = 0;
1095     for (i = 0; i < other->m_life.size(); ++i)
1096     {
1097         const ir_life_entry_t &otherlife = other->m_life[i];
1098         while (true)
1099         {
1100             ir_life_entry_t *entry = &m_life[myi];
1101
1102             if (otherlife.end+1 < entry->start)
1103             {
1104                 /* adding an interval before entry */
1105                 if (!insertLife(myi, otherlife))
1106                     return false;
1107                 ++myi;
1108                 break;
1109             }
1110
1111             if (otherlife.start <  entry->start &&
1112                 otherlife.end+1 >= entry->start)
1113             {
1114                 /* starts earlier and overlaps */
1115                 entry->start = otherlife.start;
1116             }
1117
1118             if (otherlife.end   >  entry->end &&
1119                 otherlife.start <= entry->end+1)
1120             {
1121                 /* ends later and overlaps */
1122                 entry->end = otherlife.end;
1123             }
1124
1125             /* see if our change combines it with the next ranges */
1126             while (myi+1 < m_life.size() &&
1127                    entry->end+1 >= m_life[1+myi].start)
1128             {
1129                 /* overlaps with (myi+1) */
1130                 if (entry->end < m_life[1+myi].end)
1131                     entry->end = m_life[1+myi].end;
1132                 m_life.erase(m_life.begin() + (myi + 1));
1133                 entry = &m_life[myi];
1134             }
1135
1136             /* see if we're after the entry */
1137             if (otherlife.start > entry->end)
1138             {
1139                 ++myi;
1140                 /* append if we're at the end */
1141                 if (myi >= m_life.size()) {
1142                     m_life.emplace_back(otherlife);
1143                     break;
1144                 }
1145                 /* otherwise check the next range */
1146                 continue;
1147             }
1148             break;
1149         }
1150     }
1151     return true;
1152 }
1153
1154 static bool ir_values_overlap(const ir_value *a, const ir_value *b)
1155 {
1156     /* For any life entry in A see if it overlaps with
1157      * any life entry in B.
1158      * Note that the life entries are ordered, so we can use a
1159      * more efficient algorithm than naively translating the
1160      * statement above.
1161      */
1162
1163     const ir_life_entry_t *la, *lb, *enda, *endb;
1164
1165     /* first of all, if either has no life range, they cannot clash */
1166     if (a->m_life.empty() || b->m_life.empty())
1167         return false;
1168
1169     la = &a->m_life.front();
1170     lb = &b->m_life.front();
1171     enda = &a->m_life.back() + 1;
1172     endb = &b->m_life.back() + 1;
1173     while (true)
1174     {
1175         /* check if the entries overlap, for that,
1176          * both must start before the other one ends.
1177          */
1178         if (la->start < lb->end &&
1179             lb->start < la->end)
1180         {
1181             return true;
1182         }
1183
1184         /* entries are ordered
1185          * one entry is earlier than the other
1186          * that earlier entry will be moved forward
1187          */
1188         if (la->start < lb->start)
1189         {
1190             /* order: A B, move A forward
1191              * check if we hit the end with A
1192              */
1193             if (++la == enda)
1194                 break;
1195         }
1196         else /* if (lb->start < la->start)  actually <= */
1197         {
1198             /* order: B A, move B forward
1199              * check if we hit the end with B
1200              */
1201             if (++lb == endb)
1202                 break;
1203         }
1204     }
1205     return false;
1206 }
1207
1208 /***********************************************************************
1209  *IR main operations
1210  */
1211
1212 static bool ir_check_unreachable(ir_block *self)
1213 {
1214     /* The IR should never have to deal with unreachable code */
1215     if (!self->m_final/* || OPTS_FLAG(ALLOW_UNREACHABLE_CODE)*/)
1216         return true;
1217     irerror(self->m_context, "unreachable statement (%s)", self->m_label.c_str());
1218     return false;
1219 }
1220
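/* Emit a STORE/STOREP-family instruction. Storing into an SSA value is
 * only allowed for STOREP opcodes (which write through a pointer rather
 * than to the value itself); operand 0 is recorded as a write for plain
 * stores and as a read for STOREP.
 */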
1221 bool ir_block_create_store_op(ir_block *self, lex_ctx_t ctx, int op, ir_value *target, ir_value *what)
1222 {
1223     ir_instr *in;
1224     if (!ir_check_unreachable(self))
1225         return false;
1226
1227     if (target->m_store == store_value &&
1228         (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC))
1229     {
1230         irerror(self->m_context, "cannot store to an SSA value");
1231         irerror(self->m_context, "trying to store: %s <- %s", target->m_name.c_str(), what->m_name.c_str());
1232         irerror(self->m_context, "instruction: %s", util_instr_str[op]);
1233         return false;
1234     }
1235
1236     in = new ir_instr(ctx, self, op);
1237     if (!in)
1238         return false;
1239
1240     if (!ir_instr_op(in, 0, target, (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC)) ||
1241         !ir_instr_op(in, 1, what, false))
1242     {
1243         delete in;
1244         return false;
1245     }
1246     vec_push(self->m_instr, in);
1247     return true;
1248 }
1249
1250 bool ir_block_create_state_op(ir_block *self, lex_ctx_t ctx, ir_value *frame, ir_value *think)
1251 {
1252     ir_instr *in;
1253     if (!ir_check_unreachable(self))
1254         return false;
1255
1256     in = new ir_instr(ctx, self, INSTR_STATE);
1257     if (!in)
1258         return false;
1259
1260     if (!ir_instr_op(in, 0, frame, false) ||
1261         !ir_instr_op(in, 1, think, false))
1262     {
1263         delete in;
1264         return false;
1265     }
1266     vec_push(self->m_instr, in);
1267     return true;
1268 }
1269
1270 static bool ir_block_create_store(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1271 {
1272     int op = 0;
1273     qc_type vtype;
1274     if (target->m_vtype == TYPE_VARIANT)
1275         vtype = what->m_vtype;
1276     else
1277         vtype = target->m_vtype;
1278
1279 #if 0
1280     if      (vtype == TYPE_FLOAT   && what->m_vtype == TYPE_INTEGER)
1281         op = INSTR_CONV_ITOF;
1282     else if (vtype == TYPE_INTEGER && what->m_vtype == TYPE_FLOAT)
1283         op = INSTR_CONV_FTOI;
1284 #endif
1285         op = type_store_instr[vtype];
1286
1287     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1288         if (op == INSTR_STORE_FLD && what->m_fieldtype == TYPE_VECTOR)
1289             op = INSTR_STORE_V;
1290     }
1291
1292     return ir_block_create_store_op(self, ctx, op, target, what);
1293 }
1294
1295 bool ir_block_create_storep(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1296 {
1297     int op = 0;
1298     qc_type vtype;
1299
1300     if (target->m_vtype != TYPE_POINTER)
1301         return false;
1302
1303     /* storing through a pointer - the target is a pointer, so the type
1304      * must be inferred from the source
1305      */
1306     vtype = what->m_vtype;
1307
1308     op = type_storep_instr[vtype];
1309     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1310         if (op == INSTR_STOREP_FLD && what->m_fieldtype == TYPE_VECTOR)
1311             op = INSTR_STOREP_V;
1312     }
1313
1314     return ir_block_create_store_op(self, ctx, op, target, what);
1315 }
1316
1317 bool ir_block_create_return(ir_block *self, lex_ctx_t ctx, ir_value *v)
1318 {
1319     ir_instr *in;
1320     if (!ir_check_unreachable(self))
1321         return false;
1322
1323     self->m_final = true;
1324
1325     self->m_is_return = true;
1326     in = new ir_instr(ctx, self, INSTR_RETURN);
1327     if (!in)
1328         return false;
1329
1330     if (v && !ir_instr_op(in, 0, v, false)) {
1331         delete in;
1332         return false;
1333     }
1334
1335     vec_push(self->m_instr, in);
1336     return true;
1337 }
1338
1339 bool ir_block_create_if(ir_block *self, lex_ctx_t ctx, ir_value *v,
1340                         ir_block *ontrue, ir_block *onfalse)
1341 {
1342     ir_instr *in;
1343     if (!ir_check_unreachable(self))
1344         return false;
1345     self->m_final = true;
1346     /*in = new ir_instr(ctx, self, (v->m_vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/
1347     in = new ir_instr(ctx, self, VINSTR_COND);
1348     if (!in)
1349         return false;
1350
1351     if (!ir_instr_op(in, 0, v, false)) {
1352         delete in;
1353         return false;
1354     }
1355
1356     in->m_bops[0] = ontrue;
1357     in->m_bops[1] = onfalse;
1358
1359     vec_push(self->m_instr, in);
1360
1361     vec_push(self->m_exits, ontrue);
1362     vec_push(self->m_exits, onfalse);
1363     vec_push(ontrue->m_entries,  self);
1364     vec_push(onfalse->m_entries, self);
1365     return true;
1366 }
1367
1368 bool ir_block_create_jump(ir_block *self, lex_ctx_t ctx, ir_block *to)
1369 {
1370     ir_instr *in;
1371     if (!ir_check_unreachable(self))
1372         return false;
1373     self->m_final = true;
1374     in = new ir_instr(ctx, self, VINSTR_JUMP);
1375     if (!in)
1376         return false;
1377
1378     in->m_bops[0] = to;
1379     vec_push(self->m_instr, in);
1380
1381     vec_push(self->m_exits, to);
1382     vec_push(to->m_entries, self);
1383     return true;
1384 }
1385
1386 bool ir_block_create_goto(ir_block *self, lex_ctx_t ctx, ir_block *to)
1387 {
1388     self->m_owner->m_flags |= IR_FLAG_HAS_GOTO;
1389     return ir_block_create_jump(self, ctx, to);
1390 }
1391
1392 ir_instr* ir_block_create_phi(ir_block *self, lex_ctx_t ctx, const char *label, qc_type ot)
1393 {
1394     ir_value *out;
1395     ir_instr *in;
1396     if (!ir_check_unreachable(self))
1397         return nullptr;
1398     in = new ir_instr(ctx, self, VINSTR_PHI);
1399     if (!in)
1400         return nullptr;
1401     out = new ir_value(self->m_owner, label ? label : "", store_value, ot);
1402     if (!out) {
1403         delete in;
1404         return nullptr;
1405     }
1406     if (!ir_instr_op(in, 0, out, true)) {
1407         delete in;
1408         return nullptr;
1409     }
1410     vec_push(self->m_instr, in);
1411     return in;
1412 }
1413
1414 ir_value* ir_phi_value(ir_instr *self)
1415 {
1416     return self->_m_ops[0];
1417 }
1418
1419 void ir_phi_add(ir_instr* self, ir_block *b, ir_value *v)
1420 {
1421     ir_phi_entry_t pe;
1422
1423     if (!vec_ir_block_find(self->m_owner->m_entries, b, nullptr)) {
1424         // It must not be possible to cause this; otherwise the AST
1425         // is doing something wrong.
1426         irerror(self->m_context, "Invalid entry block for PHI");
1427         exit(EXIT_FAILURE);
1428     }
1429
1430     pe.value = v;
1431     pe.from = b;
1432     v->m_reads.push_back(self);
1433     self->m_phi.push_back(pe);
1434 }
1435
1436 /* call related code */
1437 ir_instr* ir_block_create_call(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *func, bool noreturn)
1438 {
1439     ir_value *out;
1440     ir_instr *in;
1441     if (!ir_check_unreachable(self))
1442         return nullptr;
1443     in = new ir_instr(ctx, self, (noreturn ? VINSTR_NRCALL : INSTR_CALL0));
1444     if (!in)
1445         return nullptr;
1446     if (noreturn) {
1447         self->m_final = true;
1448         self->m_is_return = true;
1449     }
1450     out = new ir_value(self->m_owner, label ? label : "", (func->m_outtype == TYPE_VOID) ? store_return : store_value, func->m_outtype);
1451     if (!out) {
1452         delete in;
1453         return nullptr;
1454     }
1455     if (!ir_instr_op(in, 0, out, true) ||
1456         !ir_instr_op(in, 1, func, false))
1457     {
1458         delete in;
1459         return nullptr;
1460     }
1461     vec_push(self->m_instr, in);
1462     /*
1463     if (noreturn) {
1464         if (!ir_block_create_return(self, ctx, nullptr)) {
1465             compile_error(ctx, "internal error: failed to generate dummy-return instruction");
1466             delete in;
1467             return nullptr;
1468         }
1469     }
1470     */
1471     return in;
1472 }
1473
1474 ir_value* ir_call_value(ir_instr *self)
1475 {
1476     return self->_m_ops[0];
1477 }
1478
1479 void ir_call_param(ir_instr* self, ir_value *v)
1480 {
1481     self->m_params.push_back(v);
1482     v->m_reads.push_back(self);
1483 }
1484
1485 /* binary op related code */
1486
1487 ir_value* ir_block_create_binop(ir_block *self, lex_ctx_t ctx,
1488                                 const char *label, int opcode,
1489                                 ir_value *left, ir_value *right)
1490 {
1491     qc_type ot = TYPE_VOID;
1492     switch (opcode) {
1493         case INSTR_ADD_F:
1494         case INSTR_SUB_F:
1495         case INSTR_DIV_F:
1496         case INSTR_MUL_F:
1497         case INSTR_MUL_V:
1498         case INSTR_AND:
1499         case INSTR_OR:
1500 #if 0
1501         case INSTR_AND_I:
1502         case INSTR_AND_IF:
1503         case INSTR_AND_FI:
1504         case INSTR_OR_I:
1505         case INSTR_OR_IF:
1506         case INSTR_OR_FI:
1507 #endif
1508         case INSTR_BITAND:
1509         case INSTR_BITOR:
1510         case VINSTR_BITXOR:
1511 #if 0
1512         case INSTR_SUB_S: /* -- offset of string as float */
1513         case INSTR_MUL_IF:
1514         case INSTR_MUL_FI:
1515         case INSTR_DIV_IF:
1516         case INSTR_DIV_FI:
1517         case INSTR_BITOR_IF:
1518         case INSTR_BITOR_FI:
1519         case INSTR_BITAND_FI:
1520         case INSTR_BITAND_IF:
1521         case INSTR_EQ_I:
1522         case INSTR_NE_I:
1523 #endif
1524             ot = TYPE_FLOAT;
1525             break;
1526 #if 0
1527         case INSTR_ADD_I:
1528         case INSTR_ADD_IF:
1529         case INSTR_ADD_FI:
1530         case INSTR_SUB_I:
1531         case INSTR_SUB_FI:
1532         case INSTR_SUB_IF:
1533         case INSTR_MUL_I:
1534         case INSTR_DIV_I:
1535         case INSTR_BITAND_I:
1536         case INSTR_BITOR_I:
1537         case INSTR_XOR_I:
1538         case INSTR_RSHIFT_I:
1539         case INSTR_LSHIFT_I:
1540             ot = TYPE_INTEGER;
1541             break;
1542 #endif
1543         case INSTR_ADD_V:
1544         case INSTR_SUB_V:
1545         case INSTR_MUL_VF:
1546         case INSTR_MUL_FV:
1547         case VINSTR_BITAND_V:
1548         case VINSTR_BITOR_V:
1549         case VINSTR_BITXOR_V:
1550         case VINSTR_BITAND_VF:
1551         case VINSTR_BITOR_VF:
1552         case VINSTR_BITXOR_VF:
1553         case VINSTR_CROSS:
1554 #if 0
1555         case INSTR_DIV_VF:
1556         case INSTR_MUL_IV:
1557         case INSTR_MUL_VI:
1558 #endif
1559             ot = TYPE_VECTOR;
1560             break;
1561 #if 0
1562         case INSTR_ADD_SF:
1563             ot = TYPE_POINTER;
1564             break;
1565 #endif
1566     /*
1567      * after the following default case, the value of opcode can never
1568      * be 1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65
1569      */
1570         default:
1571             /* ranges: */
1572             /* boolean operations result in floats */
1573
1574             /*
1575              * "opcode >= 10" takes the true branch: opcode is at least 10
1576              * "opcode <= 23" takes the false branch: opcode is at least 24
1577              */
1578             if (opcode >= INSTR_EQ_F && opcode <= INSTR_GT)
1579                 ot = TYPE_FLOAT;
1580
1581             /*
1582              * At condition "opcode <= 23", the value of "opcode" must be
1583              * at least 24.
1584              * At condition "opcode <= 23", the value of "opcode" cannot be
1585              * equal to any of {1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65}.
1586              * The condition "opcode <= 23" cannot be true.
1587              *
1588              * Thus ot=2 (TYPE_FLOAT) can never be true
1589              */
1590 #if 0
1591             else if (opcode >= INSTR_LE && opcode <= INSTR_GT)
1592                 ot = TYPE_FLOAT;
1593             else if (opcode >= INSTR_LE_I && opcode <= INSTR_EQ_FI)
1594                 ot = TYPE_FLOAT;
1595 #endif
1596             break;
1597     };
1598     if (ot == TYPE_VOID) {
1599         /* The AST or parser were supposed to check this! */
1600         return nullptr;
1601     }
1602
1603     return ir_block_create_general_instr(self, ctx, label, opcode, left, right, ot);
1604 }
1605
1606 ir_value* ir_block_create_unary(ir_block *self, lex_ctx_t ctx,
1607                                 const char *label, int opcode,
1608                                 ir_value *operand)
1609 {
1610     qc_type ot = TYPE_FLOAT;
1611     switch (opcode) {
1612         case INSTR_NOT_F:
1613         case INSTR_NOT_V:
1614         case INSTR_NOT_S:
1615         case INSTR_NOT_ENT:
1616         case INSTR_NOT_FNC: /*
1617         case INSTR_NOT_I:   */
1618             ot = TYPE_FLOAT;
1619             break;
1620
1621         /*
1622          * Negation for virtual instructions is emulated as (0 - value). Thankfully
1623          * the operand for 0 already exists, so we just source it from here.
1624          */
1625         case VINSTR_NEG_F:
1626             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_F, nullptr, operand, ot);
1627         case VINSTR_NEG_V:
1628             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_V, nullptr, operand, TYPE_VECTOR);
1629
1630         default:
1631             ot = operand->m_vtype;
1632             break;
1633     };
1634     if (ot == TYPE_VOID) {
1635         /* The AST or parser were supposed to check this! */
1636         return nullptr;
1637     }
1638
1639     /* let's use the general instruction creator and pass nullptr for OPB */
1640     return ir_block_create_general_instr(self, ctx, label, opcode, operand, nullptr, ot);
1641 }
1642
1643 static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t ctx, const char *label,
1644                                         int op, ir_value *a, ir_value *b, qc_type outype)
1645 {
1646     ir_instr *instr;
1647     ir_value *out;
1648
1649     out = new ir_value(self->m_owner, label ? label : "", store_value, outype);
1650     if (!out)
1651         return nullptr;
1652
1653     instr = new ir_instr(ctx, self, op);
1654     if (!instr) {
1655         return nullptr;
1656     }
1657
1658     if (!ir_instr_op(instr, 0, out, true) ||
1659         !ir_instr_op(instr, 1, a, false) ||
1660         !ir_instr_op(instr, 2, b, false) )
1661     {
1662         goto on_error;
1663     }
1664
1665     vec_push(self->m_instr, instr);
1666
1667     return out;
1668 on_error:
1669     delete instr;
1670     return nullptr;
1671 }
1672
1673 ir_value* ir_block_create_fieldaddress(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field)
1674 {
1675     ir_value *v;
1676
1677     /* TODO: support for various pointer types, if so desired */
1678     if (ent->m_vtype != TYPE_ENTITY)
1679         return nullptr;
1680
1681     if (field->m_vtype != TYPE_FIELD)
1682         return nullptr;
1683
1684     v = ir_block_create_general_instr(self, ctx, label, INSTR_ADDRESS, ent, field, TYPE_POINTER);
1685     v->m_fieldtype = field->m_fieldtype;
1686     return v;
1687 }
1688
1689 ir_value* ir_block_create_load_from_ent(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field, qc_type outype)
1690 {
1691     int op;
1692     if (ent->m_vtype != TYPE_ENTITY)
1693         return nullptr;
1694
1695     /* at some point we could redirect for TYPE_POINTER... but that could lead to carelessness */
1696     if (field->m_vtype != TYPE_FIELD)
1697         return nullptr;
1698
1699     switch (outype)
1700     {
1701         case TYPE_FLOAT:    op = INSTR_LOAD_F;   break;
1702         case TYPE_VECTOR:   op = INSTR_LOAD_V;   break;
1703         case TYPE_STRING:   op = INSTR_LOAD_S;   break;
1704         case TYPE_FIELD:    op = INSTR_LOAD_FLD; break;
1705         case TYPE_ENTITY:   op = INSTR_LOAD_ENT; break;
1706         case TYPE_FUNCTION: op = INSTR_LOAD_FNC; break;
1707 #if 0
1708         case TYPE_POINTER: op = INSTR_LOAD_I;   break;
1709         case TYPE_INTEGER: op = INSTR_LOAD_I;   break;
1710 #endif
1711         default:
1712             irerror(self->m_context, "invalid type for ir_block_create_load_from_ent: %s", type_name[outype]);
1713             return nullptr;
1714     }
1715
1716     return ir_block_create_general_instr(self, ctx, label, op, ent, field, outype);
1717 }
1718
1719 /* PHI resolving breaks the SSA, and must thus be the last
1720  * step before life-range calculation.
1721  */
1722
1723 static bool ir_block_naive_phi(ir_block *self);
1724 bool ir_function_naive_phi(ir_function *self)
1725 {
1726     for (auto& b : self->m_blocks)
1727         if (!ir_block_naive_phi(b.get()))
1728             return false;
1729     return true;
1730 }
1731
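/* Naive PHI lowering: for each PHI, if an incoming value is an SSA temp
 * with exactly one read and one write, retarget that write to the PHI's
 * output; otherwise insert a store to the PHI's output right before the
 * predecessor block's terminating jump.
 */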
1732 static bool ir_block_naive_phi(ir_block *self)
1733 {
1734     size_t i;
1735     /* FIXME: optionally, create_phi can add the phis
1736      * to a list so we don't need to loop through blocks
1737      * - anyway: "don't optimize YET"
1738      */
1739     for (i = 0; i < vec_size(self->m_instr); ++i)
1740     {
1741         ir_instr *instr = self->m_instr[i];
1742         if (instr->m_opcode != VINSTR_PHI)
1743             continue;
1744
1745         vec_remove(self->m_instr, i, 1);
1746         --i; /* NOTE: i+1 below */
1747
1748         for (auto &it : instr->m_phi) {
1749             ir_value *v = it.value;
1750             ir_block *b = it.from;
1751             if (v->m_store == store_value && v->m_reads.size() == 1 && v->m_writes.size() == 1) {
1752                 /* replace the value */
1753                 if (!ir_instr_op(v->m_writes[0], 0, instr->_m_ops[0], true))
1754                     return false;
1755             } else {
1756                 /* force a move instruction */
1757                 ir_instr *prevjump = vec_last(b->m_instr);
1758                 vec_pop(b->m_instr);
1759                 b->m_final = false;
1760                 instr->_m_ops[0]->m_store = store_global;
1761                 if (!ir_block_create_store(b, instr->m_context, instr->_m_ops[0], v))
1762                     return false;
1763                 instr->_m_ops[0]->m_store = store_value;
1764                 vec_push(b->m_instr, prevjump);
1765                 b->m_final = true;
1766             }
1767         }
1768         delete instr;
1769     }
1770     return true;
1771 }
1772
1773 /***********************************************************************
1774  * IR Temp allocation code
1775  * Propagate value life ranges by walking through the function backwards
1776  * until no more changes are made.
1777  * In theory this needs to run once more than the deepest loop-nesting
1778  * level, though this implementation might run an additional time for
1779  * nested ifs.
1780  */
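/* Rough illustration of the fixpoint (made-up block names): in a loop
 *     B1 -> B2 -> B3 -> B2 (back edge), B2 -> B4
 * liveness flowing around the back edge only reaches the blocks before
 * it on the next backwards pass, so each level of loop nesting tends to
 * cost one extra iteration before nothing changes anymore.
 */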
1781
1782 /* Enumerate instructions used by value's life-ranges
1783  */
1784 static void ir_block_enumerate(ir_block *self, size_t *_eid)
1785 {
1786     size_t i;
1787     size_t eid = *_eid;
1788     for (i = 0; i < vec_size(self->m_instr); ++i)
1789     {
1790         self->m_instr[i]->m_eid = eid++;
1791     }
1792     *_eid = eid;
1793 }
1794
1795 /* Enumerate blocks and instructions.
1796  * The block-enumeration is unordered!
1797  * We do not really use the block enumeration, however
1798  * the instruction enumeration is important for life-ranges.
1799  */
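/* For illustration: with two blocks of 2 and 3 instructions the ids are
 *     B0: entry_id = 0, instruction eids 1, 2
 *     B1: entry_id = 3, instruction eids 4, 5, 6
 * i.e. every block boundary owns an id of its own (the "entry" id used
 * later to avoid point-life issues).
 */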
1800 void ir_function_enumerate(ir_function *self)
1801 {
1802     size_t instruction_id = 0;
1803     size_t block_eid = 0;
1804     for (auto& block : self->m_blocks)
1805     {
1806         /* each block now gets an additional "entry" instruction id
1807          * we can use to avoid point-life issues
1808          */
1809         block->m_entry_id = instruction_id;
1810         block->m_eid      = block_eid;
1811         ++instruction_id;
1812         ++block_eid;
1813
1814         ir_block_enumerate(block.get(), &instruction_id);
1815     }
1816 }
1817
1818 /* Local-value allocator
1819  * After the life-ranges of all values used in a function have been
1820  * calculated, we can allocate their global positions.
1821  * This is the counterpart to register allocation on register machines.
1822  */
1823 struct function_allocator {
1824     ir_value **locals;
1825     size_t *sizes;
1826     size_t *positions;
1827     bool *unique;
1828 };
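/* Illustrative sketch (made-up values): given life-ranges
 *     a: [1,5]   b: [3,8]   c: [6,9]
 * 'a' gets slot 0, 'b' overlaps it and opens slot 1, and 'c' can be
 * merged back into slot 0 since its range does not overlap 'a'.
 * The number of slots thus roughly tracks how many values are live at
 * the same time, analogous to registers in a register allocator.
 */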
1829
1830 static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
1831 {
1832     ir_value *slot;
1833     size_t vsize = var->size();
1834
1835     var->m_code.local = vec_size(alloc->locals);
1836
1837     slot = new ir_value("reg", store_global, var->m_vtype);
1838     if (!slot)
1839         return false;
1840
1841     if (!slot->mergeLife(var))
1842         goto localerror;
1843
1844     vec_push(alloc->locals, slot);
1845     vec_push(alloc->sizes, vsize);
1846     vec_push(alloc->unique, var->m_unique_life);
1847
1848     return true;
1849
1850 localerror:
1851     delete slot;
1852     return false;
1853 }
1854
1855 static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
1856 {
1857     size_t a;
1858     ir_value *slot;
1859
1860     if (v->m_unique_life)
1861         return function_allocator_alloc(alloc, v);
1862
1863     for (a = 0; a < vec_size(alloc->locals); ++a)
1864     {
1865         /* if it's reserved for a unique liferange: skip */
1866         if (alloc->unique[a])
1867             continue;
1868
1869         slot = alloc->locals[a];
1870
1871         /* never resize parameters;
1872          * this will be required later when overlapping temps + locals
1873          */
1874         if (a < vec_size(self->m_params) &&
1875             alloc->sizes[a] < v->size())
1876         {
1877             continue;
1878         }
1879
1880         if (ir_values_overlap(v, slot))
1881             continue;
1882
1883         if (!slot->mergeLife(v))
1884             return false;
1885
1886         /* adjust size for this slot */
1887         if (alloc->sizes[a] < v->size())
1888             alloc->sizes[a] = v->size();
1889
1890         v->m_code.local = a;
1891         return true;
1892     }
1893     if (a >= vec_size(alloc->locals)) {
1894         if (!function_allocator_alloc(alloc, v))
1895             return false;
1896     }
1897     return true;
1898 }
1899
1900 bool ir_function_allocate_locals(ir_function *self)
1901 {
1902     bool   retval = true;
1903     size_t pos;
1904     bool   opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
1905
1906     function_allocator lockalloc, globalloc;
1907
1908     if (self->m_locals.empty() && self->m_values.empty())
1909         return true;
1910
1911     globalloc.locals    = nullptr;
1912     globalloc.sizes     = nullptr;
1913     globalloc.positions = nullptr;
1914     globalloc.unique    = nullptr;
1915     lockalloc.locals    = nullptr;
1916     lockalloc.sizes     = nullptr;
1917     lockalloc.positions = nullptr;
1918     lockalloc.unique    = nullptr;
1919
1920     size_t i;
1921     for (i = 0; i < self->m_locals.size(); ++i)
1922     {
1923         ir_value *v = self->m_locals[i].get();
1924         if ((self->m_flags & IR_FLAG_MASK_NO_LOCAL_TEMPS) || !OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) {
1925             v->m_locked      = true;
1926             v->m_unique_life = true;
1927         }
1928         else if (i >= vec_size(self->m_params))
1929             break;
1930         else
1931             v->m_locked = true; /* lock parameter locals */
1932         if (!function_allocator_alloc((v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1933             goto error;
1934     }
1935     for (; i < self->m_locals.size(); ++i)
1936     {
1937         ir_value *v = self->m_locals[i].get();
1938         if (v->m_life.empty())
1939             continue;
1940         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1941             goto error;
1942     }
1943
1944     /* Allocate a slot for any value that still exists */
1945     for (i = 0; i < self->m_values.size(); ++i)
1946     {
1947         ir_value *v = self->m_values[i].get();
1948
1949         if (v->m_life.empty())
1950             continue;
1951
1952         /* CALL optimization:
1953          * If the value is a parameter-temp: 1 write, 1 read from a CALL
1954          * and it's not "locked", write it to the OFS_PARM directly.
1955          */
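        /* For illustration (hypothetical source): in
         *     foo(x + 1);
         * the temp holding 'x + 1' has one write (the ADD) and one read
         * (the CALL), so instead of getting a slot and a STORE into the
         * parameter area it is computed straight into OFS_PARM0..7, or
         * into an EXTPARM#n proto for arguments past the 8th.
         */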
1956         if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->m_locked && !v->m_unique_life) {
1957             if (v->m_reads.size() == 1 && v->m_writes.size() == 1 &&
1958                 (v->m_reads[0]->m_opcode == VINSTR_NRCALL ||
1959                  (v->m_reads[0]->m_opcode >= INSTR_CALL0 && v->m_reads[0]->m_opcode <= INSTR_CALL8)
1960                 )
1961                )
1962             {
1963                 size_t param;
1964                 ir_instr *call = v->m_reads[0];
1965                 if (!vec_ir_value_find(call->m_params, v, &param)) {
1966                     irerror(call->m_context, "internal error: unlocked parameter %s not found", v->m_name.c_str());
1967                     goto error;
1968                 }
1969                 ++opts_optimizationcount[OPTIM_CALL_STORES];
1970                 v->m_callparam = true;
1971                 if (param < 8)
1972                     v->setCodeAddress(OFS_PARM0 + 3*param);
1973                 else {
1974                     size_t nprotos = self->m_owner->m_extparam_protos.size();
1975                     ir_value *ep;
1976                     param -= 8;
1977                     if (nprotos > param)
1978                         ep = self->m_owner->m_extparam_protos[param].get();
1979                     else
1980                     {
1981                         ep = self->m_owner->generateExtparamProto();
1982                         while (++nprotos <= param)
1983                             ep = self->m_owner->generateExtparamProto();
1984                     }
1985                     ir_instr_op(v->m_writes[0], 0, ep, true);
1986                     call->m_params[param+8] = ep;
1987                 }
1988                 continue;
1989             }
1990             if (v->m_writes.size() == 1 && v->m_writes[0]->m_opcode == INSTR_CALL0) {
1991                 v->m_store = store_return;
1992                 if (v->m_members[0]) v->m_members[0]->m_store = store_return;
1993                 if (v->m_members[1]) v->m_members[1]->m_store = store_return;
1994                 if (v->m_members[2]) v->m_members[2]->m_store = store_return;
1995                 ++opts_optimizationcount[OPTIM_CALL_STORES];
1996                 continue;
1997             }
1998         }
1999
2000         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
2001             goto error;
2002     }
2003
2004     if (!lockalloc.sizes && !globalloc.sizes) {
2005         goto cleanup;
2006     }
2007     vec_push(lockalloc.positions, 0);
2008     vec_push(globalloc.positions, 0);
2009
2010     /* Adjust slot positions based on sizes */
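    /* For illustration: slot sizes [1, 3, 1] yield positions [0, 1, 4]
     * (a running prefix sum), and the allocated area becomes
     * last position + last size = 4 + 1 = 5 globals.
     */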
2011     if (lockalloc.sizes) {
2012         pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
2013         for (i = 1; i < vec_size(lockalloc.sizes); ++i)
2014         {
2015             pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
2016             vec_push(lockalloc.positions, pos);
2017         }
2018         self->m_allocated_locals = pos + vec_last(lockalloc.sizes);
2019     }
2020     if (globalloc.sizes) {
2021         pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
2022         for (i = 1; i < vec_size(globalloc.sizes); ++i)
2023         {
2024             pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
2025             vec_push(globalloc.positions, pos);
2026         }
2027         self->m_globaltemps = pos + vec_last(globalloc.sizes);
2028     }
2029
2030     /* Locals need to know their new position */
2031     for (auto& local : self->m_locals) {
2032         if (local->m_locked || !opt_gt)
2033             local->m_code.local = lockalloc.positions[local->m_code.local];
2034         else
2035             local->m_code.local = globalloc.positions[local->m_code.local];
2036     }
2037     /* Take over the actual slot positions on values */
2038     for (auto& value : self->m_values) {
2039         if (value->m_locked || !opt_gt)
2040             value->m_code.local = lockalloc.positions[value->m_code.local];
2041         else
2042             value->m_code.local = globalloc.positions[value->m_code.local];
2043     }
2044
2045     goto cleanup;
2046
2047 error:
2048     retval = false;
2049 cleanup:
2050     for (i = 0; i < vec_size(lockalloc.locals); ++i)
2051         delete lockalloc.locals[i];
2052     for (i = 0; i < vec_size(globalloc.locals); ++i)
2053         delete globalloc.locals[i];
2054     vec_free(globalloc.unique);
2055     vec_free(globalloc.locals);
2056     vec_free(globalloc.sizes);
2057     vec_free(globalloc.positions);
2058     vec_free(lockalloc.unique);
2059     vec_free(lockalloc.locals);
2060     vec_free(lockalloc.sizes);
2061     vec_free(lockalloc.positions);
2062     return retval;
2063 }
2064
2065 /* Get information about which operand
2066  * is read from, or written to.
2067  */
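/* The masks are bitfields over the three operand slots: bit (1 << o)
 * set in *read / *write means operand o is read / written.  E.g. the
 * STOREP_* cases set read = 7 (all three operands are inputs) and
 * write = 0, while the default is write = 1 (operand 0 is the
 * destination) and read = 6 (operands 1 and 2 are sources).
 */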
2068 static void ir_op_read_write(int op, size_t *read, size_t *write)
2069 {
2070     switch (op)
2071     {
2072     case VINSTR_JUMP:
2073     case INSTR_GOTO:
2074         *write = 0;
2075         *read = 0;
2076         break;
2077     case INSTR_IF:
2078     case INSTR_IFNOT:
2079 #if 0
2080     case INSTR_IF_S:
2081     case INSTR_IFNOT_S:
2082 #endif
2083     case INSTR_RETURN:
2084     case VINSTR_COND:
2085         *write = 0;
2086         *read = 1;
2087         break;
2088     case INSTR_STOREP_F:
2089     case INSTR_STOREP_V:
2090     case INSTR_STOREP_S:
2091     case INSTR_STOREP_ENT:
2092     case INSTR_STOREP_FLD:
2093     case INSTR_STOREP_FNC:
2094         *write = 0;
2095         *read  = 7;
2096         break;
2097     default:
2098         *write = 1;
2099         *read = 6;
2100         break;
2101     }
2102 }
2103
2104 static bool ir_block_living_add_instr(ir_block *self, size_t eid) {
2105     bool changed = false;
2106     for (auto &it : self->m_living)
2107         if (it->setAlive(eid))
2108             changed = true;
2109     return changed;
2110 }
2111
2112 static bool ir_block_living_lock(ir_block *self) {
2113     bool changed = false;
2114     for (auto &it : self->m_living) {
2115         if (it->m_locked)
2116             continue;
2117         it->m_locked = true;
2118         changed = true;
2119     }
2120     return changed;
2121 }
2122
2123 static bool ir_block_life_propagate(ir_block *self, bool *changed)
2124 {
2125     ir_instr *instr;
2126     ir_value *value;
2127     size_t i, o, p, mem;
2128     // bitmasks telling which operands are read from or written to
2129     size_t read, write;
2130
2131     self->m_living.clear();
2132
2133     p = vec_size(self->m_exits);
2134     for (i = 0; i < p; ++i) {
2135         ir_block *prev = self->m_exits[i];
2136         for (auto &it : prev->m_living)
2137             if (!vec_ir_value_find(self->m_living, it, nullptr))
2138                 self->m_living.push_back(it);
2139     }
2140
2141     i = vec_size(self->m_instr);
2142     while (i)
2143     { --i;
2144         instr = self->m_instr[i];
2145
2146         /* See which operands are read and write operands */
2147         ir_op_read_write(instr->m_opcode, &read, &write);
2148
2149         /* Go through the 3 main operands
2150          * writes first, then reads
2151          */
2152         for (o = 0; o < 3; ++o)
2153         {
2154             if (!instr->_m_ops[o]) /* no such operand */
2155                 continue;
2156
2157             value = instr->_m_ops[o];
2158
2159             /* We only care about locals */
2160             /* we also calculate parameter liferanges so that locals
2161              * can take up parameter slots */
2162             if (value->m_store != store_value &&
2163                 value->m_store != store_local &&
2164                 value->m_store != store_param)
2165                 continue;
2166
2167             /* write operands */
2168             /* When we write to a local, we consider it "dead" for the
2169              * remaining upper part of the function, since in SSA a value
2170              * can only be written once (== created)
2171              */
2172             if (write & (1<<o))
2173             {
2174                 size_t idx;
2175                 bool in_living = vec_ir_value_find(self->m_living, value, &idx);
2176                 if (!in_living)
2177                 {
2178                     /* If the value isn't alive it hasn't been read before... */
2179                     /* TODO: See if the warning can be emitted during parsing or AST processing
2180                      * otherwise have warning printed here.
2181                      * IF printing a warning here: include filecontext_t,
2182                      * and make sure it's only printed once
2183                      * since this function is run multiple times.
2184                      */
2185                     /* con_err( "Value only written %s\n", value->m_name); */
2186                     if (value->setAlive(instr->m_eid))
2187                         *changed = true;
2188                 } else {
2189                     /* 'living' no longer contains this value,
2190                      * so merge its life-range here, because
2191                      * step (A) below won't.
2192                      */
2193                     if (value->setAlive(instr->m_eid))
2194                         *changed = true;
2195                     // Then remove
2196                     self->m_living.erase(self->m_living.begin() + idx);
2197                 }
2198                 /* Removing a vector removes all members */
2199                 for (mem = 0; mem < 3; ++mem) {
2200                     if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], &idx)) {
2201                         if (value->m_members[mem]->setAlive(instr->m_eid))
2202                             *changed = true;
2203                         self->m_living.erase(self->m_living.begin() + idx);
2204                     }
2205                 }
2206                 /* Removing the last member removes the vector */
2207                 if (value->m_memberof) {
2208                     value = value->m_memberof;
2209                     for (mem = 0; mem < 3; ++mem) {
2210                         if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2211                             break;
2212                     }
2213                     if (mem == 3 && vec_ir_value_find(self->m_living, value, &idx)) {
2214                         if (value->setAlive(instr->m_eid))
2215                             *changed = true;
2216                         self->m_living.erase(self->m_living.begin() + idx);
2217                     }
2218                 }
2219             }
2220         }
2221
2222         /* These operations need a special case: they can break when the
2223          * same value is used as source and destination, because the engine
2224          * may read the source multiple times. */
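        /* For illustration: with MUL_VF the engine may re-read the float
         * source while it writes the vector destination; if the allocator
         * had placed that float inside the destination's slot range, a
         * later read would see an already overwritten component.  Giving
         * the source one extra instruction of life below keeps their
         * slots from being shared.
         */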
2225         if (instr->m_opcode == INSTR_MUL_VF ||
2226             instr->m_opcode == VINSTR_BITAND_VF ||
2227             instr->m_opcode == VINSTR_BITOR_VF ||
2228             instr->m_opcode == VINSTR_BITXOR ||
2229             instr->m_opcode == VINSTR_BITXOR_VF ||
2230             instr->m_opcode == VINSTR_BITXOR_V ||
2231             instr->m_opcode == VINSTR_CROSS)
2232         {
2233             value = instr->_m_ops[2];
2234             /* this source operand will get an additional lifetime */
2235             if (value->setAlive(instr->m_eid+1))
2236                 *changed = true;
2237             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2238                 *changed = true;
2239         }
2240
2241         if (instr->m_opcode == INSTR_MUL_FV ||
2242             instr->m_opcode == INSTR_LOAD_V ||
2243             instr->m_opcode == VINSTR_BITXOR ||
2244             instr->m_opcode == VINSTR_BITXOR_VF ||
2245             instr->m_opcode == VINSTR_BITXOR_V ||
2246             instr->m_opcode == VINSTR_CROSS)
2247         {
2248             value = instr->_m_ops[1];
2249             /* this source operand will get an additional lifetime */
2250             if (value->setAlive(instr->m_eid+1))
2251                 *changed = true;
2252             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2253                 *changed = true;
2254         }
2255
2256         for (o = 0; o < 3; ++o)
2257         {
2258             if (!instr->_m_ops[o]) /* no such operand */
2259                 continue;
2260
2261             value = instr->_m_ops[o];
2262
2263             /* We only care about locals */
2264             /* we also calculate parameter liferanges so that locals
2265              * can take up parameter slots */
2266             if (value->m_store != store_value &&
2267                 value->m_store != store_local &&
2268                 value->m_store != store_param)
2269                 continue;
2270
2271             /* read operands */
2272             if (read & (1<<o))
2273             {
2274                 if (!vec_ir_value_find(self->m_living, value, nullptr))
2275                     self->m_living.push_back(value);
2276                 /* reading adds the full vector */
2277                 if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2278                     self->m_living.push_back(value->m_memberof);
2279                 for (mem = 0; mem < 3; ++mem) {
2280                     if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2281                         self->m_living.push_back(value->m_members[mem]);
2282                 }
2283             }
2284         }
2285         /* PHI operands are always read operands */
2286         for (auto &it : instr->m_phi) {
2287             value = it.value;
2288             if (!vec_ir_value_find(self->m_living, value, nullptr))
2289                 self->m_living.push_back(value);
2290             /* reading adds the full vector */
2291             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2292                 self->m_living.push_back(value->m_memberof);
2293             for (mem = 0; mem < 3; ++mem) {
2294                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2295                     self->m_living.push_back(value->m_members[mem]);
2296             }
2297         }
2298
2299         /* on a call, all these values must be "locked" */
2300         if (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8) {
2301             if (ir_block_living_lock(self))
2302                 *changed = true;
2303         }
2304         /* call params are read operands too */
2305         for (auto &it : instr->m_params) {
2306             value = it;
2307             if (!vec_ir_value_find(self->m_living, value, nullptr))
2308                 self->m_living.push_back(value);
2309             /* reading adds the full vector */
2310             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2311                 self->m_living.push_back(value->m_memberof);
2312             for (mem = 0; mem < 3; ++mem) {
2313                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2314                     self->m_living.push_back(value->m_members[mem]);
2315             }
2316         }
2317
2318         /* (A) */
2319         if (ir_block_living_add_instr(self, instr->m_eid))
2320             *changed = true;
2321     }
2322     /* the "entry" instruction ID */
2323     if (ir_block_living_add_instr(self, self->m_entry_id))
2324         *changed = true;
2325
2326     return true;
2327 }
2328
2329 bool ir_function_calculate_liferanges(ir_function *self)
2330 {
2331     /* parameters live at 0 */
2332     for (size_t i = 0; i < vec_size(self->m_params); ++i)
2333         if (!self->m_locals[i].get()->setAlive(0))
2334             compile_error(self->m_context, "internal error: failed value-life merging");
2335
2336     bool changed;
2337     do {
2338         self->m_run_id++;
2339         changed = false;
2340         for (auto i = self->m_blocks.rbegin(); i != self->m_blocks.rend(); ++i)
2341             ir_block_life_propagate(i->get(), &changed);
2342     } while (changed);
2343
2344     if (self->m_blocks.size()) {
2345         ir_block *block = self->m_blocks[0].get();
2346         for (auto &it : block->m_living) {
2347             ir_value *v = it;
2348             if (v->m_store != store_local)
2349                 continue;
2350             if (v->m_vtype == TYPE_VECTOR)
2351                 continue;
2352             self->m_flags |= IR_FLAG_HAS_UNINITIALIZED;
2353             /* find the instruction reading from it */
2354             size_t s = 0;
2355             for (; s < v->m_reads.size(); ++s) {
2356                 if (v->m_reads[s]->m_eid == v->m_life[0].end)
2357                     break;
2358             }
2359             if (s < v->m_reads.size()) {
2360                 if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2361                               "variable `%s` may be used uninitialized in this function\n"
2362                               " -> %s:%i",
2363                               v->m_name.c_str(),
2364                               v->m_reads[s]->m_context.file, v->m_reads[s]->m_context.line)
2365                    )
2366                 {
2367                     return false;
2368                 }
2369                 continue;
2370             }
2371             if (v->m_memberof) {
2372                 ir_value *vec = v->m_memberof;
2373                 for (s = 0; s < vec->m_reads.size(); ++s) {
2374                     if (vec->m_reads[s]->m_eid == v->m_life[0].end)
2375                         break;
2376                 }
2377                 if (s < vec->m_reads.size()) {
2378                     if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2379                                   "variable `%s` may be used uninitialized in this function\n"
2380                                   " -> %s:%i",
2381                                   v->m_name.c_str(),
2382                                   vec->m_reads[s]->m_context.file, vec->m_reads[s]->m_context.line)
2383                        )
2384                     {
2385                         return false;
2386                     }
2387                     continue;
2388                 }
2389             }
2390             if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2391                           "variable `%s` may be used uninitialized in this function", v->m_name.c_str()))
2392             {
2393                 return false;
2394             }
2395         }
2396     }
2397     return true;
2398 }
2399
2400 /***********************************************************************
2401  * IR Code-Generation
2402  *
2403  * Since the IR has the convention of putting 'write' operands
2404  * at the beginning, we have to rotate the operands of instructions
2405  * properly in order to generate valid QCVM code.
2406  *
2407  * Having destinations at a fixed position is more convenient. In QC
2408  * this is *mostly* OPC, but FTE adds at least 2 instructions which
2409  * read from OPA and store to OPB rather than OPC, which is partially
2410  * the reason why the implementation of these instructions in
2411  * darkplaces has been delayed for so long.
2412  *
2413  * Breaking conventions is annoying...
2414  */
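/* For illustration (a minimal sketch, not a real listing): an IR
 * instruction with  _m_ops = { dest, a, b }  is emitted with its
 * operands rotated into the QCVM statement as
 *     stmt.o1 = a,  stmt.o2 = b,  stmt.o3 = dest
 * while RETURN/DONE move the value into o1 and the STORE/STOREP
 * families put the destination into o2 instead (o3 left at 0), as
 * handled at the end of gen_blocks_recursive below.
 */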
2415 static bool gen_global_field(code_t *code, ir_value *global)
2416 {
2417     if (global->m_hasvalue)
2418     {
2419         ir_value *fld = global->m_constval.vpointer;
2420         if (!fld) {
2421             irerror(global->m_context, "Invalid field constant with no field: %s", global->m_name.c_str());
2422             return false;
2423         }
2424
2425         /* copy the field's value */
2426         global->setCodeAddress(code->globals.size());
2427         code->globals.push_back(fld->m_code.fieldaddr);
2428         if (global->m_fieldtype == TYPE_VECTOR) {
2429             code->globals.push_back(fld->m_code.fieldaddr+1);
2430             code->globals.push_back(fld->m_code.fieldaddr+2);
2431         }
2432     }
2433     else
2434     {
2435         global->setCodeAddress(code->globals.size());
2436         code->globals.push_back(0);
2437         if (global->m_fieldtype == TYPE_VECTOR) {
2438             code->globals.push_back(0);
2439             code->globals.push_back(0);
2440         }
2441     }
2442     if (global->m_code.globaladdr < 0)
2443         return false;
2444     return true;
2445 }
2446
2447 static bool gen_global_pointer(code_t *code, ir_value *global)
2448 {
2449     if (global->m_hasvalue)
2450     {
2451         ir_value *target = global->m_constval.vpointer;
2452         if (!target) {
2453             irerror(global->m_context, "Invalid pointer constant: %s", global->m_name.c_str());
2454             /* nullptr pointers are pointing to the nullptr constant, which also
2455              * sits at address 0, but still has an ir_value for itself.
2456              */
2457             return false;
2458         }
2459
2460         /* Here, relocations ARE possible - in fteqcc-enhanced-qc:
2461          * void() foo; <- proto
2462          * void() *fooptr = &foo;
2463          * void() foo = { code }
2464          */
2465         if (!target->m_code.globaladdr) {
2466             /* FIXME: Check for the constant nullptr ir_value!
2467              * because then code.globaladdr being 0 is valid.
2468              */
2469             irerror(global->m_context, "FIXME: Relocation support");
2470             return false;
2471         }
2472
2473         global->setCodeAddress(code->globals.size());
2474         code->globals.push_back(target->m_code.globaladdr);
2475     }
2476     else
2477     {
2478         global->setCodeAddress(code->globals.size());
2479         code->globals.push_back(0);
2480     }
2481     if (global->m_code.globaladdr < 0)
2482         return false;
2483     return true;
2484 }
2485
2486 static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *block)
2487 {
2488     prog_section_statement_t stmt;
2489     ir_instr *instr;
2490     ir_block *target;
2491     ir_block *ontrue;
2492     ir_block *onfalse;
2493     size_t    stidx;
2494     size_t    i;
2495     int       j;
2496
2497     block->m_generated = true;
2498     block->m_code_start = code->statements.size();
2499     for (i = 0; i < vec_size(block->m_instr); ++i)
2500     {
2501         instr = block->m_instr[i];
2502
2503         if (instr->m_opcode == VINSTR_PHI) {
2504             irerror(block->m_context, "cannot generate virtual instruction (phi)");
2505             return false;
2506         }
2507
2508         if (instr->m_opcode == VINSTR_JUMP) {
2509             target = instr->m_bops[0];
2510             /* for unconditional jumps, if the target hasn't been generated
2511              * yet, we generate it right here.
2512              */
2513             if (!target->m_generated)
2514                 return gen_blocks_recursive(code, func, target);
2515
2516             /* otherwise we generate a jump instruction */
2517             stmt.opcode = INSTR_GOTO;
2518             stmt.o1.s1 = target->m_code_start - code->statements.size();
2519             stmt.o2.s1 = 0;
2520             stmt.o3.s1 = 0;
2521             if (stmt.o1.s1 != 1)
2522                 code_push_statement(code, &stmt, instr->m_context);
2523
2524             /* no further instructions can be in this block */
2525             return true;
2526         }
2527
2528         if (instr->m_opcode == VINSTR_BITXOR) {
2529             stmt.opcode = INSTR_BITOR;
2530             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2531             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2532             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2533             code_push_statement(code, &stmt, instr->m_context);
2534             stmt.opcode = INSTR_BITAND;
2535             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2536             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2537             stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2538             code_push_statement(code, &stmt, instr->m_context);
2539             stmt.opcode = INSTR_SUB_F;
2540             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2541             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2542             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2543             code_push_statement(code, &stmt, instr->m_context);
2544
2545             /* instruction generated */
2546             continue;
2547         }
2548
2549         if (instr->m_opcode == VINSTR_BITAND_V) {
2550             stmt.opcode = INSTR_BITAND;
2551             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2552             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2553             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2554             code_push_statement(code, &stmt, instr->m_context);
2555             ++stmt.o1.s1;
2556             ++stmt.o2.s1;
2557             ++stmt.o3.s1;
2558             code_push_statement(code, &stmt, instr->m_context);
2559             ++stmt.o1.s1;
2560             ++stmt.o2.s1;
2561             ++stmt.o3.s1;
2562             code_push_statement(code, &stmt, instr->m_context);
2563
2564             /* instruction generated */
2565             continue;
2566         }
2567
2568         if (instr->m_opcode == VINSTR_BITOR_V) {
2569             stmt.opcode = INSTR_BITOR;
2570             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2571             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2572             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2573             code_push_statement(code, &stmt, instr->m_context);
2574             ++stmt.o1.s1;
2575             ++stmt.o2.s1;
2576             ++stmt.o3.s1;
2577             code_push_statement(code, &stmt, instr->m_context);
2578             ++stmt.o1.s1;
2579             ++stmt.o2.s1;
2580             ++stmt.o3.s1;
2581             code_push_statement(code, &stmt, instr->m_context);
2582
2583             /* instruction generated */
2584             continue;
2585         }
2586
2587         if (instr->m_opcode == VINSTR_BITXOR_V) {
2588             for (j = 0; j < 3; ++j) {
2589                 stmt.opcode = INSTR_BITOR;
2590                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2591                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2592                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2593                 code_push_statement(code, &stmt, instr->m_context);
2594                 stmt.opcode = INSTR_BITAND;
2595                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2596                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2597                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2598                 code_push_statement(code, &stmt, instr->m_context);
2599             }
2600             stmt.opcode = INSTR_SUB_V;
2601             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2602             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2603             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2604             code_push_statement(code, &stmt, instr->m_context);
2605
2606             /* instruction generated */
2607             continue;
2608         }
2609
2610         if (instr->m_opcode == VINSTR_BITAND_VF) {
2611             stmt.opcode = INSTR_BITAND;
2612             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2613             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2614             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2615             code_push_statement(code, &stmt, instr->m_context);
2616             ++stmt.o1.s1;
2617             ++stmt.o3.s1;
2618             code_push_statement(code, &stmt, instr->m_context);
2619             ++stmt.o1.s1;
2620             ++stmt.o3.s1;
2621             code_push_statement(code, &stmt, instr->m_context);
2622
2623             /* instruction generated */
2624             continue;
2625         }
2626
2627         if (instr->m_opcode == VINSTR_BITOR_VF) {
2628             stmt.opcode = INSTR_BITOR;
2629             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2630             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2631             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2632             code_push_statement(code, &stmt, instr->m_context);
2633             ++stmt.o1.s1;
2634             ++stmt.o3.s1;
2635             code_push_statement(code, &stmt, instr->m_context);
2636             ++stmt.o1.s1;
2637             ++stmt.o3.s1;
2638             code_push_statement(code, &stmt, instr->m_context);
2639
2640             /* instruction generated */
2641             continue;
2642         }
2643
2644         if (instr->m_opcode == VINSTR_BITXOR_VF) {
2645             for (j = 0; j < 3; ++j) {
2646                 stmt.opcode = INSTR_BITOR;
2647                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2648                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2649                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2650                 code_push_statement(code, &stmt, instr->m_context);
2651                 stmt.opcode = INSTR_BITAND;
2652                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2653                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2654                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2655                 code_push_statement(code, &stmt, instr->m_context);
2656             }
2657             stmt.opcode = INSTR_SUB_V;
2658             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2659             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2660             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2661             code_push_statement(code, &stmt, instr->m_context);
2662
2663             /* instruction generated */
2664             continue;
2665         }
2666
2667         if (instr->m_opcode == VINSTR_CROSS) {
2668             stmt.opcode = INSTR_MUL_F;
2669             for (j = 0; j < 3; ++j) {
2670                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 1) % 3;
2671                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 2) % 3;
2672                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2673                 code_push_statement(code, &stmt, instr->m_context);
2674                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 2) % 3;
2675                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 1) % 3;
2676                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2677                 code_push_statement(code, &stmt, instr->m_context);
2678             }
2679             stmt.opcode = INSTR_SUB_V;
2680             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2681             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2682             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2683             code_push_statement(code, &stmt, instr->m_context);
2684
2685             /* instruction generated */
2686             continue;
2687         }
2688
2689         if (instr->m_opcode == VINSTR_COND) {
2690             ontrue  = instr->m_bops[0];
2691             onfalse = instr->m_bops[1];
2692             /* TODO: have the AST signal which block should
2693              * come first: eg. optimize IFs without ELSE...
2694              */
2695
2696             stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2697             stmt.o2.u1 = 0;
2698             stmt.o3.s1 = 0;
2699
2700             if (ontrue->m_generated) {
2701                 stmt.opcode = INSTR_IF;
2702                 stmt.o2.s1 = ontrue->m_code_start - code->statements.size();
2703                 if (stmt.o2.s1 != 1)
2704                     code_push_statement(code, &stmt, instr->m_context);
2705             }
2706             if (onfalse->m_generated) {
2707                 stmt.opcode = INSTR_IFNOT;
2708                 stmt.o2.s1 = onfalse->m_code_start - code->statements.size();
2709                 if (stmt.o2.s1 != 1)
2710                     code_push_statement(code, &stmt, instr->m_context);
2711             }
2712             if (!ontrue->m_generated) {
2713                 if (onfalse->m_generated)
2714                     return gen_blocks_recursive(code, func, ontrue);
2715             }
2716             if (!onfalse->m_generated) {
2717                 if (ontrue->m_generated)
2718                     return gen_blocks_recursive(code, func, onfalse);
2719             }
2720             /* neither ontrue nor onfalse exist */
2721             stmt.opcode = INSTR_IFNOT;
2722             if (!instr->m_likely) {
2723                 /* Honor the likelihood hint */
2724                 ir_block *tmp = onfalse;
2725                 stmt.opcode = INSTR_IF;
2726                 onfalse = ontrue;
2727                 ontrue = tmp;
2728             }
2729             stidx = code->statements.size();
2730             code_push_statement(code, &stmt, instr->m_context);
2731             /* on false we jump, so add ontrue-path */
2732             if (!gen_blocks_recursive(code, func, ontrue))
2733                 return false;
2734             /* fixup the jump address */
2735             code->statements[stidx].o2.s1 = code->statements.size() - stidx;
2736             /* generate onfalse path */
2737             if (onfalse->m_generated) {
2738                 /* fixup the jump address */
2739                 code->statements[stidx].o2.s1 = onfalse->m_code_start - stidx;
2740                 if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2741                     code->statements[stidx] = code->statements[stidx+1];
2742                     if (code->statements[stidx].o1.s1 < 0)
2743                         code->statements[stidx].o1.s1++;
2744                     code_pop_statement(code);
2745                 }
2746                 stmt.opcode = code->statements.back().opcode;
2747                 if (stmt.opcode == INSTR_GOTO ||
2748                     stmt.opcode == INSTR_IF ||
2749                     stmt.opcode == INSTR_IFNOT ||
2750                     stmt.opcode == INSTR_RETURN ||
2751                     stmt.opcode == INSTR_DONE)
2752                 {
2753                     /* no use jumping from here */
2754                     return true;
2755                 }
2756                 /* may have been generated in the previous recursive call */
2757                 stmt.opcode = INSTR_GOTO;
2758                 stmt.o1.s1 = onfalse->m_code_start - code->statements.size();
2759                 stmt.o2.s1 = 0;
2760                 stmt.o3.s1 = 0;
2761                 if (stmt.o1.s1 != 1)
2762                     code_push_statement(code, &stmt, instr->m_context);
2763                 return true;
2764             }
2765             else if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2766                 code->statements[stidx] = code->statements[stidx+1];
2767                 if (code->statements[stidx].o1.s1 < 0)
2768                     code->statements[stidx].o1.s1++;
2769                 code_pop_statement(code);
2770             }
2771             /* if not, generate now */
2772             return gen_blocks_recursive(code, func, onfalse);
2773         }
2774
2775         if ( (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8)
2776            || instr->m_opcode == VINSTR_NRCALL)
2777         {
2778             size_t p, first;
2779             ir_value *retvalue;
2780
2781             first = instr->m_params.size();
2782             if (first > 8)
2783                 first = 8;
2784             for (p = 0; p < first; ++p)
2785             {
2786                 ir_value *param = instr->m_params[p];
2787                 if (param->m_callparam)
2788                     continue;
2789
2790                 stmt.opcode = INSTR_STORE_F;
2791                 stmt.o3.u1 = 0;
2792
2793                 if (param->m_vtype == TYPE_FIELD)
2794                     stmt.opcode = field_store_instr[param->m_fieldtype];
2795                 else if (param->m_vtype == TYPE_NIL)
2796                     stmt.opcode = INSTR_STORE_V;
2797                 else
2798                     stmt.opcode = type_store_instr[param->m_vtype];
2799                 stmt.o1.u1 = param->codeAddress();
2800                 stmt.o2.u1 = OFS_PARM0 + 3 * p;
2801
2802                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2803                     /* fetch 3 separate floats */
2804                     stmt.opcode = INSTR_STORE_F;
2805                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2806                     code_push_statement(code, &stmt, instr->m_context);
2807                     stmt.o2.u1++;
2808                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2809                     code_push_statement(code, &stmt, instr->m_context);
2810                     stmt.o2.u1++;
2811                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2812                     code_push_statement(code, &stmt, instr->m_context);
2813                 }
2814                 else
2815                     code_push_statement(code, &stmt, instr->m_context);
2816             }
2817             /* Now handle extparams */
2818             first = instr->m_params.size();
2819             for (; p < first; ++p)
2820             {
2821                 ir_builder *ir = func->m_owner;
2822                 ir_value *param = instr->m_params[p];
2823                 ir_value *targetparam;
2824
2825                 if (param->m_callparam)
2826                     continue;
2827
2828                 if (p-8 >= ir->m_extparams.size())
2829                     ir->generateExtparam();
2830
2831                 targetparam = ir->m_extparams[p-8];
2832
2833                 stmt.opcode = INSTR_STORE_F;
2834                 stmt.o3.u1 = 0;
2835
2836                 if (param->m_vtype == TYPE_FIELD)
2837                     stmt.opcode = field_store_instr[param->m_fieldtype];
2838                 else if (param->m_vtype == TYPE_NIL)
2839                     stmt.opcode = INSTR_STORE_V;
2840                 else
2841                     stmt.opcode = type_store_instr[param->m_vtype];
2842                 stmt.o1.u1 = param->codeAddress();
2843                 stmt.o2.u1 = targetparam->codeAddress();
2844                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2845                     /* fetch 3 separate floats */
2846                     stmt.opcode = INSTR_STORE_F;
2847                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2848                     code_push_statement(code, &stmt, instr->m_context);
2849                     stmt.o2.u1++;
2850                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2851                     code_push_statement(code, &stmt, instr->m_context);
2852                     stmt.o2.u1++;
2853                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2854                     code_push_statement(code, &stmt, instr->m_context);
2855                 }
2856                 else
2857                     code_push_statement(code, &stmt, instr->m_context);
2858             }
2859
2860             stmt.opcode = INSTR_CALL0 + instr->m_params.size();
2861             if (stmt.opcode > INSTR_CALL8)
2862                 stmt.opcode = INSTR_CALL8;
2863             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2864             stmt.o2.u1 = 0;
2865             stmt.o3.u1 = 0;
2866             code_push_statement(code, &stmt, instr->m_context);
2867
2868             retvalue = instr->_m_ops[0];
2869             if (retvalue && retvalue->m_store != store_return &&
2870                 (retvalue->m_store == store_global || retvalue->m_life.size()))
2871             {
2872                 /* not to be kept in OFS_RETURN */
2873                 if (retvalue->m_vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS))
2874                     stmt.opcode = field_store_instr[retvalue->m_fieldtype];
2875                 else
2876                     stmt.opcode = type_store_instr[retvalue->m_vtype];
2877                 stmt.o1.u1 = OFS_RETURN;
2878                 stmt.o2.u1 = retvalue->codeAddress();
2879                 stmt.o3.u1 = 0;
2880                 code_push_statement(code, &stmt, instr->m_context);
2881             }
2882             continue;
2883         }
2884
2885         if (instr->m_opcode == INSTR_STATE) {
2886             stmt.opcode = instr->m_opcode;
2887             if (instr->_m_ops[0])
2888                 stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2889             if (instr->_m_ops[1])
2890                 stmt.o2.u1 = instr->_m_ops[1]->codeAddress();
2891             stmt.o3.u1 = 0;
2892             code_push_statement(code, &stmt, instr->m_context);
2893             continue;
2894         }
2895
2896         stmt.opcode = instr->m_opcode;
2897         stmt.o1.u1 = 0;
2898         stmt.o2.u1 = 0;
2899         stmt.o3.u1 = 0;
2900
2901         /* This is the general order of operands */
2902         if (instr->_m_ops[0])
2903             stmt.o3.u1 = instr->_m_ops[0]->codeAddress();
2904
2905         if (instr->_m_ops[1])
2906             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2907
2908         if (instr->_m_ops[2])
2909             stmt.o2.u1 = instr->_m_ops[2]->codeAddress();
2910
2911         if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
2912         {
2913             stmt.o1.u1 = stmt.o3.u1;
2914             stmt.o3.u1 = 0;
2915         }
2916         else if ((stmt.opcode >= INSTR_STORE_F &&
2917                   stmt.opcode <= INSTR_STORE_FNC) ||
2918                  (stmt.opcode >= INSTR_STOREP_F &&
2919                   stmt.opcode <= INSTR_STOREP_FNC))
2920         {
2921             /* 2-operand instructions with A -> B */
2922             stmt.o2.u1 = stmt.o3.u1;
2923             stmt.o3.u1 = 0;
2924
2925             /* tiny optimization, don't output
2926              * STORE a, a
2927              */
2928             if (stmt.o2.u1 == stmt.o1.u1 &&
2929                 OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
2930             {
2931                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
2932                 continue;
2933             }
2934         }
2935         code_push_statement(code, &stmt, instr->m_context);
2936     }
2937     return true;
2938 }
2939
2940 static bool gen_function_code(code_t *code, ir_function *self)
2941 {
2942     ir_block *block;
2943     prog_section_statement_t stmt, *retst;
2944
2945     /* Starting from the entry point, we generate blocks "as they come"
2946      * for now. Dead blocks will obviously not be translated.
2947      */
2948     if (self->m_blocks.empty()) {
2949         irerror(self->m_context, "Function '%s' declared without body.", self->m_name.c_str());
2950         return false;
2951     }
2952
2953     block = self->m_blocks[0].get();
2954     if (block->m_generated)
2955         return true;
2956
2957     if (!gen_blocks_recursive(code, self, block)) {
2958         irerror(self->m_context, "failed to generate blocks for '%s'", self->m_name.c_str());
2959         return false;
2960     }
2961
2962     /* code_write and qcvm -disasm need to know that the function ends here */
2963     retst = &code->statements.back();
2964     if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) &&
2965         self->m_outtype == TYPE_VOID &&
2966         retst->opcode == INSTR_RETURN &&
2967         !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1)
2968     {
2969         retst->opcode = INSTR_DONE;
2970         ++opts_optimizationcount[OPTIM_VOID_RETURN];
2971     } else {
2972         lex_ctx_t last;
2973
2974         stmt.opcode = INSTR_DONE;
2975         stmt.o1.u1  = 0;
2976         stmt.o2.u1  = 0;
2977         stmt.o3.u1  = 0;
2978         last.line   = code->linenums.back();
2979         last.column = code->columnnums.back();
2980
2981         code_push_statement(code, &stmt, last);
2982     }
2983     return true;
2984 }
2985
2986 qcint_t ir_builder::filestring(const char *filename)
2987 {
2988     /* NOTE: filename pointers are copied, we never strdup them;
2989      * we compare the strings here to find an existing entry.
2990      */
2991     qcint_t  str;
2992
2993     for (size_t i = 0; i != m_filenames.size(); ++i) {
2994         if (!strcmp(m_filenames[i], filename))
2995             return i;
2996     }
2997
2998     str = code_genstring(m_code.get(), filename);
2999     m_filenames.push_back(filename);
3000     m_filestrings.push_back(str);
3001     return str;
3002 }
3003
3004 bool ir_builder::generateGlobalFunction(ir_value *global)
3005 {
3006     prog_section_function_t fun;
3007     ir_function            *irfun;
3008
3009     size_t i;
3010
3011     if (!global->m_hasvalue || (!global->m_constval.vfunc)) {
3012         irerror(global->m_context, "Invalid state of function-global: not constant: %s", global->m_name.c_str());
3013         return false;
3014     }
3015
3016     irfun = global->m_constval.vfunc;
3017     fun.name = global->m_code.name;
3018     fun.file = filestring(global->m_context.file);
3019     fun.profile = 0; /* always 0 */
3020     fun.nargs = vec_size(irfun->m_params);
3021     if (fun.nargs > 8)
3022         fun.nargs = 8;
3023
3024     for (i = 0; i < 8; ++i) {
3025         if ((int32_t)i >= fun.nargs)
3026             fun.argsize[i] = 0;
3027         else
3028             fun.argsize[i] = type_sizeof_[irfun->m_params[i]];
3029     }
3030
3031     fun.firstlocal = 0;
3032     fun.locals = irfun->m_allocated_locals;
3033
3034     if (irfun->m_builtin)
3035         fun.entry = irfun->m_builtin+1;
3036     else {
3037         irfun->m_code_function_def = m_code->functions.size();
3038         fun.entry = m_code->statements.size();
3039     }
3040
3041     m_code->functions.push_back(fun);
3042     return true;
3043 }
3044
3045 ir_value* ir_builder::generateExtparamProto()
3046 {
3047     char      name[128];
3048
3049     util_snprintf(name, sizeof(name), "EXTPARM#%i", (int)(m_extparam_protos.size()));
3050     ir_value *global = new ir_value(name, store_global, TYPE_VECTOR);
3051     m_extparam_protos.emplace_back(global);
3052
3053     return global;
3054 }
3055
3056 void ir_builder::generateExtparam()
3057 {
3058     prog_section_def_t def;
3059     ir_value          *global;
3060
3061     if (m_extparam_protos.size() < m_extparams.size()+1)
3062         global = generateExtparamProto();
3063     else
3064         global = m_extparam_protos[m_extparams.size()].get();
3065
3066     def.name = code_genstring(m_code.get(), global->m_name.c_str());
3067     def.type = TYPE_VECTOR;
3068     def.offset = m_code->globals.size();
3069
3070     m_code->defs.push_back(def);
3071
3072     global->setCodeAddress(def.offset);
3073
3074     m_code->globals.push_back(0);
3075     m_code->globals.push_back(0);
3076     m_code->globals.push_back(0);
3077
3078     m_extparams.emplace_back(global);
3079 }
3080
3081 static bool gen_function_extparam_copy(code_t *code, ir_function *self)
3082 {
3083     ir_builder *ir = self->m_owner;
3084
3085     size_t numparams = vec_size(self->m_params);
3086     if (!numparams)
3087         return true;
3088
3089     prog_section_statement_t stmt;
3090     stmt.opcode = INSTR_STORE_F;
3091     stmt.o3.s1 = 0;
3092     for (size_t i = 8; i < numparams; ++i) {
3093         size_t ext = i - 8;
3094         if (ext >= ir->m_extparams.size())
3095             ir->generateExtparam();
3096
3097         ir_value *ep = ir->m_extparams[ext];
3098
3099         stmt.opcode = type_store_instr[self->m_locals[i]->m_vtype];
3100         if (self->m_locals[i]->m_vtype == TYPE_FIELD &&
3101             self->m_locals[i]->m_fieldtype == TYPE_VECTOR)
3102         {
3103             stmt.opcode = INSTR_STORE_V;
3104         }
3105         stmt.o1.u1 = ep->codeAddress();
3106         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3107         code_push_statement(code, &stmt, self->m_context);
3108     }
3109
3110     return true;
3111 }
3112
3113 static bool gen_function_varargs_copy(code_t *code, ir_function *self)
3114 {
3115     size_t i, ext, numparams, maxparams;
3116
3117     ir_builder *ir = self->m_owner;
3118     ir_value   *ep;
3119     prog_section_statement_t stmt;
3120
3121     numparams = vec_size(self->m_params);
3122     if (!numparams)
3123         return true;
3124
3125     stmt.opcode = INSTR_STORE_V;
3126     stmt.o3.s1 = 0;
3127     maxparams = numparams + self->m_max_varargs;
3128     for (i = numparams; i < maxparams; ++i) {
3129         if (i < 8) {
3130             stmt.o1.u1 = OFS_PARM0 + 3*i;
3131             stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3132             code_push_statement(code, &stmt, self->m_context);
3133             continue;
3134         }
3135         ext = i - 8;
3136         while (ext >= ir->m_extparams.size())
3137             ir->generateExtparam();
3138
3139         ep = ir->m_extparams[ext];
3140
3141         stmt.o1.u1 = ep->codeAddress();
3142         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3143         code_push_statement(code, &stmt, self->m_context);
3144     }
3145
3146     return true;
3147 }
3148
3149 bool ir_builder::generateFunctionLocals(ir_value *global)
3150 {
3151     prog_section_function_t *def;
3152     ir_function             *irfun;
3153     uint32_t                 firstlocal, firstglobal;
3154
3155     irfun = global->m_constval.vfunc;
3156     def   = &m_code->functions[0] + irfun->m_code_function_def;
3157
3158     if (OPTS_OPTION_BOOL(OPTION_G) ||
3159         !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS)        ||
3160         (irfun->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3161     {
3162         firstlocal = def->firstlocal = m_code->globals.size();
3163     } else {
3164         firstlocal = def->firstlocal = m_first_common_local;
3165         ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
3166     }
3167
3168     firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? m_first_common_globaltemp : firstlocal);
3169
3170     for (size_t i = m_code->globals.size(); i < firstlocal + irfun->m_allocated_locals; ++i)
3171         m_code->globals.push_back(0);
3172
3173     for (auto& lp : irfun->m_locals) {
3174         ir_value *v = lp.get();
3175         if (v->m_locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
3176             v->setCodeAddress(firstlocal + v->m_code.local);
3177             if (!generateGlobal(v, true)) {
3178                 irerror(v->m_context, "failed to generate local %s", v->m_name.c_str());
3179                 return false;
3180             }
3181         }
3182         else
3183             v->setCodeAddress(firstglobal + v->m_code.local);
3184     }
3185     for (auto& vp : irfun->m_values) {
3186         ir_value *v = vp.get();
3187         if (v->m_callparam)
3188             continue;
3189         if (v->m_locked)
3190             v->setCodeAddress(firstlocal + v->m_code.local);
3191         else
3192             v->setCodeAddress(firstglobal + v->m_code.local);
3193     }
3194     return true;
3195 }
3196
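     /* Emit the statement stream for one function-valued global: record the entry point,
      * lay out the locals, copy extparams and varargs, then generate the body. */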
3197 bool ir_builder::generateGlobalFunctionCode(ir_value *global)
3198 {
3199     prog_section_function_t *fundef;
3200     ir_function             *irfun;
3201
3202     irfun = global->m_constval.vfunc;
3203     if (!irfun) {
3204         if (global->m_cvq == CV_NONE) {
3205             if (irwarning(global->m_context, WARN_IMPLICIT_FUNCTION_POINTER,
3206                           "function `%s` has no body and in QC implicitly becomes a function-pointer",
3207                           global->m_name.c_str()))
3208             {
3209                 /* Not bailing out just now: if this happens a lot, you don't want
3210                  * to rerun gmqcc for each such function.
3211                  */
3212
3213                 /* return false; */
3214             }
3215         }
3216         /* this was a function pointer, don't generate code for those */
3217         return true;
3218     }
3219
3220     if (irfun->m_builtin)
3221         return true;
3222
3223     /*
3224      * If there is no function-def and the global is erasable, we can skip
3225      * outputting the function altogether.
3226      */
3227     if (global->m_flags & IR_FLAG_ERASABLE && irfun->m_code_function_def < 0) {
3228         return true;
3229     }
3230
3231     if (irfun->m_code_function_def < 0) {
3232         irerror(irfun->m_context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->m_name.c_str());
3233         return false;
3234     }
3235     fundef = &m_code->functions[irfun->m_code_function_def];
3236
3237     fundef->entry = m_code->statements.size();
3238     if (!generateFunctionLocals(global)) {
3239         irerror(irfun->m_context, "Failed to generate locals for function %s", irfun->m_name.c_str());
3240         return false;
3241     }
3242     if (!gen_function_extparam_copy(m_code.get(), irfun)) {
3243         irerror(irfun->m_context, "Failed to generate extparam-copy code for function %s", irfun->m_name.c_str());
3244         return false;
3245     }
3246     if (irfun->m_max_varargs && !gen_function_varargs_copy(m_code.get(), irfun)) {
3247         irerror(irfun->m_context, "Failed to generate vararg-copy code for function %s", irfun->m_name.c_str());
3248         return false;
3249     }
3250     if (!gen_function_code(m_code.get(), irfun)) {
3251         irerror(irfun->m_context, "Failed to generate code for function %s", irfun->m_name.c_str());
3252         return false;
3253     }
3254     return true;
3255 }
3256
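     /* Emit three additional float defs named <name>_x, <name>_y and <name>_z for a
      * vector def so each component can be addressed by name. Skipped for immediates
      * (names starting with '#') and when SINGLE_VECTOR_DEFS is set. */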
3257 static void gen_vector_defs(code_t *code, prog_section_def_t def, const char *name)
3258 {
3259     char  *component;
3260     size_t len, i;
3261
3262     if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3263         return;
3264
3265     def.type = TYPE_FLOAT;
3266
3267     len = strlen(name);
3268
3269     component = (char*)mem_a(len+3);
3270     memcpy(component, name, len);
3271     len += 2;
3272     component[len-0] = 0;
3273     component[len-2] = '_';
3274
3275     component[len-1] = 'x';
3276
3277     for (i = 0; i < 3; ++i) {
3278         def.name = code_genstring(code, component);
3279         code->defs.push_back(def);
3280         def.offset++;
3281         component[len-1]++;
3282     }
3283
3284     mem_d(component);
3285 }
3286
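     /* Same as gen_vector_defs, but emits the per-component entries into the fields
      * section for a vector field. */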
3287 static void gen_vector_fields(code_t *code, prog_section_field_t fld, const char *name)
3288 {
3289     char  *component;
3290     size_t len, i;
3291
3292     if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3293         return;
3294
3295     fld.type = TYPE_FLOAT;
3296
3297     len = strlen(name);
3298
3299     component = (char*)mem_a(len+3);
3300     memcpy(component, name, len);
3301     len += 2;
3302     component[len-0] = 0;
3303     component[len-2] = '_';
3304
3305     component[len-1] = 'x';
3306
3307     for (i = 0; i < 3; ++i) {
3308         fld.name = code_genstring(code, component);
3309         code->fields.push_back(fld);
3310         fld.offset++;
3311         component[len-1]++;
3312     }
3313
3314     mem_d(component);
3315 }
3316
3317 bool ir_builder::generateGlobal(ir_value *global, bool islocal)
3318 {
3319     size_t             i;
3320     int32_t           *iptr;
3321     prog_section_def_t def;
3322     bool               pushdef = opts.optimizeoff;
3323
3324     /* we don't generate split-vectors */
3325     if (global->m_vtype == TYPE_VECTOR && (global->m_flags & IR_FLAG_SPLIT_VECTOR))
3326         return true;
3327
3328     def.type = global->m_vtype;
3329     def.offset = m_code->globals.size();
3330     def.name = 0;
3331     if (OPTS_OPTION_BOOL(OPTION_G) || !islocal)
3332     {
3333         pushdef = true;
3334
3335         /*
3336          * if the global is erasable and never read, skip outputting it
3337          * entirely.
3338          */
3339         if (global->m_flags & IR_FLAG_ERASABLE && global->m_reads.empty()) {
3340             return true;
3341         }
3342
3343         if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) &&
3344             !(global->m_flags & IR_FLAG_INCLUDE_DEF) &&
3345             (global->m_name[0] == '#' || global->m_cvq == CV_CONST))
3346         {
3347             pushdef = false;
3348         }
3349
3350         if (pushdef) {
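                 /* Globals whose name starts with '#' (compiler-generated immediates)
                  * all share a single "IMMEDIATE" def name instead of getting one
                  * string-table entry each. */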
3351             if (global->m_name[0] == '#') {
3352                 if (!m_str_immediate)
3353                     m_str_immediate = code_genstring(m_code.get(), "IMMEDIATE");
3354                 def.name = global->m_code.name = m_str_immediate;
3355             }
3356             else
3357                 def.name = global->m_code.name = code_genstring(m_code.get(), global->m_name.c_str());
3358         }
3359         else
3360             def.name   = 0;
3361         if (islocal) {
3362             def.offset = global->codeAddress();
3363             m_code->defs.push_back(def);
3364             if (global->m_vtype == TYPE_VECTOR)
3365                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3366             else if (global->m_vtype == TYPE_FIELD && global->m_fieldtype == TYPE_VECTOR)
3367                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3368             return true;
3369         }
3370     }
3371     if (islocal)
3372         return true;
3373
3374     switch (global->m_vtype)
3375     {
3376     case TYPE_VOID:
3377         if (0 == global->m_name.compare("end_sys_globals")) {
3378             // TODO: remember this point... all the defs before this one
3379             // should be checksummed and added to progdefs.h when we generate it.
3380         }
3381         else if (0 == global->m_name.compare("end_sys_fields")) {
3382             // TODO: same as above but for entity-fields rather than globals
3383         }
3384         else if (irwarning(global->m_context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`",
3385                           global->m_name.c_str()))
3386         {
3387             /* Not bailing out */
3388             /* return false; */
3389         }
3390         /* Setting this to 0 would arguably be enough, but some code may depend on knowing
3391          * how far the system fields actually go (though the engine knows this anyway).
3392          * Maybe this could become an -f option.
3393          * fteqcc creates data of size 1 for end_sys_*, so we do the same.
3394          */
3395         global->setCodeAddress(m_code->globals.size());
3396         m_code->globals.push_back(0);
3397         /* Add the def */
3398         if (pushdef)
3399             m_code->defs.push_back(def);
3400         return true;
3401     case TYPE_POINTER:
3402         if (pushdef)
3403             m_code->defs.push_back(def);
3404         return gen_global_pointer(m_code.get(), global);
3405     case TYPE_FIELD:
3406         if (pushdef) {
3407             m_code->defs.push_back(def);
3408             if (global->m_fieldtype == TYPE_VECTOR)
3409                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3410         }
3411         return gen_global_field(m_code.get(), global);
3412     case TYPE_ENTITY:
3413         /* fall through */
3414     case TYPE_FLOAT:
3415     {
3416         global->setCodeAddress(m_code->globals.size());
3417         if (global->m_hasvalue) {
3418             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3419                 return true;
3420             iptr = (int32_t*)&global->m_constval.ivec[0];
3421             m_code->globals.push_back(*iptr);
3422         } else {
3423             m_code->globals.push_back(0);
3424         }
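             /* Non-constant globals get the DEF_SAVEGLOBAL bit so the engine includes
              * them when writing savegames. */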
3425         if (!islocal && global->m_cvq != CV_CONST)
3426             def.type |= DEF_SAVEGLOBAL;
3427         if (pushdef)
3428             m_code->defs.push_back(def);
3429
3430         return global->m_code.globaladdr >= 0;
3431     }
3432     case TYPE_STRING:
3433     {
3434         global->setCodeAddress(m_code->globals.size());
3435         if (global->m_hasvalue) {
3436             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3437                 return true;
3438             uint32_t load = code_genstring(m_code.get(), global->m_constval.vstring);
3439             m_code->globals.push_back(load);
3440         } else {
3441             m_code->globals.push_back(0);
3442         }
3443         if (!islocal && global->m_cvq != CV_CONST)
3444             def.type |= DEF_SAVEGLOBAL;
3445         if (pushdef)
3446             m_code->defs.push_back(def);
3447         return global->m_code.globaladdr >= 0;
3448     }
3449     case TYPE_VECTOR:
3450     {
3451         size_t d;
3452         global->setCodeAddress(m_code->globals.size());
3453         if (global->m_hasvalue) {
3454             iptr = (int32_t*)&global->m_constval.ivec[0];
3455             m_code->globals.push_back(iptr[0]);
3456             if (global->m_code.globaladdr < 0)
3457                 return false;
3458             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3459                 m_code->globals.push_back(iptr[d]);
3460             }
3461         } else {
3462             m_code->globals.push_back(0);
3463             if (global->m_code.globaladdr < 0)
3464                 return false;
3465             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3466                 m_code->globals.push_back(0);
3467             }
3468         }
3469         if (!islocal && global->m_cvq != CV_CONST)
3470             def.type |= DEF_SAVEGLOBAL;
3471
3472         if (pushdef) {
3473             m_code->defs.push_back(def);
3474             def.type &= ~DEF_SAVEGLOBAL;
3475             gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3476         }
3477         return global->m_code.globaladdr >= 0;
3478     }
3479     case TYPE_FUNCTION:
3480         global->setCodeAddress(m_code->globals.size());
3481         if (!global->m_hasvalue) {
3482             m_code->globals.push_back(0);
3483             if (global->m_code.globaladdr < 0)
3484                 return false;
3485         } else {
3486             m_code->globals.push_back(m_code->functions.size());
3487             if (!generateGlobalFunction(global))
3488                 return false;
3489         }
3490         if (!islocal && global->m_cvq != CV_CONST)
3491             def.type |= DEF_SAVEGLOBAL;
3492         if (pushdef)
3493             m_code->defs.push_back(def);
3494         return true;
3495     case TYPE_VARIANT:
3496         /* assume biggest type */
3497         global->setCodeAddress(m_code->globals.size());
3498         m_code->globals.push_back(0);
3499         for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i)
3500             m_code->globals.push_back(0);
3501         return true;
3502     default:
3503         /* refuse to create structs, unions, arrays or any other fancy business. */
3504         irerror(global->m_context, "Invalid type for global variable `%s`: %s",
3505                 global->m_name.c_str(), type_name[global->m_vtype]);
3506         return false;
3507     }
3508 }
3509
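     /* Reserve the entity-field offset ahead of time; per type_sizeof_, a vector field
      * occupies three consecutive field slots. */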
3510 static GMQCC_INLINE void ir_builder_prepare_field(code_t *code, ir_value *field)
3511 {
3512     field->m_code.fieldaddr = code_alloc_field(code, type_sizeof_[field->m_fieldtype]);
3513 }
3514
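     /* Emit everything a field needs: a def (named ".<field>" under the gmqcc standard,
      * with the field entry reusing the same string minus the dot), a field entry, and a
      * global holding the field offset (three globals for vector fields), plus the
      * per-component defs and fields for vectors. */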
3515 static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
3516 {
3517     prog_section_def_t def;
3518     prog_section_field_t fld;
3519
3520     (void)self;
3521
3522     def.type   = (uint16_t)field->m_vtype;
3523     def.offset = (uint16_t)self->m_code->globals.size();
3524
3525     /* create a global named the same as the field */
3526     if (OPTS_OPTION_U32(OPTION_STANDARD) == COMPILER_GMQCC) {
3527         /* in our standard, the global gets a dot prefix */
3528         size_t len = field->m_name.length();
3529         char name[1024];
3530
3531         /* we really don't want to have to allocate this, and 1024
3532          * bytes is more than enough for a variable/field name
3533          */
3534         if (len+2 >= sizeof(name)) {
3535             irerror(field->m_context, "invalid field name size: %u", (unsigned int)len);
3536             return false;
3537         }
3538
3539         name[0] = '.';
3540         memcpy(name+1, field->m_name.c_str(), len); // no strncpy needed - len comes from m_name.length()
3541         name[len+1] = 0;
3542
3543         def.name = code_genstring(self->m_code.get(), name);
3544         fld.name = def.name + 1; /* we reuse that string table entry */
3545     } else {
3546         /* in plain QC there cannot be a global with the same name,
3547          * so we give the global the same name as the field.
3548          * FIXME: fteqcc presumably creates such a global as well;
3549          * check whether it actually uses the same name. It probably does.
3550          */
3551         def.name = code_genstring(self->m_code.get(), field->m_name.c_str());
3552         fld.name = def.name;
3553     }
3554
3555     field->m_code.name = def.name;
3556
3557     self->m_code->defs.push_back(def);
3558
3559     fld.type = field->m_fieldtype;
3560
3561     if (fld.type == TYPE_VOID) {
3562         irerror(field->m_context, "field is missing a type: %s - don't know its size", field->m_name.c_str());
3563         return false;
3564     }
3565
3566     fld.offset = field->m_code.fieldaddr;
3567
3568     self->m_code->fields.push_back(fld);
3569
3570     field->setCodeAddress(self->m_code->globals.size());
3571     self->m_code->globals.push_back(fld.offset);
3572     if (fld.type == TYPE_VECTOR) {
3573         self->m_code->globals.push_back(fld.offset+1);
3574         self->m_code->globals.push_back(fld.offset+2);
3575     }
3576
3577     if (field->m_fieldtype == TYPE_VECTOR) {
3578         gen_vector_defs  (self->m_code.get(), def, field->m_name.c_str());
3579         gen_vector_fields(self->m_code.get(), fld, field->m_name.c_str());
3580     }
3581
3582     return field->m_code.globaladdr >= 0;
3583 }
3584
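     /* Gather every constant float already present among the globals (CV_CONST or
      * '#'-named immediates) so split vectors can reuse them as components. */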
3585 static void ir_builder_collect_reusables(ir_builder *builder) {
3586     std::vector<ir_value*> reusables;
3587
3588     for (auto& gp : builder->m_globals) {
3589         ir_value *value = gp.get();
3590         if (value->m_vtype != TYPE_FLOAT || !value->m_hasvalue)
3591             continue;
3592         if (value->m_cvq == CV_CONST || (value->m_name.length() >= 1 && value->m_name[0] == '#'))
3593             reusables.emplace_back(value);
3594     }
3595     builder->m_const_floats = std::move(reusables);
3596 }
3597
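     /* Split a constant vector that is only ever passed directly to calls into three
      * float components; the components can then share storage with existing constant
      * floats instead of the vector occupying three globals of its own. */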
3598 static void ir_builder_split_vector(ir_builder *self, ir_value *vec) {
3599     ir_value* found[3] = { nullptr, nullptr, nullptr };
3600
3601     // must not be written to
3602     if (vec->m_writes.size())
3603         return;
3604     // must not be trying to access individual members
3605     if (vec->m_members[0] || vec->m_members[1] || vec->m_members[2])
3606         return;
3607     // must actually be read somewhere, otherwise it won't be generated anyway
3608     if (vec->m_reads.empty())
3609         return;
3613
3614     // may only be used directly as a function parameter; if we find any other instruction, cancel
3615     for (ir_instr *user : vec->m_reads) {
3616         // only split vectors that are used directly as parameters to a call
3617         if ((user->m_opcode < INSTR_CALL0 || user->m_opcode > INSTR_CALL8) && user->m_opcode != VINSTR_NRCALL)
3618             return;
3619     }
3620
3621     vec->m_flags |= IR_FLAG_SPLIT_VECTOR;
3622
3623     // find existing floats making up the split
3624     for (ir_value *c : self->m_const_floats) {
3625         if (!found[0] && c->m_constval.vfloat == vec->m_constval.vvec.x)
3626             found[0] = c;
3627         if (!found[1] && c->m_constval.vfloat == vec->m_constval.vvec.y)
3628             found[1] = c;
3629         if (!found[2] && c->m_constval.vfloat == vec->m_constval.vvec.z)
3630             found[2] = c;
3631         if (found[0] && found[1] && found[2])
3632             break;
3633     }
3634
3635     // generate floats for not yet found components
3636     if (!found[0])
3637         found[0] = self->literalFloat(vec->m_constval.vvec.x, true);
3638     if (!found[1]) {
3639         if (vec->m_constval.vvec.y == vec->m_constval.vvec.x)
3640             found[1] = found[0];
3641         else
3642             found[1] = self->literalFloat(vec->m_constval.vvec.y, true);
3643     }
3644     if (!found[2]) {
3645         if (vec->m_constval.vvec.z == vec->m_constval.vvec.x)
3646             found[2] = found[0];
3647         else if (vec->m_constval.vvec.z == vec->m_constval.vvec.y)
3648             found[2] = found[1];
3649         else
3650             found[2] = self->literalFloat(vec->m_constval.vvec.z, true);
3651     }
3652
3653     // the .members array should be safe to use here
3654     vec->m_members[0] = found[0];
3655     vec->m_members[1] = found[1];
3656     vec->m_members[2] = found[2];
3657
3658     // register the readers for these floats
3659     found[0]->m_reads.insert(found[0]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3660     found[1]->m_reads.insert(found[1]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3661     found[2]->m_reads.insert(found[2]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3662 }
3663
3664 static void ir_builder_split_vectors(ir_builder *self) {
3665     // member values may be added to self->m_globals during this operation, but
3666     // no new vectors will be added, so we iterate by index: C++ iterators
3667     // could be invalidated by the insertions
3668     const size_t count = self->m_globals.size();
3669     for (size_t i = 0; i != count; ++i) {
3670         ir_value *v = self->m_globals[i].get();
3671         if (v->m_vtype != TYPE_VECTOR || !v->m_name.length() || v->m_name[0] != '#')
3672             continue;
3673         ir_builder_split_vector(self, v);
3674     }
3675 }
3676
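     /* Main code-generation driver: optionally split constant vector parameters, reserve
      * field offsets, emit globals and field defs, allocate the nil/vinstr-temp globals
      * and the shared globaltemp/local blocks, generate every function's code, append a
      * trailing DONE if needed, and finally write the progs (and optional .lno) file. */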
3677 bool ir_builder::generate(const char *filename)
3678 {
3679     prog_section_statement_t stmt;
3680     char  *lnofile = nullptr;
3681
3682     if (OPTS_FLAG(SPLIT_VECTOR_PARAMETERS)) {
3683         ir_builder_collect_reusables(this);
3684         if (!m_const_floats.empty())
3685             ir_builder_split_vectors(this);
3686     }
3687
3688     for (auto& fp : m_fields)
3689         ir_builder_prepare_field(m_code.get(), fp.get());
3690
3691     for (auto& gp : m_globals) {
3692         ir_value *global = gp.get();
3693         if (!generateGlobal(global, false)) {
3694             return false;
3695         }
3696         if (global->m_vtype == TYPE_FUNCTION) {
3697             ir_function *func = global->m_constval.vfunc;
3698             if (func && m_max_locals < func->m_allocated_locals &&
3699                 !(func->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3700             {
3701                 m_max_locals = func->m_allocated_locals;
3702             }
3703             if (func && m_max_globaltemps < func->m_globaltemps)
3704                 m_max_globaltemps = func->m_globaltemps;
3705         }
3706     }
3707
3708     for (auto& fp : m_fields) {
3709         if (!ir_builder_gen_field(this, fp.get()))
3710             return false;
3711     }
3712
3713     // generate nil
3714     m_nil->setCodeAddress(m_code->globals.size());
3715     m_code->globals.push_back(0);
3716     m_code->globals.push_back(0);
3717     m_code->globals.push_back(0);
3718
3719     // generate virtual-instruction temps
3720     for (size_t i = 0; i < IR_MAX_VINSTR_TEMPS; ++i) {
3721         m_vinstr_temp[i]->setCodeAddress(m_code->globals.size());
3722         m_code->globals.push_back(0);
3723         m_code->globals.push_back(0);
3724         m_code->globals.push_back(0);
3725     }
3726
3727     // generate global temps
3728     m_first_common_globaltemp = m_code->globals.size();
3729     m_code->globals.insert(m_code->globals.end(), m_max_globaltemps, 0);
3734     // generate common locals
3735     m_first_common_local = m_code->globals.size();
3736     m_code->globals.insert(m_code->globals.end(), m_max_locals, 0);
3741
3742     // generate function code
3743
3744     for (auto& gp : m_globals) {
3745         ir_value *global = gp.get();
3746         if (global->m_vtype == TYPE_FUNCTION) {
3747             if (!this->generateGlobalFunctionCode(global))
3748                 return false;
3749         }
3750     }
3751
3752     if (m_code->globals.size() >= 65536) {
3753         irerror(m_globals.back()->m_context,
3754             "This progs file would require more globals than the metadata can handle (%zu). Bailing out.",
3755             m_code->globals.size());
3756         return false;
3757     }
3758
3759     /* DP errors if the last instruction is not an INSTR_DONE. */
3760     if (m_code->statements.back().opcode != INSTR_DONE)
3761     {
3762         lex_ctx_t last;
3763
3764         stmt.opcode = INSTR_DONE;
3765         stmt.o1.u1  = 0;
3766         stmt.o2.u1  = 0;
3767         stmt.o3.u1  = 0;
3768         last.line   = m_code->linenums.back();
3769         last.column = m_code->columnnums.back();
3770
3771         code_push_statement(m_code.get(), &stmt, last);
3772     }
3773
3774     if (OPTS_OPTION_BOOL(OPTION_PP_ONLY))
3775         return true;
3776
3777     if (m_code->statements.size() != m_code->linenums.size()) {
3778         con_err("Linecounter wrong: %lu != %lu\n",
3779                 (unsigned long)m_code->statements.size(),
3780                 (unsigned long)m_code->linenums.size());
3781     } else if (OPTS_FLAG(LNO)) {
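             /* Build the .lno filename from the output filename by replacing its
              * extension (if any) with ".lno". */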
3782         char  *dot;
3783         size_t filelen = strlen(filename);
3784
3785         memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
3786         dot = strrchr(lnofile, '.');
3787         if (!dot) {
3788             vec_pop(lnofile);
3789         } else {
3790             vec_shrinkto(lnofile, dot - lnofile);
3791         }
3792         memcpy(vec_add(lnofile, 5), ".lno", 5);
3793     }
3794
3795     if (!code_write(m_code.get(), filename, lnofile)) {
3796         vec_free(lnofile);
3797         return false;
3798     }
3799
3800     vec_free(lnofile);
3801     return true;
3802 }
3803
3804 /***********************************************************************
3805  * IR DEBUG Dump functions...
3806  */
3807
3808 #define IND_BUFSZ 1024
3809
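     /* Opcodes below VINSTR_END are real QuakeC instructions and take their names from
      * util_instr_str; the rest are IR-internal virtual instructions. */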
3810 static const char *qc_opname(int op)
3811 {
3812     if (op < 0) return "<INVALID>";
3813     if (op < VINSTR_END)
3814         return util_instr_str[op];
3815     switch (op) {
3816         case VINSTR_END:       return "END";
3817         case VINSTR_PHI:       return "PHI";
3818         case VINSTR_JUMP:      return "JUMP";
3819         case VINSTR_COND:      return "COND";
3820         case VINSTR_BITXOR:    return "BITXOR";
3821         case VINSTR_BITAND_V:  return "BITAND_V";
3822         case VINSTR_BITOR_V:   return "BITOR_V";
3823         case VINSTR_BITXOR_V:  return "BITXOR_V";
3824         case VINSTR_BITAND_VF: return "BITAND_VF";
3825         case VINSTR_BITOR_VF:  return "BITOR_VF";
3826         case VINSTR_BITXOR_VF: return "BITXOR_VF";
3827         case VINSTR_CROSS:     return "CROSS";
3828         case VINSTR_NEG_F:     return "NEG_F";
3829         case VINSTR_NEG_V:     return "NEG_V";
3830         default:               return "<UNK>";
3831     }
3832 }
3833
3834 void ir_builder::dump(int (*oprintf)(const char*, ...)) const
3835 {
3836     size_t i;
3837     char indent[IND_BUFSZ];
3838     indent[0] = '\t';
3839     indent[1] = 0;
3840
3841     oprintf("module %s\n", m_name.c_str());
3842     for (i = 0; i < m_globals.size(); ++i)
3843     {
3844         oprintf("global ");
3845         if (m_globals[i]->m_hasvalue)
3846             oprintf("%s = ", m_globals[i]->m_name.c_str());
3847         m_globals[i].get()->dump(oprintf);
3848         oprintf("\n");
3849     }
3850     for (i = 0; i < m_functions.size(); ++i)
3851         ir_function_dump(m_functions[i].get(), indent, oprintf);
3852     oprintf("endmodule %s\n", m_name.c_str());
3853 }
3854
3855 static const char *storenames[] = {
3856     "[global]", "[local]", "[param]", "[value]", "[return]"
3857 };
3858
3859 void ir_function_dump(ir_function *f, char *ind,
3860                       int (*oprintf)(const char*, ...))
3861 {
3862     size_t i;
3863     if (f->m_builtin != 0) {
3864         oprintf("%sfunction %s = builtin %i\n", ind, f->m_name.c_str(), -f->m_builtin);
3865         return;
3866     }
3867     oprintf("%sfunction %s\n", ind, f->m_name.c_str());
3868     util_strncat(ind, "\t", IND_BUFSZ-1);
3869     if (f->m_locals.size())
3870     {
3871         oprintf("%s%i locals:\n", ind, (int)f->m_locals.size());
3872         for (i = 0; i < f->m_locals.size(); ++i) {
3873             oprintf("%s\t", ind);
3874             f->m_locals[i].get()->dump(oprintf);
3875             oprintf("\n");
3876         }
3877     }
3878     oprintf("%sliferanges:\n", ind);
3879     for (i = 0; i < f->m_locals.size(); ++i) {
3880         const char *attr = "";
3881         size_t l, m;
3882         ir_value *v = f->m_locals[i].get();
3883         if (v->m_unique_life && v->m_locked)
3884             attr = "unique,locked ";
3885         else if (v->m_unique_life)
3886             attr = "unique ";
3887         else if (v->m_locked)
3888             attr = "locked ";
3889         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3890                 storenames[v->m_store],
3891                 attr, (v->m_callparam ? "callparam " : ""),
3892                 (int)v->m_code.local);
3893         if (v->m_life.empty())
3894             oprintf("[null]");
3895         for (l = 0; l < v->m_life.size(); ++l) {
3896             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3897         }
3898         oprintf("\n");
3899         for (m = 0; m < 3; ++m) {
3900             ir_value *vm = v->m_members[m];
3901             if (!vm)
3902                 continue;
3903             oprintf("%s\t%s: @%i ", ind, vm->m_name.c_str(), (int)vm->m_code.local);
3904             for (l = 0; l < vm->m_life.size(); ++l) {
3905                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3906             }
3907             oprintf("\n");
3908         }
3909     }
3910     for (i = 0; i < f->m_values.size(); ++i) {
3911         const char *attr = "";
3912         size_t l, m;
3913         ir_value *v = f->m_values[i].get();
3914         if (v->m_unique_life && v->m_locked)
3915             attr = "unique,locked ";
3916         else if (v->m_unique_life)
3917             attr = "unique ";
3918         else if (v->m_locked)
3919             attr = "locked ";
3920         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3921                 storenames[v->m_store],
3922                 attr, (v->m_callparam ? "callparam " : ""),
3923                 (int)v->m_code.local);
3924         if (v->m_life.empty())
3925             oprintf("[null]");
3926         for (l = 0; l < v->m_life.size(); ++l) {
3927             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3928         }
3929         oprintf("\n");
3930         for (m = 0; m < 3; ++m) {
3931             ir_value *vm = v->m_members[m];
3932             if (!vm)
3933                 continue;
3934             if (vm->m_unique_life && vm->m_locked)
3935                 attr = "unique,locked ";
3936             else if (vm->m_unique_life)
3937                 attr = "unique ";
3938             else if (vm->m_locked)
3939                 attr = "locked ";
3940             oprintf("%s\t%s: %s@%i ", ind, vm->m_name.c_str(), attr, (int)vm->m_code.local);
3941             for (l = 0; l < vm->m_life.size(); ++l) {
3942                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3943             }
3944             oprintf("\n");
3945         }
3946     }
3947     if (f->m_blocks.size())
3948     {
3949         oprintf("%slife passes: %i\n", ind, (int)f->m_run_id);
3950         for (i = 0; i < f->m_blocks.size(); ++i) {
3951             ir_block_dump(f->m_blocks[i].get(), ind, oprintf);
3952         }
3953
3954     }
3955     ind[strlen(ind)-1] = 0;
3956     oprintf("%sendfunction %s\n", ind, f->m_name.c_str());
3957 }
3958
3959 void ir_block_dump(ir_block* b, char *ind,
3960                    int (*oprintf)(const char*, ...))
3961 {
3962     size_t i;
3963     oprintf("%s:%s\n", ind, b->m_label.c_str());
3964     util_strncat(ind, "\t", IND_BUFSZ-1);
3965
3966     if (b->m_instr && b->m_instr[0])
3967         oprintf("%s (%i) [entry]\n", ind, (int)(b->m_instr[0]->m_eid-1));
3968     for (i = 0; i < vec_size(b->m_instr); ++i)
3969         ir_instr_dump(b->m_instr[i], ind, oprintf);
3970     ind[strlen(ind)-1] = 0;
3971 }
3972
3973 static void dump_phi(ir_instr *in, int (*oprintf)(const char*, ...))
3974 {
3975     oprintf("%s <- phi ", in->_m_ops[0]->m_name.c_str());
3976     for (auto &it : in->m_phi) {
3977         oprintf("([%s] : %s) ", it.from->m_label.c_str(),
3978                                 it.value->m_name.c_str());
3979     }
3980     oprintf("\n");
3981 }
3982
3983 void ir_instr_dump(ir_instr *in, char *ind,
3984                        int (*oprintf)(const char*, ...))
3985 {
3986     size_t i;
3987     const char *comma = nullptr;
3988
3989     oprintf("%s (%i) ", ind, (int)in->m_eid);
3990
3991     if (in->m_opcode == VINSTR_PHI) {
3992         dump_phi(in, oprintf);
3993         return;
3994     }
3995
3996     util_strncat(ind, "\t", IND_BUFSZ-1);
3997
3998     if (in->_m_ops[0] && (in->_m_ops[1] || in->_m_ops[2])) {
3999         in->_m_ops[0]->dump(oprintf);
4000         oprintf(" <- ");
4002     }
4003     if (in->m_opcode == INSTR_CALL0 || in->m_opcode == VINSTR_NRCALL) {
4004         oprintf("CALL%i\t", (int)in->m_params.size());
4005     } else
4006         oprintf("%s\t", qc_opname(in->m_opcode));
4007
4008     if (in->_m_ops[0] && !(in->_m_ops[1] || in->_m_ops[2])) {
4009         in->_m_ops[0]->dump(oprintf);
4010         comma = ",\t";
4011     }
4012     else
4013     {
4014         for (i = 1; i != 3; ++i) {
4015             if (in->_m_ops[i]) {
4016                 if (comma)
4017                     oprintf(comma);
4018                 in->_m_ops[i]->dump(oprintf);
4019                 comma = ",\t";
4020             }
4021         }
4022     }
4023     if (in->m_bops[0]) {
4024         if (comma)
4025             oprintf(comma);
4026         oprintf("[%s]", in->m_bops[0]->m_label.c_str());
4027         comma = ",\t";
4028     }
4029     if (in->m_bops[1])
4030         oprintf("%s[%s]", comma, in->m_bops[1]->m_label.c_str());
4031     if (in->m_params.size()) {
4032         oprintf("\tparams: ");
4033         for (auto &it : in->m_params)
4034             oprintf("%s, ", it->m_name.c_str());
4035     }
4036     oprintf("\n");
4037     ind[strlen(ind)-1] = 0;
4038 }
4039
4040 static void ir_value_dump_string(const char *str, int (*oprintf)(const char*, ...))
4041 {
4042     oprintf("\"");
4043     for (; *str; ++str) {
4044         switch (*str) {
4045             case '\n': oprintf("\\n"); break;
4046             case '\r': oprintf("\\r"); break;
4047             case '\t': oprintf("\\t"); break;
4048             case '\v': oprintf("\\v"); break;
4049             case '\f': oprintf("\\f"); break;
4050             case '\b': oprintf("\\b"); break;
4051             case '\a': oprintf("\\a"); break;
4052             case '\\': oprintf("\\\\"); break;
4053             case '"': oprintf("\\\""); break;
4054             default: oprintf("%c", *str); break;
4055         }
4056     }
4057     oprintf("\"");
4058 }
4059
4060 void ir_value::dump(int (*oprintf)(const char*, ...)) const
4061 {
4062     if (m_hasvalue) {
4063         switch (m_vtype) {
4064             default:
4065             case TYPE_VOID:
4066                 oprintf("(void)");
4067                 break;
4068             case TYPE_FUNCTION:
4069                 oprintf("fn:%s", m_name.c_str());
4070                 break;
4071             case TYPE_FLOAT:
4072                 oprintf("%g", m_constval.vfloat);
4073                 break;
4074             case TYPE_VECTOR:
4075                 oprintf("'%g %g %g'",
4076                         m_constval.vvec.x,
4077                         m_constval.vvec.y,
4078                         m_constval.vvec.z);
4079                 break;
4080             case TYPE_ENTITY:
4081                 oprintf("(entity)");
4082                 break;
4083             case TYPE_STRING:
4084                 ir_value_dump_string(m_constval.vstring, oprintf);
4085                 break;
4086 #if 0
4087             case TYPE_INTEGER:
4088                 oprintf("%i", m_constval.vint);
4089                 break;
4090 #endif
4091             case TYPE_POINTER:
4092                 oprintf("&%s",
4093                     m_constval.vpointer->m_name.c_str());
4094                 break;
4095         }
4096     } else {
4097         oprintf("%s", m_name.c_str());
4098     }
4099 }
4100
4101 void ir_value::dumpLife(int (*oprintf)(const char*,...)) const
4102 {
4103     oprintf("Life of %12s:", m_name.c_str());
4104     for (size_t i = 0; i < m_life.size(); ++i)
4105     {
4106         oprintf(" + [%i, %i]\n", m_life[i].start, m_life[i].end);
4107     }
4108 }