1 #include <stdlib.h>
2 #include <string.h>
3
4 #include "gmqcc.h"
5 #include "ir.h"
6
7 /***********************************************************************
8  * Type sizes used at multiple points in the IR codegen
9  */
10
11 const char *type_name[TYPE_COUNT] = {
12     "void",
13     "string",
14     "float",
15     "vector",
16     "entity",
17     "field",
18     "function",
19     "pointer",
20     "integer",
21     "variant",
22     "struct",
23     "union",
24     "array",
25
26     "nil",
27     "<no-expression>"
28 };
29
30 static size_t type_sizeof_[TYPE_COUNT] = {
31     1, /* TYPE_VOID     */
32     1, /* TYPE_STRING   */
33     1, /* TYPE_FLOAT    */
34     3, /* TYPE_VECTOR   */
35     1, /* TYPE_ENTITY   */
36     1, /* TYPE_FIELD    */
37     1, /* TYPE_FUNCTION */
38     1, /* TYPE_POINTER  */
39     1, /* TYPE_INTEGER  */
40     3, /* TYPE_VARIANT  */
41     0, /* TYPE_STRUCT   */
42     0, /* TYPE_UNION    */
43     0, /* TYPE_ARRAY    */
44     0, /* TYPE_NIL      */
45     0, /* TYPE_NOEXPR   */
46 };
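
/* Note (illustrative): a TYPE_VECTOR value spans type_sizeof_[TYPE_VECTOR] == 3
 * consecutive globals, one per component, while the scalar types span a single
 * global; ir_value_sizeof() further down consults this table when sizing
 * values for local allocation.
 */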
47
48 const uint16_t type_store_instr[TYPE_COUNT] = {
49     INSTR_STORE_F, /* should use I when having integer support */
50     INSTR_STORE_S,
51     INSTR_STORE_F,
52     INSTR_STORE_V,
53     INSTR_STORE_ENT,
54     INSTR_STORE_FLD,
55     INSTR_STORE_FNC,
56     INSTR_STORE_ENT, /* should use I */
57 #if 0
58     INSTR_STORE_I, /* integer type */
59 #else
60     INSTR_STORE_F,
61 #endif
62
63     INSTR_STORE_V, /* variant, should never be accessed */
64
65     VINSTR_END, /* struct */
66     VINSTR_END, /* union  */
67     VINSTR_END, /* array  */
68     VINSTR_END, /* nil    */
69     VINSTR_END, /* noexpr */
70 };
71
72 const uint16_t field_store_instr[TYPE_COUNT] = {
73     INSTR_STORE_FLD,
74     INSTR_STORE_FLD,
75     INSTR_STORE_FLD,
76     INSTR_STORE_V,
77     INSTR_STORE_FLD,
78     INSTR_STORE_FLD,
79     INSTR_STORE_FLD,
80     INSTR_STORE_FLD,
81 #if 0
82     INSTR_STORE_FLD, /* integer type */
83 #else
84     INSTR_STORE_FLD,
85 #endif
86
87     INSTR_STORE_V, /* variant, should never be accessed */
88
89     VINSTR_END, /* struct */
90     VINSTR_END, /* union  */
91     VINSTR_END, /* array  */
92     VINSTR_END, /* nil    */
93     VINSTR_END, /* noexpr */
94 };
95
96 const uint16_t type_storep_instr[TYPE_COUNT] = {
97     INSTR_STOREP_F, /* should use I when having integer support */
98     INSTR_STOREP_S,
99     INSTR_STOREP_F,
100     INSTR_STOREP_V,
101     INSTR_STOREP_ENT,
102     INSTR_STOREP_FLD,
103     INSTR_STOREP_FNC,
104     INSTR_STOREP_ENT, /* should use I */
105 #if 0
106     INSTR_STOREP_ENT, /* integer type */
107 #else
108     INSTR_STOREP_F,
109 #endif
110
111     INSTR_STOREP_V, /* variant, should never be accessed */
112
113     VINSTR_END, /* struct */
114     VINSTR_END, /* union  */
115     VINSTR_END, /* array  */
116     VINSTR_END, /* nil    */
117     VINSTR_END, /* noexpr */
118 };
119
120 const uint16_t type_eq_instr[TYPE_COUNT] = {
121     INSTR_EQ_F, /* should use I when having integer support */
122     INSTR_EQ_S,
123     INSTR_EQ_F,
124     INSTR_EQ_V,
125     INSTR_EQ_E,
126     INSTR_EQ_E, /* FLD has no comparison */
127     INSTR_EQ_FNC,
128     INSTR_EQ_E, /* should use I */
129 #if 0
130     INSTR_EQ_I,
131 #else
132     INSTR_EQ_F,
133 #endif
134
135     INSTR_EQ_V, /* variant, should never be accessed */
136
137     VINSTR_END, /* struct */
138     VINSTR_END, /* union  */
139     VINSTR_END, /* array  */
140     VINSTR_END, /* nil    */
141     VINSTR_END, /* noexpr */
142 };
143
144 const uint16_t type_ne_instr[TYPE_COUNT] = {
145     INSTR_NE_F, /* should use I when having integer support */
146     INSTR_NE_S,
147     INSTR_NE_F,
148     INSTR_NE_V,
149     INSTR_NE_E,
150     INSTR_NE_E, /* FLD has no comparison */
151     INSTR_NE_FNC,
152     INSTR_NE_E, /* should use I */
153 #if 0
154     INSTR_NE_I,
155 #else
156     INSTR_NE_F,
157 #endif
158
159     INSTR_NE_V, /* variant, should never be accessed */
160
161     VINSTR_END, /* struct */
162     VINSTR_END, /* union  */
163     VINSTR_END, /* array  */
164     VINSTR_END, /* nil    */
165     VINSTR_END, /* noexpr */
166 };
167
168 const uint16_t type_not_instr[TYPE_COUNT] = {
169     INSTR_NOT_F, /* should use I when having integer support */
170     VINSTR_END,  /* not to be used, depends on string related -f flags */
171     INSTR_NOT_F,
172     INSTR_NOT_V,
173     INSTR_NOT_ENT,
174     INSTR_NOT_ENT,
175     INSTR_NOT_FNC,
176     INSTR_NOT_ENT, /* should use I */
177 #if 0
178     INSTR_NOT_I, /* integer type */
179 #else
180     INSTR_NOT_F,
181 #endif
182
183     INSTR_NOT_V, /* variant, should never be accessed */
184
185     VINSTR_END, /* struct */
186     VINSTR_END, /* union  */
187     VINSTR_END, /* array  */
188     VINSTR_END, /* nil    */
189     VINSTR_END, /* noexpr */
190 };
191
192 /* protos */
193 static void            ir_value_dump(ir_value*, int (*oprintf)(const char*,...));
194
195 static ir_value*       ir_gen_extparam_proto(ir_builder *ir);
196 static void            ir_gen_extparam      (ir_builder *ir);
197
198 static void            ir_function_dump(ir_function*, char *ind, int (*oprintf)(const char*,...));
199
200 static ir_value*       ir_block_create_general_instr(ir_block *self, lex_ctx_t, const char *label,
201                                                      int op, ir_value *a, ir_value *b, qc_type outype);
202 static bool GMQCC_WARN ir_block_create_store(ir_block*, lex_ctx_t, ir_value *target, ir_value *what);
203 static void            ir_block_dump(ir_block*, char *ind, int (*oprintf)(const char*,...));
204
205 static bool            ir_instr_op(ir_instr*, int op, ir_value *value, bool writing);
206 static void            ir_instr_dump(ir_instr* in, char *ind, int (*oprintf)(const char*,...));
207 /* error functions */
208
209 static void irerror(lex_ctx_t ctx, const char *msg, ...)
210 {
211     va_list ap;
212     va_start(ap, msg);
213     con_cvprintmsg(ctx, LVL_ERROR, "internal error", msg, ap);
214     va_end(ap);
215 }
216
217 static bool GMQCC_WARN irwarning(lex_ctx_t ctx, int warntype, const char *fmt, ...)
218 {
219     bool    r;
220     va_list ap;
221     va_start(ap, fmt);
222     r = vcompile_warning(ctx, warntype, fmt, ap);
223     va_end(ap);
224     return r;
225 }
226
227 /***********************************************************************
228  * Vector utility functions
229  */
230
231 static bool GMQCC_WARN vec_ir_value_find(std::vector<ir_value *> &vec, const ir_value *what, size_t *idx)
232 {
233     for (auto &it : vec) {
234         if (it != what)
235             continue;
236         if (idx)
237             *idx = &it - &vec[0];
238         return true;
239     }
240     return false;
241 }
242
243 static bool GMQCC_WARN vec_ir_block_find(ir_block **vec, ir_block *what, size_t *idx)
244 {
245     size_t i;
246     size_t len = vec_size(vec);
247     for (i = 0; i < len; ++i) {
248         if (vec[i] == what) {
249             if (idx) *idx = i;
250             return true;
251         }
252     }
253     return false;
254 }
255
256 static bool GMQCC_WARN vec_ir_instr_find(std::vector<ir_instr *> &vec, ir_instr *what, size_t *idx)
257 {
258     for (auto &it : vec) {
259         if (it != what)
260             continue;
261         if (idx)
262             *idx = &it - &vec[0];
263         return true;
264     }
265     return false;
266 }
267
268 /***********************************************************************
269  * IR Builder
270  */
271
272 static void ir_block_delete_quick(ir_block* self);
273 static void ir_instr_delete_quick(ir_instr *self);
274 static void ir_function_delete_quick(ir_function *self);
275
276 ir_builder::ir_builder(const std::string& modulename)
277 : m_name(modulename),
278   m_code(new code_t)
279 {
280     m_htglobals   = util_htnew(IR_HT_SIZE);
281     m_htfields    = util_htnew(IR_HT_SIZE);
282     m_htfunctions = util_htnew(IR_HT_SIZE);
283
284     m_nil = new ir_value("nil", store_value, TYPE_NIL);
285     m_nil->m_cvq = CV_CONST;
286
287     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
288         /* we write to them, but they're not supposed to be used outside the IR, so
289          * let's not allow the generation of ir_instrs which use these.
290          * So it's a constant noexpr.
291          */
292         m_vinstr_temp[i] = new ir_value("vinstr_temp", store_value, TYPE_NOEXPR);
293         m_vinstr_temp[i]->m_cvq = CV_CONST;
294     }
295 }
296
297 ir_builder::~ir_builder()
298 {
299     util_htdel(m_htglobals);
300     util_htdel(m_htfields);
301     util_htdel(m_htfunctions);
302     for (auto& f : m_functions)
303         ir_function_delete_quick(f.release());
304     m_functions.clear(); // delete them now before deleting the rest:
305
306     delete m_nil;
307
308     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
309         delete m_vinstr_temp[i];
310     }
311
312     m_extparams.clear();
313     m_extparam_protos.clear();
314 }
315
316 static ir_function* ir_builder_get_function(ir_builder *self, const char *name)
317 {
318     return (ir_function*)util_htget(self->m_htfunctions, name);
319 }
320
321 ir_function* ir_builder_create_function(ir_builder *self, const std::string& name, qc_type outtype)
322 {
323     ir_function *fn = ir_builder_get_function(self, name.c_str());
324     if (fn) {
325         return nullptr;
326     }
327
328     fn = new ir_function(self, outtype);
329     fn->m_name = name;
330     self->m_functions.emplace_back(fn);
331     util_htset(self->m_htfunctions, name.c_str(), fn);
332
333     fn->m_value = ir_builder_create_global(self, fn->m_name, TYPE_FUNCTION);
334     if (!fn->m_value) {
335         delete fn;
336         return nullptr;
337     }
338
339     fn->m_value->m_hasvalue = true;
340     fn->m_value->m_outtype = outtype;
341     fn->m_value->m_constval.vfunc = fn;
342     fn->m_value->m_context = fn->m_context;
343
344     return fn;
345 }
346
347 static ir_value* ir_builder_get_global(ir_builder *self, const char *name)
348 {
349     return (ir_value*)util_htget(self->m_htglobals, name);
350 }
351
352 ir_value* ir_builder_create_global(ir_builder *self, const std::string& name, qc_type vtype)
353 {
354     ir_value *ve;
355
356     if (name[0] != '#')
357     {
358         ve = ir_builder_get_global(self, name.c_str());
359         if (ve) {
360             return nullptr;
361         }
362     }
363
364     ve = new ir_value(std::string(name), store_global, vtype);
365     self->m_globals.emplace_back(ve);
366     util_htset(self->m_htglobals, name.c_str(), ve);
367     return ve;
368 }
369
370 ir_value* ir_builder_get_va_count(ir_builder *self)
371 {
372     if (self->m_reserved_va_count)
373         return self->m_reserved_va_count;
374     return (self->m_reserved_va_count = ir_builder_create_global(self, "reserved:va_count", TYPE_FLOAT));
375 }
376
377 static ir_value* ir_builder_get_field(ir_builder *self, const char *name)
378 {
379     return (ir_value*)util_htget(self->m_htfields, name);
380 }
381
382
383 ir_value* ir_builder_create_field(ir_builder *self, const std::string& name, qc_type vtype)
384 {
385     ir_value *ve = ir_builder_get_field(self, name.c_str());
386     if (ve) {
387         return nullptr;
388     }
389
390     ve = new ir_value(std::string(name), store_global, TYPE_FIELD);
391     ve->m_fieldtype = vtype;
392     self->m_fields.emplace_back(ve);
393     util_htset(self->m_htfields, name.c_str(), ve);
394     return ve;
395 }
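
/* Illustrative usage sketch, not part of the original source: how a front end
 * might drive the builder API above. The module, function and field names are
 * made up and error handling is omitted.
 */
#if 0
static void ir_builder_usage_sketch()
{
    lex_ctx_t ctx;
    ctx.file = "<sketch>";
    ctx.line = 0;

    ir_builder builder("example_module");

    /* a global field and a function returning void */
    ir_value    *org = ir_builder_create_field(&builder, "origin", TYPE_VECTOR);
    ir_function *fn  = ir_builder_create_function(&builder, "example_main", TYPE_VOID);

    /* every function body starts with at least one block */
    ir_block *entry = ir_function_create_block(ctx, fn, "entry");
    if (org && fn && entry)
        (void)!ir_block_create_return(entry, ctx, nullptr);
}
#endif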
396
397 /***********************************************************************
398  * IR Function
399  */
400
401 static bool ir_function_naive_phi(ir_function*);
402 static void ir_function_enumerate(ir_function*);
403 static bool ir_function_calculate_liferanges(ir_function*);
404 static bool ir_function_allocate_locals(ir_function*);
405
406 ir_function::ir_function(ir_builder* owner_, qc_type outtype_)
407 : m_owner(owner_),
408   m_name("<@unnamed>"),
409   m_outtype(outtype_)
410 {
411     m_context.file = "<@no context>";
412     m_context.line = 0;
413 }
414
415 ir_function::~ir_function()
416 {
417 }
418
419 static void ir_function_delete_quick(ir_function *self)
420 {
421     for (auto& b : self->m_blocks)
422         ir_block_delete_quick(b.release());
423     delete self;
424 }
425
426 static void ir_function_collect_value(ir_function *self, ir_value *v)
427 {
428     self->m_values.emplace_back(v);
429 }
430
431 ir_block* ir_function_create_block(lex_ctx_t ctx, ir_function *self, const char *label)
432 {
433     ir_block* bn = new ir_block(self, label ? std::string(label) : std::string());
434     bn->m_context = ctx;
435     self->m_blocks.emplace_back(bn);
436
437     if ((self->m_flags & IR_FLAG_BLOCK_COVERAGE) && self->m_owner->m_coverage_func)
438         (void)ir_block_create_call(bn, ctx, nullptr, self->m_owner->m_coverage_func, false);
439
440     return bn;
441 }
442
443 static bool instr_is_operation(uint16_t op)
444 {
445     return ( (op >= INSTR_MUL_F  && op <= INSTR_GT) ||
446              (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) ||
447              (op == INSTR_ADDRESS) ||
448              (op >= INSTR_NOT_F  && op <= INSTR_NOT_FNC) ||
449              (op >= INSTR_AND    && op <= INSTR_BITOR) ||
450              (op >= INSTR_CALL0  && op <= INSTR_CALL8) ||
451              (op >= VINSTR_BITAND_V && op <= VINSTR_NEG_V) );
452 }
453
454 static bool ir_function_pass_peephole(ir_function *self)
455 {
456     for (auto& bp : self->m_blocks) {
457         ir_block *block = bp.get();
458         for (size_t i = 0; i < vec_size(block->m_instr); ++i) {
459             ir_instr *inst;
460             inst = block->m_instr[i];
461
462             if (i >= 1 &&
463                 (inst->m_opcode >= INSTR_STORE_F &&
464                  inst->m_opcode <= INSTR_STORE_FNC))
465             {
466                 ir_instr *store;
467                 ir_instr *oper;
468                 ir_value *value;
469
470                 store = inst;
471
472                 oper  = block->m_instr[i-1];
473                 if (!instr_is_operation(oper->m_opcode))
474                     continue;
475
476                 /* Don't change semantics of MUL_VF in engines where these may not alias. */
477                 if (OPTS_FLAG(LEGACY_VECTOR_MATHS)) {
478                     if (oper->m_opcode == INSTR_MUL_VF && oper->_m_ops[2]->m_memberof == oper->_m_ops[1])
479                         continue;
480                     if (oper->m_opcode == INSTR_MUL_FV && oper->_m_ops[1]->m_memberof == oper->_m_ops[2])
481                         continue;
482                 }
483
484                 value = oper->_m_ops[0];
485
486                 /* only do it for SSA values */
487                 if (value->m_store != store_value)
488                     continue;
489
490                 /* don't optimize out the temp if it's used later again */
491                 if (value->m_reads.size() != 1)
492                     continue;
493
494                 /* The very next store must use this value */
495                 if (value->m_reads[0] != store)
496                     continue;
497
498                 /* And of course the store must _read_ from it, so it's in
499                  * OP 1 */
500                 if (store->_m_ops[1] != value)
501                     continue;
502
503                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
504                 (void)!ir_instr_op(oper, 0, store->_m_ops[0], true);
505
506                 vec_remove(block->m_instr, i, 1);
507                 delete store;
508             }
509             else if (inst->m_opcode == VINSTR_COND)
510             {
511                 /* For a COND on a value produced by a NOT we can
512                  * remove the NOT and swap the branch targets instead
513                  */
514                 while (true) {
515                     ir_block *tmp;
516                     size_t    inotid;
517                     ir_instr *inot;
518                     ir_value *value;
519                     value = inst->_m_ops[0];
520
521                     if (value->m_store != store_value || value->m_reads.size() != 1 || value->m_reads[0] != inst)
522                         break;
523
524                     inot = value->m_writes[0];
525                     if (inot->_m_ops[0] != value ||
526                         inot->m_opcode < INSTR_NOT_F ||
527                         inot->m_opcode > INSTR_NOT_FNC ||
528                         inot->m_opcode == INSTR_NOT_V || /* can't do these */
529                         inot->m_opcode == INSTR_NOT_S)
530                     {
531                         break;
532                     }
533
534                     /* count */
535                     ++opts_optimizationcount[OPTIM_PEEPHOLE];
536                     /* change operand */
537                     (void)!ir_instr_op(inst, 0, inot->_m_ops[1], false);
538                     /* remove NOT */
539                     tmp = inot->m_owner;
540                     for (inotid = 0; inotid < vec_size(tmp->m_instr); ++inotid) {
541                         if (tmp->m_instr[inotid] == inot)
542                             break;
543                     }
544                     if (inotid >= vec_size(tmp->m_instr)) {
545                         compile_error(inst->m_context, "sanity-check failed: failed to find instruction to optimize out");
546                         return false;
547                     }
548                     vec_remove(tmp->m_instr, inotid, 1);
549                     delete inot;
550                     /* swap ontrue/onfalse */
551                     tmp = inst->m_bops[0];
552                     inst->m_bops[0] = inst->m_bops[1];
553                     inst->m_bops[1] = tmp;
554                 }
555                 continue;
556             }
557         }
558     }
559
560     return true;
561 }
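
/* Illustrative before/after for the two peephole rewrites above, in pseudo-IR
 * (operand layout as used in this file: an operation writes its result to
 * _m_ops[0], a STORE copies _m_ops[1] into _m_ops[0]):
 *
 *   ADD_F a, b -> %tmp                   ADD_F a, b -> dst
 *   STORE_F dst <- %tmp            =>    (STORE removed)
 *
 *   NOT_F x -> %tmp                      COND x ? onfalse : ontrue
 *   COND %tmp ? ontrue : onfalse   =>    (NOT removed, branches swapped)
 */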
562
563 static bool ir_function_pass_tailrecursion(ir_function *self)
564 {
565     size_t p;
566
567     for (auto& bp : self->m_blocks) {
568         ir_block *block = bp.get();
569
570         ir_value *funcval;
571         ir_instr *ret, *call, *store = nullptr;
572
573         if (!block->m_final || vec_size(block->m_instr) < 2)
574             continue;
575
576         ret = block->m_instr[vec_size(block->m_instr)-1];
577         if (ret->m_opcode != INSTR_DONE && ret->m_opcode != INSTR_RETURN)
578             continue;
579
580         call = block->m_instr[vec_size(block->m_instr)-2];
581         if (call->m_opcode >= INSTR_STORE_F && call->m_opcode <= INSTR_STORE_FNC) {
582             /* account for the unoptimized
583              * CALL
584              * STORE %return, %tmp
585              * RETURN %tmp
586              * version
587              */
588             if (vec_size(block->m_instr) < 3)
589                 continue;
590
591             store = call;
592             call = block->m_instr[vec_size(block->m_instr)-3];
593         }
594
595         if (call->m_opcode < INSTR_CALL0 || call->m_opcode > INSTR_CALL8)
596             continue;
597
598         if (store) {
599             /* optimize out the STORE */
600             if (ret->_m_ops[0]   &&
601                 ret->_m_ops[0]   == store->_m_ops[0] &&
602                 store->_m_ops[1] == call->_m_ops[0])
603             {
604                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
605                 call->_m_ops[0] = store->_m_ops[0];
606                 vec_remove(block->m_instr, vec_size(block->m_instr) - 2, 1);
607                 delete store;
608             }
609             else
610                 continue;
611         }
612
613         if (!call->_m_ops[0])
614             continue;
615
616         funcval = call->_m_ops[1];
617         if (!funcval)
618             continue;
619         if (funcval->m_vtype != TYPE_FUNCTION || funcval->m_constval.vfunc != self)
620             continue;
621
622         /* now we have a CALL and a RET, check if it's a tailcall */
623         if (ret->_m_ops[0] && call->_m_ops[0] != ret->_m_ops[0])
624             continue;
625
626         ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
627         vec_shrinkby(block->m_instr, 2);
628
629         block->m_final = false; /* open it back up */
630
631         /* emit parameter stores */
632         for (p = 0; p < call->m_params.size(); ++p) {
633             /* assert(call->params_count <= self->locals_count); */
634             if (!ir_block_create_store(block, call->m_context, self->m_locals[p].get(), call->m_params[p])) {
635                 irerror(call->m_context, "failed to create tailcall store instruction for parameter %i", (int)p);
636                 return false;
637             }
638         }
639         if (!ir_block_create_jump(block, call->m_context, self->m_blocks[0].get())) {
640             irerror(call->m_context, "failed to create tailcall jump");
641             return false;
642         }
643
644         delete call;
645         delete ret;
646     }
647
648     return true;
649 }
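
/* Illustrative before/after for the tail-recursion rewrite above: a final
 *
 *   CALL2 self(a, b) -> %r
 *   RETURN %r
 *
 * (optionally with an intermediate STORE of %r) is replaced by storing the
 * call arguments into the function's own parameter locals and jumping back to
 * the entry block, turning the recursive call into a loop.
 */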
650
651 bool ir_function_finalize(ir_function *self)
652 {
653     if (self->m_builtin)
654         return true;
655
656     if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
657         if (!ir_function_pass_peephole(self)) {
658             irerror(self->m_context, "generic optimization pass broke something in `%s`", self->m_name.c_str());
659             return false;
660         }
661     }
662
663     if (OPTS_OPTIMIZATION(OPTIM_TAIL_RECURSION)) {
664         if (!ir_function_pass_tailrecursion(self)) {
665             irerror(self->m_context, "tail-recursion optimization pass broke something in `%s`", self->m_name.c_str());
666             return false;
667         }
668     }
669
670     if (!ir_function_naive_phi(self)) {
671         irerror(self->m_context, "internal error: ir_function_naive_phi failed");
672         return false;
673     }
674
675     for (auto& lp : self->m_locals) {
676         ir_value *v = lp.get();
677         if (v->m_vtype == TYPE_VECTOR ||
678             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
679         {
680             ir_value_vector_member(v, 0);
681             ir_value_vector_member(v, 1);
682             ir_value_vector_member(v, 2);
683         }
684     }
685     for (auto& vp : self->m_values) {
686         ir_value *v = vp.get();
687         if (v->m_vtype == TYPE_VECTOR ||
688             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
689         {
690             ir_value_vector_member(v, 0);
691             ir_value_vector_member(v, 1);
692             ir_value_vector_member(v, 2);
693         }
694     }
695
696     ir_function_enumerate(self);
697
698     if (!ir_function_calculate_liferanges(self))
699         return false;
700     if (!ir_function_allocate_locals(self))
701         return false;
702     return true;
703 }
704
705 ir_value* ir_function_create_local(ir_function *self, const std::string& name, qc_type vtype, bool param)
706 {
707     ir_value *ve;
708
709     if (param &&
710         !self->m_locals.empty() &&
711         self->m_locals.back()->m_store != store_param)
712     {
713         irerror(self->m_context, "cannot add parameters after adding locals");
714         return nullptr;
715     }
716
717     ve = new ir_value(std::string(name), (param ? store_param : store_local), vtype);
718     if (param)
719         ve->m_locked = true;
720     self->m_locals.emplace_back(ve);
721     return ve;
722 }
723
724 /***********************************************************************
725  * IR Block
726  */
727
728 ir_block::ir_block(ir_function* owner, const std::string& name)
729 : m_owner(owner),
730   m_label(name)
731 {
732     m_context.file = "<@no context>";
733     m_context.line = 0;
734 }
735
736 ir_block::~ir_block()
737 {
738     for (size_t i = 0; i != vec_size(m_instr); ++i)
739         delete m_instr[i];
740     vec_free(m_instr);
741     vec_free(m_entries);
742     vec_free(m_exits);
743 }
744
745 static void ir_block_delete_quick(ir_block* self)
746 {
747     size_t i;
748     for (i = 0; i != vec_size(self->m_instr); ++i)
749         ir_instr_delete_quick(self->m_instr[i]);
750     vec_free(self->m_instr);
751     delete self;
752 }
753
754 /***********************************************************************
755  * IR Instructions
756  */
757
758 ir_instr::ir_instr(lex_ctx_t ctx, ir_block* owner_, int op)
759 : m_opcode(op),
760   m_context(ctx),
761   m_owner(owner_)
762 {
763 }
764
765 ir_instr::~ir_instr()
766 {
767     // The following calls can only delete from vectors; we still want to
768     // delete this instruction, so the return values are ignored. Because
769     // gcc still warns about warn_unused_result calls even with an explicit
770     // (void)foo(); cast to discard the result,
771     // we improvise and use (void)!foo(); instead.
772     for (auto &it : m_phi) {
773         size_t idx;
774         if (vec_ir_instr_find(it.value->m_writes, this, &idx))
775             it.value->m_writes.erase(it.value->m_writes.begin() + idx);
776         if (vec_ir_instr_find(it.value->m_reads, this, &idx))
777             it.value->m_reads.erase(it.value->m_reads.begin() + idx);
778     }
779     for (auto &it : m_params) {
780         size_t idx;
781         if (vec_ir_instr_find(it->m_writes, this, &idx))
782             it->m_writes.erase(it->m_writes.begin() + idx);
783         if (vec_ir_instr_find(it->m_reads, this, &idx))
784             it->m_reads.erase(it->m_reads.begin() + idx);
785     }
786     (void)!ir_instr_op(this, 0, nullptr, false);
787     (void)!ir_instr_op(this, 1, nullptr, false);
788     (void)!ir_instr_op(this, 2, nullptr, false);
789 }
790
791 static void ir_instr_delete_quick(ir_instr *self)
792 {
793     self->m_phi.clear();
794     self->m_params.clear();
795     self->_m_ops[0] = nullptr;
796     self->_m_ops[1] = nullptr;
797     self->_m_ops[2] = nullptr;
798     delete self;
799 }
800
801 static bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing)
802 {
803     if (v && v->m_vtype == TYPE_NOEXPR) {
804         irerror(self->m_context, "tried to use a NOEXPR value");
805         return false;
806     }
807
808     if (self->_m_ops[op]) {
809         size_t idx;
810         if (writing && vec_ir_instr_find(self->_m_ops[op]->m_writes, self, &idx))
811             self->_m_ops[op]->m_writes.erase(self->_m_ops[op]->m_writes.begin() + idx);
812         else if (vec_ir_instr_find(self->_m_ops[op]->m_reads, self, &idx))
813             self->_m_ops[op]->m_reads.erase(self->_m_ops[op]->m_reads.begin() + idx);
814     }
815     if (v) {
816         if (writing)
817             v->m_writes.push_back(self);
818         else
819             v->m_reads.push_back(self);
820     }
821     self->_m_ops[op] = v;
822     return true;
823 }
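
/* Example (illustrative): ir_instr_op(in, 1, v, false) records `in` in
 * v->m_reads; replacing that operand later first detaches `in` from the old
 * value's read (or write) list, and passing nullptr, as ~ir_instr does, only
 * detaches. This keeps each value's use/def bookkeeping consistent.
 */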
824
825 /***********************************************************************
826  * IR Value
827  */
828
829 static void ir_value_code_setaddr(ir_value *self, int32_t gaddr)
830 {
831     self->m_code.globaladdr = gaddr;
832     if (self->m_members[0]) self->m_members[0]->m_code.globaladdr = gaddr;
833     if (self->m_members[1]) self->m_members[1]->m_code.globaladdr = gaddr;
834     if (self->m_members[2]) self->m_members[2]->m_code.globaladdr = gaddr;
835 }
836
837 static int32_t ir_value_code_addr(const ir_value *self)
838 {
839     if (self->m_store == store_return)
840         return OFS_RETURN + self->m_code.addroffset;
841     return self->m_code.globaladdr + self->m_code.addroffset;
842 }
843
844 ir_value::ir_value(std::string&& name_, store_type store_, qc_type vtype_)
845 : m_name(move(name_)),
846   m_vtype(vtype_),
847   m_store(store_)
848 {
849     m_fieldtype = TYPE_VOID;
850     m_outtype = TYPE_VOID;
851     m_flags = 0;
852
853     m_cvq          = CV_NONE;
854     m_hasvalue     = false;
855     m_context.file = "<@no context>";
856     m_context.line = 0;
857
858     memset(&m_constval, 0, sizeof(m_constval));
859     memset(&m_code,     0, sizeof(m_code));
860
861     m_members[0] = nullptr;
862     m_members[1] = nullptr;
863     m_members[2] = nullptr;
864     m_memberof = nullptr;
865
866     m_unique_life = false;
867     m_locked = false;
868     m_callparam  = false;
869 }
870
871 ir_value::~ir_value()
872 {
873     size_t i;
874     if (m_hasvalue) {
875         if (m_vtype == TYPE_STRING)
876             mem_d((void*)m_constval.vstring);
877     }
878     if (!(m_flags & IR_FLAG_SPLIT_VECTOR)) {
879         for (i = 0; i < 3; ++i) {
880             if (m_members[i])
881                 delete m_members[i];
882         }
883     }
884 }
885
886
887 /* helper: create a constant float immediate global */
888 static ir_value* ir_builder_imm_float(ir_builder *self, float value, bool add_to_list) {
889     ir_value *v = new ir_value("#IMMEDIATE", store_global, TYPE_FLOAT);
890     v->m_flags |= IR_FLAG_ERASABLE;
891     v->m_hasvalue = true;
892     v->m_cvq = CV_CONST;
893     v->m_constval.vfloat = value;
894
895     self->m_globals.emplace_back(v);
896     if (add_to_list)
897         self->m_const_floats.emplace_back(v);
898     return v;
899 }
900
901 ir_value* ir_value_vector_member(ir_value *self, unsigned int member)
902 {
903     std::string name;
904     ir_value *m;
905     if (member >= 3)
906         return nullptr;
907
908     if (self->m_members[member])
909         return self->m_members[member];
910
911     if (!self->m_name.empty()) {
912         char member_name[3] = { '_', char('x' + member), 0 };
913         name = self->m_name + member_name;
914     }
915
916     if (self->m_vtype == TYPE_VECTOR)
917     {
918         m = new ir_value(move(name), self->m_store, TYPE_FLOAT);
919         if (!m)
920             return nullptr;
921         m->m_context = self->m_context;
922
923         self->m_members[member] = m;
924         m->m_code.addroffset = member;
925     }
926     else if (self->m_vtype == TYPE_FIELD)
927     {
928         if (self->m_fieldtype != TYPE_VECTOR)
929             return nullptr;
930         m = new ir_value(move(name), self->m_store, TYPE_FIELD);
931         if (!m)
932             return nullptr;
933         m->m_fieldtype = TYPE_FLOAT;
934         m->m_context = self->m_context;
935
936         self->m_members[member] = m;
937         m->m_code.addroffset = member;
938     }
939     else
940     {
941         irerror(self->m_context, "invalid member access on %s", self->m_name.c_str());
942         return nullptr;
943     }
944
945     m->m_memberof = self;
946     return m;
947 }
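
/* Example (illustrative): for a TYPE_VECTOR value named "org",
 * ir_value_vector_member(org, 1) creates (or returns the cached) TYPE_FLOAT
 * value "org_y" with m_code.addroffset == 1 and m_memberof == org; for a
 * TYPE_FIELD of vector type the member is itself a TYPE_FIELD whose
 * m_fieldtype is TYPE_FLOAT.
 */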
948
949 static GMQCC_INLINE size_t ir_value_sizeof(const ir_value *self)
950 {
951     if (self->m_vtype == TYPE_FIELD && self->m_fieldtype == TYPE_VECTOR)
952         return type_sizeof_[TYPE_VECTOR];
953     return type_sizeof_[self->m_vtype];
954 }
955
956 static ir_value* ir_value_out(ir_function *owner, const char *name, store_type storetype, qc_type vtype)
957 {
958     ir_value *v = new ir_value(name ? std::string(name) : std::string(), storetype, vtype);
959     if (!v)
960         return nullptr;
961     ir_function_collect_value(owner, v);
962     return v;
963 }
964
965 bool ir_value_set_float(ir_value *self, float f)
966 {
967     if (self->m_vtype != TYPE_FLOAT)
968         return false;
969     self->m_constval.vfloat = f;
970     self->m_hasvalue = true;
971     return true;
972 }
973
974 bool ir_value_set_func(ir_value *self, int f)
975 {
976     if (self->m_vtype != TYPE_FUNCTION)
977         return false;
978     self->m_constval.vint = f;
979     self->m_hasvalue = true;
980     return true;
981 }
982
983 bool ir_value_set_vector(ir_value *self, vec3_t v)
984 {
985     if (self->m_vtype != TYPE_VECTOR)
986         return false;
987     self->m_constval.vvec = v;
988     self->m_hasvalue = true;
989     return true;
990 }
991
992 bool ir_value_set_field(ir_value *self, ir_value *fld)
993 {
994     if (self->m_vtype != TYPE_FIELD)
995         return false;
996     self->m_constval.vpointer = fld;
997     self->m_hasvalue = true;
998     return true;
999 }
1000
1001 bool ir_value_set_string(ir_value *self, const char *str)
1002 {
1003     if (self->m_vtype != TYPE_STRING)
1004         return false;
1005     self->m_constval.vstring = util_strdupe(str);
1006     self->m_hasvalue = true;
1007     return true;
1008 }
1009
1010 #if 0
1011 bool ir_value_set_int(ir_value *self, int i)
1012 {
1013     if (self->m_vtype != TYPE_INTEGER)
1014         return false;
1015     self->m_constval.vint = i;
1016     self->m_hasvalue = true;
1017     return true;
1018 }
1019 #endif
1020
1021 bool ir_value_lives(ir_value *self, size_t at)
1022 {
1023     for (auto& l : self->m_life) {
1024         if (l.start <= at && at <= l.end)
1025             return true;
1026         if (l.start > at) /* since it's ordered */
1027             return false;
1028     }
1029     return false;
1030 }
1031
1032 static bool ir_value_life_insert(ir_value *self, size_t idx, ir_life_entry_t e)
1033 {
1034     self->m_life.insert(self->m_life.begin() + idx, e);
1035     return true;
1036 }
1037
1038 static bool ir_value_life_merge(ir_value *self, size_t s)
1039 {
1040     size_t i;
1041     const size_t vs = self->m_life.size();
1042     ir_life_entry_t *life_found = nullptr;
1043     ir_life_entry_t *before = nullptr;
1044     ir_life_entry_t new_entry;
1045
1046     /* Find the first range >= s */
1047     for (i = 0; i < vs; ++i)
1048     {
1049         before = life_found;
1050         life_found = &self->m_life[i];
1051         if (life_found->start > s)
1052             break;
1053     }
1054     /* nothing found? append */
1055     if (i == vs) {
1056         ir_life_entry_t e;
1057         if (life_found && life_found->end+1 == s)
1058         {
1059             /* previous life range can be merged in */
1060             life_found->end++;
1061             return true;
1062         }
1063         if (life_found && life_found->end >= s)
1064             return false;
1065         e.start = e.end = s;
1066         self->m_life.emplace_back(e);
1067         return true;
1068     }
1069     /* found */
1070     if (before)
1071     {
1072         if (before->end + 1 == s &&
1073             life_found->start - 1 == s)
1074         {
1075             /* merge */
1076             before->end = life_found->end;
1077             self->m_life.erase(self->m_life.begin()+i);
1078             return true;
1079         }
1080         if (before->end + 1 == s)
1081         {
1082             /* extend before */
1083             before->end++;
1084             return true;
1085         }
1086         /* already contained */
1087         if (before->end >= s)
1088             return false;
1089     }
1090     /* extend */
1091     if (life_found->start - 1 == s)
1092     {
1093         life_found->start--;
1094         return true;
1095     }
1096     /* insert a new entry */
1097     new_entry.start = new_entry.end = s;
1098     return ir_value_life_insert(self, i, new_entry);
1099 }
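
/* Worked example (illustrative) for ir_value_life_merge, starting from the
 * ordered range list {[2,5], [9,12]}:
 *   merge(7) -> {[2,5], [7,7], [9,12]}   (new single-point entry inserted)
 *   merge(6) -> {[2,7], [9,12]}          (joins [2,5] and [7,7])
 *   merge(8) -> {[2,12]}                 (joins the remaining two ranges)
 */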
1100
1101 static bool ir_value_life_merge_into(ir_value *self, const ir_value *other)
1102 {
1103     size_t i, myi;
1104
1105     if (other->m_life.empty())
1106         return true;
1107
1108     if (self->m_life.empty()) {
1109         self->m_life = other->m_life;
1110         return true;
1111     }
1112
1113     myi = 0;
1114     for (i = 0; i < other->m_life.size(); ++i)
1115     {
1116         const ir_life_entry_t &otherlife = other->m_life[i];
1117         while (true)
1118         {
1119             ir_life_entry_t *entry = &self->m_life[myi];
1120
1121             if (otherlife.end+1 < entry->start)
1122             {
1123                 /* adding an interval before entry */
1124                 if (!ir_value_life_insert(self, myi, otherlife))
1125                     return false;
1126                 ++myi;
1127                 break;
1128             }
1129
1130             if (otherlife.start <  entry->start &&
1131                 otherlife.end+1 >= entry->start)
1132             {
1133                 /* starts earlier and overlaps */
1134                 entry->start = otherlife.start;
1135             }
1136
1137             if (otherlife.end   >  entry->end &&
1138                 otherlife.start <= entry->end+1)
1139             {
1140                 /* ends later and overlaps */
1141                 entry->end = otherlife.end;
1142             }
1143
1144             /* see if our change combines it with the next ranges */
1145             while (myi+1 < self->m_life.size() &&
1146                    entry->end+1 >= self->m_life[1+myi].start)
1147             {
1148                 /* overlaps with (myi+1) */
1149                 if (entry->end < self->m_life[1+myi].end)
1150                     entry->end = self->m_life[1+myi].end;
1151                 self->m_life.erase(self->m_life.begin() + (myi + 1));
1152                 entry = &self->m_life[myi];
1153             }
1154
1155             /* see if we're after the entry */
1156             if (otherlife.start > entry->end)
1157             {
1158                 ++myi;
1159                 /* append if we're at the end */
1160                 if (myi >= self->m_life.size()) {
1161                     self->m_life.emplace_back(otherlife);
1162                     break;
1163                 }
1164                 /* otherwise check the next range */
1165                 continue;
1166             }
1167             break;
1168         }
1169     }
1170     return true;
1171 }
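
/* Worked example (illustrative) for ir_value_life_merge_into: merging
 * other = {[2,6], [14,14]} into self = {[1,3], [10,12]} widens the first
 * entry and appends the last one, giving self = {[1,6], [10,12], [14,14]},
 * i.e. the union of both ordered interval lists.
 */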
1172
1173 static bool ir_values_overlap(const ir_value *a, const ir_value *b)
1174 {
1175     /* For any life entry in A see if it overlaps with
1176      * any life entry in B.
1177      * Note that the life entries are ordered, so we can use a
1178      * more efficient algorithm than naively translating the
1179      * statement above.
1180      */
1181
1182     const ir_life_entry_t *la, *lb, *enda, *endb;
1183
1184     /* first of all, if either has no life range, they cannot clash */
1185     if (a->m_life.empty() || b->m_life.empty())
1186         return false;
1187
1188     la = &a->m_life.front();
1189     lb = &b->m_life.front();
1190     enda = &a->m_life.back() + 1;
1191     endb = &b->m_life.back() + 1;
1192     while (true)
1193     {
1194         /* check if the entries overlap, for that,
1195          * both must start before the other one ends.
1196          */
1197         if (la->start < lb->end &&
1198             lb->start < la->end)
1199         {
1200             return true;
1201         }
1202
1203         /* entries are ordered
1204          * one entry is earlier than the other
1205          * that earlier entry will be moved forward
1206          */
1207         if (la->start < lb->start)
1208         {
1209             /* order: A B, move A forward
1210              * check if we hit the end with A
1211              */
1212             if (++la == enda)
1213                 break;
1214         }
1215         else /* if (lb->start < la->start)  actually <= */
1216         {
1217             /* order: B A, move B forward
1218              * check if we hit the end with B
1219              */
1220             if (++lb == endb)
1221                 break;
1222         }
1223     }
1224     return false;
1225 }
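
/* Example (illustrative): {[1,5], [10,15]} and {[4,8]} overlap (1 < 8 and
 * 4 < 5), while {[1,5], [10,15]} and {[5,8]} do not: the comparison is
 * strict, so ranges that merely touch at an endpoint are not considered
 * clashing.
 */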
1226
1227 /***********************************************************************
1228  * IR main operations
1229  */
1230
1231 static bool ir_check_unreachable(ir_block *self)
1232 {
1233     /* The IR should never have to deal with unreachable code */
1234     if (!self->m_final/* || OPTS_FLAG(ALLOW_UNREACHABLE_CODE)*/)
1235         return true;
1236     irerror(self->m_context, "unreachable statement (%s)", self->m_label.c_str());
1237     return false;
1238 }
1239
1240 bool ir_block_create_store_op(ir_block *self, lex_ctx_t ctx, int op, ir_value *target, ir_value *what)
1241 {
1242     ir_instr *in;
1243     if (!ir_check_unreachable(self))
1244         return false;
1245
1246     if (target->m_store == store_value &&
1247         (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC))
1248     {
1249         irerror(self->m_context, "cannot store to an SSA value");
1250         irerror(self->m_context, "trying to store: %s <- %s", target->m_name.c_str(), what->m_name.c_str());
1251         irerror(self->m_context, "instruction: %s", util_instr_str[op]);
1252         return false;
1253     }
1254
1255     in = new ir_instr(ctx, self, op);
1256     if (!in)
1257         return false;
1258
1259     if (!ir_instr_op(in, 0, target, (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC)) ||
1260         !ir_instr_op(in, 1, what, false))
1261     {
1262         delete in;
1263         return false;
1264     }
1265     vec_push(self->m_instr, in);
1266     return true;
1267 }
1268
1269 bool ir_block_create_state_op(ir_block *self, lex_ctx_t ctx, ir_value *frame, ir_value *think)
1270 {
1271     ir_instr *in;
1272     if (!ir_check_unreachable(self))
1273         return false;
1274
1275     in = new ir_instr(ctx, self, INSTR_STATE);
1276     if (!in)
1277         return false;
1278
1279     if (!ir_instr_op(in, 0, frame, false) ||
1280         !ir_instr_op(in, 1, think, false))
1281     {
1282         delete in;
1283         return false;
1284     }
1285     vec_push(self->m_instr, in);
1286     return true;
1287 }
1288
1289 static bool ir_block_create_store(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1290 {
1291     int op = 0;
1292     qc_type vtype;
1293     if (target->m_vtype == TYPE_VARIANT)
1294         vtype = what->m_vtype;
1295     else
1296         vtype = target->m_vtype;
1297
1298 #if 0
1299     if      (vtype == TYPE_FLOAT   && what->m_vtype == TYPE_INTEGER)
1300         op = INSTR_CONV_ITOF;
1301     else if (vtype == TYPE_INTEGER && what->m_vtype == TYPE_FLOAT)
1302         op = INSTR_CONV_FTOI;
1303 #endif
1304         op = type_store_instr[vtype];
1305
1306     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1307         if (op == INSTR_STORE_FLD && what->m_fieldtype == TYPE_VECTOR)
1308             op = INSTR_STORE_V;
1309     }
1310
1311     return ir_block_create_store_op(self, ctx, op, target, what);
1312 }
1313
1314 bool ir_block_create_storep(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1315 {
1316     int op = 0;
1317     qc_type vtype;
1318
1319     if (target->m_vtype != TYPE_POINTER)
1320         return false;
1321
1322     /* storing using pointer - target is a pointer, type must be
1323      * inferred from source
1324      */
1325     vtype = what->m_vtype;
1326
1327     op = type_storep_instr[vtype];
1328     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1329         if (op == INSTR_STOREP_FLD && what->m_fieldtype == TYPE_VECTOR)
1330             op = INSTR_STOREP_V;
1331     }
1332
1333     return ir_block_create_store_op(self, ctx, op, target, what);
1334 }
1335
1336 bool ir_block_create_return(ir_block *self, lex_ctx_t ctx, ir_value *v)
1337 {
1338     ir_instr *in;
1339     if (!ir_check_unreachable(self))
1340         return false;
1341
1342     self->m_final = true;
1343
1344     self->m_is_return = true;
1345     in = new ir_instr(ctx, self, INSTR_RETURN);
1346     if (!in)
1347         return false;
1348
1349     if (v && !ir_instr_op(in, 0, v, false)) {
1350         delete in;
1351         return false;
1352     }
1353
1354     vec_push(self->m_instr, in);
1355     return true;
1356 }
1357
1358 bool ir_block_create_if(ir_block *self, lex_ctx_t ctx, ir_value *v,
1359                         ir_block *ontrue, ir_block *onfalse)
1360 {
1361     ir_instr *in;
1362     if (!ir_check_unreachable(self))
1363         return false;
1364     self->m_final = true;
1365     /*in = new ir_instr(ctx, self, (v->m_vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/
1366     in = new ir_instr(ctx, self, VINSTR_COND);
1367     if (!in)
1368         return false;
1369
1370     if (!ir_instr_op(in, 0, v, false)) {
1371         delete in;
1372         return false;
1373     }
1374
1375     in->m_bops[0] = ontrue;
1376     in->m_bops[1] = onfalse;
1377
1378     vec_push(self->m_instr, in);
1379
1380     vec_push(self->m_exits, ontrue);
1381     vec_push(self->m_exits, onfalse);
1382     vec_push(ontrue->m_entries,  self);
1383     vec_push(onfalse->m_entries, self);
1384     return true;
1385 }
1386
1387 bool ir_block_create_jump(ir_block *self, lex_ctx_t ctx, ir_block *to)
1388 {
1389     ir_instr *in;
1390     if (!ir_check_unreachable(self))
1391         return false;
1392     self->m_final = true;
1393     in = new ir_instr(ctx, self, VINSTR_JUMP);
1394     if (!in)
1395         return false;
1396
1397     in->m_bops[0] = to;
1398     vec_push(self->m_instr, in);
1399
1400     vec_push(self->m_exits, to);
1401     vec_push(to->m_entries, self);
1402     return true;
1403 }
1404
1405 bool ir_block_create_goto(ir_block *self, lex_ctx_t ctx, ir_block *to)
1406 {
1407     self->m_owner->m_flags |= IR_FLAG_HAS_GOTO;
1408     return ir_block_create_jump(self, ctx, to);
1409 }
1410
1411 ir_instr* ir_block_create_phi(ir_block *self, lex_ctx_t ctx, const char *label, qc_type ot)
1412 {
1413     ir_value *out;
1414     ir_instr *in;
1415     if (!ir_check_unreachable(self))
1416         return nullptr;
1417     in = new ir_instr(ctx, self, VINSTR_PHI);
1418     if (!in)
1419         return nullptr;
1420     out = ir_value_out(self->m_owner, label, store_value, ot);
1421     if (!out) {
1422         delete in;
1423         return nullptr;
1424     }
1425     if (!ir_instr_op(in, 0, out, true)) {
1426         delete in;
1427         return nullptr;
1428     }
1429     vec_push(self->m_instr, in);
1430     return in;
1431 }
1432
1433 ir_value* ir_phi_value(ir_instr *self)
1434 {
1435     return self->_m_ops[0];
1436 }
1437
1438 void ir_phi_add(ir_instr* self, ir_block *b, ir_value *v)
1439 {
1440     ir_phi_entry_t pe;
1441
1442     if (!vec_ir_block_find(self->m_owner->m_entries, b, nullptr)) {
1443         // It must not be possible to get here, otherwise the AST
1444         // is doing something wrong.
1445         irerror(self->m_context, "Invalid entry block for PHI");
1446         exit(EXIT_FAILURE);
1447     }
1448
1449     pe.value = v;
1450     pe.from = b;
1451     v->m_reads.push_back(self);
1452     self->m_phi.push_back(pe);
1453 }
1454
1455 /* call related code */
1456 ir_instr* ir_block_create_call(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *func, bool noreturn)
1457 {
1458     ir_value *out;
1459     ir_instr *in;
1460     if (!ir_check_unreachable(self))
1461         return nullptr;
1462     in = new ir_instr(ctx, self, (noreturn ? VINSTR_NRCALL : INSTR_CALL0));
1463     if (!in)
1464         return nullptr;
1465     if (noreturn) {
1466         self->m_final = true;
1467         self->m_is_return = true;
1468     }
1469     out = ir_value_out(self->m_owner, label, (func->m_outtype == TYPE_VOID) ? store_return : store_value, func->m_outtype);
1470     if (!out) {
1471         delete in;
1472         return nullptr;
1473     }
1474     if (!ir_instr_op(in, 0, out, true) ||
1475         !ir_instr_op(in, 1, func, false))
1476     {
1477         delete in;
1478         return nullptr;
1479     }
1480     vec_push(self->m_instr, in);
1481     /*
1482     if (noreturn) {
1483         if (!ir_block_create_return(self, ctx, nullptr)) {
1484             compile_error(ctx, "internal error: failed to generate dummy-return instruction");
1485             delete in;
1486             return nullptr;
1487         }
1488     }
1489     */
1490     return in;
1491 }
1492
1493 ir_value* ir_call_value(ir_instr *self)
1494 {
1495     return self->_m_ops[0];
1496 }
1497
1498 void ir_call_param(ir_instr* self, ir_value *v)
1499 {
1500     self->m_params.push_back(v);
1501     v->m_reads.push_back(self);
1502 }
1503
1504 /* binary op related code */
1505
1506 ir_value* ir_block_create_binop(ir_block *self, lex_ctx_t ctx,
1507                                 const char *label, int opcode,
1508                                 ir_value *left, ir_value *right)
1509 {
1510     qc_type ot = TYPE_VOID;
1511     switch (opcode) {
1512         case INSTR_ADD_F:
1513         case INSTR_SUB_F:
1514         case INSTR_DIV_F:
1515         case INSTR_MUL_F:
1516         case INSTR_MUL_V:
1517         case INSTR_AND:
1518         case INSTR_OR:
1519 #if 0
1520         case INSTR_AND_I:
1521         case INSTR_AND_IF:
1522         case INSTR_AND_FI:
1523         case INSTR_OR_I:
1524         case INSTR_OR_IF:
1525         case INSTR_OR_FI:
1526 #endif
1527         case INSTR_BITAND:
1528         case INSTR_BITOR:
1529         case VINSTR_BITXOR:
1530 #if 0
1531         case INSTR_SUB_S: /* -- offset of string as float */
1532         case INSTR_MUL_IF:
1533         case INSTR_MUL_FI:
1534         case INSTR_DIV_IF:
1535         case INSTR_DIV_FI:
1536         case INSTR_BITOR_IF:
1537         case INSTR_BITOR_FI:
1538         case INSTR_BITAND_FI:
1539         case INSTR_BITAND_IF:
1540         case INSTR_EQ_I:
1541         case INSTR_NE_I:
1542 #endif
1543             ot = TYPE_FLOAT;
1544             break;
1545 #if 0
1546         case INSTR_ADD_I:
1547         case INSTR_ADD_IF:
1548         case INSTR_ADD_FI:
1549         case INSTR_SUB_I:
1550         case INSTR_SUB_FI:
1551         case INSTR_SUB_IF:
1552         case INSTR_MUL_I:
1553         case INSTR_DIV_I:
1554         case INSTR_BITAND_I:
1555         case INSTR_BITOR_I:
1556         case INSTR_XOR_I:
1557         case INSTR_RSHIFT_I:
1558         case INSTR_LSHIFT_I:
1559             ot = TYPE_INTEGER;
1560             break;
1561 #endif
1562         case INSTR_ADD_V:
1563         case INSTR_SUB_V:
1564         case INSTR_MUL_VF:
1565         case INSTR_MUL_FV:
1566         case VINSTR_BITAND_V:
1567         case VINSTR_BITOR_V:
1568         case VINSTR_BITXOR_V:
1569         case VINSTR_BITAND_VF:
1570         case VINSTR_BITOR_VF:
1571         case VINSTR_BITXOR_VF:
1572         case VINSTR_CROSS:
1573 #if 0
1574         case INSTR_DIV_VF:
1575         case INSTR_MUL_IV:
1576         case INSTR_MUL_VI:
1577 #endif
1578             ot = TYPE_VECTOR;
1579             break;
1580 #if 0
1581         case INSTR_ADD_SF:
1582             ot = TYPE_POINTER;
1583             break;
1584 #endif
1585     /*
1586      * after the following default case, the value of opcode can never
1587      * be 1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65
1588      */
1589         default:
1590             /* ranges: */
1591             /* boolean operations result in floats */
1592
1593             /*
1594              * "opcode >= 10" takes the true branch: opcode is at least 10 here;
1595              * "opcode <= 23" takes the false branch: opcode is at least 24 here.
1596              */
1597             if (opcode >= INSTR_EQ_F && opcode <= INSTR_GT)
1598                 ot = TYPE_FLOAT;
1599
1600             /*
1601              * At condition "opcode <= 23", the value of "opcode" must be
1602              * at least 24.
1603              * At condition "opcode <= 23", the value of "opcode" cannot be
1604              * equal to any of {1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65}.
1605              * The condition "opcode <= 23" cannot be true.
1606              *
1607              * Thus ot=2 (TYPE_FLOAT) can never be true
1608              */
1609 #if 0
1610             else if (opcode >= INSTR_LE && opcode <= INSTR_GT)
1611                 ot = TYPE_FLOAT;
1612             else if (opcode >= INSTR_LE_I && opcode <= INSTR_EQ_FI)
1613                 ot = TYPE_FLOAT;
1614 #endif
1615             break;
1616     };
1617     if (ot == TYPE_VOID) {
1618         /* The AST or parser were supposed to check this! */
1619         return nullptr;
1620     }
1621
1622     return ir_block_create_general_instr(self, ctx, label, opcode, left, right, ot);
1623 }
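
/* Example (illustrative): INSTR_ADD_F, the logical ops and the comparisons
 * (INSTR_EQ_F..INSTR_GT, via the default branch) all yield a TYPE_FLOAT
 * result, while INSTR_ADD_V, INSTR_MUL_VF or VINSTR_CROSS yield TYPE_VECTOR;
 * any opcode that leaves `ot` at TYPE_VOID makes this function return
 * nullptr, since the AST/parser should already have rejected it.
 */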
1624
1625 ir_value* ir_block_create_unary(ir_block *self, lex_ctx_t ctx,
1626                                 const char *label, int opcode,
1627                                 ir_value *operand)
1628 {
1629     qc_type ot = TYPE_FLOAT;
1630     switch (opcode) {
1631         case INSTR_NOT_F:
1632         case INSTR_NOT_V:
1633         case INSTR_NOT_S:
1634         case INSTR_NOT_ENT:
1635         case INSTR_NOT_FNC: /*
1636         case INSTR_NOT_I:   */
1637             ot = TYPE_FLOAT;
1638             break;
1639
1640         /*
1641          * Negation for the virtual NEG instructions is emulated as subtraction from 0.
1642          * Thankfully the operand for 0 already exists, so we just source it from here.
1643          */
1644         case VINSTR_NEG_F:
1645             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_F, nullptr, operand, ot);
1646         case VINSTR_NEG_V:
1647             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_V, nullptr, operand, TYPE_VECTOR);
1648
1649         default:
1650             ot = operand->m_vtype;
1651             break;
1652     };
1653     if (ot == TYPE_VOID) {
1654         /* The AST or parser were supposed to check this! */
1655         return nullptr;
1656     }
1657
1658     /* let's use the general instruction creator and pass nullptr for OPB */
1659     return ir_block_create_general_instr(self, ctx, label, opcode, operand, nullptr, ot);
1660 }
1661
1662 static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t ctx, const char *label,
1663                                         int op, ir_value *a, ir_value *b, qc_type outype)
1664 {
1665     ir_instr *instr;
1666     ir_value *out;
1667
1668     out = ir_value_out(self->m_owner, label, store_value, outype);
1669     if (!out)
1670         return nullptr;
1671
1672     instr = new ir_instr(ctx, self, op);
1673     if (!instr) {
1674         return nullptr;
1675     }
1676
1677     if (!ir_instr_op(instr, 0, out, true) ||
1678         !ir_instr_op(instr, 1, a, false) ||
1679         !ir_instr_op(instr, 2, b, false) )
1680     {
1681         goto on_error;
1682     }
1683
1684     vec_push(self->m_instr, instr);
1685
1686     return out;
1687 on_error:
1688     delete instr;
1689     return nullptr;
1690 }
1691
1692 ir_value* ir_block_create_fieldaddress(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field)
1693 {
1694     ir_value *v;
1695
1696     /* Support for other pointer types could be added here if so desired */
1697     if (ent->m_vtype != TYPE_ENTITY)
1698         return nullptr;
1699
1700     if (field->m_vtype != TYPE_FIELD)
1701         return nullptr;
1702
1703     v = ir_block_create_general_instr(self, ctx, label, INSTR_ADDRESS, ent, field, TYPE_POINTER);
1704     v->m_fieldtype = field->m_fieldtype;
1705     return v;
1706 }
1707
1708 ir_value* ir_block_create_load_from_ent(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field, qc_type outype)
1709 {
1710     int op;
1711     if (ent->m_vtype != TYPE_ENTITY)
1712         return nullptr;
1713
1714     /* at some point we could redirect for TYPE_POINTER... but that could lead to carelessness */
1715     if (field->m_vtype != TYPE_FIELD)
1716         return nullptr;
1717
1718     switch (outype)
1719     {
1720         case TYPE_FLOAT:    op = INSTR_LOAD_F;   break;
1721         case TYPE_VECTOR:   op = INSTR_LOAD_V;   break;
1722         case TYPE_STRING:   op = INSTR_LOAD_S;   break;
1723         case TYPE_FIELD:    op = INSTR_LOAD_FLD; break;
1724         case TYPE_ENTITY:   op = INSTR_LOAD_ENT; break;
1725         case TYPE_FUNCTION: op = INSTR_LOAD_FNC; break;
1726 #if 0
1727         case TYPE_POINTER: op = INSTR_LOAD_I;   break;
1728         case TYPE_INTEGER: op = INSTR_LOAD_I;   break;
1729 #endif
1730         default:
1731             irerror(self->m_context, "invalid type for ir_block_create_load_from_ent: %s", type_name[outype]);
1732             return nullptr;
1733     }
1734
1735     return ir_block_create_general_instr(self, ctx, label, op, ent, field, outype);
1736 }
1737
1738 /* PHI resolving breaks the SSA, and must thus be the last
1739  * step before life-range calculation.
1740  */
1741
1742 static bool ir_block_naive_phi(ir_block *self);
1743 bool ir_function_naive_phi(ir_function *self)
1744 {
1745     for (auto& b : self->m_blocks)
1746         if (!ir_block_naive_phi(b.get()))
1747             return false;
1748     return true;
1749 }
1750
1751 static bool ir_block_naive_phi(ir_block *self)
1752 {
1753     size_t i;
1754     /* FIXME: optionally, create_phi can add the phis
1755      * to a list so we don't need to loop through blocks
1756      * - anyway: "don't optimize YET"
1757      */
1758     for (i = 0; i < vec_size(self->m_instr); ++i)
1759     {
1760         ir_instr *instr = self->m_instr[i];
1761         if (instr->m_opcode != VINSTR_PHI)
1762             continue;
1763
1764         vec_remove(self->m_instr, i, 1);
1765         --i; /* NOTE: i+1 below */
1766
1767         for (auto &it : instr->m_phi) {
1768             ir_value *v = it.value;
1769             ir_block *b = it.from;
1770             if (v->m_store == store_value && v->m_reads.size() == 1 && v->m_writes.size() == 1) {
1771                 /* replace the value */
1772                 if (!ir_instr_op(v->m_writes[0], 0, instr->_m_ops[0], true))
1773                     return false;
1774             } else {
1775                 /* force a move instruction */
1776                 ir_instr *prevjump = vec_last(b->m_instr);
1777                 vec_pop(b->m_instr);
1778                 b->m_final = false;
1779                 instr->_m_ops[0]->m_store = store_global;
1780                 if (!ir_block_create_store(b, instr->m_context, instr->_m_ops[0], v))
1781                     return false;
1782                 instr->_m_ops[0]->m_store = store_value;
1783                 vec_push(b->m_instr, prevjump);
1784                 b->m_final = true;
1785             }
1786         }
1787         delete instr;
1788     }
1789     return true;
1790 }
1791
1792 /***********************************************************************
1793  * IR Temp allocation code
1794  * Propagates value life ranges by walking through the function backwards
1795  * until no more changes are made.
1796  * In theory this needs one additional pass for every level of loop
1797  * nesting, though this implementation might run an additional pass
1798  * for nested ifs.
1799  */
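/* The driver (ir_function_calculate_liferanges below) is a plain fixpoint
 * iteration, roughly:
 *     do {
 *         changed = false;
 *         for each block, in reverse order:
 *             ir_block_life_propagate(block, &changed);
 *     } while (changed);
 */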
1800
1801 /* Enumerate instructions used by values' life-ranges
1802  */
1803 static void ir_block_enumerate(ir_block *self, size_t *_eid)
1804 {
1805     size_t i;
1806     size_t eid = *_eid;
1807     for (i = 0; i < vec_size(self->m_instr); ++i)
1808     {
1809         self->m_instr[i]->m_eid = eid++;
1810     }
1811     *_eid = eid;
1812 }
1813
1814 /* Enumerate blocks and instructions.
1815  * The block-enumeration is unordered!
1816  * We do not really use the block enumeration; however,
1817  * the instruction enumeration is important for life-ranges.
1818  */
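/* Example: with block B0 holding 2 instructions and B1 holding 1, the ids
 * become B0.entry=0, B0[0]=1, B0[1]=2, B1.entry=3, B1[0]=4.
 */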
1819 void ir_function_enumerate(ir_function *self)
1820 {
1821     size_t instruction_id = 0;
1822     size_t block_eid = 0;
1823     for (auto& block : self->m_blocks)
1824     {
1825         /* each block now gets an additional "entry" instruction id
1826          * we can use to avoid point-life issues
1827          */
1828         block->m_entry_id = instruction_id;
1829         block->m_eid      = block_eid;
1830         ++instruction_id;
1831         ++block_eid;
1832
1833         ir_block_enumerate(block.get(), &instruction_id);
1834     }
1835 }
1836
1837 /* Local-value allocator
1838  * After finishing creating the liferange of all values used in a function
1839  * we can allocate their global-positions.
1840  * This is the counterpart to register-allocation in register machines.
1841  */
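/* locals[i] holds the merged "slot" value, sizes[i] its size in globals,
 * positions[i] its final offset (filled in later by
 * ir_function_allocate_locals), and unique[i] marks slots reserved for a
 * single unique liferange.
 */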
1842 struct function_allocator {
1843     ir_value **locals;
1844     size_t *sizes;
1845     size_t *positions;
1846     bool *unique;
1847 };
1848
1849 static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
1850 {
1851     ir_value *slot;
1852     size_t vsize = ir_value_sizeof(var);
1853
1854     var->m_code.local = vec_size(alloc->locals);
1855
1856     slot = new ir_value("reg", store_global, var->m_vtype);
1857     if (!slot)
1858         return false;
1859
1860     if (!ir_value_life_merge_into(slot, var))
1861         goto localerror;
1862
1863     vec_push(alloc->locals, slot);
1864     vec_push(alloc->sizes, vsize);
1865     vec_push(alloc->unique, var->m_unique_life);
1866
1867     return true;
1868
1869 localerror:
1870     delete slot;
1871     return false;
1872 }
1873
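/* Try to place 'v' into an existing, non-unique slot whose liferange does
 * not overlap. Parameter slots are only reused when already large enough
 * (they are never resized); other slots grow as needed. If no slot fits,
 * a fresh one is allocated.
 */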
1874 static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
1875 {
1876     size_t a;
1877     ir_value *slot;
1878
1879     if (v->m_unique_life)
1880         return function_allocator_alloc(alloc, v);
1881
1882     for (a = 0; a < vec_size(alloc->locals); ++a)
1883     {
1884         /* if it's reserved for a unique liferange: skip */
1885         if (alloc->unique[a])
1886             continue;
1887
1888         slot = alloc->locals[a];
1889
1890         /* never resize parameters;
1891          * this will be required later when overlapping temps + locals
1892          */
1893         if (a < vec_size(self->m_params) &&
1894             alloc->sizes[a] < ir_value_sizeof(v))
1895         {
1896             continue;
1897         }
1898
1899         if (ir_values_overlap(v, slot))
1900             continue;
1901
1902         if (!ir_value_life_merge_into(slot, v))
1903             return false;
1904
1905         /* adjust size for this slot */
1906         if (alloc->sizes[a] < ir_value_sizeof(v))
1907             alloc->sizes[a] = ir_value_sizeof(v);
1908
1909         v->m_code.local = a;
1910         return true;
1911     }
1912     if (a >= vec_size(alloc->locals)) {
1913         if (!function_allocator_alloc(alloc, v))
1914             return false;
1915     }
1916     return true;
1917 }
1918
1919 bool ir_function_allocate_locals(ir_function *self)
1920 {
1921     bool   retval = true;
1922     size_t pos;
1923     bool   opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
1924
1925     function_allocator lockalloc, globalloc;
1926
1927     if (self->m_locals.empty() && self->m_values.empty())
1928         return true;
1929
1930     globalloc.locals    = nullptr;
1931     globalloc.sizes     = nullptr;
1932     globalloc.positions = nullptr;
1933     globalloc.unique    = nullptr;
1934     lockalloc.locals    = nullptr;
1935     lockalloc.sizes     = nullptr;
1936     lockalloc.positions = nullptr;
1937     lockalloc.unique    = nullptr;
1938
1939     size_t i;
1940     for (i = 0; i < self->m_locals.size(); ++i)
1941     {
1942         ir_value *v = self->m_locals[i].get();
1943         if ((self->m_flags & IR_FLAG_MASK_NO_LOCAL_TEMPS) || !OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) {
1944             v->m_locked      = true;
1945             v->m_unique_life = true;
1946         }
1947         else if (i >= vec_size(self->m_params))
1948             break;
1949         else
1950             v->m_locked = true; /* lock parameter locals */
1951         if (!function_allocator_alloc((v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1952             goto error;
1953     }
1954     for (; i < self->m_locals.size(); ++i)
1955     {
1956         ir_value *v = self->m_locals[i].get();
1957         if (v->m_life.empty())
1958             continue;
1959         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1960             goto error;
1961     }
1962
1963     /* Allocate a slot for any value that still exists */
1964     for (i = 0; i < self->m_values.size(); ++i)
1965     {
1966         ir_value *v = self->m_values[i].get();
1967
1968         if (v->m_life.empty())
1969             continue;
1970
1971         /* CALL optimization:
1972          * If the value is a parameter-temp: 1 write, 1 read from a CALL
1973          * and it's not "locked", write it to the OFS_PARM directly.
1974          */
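        /* Illustrative example (hypothetical source): in "foo(x + 1)" the
         * temp holding "x + 1" can be given the address of its OFS_PARM
         * slot directly, so no extra STORE is needed at the call site.
         */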
1975         if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->m_locked && !v->m_unique_life) {
1976             if (v->m_reads.size() == 1 && v->m_writes.size() == 1 &&
1977                 (v->m_reads[0]->m_opcode == VINSTR_NRCALL ||
1978                  (v->m_reads[0]->m_opcode >= INSTR_CALL0 && v->m_reads[0]->m_opcode <= INSTR_CALL8)
1979                 )
1980                )
1981             {
1982                 size_t param;
1983                 ir_instr *call = v->m_reads[0];
1984                 if (!vec_ir_value_find(call->m_params, v, &param)) {
1985                     irerror(call->m_context, "internal error: unlocked parameter %s not found", v->m_name.c_str());
1986                     goto error;
1987                 }
1988                 ++opts_optimizationcount[OPTIM_CALL_STORES];
1989                 v->m_callparam = true;
1990                 if (param < 8)
1991                     ir_value_code_setaddr(v, OFS_PARM0 + 3*param);
1992                 else {
1993                     size_t nprotos = self->m_owner->m_extparam_protos.size();
1994                     ir_value *ep;
1995                     param -= 8;
1996                     if (nprotos > param)
1997                         ep = self->m_owner->m_extparam_protos[param].get();
1998                     else
1999                     {
2000                         ep = ir_gen_extparam_proto(self->m_owner);
2001                         while (++nprotos <= param)
2002                             ep = ir_gen_extparam_proto(self->m_owner);
2003                     }
2004                     ir_instr_op(v->m_writes[0], 0, ep, true);
2005                     call->m_params[param+8] = ep;
2006                 }
2007                 continue;
2008             }
2009             if (v->m_writes.size() == 1 && v->m_writes[0]->m_opcode == INSTR_CALL0) {
2010                 v->m_store = store_return;
2011                 if (v->m_members[0]) v->m_members[0]->m_store = store_return;
2012                 if (v->m_members[1]) v->m_members[1]->m_store = store_return;
2013                 if (v->m_members[2]) v->m_members[2]->m_store = store_return;
2014                 ++opts_optimizationcount[OPTIM_CALL_STORES];
2015                 continue;
2016             }
2017         }
2018
2019         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
2020             goto error;
2021     }
2022
2023     if (!lockalloc.sizes && !globalloc.sizes) {
2024         goto cleanup;
2025     }
2026     vec_push(lockalloc.positions, 0);
2027     vec_push(globalloc.positions, 0);
2028
2029     /* Adjust slot positions based on sizes */
2030     if (lockalloc.sizes) {
2031         pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
2032         for (i = 1; i < vec_size(lockalloc.sizes); ++i)
2033         {
2034             pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
2035             vec_push(lockalloc.positions, pos);
2036         }
2037         self->m_allocated_locals = pos + vec_last(lockalloc.sizes);
2038     }
2039     if (globalloc.sizes) {
2040         pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
2041         for (i = 1; i < vec_size(globalloc.sizes); ++i)
2042         {
2043             pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
2044             vec_push(globalloc.positions, pos);
2045         }
2046         self->m_globaltemps = pos + vec_last(globalloc.sizes);
2047     }
2048
2049     /* Locals need to know their new position */
2050     for (auto& local : self->m_locals) {
2051         if (local->m_locked || !opt_gt)
2052             local->m_code.local = lockalloc.positions[local->m_code.local];
2053         else
2054             local->m_code.local = globalloc.positions[local->m_code.local];
2055     }
2056     /* Take over the actual slot positions on values */
2057     for (auto& value : self->m_values) {
2058         if (value->m_locked || !opt_gt)
2059             value->m_code.local = lockalloc.positions[value->m_code.local];
2060         else
2061             value->m_code.local = globalloc.positions[value->m_code.local];
2062     }
2063
2064     goto cleanup;
2065
2066 error:
2067     retval = false;
2068 cleanup:
2069     for (i = 0; i < vec_size(lockalloc.locals); ++i)
2070         delete lockalloc.locals[i];
2071     for (i = 0; i < vec_size(globalloc.locals); ++i)
2072         delete globalloc.locals[i];
2073     vec_free(globalloc.unique);
2074     vec_free(globalloc.locals);
2075     vec_free(globalloc.sizes);
2076     vec_free(globalloc.positions);
2077     vec_free(lockalloc.unique);
2078     vec_free(lockalloc.locals);
2079     vec_free(lockalloc.sizes);
2080     vec_free(lockalloc.positions);
2081     return retval;
2082 }
2083
2084 /* Get information about which operand
2085  * is read from, or written to.
2086  */
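/* Both values are bitmasks over the three operand slots: bit 0 = _m_ops[0],
 * bit 1 = _m_ops[1], bit 2 = _m_ops[2]. The default (write = 1, read = 6)
 * therefore means: operand 0 is written, operands 1 and 2 are read.
 */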
2087 static void ir_op_read_write(int op, size_t *read, size_t *write)
2088 {
2089     switch (op)
2090     {
2091     case VINSTR_JUMP:
2092     case INSTR_GOTO:
2093         *write = 0;
2094         *read = 0;
2095         break;
2096     case INSTR_IF:
2097     case INSTR_IFNOT:
2098 #if 0
2099     case INSTR_IF_S:
2100     case INSTR_IFNOT_S:
2101 #endif
2102     case INSTR_RETURN:
2103     case VINSTR_COND:
2104         *write = 0;
2105         *read = 1;
2106         break;
2107     case INSTR_STOREP_F:
2108     case INSTR_STOREP_V:
2109     case INSTR_STOREP_S:
2110     case INSTR_STOREP_ENT:
2111     case INSTR_STOREP_FLD:
2112     case INSTR_STOREP_FNC:
2113         *write = 0;
2114         *read  = 7;
2115         break;
2116     default:
2117         *write = 1;
2118         *read = 6;
2119         break;
2120     };
2121 }
2122
2123 static bool ir_block_living_add_instr(ir_block *self, size_t eid) {
2124     bool changed = false;
2125     for (auto &it : self->m_living)
2126         if (ir_value_life_merge(it, eid))
2127             changed = true;
2128     return changed;
2129 }
2130
2131 static bool ir_block_living_lock(ir_block *self) {
2132     bool changed = false;
2133     for (auto &it : self->m_living) {
2134         if (it->m_locked)
2135             continue;
2136         it->m_locked = true;
2137         changed = true;
2138     }
2139     return changed;
2140 }
2141
2142 static bool ir_block_life_propagate(ir_block *self, bool *changed)
2143 {
2144     ir_instr *instr;
2145     ir_value *value;
2146     size_t i, o, p, mem;
2147     // bitmasks which operands are read from or written to
2148     size_t read, write;
2149
2150     self->m_living.clear();
2151
2152     p = vec_size(self->m_exits);
2153     for (i = 0; i < p; ++i) {
2154         ir_block *prev = self->m_exits[i];
2155         for (auto &it : prev->m_living)
2156             if (!vec_ir_value_find(self->m_living, it, nullptr))
2157                 self->m_living.push_back(it);
2158     }
2159
2160     i = vec_size(self->m_instr);
2161     while (i)
2162     { --i;
2163         instr = self->m_instr[i];
2164
2165         /* See which operands are read and write operands */
2166         ir_op_read_write(instr->m_opcode, &read, &write);
2167
2168         /* Go through the 3 main operands
2169          * writes first, then reads
2170          */
2171         for (o = 0; o < 3; ++o)
2172         {
2173             if (!instr->_m_ops[o]) /* no such operand */
2174                 continue;
2175
2176             value = instr->_m_ops[o];
2177
2178             /* We only care about locals */
2179             /* we also calculate parameter liferanges so that locals
2180              * can take up parameter slots */
2181             if (value->m_store != store_value &&
2182                 value->m_store != store_local &&
2183                 value->m_store != store_param)
2184                 continue;
2185
2186             /* write operands */
2187             /* When we write to a local, we consider it "dead" for the
2188              * remaining upper part of the function, since in SSA a value
2189              * can only be written once (== created)
2190              */
2191             if (write & (1<<o))
2192             {
2193                 size_t idx;
2194                 bool in_living = vec_ir_value_find(self->m_living, value, &idx);
2195                 if (!in_living)
2196                 {
2197                     /* If the value isn't alive it hasn't been read before... */
2198                     /* TODO: See if the warning can be emitted during parsing or AST processing
2199                      * otherwise have the warning printed here.
2200                      * IF printing a warning here: include filecontext_t,
2201                      * and make sure it's only printed once
2202                      * since this function is run multiple times.
2203                      */
2204                     /* con_err( "Value only written %s\n", value->m_name); */
2205                     if (ir_value_life_merge(value, instr->m_eid))
2206                         *changed = true;
2207                 } else {
2208                     /* since 'living' won't contain it
2209                      * anymore, merge the value here, because
2210                      * (A) below won't.
2211                      */
2212                     if (ir_value_life_merge(value, instr->m_eid))
2213                         *changed = true;
2214                     // Then remove
2215                     self->m_living.erase(self->m_living.begin() + idx);
2216                 }
2217                 /* Removing a vector removes all members */
2218                 for (mem = 0; mem < 3; ++mem) {
2219                     if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], &idx)) {
2220                         if (ir_value_life_merge(value->m_members[mem], instr->m_eid))
2221                             *changed = true;
2222                         self->m_living.erase(self->m_living.begin() + idx);
2223                     }
2224                 }
2225                 /* Removing the last member removes the vector */
2226                 if (value->m_memberof) {
2227                     value = value->m_memberof;
2228                     for (mem = 0; mem < 3; ++mem) {
2229                         if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2230                             break;
2231                     }
2232                     if (mem == 3 && vec_ir_value_find(self->m_living, value, &idx)) {
2233                         if (ir_value_life_merge(value, instr->m_eid))
2234                             *changed = true;
2235                         self->m_living.erase(self->m_living.begin() + idx);
2236                     }
2237                 }
2238             }
2239         }
2240
2241         /* These operations need a special case: they can break when the
2242          * same value is used as source and destination operand, because
2243          * the engine may read the source multiple times. */
2244         if (instr->m_opcode == INSTR_MUL_VF ||
2245             instr->m_opcode == VINSTR_BITAND_VF ||
2246             instr->m_opcode == VINSTR_BITOR_VF ||
2247             instr->m_opcode == VINSTR_BITXOR ||
2248             instr->m_opcode == VINSTR_BITXOR_VF ||
2249             instr->m_opcode == VINSTR_BITXOR_V ||
2250             instr->m_opcode == VINSTR_CROSS)
2251         {
2252             value = instr->_m_ops[2];
2253             /* the float source will get an additional lifetime */
2254             if (ir_value_life_merge(value, instr->m_eid+1))
2255                 *changed = true;
2256             if (value->m_memberof && ir_value_life_merge(value->m_memberof, instr->m_eid+1))
2257                 *changed = true;
2258         }
2259
2260         if (instr->m_opcode == INSTR_MUL_FV ||
2261             instr->m_opcode == INSTR_LOAD_V ||
2262             instr->m_opcode == VINSTR_BITXOR ||
2263             instr->m_opcode == VINSTR_BITXOR_VF ||
2264             instr->m_opcode == VINSTR_BITXOR_V ||
2265             instr->m_opcode == VINSTR_CROSS)
2266         {
2267             value = instr->_m_ops[1];
2268             /* the float source will get an additional lifetime */
2269             if (ir_value_life_merge(value, instr->m_eid+1))
2270                 *changed = true;
2271             if (value->m_memberof && ir_value_life_merge(value->m_memberof, instr->m_eid+1))
2272                 *changed = true;
2273         }
2274
2275         for (o = 0; o < 3; ++o)
2276         {
2277             if (!instr->_m_ops[o]) /* no such operand */
2278                 continue;
2279
2280             value = instr->_m_ops[o];
2281
2282             /* We only care about locals */
2283             /* we also calculate parameter liferanges so that locals
2284              * can take up parameter slots */
2285             if (value->m_store != store_value &&
2286                 value->m_store != store_local &&
2287                 value->m_store != store_param)
2288                 continue;
2289
2290             /* read operands */
2291             if (read & (1<<o))
2292             {
2293                 if (!vec_ir_value_find(self->m_living, value, nullptr))
2294                     self->m_living.push_back(value);
2295                 /* reading adds the full vector */
2296                 if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2297                     self->m_living.push_back(value->m_memberof);
2298                 for (mem = 0; mem < 3; ++mem) {
2299                     if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2300                         self->m_living.push_back(value->m_members[mem]);
2301                 }
2302             }
2303         }
2304         /* PHI operands are always read operands */
2305         for (auto &it : instr->m_phi) {
2306             value = it.value;
2307             if (!vec_ir_value_find(self->m_living, value, nullptr))
2308                 self->m_living.push_back(value);
2309             /* reading adds the full vector */
2310             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2311                 self->m_living.push_back(value->m_memberof);
2312             for (mem = 0; mem < 3; ++mem) {
2313                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2314                     self->m_living.push_back(value->m_members[mem]);
2315             }
2316         }
2317
2318         /* on a call, all these values must be "locked" */
2319         if (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8) {
2320             if (ir_block_living_lock(self))
2321                 *changed = true;
2322         }
2323         /* call params are read operands too */
2324         for (auto &it : instr->m_params) {
2325             value = it;
2326             if (!vec_ir_value_find(self->m_living, value, nullptr))
2327                 self->m_living.push_back(value);
2328             /* reading adds the full vector */
2329             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2330                 self->m_living.push_back(value->m_memberof);
2331             for (mem = 0; mem < 3; ++mem) {
2332                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2333                     self->m_living.push_back(value->m_members[mem]);
2334             }
2335         }
2336
2337         /* (A) */
2338         if (ir_block_living_add_instr(self, instr->m_eid))
2339             *changed = true;
2340     }
2341     /* the "entry" instruction ID */
2342     if (ir_block_living_add_instr(self, self->m_entry_id))
2343         *changed = true;
2344
2345     return true;
2346 }
2347
2348 bool ir_function_calculate_liferanges(ir_function *self)
2349 {
2350     /* parameters live at 0 */
2351     for (size_t i = 0; i < vec_size(self->m_params); ++i)
2352         if (!ir_value_life_merge(self->m_locals[i].get(), 0))
2353             compile_error(self->m_context, "internal error: failed value-life merging");
2354
2355     bool changed;
2356     do {
2357         self->m_run_id++;
2358         changed = false;
2359         for (auto i = self->m_blocks.rbegin(); i != self->m_blocks.rend(); ++i)
2360             ir_block_life_propagate(i->get(), &changed);
2361     } while (changed);
2362
2363     if (self->m_blocks.size()) {
2364         ir_block *block = self->m_blocks[0].get();
2365         for (auto &it : block->m_living) {
2366             ir_value *v = it;
2367             if (v->m_store != store_local)
2368                 continue;
2369             if (v->m_vtype == TYPE_VECTOR)
2370                 continue;
2371             self->m_flags |= IR_FLAG_HAS_UNINITIALIZED;
2372             /* find the instruction reading from it */
2373             size_t s = 0;
2374             for (; s < v->m_reads.size(); ++s) {
2375                 if (v->m_reads[s]->m_eid == v->m_life[0].end)
2376                     break;
2377             }
2378             if (s < v->m_reads.size()) {
2379                 if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2380                               "variable `%s` may be used uninitialized in this function\n"
2381                               " -> %s:%i",
2382                               v->m_name.c_str(),
2383                               v->m_reads[s]->m_context.file, v->m_reads[s]->m_context.line)
2384                    )
2385                 {
2386                     return false;
2387                 }
2388                 continue;
2389             }
2390             if (v->m_memberof) {
2391                 ir_value *vec = v->m_memberof;
2392                 for (s = 0; s < vec->m_reads.size(); ++s) {
2393                     if (vec->m_reads[s]->m_eid == v->m_life[0].end)
2394                         break;
2395                 }
2396                 if (s < vec->m_reads.size()) {
2397                     if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2398                                   "variable `%s` may be used uninitialized in this function\n"
2399                                   " -> %s:%i",
2400                                   v->m_name.c_str(),
2401                                   vec->m_reads[s]->m_context.file, vec->m_reads[s]->m_context.line)
2402                        )
2403                     {
2404                         return false;
2405                     }
2406                     continue;
2407                 }
2408             }
2409             if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2410                           "variable `%s` may be used uninitialized in this function", v->m_name.c_str()))
2411             {
2412                 return false;
2413             }
2414         }
2415     }
2416     return true;
2417 }
2418
2419 /***********************************************************************
2420  * IR Code-Generation
2421  *
2422  * Since the IR has the convention of putting 'write' operands
2423  * at the beginning, we have to rotate the operands of instructions
2424  * properly in order to generate valid QCVM code.
2425  *
2426  * Having destinations at a fixed position is more convenient. In QC
2427  * this is *mostly* OPC, but FTE adds at least 2 instructions which
2428  * read from OPA and store to OPB rather than OPC, which is partially
2429  * the reason why the implementation of these instructions in
2430  * darkplaces has been delayed for so long.
2431  *
2432  * Breaking conventions is annoying...
2433  */
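/* For the general case at the end of gen_blocks_recursive this means IR
 * operands (out, a, b) are emitted as OPA = a, OPB = b, OPC = out; stores
 * and RETURN/DONE are re-shuffled separately.
 */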
2434 static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool islocal);
2435
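/* Emit a global of field type: its value is the referenced field's offset
 * (fieldaddr), or 0 when no constant field is attached; vector fields take
 * three consecutive global slots.
 */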
2436 static bool gen_global_field(code_t *code, ir_value *global)
2437 {
2438     if (global->m_hasvalue)
2439     {
2440         ir_value *fld = global->m_constval.vpointer;
2441         if (!fld) {
2442             irerror(global->m_context, "Invalid field constant with no field: %s", global->m_name.c_str());
2443             return false;
2444         }
2445
2446         /* copy the field's value */
2447         ir_value_code_setaddr(global, code->globals.size());
2448         code->globals.push_back(fld->m_code.fieldaddr);
2449         if (global->m_fieldtype == TYPE_VECTOR) {
2450             code->globals.push_back(fld->m_code.fieldaddr+1);
2451             code->globals.push_back(fld->m_code.fieldaddr+2);
2452         }
2453     }
2454     else
2455     {
2456         ir_value_code_setaddr(global, code->globals.size());
2457         code->globals.push_back(0);
2458         if (global->m_fieldtype == TYPE_VECTOR) {
2459             code->globals.push_back(0);
2460             code->globals.push_back(0);
2461         }
2462     }
2463     if (global->m_code.globaladdr < 0)
2464         return false;
2465     return true;
2466 }
2467
2468 static bool gen_global_pointer(code_t *code, ir_value *global)
2469 {
2470     if (global->m_hasvalue)
2471     {
2472         ir_value *target = global->m_constval.vpointer;
2473         if (!target) {
2474             irerror(global->m_context, "Invalid pointer constant: %s", global->m_name.c_str());
2475             /* nullptr pointers are pointing to the nullptr constant, which also
2476              * sits at address 0, but still has an ir_value for itself.
2477              */
2478             return false;
2479         }
2480
2481         /* Here, relocations ARE possible - in fteqcc-enhanced-qc:
2482          * void() foo; <- proto
2483          * void() *fooptr = &foo;
2484          * void() foo = { code }
2485          */
2486         if (!target->m_code.globaladdr) {
2487             /* FIXME: Check for the constant nullptr ir_value!
2488              * because then code.globaladdr being 0 is valid.
2489              */
2490             irerror(global->m_context, "FIXME: Relocation support");
2491             return false;
2492         }
2493
2494         ir_value_code_setaddr(global, code->globals.size());
2495         code->globals.push_back(target->m_code.globaladdr);
2496     }
2497     else
2498     {
2499         ir_value_code_setaddr(global, code->globals.size());
2500         code->globals.push_back(0);
2501     }
2502     if (global->m_code.globaladdr < 0)
2503         return false;
2504     return true;
2505 }
2506
2507 static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *block)
2508 {
2509     prog_section_statement_t stmt;
2510     ir_instr *instr;
2511     ir_block *target;
2512     ir_block *ontrue;
2513     ir_block *onfalse;
2514     size_t    stidx;
2515     size_t    i;
2516     int       j;
2517
2518     block->m_generated = true;
2519     block->m_code_start = code->statements.size();
2520     for (i = 0; i < vec_size(block->m_instr); ++i)
2521     {
2522         instr = block->m_instr[i];
2523
2524         if (instr->m_opcode == VINSTR_PHI) {
2525             irerror(block->m_context, "cannot generate virtual instruction (phi)");
2526             return false;
2527         }
2528
2529         if (instr->m_opcode == VINSTR_JUMP) {
2530             target = instr->m_bops[0];
2531             /* for unconditional jumps, if the target hasn't been generated
2532              * yet, we generate it right here.
2533              */
2534             if (!target->m_generated)
2535                 return gen_blocks_recursive(code, func, target);
2536
2537             /* otherwise we generate a jump instruction */
2538             stmt.opcode = INSTR_GOTO;
2539             stmt.o1.s1 = target->m_code_start - code->statements.size();
2540             stmt.o2.s1 = 0;
2541             stmt.o3.s1 = 0;
2542             if (stmt.o1.s1 != 1)
2543                 code_push_statement(code, &stmt, instr->m_context);
2544
2545             /* no further instructions can be in this block */
2546             return true;
2547         }
2548
2549         if (instr->m_opcode == VINSTR_BITXOR) {
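            /* float "xor" is lowered as (a | b) - (a & b), using the
             * builder's vinstr_temp to hold (a & b) */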
2550             stmt.opcode = INSTR_BITOR;
2551             stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]);
2552             stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2553             stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2554             code_push_statement(code, &stmt, instr->m_context);
2555             stmt.opcode = INSTR_BITAND;
2556             stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]);
2557             stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2558             stmt.o3.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]);
2559             code_push_statement(code, &stmt, instr->m_context);
2560             stmt.opcode = INSTR_SUB_F;
2561             stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[0]);
2562             stmt.o2.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]);
2563             stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2564             code_push_statement(code, &stmt, instr->m_context);
2565
2566             /* instruction generated */
2567             continue;
2568         }
2569
2570         if (instr->m_opcode == VINSTR_BITAND_V) {
2571             stmt.opcode = INSTR_BITAND;
2572             stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]);
2573             stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2574             stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2575             code_push_statement(code, &stmt, instr->m_context);
2576             ++stmt.o1.s1;
2577             ++stmt.o2.s1;
2578             ++stmt.o3.s1;
2579             code_push_statement(code, &stmt, instr->m_context);
2580             ++stmt.o1.s1;
2581             ++stmt.o2.s1;
2582             ++stmt.o3.s1;
2583             code_push_statement(code, &stmt, instr->m_context);
2584
2585             /* instruction generated */
2586             continue;
2587         }
2588
2589         if (instr->m_opcode == VINSTR_BITOR_V) {
2590             stmt.opcode = INSTR_BITOR;
2591             stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]);
2592             stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2593             stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2594             code_push_statement(code, &stmt, instr->m_context);
2595             ++stmt.o1.s1;
2596             ++stmt.o2.s1;
2597             ++stmt.o3.s1;
2598             code_push_statement(code, &stmt, instr->m_context);
2599             ++stmt.o1.s1;
2600             ++stmt.o2.s1;
2601             ++stmt.o3.s1;
2602             code_push_statement(code, &stmt, instr->m_context);
2603
2604             /* instruction generated */
2605             continue;
2606         }
2607
2608         if (instr->m_opcode == VINSTR_BITXOR_V) {
2609             for (j = 0; j < 3; ++j) {
2610                 stmt.opcode = INSTR_BITOR;
2611                 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]) + j;
2612                 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]) + j;
2613                 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]) + j;
2614                 code_push_statement(code, &stmt, instr->m_context);
2615                 stmt.opcode = INSTR_BITAND;
2616                 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]) + j;
2617                 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]) + j;
2618                 stmt.o3.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]) + j;
2619                 code_push_statement(code, &stmt, instr->m_context);
2620             }
2621             stmt.opcode = INSTR_SUB_V;
2622             stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[0]);
2623             stmt.o2.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]);
2624             stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2625             code_push_statement(code, &stmt, instr->m_context);
2626
2627             /* instruction generated */
2628             continue;
2629         }
2630
2631         if (instr->m_opcode == VINSTR_BITAND_VF) {
2632             stmt.opcode = INSTR_BITAND;
2633             stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]);
2634             stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2635             stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2636             code_push_statement(code, &stmt, instr->m_context);
2637             ++stmt.o1.s1;
2638             ++stmt.o3.s1;
2639             code_push_statement(code, &stmt, instr->m_context);
2640             ++stmt.o1.s1;
2641             ++stmt.o3.s1;
2642             code_push_statement(code, &stmt, instr->m_context);
2643
2644             /* instruction generated */
2645             continue;
2646         }
2647
2648         if (instr->m_opcode == VINSTR_BITOR_VF) {
2649             stmt.opcode = INSTR_BITOR;
2650             stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]);
2651             stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2652             stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2653             code_push_statement(code, &stmt, instr->m_context);
2654             ++stmt.o1.s1;
2655             ++stmt.o3.s1;
2656             code_push_statement(code, &stmt, instr->m_context);
2657             ++stmt.o1.s1;
2658             ++stmt.o3.s1;
2659             code_push_statement(code, &stmt, instr->m_context);
2660
2661             /* instruction generated */
2662             continue;
2663         }
2664
2665         if (instr->m_opcode == VINSTR_BITXOR_VF) {
2666             for (j = 0; j < 3; ++j) {
2667                 stmt.opcode = INSTR_BITOR;
2668                 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]) + j;
2669                 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2670                 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]) + j;
2671                 code_push_statement(code, &stmt, instr->m_context);
2672                 stmt.opcode = INSTR_BITAND;
2673                 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]) + j;
2674                 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2675                 stmt.o3.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]) + j;
2676                 code_push_statement(code, &stmt, instr->m_context);
2677             }
2678             stmt.opcode = INSTR_SUB_V;
2679             stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[0]);
2680             stmt.o2.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]);
2681             stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2682             code_push_statement(code, &stmt, instr->m_context);
2683
2684             /* instruction generated */
2685             continue;
2686         }
2687
2688         if (instr->m_opcode == VINSTR_CROSS) {
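            /* cross product: for each j, a[(j+1)%3]*b[(j+2)%3] goes into
             * out[j] and a[(j+2)%3]*b[(j+1)%3] into vinstr_temp[j]; the
             * final SUB_V computes out = out - temp */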
2689             stmt.opcode = INSTR_MUL_F;
2690             for (j = 0; j < 3; ++j) {
2691                 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]) + (j + 1) % 3;
2692                 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]) + (j + 2) % 3;
2693                 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]) + j;
2694                 code_push_statement(code, &stmt, instr->m_context);
2695                 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]) + (j + 2) % 3;
2696                 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]) + (j + 1) % 3;
2697                 stmt.o3.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]) + j;
2698                 code_push_statement(code, &stmt, instr->m_context);
2699             }
2700             stmt.opcode = INSTR_SUB_V;
2701             stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[0]);
2702             stmt.o2.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]);
2703             stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2704             code_push_statement(code, &stmt, instr->m_context);
2705
2706             /* instruction generated */
2707             continue;
2708         }
2709
2710         if (instr->m_opcode == VINSTR_COND) {
2711             ontrue  = instr->m_bops[0];
2712             onfalse = instr->m_bops[1];
2713             /* TODO: have the AST signal which block should
2714              * come first: e.g. optimize IFs without ELSE...
2715              */
2716
2717             stmt.o1.u1 = ir_value_code_addr(instr->_m_ops[0]);
2718             stmt.o2.u1 = 0;
2719             stmt.o3.s1 = 0;
2720
2721             if (ontrue->m_generated) {
2722                 stmt.opcode = INSTR_IF;
2723                 stmt.o2.s1 = ontrue->m_code_start - code->statements.size();
2724                 if (stmt.o2.s1 != 1)
2725                     code_push_statement(code, &stmt, instr->m_context);
2726             }
2727             if (onfalse->m_generated) {
2728                 stmt.opcode = INSTR_IFNOT;
2729                 stmt.o2.s1 = onfalse->m_code_start - code->statements.size();
2730                 if (stmt.o2.s1 != 1)
2731                     code_push_statement(code, &stmt, instr->m_context);
2732             }
2733             if (!ontrue->m_generated) {
2734                 if (onfalse->m_generated)
2735                     return gen_blocks_recursive(code, func, ontrue);
2736             }
2737             if (!onfalse->m_generated) {
2738                 if (ontrue->m_generated)
2739                     return gen_blocks_recursive(code, func, onfalse);
2740             }
2741             /* neither ontrue nor onfalse has been generated yet */
2742             stmt.opcode = INSTR_IFNOT;
2743             if (!instr->m_likely) {
2744                 /* Honor the likelihood hint */
2745                 ir_block *tmp = onfalse;
2746                 stmt.opcode = INSTR_IF;
2747                 onfalse = ontrue;
2748                 ontrue = tmp;
2749             }
2750             stidx = code->statements.size();
2751             code_push_statement(code, &stmt, instr->m_context);
2752             /* on false we jump, so add ontrue-path */
2753             if (!gen_blocks_recursive(code, func, ontrue))
2754                 return false;
2755             /* fixup the jump address */
2756             code->statements[stidx].o2.s1 = code->statements.size() - stidx;
2757             /* generate onfalse path */
2758             if (onfalse->m_generated) {
2759                 /* fixup the jump address */
2760                 code->statements[stidx].o2.s1 = onfalse->m_code_start - stidx;
2761                 if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2762                     code->statements[stidx] = code->statements[stidx+1];
2763                     if (code->statements[stidx].o1.s1 < 0)
2764                         code->statements[stidx].o1.s1++;
2765                     code_pop_statement(code);
2766                 }
2767                 stmt.opcode = code->statements.back().opcode;
2768                 if (stmt.opcode == INSTR_GOTO ||
2769                     stmt.opcode == INSTR_IF ||
2770                     stmt.opcode == INSTR_IFNOT ||
2771                     stmt.opcode == INSTR_RETURN ||
2772                     stmt.opcode == INSTR_DONE)
2773                 {
2774                     /* no use jumping from here */
2775                     return true;
2776                 }
2777                 /* may have been generated in the previous recursive call */
2778                 stmt.opcode = INSTR_GOTO;
2779                 stmt.o1.s1 = onfalse->m_code_start - code->statements.size();
2780                 stmt.o2.s1 = 0;
2781                 stmt.o3.s1 = 0;
2782                 if (stmt.o1.s1 != 1)
2783                     code_push_statement(code, &stmt, instr->m_context);
2784                 return true;
2785             }
2786             else if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2787                 code->statements[stidx] = code->statements[stidx+1];
2788                 if (code->statements[stidx].o1.s1 < 0)
2789                     code->statements[stidx].o1.s1++;
2790                 code_pop_statement(code);
2791             }
2792             /* if not, generate now */
2793             return gen_blocks_recursive(code, func, onfalse);
2794         }
2795
2796         if ( (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8)
2797            || instr->m_opcode == VINSTR_NRCALL)
2798         {
2799             size_t p, first;
2800             ir_value *retvalue;
2801
2802             first = instr->m_params.size();
2803             if (first > 8)
2804                 first = 8;
2805             for (p = 0; p < first; ++p)
2806             {
2807                 ir_value *param = instr->m_params[p];
2808                 if (param->m_callparam)
2809                     continue;
2810
2811                 stmt.opcode = INSTR_STORE_F;
2812                 stmt.o3.u1 = 0;
2813
2814                 if (param->m_vtype == TYPE_FIELD)
2815                     stmt.opcode = field_store_instr[param->m_fieldtype];
2816                 else if (param->m_vtype == TYPE_NIL)
2817                     stmt.opcode = INSTR_STORE_V;
2818                 else
2819                     stmt.opcode = type_store_instr[param->m_vtype];
2820                 stmt.o1.u1 = ir_value_code_addr(param);
2821                 stmt.o2.u1 = OFS_PARM0 + 3 * p;
2822
2823                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2824                     /* fetch 3 separate floats */
2825                     stmt.opcode = INSTR_STORE_F;
2826                     stmt.o1.u1 = ir_value_code_addr(param->m_members[0]);
2827                     code_push_statement(code, &stmt, instr->m_context);
2828                     stmt.o2.u1++;
2829                     stmt.o1.u1 = ir_value_code_addr(param->m_members[1]);
2830                     code_push_statement(code, &stmt, instr->m_context);
2831                     stmt.o2.u1++;
2832                     stmt.o1.u1 = ir_value_code_addr(param->m_members[2]);
2833                     code_push_statement(code, &stmt, instr->m_context);
2834                 }
2835                 else
2836                     code_push_statement(code, &stmt, instr->m_context);
2837             }
2838             /* Now handle extparams */
2839             first = instr->m_params.size();
2840             for (; p < first; ++p)
2841             {
2842                 ir_builder *ir = func->m_owner;
2843                 ir_value *param = instr->m_params[p];
2844                 ir_value *targetparam;
2845
2846                 if (param->m_callparam)
2847                     continue;
2848
2849                 if (p-8 >= ir->m_extparams.size())
2850                     ir_gen_extparam(ir);
2851
2852                 targetparam = ir->m_extparams[p-8];
2853
2854                 stmt.opcode = INSTR_STORE_F;
2855                 stmt.o3.u1 = 0;
2856
2857                 if (param->m_vtype == TYPE_FIELD)
2858                     stmt.opcode = field_store_instr[param->m_fieldtype];
2859                 else if (param->m_vtype == TYPE_NIL)
2860                     stmt.opcode = INSTR_STORE_V;
2861                 else
2862                     stmt.opcode = type_store_instr[param->m_vtype];
2863                 stmt.o1.u1 = ir_value_code_addr(param);
2864                 stmt.o2.u1 = ir_value_code_addr(targetparam);
2865                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2866                     /* fetch 3 separate floats */
2867                     stmt.opcode = INSTR_STORE_F;
2868                     stmt.o1.u1 = ir_value_code_addr(param->m_members[0]);
2869                     code_push_statement(code, &stmt, instr->m_context);
2870                     stmt.o2.u1++;
2871                     stmt.o1.u1 = ir_value_code_addr(param->m_members[1]);
2872                     code_push_statement(code, &stmt, instr->m_context);
2873                     stmt.o2.u1++;
2874                     stmt.o1.u1 = ir_value_code_addr(param->m_members[2]);
2875                     code_push_statement(code, &stmt, instr->m_context);
2876                 }
2877                 else
2878                     code_push_statement(code, &stmt, instr->m_context);
2879             }
2880
2881             stmt.opcode = INSTR_CALL0 + instr->m_params.size();
2882             if (stmt.opcode > INSTR_CALL8)
2883                 stmt.opcode = INSTR_CALL8;
2884             stmt.o1.u1 = ir_value_code_addr(instr->_m_ops[1]);
2885             stmt.o2.u1 = 0;
2886             stmt.o3.u1 = 0;
2887             code_push_statement(code, &stmt, instr->m_context);
2888
2889             retvalue = instr->_m_ops[0];
2890             if (retvalue && retvalue->m_store != store_return &&
2891                 (retvalue->m_store == store_global || retvalue->m_life.size()))
2892             {
2893                 /* not to be kept in OFS_RETURN */
2894                 if (retvalue->m_vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS))
2895                     stmt.opcode = field_store_instr[retvalue->m_fieldtype];
2896                 else
2897                     stmt.opcode = type_store_instr[retvalue->m_vtype];
2898                 stmt.o1.u1 = OFS_RETURN;
2899                 stmt.o2.u1 = ir_value_code_addr(retvalue);
2900                 stmt.o3.u1 = 0;
2901                 code_push_statement(code, &stmt, instr->m_context);
2902             }
2903             continue;
2904         }
2905
2906         if (instr->m_opcode == INSTR_STATE) {
2907             stmt.opcode = instr->m_opcode;
2908             if (instr->_m_ops[0])
2909                 stmt.o1.u1 = ir_value_code_addr(instr->_m_ops[0]);
2910             if (instr->_m_ops[1])
2911                 stmt.o2.u1 = ir_value_code_addr(instr->_m_ops[1]);
2912             stmt.o3.u1 = 0;
2913             code_push_statement(code, &stmt, instr->m_context);
2914             continue;
2915         }
2916
2917         stmt.opcode = instr->m_opcode;
2918         stmt.o1.u1 = 0;
2919         stmt.o2.u1 = 0;
2920         stmt.o3.u1 = 0;
2921
2922         /* This is the general order of operands */
2923         if (instr->_m_ops[0])
2924             stmt.o3.u1 = ir_value_code_addr(instr->_m_ops[0]);
2925
2926         if (instr->_m_ops[1])
2927             stmt.o1.u1 = ir_value_code_addr(instr->_m_ops[1]);
2928
2929         if (instr->_m_ops[2])
2930             stmt.o2.u1 = ir_value_code_addr(instr->_m_ops[2]);
2931
2932         if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
2933         {
2934             stmt.o1.u1 = stmt.o3.u1;
2935             stmt.o3.u1 = 0;
2936         }
2937         else if ((stmt.opcode >= INSTR_STORE_F &&
2938                   stmt.opcode <= INSTR_STORE_FNC) ||
2939                  (stmt.opcode >= INSTR_STOREP_F &&
2940                   stmt.opcode <= INSTR_STOREP_FNC))
2941         {
2942             /* 2-operand instructions with A -> B */
2943             stmt.o2.u1 = stmt.o3.u1;
2944             stmt.o3.u1 = 0;
2945
2946             /* tiny optimization, don't output
2947              * STORE a, a
2948              */
2949             if (stmt.o2.u1 == stmt.o1.u1 &&
2950                 OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
2951             {
2952                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
2953                 continue;
2954             }
2955         }
2956         code_push_statement(code, &stmt, instr->m_context);
2957     }
2958     return true;
2959 }
2960
2961 static bool gen_function_code(code_t *code, ir_function *self)
2962 {
2963     ir_block *block;
2964     prog_section_statement_t stmt, *retst;
2965
2966     /* Starting from the entry point, we generate blocks "as they come"
2967      * for now. Dead blocks will, obviously, not be translated.
2968      */
2969     if (self->m_blocks.empty()) {
2970         irerror(self->m_context, "Function '%s' declared without body.", self->m_name.c_str());
2971         return false;
2972     }
2973
2974     block = self->m_blocks[0].get();
2975     if (block->m_generated)
2976         return true;
2977
2978     if (!gen_blocks_recursive(code, self, block)) {
2979         irerror(self->m_context, "failed to generate blocks for '%s'", self->m_name.c_str());
2980         return false;
2981     }
2982
2983     /* code_write and qcvm -disasm need to know that the function ends here */
2984     retst = &code->statements.back();
2985     if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) &&
2986         self->m_outtype == TYPE_VOID &&
2987         retst->opcode == INSTR_RETURN &&
2988         !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1)
2989     {
2990         retst->opcode = INSTR_DONE;
2991         ++opts_optimizationcount[OPTIM_VOID_RETURN];
2992     } else {
2993         lex_ctx_t last;
2994
2995         stmt.opcode = INSTR_DONE;
2996         stmt.o1.u1  = 0;
2997         stmt.o2.u1  = 0;
2998         stmt.o3.u1  = 0;
2999         last.line   = code->linenums.back();
3000         last.column = code->columnnums.back();
3001
3002         code_push_statement(code, &stmt, last);
3003     }
3004     return true;
3005 }
3006
3007 static qcint_t ir_builder_filestring(ir_builder *ir, const char *filename)
3008 {
3009     /* NOTE: filename pointers are copied, we never strdup them, so
3010      * pointer comparison would suffice; here we compare the contents.
3011      */
3012     qcint_t  str;
3013
3014     for (size_t i = 0; i != ir->m_filenames.size(); ++i) {
3015         if (!strcmp(ir->m_filenames[i], filename))
3016             return i;
3017     }
3018
3019     str = code_genstring(ir->m_code.get(), filename);
3020     ir->m_filenames.push_back(filename);
3021     ir->m_filestrings.push_back(str);
3022     return str;
3023 }
3024
3025 static bool gen_global_function(ir_builder *ir, ir_value *global)
3026 {
3027     prog_section_function_t fun;
3028     ir_function            *irfun;
3029
3030     size_t i;
3031
3032     if (!global->m_hasvalue || (!global->m_constval.vfunc)) {
3033         irerror(global->m_context, "Invalid state of function-global: not constant: %s", global->m_name.c_str());
3034         return false;
3035     }
3036
3037     irfun = global->m_constval.vfunc;
3038     fun.name = global->m_code.name;
3039     fun.file = ir_builder_filestring(ir, global->m_context.file);
3040     fun.profile = 0; /* always 0 */
3041     fun.nargs = vec_size(irfun->m_params);
3042     if (fun.nargs > 8)
3043         fun.nargs = 8;
3044
3045     for (i = 0; i < 8; ++i) {
3046         if ((int32_t)i >= fun.nargs)
3047             fun.argsize[i] = 0;
3048         else
3049             fun.argsize[i] = type_sizeof_[irfun->m_params[i]];
3050     }
3051
3052     fun.firstlocal = 0;
3053     fun.locals = irfun->m_allocated_locals;
3054
3055     if (irfun->m_builtin)
3056         fun.entry = irfun->m_builtin+1;
3057     else {
3058         irfun->m_code_function_def = ir->m_code->functions.size();
3059         fun.entry = ir->m_code->statements.size();
3060     }
3061
3062     ir->m_code->functions.push_back(fun);
3063     return true;
3064 }
3065
3066 static ir_value* ir_gen_extparam_proto(ir_builder *ir)
3067 {
3068     char      name[128];
3069
3070     util_snprintf(name, sizeof(name), "EXTPARM#%i", (int)(ir->m_extparam_protos.size()));
3071     ir_value *global = new ir_value(name, store_global, TYPE_VECTOR);
3072     ir->m_extparam_protos.emplace_back(global);
3073
3074     return global;
3075 }
3076
3077 static void ir_gen_extparam(ir_builder *ir)
3078 {
3079     prog_section_def_t def;
3080     ir_value          *global;
3081
3082     if (ir->m_extparam_protos.size() < ir->m_extparams.size()+1)
3083         global = ir_gen_extparam_proto(ir);
3084     else
3085         global = ir->m_extparam_protos[ir->m_extparams.size()].get();
3086
3087     def.name = code_genstring(ir->m_code.get(), global->m_name.c_str());
3088     def.type = TYPE_VECTOR;
3089     def.offset = ir->m_code->globals.size();
3090
3091     ir->m_code->defs.push_back(def);
3092
3093     ir_value_code_setaddr(global, def.offset);
3094
3095     ir->m_code->globals.push_back(0);
3096     ir->m_code->globals.push_back(0);
3097     ir->m_code->globals.push_back(0);
3098
3099     ir->m_extparams.emplace_back(global);
3100 }
3101
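/* At function entry, copy parameters beyond the 8 the QCVM passes in
 * OFS_PARM0..OFS_PARM7 from the EXTPARM globals into the function's own
 * parameter locals.
 */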
3102 static bool gen_function_extparam_copy(code_t *code, ir_function *self)
3103 {
3104     ir_builder *ir = self->m_owner;
3105
3106     size_t numparams = vec_size(self->m_params);
3107     if (!numparams)
3108         return true;
3109
3110     prog_section_statement_t stmt;
3111     stmt.opcode = INSTR_STORE_F;
3112     stmt.o3.s1 = 0;
3113     for (size_t i = 8; i < numparams; ++i) {
3114         size_t ext = i - 8;
3115         if (ext >= ir->m_extparams.size())
3116             ir_gen_extparam(ir);
3117
3118         ir_value *ep = ir->m_extparams[ext];
3119
3120         stmt.opcode = type_store_instr[self->m_locals[i]->m_vtype];
3121         if (self->m_locals[i]->m_vtype == TYPE_FIELD &&
3122             self->m_locals[i]->m_fieldtype == TYPE_VECTOR)
3123         {
3124             stmt.opcode = INSTR_STORE_V;
3125         }
3126         stmt.o1.u1 = ir_value_code_addr(ep);
3127         stmt.o2.u1 = ir_value_code_addr(self->m_locals[i].get());
3128         code_push_statement(code, &stmt, self->m_context);
3129     }
3130
3131     return true;
3132 }
3133
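/* Copy the variadic argument slots (the remaining OFS_PARMn registers and,
 * beyond 8, the EXTPARM globals) into the locals reserved for them,
 * presumably so the "..." arguments survive subsequent calls.
 */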
3134 static bool gen_function_varargs_copy(code_t *code, ir_function *self)
3135 {
3136     size_t i, ext, numparams, maxparams;
3137
3138     ir_builder *ir = self->m_owner;
3139     ir_value   *ep;
3140     prog_section_statement_t stmt;
3141
3142     numparams = vec_size(self->m_params);
3143     if (!numparams)
3144         return true;
3145
3146     stmt.opcode = INSTR_STORE_V;
3147     stmt.o3.s1 = 0;
3148     maxparams = numparams + self->m_max_varargs;
3149     for (i = numparams; i < maxparams; ++i) {
3150         if (i < 8) {
3151             stmt.o1.u1 = OFS_PARM0 + 3*i;
3152             stmt.o2.u1 = ir_value_code_addr(self->m_locals[i].get());
3153             code_push_statement(code, &stmt, self->m_context);
3154             continue;
3155         }
3156         ext = i - 8;
3157         while (ext >= ir->m_extparams.size())
3158             ir_gen_extparam(ir);
3159
3160         ep = ir->m_extparams[ext];
3161
3162         stmt.o1.u1 = ir_value_code_addr(ep);
3163         stmt.o2.u1 = ir_value_code_addr(self->m_locals[i].get());
3164         code_push_statement(code, &stmt, self->m_context);
3165     }
3166
3167     return true;
3168 }
3169
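/* Lays out a function's locals in the globals array. With OPTION_G, or when
 * the overlap-locals optimization is off (globally or for this function via
 * IR_FLAG_MASK_NO_OVERLAP), the function gets a fresh block at the end of the
 * globals; otherwise all functions share the common region starting at
 * m_first_common_local. With the global-temps optimization, unlocked
 * temporaries additionally live in the shared region starting at
 * m_first_common_globaltemp. */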
3170 static bool gen_function_locals(ir_builder *ir, ir_value *global)
3171 {
3172     prog_section_function_t *def;
3173     ir_function             *irfun;
3174     uint32_t                 firstlocal, firstglobal;
3175
3176     irfun = global->m_constval.vfunc;
3177     def   = &ir->m_code->functions[0] + irfun->m_code_function_def;
3178
3179     if (OPTS_OPTION_BOOL(OPTION_G) ||
3180         !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS)        ||
3181         (irfun->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3182     {
3183         firstlocal = def->firstlocal = ir->m_code->globals.size();
3184     } else {
3185         firstlocal = def->firstlocal = ir->m_first_common_local;
3186         ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
3187     }
3188
3189     firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? ir->m_first_common_globaltemp : firstlocal);
3190
3191     for (size_t i = ir->m_code->globals.size(); i < firstlocal + irfun->m_allocated_locals; ++i)
3192         ir->m_code->globals.push_back(0);
3193
3194     for (auto& lp : irfun->m_locals) {
3195         ir_value *v = lp.get();
3196         if (v->m_locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
3197             ir_value_code_setaddr(v, firstlocal + v->m_code.local);
3198             if (!ir_builder_gen_global(ir, v, true)) {
3199                 irerror(v->m_context, "failed to generate local %s", v->m_name.c_str());
3200                 return false;
3201             }
3202         }
3203         else
3204             ir_value_code_setaddr(v, firstglobal + v->m_code.local);
3205     }
3206     for (auto& vp : irfun->m_values) {
3207         ir_value *v = vp.get();
3208         if (v->m_callparam)
3209             continue;
3210         if (v->m_locked)
3211             ir_value_code_setaddr(v, firstlocal + v->m_code.local);
3212         else
3213             ir_value_code_setaddr(v, firstglobal + v->m_code.local);
3214     }
3215     return true;
3216 }
3217
3218 static bool gen_global_function_code(ir_builder *ir, ir_value *global)
3219 {
3220     prog_section_function_t *fundef;
3221     ir_function             *irfun;
3222
3225     irfun = global->m_constval.vfunc;
3226     if (!irfun) {
3227         if (global->m_cvq == CV_NONE) {
3228             if (irwarning(global->m_context, WARN_IMPLICIT_FUNCTION_POINTER,
3229                           "function `%s` has no body and in QC implicitly becomes a function-pointer",
3230                           global->m_name.c_str()))
3231             {
3232                 /* Not bailing out just now. If this happens a lot you don't want to have
3233                  * to rerun gmqcc for each such function.
3234                  */
3235
3236                 /* return false; */
3237             }
3238         }
3239         /* this was a function pointer, don't generate code for those */
3240         return true;
3241     }
3242
3243     if (irfun->m_builtin)
3244         return true;
3245
3246     /*
3247      * If there is no definition and the thing is erasable, we can skip
3248      * emitting the function entirely.
3249      */
3250     if (global->m_flags & IR_FLAG_ERASABLE && irfun->m_code_function_def < 0) {
3251         return true;
3252     }
3253
3254     if (irfun->m_code_function_def < 0) {
3255         irerror(irfun->m_context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->m_name.c_str());
3256         return false;
3257     }
3258     fundef = &ir->m_code->functions[irfun->m_code_function_def];
3259
3260     fundef->entry = ir->m_code->statements.size();
3261     if (!gen_function_locals(ir, global)) {
3262         irerror(irfun->m_context, "Failed to generate locals for function %s", irfun->m_name.c_str());
3263         return false;
3264     }
3265     if (!gen_function_extparam_copy(ir->m_code.get(), irfun)) {
3266         irerror(irfun->m_context, "Failed to generate extparam-copy code for function %s", irfun->m_name.c_str());
3267         return false;
3268     }
3269     if (irfun->m_max_varargs && !gen_function_varargs_copy(ir->m_code.get(), irfun)) {
3270         irerror(irfun->m_context, "Failed to generate vararg-copy code for function %s", irfun->m_name.c_str());
3271         return false;
3272     }
3273     if (!gen_function_code(ir->m_code.get(), irfun)) {
3274         irerror(irfun->m_context, "Failed to generate code for function %s", irfun->m_name.c_str());
3275         return false;
3276     }
3277     return true;
3278 }
3279
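/* Unless single-vector-defs is requested (OPTS_FLAG(SINGLE_VECTOR_DEFS)),
 * every vector def is accompanied by three float defs for its components,
 * named <name>_x, _y and _z at consecutive offsets. E.g. a vector "origin" at
 * offset 100 also yields origin_x@100, origin_y@101 and origin_z@102.
 * Immediates (names starting with '#') are skipped; gen_vector_fields below
 * does the same for entity fields. */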
3280 static void gen_vector_defs(code_t *code, prog_section_def_t def, const char *name)
3281 {
3282     char  *component;
3283     size_t len, i;
3284
3285     if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3286         return;
3287
3288     def.type = TYPE_FLOAT;
3289
3290     len = strlen(name);
3291
3292     component = (char*)mem_a(len+3);
3293     memcpy(component, name, len);
3294     len += 2;
3295     component[len-0] = 0;
3296     component[len-2] = '_';
3297
3298     component[len-1] = 'x';
3299
3300     for (i = 0; i < 3; ++i) {
3301         def.name = code_genstring(code, component);
3302         code->defs.push_back(def);
3303         def.offset++;
3304         component[len-1]++;
3305     }
3306
3307     mem_d(component);
3308 }
3309
3310 static void gen_vector_fields(code_t *code, prog_section_field_t fld, const char *name)
3311 {
3312     char  *component;
3313     size_t len, i;
3314
3315     if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3316         return;
3317
3318     fld.type = TYPE_FLOAT;
3319
3320     len = strlen(name);
3321
3322     component = (char*)mem_a(len+3);
3323     memcpy(component, name, len);
3324     len += 2;
3325     component[len-0] = 0;
3326     component[len-2] = '_';
3327
3328     component[len-1] = 'x';
3329
3330     for (i = 0; i < 3; ++i) {
3331         fld.name = code_genstring(code, component);
3332         code->fields.push_back(fld);
3333         fld.offset++;
3334         component[len-1]++;
3335     }
3336
3337     mem_d(component);
3338 }
3339
3340 static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool islocal)
3341 {
3342     size_t             i;
3343     int32_t           *iptr;
3344     prog_section_def_t def;
3345     bool               pushdef = opts.optimizeoff;
3346
3347     /* we don't generate split-vectors */
3348     if (global->m_vtype == TYPE_VECTOR && (global->m_flags & IR_FLAG_SPLIT_VECTOR))
3349         return true;
3350
3351     def.type = global->m_vtype;
3352     def.offset = self->m_code->globals.size();
3353     def.name = 0;
3354     if (OPTS_OPTION_BOOL(OPTION_G) || !islocal)
3355     {
3356         pushdef = true;
3357
3358         /*
3359          * if the global is erasable and is never read, there is no need
3360          * to emit it at all.
3361          */
3362         if (global->m_flags & IR_FLAG_ERASABLE && global->m_reads.empty()) {
3363             return true;
3364         }
3365
3366         if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) &&
3367             !(global->m_flags & IR_FLAG_INCLUDE_DEF) &&
3368             (global->m_name[0] == '#' || global->m_cvq == CV_CONST))
3369         {
3370             pushdef = false;
3371         }
3372
3373         if (pushdef) {
3374             if (global->m_name[0] == '#') {
3375                 if (!self->m_str_immediate)
3376                     self->m_str_immediate = code_genstring(self->m_code.get(), "IMMEDIATE");
3377                 def.name = global->m_code.name = self->m_str_immediate;
3378             }
3379             else
3380                 def.name = global->m_code.name = code_genstring(self->m_code.get(), global->m_name.c_str());
3381         }
3382         else
3383             def.name   = 0;
3384         if (islocal) {
3385             def.offset = ir_value_code_addr(global);
3386             self->m_code->defs.push_back(def);
3387             if (global->m_vtype == TYPE_VECTOR)
3388                 gen_vector_defs(self->m_code.get(), def, global->m_name.c_str());
3389             else if (global->m_vtype == TYPE_FIELD && global->m_fieldtype == TYPE_VECTOR)
3390                 gen_vector_defs(self->m_code.get(), def, global->m_name.c_str());
3391             return true;
3392         }
3393     }
3394     if (islocal)
3395         return true;
3396
3397     switch (global->m_vtype)
3398     {
3399     case TYPE_VOID:
3400         if (0 == global->m_name.compare("end_sys_globals")) {
3401             // TODO: remember this point... all the defs before this one
3402             // should be checksummed and added to progdefs.h when we generate it.
3403         }
3404         else if (0 == global->m_name.compare("end_sys_fields")) {
3405             // TODO: same as above but for entity-fields rather than globals
3406         }
3407         else if(irwarning(global->m_context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`",
3408                           global->m_name.c_str()))
3409         {
3410             /* Not bailing out */
3411             /* return false; */
3412         }
3413         /* I'd argue setting it to 0 is sufficient, but maybe some depend on knowing how far
3414          * the system fields actually go? Though the engine knows this anyway...
3415          * Maybe this could be an -f option.
3416          * fteqcc creates data of size 1 for end_sys_*, so let's do the same.
3417          */
3418         ir_value_code_setaddr(global, self->m_code->globals.size());
3419         self->m_code->globals.push_back(0);
3420         /* Add the def */
3421         if (pushdef) self->m_code->defs.push_back(def);
3422         return true;
3423     case TYPE_POINTER:
3424         if (pushdef) self->m_code->defs.push_back(def);
3425         return gen_global_pointer(self->m_code.get(), global);
3426     case TYPE_FIELD:
3427         if (pushdef) {
3428             self->m_code->defs.push_back(def);
3429             if (global->m_fieldtype == TYPE_VECTOR)
3430                 gen_vector_defs(self->m_code.get(), def, global->m_name.c_str());
3431         }
3432         return gen_global_field(self->m_code.get(), global);
3433     case TYPE_ENTITY:
3434         /* fall through */
3435     case TYPE_FLOAT:
3436     {
3437         ir_value_code_setaddr(global, self->m_code->globals.size());
3438         if (global->m_hasvalue) {
3439             iptr = (int32_t*)&global->m_constval.ivec[0];
3440             self->m_code->globals.push_back(*iptr);
3441         } else {
3442             self->m_code->globals.push_back(0);
3443         }
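        /* Non-constant globals get DEF_SAVEGLOBAL so that engines honoring
         * the flag persist their value in savegames; constants don't need to
         * be saved. */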
3444         if (!islocal && global->m_cvq != CV_CONST)
3445             def.type |= DEF_SAVEGLOBAL;
3446         if (pushdef) self->m_code->defs.push_back(def);
3447
3448         return global->m_code.globaladdr >= 0;
3449     }
3450     case TYPE_STRING:
3451     {
3452         ir_value_code_setaddr(global, self->m_code->globals.size());
3453         if (global->m_hasvalue) {
3454             uint32_t load = code_genstring(self->m_code.get(), global->m_constval.vstring);
3455             self->m_code->globals.push_back(load);
3456         } else {
3457             self->m_code->globals.push_back(0);
3458         }
3459         if (!islocal && global->m_cvq != CV_CONST)
3460             def.type |= DEF_SAVEGLOBAL;
3461         if (pushdef) self->m_code->defs.push_back(def);
3462         return global->m_code.globaladdr >= 0;
3463     }
3464     case TYPE_VECTOR:
3465     {
3466         size_t d;
3467         ir_value_code_setaddr(global, self->m_code->globals.size());
3468         if (global->m_hasvalue) {
3469             iptr = (int32_t*)&global->m_constval.ivec[0];
3470             self->m_code->globals.push_back(iptr[0]);
3471             if (global->m_code.globaladdr < 0)
3472                 return false;
3473             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3474                 self->m_code->globals.push_back(iptr[d]);
3475             }
3476         } else {
3477             self->m_code->globals.push_back(0);
3478             if (global->m_code.globaladdr < 0)
3479                 return false;
3480             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3481                 self->m_code->globals.push_back(0);
3482             }
3483         }
3484         if (!islocal && global->m_cvq != CV_CONST)
3485             def.type |= DEF_SAVEGLOBAL;
3486
3487         if (pushdef) {
3488             self->m_code->defs.push_back(def);
3489             def.type &= ~DEF_SAVEGLOBAL;
3490             gen_vector_defs(self->m_code.get(), def, global->m_name.c_str());
3491         }
3492         return global->m_code.globaladdr >= 0;
3493     }
3494     case TYPE_FUNCTION:
3495         ir_value_code_setaddr(global, self->m_code->globals.size());
3496         if (!global->m_hasvalue) {
3497             self->m_code->globals.push_back(0);
3498             if (global->m_code.globaladdr < 0)
3499                 return false;
3500         } else {
3501             self->m_code->globals.push_back(self->m_code->functions.size());
3502             if (!gen_global_function(self, global))
3503                 return false;
3504         }
3505         if (!islocal && global->m_cvq != CV_CONST)
3506             def.type |= DEF_SAVEGLOBAL;
3507         if (pushdef) self->m_code->defs.push_back(def);
3508         return true;
3509     case TYPE_VARIANT:
3510         /* assume biggest type */
3511         ir_value_code_setaddr(global, self->m_code->globals.size());
3512         self->m_code->globals.push_back(0);
3513         for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i)
3514             self->m_code->globals.push_back(0);
3515         return true;
3516     default:
3517         /* refuse to create 'void' type or any other fancy business. */
3518         irerror(global->m_context, "Invalid type for global variable `%s`: %s",
3519                 global->m_name.c_str(), type_name[global->m_vtype]);
3520         return false;
3521     }
3522 }
3523
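/* Reserves the entity-field slot for a field before globals are emitted;
 * code_alloc_field hands out offsets sized by the field's type, so a vector
 * field takes three slots while scalar fields take one. */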
3524 static GMQCC_INLINE void ir_builder_prepare_field(code_t *code, ir_value *field)
3525 {
3526     field->m_code.fieldaddr = code_alloc_field(code, type_sizeof_[field->m_fieldtype]);
3527 }
3528
3529 static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
3530 {
3531     prog_section_def_t def;
3532     prog_section_field_t fld;
3533
3536     def.type   = (uint16_t)field->m_vtype;
3537     def.offset = (uint16_t)self->m_code->globals.size();
3538
3539     /* create a global named the same as the field */
3540     if (OPTS_OPTION_U32(OPTION_STANDARD) == COMPILER_GMQCC) {
3541         /* in our standard, the global gets a dot prefix */
3542         size_t len = field->m_name.length();
3543         char name[1024];
3544
3545         /* we really don't want to have to allocate this, and 1024
3546          * bytes is more than enough for a variable/field name
3547          */
3548         if (len+2 >= sizeof(name)) {
3549             irerror(field->m_context, "invalid field name size: %u", (unsigned int)len);
3550             return false;
3551         }
3552
3553         name[0] = '.';
3554         memcpy(name+1, field->m_name.c_str(), len); // no strncpy - we used strlen above
3555         name[len+1] = 0;
3556
3557         def.name = code_genstring(self->m_code.get(), name);
3558         fld.name = def.name + 1; /* we reuse that string table entry */
3559     } else {
3560         /* in plain QC there cannot be a separate global with the same name
3561          * as the field, so we simply name the global like the field.
3562          * FIXME: fteqcc should create such a global as well;
3563          * check whether it actually uses the same name. It probably does.
3564          */
3565         def.name = code_genstring(self->m_code.get(), field->m_name.c_str());
3566         fld.name = def.name;
3567     }
3568
3569     field->m_code.name = def.name;
3570
3571     self->m_code->defs.push_back(def);
3572
3573     fld.type = field->m_fieldtype;
3574
3575     if (fld.type == TYPE_VOID) {
3576         irerror(field->m_context, "field is missing a type: %s - don't know its size", field->m_name.c_str());
3577         return false;
3578     }
3579
3580     fld.offset = field->m_code.fieldaddr;
3581
3582     self->m_code->fields.push_back(fld);
3583
3584     ir_value_code_setaddr(field, self->m_code->globals.size());
3585     self->m_code->globals.push_back(fld.offset);
3586     if (fld.type == TYPE_VECTOR) {
3587         self->m_code->globals.push_back(fld.offset+1);
3588         self->m_code->globals.push_back(fld.offset+2);
3589     }
3590
3591     if (field->m_fieldtype == TYPE_VECTOR) {
3592         gen_vector_defs  (self->m_code.get(), def, field->m_name.c_str());
3593         gen_vector_fields(self->m_code.get(), fld, field->m_name.c_str());
3594     }
3595
3596     return field->m_code.globaladdr >= 0;
3597 }
3598
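/* Support for vector-parameter splitting (SPLIT_VECTOR_PARAMETERS, see
 * ir_builder_generate below): constant float globals (CV_CONST or '#'-prefixed
 * immediates) are collected so that a constant vector used only as a direct
 * call argument can be split into three float immediates. Such a vector is
 * flagged IR_FLAG_SPLIT_VECTOR and never emitted as a vector global itself;
 * the call stores its components individually. E.g. '0 0 1' can reuse one
 * float 0 for x and y and a float 1 for z. */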
3599 static void ir_builder_collect_reusables(ir_builder *builder) {
3600     std::vector<ir_value*> reusables;
3601
3602     for (auto& gp : builder->m_globals) {
3603         ir_value *value = gp.get();
3604         if (value->m_vtype != TYPE_FLOAT || !value->m_hasvalue)
3605             continue;
3606         if (value->m_cvq == CV_CONST || (value->m_name.length() >= 1 && value->m_name[0] == '#'))
3607             reusables.emplace_back(value);
3608     }
3609     builder->m_const_floats = std::move(reusables);
3610 }
3611
3612 static void ir_builder_split_vector(ir_builder *self, ir_value *vec) {
3613     ir_value* found[3] = { nullptr, nullptr, nullptr };
3614
3615     // must not be written to
3616     if (vec->m_writes.size())
3617         return;
3618     // must not be trying to access individual members
3619     if (vec->m_members[0] || vec->m_members[1] || vec->m_members[2])
3620         return;
3621     // must actually be read, otherwise it won't be generated anyway
3622     if (vec->m_reads.empty())
3623         return;
3627
3628     // the vector may only be used directly as a function parameter, so bail out on any other use
3629     for (ir_instr *user : vec->m_reads) {
3630         // only split vectors that are passed directly as parameters to a call
3631         if ((user->m_opcode < INSTR_CALL0 || user->m_opcode > INSTR_CALL8) && user->m_opcode != VINSTR_NRCALL)
3632             return;
3633     }
3634
3635     vec->m_flags |= IR_FLAG_SPLIT_VECTOR;
3636
3637     // find existing floats making up the split
3638     for (ir_value *c : self->m_const_floats) {
3639         if (!found[0] && c->m_constval.vfloat == vec->m_constval.vvec.x)
3640             found[0] = c;
3641         if (!found[1] && c->m_constval.vfloat == vec->m_constval.vvec.y)
3642             found[1] = c;
3643         if (!found[2] && c->m_constval.vfloat == vec->m_constval.vvec.z)
3644             found[2] = c;
3645         if (found[0] && found[1] && found[2])
3646             break;
3647     }
3648
3649     // generate floats for not yet found components
3650     if (!found[0])
3651         found[0] = ir_builder_imm_float(self, vec->m_constval.vvec.x, true);
3652     if (!found[1]) {
3653         if (vec->m_constval.vvec.y == vec->m_constval.vvec.x)
3654             found[1] = found[0];
3655         else
3656             found[1] = ir_builder_imm_float(self, vec->m_constval.vvec.y, true);
3657     }
3658     if (!found[2]) {
3659         if (vec->m_constval.vvec.z == vec->m_constval.vvec.x)
3660             found[2] = found[0];
3661         else if (vec->m_constval.vvec.z == vec->m_constval.vvec.y)
3662             found[2] = found[1];
3663         else
3664             found[2] = ir_builder_imm_float(self, vec->m_constval.vvec.z, true);
3665     }
3666
3667     // the .members array should be safe to use here
3668     vec->m_members[0] = found[0];
3669     vec->m_members[1] = found[1];
3670     vec->m_members[2] = found[2];
3671
3672     // register the readers for these floats
3673     found[0]->m_reads.insert(found[0]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3674     found[1]->m_reads.insert(found[1]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3675     found[2]->m_reads.insert(found[2]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3676 }
3677
3678 static void ir_builder_split_vectors(ir_builder *self) {
3679     // member values may be added to self->m_globals during this operation, but
3680     // no new vectors will be added. We iterate by index because C++ iterators
3681     // into m_globals would be invalidated by those insertions.
3682     const size_t count = self->m_globals.size();
3683     for (size_t i = 0; i != count; ++i) {
3684         ir_value *v = self->m_globals[i].get();
3685         if (v->m_vtype != TYPE_VECTOR || !v->m_name.length() || v->m_name[0] != '#')
3686             continue;
3687         ir_builder_split_vector(self, v);
3688     }
3689 }
3690
3691 bool ir_builder_generate(ir_builder *self, const char *filename)
3692 {
3693     prog_section_statement_t stmt;
3694     char  *lnofile = nullptr;
3695
3696     if (OPTS_FLAG(SPLIT_VECTOR_PARAMETERS)) {
3697         ir_builder_collect_reusables(self);
3698         if (!self->m_const_floats.empty())
3699             ir_builder_split_vectors(self);
3700     }
3701
3702     for (auto& fp : self->m_fields)
3703         ir_builder_prepare_field(self->m_code.get(), fp.get());
3704
3705     for (auto& gp : self->m_globals) {
3706         ir_value *global = gp.get();
3707         if (!ir_builder_gen_global(self, global, false)) {
3708             return false;
3709         }
3710         if (global->m_vtype == TYPE_FUNCTION) {
3711             ir_function *func = global->m_constval.vfunc;
3712             if (func && self->m_max_locals < func->m_allocated_locals &&
3713                 !(func->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3714             {
3715                 self->m_max_locals = func->m_allocated_locals;
3716             }
3717             if (func && self->m_max_globaltemps < func->m_globaltemps)
3718                 self->m_max_globaltemps = func->m_globaltemps;
3719         }
3720     }
3721
3722     for (auto& fp : self->m_fields) {
3723         if (!ir_builder_gen_field(self, fp.get()))
3724             return false;
3725     }
3726
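    // After the named globals and fields, the tail of the globals array is:
    // 3 slots for nil, 3 slots per virtual-instruction temp, the shared
    // global-temp region, and finally the common-locals region used by
    // gen_function_locals for the overlap-locals / global-temps optimizations.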
3727     // generate nil
3728     ir_value_code_setaddr(self->m_nil, self->m_code->globals.size());
3729     self->m_code->globals.push_back(0);
3730     self->m_code->globals.push_back(0);
3731     self->m_code->globals.push_back(0);
3732
3733     // generate virtual-instruction temps
3734     for (size_t i = 0; i < IR_MAX_VINSTR_TEMPS; ++i) {
3735         ir_value_code_setaddr(self->m_vinstr_temp[i], self->m_code->globals.size());
3736         self->m_code->globals.push_back(0);
3737         self->m_code->globals.push_back(0);
3738         self->m_code->globals.push_back(0);
3739     }
3740
3741     // generate global temps
3742     self->m_first_common_globaltemp = self->m_code->globals.size();
3743     self->m_code->globals.insert(self->m_code->globals.end(), self->m_max_globaltemps, 0);
3744     // FIXME:DELME:
3745     //for (size_t i = 0; i < self->m_max_globaltemps; ++i) {
3746     //    self->m_code->globals.push_back(0);
3747     //}
3748     // generate common locals
3749     self->m_first_common_local = self->m_code->globals.size();
3750     self->m_code->globals.insert(self->m_code->globals.end(), self->m_max_locals, 0);
3751     // FIXME:DELME:
3752     //for (i = 0; i < self->m_max_locals; ++i) {
3753     //    self->m_code->globals.push_back(0);
3754     //}
3755
3756     // generate function code
3757
3758     for (auto& gp : self->m_globals) {
3759         ir_value *global = gp.get();
3760         if (global->m_vtype == TYPE_FUNCTION) {
3761             if (!gen_global_function_code(self, global)) {
3762                 return false;
3763             }
3764         }
3765     }
3766
3767     if (self->m_code->globals.size() >= 65536) {
3768         irerror(self->m_globals.back()->m_context,
3769             "This progs file would require more globals than the metadata can handle (%zu). Bailing out.",
3770             self->m_code->globals.size());
3771         return false;
3772     }
3773
3774     /* DP errors if the last instruction is not an INSTR_DONE. */
3775     if (self->m_code->statements.back().opcode != INSTR_DONE)
3776     {
3777         lex_ctx_t last;
3778
3779         stmt.opcode = INSTR_DONE;
3780         stmt.o1.u1  = 0;
3781         stmt.o2.u1  = 0;
3782         stmt.o3.u1  = 0;
3783         last.line   = self->m_code->linenums.back();
3784         last.column = self->m_code->columnnums.back();
3785
3786         code_push_statement(self->m_code.get(), &stmt, last);
3787     }
3788
3789     if (OPTS_OPTION_BOOL(OPTION_PP_ONLY))
3790         return true;
3791
3792     if (self->m_code->statements.size() != self->m_code->linenums.size()) {
3793         con_err("Linecounter wrong: %zu != %zu\n",
3794                 self->m_code->statements.size(),
3795                 self->m_code->linenums.size());
3796     } else if (OPTS_FLAG(LNO)) {
3797         char  *dot;
3798         size_t filelen = strlen(filename);
3799
3800         memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
3801         dot = strrchr(lnofile, '.');
3802         if (!dot) {
3803             vec_pop(lnofile);
3804         } else {
3805             vec_shrinkto(lnofile, dot - lnofile);
3806         }
3807         memcpy(vec_add(lnofile, 5), ".lno", 5);
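        /* e.g. "progs.dat" becomes "progs.lno"; a filename without an
         * extension simply gets ".lno" appended. */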
3808     }
3809
3810     if (!code_write(self->m_code.get(), filename, lnofile)) {
3811         vec_free(lnofile);
3812         return false;
3813     }
3814
3815     vec_free(lnofile);
3816     return true;
3817 }
3818
3819 /***********************************************************************
3820  * IR debug dump functions
3821  */
3822
3823 #define IND_BUFSZ 1024
3824
3825 static const char *qc_opname(int op)
3826 {
3827     if (op < 0) return "<INVALID>";
3828     if (op < VINSTR_END)
3829         return util_instr_str[op];
3830     switch (op) {
3831         case VINSTR_END:       return "END";
3832         case VINSTR_PHI:       return "PHI";
3833         case VINSTR_JUMP:      return "JUMP";
3834         case VINSTR_COND:      return "COND";
3835         case VINSTR_BITXOR:    return "BITXOR";
3836         case VINSTR_BITAND_V:  return "BITAND_V";
3837         case VINSTR_BITOR_V:   return "BITOR_V";
3838         case VINSTR_BITXOR_V:  return "BITXOR_V";
3839         case VINSTR_BITAND_VF: return "BITAND_VF";
3840         case VINSTR_BITOR_VF:  return "BITOR_VF";
3841         case VINSTR_BITXOR_VF: return "BITXOR_VF";
3842         case VINSTR_CROSS:     return "CROSS";
3843         case VINSTR_NEG_F:     return "NEG_F";
3844         case VINSTR_NEG_V:     return "NEG_V";
3845         default:               return "<UNK>";
3846     }
3847 }
3848
3849 void ir_builder_dump(ir_builder *b, int (*oprintf)(const char*, ...))
3850 {
3851     size_t i;
3852     char indent[IND_BUFSZ];
3853     indent[0] = '\t';
3854     indent[1] = 0;
3855
3856     oprintf("module %s\n", b->m_name.c_str());
3857     for (i = 0; i < b->m_globals.size(); ++i)
3858     {
3859         oprintf("global ");
3860         if (b->m_globals[i]->m_hasvalue)
3861             oprintf("%s = ", b->m_globals[i]->m_name.c_str());
3862         ir_value_dump(b->m_globals[i].get(), oprintf);
3863         oprintf("\n");
3864     }
3865     for (i = 0; i < b->m_functions.size(); ++i)
3866         ir_function_dump(b->m_functions[i].get(), indent, oprintf);
3867     oprintf("endmodule %s\n", b->m_name.c_str());
3868 }
3869
3870 static const char *storenames[] = {
3871     "[global]", "[local]", "[param]", "[value]", "[return]"
3872 };
3873
3874 void ir_function_dump(ir_function *f, char *ind,
3875                       int (*oprintf)(const char*, ...))
3876 {
3877     size_t i;
3878     if (f->m_builtin != 0) {
3879         oprintf("%sfunction %s = builtin %i\n", ind, f->m_name.c_str(), -f->m_builtin);
3880         return;
3881     }
3882     oprintf("%sfunction %s\n", ind, f->m_name.c_str());
3883     util_strncat(ind, "\t", IND_BUFSZ-1);
3884     if (f->m_locals.size())
3885     {
3886         oprintf("%s%i locals:\n", ind, (int)f->m_locals.size());
3887         for (i = 0; i < f->m_locals.size(); ++i) {
3888             oprintf("%s\t", ind);
3889             ir_value_dump(f->m_locals[i].get(), oprintf);
3890             oprintf("\n");
3891         }
3892     }
3893     oprintf("%sliferanges:\n", ind);
3894     for (i = 0; i < f->m_locals.size(); ++i) {
3895         const char *attr = "";
3896         size_t l, m;
3897         ir_value *v = f->m_locals[i].get();
3898         if (v->m_unique_life && v->m_locked)
3899             attr = "unique,locked ";
3900         else if (v->m_unique_life)
3901             attr = "unique ";
3902         else if (v->m_locked)
3903             attr = "locked ";
3904         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3905                 storenames[v->m_store],
3906                 attr, (v->m_callparam ? "callparam " : ""),
3907                 (int)v->m_code.local);
3908         if (v->m_life.empty())
3909             oprintf("[null]");
3910         for (l = 0; l < v->m_life.size(); ++l) {
3911             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3912         }
3913         oprintf("\n");
3914         for (m = 0; m < 3; ++m) {
3915             ir_value *vm = v->m_members[m];
3916             if (!vm)
3917                 continue;
3918             oprintf("%s\t%s: @%i ", ind, vm->m_name.c_str(), (int)vm->m_code.local);
3919             for (l = 0; l < vm->m_life.size(); ++l) {
3920                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3921             }
3922             oprintf("\n");
3923         }
3924     }
3925     for (i = 0; i < f->m_values.size(); ++i) {
3926         const char *attr = "";
3927         size_t l, m;
3928         ir_value *v = f->m_values[i].get();
3929         if (v->m_unique_life && v->m_locked)
3930             attr = "unique,locked ";
3931         else if (v->m_unique_life)
3932             attr = "unique ";
3933         else if (v->m_locked)
3934             attr = "locked ";
3935         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3936                 storenames[v->m_store],
3937                 attr, (v->m_callparam ? "callparam " : ""),
3938                 (int)v->m_code.local);
3939         if (v->m_life.empty())
3940             oprintf("[null]");
3941         for (l = 0; l < v->m_life.size(); ++l) {
3942             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3943         }
3944         oprintf("\n");
3945         for (m = 0; m < 3; ++m) {
3946             ir_value *vm = v->m_members[m];
3947             if (!vm)
3948                 continue;
3949             if (vm->m_unique_life && vm->m_locked)
3950                 attr = "unique,locked ";
3951             else if (vm->m_unique_life)
3952                 attr = "unique ";
3953             else /* reset so the parent value's attr doesn't leak into the member */
3954                 attr = (vm->m_locked ? "locked " : "");
3955             oprintf("%s\t%s: %s@%i ", ind, vm->m_name.c_str(), attr, (int)vm->m_code.local);
3956             for (l = 0; l < vm->m_life.size(); ++l) {
3957                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3958             }
3959             oprintf("\n");
3960         }
3961     }
3962     if (f->m_blocks.size())
3963     {
3964         oprintf("%slife passes: %i\n", ind, (int)f->m_run_id);
3965         for (i = 0; i < f->m_blocks.size(); ++i) {
3966             ir_block_dump(f->m_blocks[i].get(), ind, oprintf);
3967         }
3968
3969     }
3970     ind[strlen(ind)-1] = 0;
3971     oprintf("%sendfunction %s\n", ind, f->m_name.c_str());
3972 }
3973
3974 void ir_block_dump(ir_block* b, char *ind,
3975                    int (*oprintf)(const char*, ...))
3976 {
3977     size_t i;
3978     oprintf("%s:%s\n", ind, b->m_label.c_str());
3979     util_strncat(ind, "\t", IND_BUFSZ-1);
3980
3981     if (b->m_instr && b->m_instr[0])
3982         oprintf("%s (%i) [entry]\n", ind, (int)(b->m_instr[0]->m_eid-1));
3983     for (i = 0; i < vec_size(b->m_instr); ++i)
3984         ir_instr_dump(b->m_instr[i], ind, oprintf);
3985     ind[strlen(ind)-1] = 0;
3986 }
3987
3988 static void dump_phi(ir_instr *in, int (*oprintf)(const char*, ...))
3989 {
3990     oprintf("%s <- phi ", in->_m_ops[0]->m_name.c_str());
3991     for (auto &it : in->m_phi) {
3992         oprintf("([%s] : %s) ", it.from->m_label.c_str(),
3993                                 it.value->m_name.c_str());
3994     }
3995     oprintf("\n");
3996 }
3997
3998 void ir_instr_dump(ir_instr *in, char *ind,
3999                        int (*oprintf)(const char*, ...))
4000 {
4001     size_t i;
4002     const char *comma = nullptr;
4003
4004     oprintf("%s (%i) ", ind, (int)in->m_eid);
4005
4006     if (in->m_opcode == VINSTR_PHI) {
4007         dump_phi(in, oprintf);
4008         return;
4009     }
4010
4011     util_strncat(ind, "\t", IND_BUFSZ-1);
4012
4013     if (in->_m_ops[0] && (in->_m_ops[1] || in->_m_ops[2])) {
4014         /* destination operand followed by its source operands */
4015         ir_value_dump(in->_m_ops[0], oprintf);
4016         oprintf(" <- ");
4017     }
4018     if (in->m_opcode == INSTR_CALL0 || in->m_opcode == VINSTR_NRCALL) {
4019         oprintf("CALL%i\t", in->m_params.size());
4020     } else
4021         oprintf("%s\t", qc_opname(in->m_opcode));
4022
4023     if (in->_m_ops[0] && !(in->_m_ops[1] || in->_m_ops[2])) {
4024         ir_value_dump(in->_m_ops[0], oprintf);
4025         comma = ",\t";
4026     }
4027     else
4028     {
4029         for (i = 1; i != 3; ++i) {
4030             if (in->_m_ops[i]) {
4031                 if (comma)
4032                     oprintf(comma);
4033                 ir_value_dump(in->_m_ops[i], oprintf);
4034                 comma = ",\t";
4035             }
4036         }
4037     }
4038     if (in->m_bops[0]) {
4039         if (comma)
4040             oprintf(comma);
4041         oprintf("[%s]", in->m_bops[0]->m_label.c_str());
4042         comma = ",\t";
4043     }
4044     if (in->m_bops[1])
4045         oprintf("%s[%s]", comma, in->m_bops[1]->m_label.c_str());
4046     if (in->m_params.size()) {
4047         oprintf("\tparams: ");
4048         for (auto &it : in->m_params)
4049             oprintf("%s, ", it->m_name.c_str());
4050     }
4051     oprintf("\n");
4052     ind[strlen(ind)-1] = 0;
4053 }
4054
4055 static void ir_value_dump_string(const char *str, int (*oprintf)(const char*, ...))
4056 {
4057     oprintf("\"");
4058     for (; *str; ++str) {
4059         switch (*str) {
4060             case '\n': oprintf("\\n"); break;
4061             case '\r': oprintf("\\r"); break;
4062             case '\t': oprintf("\\t"); break;
4063             case '\v': oprintf("\\v"); break;
4064             case '\f': oprintf("\\f"); break;
4065             case '\b': oprintf("\\b"); break;
4066             case '\a': oprintf("\\a"); break;
4067             case '\\': oprintf("\\\\"); break;
4068             case '"': oprintf("\\\""); break;
4069             default: oprintf("%c", *str); break;
4070         }
4071     }
4072     oprintf("\"");
4073 }
4074
4075 void ir_value_dump(ir_value* v, int (*oprintf)(const char*, ...))
4076 {
4077     if (v->m_hasvalue) {
4078         switch (v->m_vtype) {
4079             default:
4080             case TYPE_VOID:
4081                 oprintf("(void)");
4082                 break;
4083             case TYPE_FUNCTION:
4084                 oprintf("fn:%s", v->m_name.c_str());
4085                 break;
4086             case TYPE_FLOAT:
4087                 oprintf("%g", v->m_constval.vfloat);
4088                 break;
4089             case TYPE_VECTOR:
4090                 oprintf("'%g %g %g'",
4091                         v->m_constval.vvec.x,
4092                         v->m_constval.vvec.y,
4093                         v->m_constval.vvec.z);
4094                 break;
4095             case TYPE_ENTITY:
4096                 oprintf("(entity)");
4097                 break;
4098             case TYPE_STRING:
4099                 ir_value_dump_string(v->m_constval.vstring, oprintf);
4100                 break;
4101 #if 0
4102             case TYPE_INTEGER:
4103                 oprintf("%i", v->m_constval.vint);
4104                 break;
4105 #endif
4106             case TYPE_POINTER:
4107                 oprintf("&%s",
4108                     v->m_constval.vpointer->m_name.c_str());
4109                 break;
4110         }
4111     } else {
4112         oprintf("%s", v->m_name.c_str());
4113     }
4114 }
4115
4116 void ir_value_dump_life(const ir_value *self, int (*oprintf)(const char*,...))
4117 {
4118     oprintf("Life of %12s:", self->m_name.c_str());
4119     for (size_t i = 0; i < self->m_life.size(); ++i)
4120     {
4121         oprintf(" + [%i, %i]\n", self->m_life[i].start, self->m_life[i].end);
4122     }
4123 }