#include <stdlib.h>
#include <string.h>

#include "gmqcc.h"
#include "ir.h"

/***********************************************************************
 * Type sizes used at multiple points in the IR codegen
 */

const char *type_name[TYPE_COUNT] = {
    "void",
    "string",
    "float",
    "vector",
    "entity",
    "field",
    "function",
    "pointer",
    "integer",
    "variant",
    "struct",
    "union",
    "array",

    "nil",
    "<no-expression>"
};

static size_t type_sizeof_[TYPE_COUNT] = {
    1, /* TYPE_VOID     */
    1, /* TYPE_STRING   */
    1, /* TYPE_FLOAT    */
    3, /* TYPE_VECTOR   */
    1, /* TYPE_ENTITY   */
    1, /* TYPE_FIELD    */
    1, /* TYPE_FUNCTION */
    1, /* TYPE_POINTER  */
    1, /* TYPE_INTEGER  */
    3, /* TYPE_VARIANT  */
    0, /* TYPE_STRUCT   */
    0, /* TYPE_UNION    */
    0, /* TYPE_ARRAY    */
    0, /* TYPE_NIL      */
    0, /* TYPE_NOEXPR   */
};
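
/* A QC vector occupies 3 consecutive globals, which is why TYPE_VECTOR reports
 * a size of 3 above. Vector member access is then just an offset into that run:
 * for a hypothetical vector `org`, the member `org_y` created by
 * ir_value::vectorMember lives at `org`'s global address + 1 (the component
 * index is kept in m_code.addroffset and added back in codeAddress()).
 */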

const uint16_t type_store_instr[TYPE_COUNT] = {
    INSTR_STORE_F, /* should use I when having integer support */
    INSTR_STORE_S,
    INSTR_STORE_F,
    INSTR_STORE_V,
    INSTR_STORE_ENT,
    INSTR_STORE_FLD,
    INSTR_STORE_FNC,
    INSTR_STORE_ENT, /* should use I */
#if 0
    INSTR_STORE_I, /* integer type */
#else
    INSTR_STORE_F,
#endif

    INSTR_STORE_V, /* variant, should never be accessed */

    VINSTR_END, /* struct */
    VINSTR_END, /* union  */
    VINSTR_END, /* array  */
    VINSTR_END, /* nil    */
    VINSTR_END, /* noexpr */
};

const uint16_t field_store_instr[TYPE_COUNT] = {
    INSTR_STORE_FLD,
    INSTR_STORE_FLD,
    INSTR_STORE_FLD,
    INSTR_STORE_V,
    INSTR_STORE_FLD,
    INSTR_STORE_FLD,
    INSTR_STORE_FLD,
    INSTR_STORE_FLD,
#if 0
    INSTR_STORE_FLD, /* integer type */
#else
    INSTR_STORE_FLD,
#endif

    INSTR_STORE_V, /* variant, should never be accessed */

    VINSTR_END, /* struct */
    VINSTR_END, /* union  */
    VINSTR_END, /* array  */
    VINSTR_END, /* nil    */
    VINSTR_END, /* noexpr */
};

const uint16_t type_storep_instr[TYPE_COUNT] = {
    INSTR_STOREP_F, /* should use I when having integer support */
    INSTR_STOREP_S,
    INSTR_STOREP_F,
    INSTR_STOREP_V,
    INSTR_STOREP_ENT,
    INSTR_STOREP_FLD,
    INSTR_STOREP_FNC,
    INSTR_STOREP_ENT, /* should use I */
#if 0
    INSTR_STOREP_ENT, /* integer type */
#else
    INSTR_STOREP_F,
#endif

    INSTR_STOREP_V, /* variant, should never be accessed */

    VINSTR_END, /* struct */
    VINSTR_END, /* union  */
    VINSTR_END, /* array  */
    VINSTR_END, /* nil    */
    VINSTR_END, /* noexpr */
};

const uint16_t type_eq_instr[TYPE_COUNT] = {
    INSTR_EQ_F, /* should use I when having integer support */
    INSTR_EQ_S,
    INSTR_EQ_F,
    INSTR_EQ_V,
    INSTR_EQ_E,
    INSTR_EQ_E, /* FLD has no comparison */
    INSTR_EQ_FNC,
    INSTR_EQ_E, /* should use I */
#if 0
    INSTR_EQ_I,
#else
    INSTR_EQ_F,
#endif

    INSTR_EQ_V, /* variant, should never be accessed */

    VINSTR_END, /* struct */
    VINSTR_END, /* union  */
    VINSTR_END, /* array  */
    VINSTR_END, /* nil    */
    VINSTR_END, /* noexpr */
};

const uint16_t type_ne_instr[TYPE_COUNT] = {
    INSTR_NE_F, /* should use I when having integer support */
    INSTR_NE_S,
    INSTR_NE_F,
    INSTR_NE_V,
    INSTR_NE_E,
    INSTR_NE_E, /* FLD has no comparison */
    INSTR_NE_FNC,
    INSTR_NE_E, /* should use I */
#if 0
    INSTR_NE_I,
#else
    INSTR_NE_F,
#endif

    INSTR_NE_V, /* variant, should never be accessed */

    VINSTR_END, /* struct */
    VINSTR_END, /* union  */
    VINSTR_END, /* array  */
    VINSTR_END, /* nil    */
    VINSTR_END, /* noexpr */
};

const uint16_t type_not_instr[TYPE_COUNT] = {
    INSTR_NOT_F, /* should use I when having integer support */
    VINSTR_END,  /* not to be used, depends on string related -f flags */
    INSTR_NOT_F,
    INSTR_NOT_V,
    INSTR_NOT_ENT,
    INSTR_NOT_ENT,
    INSTR_NOT_FNC,
    INSTR_NOT_ENT, /* should use I */
#if 0
    INSTR_NOT_I, /* integer type */
#else
    INSTR_NOT_F,
#endif

    INSTR_NOT_V, /* variant, should never be accessed */

    VINSTR_END, /* struct */
    VINSTR_END, /* union  */
    VINSTR_END, /* array  */
    VINSTR_END, /* nil    */
    VINSTR_END, /* noexpr */
};

/* protos */
static void            ir_function_dump(ir_function*, char *ind, int (*oprintf)(const char*,...));

static ir_value*       ir_block_create_general_instr(ir_block *self, lex_ctx_t, const char *label,
                                                     int op, ir_value *a, ir_value *b, qc_type outype);
static bool GMQCC_WARN ir_block_create_store(ir_block*, lex_ctx_t, ir_value *target, ir_value *what);
static void            ir_block_dump(ir_block*, char *ind, int (*oprintf)(const char*,...));

static bool            ir_instr_op(ir_instr*, int op, ir_value *value, bool writing);
static void            ir_instr_dump(ir_instr* in, char *ind, int (*oprintf)(const char*,...));
/* error functions */

static void irerror(lex_ctx_t ctx, const char *msg, ...)
{
    va_list ap;
    va_start(ap, msg);
    con_cvprintmsg(ctx, LVL_ERROR, "internal error", msg, ap);
    va_end(ap);
}

static bool GMQCC_WARN irwarning(lex_ctx_t ctx, int warntype, const char *fmt, ...)
{
    bool    r;
    va_list ap;
    va_start(ap, fmt);
    r = vcompile_warning(ctx, warntype, fmt, ap);
    va_end(ap);
    return r;
}

/***********************************************************************
 * Vector utility functions
 */

static bool GMQCC_WARN vec_ir_value_find(std::vector<ir_value *> &vec, const ir_value *what, size_t *idx)
{
    for (auto &it : vec) {
        if (it != what)
            continue;
        if (idx)
            *idx = &it - &vec[0];
        return true;
    }
    return false;
}

static bool GMQCC_WARN vec_ir_block_find(ir_block **vec, ir_block *what, size_t *idx)
{
    size_t i;
    size_t len = vec_size(vec);
    for (i = 0; i < len; ++i) {
        if (vec[i] == what) {
            if (idx) *idx = i;
            return true;
        }
    }
    return false;
}

static bool GMQCC_WARN vec_ir_instr_find(std::vector<ir_instr *> &vec, ir_instr *what, size_t *idx)
{
    for (auto &it : vec) {
        if (it != what)
            continue;
        if (idx)
            *idx = &it - &vec[0];
        return true;
    }
    return false;
}

/***********************************************************************
 * IR Builder
 */

static void ir_block_delete_quick(ir_block* self);
static void ir_instr_delete_quick(ir_instr *self);
static void ir_function_delete_quick(ir_function *self);

ir_builder::ir_builder(const std::string& modulename)
: m_name(modulename),
  m_code(new code_t)
{
    m_htglobals   = util_htnew(IR_HT_SIZE);
    m_htfields    = util_htnew(IR_HT_SIZE);
    m_htfunctions = util_htnew(IR_HT_SIZE);

    m_nil = new ir_value("nil", store_value, TYPE_NIL);
    m_nil->m_cvq = CV_CONST;

    for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
        /* we write to them, but they're not supposed to be used outside the IR, so
         * let's not allow the generation of ir_instrs which use these.
         * So it's a constant noexpr.
         */
        m_vinstr_temp[i] = new ir_value("vinstr_temp", store_value, TYPE_NOEXPR);
        m_vinstr_temp[i]->m_cvq = CV_CONST;
    }
}

ir_builder::~ir_builder()
{
    util_htdel(m_htglobals);
    util_htdel(m_htfields);
    util_htdel(m_htfunctions);
    for (auto& f : m_functions)
        ir_function_delete_quick(f.release());
    m_functions.clear(); // delete them now before deleting the rest:

    delete m_nil;

    for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
        delete m_vinstr_temp[i];
    }

    m_extparams.clear();
    m_extparam_protos.clear();
}

ir_function* ir_builder::createFunction(const std::string& name, qc_type outtype)
{
    ir_function *fn = (ir_function*)util_htget(m_htfunctions, name.c_str());
    if (fn)
        return nullptr;

    fn = new ir_function(this, outtype);
    fn->m_name = name;
    m_functions.emplace_back(fn);
    util_htset(m_htfunctions, name.c_str(), fn);

    fn->m_value = createGlobal(fn->m_name, TYPE_FUNCTION);
    if (!fn->m_value) {
        delete fn;
        return nullptr;
    }

    fn->m_value->m_hasvalue = true;
    fn->m_value->m_outtype = outtype;
    fn->m_value->m_constval.vfunc = fn;
    fn->m_value->m_context = fn->m_context;

    return fn;
}

ir_value* ir_builder::createGlobal(const std::string& name, qc_type vtype)
{
    ir_value *ve;

    if (name[0] != '#')
    {
        ve = (ir_value*)util_htget(m_htglobals, name.c_str());
        if (ve) {
            return nullptr;
        }
    }

    ve = new ir_value(std::string(name), store_global, vtype);
    m_globals.emplace_back(ve);
    util_htset(m_htglobals, name.c_str(), ve);
    return ve;
}

ir_value* ir_builder::get_va_count()
{
    if (m_reserved_va_count)
        return m_reserved_va_count;
    return (m_reserved_va_count = createGlobal("reserved:va_count", TYPE_FLOAT));
}

ir_value* ir_builder::createField(const std::string& name, qc_type vtype)
{
    ir_value *ve = (ir_value*)util_htget(m_htfields, name.c_str());
    if (ve) {
        return nullptr;
    }

    ve = new ir_value(std::string(name), store_global, TYPE_FIELD);
    ve->m_fieldtype = vtype;
    m_fields.emplace_back(ve);
    util_htset(m_htfields, name.c_str(), ve);
    return ve;
}

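/* Putting the builder pieces above together; a minimal sketch only, kept in an
 * #if 0 block and never compiled. The names `example_emit_identity`, "example"
 * and "x" are made up for illustration, and the lex_ctx_t is assumed to come
 * from the caller.
 */
#if 0
static void example_emit_identity(ir_builder &builder, lex_ctx_t ctx)
{
    /* declare a float function with one float parameter */
    ir_function *fn    = builder.createFunction("example", TYPE_FLOAT);
    ir_value    *param = ir_function_create_local(fn, "x", TYPE_FLOAT, true);
    /* every function needs at least one block; return the parameter from it */
    ir_block    *entry = ir_function_create_block(ctx, fn, "entry");
    (void)!ir_block_create_return(entry, ctx, param);
}
#endif
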
/***********************************************************************
 * IR Function
 */

static bool ir_function_naive_phi(ir_function*);
static void ir_function_enumerate(ir_function*);
static bool ir_function_calculate_liferanges(ir_function*);
static bool ir_function_allocate_locals(ir_function*);

ir_function::ir_function(ir_builder* owner_, qc_type outtype_)
: m_owner(owner_),
  m_name("<@unnamed>"),
  m_outtype(outtype_)
{
    m_context.file = "<@no context>";
    m_context.line = 0;
}

ir_function::~ir_function()
{
}

static void ir_function_delete_quick(ir_function *self)
{
    for (auto& b : self->m_blocks)
        ir_block_delete_quick(b.release());
    delete self;
}

static void ir_function_collect_value(ir_function *self, ir_value *v)
{
    self->m_values.emplace_back(v);
}

ir_block* ir_function_create_block(lex_ctx_t ctx, ir_function *self, const char *label)
{
    ir_block* bn = new ir_block(self, label ? std::string(label) : std::string());
    bn->m_context = ctx;
    self->m_blocks.emplace_back(bn);

    if ((self->m_flags & IR_FLAG_BLOCK_COVERAGE) && self->m_owner->m_coverage_func)
        (void)ir_block_create_call(bn, ctx, nullptr, self->m_owner->m_coverage_func, false);

    return bn;
}

static bool instr_is_operation(uint16_t op)
{
    return ( (op >= INSTR_MUL_F  && op <= INSTR_GT) ||
             (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) ||
             (op == INSTR_ADDRESS) ||
             (op >= INSTR_NOT_F  && op <= INSTR_NOT_FNC) ||
             (op >= INSTR_AND    && op <= INSTR_BITOR) ||
             (op >= INSTR_CALL0  && op <= INSTR_CALL8) ||
             (op >= VINSTR_BITAND_V && op <= VINSTR_NEG_V) );
}

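/* A rough sketch of the store-folding case handled below (illustrative QC
 * assembly, not taken from real output): an operation writing a single-use SSA
 * temp followed immediately by a STORE of that temp,
 *
 *     MUL_F   a, b, %tmp
 *     STORE_F %tmp, dst
 *
 * is rewritten so the operation targets `dst` directly and the STORE is
 * dropped. The second case handled below removes a NOT feeding a COND and
 * swaps the COND's branch targets instead.
 */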
static bool ir_function_pass_peephole(ir_function *self)
{
    for (auto& bp : self->m_blocks) {
        ir_block *block = bp.get();
        for (size_t i = 0; i < vec_size(block->m_instr); ++i) {
            ir_instr *inst;
            inst = block->m_instr[i];

            if (i >= 1 &&
                (inst->m_opcode >= INSTR_STORE_F &&
                 inst->m_opcode <= INSTR_STORE_FNC))
            {
                ir_instr *store;
                ir_instr *oper;
                ir_value *value;

                store = inst;

                oper  = block->m_instr[i-1];
                if (!instr_is_operation(oper->m_opcode))
                    continue;

                /* Don't change semantics of MUL_VF in engines where these may not alias. */
                if (OPTS_FLAG(LEGACY_VECTOR_MATHS)) {
                    if (oper->m_opcode == INSTR_MUL_VF && oper->_m_ops[2]->m_memberof == oper->_m_ops[1])
                        continue;
                    if (oper->m_opcode == INSTR_MUL_FV && oper->_m_ops[1]->m_memberof == oper->_m_ops[2])
                        continue;
                }

                value = oper->_m_ops[0];

                /* only do it for SSA values */
                if (value->m_store != store_value)
                    continue;

                /* don't optimize out the temp if it's used later again */
                if (value->m_reads.size() != 1)
                    continue;

                /* The very next store must use this value */
                if (value->m_reads[0] != store)
                    continue;

                /* And of course the store must _read_ from it, so it's in
                 * OP 1 */
                if (store->_m_ops[1] != value)
                    continue;

                ++opts_optimizationcount[OPTIM_PEEPHOLE];
                (void)!ir_instr_op(oper, 0, store->_m_ops[0], true);

                vec_remove(block->m_instr, i, 1);
                delete store;
            }
            else if (inst->m_opcode == VINSTR_COND)
            {
                /* For a COND on a value produced by a NOT we can
                 * remove the NOT and swap the branch targets.
                 */
                while (true) {
                    ir_block *tmp;
                    size_t    inotid;
                    ir_instr *inot;
                    ir_value *value;
                    value = inst->_m_ops[0];

                    if (value->m_store != store_value || value->m_reads.size() != 1 || value->m_reads[0] != inst)
                        break;

                    inot = value->m_writes[0];
                    if (inot->_m_ops[0] != value ||
                        inot->m_opcode < INSTR_NOT_F ||
                        inot->m_opcode > INSTR_NOT_FNC ||
                        inot->m_opcode == INSTR_NOT_V || /* can't do these */
                        inot->m_opcode == INSTR_NOT_S)
                    {
                        break;
                    }

                    /* count */
                    ++opts_optimizationcount[OPTIM_PEEPHOLE];
                    /* change operand */
                    (void)!ir_instr_op(inst, 0, inot->_m_ops[1], false);
                    /* remove NOT */
                    tmp = inot->m_owner;
                    for (inotid = 0; inotid < vec_size(tmp->m_instr); ++inotid) {
                        if (tmp->m_instr[inotid] == inot)
                            break;
                    }
                    if (inotid >= vec_size(tmp->m_instr)) {
                        compile_error(inst->m_context, "sanity-check failed: failed to find instruction to optimize out");
                        return false;
                    }
                    vec_remove(tmp->m_instr, inotid, 1);
                    delete inot;
                    /* swap ontrue/onfalse */
                    tmp = inst->m_bops[0];
                    inst->m_bops[0] = inst->m_bops[1];
                    inst->m_bops[1] = tmp;
                }
                continue;
            }
        }
    }

    return true;
}

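/* Roughly what the pass below does (illustrative only): a self-call whose
 * result is returned unchanged,
 *
 *     CALL   self
 *     RETURN %ret
 *
 * is replaced by stores of the call's parameters into the function's locals
 * followed by a JUMP back to the entry block, turning the recursion into a
 * loop.
 */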
static bool ir_function_pass_tailrecursion(ir_function *self)
{
    size_t p;

    for (auto& bp : self->m_blocks) {
        ir_block *block = bp.get();

        ir_value *funcval;
        ir_instr *ret, *call, *store = nullptr;

        if (!block->m_final || vec_size(block->m_instr) < 2)
            continue;

        ret = block->m_instr[vec_size(block->m_instr)-1];
        if (ret->m_opcode != INSTR_DONE && ret->m_opcode != INSTR_RETURN)
            continue;

        call = block->m_instr[vec_size(block->m_instr)-2];
        if (call->m_opcode >= INSTR_STORE_F && call->m_opcode <= INSTR_STORE_FNC) {
            /* account for the unoptimized
             * CALL
             * STORE %return, %tmp
             * RETURN %tmp
             * version
             */
            if (vec_size(block->m_instr) < 3)
                continue;

            store = call;
            call = block->m_instr[vec_size(block->m_instr)-3];
        }

        if (call->m_opcode < INSTR_CALL0 || call->m_opcode > INSTR_CALL8)
            continue;

        if (store) {
            /* optimize out the STORE */
            if (ret->_m_ops[0]   &&
                ret->_m_ops[0]   == store->_m_ops[0] &&
                store->_m_ops[1] == call->_m_ops[0])
            {
                ++opts_optimizationcount[OPTIM_PEEPHOLE];
                call->_m_ops[0] = store->_m_ops[0];
                vec_remove(block->m_instr, vec_size(block->m_instr) - 2, 1);
                delete store;
            }
            else
                continue;
        }

        if (!call->_m_ops[0])
            continue;

        funcval = call->_m_ops[1];
        if (!funcval)
            continue;
        if (funcval->m_vtype != TYPE_FUNCTION || funcval->m_constval.vfunc != self)
            continue;

        /* now we have a CALL and a RET, check if it's a tailcall */
        if (ret->_m_ops[0] && call->_m_ops[0] != ret->_m_ops[0])
            continue;

        ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
        vec_shrinkby(block->m_instr, 2);

        block->m_final = false; /* open it back up */

        /* emit parameter-stores */
        for (p = 0; p < call->m_params.size(); ++p) {
            /* assert(call->params_count <= self->locals_count); */
            if (!ir_block_create_store(block, call->m_context, self->m_locals[p].get(), call->m_params[p])) {
                irerror(call->m_context, "failed to create tailcall store instruction for parameter %i", (int)p);
                return false;
            }
        }
        if (!ir_block_create_jump(block, call->m_context, self->m_blocks[0].get())) {
            irerror(call->m_context, "failed to create tailcall jump");
            return false;
        }

        delete call;
        delete ret;
    }

    return true;
}

bool ir_function_finalize(ir_function *self)
{
    if (self->m_builtin)
        return true;

    for (auto& lp : self->m_locals) {
        ir_value *v = lp.get();
        if (v->m_reads.empty() && v->m_writes.size() && !(v->m_flags & IR_FLAG_NOREF)) {
            // if it's a vector, check that all of its members are unused before
            // claiming it's unused; otherwise skip the vector entirely
            if (v->m_vtype == TYPE_VECTOR)
            {
                size_t mask = (1 << 0) | (1 << 1) | (1 << 2), bits = 0;
                for (size_t i = 0; i < 3; i++)
                    if (!v->m_members[i] || (v->m_members[i]->m_reads.empty()
                        && v->m_members[i]->m_writes.size()))
                        bits |= (1 << i);
                // all components are unused so just report the vector
                if (bits == mask && irwarning(v->m_context, WARN_UNUSED_VARIABLE,
                    "unused variable: `%s`", v->m_name.c_str()))
                    return false;
                else if (bits != mask)
                    // individual components are unused so mention them
                    for (size_t i = 0; i < 3; i++)
                        if ((bits & (1 << i))
                            && irwarning(v->m_context, WARN_UNUSED_COMPONENT,
                                "unused vector component: `%s.%c`", v->m_name.c_str(), "xyz"[i]))
                            return false;
            }
            // just a standard variable
            else if (irwarning(v->m_context, WARN_UNUSED_VARIABLE,
                    "unused variable: `%s`", v->m_name.c_str())) return false;
        }
    }

    if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
        if (!ir_function_pass_peephole(self)) {
            irerror(self->m_context, "generic optimization pass broke something in `%s`", self->m_name.c_str());
            return false;
        }
    }

    if (OPTS_OPTIMIZATION(OPTIM_TAIL_RECURSION)) {
        if (!ir_function_pass_tailrecursion(self)) {
            irerror(self->m_context, "tail-recursion optimization pass broke something in `%s`", self->m_name.c_str());
            return false;
        }
    }

    if (!ir_function_naive_phi(self)) {
        irerror(self->m_context, "internal error: ir_function_naive_phi failed");
        return false;
    }

    for (auto& lp : self->m_locals) {
        ir_value *v = lp.get();
        if (v->m_vtype == TYPE_VECTOR ||
            (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
        {
            v->vectorMember(0);
            v->vectorMember(1);
            v->vectorMember(2);
        }
    }
    for (auto& vp : self->m_values) {
        ir_value *v = vp.get();
        if (v->m_vtype == TYPE_VECTOR ||
            (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
        {
            v->vectorMember(0);
            v->vectorMember(1);
            v->vectorMember(2);
        }
    }

    ir_function_enumerate(self);

    if (!ir_function_calculate_liferanges(self))
        return false;
    if (!ir_function_allocate_locals(self))
        return false;
    return true;
}

ir_value* ir_function_create_local(ir_function *self, const std::string& name, qc_type vtype, bool param)
{
    ir_value *ve;

    if (param &&
        !self->m_locals.empty() &&
        self->m_locals.back()->m_store != store_param)
    {
        irerror(self->m_context, "cannot add parameters after adding locals");
        return nullptr;
    }

    ve = new ir_value(std::string(name), (param ? store_param : store_local), vtype);
    if (param)
        ve->m_locked = true;
    self->m_locals.emplace_back(ve);
    return ve;
}

/***********************************************************************
 * IR Block
 */

ir_block::ir_block(ir_function* owner, const std::string& name)
: m_owner(owner),
  m_label(name)
{
    m_context.file = "<@no context>";
    m_context.line = 0;
}

ir_block::~ir_block()
{
    for (size_t i = 0; i != vec_size(m_instr); ++i)
        delete m_instr[i];
    vec_free(m_instr);
    vec_free(m_entries);
    vec_free(m_exits);
}

static void ir_block_delete_quick(ir_block* self)
{
    size_t i;
    for (i = 0; i != vec_size(self->m_instr); ++i)
        ir_instr_delete_quick(self->m_instr[i]);
    vec_free(self->m_instr);
    delete self;
}

/***********************************************************************
 * IR Instructions
 */

ir_instr::ir_instr(lex_ctx_t ctx, ir_block* owner_, int op)
: m_opcode(op),
  m_context(ctx),
  m_owner(owner_)
{
}

ir_instr::~ir_instr()
{
    // The following calls can only delete from vectors; we still want to
    // delete this instruction, so ignore the return value. Since with the
    // warn_unused_result attribute gcc doesn't honor an explicit (void)foo();
    // to discard the result, the (void)! idiom is used below instead.
    for (auto &it : m_phi) {
        size_t idx;
        if (vec_ir_instr_find(it.value->m_writes, this, &idx))
            it.value->m_writes.erase(it.value->m_writes.begin() + idx);
        if (vec_ir_instr_find(it.value->m_reads, this, &idx))
            it.value->m_reads.erase(it.value->m_reads.begin() + idx);
    }
    for (auto &it : m_params) {
        size_t idx;
        if (vec_ir_instr_find(it->m_writes, this, &idx))
            it->m_writes.erase(it->m_writes.begin() + idx);
        if (vec_ir_instr_find(it->m_reads, this, &idx))
            it->m_reads.erase(it->m_reads.begin() + idx);
    }
    (void)!ir_instr_op(this, 0, nullptr, false);
    (void)!ir_instr_op(this, 1, nullptr, false);
    (void)!ir_instr_op(this, 2, nullptr, false);
}

static void ir_instr_delete_quick(ir_instr *self)
{
    self->m_phi.clear();
    self->m_params.clear();
    self->_m_ops[0] = nullptr;
    self->_m_ops[1] = nullptr;
    self->_m_ops[2] = nullptr;
    delete self;
}

static bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing)
{
    if (v && v->m_vtype == TYPE_NOEXPR) {
        irerror(self->m_context, "tried to use a NOEXPR value");
        return false;
    }

    if (self->_m_ops[op]) {
        size_t idx;
        if (writing && vec_ir_instr_find(self->_m_ops[op]->m_writes, self, &idx))
            self->_m_ops[op]->m_writes.erase(self->_m_ops[op]->m_writes.begin() + idx);
        else if (vec_ir_instr_find(self->_m_ops[op]->m_reads, self, &idx))
            self->_m_ops[op]->m_reads.erase(self->_m_ops[op]->m_reads.begin() + idx);
    }
    if (v) {
        if (writing)
            v->m_writes.push_back(self);
        else
            v->m_reads.push_back(self);
    }
    self->_m_ops[op] = v;
    return true;
}
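
/* ir_instr_op is the single place where operands are wired up, so the per-value
 * use/def lists (m_reads/m_writes) stay consistent: setting an operand first
 * unregisters this instruction from the old value's list, then registers it on
 * the new one. For example, a hypothetical `ir_instr_op(in, 0, out, true)`
 * records `in` as a writer of `out`, which the liveness code later relies on.
 */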

/***********************************************************************
 * IR Value
 */

void ir_value::setCodeAddress(int32_t gaddr)
{
    m_code.globaladdr = gaddr;
    if (m_members[0]) m_members[0]->m_code.globaladdr = gaddr;
    if (m_members[1]) m_members[1]->m_code.globaladdr = gaddr;
    if (m_members[2]) m_members[2]->m_code.globaladdr = gaddr;
}

int32_t ir_value::codeAddress() const
{
    if (m_store == store_return)
        return OFS_RETURN + m_code.addroffset;
    return m_code.globaladdr + m_code.addroffset;
}

ir_value::ir_value(std::string&& name_, store_type store_, qc_type vtype_)
    : m_name(move(name_))
    , m_vtype(vtype_)
    , m_store(store_)
{
    m_fieldtype = TYPE_VOID;
    m_outtype = TYPE_VOID;
    m_flags = 0;

    m_cvq          = CV_NONE;
    m_hasvalue     = false;
    m_context.file = "<@no context>";
    m_context.line = 0;

    memset(&m_constval, 0, sizeof(m_constval));
    memset(&m_code,     0, sizeof(m_code));

    m_members[0] = nullptr;
    m_members[1] = nullptr;
    m_members[2] = nullptr;
    m_memberof = nullptr;

    m_unique_life = false;
    m_locked = false;
    m_callparam  = false;
}

ir_value::ir_value(ir_function *owner, std::string&& name, store_type storetype, qc_type vtype)
    : ir_value(move(name), storetype, vtype)
{
    ir_function_collect_value(owner, this);
}

ir_value::~ir_value()
{
    size_t i;
    if (m_hasvalue) {
        if (m_vtype == TYPE_STRING)
            mem_d((void*)m_constval.vstring);
    }
    if (!(m_flags & IR_FLAG_SPLIT_VECTOR)) {
        for (i = 0; i < 3; ++i) {
            if (m_members[i])
                delete m_members[i];
        }
    }
}


/* helper function */
ir_value* ir_builder::literalFloat(float value, bool add_to_list) {
    ir_value *v = new ir_value("#IMMEDIATE", store_global, TYPE_FLOAT);
    v->m_flags |= IR_FLAG_ERASABLE;
    v->m_hasvalue = true;
    v->m_cvq = CV_CONST;
    v->m_constval.vfloat = value;

    m_globals.emplace_back(v);
    if (add_to_list)
        m_const_floats.emplace_back(v);
    return v;
}

ir_value* ir_value::vectorMember(unsigned int member)
{
    std::string name;
    ir_value *m;
    if (member >= 3)
        return nullptr;

    if (m_members[member])
        return m_members[member];

    if (!m_name.empty()) {
        char member_name[3] = { '_', char('x' + member), 0 };
        name = m_name + member_name;
    }

    if (m_vtype == TYPE_VECTOR)
    {
        m = new ir_value(move(name), m_store, TYPE_FLOAT);
        if (!m)
            return nullptr;
        m->m_context = m_context;

        m_members[member] = m;
        m->m_code.addroffset = member;
    }
    else if (m_vtype == TYPE_FIELD)
    {
        if (m_fieldtype != TYPE_VECTOR)
            return nullptr;
        m = new ir_value(move(name), m_store, TYPE_FIELD);
        if (!m)
            return nullptr;
        m->m_fieldtype = TYPE_FLOAT;
        m->m_context = m_context;

        m_members[member] = m;
        m->m_code.addroffset = member;
    }
    else
    {
        irerror(m_context, "invalid member access on %s", m_name.c_str());
        return nullptr;
    }

    m->m_memberof = this;
    return m;
}

size_t ir_value::size() const {
    if (m_vtype == TYPE_FIELD && m_fieldtype == TYPE_VECTOR)
        return type_sizeof_[TYPE_VECTOR];
    return type_sizeof_[m_vtype];
}

bool ir_value::setFloat(float f)
{
    if (m_vtype != TYPE_FLOAT)
        return false;
    m_constval.vfloat = f;
    m_hasvalue = true;
    return true;
}

bool ir_value::setFunc(int f)
{
    if (m_vtype != TYPE_FUNCTION)
        return false;
    m_constval.vint = f;
    m_hasvalue = true;
    return true;
}

bool ir_value::setVector(vec3_t v)
{
    if (m_vtype != TYPE_VECTOR)
        return false;
    m_constval.vvec = v;
    m_hasvalue = true;
    return true;
}

bool ir_value::setField(ir_value *fld)
{
    if (m_vtype != TYPE_FIELD)
        return false;
    m_constval.vpointer = fld;
    m_hasvalue = true;
    return true;
}

bool ir_value::setString(const char *str)
{
    if (m_vtype != TYPE_STRING)
        return false;
    m_constval.vstring = util_strdupe(str);
    m_hasvalue = true;
    return true;
}

#if 0
bool ir_value::setInt(int i)
{
    if (m_vtype != TYPE_INTEGER)
        return false;
    m_constval.vint = i;
    m_hasvalue = true;
    return true;
}
#endif

bool ir_value::lives(size_t at)
{
    for (auto& l : m_life) {
        if (l.start <= at && at <= l.end)
            return true;
        if (l.start > at) /* since it's ordered */
            return false;
    }
    return false;
}

bool ir_value::insertLife(size_t idx, ir_life_entry_t e)
{
    m_life.insert(m_life.begin() + idx, e);
    return true;
}

bool ir_value::setAlive(size_t s)
{
    size_t i;
    const size_t vs = m_life.size();
    ir_life_entry_t *life_found = nullptr;
    ir_life_entry_t *before = nullptr;
    ir_life_entry_t new_entry;

    /* Find the first range >= s */
    for (i = 0; i < vs; ++i)
    {
        before = life_found;
        life_found = &m_life[i];
        if (life_found->start > s)
            break;
    }
    /* nothing found? append */
    if (i == vs) {
        ir_life_entry_t e;
        if (life_found && life_found->end+1 == s)
        {
            /* previous life range can be merged in */
            life_found->end++;
            return true;
        }
        if (life_found && life_found->end >= s)
            return false;
        e.start = e.end = s;
        m_life.emplace_back(e);
        return true;
    }
    /* found */
    if (before)
    {
        if (before->end + 1 == s &&
            life_found->start - 1 == s)
        {
            /* merge */
            before->end = life_found->end;
            m_life.erase(m_life.begin()+i);
            return true;
        }
        if (before->end + 1 == s)
        {
            /* extend before */
            before->end++;
            return true;
        }
        /* already contained */
        if (before->end >= s)
            return false;
    }
    /* extend */
    if (life_found->start - 1 == s)
    {
        life_found->start--;
        return true;
    }
    /* insert a new entry */
    new_entry.start = new_entry.end = s;
    return insertLife(i, new_entry);
}

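/* A small worked example of the life-range bookkeeping (hypothetical numbers):
 * if m_life is [1..2], [6..9], then setAlive(3) extends the first range to
 * [1..3], setAlive(5) extends the second to [5..9], and once the gap closes
 * completely the two ranges are merged into [1..9]. mergeLife() below performs
 * the same kind of interval union against another value's ranges.
 */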
bool ir_value::mergeLife(const ir_value *other)
{
    size_t i, myi;

    if (other->m_life.empty())
        return true;

    if (m_life.empty()) {
        m_life = other->m_life;
        return true;
    }

    myi = 0;
    for (i = 0; i < other->m_life.size(); ++i)
    {
        const ir_life_entry_t &otherlife = other->m_life[i];
        while (true)
        {
            ir_life_entry_t *entry = &m_life[myi];

            if (otherlife.end+1 < entry->start)
            {
                /* adding an interval before entry */
                if (!insertLife(myi, otherlife))
                    return false;
                ++myi;
                break;
            }

            if (otherlife.start <  entry->start &&
                otherlife.end+1 >= entry->start)
            {
                /* starts earlier and overlaps */
                entry->start = otherlife.start;
            }

            if (otherlife.end   >  entry->end &&
                otherlife.start <= entry->end+1)
            {
                /* ends later and overlaps */
                entry->end = otherlife.end;
            }

            /* see if our change combines it with the next ranges */
            while (myi+1 < m_life.size() &&
                   entry->end+1 >= m_life[1+myi].start)
            {
                /* overlaps with (myi+1) */
                if (entry->end < m_life[1+myi].end)
                    entry->end = m_life[1+myi].end;
                m_life.erase(m_life.begin() + (myi + 1));
                entry = &m_life[myi];
            }

            /* see if we're after the entry */
            if (otherlife.start > entry->end)
            {
                ++myi;
                /* append if we're at the end */
                if (myi >= m_life.size()) {
                    m_life.emplace_back(otherlife);
                    break;
                }
                /* otherwise check the next range */
                continue;
            }
            break;
        }
    }
    return true;
}

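/* ir_values_overlap below is a two-pointer sweep over the (sorted) life ranges
 * of both values. For example (hypothetical ranges), A = [1..4], [10..12] and
 * B = [5..9] never clash, while A = [1..4] and B = [3..7] do, since each starts
 * before the other ends.
 */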
static bool ir_values_overlap(const ir_value *a, const ir_value *b)
{
    /* For any life entry in A see if it overlaps with
     * any life entry in B.
     * Note that the life entries are ordered, so we can use a
     * more efficient algorithm than naively translating the
     * statement above.
     */

    const ir_life_entry_t *la, *lb, *enda, *endb;

    /* first of all, if either has no life range, they cannot clash */
    if (a->m_life.empty() || b->m_life.empty())
        return false;

    la = &a->m_life.front();
    lb = &b->m_life.front();
    enda = &a->m_life.back() + 1;
    endb = &b->m_life.back() + 1;
    while (true)
    {
        /* check if the entries overlap, for that,
         * both must start before the other one ends.
         */
        if (la->start < lb->end &&
            lb->start < la->end)
        {
            return true;
        }

        /* entries are ordered
         * one entry is earlier than the other
         * that earlier entry will be moved forward
         */
        if (la->start < lb->start)
        {
            /* order: A B, move A forward
             * check if we hit the end with A
             */
            if (++la == enda)
                break;
        }
        else /* if (lb->start < la->start)  actually <= */
        {
            /* order: B A, move B forward
             * check if we hit the end with B
             */
            if (++lb == endb)
                break;
        }
    }
    return false;
}

/***********************************************************************
 * IR main operations
 */

static bool ir_check_unreachable(ir_block *self)
{
    /* The IR should never have to deal with unreachable code */
    if (!self->m_final/* || OPTS_FLAG(ALLOW_UNREACHABLE_CODE)*/)
        return true;
    irerror(self->m_context, "unreachable statement (%s)", self->m_label.c_str());
    return false;
}

bool ir_block_create_store_op(ir_block *self, lex_ctx_t ctx, int op, ir_value *target, ir_value *what)
{
    ir_instr *in;
    if (!ir_check_unreachable(self))
        return false;

    if (target->m_store == store_value &&
        (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC))
    {
        irerror(self->m_context, "cannot store to an SSA value");
        irerror(self->m_context, "trying to store: %s <- %s", target->m_name.c_str(), what->m_name.c_str());
        irerror(self->m_context, "instruction: %s", util_instr_str[op]);
        return false;
    }

    in = new ir_instr(ctx, self, op);
    if (!in)
        return false;

    if (!ir_instr_op(in, 0, target, (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC)) ||
        !ir_instr_op(in, 1, what, false))
    {
        delete in;
        return false;
    }
    vec_push(self->m_instr, in);
    return true;
}

bool ir_block_create_state_op(ir_block *self, lex_ctx_t ctx, ir_value *frame, ir_value *think)
{
    ir_instr *in;
    if (!ir_check_unreachable(self))
        return false;

    in = new ir_instr(ctx, self, INSTR_STATE);
    if (!in)
        return false;

    if (!ir_instr_op(in, 0, frame, false) ||
        !ir_instr_op(in, 1, think, false))
    {
        delete in;
        return false;
    }
    vec_push(self->m_instr, in);
    return true;
}

static bool ir_block_create_store(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
{
    int op = 0;
    qc_type vtype;
    if (target->m_vtype == TYPE_VARIANT)
        vtype = what->m_vtype;
    else
        vtype = target->m_vtype;

#if 0
    if      (vtype == TYPE_FLOAT   && what->m_vtype == TYPE_INTEGER)
        op = INSTR_CONV_ITOF;
    else if (vtype == TYPE_INTEGER && what->m_vtype == TYPE_FLOAT)
        op = INSTR_CONV_FTOI;
#endif
        op = type_store_instr[vtype];

    if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
        if (op == INSTR_STORE_FLD && what->m_fieldtype == TYPE_VECTOR)
            op = INSTR_STORE_V;
    }

    return ir_block_create_store_op(self, ctx, op, target, what);
}

bool ir_block_create_storep(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
{
    int op = 0;
    qc_type vtype;

    if (target->m_vtype != TYPE_POINTER)
        return false;

    /* storing using pointer - target is a pointer, type must be
     * inferred from source
     */
    vtype = what->m_vtype;

    op = type_storep_instr[vtype];
    if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
        if (op == INSTR_STOREP_FLD && what->m_fieldtype == TYPE_VECTOR)
            op = INSTR_STOREP_V;
    }

    return ir_block_create_store_op(self, ctx, op, target, what);
}

bool ir_block_create_return(ir_block *self, lex_ctx_t ctx, ir_value *v)
{
    ir_instr *in;
    if (!ir_check_unreachable(self))
        return false;

    self->m_final = true;

    self->m_is_return = true;
    in = new ir_instr(ctx, self, INSTR_RETURN);
    if (!in)
        return false;

    if (v && !ir_instr_op(in, 0, v, false)) {
        delete in;
        return false;
    }

    vec_push(self->m_instr, in);
    return true;
}

bool ir_block_create_if(ir_block *self, lex_ctx_t ctx, ir_value *v,
                        ir_block *ontrue, ir_block *onfalse)
{
    ir_instr *in;
    if (!ir_check_unreachable(self))
        return false;
    self->m_final = true;
    /*in = new ir_instr(ctx, self, (v->m_vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/
    in = new ir_instr(ctx, self, VINSTR_COND);
    if (!in)
        return false;

    if (!ir_instr_op(in, 0, v, false)) {
        delete in;
        return false;
    }

    in->m_bops[0] = ontrue;
    in->m_bops[1] = onfalse;

    vec_push(self->m_instr, in);

    vec_push(self->m_exits, ontrue);
    vec_push(self->m_exits, onfalse);
    vec_push(ontrue->m_entries,  self);
    vec_push(onfalse->m_entries, self);
    return true;
}

bool ir_block_create_jump(ir_block *self, lex_ctx_t ctx, ir_block *to)
{
    ir_instr *in;
    if (!ir_check_unreachable(self))
        return false;
    self->m_final = true;
    in = new ir_instr(ctx, self, VINSTR_JUMP);
    if (!in)
        return false;

    in->m_bops[0] = to;
    vec_push(self->m_instr, in);

    vec_push(self->m_exits, to);
    vec_push(to->m_entries, self);
    return true;
}

bool ir_block_create_goto(ir_block *self, lex_ctx_t ctx, ir_block *to)
{
    self->m_owner->m_flags |= IR_FLAG_HAS_GOTO;
    return ir_block_create_jump(self, ctx, to);
}

ir_instr* ir_block_create_phi(ir_block *self, lex_ctx_t ctx, const char *label, qc_type ot)
{
    ir_value *out;
    ir_instr *in;
    if (!ir_check_unreachable(self))
        return nullptr;
    in = new ir_instr(ctx, self, VINSTR_PHI);
    if (!in)
        return nullptr;
    out = new ir_value(self->m_owner, label ? label : "", store_value, ot);
    if (!out) {
        delete in;
        return nullptr;
    }
    if (!ir_instr_op(in, 0, out, true)) {
        delete in;
        return nullptr;
    }
    vec_push(self->m_instr, in);
    return in;
}

ir_value* ir_phi_value(ir_instr *self)
{
    return self->_m_ops[0];
}

void ir_phi_add(ir_instr* self, ir_block *b, ir_value *v)
{
    ir_phi_entry_t pe;

    if (!vec_ir_block_find(self->m_owner->m_entries, b, nullptr)) {
        // It must not be possible to reach this; otherwise the AST
        // is doing something wrong.
        irerror(self->m_context, "Invalid entry block for PHI");
        exit(EXIT_FAILURE);
    }

    pe.value = v;
    pe.from = b;
    v->m_reads.push_back(self);
    self->m_phi.push_back(pe);
}

/* call related code */
ir_instr* ir_block_create_call(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *func, bool noreturn)
{
    ir_value *out;
    ir_instr *in;
    if (!ir_check_unreachable(self))
        return nullptr;
    in = new ir_instr(ctx, self, (noreturn ? VINSTR_NRCALL : INSTR_CALL0));
    if (!in)
        return nullptr;
    if (noreturn) {
        self->m_final = true;
        self->m_is_return = true;
    }
    out = new ir_value(self->m_owner, label ? label : "", (func->m_outtype == TYPE_VOID) ? store_return : store_value, func->m_outtype);
    if (!out) {
        delete in;
        return nullptr;
    }
    if (!ir_instr_op(in, 0, out, true) ||
        !ir_instr_op(in, 1, func, false))
    {
        delete in;
        return nullptr;
    }
    vec_push(self->m_instr, in);
    /*
    if (noreturn) {
        if (!ir_block_create_return(self, ctx, nullptr)) {
            compile_error(ctx, "internal error: failed to generate dummy-return instruction");
            delete in;
            return nullptr;
        }
    }
    */
    return in;
}

ir_value* ir_call_value(ir_instr *self)
{
    return self->_m_ops[0];
}

void ir_call_param(ir_instr* self, ir_value *v)
{
    self->m_params.push_back(v);
    v->m_reads.push_back(self);
}

/* binary op related code */

ir_value* ir_block_create_binop(ir_block *self, lex_ctx_t ctx,
                                const char *label, int opcode,
                                ir_value *left, ir_value *right)
{
    qc_type ot = TYPE_VOID;
    switch (opcode) {
        case INSTR_ADD_F:
        case INSTR_SUB_F:
        case INSTR_DIV_F:
        case INSTR_MUL_F:
        case INSTR_MUL_V:
        case INSTR_AND:
        case INSTR_OR:
#if 0
        case INSTR_AND_I:
        case INSTR_AND_IF:
        case INSTR_AND_FI:
        case INSTR_OR_I:
        case INSTR_OR_IF:
        case INSTR_OR_FI:
#endif
        case INSTR_BITAND:
        case INSTR_BITOR:
        case VINSTR_BITXOR:
#if 0
        case INSTR_SUB_S: /* -- offset of string as float */
        case INSTR_MUL_IF:
        case INSTR_MUL_FI:
        case INSTR_DIV_IF:
        case INSTR_DIV_FI:
        case INSTR_BITOR_IF:
        case INSTR_BITOR_FI:
        case INSTR_BITAND_FI:
        case INSTR_BITAND_IF:
        case INSTR_EQ_I:
        case INSTR_NE_I:
#endif
            ot = TYPE_FLOAT;
            break;
#if 0
        case INSTR_ADD_I:
        case INSTR_ADD_IF:
        case INSTR_ADD_FI:
        case INSTR_SUB_I:
        case INSTR_SUB_FI:
        case INSTR_SUB_IF:
        case INSTR_MUL_I:
        case INSTR_DIV_I:
        case INSTR_BITAND_I:
        case INSTR_BITOR_I:
        case INSTR_XOR_I:
        case INSTR_RSHIFT_I:
        case INSTR_LSHIFT_I:
            ot = TYPE_INTEGER;
            break;
#endif
        case INSTR_ADD_V:
        case INSTR_SUB_V:
        case INSTR_MUL_VF:
        case INSTR_MUL_FV:
        case VINSTR_BITAND_V:
        case VINSTR_BITOR_V:
        case VINSTR_BITXOR_V:
        case VINSTR_BITAND_VF:
        case VINSTR_BITOR_VF:
        case VINSTR_BITXOR_VF:
        case VINSTR_CROSS:
#if 0
        case INSTR_DIV_VF:
        case INSTR_MUL_IV:
        case INSTR_MUL_VI:
#endif
            ot = TYPE_VECTOR;
            break;
#if 0
        case INSTR_ADD_SF:
            ot = TYPE_POINTER;
            break;
#endif
    /*
     * after the following default case, the value of opcode can never
     * be 1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65
     */
        default:
            /* ranges: */
            /* boolean operations result in floats */

            /*
             * opcode >= 10 takes true branch opcode is at least 10
             * opcode <= 23 takes false branch opcode is at least 24
             */
            if (opcode >= INSTR_EQ_F && opcode <= INSTR_GT)
                ot = TYPE_FLOAT;

            /*
             * At condition "opcode <= 23", the value of "opcode" must be
             * at least 24.
             * At condition "opcode <= 23", the value of "opcode" cannot be
             * equal to any of {1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65}.
             * The condition "opcode <= 23" cannot be true.
             *
             * Thus ot=2 (TYPE_FLOAT) can never be true
             */
#if 0
            else if (opcode >= INSTR_LE && opcode <= INSTR_GT)
                ot = TYPE_FLOAT;
            else if (opcode >= INSTR_LE_I && opcode <= INSTR_EQ_FI)
                ot = TYPE_FLOAT;
#endif
            break;
    };
    if (ot == TYPE_VOID) {
        /* The AST or parser were supposed to check this! */
        return nullptr;
    }

    return ir_block_create_general_instr(self, ctx, label, opcode, left, right, ot);
}

ir_value* ir_block_create_unary(ir_block *self, lex_ctx_t ctx,
                                const char *label, int opcode,
                                ir_value *operand)
{
    qc_type ot = TYPE_FLOAT;
    switch (opcode) {
        case INSTR_NOT_F:
        case INSTR_NOT_V:
        case INSTR_NOT_S:
        case INSTR_NOT_ENT:
        case INSTR_NOT_FNC: /*
        case INSTR_NOT_I:   */
            ot = TYPE_FLOAT;
            break;

        /*
         * Negation for virtual instructions is emulated with 0-value. Thankfully
         * the operand for 0 already exists so we just source it from here.
         */
        case VINSTR_NEG_F:
            return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_F, nullptr, operand, ot);
        case VINSTR_NEG_V:
            return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_V, nullptr, operand, TYPE_VECTOR);

        default:
            ot = operand->m_vtype;
            break;
    };
    if (ot == TYPE_VOID) {
        /* The AST or parser were supposed to check this! */
        return nullptr;
    }

    /* let's use the general instruction creator and pass nullptr for OPB */
    return ir_block_create_general_instr(self, ctx, label, opcode, operand, nullptr, ot);
}

static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t ctx, const char *label,
                                        int op, ir_value *a, ir_value *b, qc_type outype)
{
    ir_instr *instr;
    ir_value *out;

    out = new ir_value(self->m_owner, label ? label : "", store_value, outype);
    if (!out)
        return nullptr;

    instr = new ir_instr(ctx, self, op);
    if (!instr) {
        return nullptr;
    }

    if (!ir_instr_op(instr, 0, out, true) ||
        !ir_instr_op(instr, 1, a, false) ||
        !ir_instr_op(instr, 2, b, false) )
    {
        goto on_error;
    }

    vec_push(self->m_instr, instr);

    return out;
on_error:
    delete instr;
    return nullptr;
}

ir_value* ir_block_create_fieldaddress(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field)
{
    ir_value *v;

    /* TODO: support for various pointer types, if so desired */
    if (ent->m_vtype != TYPE_ENTITY)
        return nullptr;

    if (field->m_vtype != TYPE_FIELD)
        return nullptr;

    v = ir_block_create_general_instr(self, ctx, label, INSTR_ADDRESS, ent, field, TYPE_POINTER);
    v->m_fieldtype = field->m_fieldtype;
    return v;
}

ir_value* ir_block_create_load_from_ent(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field, qc_type outype)
{
    int op;
    if (ent->m_vtype != TYPE_ENTITY)
        return nullptr;

    /* at some point we could redirect for TYPE_POINTER... but that could lead to carelessness */
    if (field->m_vtype != TYPE_FIELD)
        return nullptr;

    switch (outype)
    {
        case TYPE_FLOAT:    op = INSTR_LOAD_F;   break;
        case TYPE_VECTOR:   op = INSTR_LOAD_V;   break;
        case TYPE_STRING:   op = INSTR_LOAD_S;   break;
        case TYPE_FIELD:    op = INSTR_LOAD_FLD; break;
        case TYPE_ENTITY:   op = INSTR_LOAD_ENT; break;
        case TYPE_FUNCTION: op = INSTR_LOAD_FNC; break;
#if 0
        case TYPE_POINTER: op = INSTR_LOAD_I;   break;
        case TYPE_INTEGER: op = INSTR_LOAD_I;   break;
#endif
        default:
            irerror(self->m_context, "invalid type for ir_block_create_load_from_ent: %s", type_name[outype]);
            return nullptr;
    }

    return ir_block_create_general_instr(self, ctx, label, op, ent, field, outype);
}

/* PHI resolving breaks the SSA, and must thus be the last
 * step before life-range calculation.
 */
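
/* Roughly what the naive lowering below does (illustrative, not real output):
 * a PHI such as
 *
 *     %x = PHI [ %a from B1, %b from B2 ]
 *
 * is removed, and each predecessor block instead gets a STORE of its incoming
 * value into %x, inserted just before that block's final jump; single-use SSA
 * temporaries are rewritten in place instead of stored.
 */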
1745
1746 static bool ir_block_naive_phi(ir_block *self);
1747 bool ir_function_naive_phi(ir_function *self)
1748 {
1749     for (auto& b : self->m_blocks)
1750         if (!ir_block_naive_phi(b.get()))
1751             return false;
1752     return true;
1753 }
1754
1755 static bool ir_block_naive_phi(ir_block *self)
1756 {
1757     size_t i;
1758     /* FIXME: optionally, create_phi can add the phis
1759      * to a list so we don't need to loop through blocks
1760      * - anyway: "don't optimize YET"
1761      */
1762     for (i = 0; i < vec_size(self->m_instr); ++i)
1763     {
1764         ir_instr *instr = self->m_instr[i];
1765         if (instr->m_opcode != VINSTR_PHI)
1766             continue;
1767
1768         vec_remove(self->m_instr, i, 1);
1769         --i; /* NOTE: i+1 below */
1770
1771         for (auto &it : instr->m_phi) {
1772             ir_value *v = it.value;
1773             ir_block *b = it.from;
1774             if (v->m_store == store_value && v->m_reads.size() == 1 && v->m_writes.size() == 1) {
1775                 /* replace the value */
1776                 if (!ir_instr_op(v->m_writes[0], 0, instr->_m_ops[0], true))
1777                     return false;
1778             } else {
1779                 /* force a move instruction */
1780                 ir_instr *prevjump = vec_last(b->m_instr);
1781                 vec_pop(b->m_instr);
1782                 b->m_final = false;
1783                 instr->_m_ops[0]->m_store = store_global;
1784                 if (!ir_block_create_store(b, instr->m_context, instr->_m_ops[0], v))
1785                     return false;
1786                 instr->_m_ops[0]->m_store = store_value;
1787                 vec_push(b->m_instr, prevjump);
1788                 b->m_final = true;
1789             }
1790         }
1791         delete instr;
1792     }
1793     return true;
1794 }
1795
1796 /***********************************************************************
1797  * IR Temp allocation code
1798  * Propagates value life ranges by walking through the function backwards
1799  * until no more changes are made.
1800  * In theory this needs one additional pass for every level of loop
1801  * nesting.
1802  * Though this implementation might run an additional time for nested ifs.
1803  */
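/* Roughly why extra passes are needed: each block pulls its live-set from the
 * blocks listed in m_exits, so for something like
 *     x = ...; while (cond) { use(x); }
 * the read of x inside the loop body only reaches the blocks before the loop
 * after the body's live-set has been recomputed, i.e. on a later pass.
 */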
1804
1805 /* Enumerate the instructions; the resulting ids are what the values' life-ranges refer to.
1806  */
1807 static void ir_block_enumerate(ir_block *self, size_t *_eid)
1808 {
1809     size_t i;
1810     size_t eid = *_eid;
1811     for (i = 0; i < vec_size(self->m_instr); ++i)
1812     {
1813         self->m_instr[i]->m_eid = eid++;
1814     }
1815     *_eid = eid;
1816 }
1817
1818 /* Enumerate blocks and instructions.
1819  * The block-enumeration is unordered!
1820  * We do not really use the block enumeration, however
1821  * the instruction enumeration is important for life-ranges.
1822  */
1823 void ir_function_enumerate(ir_function *self)
1824 {
1825     size_t instruction_id = 0;
1826     size_t block_eid = 0;
1827     for (auto& block : self->m_blocks)
1828     {
1829         /* each block now gets an additional "entry" instruction id
1830          * we can use to avoid point-life issues
1831          */
1832         block->m_entry_id = instruction_id;
1833         block->m_eid      = block_eid;
1834         ++instruction_id;
1835         ++block_eid;
1836
1837         ir_block_enumerate(block.get(), &instruction_id);
1838     }
1839 }
1840
1841 /* Local-value allocator
1842  * After finishing creating the liferange of all values used in a function
1843  * we can allocate their global-positions.
1844  * This is the counterpart to register-allocation in register machines.
1845  */
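/* A minimal sketch of the idea: two temps whose life ranges do not overlap
 * (checked via ir_values_overlap) can be merged into the same slot with
 * mergeLife(), e.g. a temp used only before a call and another used only
 * after it end up sharing one global. Unique-life values always get a fresh
 * slot, and parameter slots are never resized.
 */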
1846 struct function_allocator {
1847     ir_value **locals;
1848     size_t *sizes;
1849     size_t *positions;
1850     bool *unique;
1851 };
1852
1853 static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
1854 {
1855     ir_value *slot;
1856     size_t vsize = var->size();
1857
1858     var->m_code.local = vec_size(alloc->locals);
1859
1860     slot = new ir_value("reg", store_global, var->m_vtype);
1861     if (!slot)
1862         return false;
1863
1864     if (!slot->mergeLife(var))
1865         goto localerror;
1866
1867     vec_push(alloc->locals, slot);
1868     vec_push(alloc->sizes, vsize);
1869     vec_push(alloc->unique, var->m_unique_life);
1870
1871     return true;
1872
1873 localerror:
1874     delete slot;
1875     return false;
1876 }
1877
1878 static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
1879 {
1880     size_t a;
1881     ir_value *slot;
1882
1883     if (v->m_unique_life)
1884         return function_allocator_alloc(alloc, v);
1885
1886     for (a = 0; a < vec_size(alloc->locals); ++a)
1887     {
1888         /* if it's reserved for a unique liferange: skip */
1889         if (alloc->unique[a])
1890             continue;
1891
1892         slot = alloc->locals[a];
1893
1894         /* never resize parameters
1895          * (this will matter later when temps and locals overlap)
1896          */
1897         if (a < vec_size(self->m_params) &&
1898             alloc->sizes[a] < v->size())
1899         {
1900             continue;
1901         }
1902
1903         if (ir_values_overlap(v, slot))
1904             continue;
1905
1906         if (!slot->mergeLife(v))
1907             return false;
1908
1909         /* adjust size for this slot */
1910         if (alloc->sizes[a] < v->size())
1911             alloc->sizes[a] = v->size();
1912
1913         v->m_code.local = a;
1914         return true;
1915     }
1916     if (a >= vec_size(alloc->locals)) {
1917         if (!function_allocator_alloc(alloc, v))
1918             return false;
1919     }
1920     return true;
1921 }
1922
1923 bool ir_function_allocate_locals(ir_function *self)
1924 {
1925     bool   retval = true;
1926     size_t pos;
1927     bool   opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
1928
1929     function_allocator lockalloc, globalloc;
1930
1931     if (self->m_locals.empty() && self->m_values.empty())
1932         return true;
1933
1934     globalloc.locals    = nullptr;
1935     globalloc.sizes     = nullptr;
1936     globalloc.positions = nullptr;
1937     globalloc.unique    = nullptr;
1938     lockalloc.locals    = nullptr;
1939     lockalloc.sizes     = nullptr;
1940     lockalloc.positions = nullptr;
1941     lockalloc.unique    = nullptr;
1942
1943     size_t i;
1944     for (i = 0; i < self->m_locals.size(); ++i)
1945     {
1946         ir_value *v = self->m_locals[i].get();
1947         if ((self->m_flags & IR_FLAG_MASK_NO_LOCAL_TEMPS) || !OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) {
1948             v->m_locked      = true;
1949             v->m_unique_life = true;
1950         }
1951         else if (i >= vec_size(self->m_params))
1952             break;
1953         else
1954             v->m_locked = true; /* lock parameter locals */
1955         if (!function_allocator_alloc((v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1956             goto error;
1957     }
1958     for (; i < self->m_locals.size(); ++i)
1959     {
1960         ir_value *v = self->m_locals[i].get();
1961         if (v->m_life.empty())
1962             continue;
1963         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1964             goto error;
1965     }
1966
1967     /* Allocate a slot for any value that still exists */
1968     for (i = 0; i < self->m_values.size(); ++i)
1969     {
1970         ir_value *v = self->m_values[i].get();
1971
1972         if (v->m_life.empty())
1973             continue;
1974
1975         /* CALL optimization:
1976          * If the value is a parameter-temp (1 write, 1 read from a CALL)
1977          * and it's not "locked", write it to the OFS_PARM slot directly.
1978          */
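        /* e.g. a temp whose only write is some computation and whose only
         * read is a CALL's parameter list can be computed straight into
         * OFS_PARM0 + 3*param, saving the STORE that would otherwise copy
         * it there; roughly what the block below does.
         */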
1979         if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->m_locked && !v->m_unique_life) {
1980             if (v->m_reads.size() == 1 && v->m_writes.size() == 1 &&
1981                 (v->m_reads[0]->m_opcode == VINSTR_NRCALL ||
1982                  (v->m_reads[0]->m_opcode >= INSTR_CALL0 && v->m_reads[0]->m_opcode <= INSTR_CALL8)
1983                 )
1984                )
1985             {
1986                 size_t param;
1987                 ir_instr *call = v->m_reads[0];
1988                 if (!vec_ir_value_find(call->m_params, v, &param)) {
1989                     irerror(call->m_context, "internal error: unlocked parameter %s not found", v->m_name.c_str());
1990                     goto error;
1991                 }
1992                 ++opts_optimizationcount[OPTIM_CALL_STORES];
1993                 v->m_callparam = true;
1994                 if (param < 8)
1995                     v->setCodeAddress(OFS_PARM0 + 3*param);
1996                 else {
1997                     size_t nprotos = self->m_owner->m_extparam_protos.size();
1998                     ir_value *ep;
1999                     param -= 8;
2000                     if (nprotos > param)
2001                         ep = self->m_owner->m_extparam_protos[param].get();
2002                     else
2003                     {
2004                         ep = self->m_owner->generateExtparamProto();
2005                         while (++nprotos <= param)
2006                             ep = self->m_owner->generateExtparamProto();
2007                     }
2008                     ir_instr_op(v->m_writes[0], 0, ep, true);
2009                     call->m_params[param+8] = ep;
2010                 }
2011                 continue;
2012             }
2013             if (v->m_writes.size() == 1 && v->m_writes[0]->m_opcode == INSTR_CALL0) {
2014                 v->m_store = store_return;
2015                 if (v->m_members[0]) v->m_members[0]->m_store = store_return;
2016                 if (v->m_members[1]) v->m_members[1]->m_store = store_return;
2017                 if (v->m_members[2]) v->m_members[2]->m_store = store_return;
2018                 ++opts_optimizationcount[OPTIM_CALL_STORES];
2019                 continue;
2020             }
2021         }
2022
2023         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
2024             goto error;
2025     }
2026
2027     if (!lockalloc.sizes && !globalloc.sizes) {
2028         goto cleanup;
2029     }
2030     vec_push(lockalloc.positions, 0);
2031     vec_push(globalloc.positions, 0);
2032
2033     /* Adjust slot positions based on sizes */
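    /* This is just a prefix sum over the slot sizes, e.g. sizes [3, 1, 3]
     * yield positions [0, 3, 4] and an allocation of 7 globals in total.
     */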
2034     if (lockalloc.sizes) {
2035         pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
2036         for (i = 1; i < vec_size(lockalloc.sizes); ++i)
2037         {
2038             pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
2039             vec_push(lockalloc.positions, pos);
2040         }
2041         self->m_allocated_locals = pos + vec_last(lockalloc.sizes);
2042     }
2043     if (globalloc.sizes) {
2044         pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
2045         for (i = 1; i < vec_size(globalloc.sizes); ++i)
2046         {
2047             pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
2048             vec_push(globalloc.positions, pos);
2049         }
2050         self->m_globaltemps = pos + vec_last(globalloc.sizes);
2051     }
2052
2053     /* Locals need to know their new position */
2054     for (auto& local : self->m_locals) {
2055         if (local->m_locked || !opt_gt)
2056             local->m_code.local = lockalloc.positions[local->m_code.local];
2057         else
2058             local->m_code.local = globalloc.positions[local->m_code.local];
2059     }
2060     /* Take over the actual slot positions on values */
2061     for (auto& value : self->m_values) {
2062         if (value->m_locked || !opt_gt)
2063             value->m_code.local = lockalloc.positions[value->m_code.local];
2064         else
2065             value->m_code.local = globalloc.positions[value->m_code.local];
2066     }
2067
2068     goto cleanup;
2069
2070 error:
2071     retval = false;
2072 cleanup:
2073     for (i = 0; i < vec_size(lockalloc.locals); ++i)
2074         delete lockalloc.locals[i];
2075     for (i = 0; i < vec_size(globalloc.locals); ++i)
2076         delete globalloc.locals[i];
2077     vec_free(globalloc.unique);
2078     vec_free(globalloc.locals);
2079     vec_free(globalloc.sizes);
2080     vec_free(globalloc.positions);
2081     vec_free(lockalloc.unique);
2082     vec_free(lockalloc.locals);
2083     vec_free(lockalloc.sizes);
2084     vec_free(lockalloc.positions);
2085     return retval;
2086 }
2087
2088 /* Get information about which operand
2089  * is read from, or written to.
2090  */
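/* Both results are bitmasks over the three operand slots _m_ops[0..2]: bit o
 * set in *read / *write means operand o is read / written. E.g. the default
 * of write = 1, read = 6 marks operand 0 as destination and operands 1 and 2
 * as sources, while the STOREP family reads all three (read = 7).
 */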
2091 static void ir_op_read_write(int op, size_t *read, size_t *write)
2092 {
2093     switch (op)
2094     {
2095     case VINSTR_JUMP:
2096     case INSTR_GOTO:
2097         *write = 0;
2098         *read = 0;
2099         break;
2100     case INSTR_IF:
2101     case INSTR_IFNOT:
2102 #if 0
2103     case INSTR_IF_S:
2104     case INSTR_IFNOT_S:
2105 #endif
2106     case INSTR_RETURN:
2107     case VINSTR_COND:
2108         *write = 0;
2109         *read = 1;
2110         break;
2111     case INSTR_STOREP_F:
2112     case INSTR_STOREP_V:
2113     case INSTR_STOREP_S:
2114     case INSTR_STOREP_ENT:
2115     case INSTR_STOREP_FLD:
2116     case INSTR_STOREP_FNC:
2117         *write = 0;
2118         *read  = 7;
2119         break;
2120     default:
2121         *write = 1;
2122         *read = 6;
2123         break;
2124     }
2125 }
2126
2127 static bool ir_block_living_add_instr(ir_block *self, size_t eid) {
2128     bool changed = false;
2129     for (auto &it : self->m_living)
2130         if (it->setAlive(eid))
2131             changed = true;
2132     return changed;
2133 }
2134
2135 static bool ir_block_living_lock(ir_block *self) {
2136     bool changed = false;
2137     for (auto &it : self->m_living) {
2138         if (it->m_locked)
2139             continue;
2140         it->m_locked = true;
2141         changed = true;
2142     }
2143     return changed;
2144 }
2145
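/* One backwards pass over a single block, driven by the fixed-point loop in
 * ir_function_calculate_liferanges: seed m_living with the union of the live
 * sets of the blocks in m_exits, then walk the instructions in reverse,
 * removing values at their (single, SSA) write and adding them at reads,
 * while extending the life range of everything still live at each eid.
 */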
2146 static bool ir_block_life_propagate(ir_block *self, bool *changed)
2147 {
2148     ir_instr *instr;
2149     ir_value *value;
2150     size_t i, o, p, mem;
2151     // bitmasks which operands are read from or written to
2152     size_t read, write;
2153
2154     self->m_living.clear();
2155
2156     p = vec_size(self->m_exits);
2157     for (i = 0; i < p; ++i) {
2158         ir_block *prev = self->m_exits[i];
2159         for (auto &it : prev->m_living)
2160             if (!vec_ir_value_find(self->m_living, it, nullptr))
2161                 self->m_living.push_back(it);
2162     }
2163
2164     i = vec_size(self->m_instr);
2165     while (i)
2166     { --i;
2167         instr = self->m_instr[i];
2168
2169         /* See which operands are read and write operands */
2170         ir_op_read_write(instr->m_opcode, &read, &write);
2171
2172         /* Go through the 3 main operands
2173          * writes first, then reads
2174          */
2175         for (o = 0; o < 3; ++o)
2176         {
2177             if (!instr->_m_ops[o]) /* no such operand */
2178                 continue;
2179
2180             value = instr->_m_ops[o];
2181
2182             /* We only care about locals */
2183             /* we also calculate parameter liferanges so that locals
2184              * can take up parameter slots */
2185             if (value->m_store != store_value &&
2186                 value->m_store != store_local &&
2187                 value->m_store != store_param)
2188                 continue;
2189
2190             /* write operands */
2191             /* When we write to a local, we consider it "dead" for the
2192              * remaining upper part of the function, since in SSA a value
2193              * can only be written once (== created)
2194              */
2195             if (write & (1<<o))
2196             {
2197                 size_t idx;
2198                 bool in_living = vec_ir_value_find(self->m_living, value, &idx);
2199                 if (!in_living)
2200                 {
2201                     /* If the value isn't alive it hasn't been read before... */
2202                     /* TODO: See if the warning can be emitted during parsing or AST processing;
2203                      * otherwise print the warning here.
2204                      * If printing a warning here: include filecontext_t,
2205                      * and make sure it is only printed once,
2206                      * since this function is run multiple times.
2207                      */
2208                     /* con_err( "Value only written %s\n", value->m_name); */
2209                     if (value->setAlive(instr->m_eid))
2210                         *changed = true;
2211                 } else {
2212                     /* 'living' will no longer contain it after this,
2213                      * so extend the life range here, because step (A)
2214                      * below will not see it anymore.
2215                      */
2216                     if (value->setAlive(instr->m_eid))
2217                         *changed = true;
2218                     // Then remove
2219                     self->m_living.erase(self->m_living.begin() + idx);
2220                 }
2221                 /* Removing a vector removes all members */
2222                 for (mem = 0; mem < 3; ++mem) {
2223                     if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], &idx)) {
2224                         if (value->m_members[mem]->setAlive(instr->m_eid))
2225                             *changed = true;
2226                         self->m_living.erase(self->m_living.begin() + idx);
2227                     }
2228                 }
2229                 /* Removing the last member removes the vector */
2230                 if (value->m_memberof) {
2231                     value = value->m_memberof;
2232                     for (mem = 0; mem < 3; ++mem) {
2233                         if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2234                             break;
2235                     }
2236                     if (mem == 3 && vec_ir_value_find(self->m_living, value, &idx)) {
2237                         if (value->setAlive(instr->m_eid))
2238                             *changed = true;
2239                         self->m_living.erase(self->m_living.begin() + idx);
2240                     }
2241                 }
2242             }
2243         }
2244
2245         /* These operations need a special case: they can break when source
2246          * and destination operands are the same, as the engine may
2247          * read the source multiple times. */
2248         if (instr->m_opcode == INSTR_MUL_VF ||
2249             instr->m_opcode == VINSTR_BITAND_VF ||
2250             instr->m_opcode == VINSTR_BITOR_VF ||
2251             instr->m_opcode == VINSTR_BITXOR ||
2252             instr->m_opcode == VINSTR_BITXOR_VF ||
2253             instr->m_opcode == VINSTR_BITXOR_V ||
2254             instr->m_opcode == VINSTR_CROSS)
2255         {
2256             value = instr->_m_ops[2];
2257             /* give this source an extra lifetime point so it never shares a slot with the destination */
2258             if (value->setAlive(instr->m_eid+1))
2259                 *changed = true;
2260             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2261                 *changed = true;
2262         }
2263
2264         if (instr->m_opcode == INSTR_MUL_FV ||
2265             instr->m_opcode == INSTR_LOAD_V ||
2266             instr->m_opcode == VINSTR_BITXOR ||
2267             instr->m_opcode == VINSTR_BITXOR_VF ||
2268             instr->m_opcode == VINSTR_BITXOR_V ||
2269             instr->m_opcode == VINSTR_CROSS)
2270         {
2271             value = instr->_m_ops[1];
2272             /* give this source an extra lifetime point so it never shares a slot with the destination */
2273             if (value->setAlive(instr->m_eid+1))
2274                 *changed = true;
2275             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2276                 *changed = true;
2277         }
2278
2279         for (o = 0; o < 3; ++o)
2280         {
2281             if (!instr->_m_ops[o]) /* no such operand */
2282                 continue;
2283
2284             value = instr->_m_ops[o];
2285
2286             /* We only care about locals */
2287             /* we also calculate parameter liferanges so that locals
2288              * can take up parameter slots */
2289             if (value->m_store != store_value &&
2290                 value->m_store != store_local &&
2291                 value->m_store != store_param)
2292                 continue;
2293
2294             /* read operands */
2295             if (read & (1<<o))
2296             {
2297                 if (!vec_ir_value_find(self->m_living, value, nullptr))
2298                     self->m_living.push_back(value);
2299                 /* reading adds the full vector */
2300                 if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2301                     self->m_living.push_back(value->m_memberof);
2302                 for (mem = 0; mem < 3; ++mem) {
2303                     if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2304                         self->m_living.push_back(value->m_members[mem]);
2305                 }
2306             }
2307         }
2308         /* PHI operands are always read operands */
2309         for (auto &it : instr->m_phi) {
2310             value = it.value;
2311             if (!vec_ir_value_find(self->m_living, value, nullptr))
2312                 self->m_living.push_back(value);
2313             /* reading adds the full vector */
2314             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2315                 self->m_living.push_back(value->m_memberof);
2316             for (mem = 0; mem < 3; ++mem) {
2317                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2318                     self->m_living.push_back(value->m_members[mem]);
2319             }
2320         }
2321
2322         /* on a call, all these values must be "locked" */
2323         if (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8) {
2324             if (ir_block_living_lock(self))
2325                 *changed = true;
2326         }
2327         /* call params are read operands too */
2328         for (auto &it : instr->m_params) {
2329             value = it;
2330             if (!vec_ir_value_find(self->m_living, value, nullptr))
2331                 self->m_living.push_back(value);
2332             /* reading adds the full vector */
2333             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2334                 self->m_living.push_back(value->m_memberof);
2335             for (mem = 0; mem < 3; ++mem) {
2336                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2337                     self->m_living.push_back(value->m_members[mem]);
2338             }
2339         }
2340
2341         /* (A) */
2342         if (ir_block_living_add_instr(self, instr->m_eid))
2343             *changed = true;
2344     }
2345     /* the "entry" instruction ID */
2346     if (ir_block_living_add_instr(self, self->m_entry_id))
2347         *changed = true;
2348
2349     return true;
2350 }
2351
2352 bool ir_function_calculate_liferanges(ir_function *self)
2353 {
2354     /* parameters live at 0 */
2355     for (size_t i = 0; i < vec_size(self->m_params); ++i)
2356         if (!self->m_locals[i].get()->setAlive(0))
2357             compile_error(self->m_context, "internal error: failed value-life merging");
2358
2359     bool changed;
2360     do {
2361         self->m_run_id++;
2362         changed = false;
2363         for (auto i = self->m_blocks.rbegin(); i != self->m_blocks.rend(); ++i)
2364             ir_block_life_propagate(i->get(), &changed);
2365     } while (changed);
2366
2367     if (self->m_blocks.size()) {
2368         ir_block *block = self->m_blocks[0].get();
2369         for (auto &it : block->m_living) {
2370             ir_value *v = it;
2371             if (v->m_store != store_local)
2372                 continue;
2373             if (v->m_vtype == TYPE_VECTOR)
2374                 continue;
2375             self->m_flags |= IR_FLAG_HAS_UNINITIALIZED;
2376             /* find the instruction reading from it */
2377             size_t s = 0;
2378             for (; s < v->m_reads.size(); ++s) {
2379                 if (v->m_reads[s]->m_eid == v->m_life[0].end)
2380                     break;
2381             }
2382             if (s < v->m_reads.size()) {
2383                 if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2384                               "variable `%s` may be used uninitialized in this function\n"
2385                               " -> %s:%i",
2386                               v->m_name.c_str(),
2387                               v->m_reads[s]->m_context.file, v->m_reads[s]->m_context.line)
2388                    )
2389                 {
2390                     return false;
2391                 }
2392                 continue;
2393             }
2394             if (v->m_memberof) {
2395                 ir_value *vec = v->m_memberof;
2396                 for (s = 0; s < vec->m_reads.size(); ++s) {
2397                     if (vec->m_reads[s]->m_eid == v->m_life[0].end)
2398                         break;
2399                 }
2400                 if (s < vec->m_reads.size()) {
2401                     if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2402                                   "variable `%s` may be used uninitialized in this function\n"
2403                                   " -> %s:%i",
2404                                   v->m_name.c_str(),
2405                                   vec->m_reads[s]->m_context.file, vec->m_reads[s]->m_context.line)
2406                        )
2407                     {
2408                         return false;
2409                     }
2410                     continue;
2411                 }
2412             }
2413             if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2414                           "variable `%s` may be used uninitialized in this function", v->m_name.c_str()))
2415             {
2416                 return false;
2417             }
2418         }
2419     }
2420     return true;
2421 }
2422
2423 /***********************************************************************
2424  * IR Code-Generation
2425  *
2426  * Since the IR has the convention of putting 'write' operands
2427  * at the beginning, we have to rotate the operands of instructions
2428  * properly in order to generate valid QCVM code.
2429  *
2430  * Having destinations at a fixed position is more convenient. In QC
2431  * this is *mostly* OPC, but FTE adds at least 2 instructions which
2432  * read from OPA and store to OPB rather than OPC, which is
2433  * partially the reason why the implementation of these instructions
2434  * in darkplaces has been delayed for so long.
2435  *
2436  * Breaking conventions is annoying...
2437  */
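/* Concretely (see the generic case at the bottom of gen_blocks_recursive): an
 * IR instruction with _m_ops = { out, a, b } is emitted with a in o1 (OPA),
 * b in o2 (OPB) and out in o3 (OPC); RETURN/DONE and the STORE/STOREP
 * families are then fixed up, since they expect their operands elsewhere.
 */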
2438 static bool gen_global_field(code_t *code, ir_value *global)
2439 {
2440     if (global->m_hasvalue)
2441     {
2442         ir_value *fld = global->m_constval.vpointer;
2443         if (!fld) {
2444             irerror(global->m_context, "Invalid field constant with no field: %s", global->m_name.c_str());
2445             return false;
2446         }
2447
2448         /* copy the field's value */
2449         global->setCodeAddress(code->globals.size());
2450         code->globals.push_back(fld->m_code.fieldaddr);
2451         if (global->m_fieldtype == TYPE_VECTOR) {
2452             code->globals.push_back(fld->m_code.fieldaddr+1);
2453             code->globals.push_back(fld->m_code.fieldaddr+2);
2454         }
2455     }
2456     else
2457     {
2458         global->setCodeAddress(code->globals.size());
2459         code->globals.push_back(0);
2460         if (global->m_fieldtype == TYPE_VECTOR) {
2461             code->globals.push_back(0);
2462             code->globals.push_back(0);
2463         }
2464     }
2465     if (global->m_code.globaladdr < 0)
2466         return false;
2467     return true;
2468 }
2469
2470 static bool gen_global_pointer(code_t *code, ir_value *global)
2471 {
2472     if (global->m_hasvalue)
2473     {
2474         ir_value *target = global->m_constval.vpointer;
2475         if (!target) {
2476             irerror(global->m_context, "Invalid pointer constant: %s", global->m_name.c_str());
2477             /* null pointers point to the nullptr constant, which also
2478              * sits at address 0, but still has an ir_value of its own.
2479              */
2480             return false;
2481         }
2482
2483         /* Here, relocations ARE possible - in fteqcc-enhanced-qc:
2484          * void() foo; <- proto
2485          * void() *fooptr = &foo;
2486          * void() foo = { code }
2487          */
2488         if (!target->m_code.globaladdr) {
2489             /* FIXME: Check for the constant nullptr ir_value!
2490              * because then code.globaladdr being 0 is valid.
2491              */
2492             irerror(global->m_context, "FIXME: Relocation support");
2493             return false;
2494         }
2495
2496         global->setCodeAddress(code->globals.size());
2497         code->globals.push_back(target->m_code.globaladdr);
2498     }
2499     else
2500     {
2501         global->setCodeAddress(code->globals.size());
2502         code->globals.push_back(0);
2503     }
2504     if (global->m_code.globaladdr < 0)
2505         return false;
2506     return true;
2507 }
2508
2509 static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *block)
2510 {
2511     prog_section_statement_t stmt;
2512     ir_instr *instr;
2513     ir_block *target;
2514     ir_block *ontrue;
2515     ir_block *onfalse;
2516     size_t    stidx;
2517     size_t    i;
2518     int       j;
2519
2520     block->m_generated = true;
2521     block->m_code_start = code->statements.size();
2522     for (i = 0; i < vec_size(block->m_instr); ++i)
2523     {
2524         instr = block->m_instr[i];
2525
2526         if (instr->m_opcode == VINSTR_PHI) {
2527             irerror(block->m_context, "cannot generate virtual instruction (phi)");
2528             return false;
2529         }
2530
2531         if (instr->m_opcode == VINSTR_JUMP) {
2532             target = instr->m_bops[0];
2533             /* for unconditional jumps, if the target hasn't been generated
2534              * yet, we generate it right here.
2535              */
2536             if (!target->m_generated)
2537                 return gen_blocks_recursive(code, func, target);
2538
2539             /* otherwise we generate a jump instruction */
2540             stmt.opcode = INSTR_GOTO;
2541             stmt.o1.s1 = target->m_code_start - code->statements.size();
2542             stmt.o2.s1 = 0;
2543             stmt.o3.s1 = 0;
2544             if (stmt.o1.s1 != 1)
2545                 code_push_statement(code, &stmt, instr->m_context);
2546
2547             /* no further instructions can be in this block */
2548             return true;
2549         }
2550
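        /* The QCVM has no XOR instruction, so the VINSTR_BITXOR* cases below
         * lower it as roughly  a ^ b == (a | b) - (a & b),  using the
         * builder's m_vinstr_temp[0] as scratch for the AND part; the _V/_VF
         * variants apply the same identity per vector component.
         */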
2551         if (instr->m_opcode == VINSTR_BITXOR) {
2552             stmt.opcode = INSTR_BITOR;
2553             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2554             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2555             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2556             code_push_statement(code, &stmt, instr->m_context);
2557             stmt.opcode = INSTR_BITAND;
2558             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2559             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2560             stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2561             code_push_statement(code, &stmt, instr->m_context);
2562             stmt.opcode = INSTR_SUB_F;
2563             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2564             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2565             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2566             code_push_statement(code, &stmt, instr->m_context);
2567
2568             /* instruction generated */
2569             continue;
2570         }
2571
2572         if (instr->m_opcode == VINSTR_BITAND_V) {
2573             stmt.opcode = INSTR_BITAND;
2574             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2575             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2576             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2577             code_push_statement(code, &stmt, instr->m_context);
2578             ++stmt.o1.s1;
2579             ++stmt.o2.s1;
2580             ++stmt.o3.s1;
2581             code_push_statement(code, &stmt, instr->m_context);
2582             ++stmt.o1.s1;
2583             ++stmt.o2.s1;
2584             ++stmt.o3.s1;
2585             code_push_statement(code, &stmt, instr->m_context);
2586
2587             /* instruction generated */
2588             continue;
2589         }
2590
2591         if (instr->m_opcode == VINSTR_BITOR_V) {
2592             stmt.opcode = INSTR_BITOR;
2593             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2594             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2595             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2596             code_push_statement(code, &stmt, instr->m_context);
2597             ++stmt.o1.s1;
2598             ++stmt.o2.s1;
2599             ++stmt.o3.s1;
2600             code_push_statement(code, &stmt, instr->m_context);
2601             ++stmt.o1.s1;
2602             ++stmt.o2.s1;
2603             ++stmt.o3.s1;
2604             code_push_statement(code, &stmt, instr->m_context);
2605
2606             /* instruction generated */
2607             continue;
2608         }
2609
2610         if (instr->m_opcode == VINSTR_BITXOR_V) {
2611             for (j = 0; j < 3; ++j) {
2612                 stmt.opcode = INSTR_BITOR;
2613                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2614                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2615                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2616                 code_push_statement(code, &stmt, instr->m_context);
2617                 stmt.opcode = INSTR_BITAND;
2618                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2619                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2620                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2621                 code_push_statement(code, &stmt, instr->m_context);
2622             }
2623             stmt.opcode = INSTR_SUB_V;
2624             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2625             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2626             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2627             code_push_statement(code, &stmt, instr->m_context);
2628
2629             /* instruction generated */
2630             continue;
2631         }
2632
2633         if (instr->m_opcode == VINSTR_BITAND_VF) {
2634             stmt.opcode = INSTR_BITAND;
2635             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2636             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2637             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2638             code_push_statement(code, &stmt, instr->m_context);
2639             ++stmt.o1.s1;
2640             ++stmt.o3.s1;
2641             code_push_statement(code, &stmt, instr->m_context);
2642             ++stmt.o1.s1;
2643             ++stmt.o3.s1;
2644             code_push_statement(code, &stmt, instr->m_context);
2645
2646             /* instruction generated */
2647             continue;
2648         }
2649
2650         if (instr->m_opcode == VINSTR_BITOR_VF) {
2651             stmt.opcode = INSTR_BITOR;
2652             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2653             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2654             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2655             code_push_statement(code, &stmt, instr->m_context);
2656             ++stmt.o1.s1;
2657             ++stmt.o3.s1;
2658             code_push_statement(code, &stmt, instr->m_context);
2659             ++stmt.o1.s1;
2660             ++stmt.o3.s1;
2661             code_push_statement(code, &stmt, instr->m_context);
2662
2663             /* instruction generated */
2664             continue;
2665         }
2666
2667         if (instr->m_opcode == VINSTR_BITXOR_VF) {
2668             for (j = 0; j < 3; ++j) {
2669                 stmt.opcode = INSTR_BITOR;
2670                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2671                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2672                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2673                 code_push_statement(code, &stmt, instr->m_context);
2674                 stmt.opcode = INSTR_BITAND;
2675                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2676                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2677                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2678                 code_push_statement(code, &stmt, instr->m_context);
2679             }
2680             stmt.opcode = INSTR_SUB_V;
2681             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2682             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2683             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2684             code_push_statement(code, &stmt, instr->m_context);
2685
2686             /* instruction generated */
2687             continue;
2688         }
2689
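        /* Cross product lowering: for each component j the block below emits
         *     out[j] = a[(j+1)%3] * b[(j+2)%3]
         *     tmp[j] = a[(j+2)%3] * b[(j+1)%3]
         * and a final SUB_V then computes out = out - tmp.
         */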
2690         if (instr->m_opcode == VINSTR_CROSS) {
2691             stmt.opcode = INSTR_MUL_F;
2692             for (j = 0; j < 3; ++j) {
2693                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 1) % 3;
2694                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 2) % 3;
2695                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2696                 code_push_statement(code, &stmt, instr->m_context);
2697                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 2) % 3;
2698                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 1) % 3;
2699                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2700                 code_push_statement(code, &stmt, instr->m_context);
2701             }
2702             stmt.opcode = INSTR_SUB_V;
2703             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2704             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2705             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2706             code_push_statement(code, &stmt, instr->m_context);
2707
2708             /* instruction generated */
2709             continue;
2710         }
2711
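        /* Conditional branches: a target block that was already generated can
         * only be reached by jumping back to it (IF/IFNOT with a relative
         * offset), while an ungenerated target is emitted inline right after
         * the branch and the branch offset is fixed up once its size is known.
         */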
2712         if (instr->m_opcode == VINSTR_COND) {
2713             ontrue  = instr->m_bops[0];
2714             onfalse = instr->m_bops[1];
2715             /* TODO: have the AST signal which block should
2716              * come first: e.g. optimize IFs without ELSE...
2717              */
2718
2719             stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2720             stmt.o2.u1 = 0;
2721             stmt.o3.s1 = 0;
2722
2723             if (ontrue->m_generated) {
2724                 stmt.opcode = INSTR_IF;
2725                 stmt.o2.s1 = ontrue->m_code_start - code->statements.size();
2726                 if (stmt.o2.s1 != 1)
2727                     code_push_statement(code, &stmt, instr->m_context);
2728             }
2729             if (onfalse->m_generated) {
2730                 stmt.opcode = INSTR_IFNOT;
2731                 stmt.o2.s1 = onfalse->m_code_start - code->statements.size();
2732                 if (stmt.o2.s1 != 1)
2733                     code_push_statement(code, &stmt, instr->m_context);
2734             }
2735             if (!ontrue->m_generated) {
2736                 if (onfalse->m_generated)
2737                     return gen_blocks_recursive(code, func, ontrue);
2738             }
2739             if (!onfalse->m_generated) {
2740                 if (ontrue->m_generated)
2741                     return gen_blocks_recursive(code, func, onfalse);
2742             }
2743             /* neither ontrue nor onfalse has been generated yet */
2744             stmt.opcode = INSTR_IFNOT;
2745             if (!instr->m_likely) {
2746                 /* Honor the likelihood hint */
2747                 ir_block *tmp = onfalse;
2748                 stmt.opcode = INSTR_IF;
2749                 onfalse = ontrue;
2750                 ontrue = tmp;
2751             }
2752             stidx = code->statements.size();
2753             code_push_statement(code, &stmt, instr->m_context);
2754             /* on false we jump, so add ontrue-path */
2755             if (!gen_blocks_recursive(code, func, ontrue))
2756                 return false;
2757             /* fixup the jump address */
2758             code->statements[stidx].o2.s1 = code->statements.size() - stidx;
2759             /* generate onfalse path */
2760             if (onfalse->m_generated) {
2761                 /* fixup the jump address */
2762                 code->statements[stidx].o2.s1 = onfalse->m_code_start - stidx;
2763                 if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2764                     code->statements[stidx] = code->statements[stidx+1];
2765                     if (code->statements[stidx].o1.s1 < 0)
2766                         code->statements[stidx].o1.s1++;
2767                     code_pop_statement(code);
2768                 }
2769                 stmt.opcode = code->statements.back().opcode;
2770                 if (stmt.opcode == INSTR_GOTO ||
2771                     stmt.opcode == INSTR_IF ||
2772                     stmt.opcode == INSTR_IFNOT ||
2773                     stmt.opcode == INSTR_RETURN ||
2774                     stmt.opcode == INSTR_DONE)
2775                 {
2776                     /* no use jumping from here */
2777                     return true;
2778                 }
2779                 /* may have been generated in the previous recursive call */
2780                 stmt.opcode = INSTR_GOTO;
2781                 stmt.o1.s1 = onfalse->m_code_start - code->statements.size();
2782                 stmt.o2.s1 = 0;
2783                 stmt.o3.s1 = 0;
2784                 if (stmt.o1.s1 != 1)
2785                     code_push_statement(code, &stmt, instr->m_context);
2786                 return true;
2787             }
2788             else if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2789                 code->statements[stidx] = code->statements[stidx+1];
2790                 if (code->statements[stidx].o1.s1 < 0)
2791                     code->statements[stidx].o1.s1++;
2792                 code_pop_statement(code);
2793             }
2794             /* if not, generate now */
2795             return gen_blocks_recursive(code, func, onfalse);
2796         }
2797
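        /* Calls: the first 8 arguments are copied into the fixed parameter
         * globals OFS_PARM0 + 3*p (each slot is 3 globals wide), further
         * arguments go through generated EXTPARM globals, and a return value
         * that is not kept in OFS_RETURN is copied out after the CALLn.
         */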
2798         if ( (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8)
2799            || instr->m_opcode == VINSTR_NRCALL)
2800         {
2801             size_t p, first;
2802             ir_value *retvalue;
2803
2804             first = instr->m_params.size();
2805             if (first > 8)
2806                 first = 8;
2807             for (p = 0; p < first; ++p)
2808             {
2809                 ir_value *param = instr->m_params[p];
2810                 if (param->m_callparam)
2811                     continue;
2812
2813                 stmt.opcode = INSTR_STORE_F;
2814                 stmt.o3.u1 = 0;
2815
2816                 if (param->m_vtype == TYPE_FIELD)
2817                     stmt.opcode = field_store_instr[param->m_fieldtype];
2818                 else if (param->m_vtype == TYPE_NIL)
2819                     stmt.opcode = INSTR_STORE_V;
2820                 else
2821                     stmt.opcode = type_store_instr[param->m_vtype];
2822                 stmt.o1.u1 = param->codeAddress();
2823                 stmt.o2.u1 = OFS_PARM0 + 3 * p;
2824
2825                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2826                     /* fetch 3 separate floats */
2827                     stmt.opcode = INSTR_STORE_F;
2828                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2829                     code_push_statement(code, &stmt, instr->m_context);
2830                     stmt.o2.u1++;
2831                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2832                     code_push_statement(code, &stmt, instr->m_context);
2833                     stmt.o2.u1++;
2834                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2835                     code_push_statement(code, &stmt, instr->m_context);
2836                 }
2837                 else
2838                     code_push_statement(code, &stmt, instr->m_context);
2839             }
2840             /* Now handle extparams */
2841             first = instr->m_params.size();
2842             for (; p < first; ++p)
2843             {
2844                 ir_builder *ir = func->m_owner;
2845                 ir_value *param = instr->m_params[p];
2846                 ir_value *targetparam;
2847
2848                 if (param->m_callparam)
2849                     continue;
2850
2851                 if (p-8 >= ir->m_extparams.size())
2852                     ir->generateExtparam();
2853
2854                 targetparam = ir->m_extparams[p-8];
2855
2856                 stmt.opcode = INSTR_STORE_F;
2857                 stmt.o3.u1 = 0;
2858
2859                 if (param->m_vtype == TYPE_FIELD)
2860                     stmt.opcode = field_store_instr[param->m_fieldtype];
2861                 else if (param->m_vtype == TYPE_NIL)
2862                     stmt.opcode = INSTR_STORE_V;
2863                 else
2864                     stmt.opcode = type_store_instr[param->m_vtype];
2865                 stmt.o1.u1 = param->codeAddress();
2866                 stmt.o2.u1 = targetparam->codeAddress();
2867                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2868                     /* fetch 3 separate floats */
2869                     stmt.opcode = INSTR_STORE_F;
2870                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2871                     code_push_statement(code, &stmt, instr->m_context);
2872                     stmt.o2.u1++;
2873                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2874                     code_push_statement(code, &stmt, instr->m_context);
2875                     stmt.o2.u1++;
2876                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2877                     code_push_statement(code, &stmt, instr->m_context);
2878                 }
2879                 else
2880                     code_push_statement(code, &stmt, instr->m_context);
2881             }
2882
2883             stmt.opcode = INSTR_CALL0 + instr->m_params.size();
2884             if (stmt.opcode > INSTR_CALL8)
2885                 stmt.opcode = INSTR_CALL8;
2886             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2887             stmt.o2.u1 = 0;
2888             stmt.o3.u1 = 0;
2889             code_push_statement(code, &stmt, instr->m_context);
2890
2891             retvalue = instr->_m_ops[0];
2892             if (retvalue && retvalue->m_store != store_return &&
2893                 (retvalue->m_store == store_global || retvalue->m_life.size()))
2894             {
2895                 /* not to be kept in OFS_RETURN */
2896                 if (retvalue->m_vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS))
2897                     stmt.opcode = field_store_instr[retvalue->m_fieldtype];
2898                 else
2899                     stmt.opcode = type_store_instr[retvalue->m_vtype];
2900                 stmt.o1.u1 = OFS_RETURN;
2901                 stmt.o2.u1 = retvalue->codeAddress();
2902                 stmt.o3.u1 = 0;
2903                 code_push_statement(code, &stmt, instr->m_context);
2904             }
2905             continue;
2906         }
2907
2908         if (instr->m_opcode == INSTR_STATE) {
2909             stmt.opcode = instr->m_opcode;
2910             if (instr->_m_ops[0])
2911                 stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2912             if (instr->_m_ops[1])
2913                 stmt.o2.u1 = instr->_m_ops[1]->codeAddress();
2914             stmt.o3.u1 = 0;
2915             code_push_statement(code, &stmt, instr->m_context);
2916             continue;
2917         }
2918
2919         stmt.opcode = instr->m_opcode;
2920         stmt.o1.u1 = 0;
2921         stmt.o2.u1 = 0;
2922         stmt.o3.u1 = 0;
2923
2924         /* This is the general order of operands */
2925         if (instr->_m_ops[0])
2926             stmt.o3.u1 = instr->_m_ops[0]->codeAddress();
2927
2928         if (instr->_m_ops[1])
2929             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2930
2931         if (instr->_m_ops[2])
2932             stmt.o2.u1 = instr->_m_ops[2]->codeAddress();
2933
2934         if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
2935         {
2936             stmt.o1.u1 = stmt.o3.u1;
2937             stmt.o3.u1 = 0;
2938         }
2939         else if ((stmt.opcode >= INSTR_STORE_F &&
2940                   stmt.opcode <= INSTR_STORE_FNC) ||
2941                  (stmt.opcode >= INSTR_STOREP_F &&
2942                   stmt.opcode <= INSTR_STOREP_FNC))
2943         {
2944             /* 2-operand instructions with A -> B */
2945             stmt.o2.u1 = stmt.o3.u1;
2946             stmt.o3.u1 = 0;
2947
2948             /* tiny optimization, don't output
2949              * STORE a, a
2950              */
2951             if (stmt.o2.u1 == stmt.o1.u1 &&
2952                 OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
2953             {
2954                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
2955                 continue;
2956             }
2957         }
2958         code_push_statement(code, &stmt, instr->m_context);
2959     }
2960     return true;
2961 }
2962
2963 static bool gen_function_code(code_t *code, ir_function *self)
2964 {
2965     ir_block *block;
2966     prog_section_statement_t stmt, *retst;
2967
2968     /* Starting from the entry point, we generate blocks "as they come"
2969      * for now. Dead blocks will obviously not be translated.
2970      */
2971     if (self->m_blocks.empty()) {
2972         irerror(self->m_context, "Function '%s' declared without body.", self->m_name.c_str());
2973         return false;
2974     }
2975
2976     block = self->m_blocks[0].get();
2977     if (block->m_generated)
2978         return true;
2979
2980     if (!gen_blocks_recursive(code, self, block)) {
2981         irerror(self->m_context, "failed to generate blocks for '%s'", self->m_name.c_str());
2982         return false;
2983     }
2984
2985     /* code_write and qcvm -disasm need to know that the function ends here */
2986     retst = &code->statements.back();
2987     if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) &&
2988         self->m_outtype == TYPE_VOID &&
2989         retst->opcode == INSTR_RETURN &&
2990         !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1)
2991     {
2992         retst->opcode = INSTR_DONE;
2993         ++opts_optimizationcount[OPTIM_VOID_RETURN];
2994     } else {
2995         lex_ctx_t last;
2996
2997         stmt.opcode = INSTR_DONE;
2998         stmt.o1.u1  = 0;
2999         stmt.o2.u1  = 0;
3000         stmt.o3.u1  = 0;
3001         last.line   = code->linenums.back();
3002         last.column = code->columnnums.back();
3003
3004         code_push_statement(code, &stmt, last);
3005     }
3006     return true;
3007 }
3008
3009 qcint_t ir_builder::filestring(const char *filename)
3010 {
3011     /* NOTE: filename pointers are copied, we never strdup them,
3012      * thus we can use pointer-comparison to find the string.
3013      */
3014     qcint_t  str;
3015
3016     for (size_t i = 0; i != m_filenames.size(); ++i) {
3017         if (!strcmp(m_filenames[i], filename))
3018             return i;
3019     }
3020
3021     str = code_genstring(m_code.get(), filename);
3022     m_filenames.push_back(filename);
3023     m_filestrings.push_back(str);
3024     return str;
3025 }
3026
3027 bool ir_builder::generateGlobalFunction(ir_value *global)
3028 {
3029     prog_section_function_t fun;
3030     ir_function            *irfun;
3031
3032     size_t i;
3033
3034     if (!global->m_hasvalue || (!global->m_constval.vfunc)) {
3035         irerror(global->m_context, "Invalid state of function-global: not constant: %s", global->m_name.c_str());
3036         return false;
3037     }
3038
3039     irfun = global->m_constval.vfunc;
3040     fun.name = global->m_code.name;
3041     fun.file = filestring(global->m_context.file);
3042     fun.profile = 0; /* always 0 */
3043     fun.nargs = vec_size(irfun->m_params);
3044     if (fun.nargs > 8)
3045         fun.nargs = 8;
3046
3047     for (i = 0; i < 8; ++i) {
3048         if ((int32_t)i >= fun.nargs)
3049             fun.argsize[i] = 0;
3050         else
3051             fun.argsize[i] = type_sizeof_[irfun->m_params[i]];
3052     }
3053
3054     fun.firstlocal = 0;
3055     fun.locals = irfun->m_allocated_locals;
3056
3057     if (irfun->m_builtin)
3058         fun.entry = irfun->m_builtin+1;
3059     else {
3060         irfun->m_code_function_def = m_code->functions.size();
3061         fun.entry = m_code->statements.size();
3062     }
3063
3064     m_code->functions.push_back(fun);
3065     return true;
3066 }
3067
3068 ir_value* ir_builder::generateExtparamProto()
3069 {
3070     char      name[128];
3071
3072     util_snprintf(name, sizeof(name), "EXTPARM#%i", (int)(m_extparam_protos.size()));
3073     ir_value *global = new ir_value(name, store_global, TYPE_VECTOR);
3074     m_extparam_protos.emplace_back(global);
3075
3076     return global;
3077 }
3078
3079 void ir_builder::generateExtparam()
3080 {
3081     prog_section_def_t def;
3082     ir_value          *global;
3083
3084     if (m_extparam_protos.size() < m_extparams.size()+1)
3085         global = generateExtparamProto();
3086     else
3087         global = m_extparam_protos[m_extparams.size()].get();
3088
3089     def.name = code_genstring(m_code.get(), global->m_name.c_str());
3090     def.type = TYPE_VECTOR;
3091     def.offset = m_code->globals.size();
3092
3093     m_code->defs.push_back(def);
3094
3095     global->setCodeAddress(def.offset);
3096
3097     m_code->globals.push_back(0);
3098     m_code->globals.push_back(0);
3099     m_code->globals.push_back(0);
3100
3101     m_extparams.emplace_back(global);
3102 }
3103
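/* For functions taking more than 8 parameters: the caller leaves arguments
 * 9 and up in the shared EXTPARM globals (see the call generation above), so
 * this emits, effectively as a prologue, a store of each of them into the
 * function's own parameter local.
 */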
3104 static bool gen_function_extparam_copy(code_t *code, ir_function *self)
3105 {
3106     ir_builder *ir = self->m_owner;
3107
3108     size_t numparams = vec_size(self->m_params);
3109     if (!numparams)
3110         return true;
3111
3112     prog_section_statement_t stmt;
3113     stmt.opcode = INSTR_STORE_F;
3114     stmt.o3.s1 = 0;
3115     for (size_t i = 8; i < numparams; ++i) {
3116         size_t ext = i - 8;
3117         if (ext >= ir->m_extparams.size())
3118             ir->generateExtparam();
3119
3120         ir_value *ep = ir->m_extparams[ext];
3121
3122         stmt.opcode = type_store_instr[self->m_locals[i]->m_vtype];
3123         if (self->m_locals[i]->m_vtype == TYPE_FIELD &&
3124             self->m_locals[i]->m_fieldtype == TYPE_VECTOR)
3125         {
3126             stmt.opcode = INSTR_STORE_V;
3127         }
3128         stmt.o1.u1 = ep->codeAddress();
3129         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3130         code_push_statement(code, &stmt, self->m_context);
3131     }
3132
3133     return true;
3134 }
3135
3136 static bool gen_function_varargs_copy(code_t *code, ir_function *self)
3137 {
3138     size_t i, ext, numparams, maxparams;
3139
3140     ir_builder *ir = self->m_owner;
3141     ir_value   *ep;
3142     prog_section_statement_t stmt;
3143
3144     numparams = vec_size(self->m_params);
3145     if (!numparams)
3146         return true;
3147
3148     stmt.opcode = INSTR_STORE_V;
3149     stmt.o3.s1 = 0;
3150     maxparams = numparams + self->m_max_varargs;
3151     for (i = numparams; i < maxparams; ++i) {
3152         if (i < 8) {
3153             stmt.o1.u1 = OFS_PARM0 + 3*i;
3154             stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3155             code_push_statement(code, &stmt, self->m_context);
3156             continue;
3157         }
3158         ext = i - 8;
3159         while (ext >= ir->m_extparams.size())
3160             ir->generateExtparam();
3161
3162         ep = ir->m_extparams[ext];
3163
3164         stmt.o1.u1 = ep->codeAddress();
3165         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3166         code_push_statement(code, &stmt, self->m_context);
3167     }
3168
3169     return true;
3170 }
3171
3172 bool ir_builder::generateFunctionLocals(ir_value *global)
3173 {
3174     prog_section_function_t *def;
3175     ir_function             *irfun;
3176     uint32_t                 firstlocal, firstglobal;
3177
3178     irfun = global->m_constval.vfunc;
3179     def   = &m_code->functions[0] + irfun->m_code_function_def;
3180
3181     if (OPTS_OPTION_BOOL(OPTION_G) ||
3182         !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS)        ||
3183         (irfun->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3184     {
3185         firstlocal = def->firstlocal = m_code->globals.size();
3186     } else {
3187         firstlocal = def->firstlocal = m_first_common_local;
3188         ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
3189     }
3190
3191     firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? m_first_common_globaltemp : firstlocal);
3192
3193     for (size_t i = m_code->globals.size(); i < firstlocal + irfun->m_allocated_locals; ++i)
3194         m_code->globals.push_back(0);
3195
3196     for (auto& lp : irfun->m_locals) {
3197         ir_value *v = lp.get();
3198         if (v->m_locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
3199             v->setCodeAddress(firstlocal + v->m_code.local);
3200             if (!generateGlobal(v, true)) {
3201                 irerror(v->m_context, "failed to generate local %s", v->m_name.c_str());
3202                 return false;
3203             }
3204         }
3205         else
3206             v->setCodeAddress(firstglobal + v->m_code.local);
3207     }
3208     for (auto& vp : irfun->m_values) {
3209         ir_value *v = vp.get();
3210         if (v->m_callparam)
3211             continue;
3212         if (v->m_locked)
3213             v->setCodeAddress(firstlocal + v->m_code.local);
3214         else
3215             v->setCodeAddress(firstglobal + v->m_code.local);
3216     }
3217     return true;
3218 }
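/* Illustrative layout sketch (not part of the build):
 *
 *     globals: [ defs, constants, nil, vinstr temps ]
 *              [ shared globaltemps ]   <- m_first_common_globaltemp
 *              [ shared common locals ] <- m_first_common_local
 *              [ per-function locals appended past the end when not overlapping ]
 *
 * With OPTIM_OVERLAP_LOCALS (and neither -g nor IR_FLAG_MASK_NO_OVERLAP) a
 * function's locals start at m_first_common_local and overlap those of other
 * functions; otherwise fresh slots are appended at the end of the globals
 * vector.  With OPTIM_GLOBAL_TEMPS, unlocked temporaries are addressed from
 * the shared globaltemp area instead of the function's local block.
 */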
3219
3220 bool ir_builder::generateGlobalFunctionCode(ir_value *global)
3221 {
3222     prog_section_function_t *fundef;
3223     ir_function             *irfun;
3224
3225     irfun = global->m_constval.vfunc;
3226     if (!irfun) {
3227         if (global->m_cvq == CV_NONE) {
3228             if (irwarning(global->m_context, WARN_IMPLICIT_FUNCTION_POINTER,
3229                           "function `%s` has no body and in QC implicitly becomes a function-pointer",
3230                           global->m_name.c_str()))
3231             {
3232                 /* Not bailing out just now. If this happens a lot you don't want to have
3233                  * to rerun gmqcc for each such function.
3234                  */
3235
3236                 /* return false; */
3237             }
3238         }
3239         /* this was a function pointer, don't generate code for those */
3240         return true;
3241     }
3242
3243     if (irfun->m_builtin)
3244         return true;
3245
3246     /*
3247      * If there is no definition and the value is erasable, we can skip
3248      * emitting the function entirely.
3249      */
3250     if (global->m_flags & IR_FLAG_ERASABLE && irfun->m_code_function_def < 0) {
3251         return true;
3252     }
3253
3254     if (irfun->m_code_function_def < 0) {
3255         irerror(irfun->m_context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->m_name.c_str());
3256         return false;
3257     }
3258     fundef = &m_code->functions[irfun->m_code_function_def];
3259
3260     fundef->entry = m_code->statements.size();
3261     if (!generateFunctionLocals(global)) {
3262         irerror(irfun->m_context, "Failed to generate locals for function %s", irfun->m_name.c_str());
3263         return false;
3264     }
3265     if (!gen_function_extparam_copy(m_code.get(), irfun)) {
3266         irerror(irfun->m_context, "Failed to generate extparam-copy code for function %s", irfun->m_name.c_str());
3267         return false;
3268     }
3269     if (irfun->m_max_varargs && !gen_function_varargs_copy(m_code.get(), irfun)) {
3270         irerror(irfun->m_context, "Failed to generate vararg-copy code for function %s", irfun->m_name.c_str());
3271         return false;
3272     }
3273     if (!gen_function_code(m_code.get(), irfun)) {
3274         irerror(irfun->m_context, "Failed to generate code for function %s", irfun->m_name.c_str());
3275         return false;
3276     }
3277     return true;
3278 }
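/* Illustrative note (not part of the build): for each emitted function the
 * statement stream is laid out as
 *
 *     [extparam copies][vararg copies][function body]
 *
 * and fundef->entry records the index of the first of those statements, which
 * is where the VM starts executing when the function is called.
 */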
3279
3280 static void gen_vector_defs(code_t *code, prog_section_def_t def, const char *name)
3281 {
3282     char  *component;
3283     size_t len, i;
3284
3285     if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3286         return;
3287
3288     def.type = TYPE_FLOAT;
3289
3290     len = strlen(name);
3291
3292     component = (char*)mem_a(len+3);
3293     memcpy(component, name, len);
3294     len += 2;
3295     component[len]   = 0;
3296     component[len-2] = '_';
3297     /* start with "<name>_x"; the loop below bumps the suffix to _y and _z */
3298     component[len-1] = 'x';
3299
3300     for (i = 0; i < 3; ++i) {
3301         def.name = code_genstring(code, component);
3302         code->defs.push_back(def);
3303         def.offset++;
3304         component[len-1]++;
3305     }
3306
3307     mem_d(component);
3308 }
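/* Illustrative example (not part of the build; the name is hypothetical): for
 * a vector def "velocity" at offset 100, the loop above appends three float
 * defs so tools and engines can address the components individually:
 *
 *     "velocity_x" @ 100, "velocity_y" @ 101, "velocity_z" @ 102
 *
 * Immediates (names starting with '#') are skipped, as is everything when the
 * SINGLE_VECTOR_DEFS flag is set.
 */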
3309
3310 static void gen_vector_fields(code_t *code, prog_section_field_t fld, const char *name)
3311 {
3312     char  *component;
3313     size_t len, i;
3314
3315     if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3316         return;
3317
3318     fld.type = TYPE_FLOAT;
3319
3320     len = strlen(name);
3321
3322     component = (char*)mem_a(len+3);
3323     memcpy(component, name, len);
3324     len += 2;
3325     component[len]   = 0;
3326     component[len-2] = '_';
3327     /* start with "<name>_x"; the loop below bumps the suffix to _y and _z */
3328     component[len-1] = 'x';
3329
3330     for (i = 0; i < 3; ++i) {
3331         fld.name = code_genstring(code, component);
3332         code->fields.push_back(fld);
3333         fld.offset++;
3334         component[len-1]++;
3335     }
3336
3337     mem_d(component);
3338 }
3339
3340 bool ir_builder::generateGlobal(ir_value *global, bool islocal)
3341 {
3342     size_t             i;
3343     int32_t           *iptr;
3344     prog_section_def_t def;
3345     bool               pushdef = opts.optimizeoff;
3346
3347     /* we don't generate split-vectors */
3348     if (global->m_vtype == TYPE_VECTOR && (global->m_flags & IR_FLAG_SPLIT_VECTOR))
3349         return true;
3350
3351     def.type = global->m_vtype;
3352     def.offset = m_code->globals.size();
3353     def.name = 0;
3354     if (OPTS_OPTION_BOOL(OPTION_G) || !islocal)
3355     {
3356         pushdef = true;
3357
3358         /*
3359          * If we're erasable and never referenced, skip emitting
3360          * the global entirely.
3361          */
3362         if (global->m_flags & IR_FLAG_ERASABLE && global->m_reads.empty()) {
3363             return true;
3364         }
3365
3366         if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) &&
3367             !(global->m_flags & IR_FLAG_INCLUDE_DEF) &&
3368             (global->m_name[0] == '#' || global->m_cvq == CV_CONST))
3369         {
3370             pushdef = false;
3371         }
3372
3373         if (pushdef) {
3374             if (global->m_name[0] == '#') {
3375                 if (!m_str_immediate)
3376                     m_str_immediate = code_genstring(m_code.get(), "IMMEDIATE");
3377                 def.name = global->m_code.name = m_str_immediate;
3378             }
3379             else
3380                 def.name = global->m_code.name = code_genstring(m_code.get(), global->m_name.c_str());
3381         }
3382         else
3383             def.name   = 0;
3384         if (islocal) {
3385             def.offset = global->codeAddress();
3386             m_code->defs.push_back(def);
3387             if (global->m_vtype == TYPE_VECTOR)
3388                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3389             else if (global->m_vtype == TYPE_FIELD && global->m_fieldtype == TYPE_VECTOR)
3390                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3391             return true;
3392         }
3393     }
3394     if (islocal)
3395         return true;
3396
3397     switch (global->m_vtype)
3398     {
3399     case TYPE_VOID:
3400         if (0 == global->m_name.compare("end_sys_globals")) {
3401             // TODO: remember this point... all the defs before this one
3402             // should be checksummed and added to progdefs.h when we generate it.
3403         }
3404         else if (0 == global->m_name.compare("end_sys_fields")) {
3405             // TODO: same as above but for entity fields rather than globals
3406         }
3407         else if(irwarning(global->m_context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`",
3408                           global->m_name.c_str()))
3409         {
3410             /* Not bailing out */
3411             /* return false; */
3412         }
3413         /* I'd argue setting it to 0 is sufficient, but maybe some depend on knowing how far
3414          * the system fields actually go? Though the engine knows this anyway...
3415          * Maybe this could be an -foption
3416          * fteqcc creates data for end_sys_* - of size 1, so let's do the same
3417          */
3418         global->setCodeAddress(m_code->globals.size());
3419         m_code->globals.push_back(0);
3420         /* Add the def */
3421         if (pushdef)
3422             m_code->defs.push_back(def);
3423         return true;
3424     case TYPE_POINTER:
3425         if (pushdef)
3426             m_code->defs.push_back(def);
3427         return gen_global_pointer(m_code.get(), global);
3428     case TYPE_FIELD:
3429         if (pushdef) {
3430             m_code->defs.push_back(def);
3431             if (global->m_fieldtype == TYPE_VECTOR)
3432                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3433         }
3434         return gen_global_field(m_code.get(), global);
3435     case TYPE_ENTITY:
3436         /* fall through */
3437     case TYPE_FLOAT:
3438     {
3439         global->setCodeAddress(m_code->globals.size());
3440         if (global->m_hasvalue) {
3441             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3442                 return true;
3443             iptr = (int32_t*)&global->m_constval.ivec[0];
3444             m_code->globals.push_back(*iptr);
3445         } else {
3446             m_code->globals.push_back(0);
3447         }
3448         if (!islocal && global->m_cvq != CV_CONST)
3449             def.type |= DEF_SAVEGLOBAL;
3450         if (pushdef)
3451             m_code->defs.push_back(def);
3452
3453         return global->m_code.globaladdr >= 0;
3454     }
3455     case TYPE_STRING:
3456     {
3457         global->setCodeAddress(m_code->globals.size());
3458         if (global->m_hasvalue) {
3459             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3460                 return true;
3461             uint32_t load = code_genstring(m_code.get(), global->m_constval.vstring);
3462             m_code->globals.push_back(load);
3463         } else {
3464             m_code->globals.push_back(0);
3465         }
3466         if (!islocal && global->m_cvq != CV_CONST)
3467             def.type |= DEF_SAVEGLOBAL;
3468         if (pushdef)
3469             m_code->defs.push_back(def);
3470         return global->m_code.globaladdr >= 0;
3471     }
3472     case TYPE_VECTOR:
3473     {
3474         size_t d;
3475         global->setCodeAddress(m_code->globals.size());
3476         if (global->m_hasvalue) {
3477             iptr = (int32_t*)&global->m_constval.ivec[0];
3478             m_code->globals.push_back(iptr[0]);
3479             if (global->m_code.globaladdr < 0)
3480                 return false;
3481             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3482                 m_code->globals.push_back(iptr[d]);
3483             }
3484         } else {
3485             m_code->globals.push_back(0);
3486             if (global->m_code.globaladdr < 0)
3487                 return false;
3488             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3489                 m_code->globals.push_back(0);
3490             }
3491         }
3492         if (!islocal && global->m_cvq != CV_CONST)
3493             def.type |= DEF_SAVEGLOBAL;
3494
3495         if (pushdef) {
3496             m_code->defs.push_back(def);
3497             def.type &= ~DEF_SAVEGLOBAL;
3498             gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3499         }
3500         return global->m_code.globaladdr >= 0;
3501     }
3502     case TYPE_FUNCTION:
3503         global->setCodeAddress(m_code->globals.size());
3504         if (!global->m_hasvalue) {
3505             m_code->globals.push_back(0);
3506             if (global->m_code.globaladdr < 0)
3507                 return false;
3508         } else {
3509             m_code->globals.push_back(m_code->functions.size());
3510             if (!generateGlobalFunction(global))
3511                 return false;
3512         }
3513         if (!islocal && global->m_cvq != CV_CONST)
3514             def.type |= DEF_SAVEGLOBAL;
3515         if (pushdef)
3516             m_code->defs.push_back(def);
3517         return true;
3518     case TYPE_VARIANT:
3519         /* assume biggest type */
3520         global->setCodeAddress(m_code->globals.size());
3521         m_code->globals.push_back(0);
3522         for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i)
3523             m_code->globals.push_back(0);
3524         return true;
3525     default:
3526         /* refuse to create 'void' type or any other fancy business. */
3527         irerror(global->m_context, "Invalid type for global variable `%s`: %s",
3528                 global->m_name.c_str(), type_name[global->m_vtype]);
3529         return false;
3530     }
3531 }
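/* Illustrative sketch (not part of the build): float and vector constants are
 * emitted by bit pattern rather than by conversion -- the float's bits are
 * reinterpreted as an int32_t and pushed into the globals vector, which is
 * what the iptr casts above do.  The well-defined equivalent would be:
 *
 *     float   f = 12.5f;
 *     int32_t word;
 *     memcpy(&word, &f, sizeof word);
 *     m_code->globals.push_back(word);
 *
 * Non-constant, non-local globals additionally get DEF_SAVEGLOBAL or'ed into
 * their def type so the engine writes them into savegames.
 */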
3532
3533 static GMQCC_INLINE void ir_builder_prepare_field(code_t *code, ir_value *field)
3534 {
3535     field->m_code.fieldaddr = code_alloc_field(code, type_sizeof_[field->m_fieldtype]);
3536 }
3537
3538 static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
3539 {
3540     prog_section_def_t def;
3541     prog_section_field_t fld;
3542
3545     def.type   = (uint16_t)field->m_vtype;
3546     def.offset = (uint16_t)self->m_code->globals.size();
3547
3548     /* create a global named the same as the field */
3549     if (OPTS_OPTION_U32(OPTION_STANDARD) == COMPILER_GMQCC) {
3550         /* in our standard, the global gets a dot prefix */
3551         size_t len = field->m_name.length();
3552         char name[1024];
3553
3554         /* we really don't want to have to allocate this, and 1024
3555          * bytes is more than enough for a variable/field name
3556          */
3557         if (len+2 >= sizeof(name)) {
3558             irerror(field->m_context, "invalid field name size: %u", (unsigned int)len);
3559             return false;
3560         }
3561
3562         name[0] = '.';
3563         memcpy(name+1, field->m_name.c_str(), len); // no strncpy - we used strlen above
3564         name[len+1] = 0;
3565
3566         def.name = code_genstring(self->m_code.get(), name);
3567         fld.name = def.name + 1; /* we reuse that string table entry */
3568     } else {
3569         /* In plain QC there cannot be a global with the same name as the
3570          * field, so we simply give the global the field's name.
3571          * FIXME: fteqcc should create such a global as well; check whether
3572          * it actually uses the same name (it probably does).
3573          */
3574         def.name = code_genstring(self->m_code.get(), field->m_name.c_str());
3575         fld.name = def.name;
3576     }
3577
3578     field->m_code.name = def.name;
3579
3580     self->m_code->defs.push_back(def);
3581
3582     fld.type = field->m_fieldtype;
3583
3584     if (fld.type == TYPE_VOID) {
3585         irerror(field->m_context, "field is missing a type: %s - don't know its size", field->m_name.c_str());
3586         return false;
3587     }
3588
3589     fld.offset = field->m_code.fieldaddr;
3590
3591     self->m_code->fields.push_back(fld);
3592
3593     field->setCodeAddress(self->m_code->globals.size());
3594     self->m_code->globals.push_back(fld.offset);
3595     if (fld.type == TYPE_VECTOR) {
3596         self->m_code->globals.push_back(fld.offset+1);
3597         self->m_code->globals.push_back(fld.offset+2);
3598     }
3599
3600     if (field->m_fieldtype == TYPE_VECTOR) {
3601         gen_vector_defs  (self->m_code.get(), def, field->m_name.c_str());
3602         gen_vector_fields(self->m_code.get(), fld, field->m_name.c_str());
3603     }
3604
3605     return field->m_code.globaladdr >= 0;
3606 }
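/* Illustrative example (not part of the build; "health" is a hypothetical
 * name): under the gmqcc standard a ".float health" field produces
 *
 *     def   ".health"   -- the global that holds the allocated field offset
 *     field "health"    -- reuses the same string-table entry, skipping the dot
 *
 * while other standards name both "health".  The backing global stores
 * fld.offset (plus offset+1 and offset+2 for vectors), and vector fields also
 * get per-component _x/_y/_z defs and fields via the helpers above.
 */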
3607
3608 static void ir_builder_collect_reusables(ir_builder *builder) {
3609     std::vector<ir_value*> reusables;
3610
3611     for (auto& gp : builder->m_globals) {
3612         ir_value *value = gp.get();
3613         if (value->m_vtype != TYPE_FLOAT || !value->m_hasvalue)
3614             continue;
3615         if (value->m_cvq == CV_CONST || (value->m_name.length() >= 1 && value->m_name[0] == '#'))
3616             reusables.emplace_back(value);
3617     }
3618     builder->m_const_floats = std::move(reusables);
3619 }
3620
3621 static void ir_builder_split_vector(ir_builder *self, ir_value *vec) {
3622     ir_value* found[3] = { nullptr, nullptr, nullptr };
3623
3624     // must not be written to
3625     if (vec->m_writes.size())
3626         return;
3627     // must not be trying to access individual members
3628     if (vec->m_members[0] || vec->m_members[1] || vec->m_members[2])
3629         return;
3630     // it must actually be read, otherwise it won't be generated anyway
3631     if (vec->m_reads.empty())
3632         return;
3636
3637     // may only be used directly as function parameters; any other reading instruction cancels the split
3638     for (ir_instr *user : vec->m_reads) {
3639         // only split vectors that are passed directly as call parameters
3640         if ((user->m_opcode < INSTR_CALL0 || user->m_opcode > INSTR_CALL8) && user->m_opcode != VINSTR_NRCALL)
3641             return;
3642     }
3643
3644     vec->m_flags |= IR_FLAG_SPLIT_VECTOR;
3645
3646     // find existing floats making up the split
3647     for (ir_value *c : self->m_const_floats) {
3648         if (!found[0] && c->m_constval.vfloat == vec->m_constval.vvec.x)
3649             found[0] = c;
3650         if (!found[1] && c->m_constval.vfloat == vec->m_constval.vvec.y)
3651             found[1] = c;
3652         if (!found[2] && c->m_constval.vfloat == vec->m_constval.vvec.z)
3653             found[2] = c;
3654         if (found[0] && found[1] && found[2])
3655             break;
3656     }
3657
3658     // generate floats for not yet found components
3659     if (!found[0])
3660         found[0] = self->literalFloat(vec->m_constval.vvec.x, true);
3661     if (!found[1]) {
3662         if (vec->m_constval.vvec.y == vec->m_constval.vvec.x)
3663             found[1] = found[0];
3664         else
3665             found[1] = self->literalFloat(vec->m_constval.vvec.y, true);
3666     }
3667     if (!found[2]) {
3668         if (vec->m_constval.vvec.z == vec->m_constval.vvec.x)
3669             found[2] = found[0];
3670         else if (vec->m_constval.vvec.z == vec->m_constval.vvec.y)
3671             found[2] = found[1];
3672         else
3673             found[2] = self->literalFloat(vec->m_constval.vvec.z, true);
3674     }
3675
3676     // the .members array should be safe to use here
3677     vec->m_members[0] = found[0];
3678     vec->m_members[1] = found[1];
3679     vec->m_members[2] = found[2];
3680
3681     // register the readers for these floats
3682     found[0]->m_reads.insert(found[0]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3683     found[1]->m_reads.insert(found[1]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3684     found[2]->m_reads.insert(found[2]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3685 }
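/* Illustrative example (not part of the build): a constant vector immediate
 * like '0 0 400' that is never written, never member-accessed, and only ever
 * read as a direct call parameter is flagged IR_FLAG_SPLIT_VECTOR.  Its
 * components are then backed by float immediates: an existing 0 constant is
 * reused for both x and y, and a new 400 immediate is created for z.  The
 * call codegen can later read vec->m_members[0..2] and pass three floats
 * instead of materializing a three-slot vector global.
 */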
3686
3687 static void ir_builder_split_vectors(ir_builder *self) {
3688     // Member values may be added to self->m_globals during this operation,
3689     // but no new vectors will be added. Iterate by index, since C++
3690     // iterators into the vector could be invalidated by the insertions.
3691     const size_t count = self->m_globals.size();
3692     for (size_t i = 0; i != count; ++i) {
3693         ir_value *v = self->m_globals[i].get();
3694         if (v->m_vtype != TYPE_VECTOR || !v->m_name.length() || v->m_name[0] != '#')
3695             continue;
3696         ir_builder_split_vector(self, v);
3697     }
3698 }
3699
3700 bool ir_builder::generate(const char *filename)
3701 {
3702     prog_section_statement_t stmt;
3703     char  *lnofile = nullptr;
3704
3705     if (OPTS_FLAG(SPLIT_VECTOR_PARAMETERS)) {
3706         ir_builder_collect_reusables(this);
3707         if (!m_const_floats.empty())
3708             ir_builder_split_vectors(this);
3709     }
3710
3711     for (auto& fp : m_fields)
3712         ir_builder_prepare_field(m_code.get(), fp.get());
3713
3714     for (auto& gp : m_globals) {
3715         ir_value *global = gp.get();
3716         if (!generateGlobal(global, false)) {
3717             return false;
3718         }
3719         if (global->m_vtype == TYPE_FUNCTION) {
3720             ir_function *func = global->m_constval.vfunc;
3721             if (func && m_max_locals < func->m_allocated_locals &&
3722                 !(func->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3723             {
3724                 m_max_locals = func->m_allocated_locals;
3725             }
3726             if (func && m_max_globaltemps < func->m_globaltemps)
3727                 m_max_globaltemps = func->m_globaltemps;
3728         }
3729     }
3730
3731     for (auto& fp : m_fields) {
3732         if (!ir_builder_gen_field(this, fp.get()))
3733             return false;
3734     }
3735
3736     // generate nil
3737     m_nil->setCodeAddress(m_code->globals.size());
3738     m_code->globals.push_back(0);
3739     m_code->globals.push_back(0);
3740     m_code->globals.push_back(0);
3741
3742     // generate virtual-instruction temps
3743     for (size_t i = 0; i < IR_MAX_VINSTR_TEMPS; ++i) {
3744         m_vinstr_temp[i]->setCodeAddress(m_code->globals.size());
3745         m_code->globals.push_back(0);
3746         m_code->globals.push_back(0);
3747         m_code->globals.push_back(0);
3748     }
3749
3750     // generate global temps
3751     m_first_common_globaltemp = m_code->globals.size();
3752     m_code->globals.insert(m_code->globals.end(), m_max_globaltemps, 0);
3753     // FIXME:DELME:
3754     //for (size_t i = 0; i < m_max_globaltemps; ++i) {
3755     //    m_code->globals.push_back(0);
3756     //}
3757     // generate common locals
3758     m_first_common_local = m_code->globals.size();
3759     m_code->globals.insert(m_code->globals.end(), m_max_locals, 0);
3760     // FIXME:DELME:
3761     //for (i = 0; i < m_max_locals; ++i) {
3762     //    m_code->globals.push_back(0);
3763     //}
3764
3765     // generate function code
3766
3767     for (auto& gp : m_globals) {
3768         ir_value *global = gp.get();
3769         if (global->m_vtype == TYPE_FUNCTION) {
3770             if (!this->generateGlobalFunctionCode(global))
3771                 return false;
3772         }
3773     }
3774
3775     if (m_code->globals.size() >= 65536) {
3776         irerror(m_globals.back()->m_context,
3777             "This progs file would require more globals than the metadata can handle (%zu). Bailing out.",
3778             m_code->globals.size());
3779         return false;
3780     }
3781
3782     /* DarkPlaces (DP) errors out if the last instruction is not an INSTR_DONE. */
3783     if (m_code->statements.back().opcode != INSTR_DONE)
3784     {
3785         lex_ctx_t last;
3786
3787         stmt.opcode = INSTR_DONE;
3788         stmt.o1.u1  = 0;
3789         stmt.o2.u1  = 0;
3790         stmt.o3.u1  = 0;
3791         last.line   = m_code->linenums.back();
3792         last.column = m_code->columnnums.back();
3793
3794         code_push_statement(m_code.get(), &stmt, last);
3795     }
3796
3797     if (OPTS_OPTION_BOOL(OPTION_PP_ONLY))
3798         return true;
3799
3800     if (m_code->statements.size() != m_code->linenums.size()) {
3801         con_err("Linecounter wrong: %zu != %zu\n",
3802                 m_code->statements.size(),
3803                 m_code->linenums.size());
3804     } else if (OPTS_FLAG(LNO)) {
3805         char  *dot;
3806         size_t filelen = strlen(filename);
3807
3808         memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
3809         dot = strrchr(lnofile, '.');
3810         if (!dot) {
3811             vec_pop(lnofile);
3812         } else {
3813             vec_shrinkto(lnofile, dot - lnofile);
3814         }
3815         memcpy(vec_add(lnofile, 5), ".lno", 5);
3816     }
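    /* Illustrative example (not part of the build): the .lno name replaces the
     * output file's extension, e.g. "progs.dat" becomes "progs.lno"; a name
     * without any '.' simply gets ".lno" appended.
     */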
3817
3818     if (!code_write(m_code.get(), filename, lnofile)) {
3819         vec_free(lnofile);
3820         return false;
3821     }
3822
3823     vec_free(lnofile);
3824     return true;
3825 }
3826
3827 /***********************************************************************
3828  * IR DEBUG Dump functions...
3829  */
3830
3831 #define IND_BUFSZ 1024
3832
3833 static const char *qc_opname(int op)
3834 {
3835     if (op < 0) return "<INVALID>";
3836     if (op < VINSTR_END)
3837         return util_instr_str[op];
3838     switch (op) {
3839         case VINSTR_END:       return "END";
3840         case VINSTR_PHI:       return "PHI";
3841         case VINSTR_JUMP:      return "JUMP";
3842         case VINSTR_COND:      return "COND";
3843         case VINSTR_BITXOR:    return "BITXOR";
3844         case VINSTR_BITAND_V:  return "BITAND_V";
3845         case VINSTR_BITOR_V:   return "BITOR_V";
3846         case VINSTR_BITXOR_V:  return "BITXOR_V";
3847         case VINSTR_BITAND_VF: return "BITAND_VF";
3848         case VINSTR_BITOR_VF:  return "BITOR_VF";
3849         case VINSTR_BITXOR_VF: return "BITXOR_VF";
3850         case VINSTR_CROSS:     return "CROSS";
3851         case VINSTR_NEG_F:     return "NEG_F";
3852         case VINSTR_NEG_V:     return "NEG_V";
3853         default:               return "<UNK>";
3854     }
3855 }
3856
3857 void ir_builder::dump(int (*oprintf)(const char*, ...)) const
3858 {
3859     size_t i;
3860     char indent[IND_BUFSZ];
3861     indent[0] = '\t';
3862     indent[1] = 0;
3863
3864     oprintf("module %s\n", m_name.c_str());
3865     for (i = 0; i < m_globals.size(); ++i)
3866     {
3867         oprintf("global ");
3868         if (m_globals[i]->m_hasvalue)
3869             oprintf("%s = ", m_globals[i]->m_name.c_str());
3870         m_globals[i].get()->dump(oprintf);
3871         oprintf("\n");
3872     }
3873     for (i = 0; i < m_functions.size(); ++i)
3874         ir_function_dump(m_functions[i].get(), indent, oprintf);
3875     oprintf("endmodule %s\n", m_name.c_str());
3876 }
3877
3878 static const char *storenames[] = {
3879     "[global]", "[local]", "[param]", "[value]", "[return]"
3880 };
3881
3882 void ir_function_dump(ir_function *f, char *ind,
3883                       int (*oprintf)(const char*, ...))
3884 {
3885     size_t i;
3886     if (f->m_builtin != 0) {
3887         oprintf("%sfunction %s = builtin %i\n", ind, f->m_name.c_str(), -f->m_builtin);
3888         return;
3889     }
3890     oprintf("%sfunction %s\n", ind, f->m_name.c_str());
3891     util_strncat(ind, "\t", IND_BUFSZ-1);
3892     if (f->m_locals.size())
3893     {
3894         oprintf("%s%i locals:\n", ind, (int)f->m_locals.size());
3895         for (i = 0; i < f->m_locals.size(); ++i) {
3896             oprintf("%s\t", ind);
3897             f->m_locals[i].get()->dump(oprintf);
3898             oprintf("\n");
3899         }
3900     }
3901     oprintf("%sliferanges:\n", ind);
3902     for (i = 0; i < f->m_locals.size(); ++i) {
3903         const char *attr = "";
3904         size_t l, m;
3905         ir_value *v = f->m_locals[i].get();
3906         if (v->m_unique_life && v->m_locked)
3907             attr = "unique,locked ";
3908         else if (v->m_unique_life)
3909             attr = "unique ";
3910         else if (v->m_locked)
3911             attr = "locked ";
3912         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3913                 storenames[v->m_store],
3914                 attr, (v->m_callparam ? "callparam " : ""),
3915                 (int)v->m_code.local);
3916         if (v->m_life.empty())
3917             oprintf("[null]");
3918         for (l = 0; l < v->m_life.size(); ++l) {
3919             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3920         }
3921         oprintf("\n");
3922         for (m = 0; m < 3; ++m) {
3923             ir_value *vm = v->m_members[m];
3924             if (!vm)
3925                 continue;
3926             oprintf("%s\t%s: @%i ", ind, vm->m_name.c_str(), (int)vm->m_code.local);
3927             for (l = 0; l < vm->m_life.size(); ++l) {
3928                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3929             }
3930             oprintf("\n");
3931         }
3932     }
3933     for (i = 0; i < f->m_values.size(); ++i) {
3934         const char *attr = "";
3935         size_t l, m;
3936         ir_value *v = f->m_values[i].get();
3937         if (v->m_unique_life && v->m_locked)
3938             attr = "unique,locked ";
3939         else if (v->m_unique_life)
3940             attr = "unique ";
3941         else if (v->m_locked)
3942             attr = "locked ";
3943         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3944                 storenames[v->m_store],
3945                 attr, (v->m_callparam ? "callparam " : ""),
3946                 (int)v->m_code.local);
3947         if (v->m_life.empty())
3948             oprintf("[null]");
3949         for (l = 0; l < v->m_life.size(); ++l) {
3950             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3951         }
3952         oprintf("\n");
3953         for (m = 0; m < 3; ++m) {
3954             ir_value *vm = v->m_members[m];
3955             if (!vm)
3956                 continue;
3957             if (vm->m_unique_life && vm->m_locked)
3958                 attr = "unique,locked ";
3959             else if (vm->m_unique_life)
3960                 attr = "unique ";
3961             else if (vm->m_locked)
3962                 attr = "locked ";
3963             oprintf("%s\t%s: %s@%i ", ind, vm->m_name.c_str(), attr, (int)vm->m_code.local);
3964             for (l = 0; l < vm->m_life.size(); ++l) {
3965                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3966             }
3967             oprintf("\n");
3968         }
3969     }
3970     if (f->m_blocks.size())
3971     {
3972         oprintf("%slife passes: %i\n", ind, (int)f->m_run_id);
3973         for (i = 0; i < f->m_blocks.size(); ++i) {
3974             ir_block_dump(f->m_blocks[i].get(), ind, oprintf);
3975         }
3976
3977     }
3978     ind[strlen(ind)-1] = 0;
3979     oprintf("%sendfunction %s\n", ind, f->m_name.c_str());
3980 }
3981
3982 void ir_block_dump(ir_block* b, char *ind,
3983                    int (*oprintf)(const char*, ...))
3984 {
3985     size_t i;
3986     oprintf("%s:%s\n", ind, b->m_label.c_str());
3987     util_strncat(ind, "\t", IND_BUFSZ-1);
3988
3989     if (b->m_instr && b->m_instr[0])
3990         oprintf("%s (%i) [entry]\n", ind, (int)(b->m_instr[0]->m_eid-1));
3991     for (i = 0; i < vec_size(b->m_instr); ++i)
3992         ir_instr_dump(b->m_instr[i], ind, oprintf);
3993     ind[strlen(ind)-1] = 0;
3994 }
3995
3996 static void dump_phi(ir_instr *in, int (*oprintf)(const char*, ...))
3997 {
3998     oprintf("%s <- phi ", in->_m_ops[0]->m_name.c_str());
3999     for (auto &it : in->m_phi) {
4000         oprintf("([%s] : %s) ", it.from->m_label.c_str(),
4001                                 it.value->m_name.c_str());
4002     }
4003     oprintf("\n");
4004 }
4005
4006 void ir_instr_dump(ir_instr *in, char *ind,
4007                        int (*oprintf)(const char*, ...))
4008 {
4009     size_t i;
4010     const char *comma = nullptr;
4011
4012     oprintf("%s (%i) ", ind, (int)in->m_eid);
4013
4014     if (in->m_opcode == VINSTR_PHI) {
4015         dump_phi(in, oprintf);
4016         return;
4017     }
4018
4019     util_strncat(ind, "\t", IND_BUFSZ-1);
4020
4021     if (in->_m_ops[0] && (in->_m_ops[1] || in->_m_ops[2])) {
4022         in->_m_ops[0]->dump(oprintf);
4023         /* operand 0 is the destination; the condition guarantees a source exists */
4024         oprintf(" <- ");
4025     }
4026     if (in->m_opcode == INSTR_CALL0 || in->m_opcode == VINSTR_NRCALL) {
4027         oprintf("CALL%i\t", (int)in->m_params.size());
4028     } else
4029         oprintf("%s\t", qc_opname(in->m_opcode));
4030
4031     if (in->_m_ops[0] && !(in->_m_ops[1] || in->_m_ops[2])) {
4032         in->_m_ops[0]->dump(oprintf);
4033         comma = ",\t";
4034     }
4035     else
4036     {
4037         for (i = 1; i != 3; ++i) {
4038             if (in->_m_ops[i]) {
4039                 if (comma)
4040                     oprintf(comma);
4041                 in->_m_ops[i]->dump(oprintf);
4042                 comma = ",\t";
4043             }
4044         }
4045     }
4046     if (in->m_bops[0]) {
4047         if (comma)
4048             oprintf(comma);
4049         oprintf("[%s]", in->m_bops[0]->m_label.c_str());
4050         comma = ",\t";
4051     }
4052     if (in->m_bops[1])
4053         oprintf("%s[%s]", comma, in->m_bops[1]->m_label.c_str());
4054     if (in->m_params.size()) {
4055         oprintf("\tparams: ");
4056         for (auto &it : in->m_params)
4057             oprintf("%s, ", it->m_name.c_str());
4058     }
4059     oprintf("\n");
4060     ind[strlen(ind)-1] = 0;
4061 }
4062
4063 static void ir_value_dump_string(const char *str, int (*oprintf)(const char*, ...))
4064 {
4065     oprintf("\"");
4066     for (; *str; ++str) {
4067         switch (*str) {
4068             case '\n': oprintf("\\n"); break;
4069             case '\r': oprintf("\\r"); break;
4070             case '\t': oprintf("\\t"); break;
4071             case '\v': oprintf("\\v"); break;
4072             case '\f': oprintf("\\f"); break;
4073             case '\b': oprintf("\\b"); break;
4074             case '\a': oprintf("\\a"); break;
4075             case '\\': oprintf("\\\\"); break;
4076             case '"': oprintf("\\\""); break;
4077             default: oprintf("%c", *str); break;
4078         }
4079     }
4080     oprintf("\"");
4081 }
4082
4083 void ir_value::dump(int (*oprintf)(const char*, ...)) const
4084 {
4085     if (m_hasvalue) {
4086         switch (m_vtype) {
4087             default:
4088             case TYPE_VOID:
4089                 oprintf("(void)");
4090                 break;
4091             case TYPE_FUNCTION:
4092                 oprintf("fn:%s", m_name.c_str());
4093                 break;
4094             case TYPE_FLOAT:
4095                 oprintf("%g", m_constval.vfloat);
4096                 break;
4097             case TYPE_VECTOR:
4098                 oprintf("'%g %g %g'",
4099                         m_constval.vvec.x,
4100                         m_constval.vvec.y,
4101                         m_constval.vvec.z);
4102                 break;
4103             case TYPE_ENTITY:
4104                 oprintf("(entity)");
4105                 break;
4106             case TYPE_STRING:
4107                 ir_value_dump_string(m_constval.vstring, oprintf);
4108                 break;
4109 #if 0
4110             case TYPE_INTEGER:
4111                 oprintf("%i", m_constval.vint);
4112                 break;
4113 #endif
4114             case TYPE_POINTER:
4115                 oprintf("&%s",
4116                     m_constval.vpointer->m_name.c_str());
4117                 break;
4118         }
4119     } else {
4120         oprintf("%s", m_name.c_str());
4121     }
4122 }
4123
4124 void ir_value::dumpLife(int (*oprintf)(const char*,...)) const
4125 {
4126     oprintf("Life of %12s:", m_name.c_str());
4127     for (size_t i = 0; i < m_life.size(); ++i)
4128     {
4129         oprintf(" + [%i, %i]\n", m_life[i].start, m_life[i].end);
4130     }
4131 }