1 #include <stdlib.h>
2 #include <string.h>
3
4 #include "gmqcc.h"
5 #include "ir.h"
6
7 /***********************************************************************
8  * Type sizes used at multiple points in the IR codegen
9  */
10
11 const char *type_name[TYPE_COUNT] = {
12     "void",
13     "string",
14     "float",
15     "vector",
16     "entity",
17     "field",
18     "function",
19     "pointer",
20     "integer",
21     "variant",
22     "struct",
23     "union",
24     "array",
25
26     "nil",
27     "<no-expression>"
28 };
29
30 static size_t type_sizeof_[TYPE_COUNT] = {
31     1, /* TYPE_VOID     */
32     1, /* TYPE_STRING   */
33     1, /* TYPE_FLOAT    */
34     3, /* TYPE_VECTOR   */
35     1, /* TYPE_ENTITY   */
36     1, /* TYPE_FIELD    */
37     1, /* TYPE_FUNCTION */
38     1, /* TYPE_POINTER  */
39     1, /* TYPE_INTEGER  */
40     3, /* TYPE_VARIANT  */
41     0, /* TYPE_STRUCT   */
42     0, /* TYPE_UNION    */
43     0, /* TYPE_ARRAY    */
44     0, /* TYPE_NIL      */
45     0, /* TYPE_NOEXPR   */
46 };
47
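/* The tables below map a qc_type to the instruction used for plain stores,
 * field stores, pointer stores, ==, != and logical NOT. Entries set to
 * VINSTR_END are types for which no such instruction exists and which should
 * never reach instruction selection.
 */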
48 const uint16_t type_store_instr[TYPE_COUNT] = {
49     INSTR_STORE_F, /* should use I when having integer support */
50     INSTR_STORE_S,
51     INSTR_STORE_F,
52     INSTR_STORE_V,
53     INSTR_STORE_ENT,
54     INSTR_STORE_FLD,
55     INSTR_STORE_FNC,
56     INSTR_STORE_ENT, /* should use I */
57 #if 0
58     INSTR_STORE_I, /* integer type */
59 #else
60     INSTR_STORE_F,
61 #endif
62
63     INSTR_STORE_V, /* variant, should never be accessed */
64
65     VINSTR_END, /* struct */
66     VINSTR_END, /* union  */
67     VINSTR_END, /* array  */
68     VINSTR_END, /* nil    */
69     VINSTR_END, /* noexpr */
70 };
71
72 const uint16_t field_store_instr[TYPE_COUNT] = {
73     INSTR_STORE_FLD,
74     INSTR_STORE_FLD,
75     INSTR_STORE_FLD,
76     INSTR_STORE_V,
77     INSTR_STORE_FLD,
78     INSTR_STORE_FLD,
79     INSTR_STORE_FLD,
80     INSTR_STORE_FLD,
81 #if 0
82     INSTR_STORE_FLD, /* integer type */
83 #else
84     INSTR_STORE_FLD,
85 #endif
86
87     INSTR_STORE_V, /* variant, should never be accessed */
88
89     VINSTR_END, /* struct */
90     VINSTR_END, /* union  */
91     VINSTR_END, /* array  */
92     VINSTR_END, /* nil    */
93     VINSTR_END, /* noexpr */
94 };
95
96 const uint16_t type_storep_instr[TYPE_COUNT] = {
97     INSTR_STOREP_F, /* should use I when having integer support */
98     INSTR_STOREP_S,
99     INSTR_STOREP_F,
100     INSTR_STOREP_V,
101     INSTR_STOREP_ENT,
102     INSTR_STOREP_FLD,
103     INSTR_STOREP_FNC,
104     INSTR_STOREP_ENT, /* should use I */
105 #if 0
106     INSTR_STOREP_ENT, /* integer type */
107 #else
108     INSTR_STOREP_F,
109 #endif
110
111     INSTR_STOREP_V, /* variant, should never be accessed */
112
113     VINSTR_END, /* struct */
114     VINSTR_END, /* union  */
115     VINSTR_END, /* array  */
116     VINSTR_END, /* nil    */
117     VINSTR_END, /* noexpr */
118 };
119
120 const uint16_t type_eq_instr[TYPE_COUNT] = {
121     INSTR_EQ_F, /* should use I when having integer support */
122     INSTR_EQ_S,
123     INSTR_EQ_F,
124     INSTR_EQ_V,
125     INSTR_EQ_E,
126     INSTR_EQ_E, /* FLD has no comparison */
127     INSTR_EQ_FNC,
128     INSTR_EQ_E, /* should use I */
129 #if 0
130     INSTR_EQ_I,
131 #else
132     INSTR_EQ_F,
133 #endif
134
135     INSTR_EQ_V, /* variant, should never be accessed */
136
137     VINSTR_END, /* struct */
138     VINSTR_END, /* union  */
139     VINSTR_END, /* array  */
140     VINSTR_END, /* nil    */
141     VINSTR_END, /* noexpr */
142 };
143
144 const uint16_t type_ne_instr[TYPE_COUNT] = {
145     INSTR_NE_F, /* should use I when having integer support */
146     INSTR_NE_S,
147     INSTR_NE_F,
148     INSTR_NE_V,
149     INSTR_NE_E,
150     INSTR_NE_E, /* FLD has no comparison */
151     INSTR_NE_FNC,
152     INSTR_NE_E, /* should use I */
153 #if 0
154     INSTR_NE_I,
155 #else
156     INSTR_NE_F,
157 #endif
158
159     INSTR_NE_V, /* variant, should never be accessed */
160
161     VINSTR_END, /* struct */
162     VINSTR_END, /* union  */
163     VINSTR_END, /* array  */
164     VINSTR_END, /* nil    */
165     VINSTR_END, /* noexpr */
166 };
167
168 const uint16_t type_not_instr[TYPE_COUNT] = {
169     INSTR_NOT_F, /* should use I when having integer support */
170     VINSTR_END,  /* not to be used, depends on string related -f flags */
171     INSTR_NOT_F,
172     INSTR_NOT_V,
173     INSTR_NOT_ENT,
174     INSTR_NOT_ENT,
175     INSTR_NOT_FNC,
176     INSTR_NOT_ENT, /* should use I */
177 #if 0
178     INSTR_NOT_I, /* integer type */
179 #else
180     INSTR_NOT_F,
181 #endif
182
183     INSTR_NOT_V, /* variant, should never be accessed */
184
185     VINSTR_END, /* struct */
186     VINSTR_END, /* union  */
187     VINSTR_END, /* array  */
188     VINSTR_END, /* nil    */
189     VINSTR_END, /* noexpr */
190 };
191
192 /* protos */
193 static void            ir_function_dump(ir_function*, char *ind, int (*oprintf)(const char*,...));
194
195 static ir_value*       ir_block_create_general_instr(ir_block *self, lex_ctx_t, const char *label,
196                                                      int op, ir_value *a, ir_value *b, qc_type outype);
197 static bool GMQCC_WARN ir_block_create_store(ir_block*, lex_ctx_t, ir_value *target, ir_value *what);
198 static void            ir_block_dump(ir_block*, char *ind, int (*oprintf)(const char*,...));
199
200 static bool            ir_instr_op(ir_instr*, int op, ir_value *value, bool writing);
201 static void            ir_instr_dump(ir_instr* in, char *ind, int (*oprintf)(const char*,...));
202 /* error functions */
203
204 static void irerror(lex_ctx_t ctx, const char *msg, ...)
205 {
206     va_list ap;
207     va_start(ap, msg);
208     con_cvprintmsg(ctx, LVL_ERROR, "internal error", msg, ap);
209     va_end(ap);
210 }
211
212 static bool GMQCC_WARN irwarning(lex_ctx_t ctx, int warntype, const char *fmt, ...)
213 {
214     bool    r;
215     va_list ap;
216     va_start(ap, fmt);
217     r = vcompile_warning(ctx, warntype, fmt, ap);
218     va_end(ap);
219     return r;
220 }
221
222 /***********************************************************************
223  * Vector utility functions
224  */
225
226 static bool GMQCC_WARN vec_ir_value_find(std::vector<ir_value *> &vec, const ir_value *what, size_t *idx)
227 {
228     for (auto &it : vec) {
229         if (it != what)
230             continue;
231         if (idx)
232             *idx = &it - &vec[0];
233         return true;
234     }
235     return false;
236 }
237
238 static bool GMQCC_WARN vec_ir_block_find(ir_block **vec, ir_block *what, size_t *idx)
239 {
240     size_t i;
241     size_t len = vec_size(vec);
242     for (i = 0; i < len; ++i) {
243         if (vec[i] == what) {
244             if (idx) *idx = i;
245             return true;
246         }
247     }
248     return false;
249 }
250
251 static bool GMQCC_WARN vec_ir_instr_find(std::vector<ir_instr *> &vec, ir_instr *what, size_t *idx)
252 {
253     for (auto &it : vec) {
254         if (it != what)
255             continue;
256         if (idx)
257             *idx = &it - &vec[0];
258         return true;
259     }
260     return false;
261 }
262
263 /***********************************************************************
264  * IR Builder
265  */
266
267 static void ir_block_delete_quick(ir_block* self);
268 static void ir_instr_delete_quick(ir_instr *self);
269 static void ir_function_delete_quick(ir_function *self);
270
271 ir_builder::ir_builder(const std::string& modulename)
272 : m_name(modulename),
273   m_code(new code_t)
274 {
275     m_htglobals   = util_htnew(IR_HT_SIZE);
276     m_htfields    = util_htnew(IR_HT_SIZE);
277     m_htfunctions = util_htnew(IR_HT_SIZE);
278
279     m_nil = new ir_value("nil", store_value, TYPE_NIL);
280     m_nil->m_cvq = CV_CONST;
281
282     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
283         /* we write to them, but they're not supposed to be used outside the IR, so
284          * let's not allow the generation of ir_instrs which use these.
285          * So it's a constant noexpr.
286          */
287         m_vinstr_temp[i] = new ir_value("vinstr_temp", store_value, TYPE_NOEXPR);
288         m_vinstr_temp[i]->m_cvq = CV_CONST;
289     }
290 }
291
292 ir_builder::~ir_builder()
293 {
294     util_htdel(m_htglobals);
295     util_htdel(m_htfields);
296     util_htdel(m_htfunctions);
297     for (auto& f : m_functions)
298         ir_function_delete_quick(f.release());
299     m_functions.clear(); // delete them now before deleting the rest:
300
301     delete m_nil;
302
303     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
304         delete m_vinstr_temp[i];
305     }
306
307     m_extparams.clear();
308     m_extparam_protos.clear();
309 }
310
311 ir_function* ir_builder::createFunction(const std::string& name, qc_type outtype)
312 {
313     ir_function *fn = (ir_function*)util_htget(m_htfunctions, name.c_str());
314     if (fn)
315         return nullptr;
316
317     fn = new ir_function(this, outtype);
318     fn->m_name = name;
319     m_functions.emplace_back(fn);
320     util_htset(m_htfunctions, name.c_str(), fn);
321
322     fn->m_value = createGlobal(fn->m_name, TYPE_FUNCTION);
323     if (!fn->m_value) {
324         delete fn;
325         return nullptr;
326     }
327
328     fn->m_value->m_hasvalue = true;
329     fn->m_value->m_outtype = outtype;
330     fn->m_value->m_constval.vfunc = fn;
331     fn->m_value->m_context = fn->m_context;
332
333     return fn;
334 }
335
336 ir_value* ir_builder::createGlobal(const std::string& name, qc_type vtype)
337 {
338     ir_value *ve;
339
340     if (name[0] != '#')
341     {
342         ve = (ir_value*)util_htget(m_htglobals, name.c_str());
343         if (ve) {
344             return nullptr;
345         }
346     }
347
348     ve = new ir_value(std::string(name), store_global, vtype);
349     m_globals.emplace_back(ve);
350     util_htset(m_htglobals, name.c_str(), ve);
351     return ve;
352 }
353
354 ir_value* ir_builder::get_va_count()
355 {
356     if (m_reserved_va_count)
357         return m_reserved_va_count;
358     return (m_reserved_va_count = createGlobal("reserved:va_count", TYPE_FLOAT));
359 }
360
361 ir_value* ir_builder::createField(const std::string& name, qc_type vtype)
362 {
363     ir_value *ve = (ir_value*)util_htget(m_htfields, name.c_str());
364     if (ve) {
365         return nullptr;
366     }
367
368     ve = new ir_value(std::string(name), store_global, TYPE_FIELD);
369     ve->m_fieldtype = vtype;
370     m_fields.emplace_back(ve);
371     util_htset(m_htfields, name.c_str(), ve);
372     return ve;
373 }
374
375 /***********************************************************************
376  * IR Function
377  */
378
379 static bool ir_function_naive_phi(ir_function*);
380 static void ir_function_enumerate(ir_function*);
381 static bool ir_function_calculate_liferanges(ir_function*);
382 static bool ir_function_allocate_locals(ir_function*);
383
384 ir_function::ir_function(ir_builder* owner_, qc_type outtype_)
385 : m_owner(owner_),
386   m_name("<@unnamed>"),
387   m_outtype(outtype_)
388 {
389     m_context.file = "<@no context>";
390     m_context.line = 0;
391 }
392
393 ir_function::~ir_function()
394 {
395 }
396
397 static void ir_function_delete_quick(ir_function *self)
398 {
399     for (auto& b : self->m_blocks)
400         ir_block_delete_quick(b.release());
401     delete self;
402 }
403
404 static void ir_function_collect_value(ir_function *self, ir_value *v)
405 {
406     self->m_values.emplace_back(v);
407 }
408
409 ir_block* ir_function_create_block(lex_ctx_t ctx, ir_function *self, const char *label)
410 {
411     ir_block* bn = new ir_block(self, label ? std::string(label) : std::string());
412     bn->m_context = ctx;
413     self->m_blocks.emplace_back(bn);
414
415     if ((self->m_flags & IR_FLAG_BLOCK_COVERAGE) && self->m_owner->m_coverage_func)
416         (void)ir_block_create_call(bn, ctx, nullptr, self->m_owner->m_coverage_func, false);
417
418     return bn;
419 }
420
421 static bool instr_is_operation(uint16_t op)
422 {
423     return ( (op >= INSTR_MUL_F  && op <= INSTR_GT) ||
424              (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) ||
425              (op == INSTR_ADDRESS) ||
426              (op >= INSTR_NOT_F  && op <= INSTR_NOT_FNC) ||
427              (op >= INSTR_AND    && op <= INSTR_BITOR) ||
428              (op >= INSTR_CALL0  && op <= INSTR_CALL8) ||
429              (op >= VINSTR_BITAND_V && op <= VINSTR_NEG_V) );
430 }
431
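/* Peephole pass. The main pattern collapses
 *     OP      %tmp, a, b
 *     STORE_F dst, %tmp
 * into "OP dst, a, b" when %tmp is an SSA temp whose only read is that store.
 * It also rewrites a COND whose operand is produced by a NOT by dropping the
 * NOT and swapping the COND's ontrue/onfalse targets.
 */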
432 static bool ir_function_pass_peephole(ir_function *self)
433 {
434     for (auto& bp : self->m_blocks) {
435         ir_block *block = bp.get();
436         for (size_t i = 0; i < vec_size(block->m_instr); ++i) {
437             ir_instr *inst;
438             inst = block->m_instr[i];
439
440             if (i >= 1 &&
441                 (inst->m_opcode >= INSTR_STORE_F &&
442                  inst->m_opcode <= INSTR_STORE_FNC))
443             {
444                 ir_instr *store;
445                 ir_instr *oper;
446                 ir_value *value;
447
448                 store = inst;
449
450                 oper  = block->m_instr[i-1];
451                 if (!instr_is_operation(oper->m_opcode))
452                     continue;
453
454                 /* Don't change semantics of MUL_VF in engines where these may not alias. */
455                 if (OPTS_FLAG(LEGACY_VECTOR_MATHS)) {
456                     if (oper->m_opcode == INSTR_MUL_VF && oper->_m_ops[2]->m_memberof == oper->_m_ops[1])
457                         continue;
458                     if (oper->m_opcode == INSTR_MUL_FV && oper->_m_ops[1]->m_memberof == oper->_m_ops[2])
459                         continue;
460                 }
461
462                 value = oper->_m_ops[0];
463
464                 /* only do it for SSA values */
465                 if (value->m_store != store_value)
466                     continue;
467
468                 /* don't optimize out the temp if it's used later again */
469                 if (value->m_reads.size() != 1)
470                     continue;
471
472                 /* The very next store must use this value */
473                 if (value->m_reads[0] != store)
474                     continue;
475
476                 /* And of course the store must _read_ from it, so it's in
477                  * OP 1 */
478                 if (store->_m_ops[1] != value)
479                     continue;
480
481                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
482                 (void)!ir_instr_op(oper, 0, store->_m_ops[0], true);
483
484                 vec_remove(block->m_instr, i, 1);
485                 delete store;
486             }
487             else if (inst->m_opcode == VINSTR_COND)
488             {
489                 /* A COND on a value resulting from a NOT can
490                  * drop the NOT and swap the branch targets instead
491                  */
492                 while (true) {
493                     ir_block *tmp;
494                     size_t    inotid;
495                     ir_instr *inot;
496                     ir_value *value;
497                     value = inst->_m_ops[0];
498
499                     if (value->m_store != store_value || value->m_reads.size() != 1 || value->m_reads[0] != inst)
500                         break;
501
502                     inot = value->m_writes[0];
503                     if (inot->_m_ops[0] != value ||
504                         inot->m_opcode < INSTR_NOT_F ||
505                         inot->m_opcode > INSTR_NOT_FNC ||
506                         inot->m_opcode == INSTR_NOT_V || /* can't do these */
507                         inot->m_opcode == INSTR_NOT_S)
508                     {
509                         break;
510                     }
511
512                     /* count */
513                     ++opts_optimizationcount[OPTIM_PEEPHOLE];
514                     /* change operand */
515                     (void)!ir_instr_op(inst, 0, inot->_m_ops[1], false);
516                     /* remove NOT */
517                     tmp = inot->m_owner;
518                     for (inotid = 0; inotid < vec_size(tmp->m_instr); ++inotid) {
519                         if (tmp->m_instr[inotid] == inot)
520                             break;
521                     }
522                     if (inotid >= vec_size(tmp->m_instr)) {
523                         compile_error(inst->m_context, "sanity-check failed: failed to find instruction to optimize out");
524                         return false;
525                     }
526                     vec_remove(tmp->m_instr, inotid, 1);
527                     delete inot;
528                     /* swap ontrue/onfalse */
529                     tmp = inst->m_bops[0];
530                     inst->m_bops[0] = inst->m_bops[1];
531                     inst->m_bops[1] = tmp;
532                 }
533                 continue;
534             }
535         }
536     }
537
538     return true;
539 }
540
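/* Tail-recursion pass. A block ending in
 *     CALLn  self, ...
 *     RETURN %ret            (possibly with a STORE of %ret in between)
 * is rewritten to store the call's parameters into the function's own
 * parameter locals and jump back to the entry block instead of calling.
 */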
541 static bool ir_function_pass_tailrecursion(ir_function *self)
542 {
543     size_t p;
544
545     for (auto& bp : self->m_blocks) {
546         ir_block *block = bp.get();
547
548         ir_value *funcval;
549         ir_instr *ret, *call, *store = nullptr;
550
551         if (!block->m_final || vec_size(block->m_instr) < 2)
552             continue;
553
554         ret = block->m_instr[vec_size(block->m_instr)-1];
555         if (ret->m_opcode != INSTR_DONE && ret->m_opcode != INSTR_RETURN)
556             continue;
557
558         call = block->m_instr[vec_size(block->m_instr)-2];
559         if (call->m_opcode >= INSTR_STORE_F && call->m_opcode <= INSTR_STORE_FNC) {
560             /* account for the unoptimized
561              * CALL
562              * STORE %return, %tmp
563              * RETURN %tmp
564              * version
565              */
566             if (vec_size(block->m_instr) < 3)
567                 continue;
568
569             store = call;
570             call = block->m_instr[vec_size(block->m_instr)-3];
571         }
572
573         if (call->m_opcode < INSTR_CALL0 || call->m_opcode > INSTR_CALL8)
574             continue;
575
576         if (store) {
577             /* optimize out the STORE */
578             if (ret->_m_ops[0]   &&
579                 ret->_m_ops[0]   == store->_m_ops[0] &&
580                 store->_m_ops[1] == call->_m_ops[0])
581             {
582                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
583                 call->_m_ops[0] = store->_m_ops[0];
584                 vec_remove(block->m_instr, vec_size(block->m_instr) - 2, 1);
585                 delete store;
586             }
587             else
588                 continue;
589         }
590
591         if (!call->_m_ops[0])
592             continue;
593
594         funcval = call->_m_ops[1];
595         if (!funcval)
596             continue;
597         if (funcval->m_vtype != TYPE_FUNCTION || funcval->m_constval.vfunc != self)
598             continue;
599
600         /* now we have a CALL and a RET, check if it's a tailcall */
601         if (ret->_m_ops[0] && call->_m_ops[0] != ret->_m_ops[0])
602             continue;
603
604         ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
605         vec_shrinkby(block->m_instr, 2);
606
607         block->m_final = false; /* open it back up */
608
609         /* emit parameter-stores */
610         for (p = 0; p < call->m_params.size(); ++p) {
611             /* assert(call->params_count <= self->locals_count); */
612             if (!ir_block_create_store(block, call->m_context, self->m_locals[p].get(), call->m_params[p])) {
613                 irerror(call->m_context, "failed to create tailcall store instruction for parameter %i", (int)p);
614                 return false;
615             }
616         }
617         if (!ir_block_create_jump(block, call->m_context, self->m_blocks[0].get())) {
618             irerror(call->m_context, "failed to create tailcall jump");
619             return false;
620         }
621
622         delete call;
623         delete ret;
624     }
625
626     return true;
627 }
628
629 bool ir_function_finalize(ir_function *self)
630 {
631     if (self->m_builtin)
632         return true;
633
634     for (auto& lp : self->m_locals) {
635         ir_value *v = lp.get();
636         if (v->m_reads.empty() && v->m_writes.size() && !(v->m_flags & IR_FLAG_NOREF)) {
637             // if it's a vector, check that all of its members are unused before
638             // claiming it's unused; otherwise skip the vector entirely
639             if (v->m_vtype == TYPE_VECTOR)
640             {
641                 size_t mask = (1 << 0) | (1 << 1) | (1 << 2), bits = 0;
642                 for (size_t i = 0; i < 3; i++)
643                     if (!v->m_members[i] || (v->m_members[i]->m_reads.empty()
644                         && v->m_members[i]->m_writes.size()))
645                         bits |= (1 << i);
646                 // all components are unused so just report the vector
647                 if (bits == mask && irwarning(v->m_context, WARN_UNUSED_VARIABLE,
648                     "unused variable: `%s`", v->m_name.c_str()))
649                     return false;
650                 else if (bits != mask)
651                     // individual components are unused so mention them
652                     for (size_t i = 0; i < 3; i++)
653                         if ((bits & (1 << i))
654                             && irwarning(v->m_context, WARN_UNUSED_COMPONENT,
655                                 "unused vector component: `%s.%c`", v->m_name.c_str(), "xyz"[i]))
656                             return false;
657             }
658             // just a standard variable
659             else if (v->m_name[0] != '#'
660                 && irwarning(v->m_context, WARN_UNUSED_VARIABLE,
661                     "unused variable: `%s`", v->m_name.c_str())) return false;
662         }
663     }
664
665     if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
666         if (!ir_function_pass_peephole(self)) {
667             irerror(self->m_context, "generic optimization pass broke something in `%s`", self->m_name.c_str());
668             return false;
669         }
670     }
671
672     if (OPTS_OPTIMIZATION(OPTIM_TAIL_RECURSION)) {
673         if (!ir_function_pass_tailrecursion(self)) {
674             irerror(self->m_context, "tail-recursion optimization pass broke something in `%s`", self->m_name.c_str());
675             return false;
676         }
677     }
678
679     if (!ir_function_naive_phi(self)) {
680         irerror(self->m_context, "internal error: ir_function_naive_phi failed");
681         return false;
682     }
683
684     for (auto& lp : self->m_locals) {
685         ir_value *v = lp.get();
686         if (v->m_vtype == TYPE_VECTOR ||
687             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
688         {
689             v->vectorMember(0);
690             v->vectorMember(1);
691             v->vectorMember(2);
692         }
693     }
694     for (auto& vp : self->m_values) {
695         ir_value *v = vp.get();
696         if (v->m_vtype == TYPE_VECTOR ||
697             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
698         {
699             v->vectorMember(0);
700             v->vectorMember(1);
701             v->vectorMember(2);
702         }
703     }
704
705     ir_function_enumerate(self);
706
707     if (!ir_function_calculate_liferanges(self))
708         return false;
709     if (!ir_function_allocate_locals(self))
710         return false;
711     return true;
712 }
713
714 ir_value* ir_function_create_local(ir_function *self, const std::string& name, qc_type vtype, bool param)
715 {
716     ir_value *ve;
717
718     if (param &&
719         !self->m_locals.empty() &&
720         self->m_locals.back()->m_store != store_param)
721     {
722         irerror(self->m_context, "cannot add parameters after adding locals");
723         return nullptr;
724     }
725
726     ve = new ir_value(std::string(name), (param ? store_param : store_local), vtype);
727     if (param)
728         ve->m_locked = true;
729     self->m_locals.emplace_back(ve);
730     return ve;
731 }
732
733 /***********************************************************************
734  * IR Block
735  */
736
737 ir_block::ir_block(ir_function* owner, const std::string& name)
738 : m_owner(owner),
739   m_label(name)
740 {
741     m_context.file = "<@no context>";
742     m_context.line = 0;
743 }
744
745 ir_block::~ir_block()
746 {
747     for (size_t i = 0; i != vec_size(m_instr); ++i)
748         delete m_instr[i];
749     vec_free(m_instr);
750     vec_free(m_entries);
751     vec_free(m_exits);
752 }
753
754 static void ir_block_delete_quick(ir_block* self)
755 {
756     size_t i;
757     for (i = 0; i != vec_size(self->m_instr); ++i)
758         ir_instr_delete_quick(self->m_instr[i]);
759     vec_free(self->m_instr);
760     delete self;
761 }
762
763 /***********************************************************************
764  *IR Instructions
765  */
766
767 ir_instr::ir_instr(lex_ctx_t ctx, ir_block* owner_, int op)
768 : m_opcode(op),
769   m_context(ctx),
770   m_owner(owner_)
771 {
772 }
773
774 ir_instr::~ir_instr()
775 {
776     // The following calls can only delete from
777     // vectors; we still want to delete this instruction,
778     // so ignore the return value. Since with the warn_unused_result attribute
779     // gcc does not honor an explicit (void)foo(); cast to ignore the result,
780     // we improvise and use (void)!foo(); instead.
781     for (auto &it : m_phi) {
782         size_t idx;
783         if (vec_ir_instr_find(it.value->m_writes, this, &idx))
784             it.value->m_writes.erase(it.value->m_writes.begin() + idx);
785         if (vec_ir_instr_find(it.value->m_reads, this, &idx))
786             it.value->m_reads.erase(it.value->m_reads.begin() + idx);
787     }
788     for (auto &it : m_params) {
789         size_t idx;
790         if (vec_ir_instr_find(it->m_writes, this, &idx))
791             it->m_writes.erase(it->m_writes.begin() + idx);
792         if (vec_ir_instr_find(it->m_reads, this, &idx))
793             it->m_reads.erase(it->m_reads.begin() + idx);
794     }
795     (void)!ir_instr_op(this, 0, nullptr, false);
796     (void)!ir_instr_op(this, 1, nullptr, false);
797     (void)!ir_instr_op(this, 2, nullptr, false);
798 }
799
800 static void ir_instr_delete_quick(ir_instr *self)
801 {
802     self->m_phi.clear();
803     self->m_params.clear();
804     self->_m_ops[0] = nullptr;
805     self->_m_ops[1] = nullptr;
806     self->_m_ops[2] = nullptr;
807     delete self;
808 }
809
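/* Set operand slot 'op' (0..2) of an instruction. The previous operand, if
 * any, drops this instruction from its read/write list and the new operand
 * records it, keeping the use/def bookkeeping consistent for liveness.
 */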
810 static bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing)
811 {
812     if (v && v->m_vtype == TYPE_NOEXPR) {
813         irerror(self->m_context, "tried to use a NOEXPR value");
814         return false;
815     }
816
817     if (self->_m_ops[op]) {
818         size_t idx;
819         if (writing && vec_ir_instr_find(self->_m_ops[op]->m_writes, self, &idx))
820             self->_m_ops[op]->m_writes.erase(self->_m_ops[op]->m_writes.begin() + idx);
821         else if (vec_ir_instr_find(self->_m_ops[op]->m_reads, self, &idx))
822             self->_m_ops[op]->m_reads.erase(self->_m_ops[op]->m_reads.begin() + idx);
823     }
824     if (v) {
825         if (writing)
826             v->m_writes.push_back(self);
827         else
828             v->m_reads.push_back(self);
829     }
830     self->_m_ops[op] = v;
831     return true;
832 }
833
834 /***********************************************************************
835  *IR Value
836  */
837
838 void ir_value::setCodeAddress(int32_t gaddr)
839 {
840     m_code.globaladdr = gaddr;
841     if (m_members[0]) m_members[0]->m_code.globaladdr = gaddr;
842     if (m_members[1]) m_members[1]->m_code.globaladdr = gaddr;
843     if (m_members[2]) m_members[2]->m_code.globaladdr = gaddr;
844 }
845
846 int32_t ir_value::codeAddress() const
847 {
848     if (m_store == store_return)
849         return OFS_RETURN + m_code.addroffset;
850     return m_code.globaladdr + m_code.addroffset;
851 }
852
853 ir_value::ir_value(std::string&& name_, store_type store_, qc_type vtype_)
854     : m_name(move(name_))
855     , m_vtype(vtype_)
856     , m_store(store_)
857 {
858     m_fieldtype = TYPE_VOID;
859     m_outtype = TYPE_VOID;
860     m_flags = 0;
861
862     m_cvq          = CV_NONE;
863     m_hasvalue     = false;
864     m_context.file = "<@no context>";
865     m_context.line = 0;
866
867     memset(&m_constval, 0, sizeof(m_constval));
868     memset(&m_code,     0, sizeof(m_code));
869
870     m_members[0] = nullptr;
871     m_members[1] = nullptr;
872     m_members[2] = nullptr;
873     m_memberof = nullptr;
874
875     m_unique_life = false;
876     m_locked = false;
877     m_callparam  = false;
878 }
879
880 ir_value::ir_value(ir_function *owner, std::string&& name, store_type storetype, qc_type vtype)
881     : ir_value(move(name), storetype, vtype)
882 {
883     ir_function_collect_value(owner, this);
884 }
885
886 ir_value::~ir_value()
887 {
888     size_t i;
889     if (m_hasvalue) {
890         if (m_vtype == TYPE_STRING)
891             mem_d((void*)m_constval.vstring);
892     }
893     if (!(m_flags & IR_FLAG_SPLIT_VECTOR)) {
894         for (i = 0; i < 3; ++i) {
895             if (m_members[i])
896                 delete m_members[i];
897         }
898     }
899 }
900
901
902 /*  helper function */
903 ir_value* ir_builder::literalFloat(float value, bool add_to_list) {
904     ir_value *v = new ir_value("#IMMEDIATE", store_global, TYPE_FLOAT);
905     v->m_flags |= IR_FLAG_ERASABLE;
906     v->m_hasvalue = true;
907     v->m_cvq = CV_CONST;
908     v->m_constval.vfloat = value;
909
910     m_globals.emplace_back(v);
911     if (add_to_list)
912         m_const_floats.emplace_back(v);
913     return v;
914 }
915
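/* Lazily create the _x/_y/_z member view of a vector (or field-of-vector)
 * value. A member is a float (or float field) that shares the parent's code
 * address and differs only in its addroffset of 0, 1 or 2.
 */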
916 ir_value* ir_value::vectorMember(unsigned int member)
917 {
918     std::string name;
919     ir_value *m;
920     if (member >= 3)
921         return nullptr;
922
923     if (m_members[member])
924         return m_members[member];
925
926     if (!m_name.empty()) {
927         char member_name[3] = { '_', char('x' + member), 0 };
928         name = m_name + member_name;
929     }
930
931     if (m_vtype == TYPE_VECTOR)
932     {
933         m = new ir_value(move(name), m_store, TYPE_FLOAT);
934         if (!m)
935             return nullptr;
936         m->m_context = m_context;
937
938         m_members[member] = m;
939         m->m_code.addroffset = member;
940     }
941     else if (m_vtype == TYPE_FIELD)
942     {
943         if (m_fieldtype != TYPE_VECTOR)
944             return nullptr;
945         m = new ir_value(move(name), m_store, TYPE_FIELD);
946         if (!m)
947             return nullptr;
948         m->m_fieldtype = TYPE_FLOAT;
949         m->m_context = m_context;
950
951         m_members[member] = m;
952         m->m_code.addroffset = member;
953     }
954     else
955     {
956         irerror(m_context, "invalid member access on %s", m_name.c_str());
957         return nullptr;
958     }
959
960     m->m_memberof = this;
961     return m;
962 }
963
964 size_t ir_value::size() const {
965     if (m_vtype == TYPE_FIELD && m_fieldtype == TYPE_VECTOR)
966         return type_sizeof_[TYPE_VECTOR];
967     return type_sizeof_[m_vtype];
968 }
969
970 bool ir_value::setFloat(float f)
971 {
972     if (m_vtype != TYPE_FLOAT)
973         return false;
974     m_constval.vfloat = f;
975     m_hasvalue = true;
976     return true;
977 }
978
979 bool ir_value::setFunc(int f)
980 {
981     if (m_vtype != TYPE_FUNCTION)
982         return false;
983     m_constval.vint = f;
984     m_hasvalue = true;
985     return true;
986 }
987
988 bool ir_value::setVector(vec3_t v)
989 {
990     if (m_vtype != TYPE_VECTOR)
991         return false;
992     m_constval.vvec = v;
993     m_hasvalue = true;
994     return true;
995 }
996
997 bool ir_value::setField(ir_value *fld)
998 {
999     if (m_vtype != TYPE_FIELD)
1000         return false;
1001     m_constval.vpointer = fld;
1002     m_hasvalue = true;
1003     return true;
1004 }
1005
1006 bool ir_value::setString(const char *str)
1007 {
1008     if (m_vtype != TYPE_STRING)
1009         return false;
1010     m_constval.vstring = util_strdupe(str);
1011     m_hasvalue = true;
1012     return true;
1013 }
1014
1015 #if 0
1016 bool ir_value::setInt(int i)
1017 {
1018     if (m_vtype != TYPE_INTEGER)
1019         return false;
1020     m_constval.vint = i;
1021     m_hasvalue = true;
1022     return true;
1023 }
1024 #endif
1025
1026 bool ir_value::lives(size_t at)
1027 {
1028     for (auto& l : m_life) {
1029         if (l.start <= at && at <= l.end)
1030             return true;
1031         if (l.start > at) /* since it's ordered */
1032             return false;
1033     }
1034     return false;
1035 }
1036
1037 bool ir_value::insertLife(size_t idx, ir_life_entry_t e)
1038 {
1039     m_life.insert(m_life.begin() + idx, e);
1040     return true;
1041 }
1042
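/* Mark instruction id 's' as live for this value. The life list is kept as
 * ordered, disjoint [start,end] ranges, so this either extends an adjacent
 * range, merges two ranges that become adjacent, or inserts a new one-point
 * range. E.g. starting from [2,4],[8,9]: setAlive(5) gives [2,5],[8,9] and a
 * following setAlive(7) gives [2,5],[7,9].
 */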
1043 bool ir_value::setAlive(size_t s)
1044 {
1045     size_t i;
1046     const size_t vs = m_life.size();
1047     ir_life_entry_t *life_found = nullptr;
1048     ir_life_entry_t *before = nullptr;
1049     ir_life_entry_t new_entry;
1050
1051     /* Find the first range that starts after s */
1052     for (i = 0; i < vs; ++i)
1053     {
1054         before = life_found;
1055         life_found = &m_life[i];
1056         if (life_found->start > s)
1057             break;
1058     }
1059     /* nothing found? append */
1060     if (i == vs) {
1061         ir_life_entry_t e;
1062         if (life_found && life_found->end+1 == s)
1063         {
1064             /* previous life range can be merged in */
1065             life_found->end++;
1066             return true;
1067         }
1068         if (life_found && life_found->end >= s)
1069             return false;
1070         e.start = e.end = s;
1071         m_life.emplace_back(e);
1072         return true;
1073     }
1074     /* found */
1075     if (before)
1076     {
1077         if (before->end + 1 == s &&
1078             life_found->start - 1 == s)
1079         {
1080             /* merge */
1081             before->end = life_found->end;
1082             m_life.erase(m_life.begin()+i);
1083             return true;
1084         }
1085         if (before->end + 1 == s)
1086         {
1087             /* extend before */
1088             before->end++;
1089             return true;
1090         }
1091         /* already contained */
1092         if (before->end >= s)
1093             return false;
1094     }
1095     /* extend */
1096     if (life_found->start - 1 == s)
1097     {
1098         life_found->start--;
1099         return true;
1100     }
1101     /* insert a new entry */
1102     new_entry.start = new_entry.end = s;
1103     return insertLife(i, new_entry);
1104 }
1105
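/* Merge another value's life ranges into ours, i.e. take the interval union
 * while keeping the list ordered and disjoint. E.g. merging [3,5] into
 * [1,4],[8,9] gives [1,5],[8,9], and then merging [5,8] gives [1,9].
 */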
1106 bool ir_value::mergeLife(const ir_value *other)
1107 {
1108     size_t i, myi;
1109
1110     if (other->m_life.empty())
1111         return true;
1112
1113     if (m_life.empty()) {
1114         m_life = other->m_life;
1115         return true;
1116     }
1117
1118     myi = 0;
1119     for (i = 0; i < other->m_life.size(); ++i)
1120     {
1121         const ir_life_entry_t &otherlife = other->m_life[i];
1122         while (true)
1123         {
1124             ir_life_entry_t *entry = &m_life[myi];
1125
1126             if (otherlife.end+1 < entry->start)
1127             {
1128                 /* adding an interval before entry */
1129                 if (!insertLife(myi, otherlife))
1130                     return false;
1131                 ++myi;
1132                 break;
1133             }
1134
1135             if (otherlife.start <  entry->start &&
1136                 otherlife.end+1 >= entry->start)
1137             {
1138                 /* starts earlier and overlaps */
1139                 entry->start = otherlife.start;
1140             }
1141
1142             if (otherlife.end   >  entry->end &&
1143                 otherlife.start <= entry->end+1)
1144             {
1145                 /* ends later and overlaps */
1146                 entry->end = otherlife.end;
1147             }
1148
1149             /* see if our change combines it with the next ranges */
1150             while (myi+1 < m_life.size() &&
1151                    entry->end+1 >= m_life[1+myi].start)
1152             {
1153                 /* overlaps with (myi+1) */
1154                 if (entry->end < m_life[1+myi].end)
1155                     entry->end = m_life[1+myi].end;
1156                 m_life.erase(m_life.begin() + (myi + 1));
1157                 entry = &m_life[myi];
1158             }
1159
1160             /* see if we're after the entry */
1161             if (otherlife.start > entry->end)
1162             {
1163                 ++myi;
1164                 /* append if we're at the end */
1165                 if (myi >= m_life.size()) {
1166                     m_life.emplace_back(otherlife);
1167                     break;
1168                 }
1169                 /* otherwise check the next range */
1170                 continue;
1171             }
1172             break;
1173         }
1174     }
1175     return true;
1176 }
1177
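/* Two values can only share a storage slot if their life ranges never
 * overlap; this is that check. Note the strict '<' comparison below, so
 * ranges that merely touch at a single instruction id do not count as
 * overlapping.
 */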
1178 static bool ir_values_overlap(const ir_value *a, const ir_value *b)
1179 {
1180     /* For any life entry in A see if it overlaps with
1181      * any life entry in B.
1182      * Note that the life entries are ordered, so we can use a
1183      * more efficient algorithm than naively translating the
1184      * statement above.
1185      */
1186
1187     const ir_life_entry_t *la, *lb, *enda, *endb;
1188
1189     /* first of all, if either has no life range, they cannot clash */
1190     if (a->m_life.empty() || b->m_life.empty())
1191         return false;
1192
1193     la = &a->m_life.front();
1194     lb = &b->m_life.front();
1195     enda = &a->m_life.back() + 1;
1196     endb = &b->m_life.back() + 1;
1197     while (true)
1198     {
1199         /* check if the entries overlap, for that,
1200          * both must start before the other one ends.
1201          */
1202         if (la->start < lb->end &&
1203             lb->start < la->end)
1204         {
1205             return true;
1206         }
1207
1208         /* entries are ordered
1209          * one entry is earlier than the other
1210          * that earlier entry will be moved forward
1211          */
1212         if (la->start < lb->start)
1213         {
1214             /* order: A B, move A forward
1215              * check if we hit the end with A
1216              */
1217             if (++la == enda)
1218                 break;
1219         }
1220         else /* if (lb->start < la->start)  actually <= */
1221         {
1222             /* order: B A, move B forward
1223              * check if we hit the end with B
1224              */
1225             if (++lb == endb)
1226                 break;
1227         }
1228     }
1229     return false;
1230 }
1231
1232 /***********************************************************************
1233  * IR main operations
1234  */
1235
1236 static bool ir_check_unreachable(ir_block *self)
1237 {
1238     /* The IR should never have to deal with unreachable code */
1239     if (!self->m_final/* || OPTS_FLAG(ALLOW_UNREACHABLE_CODE)*/)
1240         return true;
1241     irerror(self->m_context, "unreachable statement (%s)", self->m_label.c_str());
1242     return false;
1243 }
1244
1245 bool ir_block_create_store_op(ir_block *self, lex_ctx_t ctx, int op, ir_value *target, ir_value *what)
1246 {
1247     ir_instr *in;
1248     if (!ir_check_unreachable(self))
1249         return false;
1250
1251     if (target->m_store == store_value &&
1252         (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC))
1253     {
1254         irerror(self->m_context, "cannot store to an SSA value");
1255         irerror(self->m_context, "trying to store: %s <- %s", target->m_name.c_str(), what->m_name.c_str());
1256         irerror(self->m_context, "instruction: %s", util_instr_str[op]);
1257         return false;
1258     }
1259
1260     in = new ir_instr(ctx, self, op);
1261     if (!in)
1262         return false;
1263
1264     if (!ir_instr_op(in, 0, target, (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC)) ||
1265         !ir_instr_op(in, 1, what, false))
1266     {
1267         delete in;
1268         return false;
1269     }
1270     vec_push(self->m_instr, in);
1271     return true;
1272 }
1273
1274 bool ir_block_create_state_op(ir_block *self, lex_ctx_t ctx, ir_value *frame, ir_value *think)
1275 {
1276     ir_instr *in;
1277     if (!ir_check_unreachable(self))
1278         return false;
1279
1280     in = new ir_instr(ctx, self, INSTR_STATE);
1281     if (!in)
1282         return false;
1283
1284     if (!ir_instr_op(in, 0, frame, false) ||
1285         !ir_instr_op(in, 1, think, false))
1286     {
1287         delete in;
1288         return false;
1289     }
1290     vec_push(self->m_instr, in);
1291     return true;
1292 }
1293
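/* Choose the STORE_* opcode from the target's type, or from the source's type
 * when the target is a variant, then defer to ir_block_create_store_op. With
 * ADJUST_VECTOR_FIELDS enabled, storing a vector-typed field value uses
 * STORE_V instead of STORE_FLD.
 */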
1294 static bool ir_block_create_store(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1295 {
1296     int op = 0;
1297     qc_type vtype;
1298     if (target->m_vtype == TYPE_VARIANT)
1299         vtype = what->m_vtype;
1300     else
1301         vtype = target->m_vtype;
1302
1303 #if 0
1304     if      (vtype == TYPE_FLOAT   && what->m_vtype == TYPE_INTEGER)
1305         op = INSTR_CONV_ITOF;
1306     else if (vtype == TYPE_INTEGER && what->m_vtype == TYPE_FLOAT)
1307         op = INSTR_CONV_FTOI;
1308 #endif
1309         op = type_store_instr[vtype];
1310
1311     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1312         if (op == INSTR_STORE_FLD && what->m_fieldtype == TYPE_VECTOR)
1313             op = INSTR_STORE_V;
1314     }
1315
1316     return ir_block_create_store_op(self, ctx, op, target, what);
1317 }
1318
1319 bool ir_block_create_storep(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1320 {
1321     int op = 0;
1322     qc_type vtype;
1323
1324     if (target->m_vtype != TYPE_POINTER)
1325         return false;
1326
1327     /* storing using pointer - target is a pointer, type must be
1328      * inferred from source
1329      */
1330     vtype = what->m_vtype;
1331
1332     op = type_storep_instr[vtype];
1333     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1334         if (op == INSTR_STOREP_FLD && what->m_fieldtype == TYPE_VECTOR)
1335             op = INSTR_STOREP_V;
1336     }
1337
1338     return ir_block_create_store_op(self, ctx, op, target, what);
1339 }
1340
1341 bool ir_block_create_return(ir_block *self, lex_ctx_t ctx, ir_value *v)
1342 {
1343     ir_instr *in;
1344     if (!ir_check_unreachable(self))
1345         return false;
1346
1347     self->m_final = true;
1348
1349     self->m_is_return = true;
1350     in = new ir_instr(ctx, self, INSTR_RETURN);
1351     if (!in)
1352         return false;
1353
1354     if (v && !ir_instr_op(in, 0, v, false)) {
1355         delete in;
1356         return false;
1357     }
1358
1359     vec_push(self->m_instr, in);
1360     return true;
1361 }
1362
1363 bool ir_block_create_if(ir_block *self, lex_ctx_t ctx, ir_value *v,
1364                         ir_block *ontrue, ir_block *onfalse)
1365 {
1366     ir_instr *in;
1367     if (!ir_check_unreachable(self))
1368         return false;
1369     self->m_final = true;
1370     /*in = new ir_instr(ctx, self, (v->m_vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/
1371     in = new ir_instr(ctx, self, VINSTR_COND);
1372     if (!in)
1373         return false;
1374
1375     if (!ir_instr_op(in, 0, v, false)) {
1376         delete in;
1377         return false;
1378     }
1379
1380     in->m_bops[0] = ontrue;
1381     in->m_bops[1] = onfalse;
1382
1383     vec_push(self->m_instr, in);
1384
1385     vec_push(self->m_exits, ontrue);
1386     vec_push(self->m_exits, onfalse);
1387     vec_push(ontrue->m_entries,  self);
1388     vec_push(onfalse->m_entries, self);
1389     return true;
1390 }
1391
1392 bool ir_block_create_jump(ir_block *self, lex_ctx_t ctx, ir_block *to)
1393 {
1394     ir_instr *in;
1395     if (!ir_check_unreachable(self))
1396         return false;
1397     self->m_final = true;
1398     in = new ir_instr(ctx, self, VINSTR_JUMP);
1399     if (!in)
1400         return false;
1401
1402     in->m_bops[0] = to;
1403     vec_push(self->m_instr, in);
1404
1405     vec_push(self->m_exits, to);
1406     vec_push(to->m_entries, self);
1407     return true;
1408 }
1409
1410 bool ir_block_create_goto(ir_block *self, lex_ctx_t ctx, ir_block *to)
1411 {
1412     self->m_owner->m_flags |= IR_FLAG_HAS_GOTO;
1413     return ir_block_create_jump(self, ctx, to);
1414 }
1415
1416 ir_instr* ir_block_create_phi(ir_block *self, lex_ctx_t ctx, const char *label, qc_type ot)
1417 {
1418     ir_value *out;
1419     ir_instr *in;
1420     if (!ir_check_unreachable(self))
1421         return nullptr;
1422     in = new ir_instr(ctx, self, VINSTR_PHI);
1423     if (!in)
1424         return nullptr;
1425     out = new ir_value(self->m_owner, label ? label : "", store_value, ot);
1426     if (!out) {
1427         delete in;
1428         return nullptr;
1429     }
1430     if (!ir_instr_op(in, 0, out, true)) {
1431         delete in;
1432         return nullptr;
1433     }
1434     vec_push(self->m_instr, in);
1435     return in;
1436 }
1437
1438 ir_value* ir_phi_value(ir_instr *self)
1439 {
1440     return self->_m_ops[0];
1441 }
1442
1443 void ir_phi_add(ir_instr* self, ir_block *b, ir_value *v)
1444 {
1445     ir_phi_entry_t pe;
1446
1447     if (!vec_ir_block_find(self->m_owner->m_entries, b, nullptr)) {
1448         // It must not be possible to reach this; otherwise the AST
1449         // is doing something wrong.
1450         irerror(self->m_context, "Invalid entry block for PHI");
1451         exit(EXIT_FAILURE);
1452     }
1453
1454     pe.value = v;
1455     pe.from = b;
1456     v->m_reads.push_back(self);
1457     self->m_phi.push_back(pe);
1458 }
1459
1460 /* call related code */
1461 ir_instr* ir_block_create_call(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *func, bool noreturn)
1462 {
1463     ir_value *out;
1464     ir_instr *in;
1465     if (!ir_check_unreachable(self))
1466         return nullptr;
1467     in = new ir_instr(ctx, self, (noreturn ? VINSTR_NRCALL : INSTR_CALL0));
1468     if (!in)
1469         return nullptr;
1470     if (noreturn) {
1471         self->m_final = true;
1472         self->m_is_return = true;
1473     }
1474     out = new ir_value(self->m_owner, label ? label : "", (func->m_outtype == TYPE_VOID) ? store_return : store_value, func->m_outtype);
1475     if (!out) {
1476         delete in;
1477         return nullptr;
1478     }
1479     if (!ir_instr_op(in, 0, out, true) ||
1480         !ir_instr_op(in, 1, func, false))
1481     {
1482         delete in;
1483         return nullptr;
1484     }
1485     vec_push(self->m_instr, in);
1486     /*
1487     if (noreturn) {
1488         if (!ir_block_create_return(self, ctx, nullptr)) {
1489             compile_error(ctx, "internal error: failed to generate dummy-return instruction");
1490             delete in;
1491             return nullptr;
1492         }
1493     }
1494     */
1495     return in;
1496 }
1497
1498 ir_value* ir_call_value(ir_instr *self)
1499 {
1500     return self->_m_ops[0];
1501 }
1502
1503 void ir_call_param(ir_instr* self, ir_value *v)
1504 {
1505     self->m_params.push_back(v);
1506     v->m_reads.push_back(self);
1507 }
1508
1509 /* binary op related code */
1510
1511 ir_value* ir_block_create_binop(ir_block *self, lex_ctx_t ctx,
1512                                 const char *label, int opcode,
1513                                 ir_value *left, ir_value *right)
1514 {
1515     qc_type ot = TYPE_VOID;
1516     switch (opcode) {
1517         case INSTR_ADD_F:
1518         case INSTR_SUB_F:
1519         case INSTR_DIV_F:
1520         case INSTR_MUL_F:
1521         case INSTR_MUL_V:
1522         case INSTR_AND:
1523         case INSTR_OR:
1524 #if 0
1525         case INSTR_AND_I:
1526         case INSTR_AND_IF:
1527         case INSTR_AND_FI:
1528         case INSTR_OR_I:
1529         case INSTR_OR_IF:
1530         case INSTR_OR_FI:
1531 #endif
1532         case INSTR_BITAND:
1533         case INSTR_BITOR:
1534         case VINSTR_BITXOR:
1535 #if 0
1536         case INSTR_SUB_S: /* -- offset of string as float */
1537         case INSTR_MUL_IF:
1538         case INSTR_MUL_FI:
1539         case INSTR_DIV_IF:
1540         case INSTR_DIV_FI:
1541         case INSTR_BITOR_IF:
1542         case INSTR_BITOR_FI:
1543         case INSTR_BITAND_FI:
1544         case INSTR_BITAND_IF:
1545         case INSTR_EQ_I:
1546         case INSTR_NE_I:
1547 #endif
1548             ot = TYPE_FLOAT;
1549             break;
1550 #if 0
1551         case INSTR_ADD_I:
1552         case INSTR_ADD_IF:
1553         case INSTR_ADD_FI:
1554         case INSTR_SUB_I:
1555         case INSTR_SUB_FI:
1556         case INSTR_SUB_IF:
1557         case INSTR_MUL_I:
1558         case INSTR_DIV_I:
1559         case INSTR_BITAND_I:
1560         case INSTR_BITOR_I:
1561         case INSTR_XOR_I:
1562         case INSTR_RSHIFT_I:
1563         case INSTR_LSHIFT_I:
1564             ot = TYPE_INTEGER;
1565             break;
1566 #endif
1567         case INSTR_ADD_V:
1568         case INSTR_SUB_V:
1569         case INSTR_MUL_VF:
1570         case INSTR_MUL_FV:
1571         case VINSTR_BITAND_V:
1572         case VINSTR_BITOR_V:
1573         case VINSTR_BITXOR_V:
1574         case VINSTR_BITAND_VF:
1575         case VINSTR_BITOR_VF:
1576         case VINSTR_BITXOR_VF:
1577         case VINSTR_CROSS:
1578 #if 0
1579         case INSTR_DIV_VF:
1580         case INSTR_MUL_IV:
1581         case INSTR_MUL_VI:
1582 #endif
1583             ot = TYPE_VECTOR;
1584             break;
1585 #if 0
1586         case INSTR_ADD_SF:
1587             ot = TYPE_POINTER;
1588             break;
1589 #endif
1590     /*
1591      * after the following default case, the value of opcode can never
1592      * be 1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65
1593      */
1594         default:
1595             /* ranges: */
1596             /* boolean operations result in floats */
1597
1598             /*
1599              * opcode >= 10 takes true branch opcode is at least 10
1600              * opcode <= 23 takes false branch opcode is at least 24
1601              */
1602              * "opcode >= 10" takes the true branch: opcode is at least 10
1603              * "opcode <= 23" takes the false branch: opcode is at least 24
1604
1605             /*
1606              * At condition "opcode <= 23", the value of "opcode" must be
1607              * at least 24.
1608              * At condition "opcode <= 23", the value of "opcode" cannot be
1609              * equal to any of {1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65}.
1610              * The condition "opcode <= 23" cannot be true.
1611              *
1612              * Thus ot=2 (TYPE_FLOAT) can never be true
1613              */
1614 #if 0
1615             else if (opcode >= INSTR_LE && opcode <= INSTR_GT)
1616                 ot = TYPE_FLOAT;
1617             else if (opcode >= INSTR_LE_I && opcode <= INSTR_EQ_FI)
1618                 ot = TYPE_FLOAT;
1619 #endif
1620             break;
1621     };
1622     if (ot == TYPE_VOID) {
1623         /* The AST or parser were supposed to check this! */
1624         return nullptr;
1625     }
1626
1627     return ir_block_create_general_instr(self, ctx, label, opcode, left, right, ot);
1628 }
1629
1630 ir_value* ir_block_create_unary(ir_block *self, lex_ctx_t ctx,
1631                                 const char *label, int opcode,
1632                                 ir_value *operand)
1633 {
1634     qc_type ot = TYPE_FLOAT;
1635     switch (opcode) {
1636         case INSTR_NOT_F:
1637         case INSTR_NOT_V:
1638         case INSTR_NOT_S:
1639         case INSTR_NOT_ENT:
1640         case INSTR_NOT_FNC: /*
1641         case INSTR_NOT_I:   */
1642             ot = TYPE_FLOAT;
1643             break;
1644
1645         /*
1646          * Negation for virtual instructions is emulated with 0-value. Thankfully
1647          * the operand for 0 already exists so we just source it from here.
1648          */
1649         case VINSTR_NEG_F:
1650             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_F, nullptr, operand, ot);
1651         case VINSTR_NEG_V:
1652             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_V, nullptr, operand, TYPE_VECTOR);
1653
1654         default:
1655             ot = operand->m_vtype;
1656             break;
1657     };
1658     if (ot == TYPE_VOID) {
1659         /* The AST or parser were supposed to check this! */
1660         return nullptr;
1661     }
1662
1663     /* let's use the general instruction creator and pass nullptr for OPB */
1664     return ir_block_create_general_instr(self, ctx, label, opcode, operand, nullptr, ot);
1665 }
1666
1667 static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t ctx, const char *label,
1668                                         int op, ir_value *a, ir_value *b, qc_type outype)
1669 {
1670     ir_instr *instr;
1671     ir_value *out;
1672
1673     out = new ir_value(self->m_owner, label ? label : "", store_value, outype);
1674     if (!out)
1675         return nullptr;
1676
1677     instr = new ir_instr(ctx, self, op);
1678     if (!instr) {
1679         return nullptr;
1680     }
1681
1682     if (!ir_instr_op(instr, 0, out, true) ||
1683         !ir_instr_op(instr, 1, a, false) ||
1684         !ir_instr_op(instr, 2, b, false) )
1685     {
1686         goto on_error;
1687     }
1688
1689     vec_push(self->m_instr, instr);
1690
1691     return out;
1692 on_error:
1693     delete instr;
1694     return nullptr;
1695 }
1696
1697 ir_value* ir_block_create_fieldaddress(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field)
1698 {
1699     ir_value *v;
1700
1701     /* Support for various pointer types could be added here if so desired */
1702     if (ent->m_vtype != TYPE_ENTITY)
1703         return nullptr;
1704
1705     if (field->m_vtype != TYPE_FIELD)
1706         return nullptr;
1707
1708     v = ir_block_create_general_instr(self, ctx, label, INSTR_ADDRESS, ent, field, TYPE_POINTER);
1709     v->m_fieldtype = field->m_fieldtype;
1710     return v;
1711 }
1712
1713 ir_value* ir_block_create_load_from_ent(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field, qc_type outype)
1714 {
1715     int op;
1716     if (ent->m_vtype != TYPE_ENTITY)
1717         return nullptr;
1718
1719     /* at some point we could redirect for TYPE_POINTER... but that could lead to carelessness */
1720     if (field->m_vtype != TYPE_FIELD)
1721         return nullptr;
1722
1723     switch (outype)
1724     {
1725         case TYPE_FLOAT:    op = INSTR_LOAD_F;   break;
1726         case TYPE_VECTOR:   op = INSTR_LOAD_V;   break;
1727         case TYPE_STRING:   op = INSTR_LOAD_S;   break;
1728         case TYPE_FIELD:    op = INSTR_LOAD_FLD; break;
1729         case TYPE_ENTITY:   op = INSTR_LOAD_ENT; break;
1730         case TYPE_FUNCTION: op = INSTR_LOAD_FNC; break;
1731 #if 0
1732         case TYPE_POINTER: op = INSTR_LOAD_I;   break;
1733         case TYPE_INTEGER: op = INSTR_LOAD_I;   break;
1734 #endif
1735         default:
1736             irerror(self->m_context, "invalid type for ir_block_create_load_from_ent: %s", type_name[outype]);
1737             return nullptr;
1738     }
1739
1740     return ir_block_create_general_instr(self, ctx, label, op, ent, field, outype);
1741 }
1742
1743 /* PHI resolving breaks the SSA, and must thus be the last
1744  * step before life-range calculation.
1745  */
1746
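/* "Naive" resolution: each VINSTR_PHI is removed and every (block, value)
 * source either gets its defining instruction retargeted to the phi's output
 * (single-use SSA temps) or has a plain STORE into the phi's output appended
 * to the source block, just before that block's final jump.
 */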
1747 static bool ir_block_naive_phi(ir_block *self);
1748 bool ir_function_naive_phi(ir_function *self)
1749 {
1750     for (auto& b : self->m_blocks)
1751         if (!ir_block_naive_phi(b.get()))
1752             return false;
1753     return true;
1754 }
1755
1756 static bool ir_block_naive_phi(ir_block *self)
1757 {
1758     size_t i;
1759     /* FIXME: optionally, create_phi can add the phis
1760      * to a list so we don't need to loop through blocks
1761      * - anyway: "don't optimize YET"
1762      */
1763     for (i = 0; i < vec_size(self->m_instr); ++i)
1764     {
1765         ir_instr *instr = self->m_instr[i];
1766         if (instr->m_opcode != VINSTR_PHI)
1767             continue;
1768
1769         vec_remove(self->m_instr, i, 1);
1770         --i; /* NOTE: i+1 below */
1771
1772         for (auto &it : instr->m_phi) {
1773             ir_value *v = it.value;
1774             ir_block *b = it.from;
1775             if (v->m_store == store_value && v->m_reads.size() == 1 && v->m_writes.size() == 1) {
1776                 /* replace the value */
1777                 if (!ir_instr_op(v->m_writes[0], 0, instr->_m_ops[0], true))
1778                     return false;
1779             } else {
1780                 /* force a move instruction */
1781                 ir_instr *prevjump = vec_last(b->m_instr);
1782                 vec_pop(b->m_instr);
1783                 b->m_final = false;
1784                 instr->_m_ops[0]->m_store = store_global;
1785                 if (!ir_block_create_store(b, instr->m_context, instr->_m_ops[0], v))
1786                     return false;
1787                 instr->_m_ops[0]->m_store = store_value;
1788                 vec_push(b->m_instr, prevjump);
1789                 b->m_final = true;
1790             }
1791         }
1792         delete instr;
1793     }
1794     return true;
1795 }
1796
1797 /***********************************************************************
1798  * IR Temp allocation code
1799  * Propagating value life ranges by walking through the function backwards
1800  * until no more changes are made.
1801  * In theory this should run once, plus once more for every level of
1802  * loop nesting.
1803  * Though this implementation might run an additional time for if-nests.
1804  */
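/* Illustrative outline of the fixpoint (see ir_function_calculate_liferanges
 * and ir_block_life_propagate below): each block's 'living' set is seeded
 * from the 'living' sets of its exit blocks, the block's instructions are
 * walked backwards, and the whole pass repeats while any life-range changed:
 *
 *     do {
 *         changed = false;
 *         for (block : blocks in reverse)
 *             ir_block_life_propagate(block, &changed);
 *     } while (changed);
 */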
1805
1806 /* Assign the instruction IDs (eids) that the values' life-ranges refer to.
1807  */
1808 static void ir_block_enumerate(ir_block *self, size_t *_eid)
1809 {
1810     size_t i;
1811     size_t eid = *_eid;
1812     for (i = 0; i < vec_size(self->m_instr); ++i)
1813     {
1814         self->m_instr[i]->m_eid = eid++;
1815     }
1816     *_eid = eid;
1817 }
1818
1819 /* Enumerate blocks and instructions.
1820  * The block-enumeration is unordered!
1821  * We do not really use the block enumeration; however,
1822  * the instruction enumeration is important for life-ranges.
1823  */
1824 void ir_function_enumerate(ir_function *self)
1825 {
1826     size_t instruction_id = 0;
1827     size_t block_eid = 0;
1828     for (auto& block : self->m_blocks)
1829     {
1830         /* each block now gets an additional "entry" instruction id
1831          * we can use to avoid point-life issues
1832          */
1833         block->m_entry_id = instruction_id;
1834         block->m_eid      = block_eid;
1835         ++instruction_id;
1836         ++block_eid;
1837
1838         ir_block_enumerate(block.get(), &instruction_id);
1839     }
1840 }
1841
1842 /* Local-value allocator
1843  * After the life-ranges of all values used in a function have been
1844  * calculated, we can allocate their global positions.
1845  * This is the counterpart to register-allocation in register machines.
1846  */
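/* For illustration (simplified): two locals whose life-ranges do not
 * overlap, e.g.
 *     float a;  // live for instructions [2, 10]
 *     float b;  // live for instructions [14, 20]
 * may end up sharing one slot, much like linear-scan register allocation,
 * whereas values marked with a unique life-range always get their own slot.
 */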
1847 struct function_allocator {
1848     ir_value **locals;    /* one placeholder value per allocated slot */
1849     size_t *sizes;        /* size (in globals) of each slot */
1850     size_t *positions;    /* computed global offset of each slot */
1851     bool *unique;         /* slot is reserved for a unique life-range */
1852 };
1853
1854 static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
1855 {
1856     ir_value *slot;
1857     size_t vsize = var->size();
1858
1859     var->m_code.local = vec_size(alloc->locals);
1860
1861     slot = new ir_value("reg", store_global, var->m_vtype);
1862     if (!slot)
1863         return false;
1864
1865     if (!slot->mergeLife(var))
1866         goto localerror;
1867
1868     vec_push(alloc->locals, slot);
1869     vec_push(alloc->sizes, vsize);
1870     vec_push(alloc->unique, var->m_unique_life);
1871
1872     return true;
1873
1874 localerror:
1875     delete slot;
1876     return false;
1877 }
1878
1879 static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
1880 {
1881     size_t a;
1882     ir_value *slot;
1883
1884     if (v->m_unique_life)
1885         return function_allocator_alloc(alloc, v);
1886
1887     for (a = 0; a < vec_size(alloc->locals); ++a)
1888     {
1889         /* if it's reserved for a unique liferange: skip */
1890         if (alloc->unique[a])
1891             continue;
1892
1893         slot = alloc->locals[a];
1894
1895         /* never resize parameter slots;
1896          * this will matter later when overlapping temps + locals
1897          */
1898         if (a < vec_size(self->m_params) &&
1899             alloc->sizes[a] < v->size())
1900         {
1901             continue;
1902         }
1903
1904         if (ir_values_overlap(v, slot))
1905             continue;
1906
1907         if (!slot->mergeLife(v))
1908             return false;
1909
1910         /* adjust size for this slot */
1911         if (alloc->sizes[a] < v->size())
1912             alloc->sizes[a] = v->size();
1913
1914         v->m_code.local = a;
1915         return true;
1916     }
1917     if (a >= vec_size(alloc->locals)) {
1918         if (!function_allocator_alloc(alloc, v))
1919             return false;
1920     }
1921     return true;
1922 }
1923
1924 bool ir_function_allocate_locals(ir_function *self)
1925 {
1926     bool   retval = true;
1927     size_t pos;
1928     bool   opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
1929
1930     function_allocator lockalloc, globalloc;
1931
1932     if (self->m_locals.empty() && self->m_values.empty())
1933         return true;
1934
1935     globalloc.locals    = nullptr;
1936     globalloc.sizes     = nullptr;
1937     globalloc.positions = nullptr;
1938     globalloc.unique    = nullptr;
1939     lockalloc.locals    = nullptr;
1940     lockalloc.sizes     = nullptr;
1941     lockalloc.positions = nullptr;
1942     lockalloc.unique    = nullptr;
1943
1944     size_t i;
1945     for (i = 0; i < self->m_locals.size(); ++i)
1946     {
1947         ir_value *v = self->m_locals[i].get();
1948         if ((self->m_flags & IR_FLAG_MASK_NO_LOCAL_TEMPS) || !OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) {
1949             v->m_locked      = true;
1950             v->m_unique_life = true;
1951         }
1952         else if (i >= vec_size(self->m_params))
1953             break;
1954         else
1955             v->m_locked = true; /* lock parameter locals */
1956         if (!function_allocator_alloc((v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1957             goto error;
1958     }
1959     for (; i < self->m_locals.size(); ++i)
1960     {
1961         ir_value *v = self->m_locals[i].get();
1962         if (v->m_life.empty())
1963             continue;
1964         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1965             goto error;
1966     }
1967
1968     /* Allocate a slot for any value that still exists */
1969     for (i = 0; i < self->m_values.size(); ++i)
1970     {
1971         ir_value *v = self->m_values[i].get();
1972
1973         if (v->m_life.empty())
1974             continue;
1975
1976         /* CALL optimization:
1977          * If the value is a parameter-temp: 1 write, 1 read from a CALL
1978          * and it's not "locked", write it to the OFS_PARM directly.
1979          */
1980         if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->m_locked && !v->m_unique_life) {
1981             if (v->m_reads.size() == 1 && v->m_writes.size() == 1 &&
1982                 (v->m_reads[0]->m_opcode == VINSTR_NRCALL ||
1983                  (v->m_reads[0]->m_opcode >= INSTR_CALL0 && v->m_reads[0]->m_opcode <= INSTR_CALL8)
1984                 )
1985                )
1986             {
1987                 size_t param;
1988                 ir_instr *call = v->m_reads[0];
1989                 if (!vec_ir_value_find(call->m_params, v, &param)) {
1990                     irerror(call->m_context, "internal error: unlocked parameter %s not found", v->m_name.c_str());
1991                     goto error;
1992                 }
1993                 ++opts_optimizationcount[OPTIM_CALL_STORES];
1994                 v->m_callparam = true;
1995                 if (param < 8)
1996                     v->setCodeAddress(OFS_PARM0 + 3*param);
1997                 else {
1998                     size_t nprotos = self->m_owner->m_extparam_protos.size();
1999                     ir_value *ep;
2000                     param -= 8;
2001                     if (nprotos > param)
2002                         ep = self->m_owner->m_extparam_protos[param].get();
2003                     else
2004                     {
2005                         ep = self->m_owner->generateExtparamProto();
2006                         while (++nprotos <= param)
2007                             ep = self->m_owner->generateExtparamProto();
2008                     }
2009                     ir_instr_op(v->m_writes[0], 0, ep, true);
2010                     call->m_params[param+8] = ep;
2011                 }
2012                 continue;
2013             }
2014             if (v->m_writes.size() == 1 && v->m_writes[0]->m_opcode == INSTR_CALL0) {
2015                 v->m_store = store_return;
2016                 if (v->m_members[0]) v->m_members[0]->m_store = store_return;
2017                 if (v->m_members[1]) v->m_members[1]->m_store = store_return;
2018                 if (v->m_members[2]) v->m_members[2]->m_store = store_return;
2019                 ++opts_optimizationcount[OPTIM_CALL_STORES];
2020                 continue;
2021             }
2022         }
2023
2024         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
2025             goto error;
2026     }
2027
2028     if (!lockalloc.sizes && !globalloc.sizes) {
2029         goto cleanup;
2030     }
2031     vec_push(lockalloc.positions, 0);
2032     vec_push(globalloc.positions, 0);
2033
2034     /* Adjust slot positions based on sizes */
2035     if (lockalloc.sizes) {
2036         pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
2037         for (i = 1; i < vec_size(lockalloc.sizes); ++i)
2038         {
2039             pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
2040             vec_push(lockalloc.positions, pos);
2041         }
2042         self->m_allocated_locals = pos + vec_last(lockalloc.sizes);
2043     }
2044     if (globalloc.sizes) {
2045         pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
2046         for (i = 1; i < vec_size(globalloc.sizes); ++i)
2047         {
2048             pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
2049             vec_push(globalloc.positions, pos);
2050         }
2051         self->m_globaltemps = pos + vec_last(globalloc.sizes);
2052     }
2053
2054     /* Locals need to know their new position */
2055     for (auto& local : self->m_locals) {
2056         if (local->m_locked || !opt_gt)
2057             local->m_code.local = lockalloc.positions[local->m_code.local];
2058         else
2059             local->m_code.local = globalloc.positions[local->m_code.local];
2060     }
2061     /* Take over the actual slot positions on values */
2062     for (auto& value : self->m_values) {
2063         if (value->m_locked || !opt_gt)
2064             value->m_code.local = lockalloc.positions[value->m_code.local];
2065         else
2066             value->m_code.local = globalloc.positions[value->m_code.local];
2067     }
2068
2069     goto cleanup;
2070
2071 error:
2072     retval = false;
2073 cleanup:
2074     for (i = 0; i < vec_size(lockalloc.locals); ++i)
2075         delete lockalloc.locals[i];
2076     for (i = 0; i < vec_size(globalloc.locals); ++i)
2077         delete globalloc.locals[i];
2078     vec_free(globalloc.unique);
2079     vec_free(globalloc.locals);
2080     vec_free(globalloc.sizes);
2081     vec_free(globalloc.positions);
2082     vec_free(lockalloc.unique);
2083     vec_free(lockalloc.locals);
2084     vec_free(lockalloc.sizes);
2085     vec_free(lockalloc.positions);
2086     return retval;
2087 }
2088
2089 /* Get bitmasks describing which operands
2090  * are read from and which are written to.
2091  */
2092 static void ir_op_read_write(int op, size_t *read, size_t *write)
2093 {
2094     switch (op)
2095     {
2096     case VINSTR_JUMP:
2097     case INSTR_GOTO:
2098         *write = 0;
2099         *read = 0;
2100         break;
2101     case INSTR_IF:
2102     case INSTR_IFNOT:
2103 #if 0
2104     case INSTR_IF_S:
2105     case INSTR_IFNOT_S:
2106 #endif
2107     case INSTR_RETURN:
2108     case VINSTR_COND:
2109         *write = 0;
2110         *read = 1;
2111         break;
2112     case INSTR_STOREP_F:
2113     case INSTR_STOREP_V:
2114     case INSTR_STOREP_S:
2115     case INSTR_STOREP_ENT:
2116     case INSTR_STOREP_FLD:
2117     case INSTR_STOREP_FNC:
2118         *write = 0;
2119         *read  = 7;
2120         break;
2121     default:
2122         *write = 1;
2123         *read = 6;
2124         break;
2125     }
2126 }
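
/* Note: 'read' and 'write' are bitmasks over the three operand slots,
 * tested below as (read & (1<<o)). For example, the default case sets
 * write = 1 (only operand 0 is written) and read = 6 = 0b110 (operands
 * 1 and 2 are read), while the STOREP_* case sets read = 7 (all three
 * operands are read, none written).
 */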
2127
2128 static bool ir_block_living_add_instr(ir_block *self, size_t eid) {
2129     bool changed = false;
2130     for (auto &it : self->m_living)
2131         if (it->setAlive(eid))
2132             changed = true;
2133     return changed;
2134 }
2135
2136 static bool ir_block_living_lock(ir_block *self) {
2137     bool changed = false;
2138     for (auto &it : self->m_living) {
2139         if (it->m_locked)
2140             continue;
2141         it->m_locked = true;
2142         changed = true;
2143     }
2144     return changed;
2145 }
2146
2147 static bool ir_block_life_propagate(ir_block *self, bool *changed)
2148 {
2149     ir_instr *instr;
2150     ir_value *value;
2151     size_t i, o, p, mem;
2152     // bitmasks which operands are read from or written to
2153     size_t read, write;
2154
2155     self->m_living.clear();
2156
2157     p = vec_size(self->m_exits);
2158     for (i = 0; i < p; ++i) {
2159         ir_block *prev = self->m_exits[i];
2160         for (auto &it : prev->m_living)
2161             if (!vec_ir_value_find(self->m_living, it, nullptr))
2162                 self->m_living.push_back(it);
2163     }
2164
2165     i = vec_size(self->m_instr);
2166     while (i)
2167     { --i;
2168         instr = self->m_instr[i];
2169
2170         /* See which operands are read and write operands */
2171         ir_op_read_write(instr->m_opcode, &read, &write);
2172
2173         /* Go through the 3 main operands
2174          * writes first, then reads
2175          */
2176         for (o = 0; o < 3; ++o)
2177         {
2178             if (!instr->_m_ops[o]) /* no such operand */
2179                 continue;
2180
2181             value = instr->_m_ops[o];
2182
2183             /* We only care about locals */
2184             /* we also calculate parameter liferanges so that locals
2185              * can take up parameter slots */
2186             if (value->m_store != store_value &&
2187                 value->m_store != store_local &&
2188                 value->m_store != store_param)
2189                 continue;
2190
2191             /* write operands */
2192             /* When we write to a local, we consider it "dead" for the
2193              * remaining upper part of the function, since in SSA a value
2194              * can only be written once (== created)
2195              */
2196             if (write & (1<<o))
2197             {
2198                 size_t idx;
2199                 bool in_living = vec_ir_value_find(self->m_living, value, &idx);
2200                 if (!in_living)
2201                 {
2202                     /* If the value isn't alive it hasn't been read before... */
2203                     /* TODO: See if the warning can be emitted during parsing or AST processing
2204                      * otherwise have warning printed here.
2205                      * IF printing a warning here: include filecontext_t,
2206                      * and make sure it's only printed once
2207                      * since this function is run multiple times.
2208                      */
2209                     /* con_err( "Value only written %s\n", value->m_name); */
2210                     if (value->setAlive(instr->m_eid))
2211                         *changed = true;
2212                 } else {
2213                     /* since 'living' won't contain it
2214                      * anymore, merge the value here,
2215                      * because step (A) below won't.
2216                      */
2217                     if (value->setAlive(instr->m_eid))
2218                         *changed = true;
2219                     // Then remove
2220                     self->m_living.erase(self->m_living.begin() + idx);
2221                 }
2222                 /* Removing a vector removes all members */
2223                 for (mem = 0; mem < 3; ++mem) {
2224                     if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], &idx)) {
2225                         if (value->m_members[mem]->setAlive(instr->m_eid))
2226                             *changed = true;
2227                         self->m_living.erase(self->m_living.begin() + idx);
2228                     }
2229                 }
2230                 /* Removing the last member removes the vector */
2231                 if (value->m_memberof) {
2232                     value = value->m_memberof;
2233                     for (mem = 0; mem < 3; ++mem) {
2234                         if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2235                             break;
2236                     }
2237                     if (mem == 3 && vec_ir_value_find(self->m_living, value, &idx)) {
2238                         if (value->setAlive(instr->m_eid))
2239                             *changed = true;
2240                         self->m_living.erase(self->m_living.begin() + idx);
2241                     }
2242                 }
2243             }
2244         }
2245
2246         /* These operations need a special case, as they can otherwise break
2247          * when the same value is used as source and destination operand,
2248          * because the engine may read the source multiple times. */
2249         if (instr->m_opcode == INSTR_MUL_VF ||
2250             instr->m_opcode == VINSTR_BITAND_VF ||
2251             instr->m_opcode == VINSTR_BITOR_VF ||
2252             instr->m_opcode == VINSTR_BITXOR ||
2253             instr->m_opcode == VINSTR_BITXOR_VF ||
2254             instr->m_opcode == VINSTR_BITXOR_V ||
2255             instr->m_opcode == VINSTR_CROSS)
2256         {
2257             value = instr->_m_ops[2];
2258             /* the source operand (usually the float) gets an additional lifetime */
2259             if (value->setAlive(instr->m_eid+1))
2260                 *changed = true;
2261             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2262                 *changed = true;
2263         }
2264
2265         if (instr->m_opcode == INSTR_MUL_FV ||
2266             instr->m_opcode == INSTR_LOAD_V ||
2267             instr->m_opcode == VINSTR_BITXOR ||
2268             instr->m_opcode == VINSTR_BITXOR_VF ||
2269             instr->m_opcode == VINSTR_BITXOR_V ||
2270             instr->m_opcode == VINSTR_CROSS)
2271         {
2272             value = instr->_m_ops[1];
2273             /* the source operand gets an additional lifetime */
2274             if (value->setAlive(instr->m_eid+1))
2275                 *changed = true;
2276             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2277                 *changed = true;
2278         }
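
        /* This extra instruction of lifetime keeps the source overlapping
         * with the freshly written destination, so the slot allocator
         * (see ir_values_overlap above) will not hand both the same slot.
         */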
2279
2280         for (o = 0; o < 3; ++o)
2281         {
2282             if (!instr->_m_ops[o]) /* no such operand */
2283                 continue;
2284
2285             value = instr->_m_ops[o];
2286
2287             /* We only care about locals */
2288             /* we also calculate parameter liferanges so that locals
2289              * can take up parameter slots */
2290             if (value->m_store != store_value &&
2291                 value->m_store != store_local &&
2292                 value->m_store != store_param)
2293                 continue;
2294
2295             /* read operands */
2296             if (read & (1<<o))
2297             {
2298                 if (!vec_ir_value_find(self->m_living, value, nullptr))
2299                     self->m_living.push_back(value);
2300                 /* reading adds the full vector */
2301                 if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2302                     self->m_living.push_back(value->m_memberof);
2303                 for (mem = 0; mem < 3; ++mem) {
2304                     if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2305                         self->m_living.push_back(value->m_members[mem]);
2306                 }
2307             }
2308         }
2309         /* PHI operands are always read operands */
2310         for (auto &it : instr->m_phi) {
2311             value = it.value;
2312             if (!vec_ir_value_find(self->m_living, value, nullptr))
2313                 self->m_living.push_back(value);
2314             /* reading adds the full vector */
2315             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2316                 self->m_living.push_back(value->m_memberof);
2317             for (mem = 0; mem < 3; ++mem) {
2318                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2319                     self->m_living.push_back(value->m_members[mem]);
2320             }
2321         }
2322
2323         /* on a call, all these values must be "locked" */
2324         if (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8) {
2325             if (ir_block_living_lock(self))
2326                 *changed = true;
2327         }
2328         /* call params are read operands too */
2329         for (auto &it : instr->m_params) {
2330             value = it;
2331             if (!vec_ir_value_find(self->m_living, value, nullptr))
2332                 self->m_living.push_back(value);
2333             /* reading adds the full vector */
2334             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2335                 self->m_living.push_back(value->m_memberof);
2336             for (mem = 0; mem < 3; ++mem) {
2337                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2338                     self->m_living.push_back(value->m_members[mem]);
2339             }
2340         }
2341
2342         /* (A) */
2343         if (ir_block_living_add_instr(self, instr->m_eid))
2344             *changed = true;
2345     }
2346     /* the "entry" instruction ID */
2347     if (ir_block_living_add_instr(self, self->m_entry_id))
2348         *changed = true;
2349
2350     return true;
2351 }
2352
2353 bool ir_function_calculate_liferanges(ir_function *self)
2354 {
2355     /* parameters live at 0 */
2356     for (size_t i = 0; i < vec_size(self->m_params); ++i)
2357         if (!self->m_locals[i].get()->setAlive(0))
2358             compile_error(self->m_context, "internal error: failed value-life merging");
2359
2360     bool changed;
2361     do {
2362         self->m_run_id++;
2363         changed = false;
2364         for (auto i = self->m_blocks.rbegin(); i != self->m_blocks.rend(); ++i)
2365             ir_block_life_propagate(i->get(), &changed);
2366     } while (changed);
2367
2368     if (self->m_blocks.size()) {
2369         ir_block *block = self->m_blocks[0].get();
2370         for (auto &it : block->m_living) {
2371             ir_value *v = it;
2372             if (v->m_store != store_local)
2373                 continue;
2374             if (v->m_vtype == TYPE_VECTOR)
2375                 continue;
2376             self->m_flags |= IR_FLAG_HAS_UNINITIALIZED;
2377             /* find the instruction reading from it */
2378             size_t s = 0;
2379             for (; s < v->m_reads.size(); ++s) {
2380                 if (v->m_reads[s]->m_eid == v->m_life[0].end)
2381                     break;
2382             }
2383             if (s < v->m_reads.size()) {
2384                 if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2385                               "variable `%s` may be used uninitialized in this function\n"
2386                               " -> %s:%i",
2387                               v->m_name.c_str(),
2388                               v->m_reads[s]->m_context.file, v->m_reads[s]->m_context.line)
2389                    )
2390                 {
2391                     return false;
2392                 }
2393                 continue;
2394             }
2395             if (v->m_memberof) {
2396                 ir_value *vec = v->m_memberof;
2397                 for (s = 0; s < vec->m_reads.size(); ++s) {
2398                     if (vec->m_reads[s]->m_eid == v->m_life[0].end)
2399                         break;
2400                 }
2401                 if (s < vec->m_reads.size()) {
2402                     if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2403                                   "variable `%s` may be used uninitialized in this function\n"
2404                                   " -> %s:%i",
2405                                   v->m_name.c_str(),
2406                                   vec->m_reads[s]->m_context.file, vec->m_reads[s]->m_context.line)
2407                        )
2408                     {
2409                         return false;
2410                     }
2411                     continue;
2412                 }
2413             }
2414             if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2415                           "variable `%s` may be used uninitialized in this function", v->m_name.c_str()))
2416             {
2417                 return false;
2418             }
2419         }
2420     }
2421     return true;
2422 }
2423
2424 /***********************************************************************
2425  * IR Code-Generation
2426  *
2427  * Since the IR has the convention of putting 'write' operands
2428  * at the beginning, we have to rotate the operands of instructions
2429  * properly in order to generate valid QCVM code.
2430  *
2431  * Having destinations at a fixed position is more convenient. In QC
2432  * this is *mostly* OPC, but FTE adds at least 2 instructions which
2433  * read from OPA and store to OPB rather than OPC, which is partially
2434  * the reason why the implementation of these instructions in
2435  * darkplaces has been delayed for so long.
2436  *
2437  * Breaking conventions is annoying...
2438  */
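/* Illustrative example of the operand rotation performed below (not a
 * verbatim dump): an IR instruction stored as
 *     ops[0] = out, ops[1] = a, ops[2] = b      e.g. out = ADD_F a, b
 * is emitted as a QC statement with
 *     o1 = a, o2 = b, o3 = out
 * while the STORE/STOREP and RETURN/DONE families shuffle the destination
 * into o2 or o1 respectively, as handled near the end of
 * gen_blocks_recursive().
 */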
2439 static bool gen_global_field(code_t *code, ir_value *global)
2440 {
2441     if (global->m_hasvalue)
2442     {
2443         ir_value *fld = global->m_constval.vpointer;
2444         if (!fld) {
2445             irerror(global->m_context, "Invalid field constant with no field: %s", global->m_name.c_str());
2446             return false;
2447         }
2448
2449         /* copy the field's value */
2450         global->setCodeAddress(code->globals.size());
2451         code->globals.push_back(fld->m_code.fieldaddr);
2452         if (global->m_fieldtype == TYPE_VECTOR) {
2453             code->globals.push_back(fld->m_code.fieldaddr+1);
2454             code->globals.push_back(fld->m_code.fieldaddr+2);
2455         }
2456     }
2457     else
2458     {
2459         global->setCodeAddress(code->globals.size());
2460         code->globals.push_back(0);
2461         if (global->m_fieldtype == TYPE_VECTOR) {
2462             code->globals.push_back(0);
2463             code->globals.push_back(0);
2464         }
2465     }
2466     if (global->m_code.globaladdr < 0)
2467         return false;
2468     return true;
2469 }
2470
2471 static bool gen_global_pointer(code_t *code, ir_value *global)
2472 {
2473     if (global->m_hasvalue)
2474     {
2475         ir_value *target = global->m_constval.vpointer;
2476         if (!target) {
2477             irerror(global->m_context, "Invalid pointer constant: %s", global->m_name.c_str());
2478             /* null pointers point to the nullptr constant, which also
2479              * sits at address 0 but still has an ir_value of its own.
2480              */
2481             return false;
2482         }
2483
2484         /* Here, relocations ARE possible - in fteqcc-enhanced-qc:
2485          * void() foo; <- proto
2486          * void() *fooptr = &foo;
2487          * void() foo = { code }
2488          */
2489         if (!target->m_code.globaladdr) {
2490             /* FIXME: Check for the constant nullptr ir_value!
2491              * because then code.globaladdr being 0 is valid.
2492              */
2493             irerror(global->m_context, "FIXME: Relocation support");
2494             return false;
2495         }
2496
2497         global->setCodeAddress(code->globals.size());
2498         code->globals.push_back(target->m_code.globaladdr);
2499     }
2500     else
2501     {
2502         global->setCodeAddress(code->globals.size());
2503         code->globals.push_back(0);
2504     }
2505     if (global->m_code.globaladdr < 0)
2506         return false;
2507     return true;
2508 }
2509
2510 static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *block)
2511 {
2512     prog_section_statement_t stmt;
2513     ir_instr *instr;
2514     ir_block *target;
2515     ir_block *ontrue;
2516     ir_block *onfalse;
2517     size_t    stidx;
2518     size_t    i;
2519     int       j;
2520
2521     block->m_generated = true;
2522     block->m_code_start = code->statements.size();
2523     for (i = 0; i < vec_size(block->m_instr); ++i)
2524     {
2525         instr = block->m_instr[i];
2526
2527         if (instr->m_opcode == VINSTR_PHI) {
2528             irerror(block->m_context, "cannot generate virtual instruction (phi)");
2529             return false;
2530         }
2531
2532         if (instr->m_opcode == VINSTR_JUMP) {
2533             target = instr->m_bops[0];
2534             /* for unconditional jumps, if the target hasn't been generated
2535              * yet, we generate it right here.
2536              */
2537             if (!target->m_generated)
2538                 return gen_blocks_recursive(code, func, target);
2539
2540             /* otherwise we generate a jump instruction */
2541             stmt.opcode = INSTR_GOTO;
2542             stmt.o1.s1 = target->m_code_start - code->statements.size();
2543             stmt.o2.s1 = 0;
2544             stmt.o3.s1 = 0;
2545             if (stmt.o1.s1 != 1)
2546                 code_push_statement(code, &stmt, instr->m_context);
2547
2548             /* no further instructions can be in this block */
2549             return true;
2550         }
2551
2552         if (instr->m_opcode == VINSTR_BITXOR) {
2553             stmt.opcode = INSTR_BITOR;
2554             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2555             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2556             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2557             code_push_statement(code, &stmt, instr->m_context);
2558             stmt.opcode = INSTR_BITAND;
2559             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2560             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2561             stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2562             code_push_statement(code, &stmt, instr->m_context);
2563             stmt.opcode = INSTR_SUB_F;
2564             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2565             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2566             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2567             code_push_statement(code, &stmt, instr->m_context);
2568
2569             /* instruction generated */
2570             continue;
2571         }
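
        /* i.e. the float XOR above is synthesized via the identity
         *     a ^ b == (a | b) - (a & b)
         * using BITOR, BITAND into a scratch global and a final SUB_F.
         */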
2572
2573         if (instr->m_opcode == VINSTR_BITAND_V) {
2574             stmt.opcode = INSTR_BITAND;
2575             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2576             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2577             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2578             code_push_statement(code, &stmt, instr->m_context);
2579             ++stmt.o1.s1;
2580             ++stmt.o2.s1;
2581             ++stmt.o3.s1;
2582             code_push_statement(code, &stmt, instr->m_context);
2583             ++stmt.o1.s1;
2584             ++stmt.o2.s1;
2585             ++stmt.o3.s1;
2586             code_push_statement(code, &stmt, instr->m_context);
2587
2588             /* instruction generated */
2589             continue;
2590         }
2591
2592         if (instr->m_opcode == VINSTR_BITOR_V) {
2593             stmt.opcode = INSTR_BITOR;
2594             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2595             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2596             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2597             code_push_statement(code, &stmt, instr->m_context);
2598             ++stmt.o1.s1;
2599             ++stmt.o2.s1;
2600             ++stmt.o3.s1;
2601             code_push_statement(code, &stmt, instr->m_context);
2602             ++stmt.o1.s1;
2603             ++stmt.o2.s1;
2604             ++stmt.o3.s1;
2605             code_push_statement(code, &stmt, instr->m_context);
2606
2607             /* instruction generated */
2608             continue;
2609         }
2610
2611         if (instr->m_opcode == VINSTR_BITXOR_V) {
2612             for (j = 0; j < 3; ++j) {
2613                 stmt.opcode = INSTR_BITOR;
2614                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2615                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2616                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2617                 code_push_statement(code, &stmt, instr->m_context);
2618                 stmt.opcode = INSTR_BITAND;
2619                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2620                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2621                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2622                 code_push_statement(code, &stmt, instr->m_context);
2623             }
2624             stmt.opcode = INSTR_SUB_V;
2625             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2626             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2627             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2628             code_push_statement(code, &stmt, instr->m_context);
2629
2630             /* instruction generated */
2631             continue;
2632         }
2633
2634         if (instr->m_opcode == VINSTR_BITAND_VF) {
2635             stmt.opcode = INSTR_BITAND;
2636             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2637             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2638             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2639             code_push_statement(code, &stmt, instr->m_context);
2640             ++stmt.o1.s1;
2641             ++stmt.o3.s1;
2642             code_push_statement(code, &stmt, instr->m_context);
2643             ++stmt.o1.s1;
2644             ++stmt.o3.s1;
2645             code_push_statement(code, &stmt, instr->m_context);
2646
2647             /* instruction generated */
2648             continue;
2649         }
2650
2651         if (instr->m_opcode == VINSTR_BITOR_VF) {
2652             stmt.opcode = INSTR_BITOR;
2653             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2654             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2655             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2656             code_push_statement(code, &stmt, instr->m_context);
2657             ++stmt.o1.s1;
2658             ++stmt.o3.s1;
2659             code_push_statement(code, &stmt, instr->m_context);
2660             ++stmt.o1.s1;
2661             ++stmt.o3.s1;
2662             code_push_statement(code, &stmt, instr->m_context);
2663
2664             /* instruction generated */
2665             continue;
2666         }
2667
2668         if (instr->m_opcode == VINSTR_BITXOR_VF) {
2669             for (j = 0; j < 3; ++j) {
2670                 stmt.opcode = INSTR_BITOR;
2671                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2672                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2673                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2674                 code_push_statement(code, &stmt, instr->m_context);
2675                 stmt.opcode = INSTR_BITAND;
2676                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2677                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2678                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2679                 code_push_statement(code, &stmt, instr->m_context);
2680             }
2681             stmt.opcode = INSTR_SUB_V;
2682             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2683             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2684             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2685             code_push_statement(code, &stmt, instr->m_context);
2686
2687             /* instruction generated */
2688             continue;
2689         }
2690
2691         if (instr->m_opcode == VINSTR_CROSS) {
2692             stmt.opcode = INSTR_MUL_F;
2693             for (j = 0; j < 3; ++j) {
2694                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 1) % 3;
2695                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 2) % 3;
2696                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2697                 code_push_statement(code, &stmt, instr->m_context);
2698                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 2) % 3;
2699                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 1) % 3;
2700                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2701                 code_push_statement(code, &stmt, instr->m_context);
2702             }
2703             stmt.opcode = INSTR_SUB_V;
2704             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2705             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2706             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2707             code_push_statement(code, &stmt, instr->m_context);
2708
2709             /* instruction generated */
2710             continue;
2711         }
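
        /* The loop above expands the usual cross product component-wise:
         *     out[j] = a[(j+1)%3] * b[(j+2)%3] - a[(j+2)%3] * b[(j+1)%3]
         * with the second products accumulated in the vinstr scratch vector
         * and subtracted in one SUB_V.
         */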
2712
2713         if (instr->m_opcode == VINSTR_COND) {
2714             ontrue  = instr->m_bops[0];
2715             onfalse = instr->m_bops[1];
2716             /* TODO: have the AST signal which block should
2717              * come first: e.g. optimize IFs without ELSE...
2718              */
2719
2720             stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2721             stmt.o2.u1 = 0;
2722             stmt.o3.s1 = 0;
2723
2724             if (ontrue->m_generated) {
2725                 stmt.opcode = INSTR_IF;
2726                 stmt.o2.s1 = ontrue->m_code_start - code->statements.size();
2727                 if (stmt.o2.s1 != 1)
2728                     code_push_statement(code, &stmt, instr->m_context);
2729             }
2730             if (onfalse->m_generated) {
2731                 stmt.opcode = INSTR_IFNOT;
2732                 stmt.o2.s1 = onfalse->m_code_start - code->statements.size();
2733                 if (stmt.o2.s1 != 1)
2734                     code_push_statement(code, &stmt, instr->m_context);
2735             }
2736             if (!ontrue->m_generated) {
2737                 if (onfalse->m_generated)
2738                     return gen_blocks_recursive(code, func, ontrue);
2739             }
2740             if (!onfalse->m_generated) {
2741                 if (ontrue->m_generated)
2742                     return gen_blocks_recursive(code, func, onfalse);
2743             }
2744             /* neither ontrue nor onfalse has been generated yet */
2745             stmt.opcode = INSTR_IFNOT;
2746             if (!instr->m_likely) {
2747                 /* Honor the likelihood hint */
2748                 ir_block *tmp = onfalse;
2749                 stmt.opcode = INSTR_IF;
2750                 onfalse = ontrue;
2751                 ontrue = tmp;
2752             }
2753             stidx = code->statements.size();
2754             code_push_statement(code, &stmt, instr->m_context);
2755             /* on false we jump, so add ontrue-path */
2756             if (!gen_blocks_recursive(code, func, ontrue))
2757                 return false;
2758             /* fixup the jump address */
2759             code->statements[stidx].o2.s1 = code->statements.size() - stidx;
2760             /* generate onfalse path */
2761             if (onfalse->m_generated) {
2762                 /* fixup the jump address */
2763                 code->statements[stidx].o2.s1 = onfalse->m_code_start - stidx;
2764                 if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2765                     code->statements[stidx] = code->statements[stidx+1];
2766                     if (code->statements[stidx].o1.s1 < 0)
2767                         code->statements[stidx].o1.s1++;
2768                     code_pop_statement(code);
2769                 }
2770                 stmt.opcode = code->statements.back().opcode;
2771                 if (stmt.opcode == INSTR_GOTO ||
2772                     stmt.opcode == INSTR_IF ||
2773                     stmt.opcode == INSTR_IFNOT ||
2774                     stmt.opcode == INSTR_RETURN ||
2775                     stmt.opcode == INSTR_DONE)
2776                 {
2777                     /* no use jumping from here */
2778                     return true;
2779                 }
2780                 /* may have been generated in the previous recursive call */
2781                 stmt.opcode = INSTR_GOTO;
2782                 stmt.o1.s1 = onfalse->m_code_start - code->statements.size();
2783                 stmt.o2.s1 = 0;
2784                 stmt.o3.s1 = 0;
2785                 if (stmt.o1.s1 != 1)
2786                     code_push_statement(code, &stmt, instr->m_context);
2787                 return true;
2788             }
2789             else if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2790                 code->statements[stidx] = code->statements[stidx+1];
2791                 if (code->statements[stidx].o1.s1 < 0)
2792                     code->statements[stidx].o1.s1++;
2793                 code_pop_statement(code);
2794             }
2795             /* if not, generate now */
2796             return gen_blocks_recursive(code, func, onfalse);
2797         }
2798
2799         if ( (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8)
2800            || instr->m_opcode == VINSTR_NRCALL)
2801         {
2802             size_t p, first;
2803             ir_value *retvalue;
2804
2805             first = instr->m_params.size();
2806             if (first > 8)
2807                 first = 8;
2808             for (p = 0; p < first; ++p)
2809             {
2810                 ir_value *param = instr->m_params[p];
2811                 if (param->m_callparam)
2812                     continue;
2813
2814                 stmt.opcode = INSTR_STORE_F;
2815                 stmt.o3.u1 = 0;
2816
2817                 if (param->m_vtype == TYPE_FIELD)
2818                     stmt.opcode = field_store_instr[param->m_fieldtype];
2819                 else if (param->m_vtype == TYPE_NIL)
2820                     stmt.opcode = INSTR_STORE_V;
2821                 else
2822                     stmt.opcode = type_store_instr[param->m_vtype];
2823                 stmt.o1.u1 = param->codeAddress();
2824                 stmt.o2.u1 = OFS_PARM0 + 3 * p;
2825
2826                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2827                     /* fetch 3 separate floats */
2828                     stmt.opcode = INSTR_STORE_F;
2829                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2830                     code_push_statement(code, &stmt, instr->m_context);
2831                     stmt.o2.u1++;
2832                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2833                     code_push_statement(code, &stmt, instr->m_context);
2834                     stmt.o2.u1++;
2835                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2836                     code_push_statement(code, &stmt, instr->m_context);
2837                 }
2838                 else
2839                     code_push_statement(code, &stmt, instr->m_context);
2840             }
2841             /* Now handle extparams */
2842             first = instr->m_params.size();
2843             for (; p < first; ++p)
2844             {
2845                 ir_builder *ir = func->m_owner;
2846                 ir_value *param = instr->m_params[p];
2847                 ir_value *targetparam;
2848
2849                 if (param->m_callparam)
2850                     continue;
2851
2852                 if (p-8 >= ir->m_extparams.size())
2853                     ir->generateExtparam();
2854
2855                 targetparam = ir->m_extparams[p-8];
2856
2857                 stmt.opcode = INSTR_STORE_F;
2858                 stmt.o3.u1 = 0;
2859
2860                 if (param->m_vtype == TYPE_FIELD)
2861                     stmt.opcode = field_store_instr[param->m_fieldtype];
2862                 else if (param->m_vtype == TYPE_NIL)
2863                     stmt.opcode = INSTR_STORE_V;
2864                 else
2865                     stmt.opcode = type_store_instr[param->m_vtype];
2866                 stmt.o1.u1 = param->codeAddress();
2867                 stmt.o2.u1 = targetparam->codeAddress();
2868                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2869                     /* fetch 3 separate floats */
2870                     stmt.opcode = INSTR_STORE_F;
2871                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2872                     code_push_statement(code, &stmt, instr->m_context);
2873                     stmt.o2.u1++;
2874                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2875                     code_push_statement(code, &stmt, instr->m_context);
2876                     stmt.o2.u1++;
2877                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2878                     code_push_statement(code, &stmt, instr->m_context);
2879                 }
2880                 else
2881                     code_push_statement(code, &stmt, instr->m_context);
2882             }
2883
2884             stmt.opcode = INSTR_CALL0 + instr->m_params.size();
2885             if (stmt.opcode > INSTR_CALL8)
2886                 stmt.opcode = INSTR_CALL8;
2887             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2888             stmt.o2.u1 = 0;
2889             stmt.o3.u1 = 0;
2890             code_push_statement(code, &stmt, instr->m_context);
2891
2892             retvalue = instr->_m_ops[0];
2893             if (retvalue && retvalue->m_store != store_return &&
2894                 (retvalue->m_store == store_global || retvalue->m_life.size()))
2895             {
2896                 /* not to be kept in OFS_RETURN */
2897                 if (retvalue->m_vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS))
2898                     stmt.opcode = field_store_instr[retvalue->m_fieldtype];
2899                 else
2900                     stmt.opcode = type_store_instr[retvalue->m_vtype];
2901                 stmt.o1.u1 = OFS_RETURN;
2902                 stmt.o2.u1 = retvalue->codeAddress();
2903                 stmt.o3.u1 = 0;
2904                 code_push_statement(code, &stmt, instr->m_context);
2905             }
2906             continue;
2907         }
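
        /* Summary of the calling convention handled above: the first 8
         * arguments are copied into the builtin parameter globals
         * (OFS_PARM0 + 3*p, each 3 globals wide) unless they already live
         * there, any further arguments go into generated EXTPARM globals,
         * and a return value is copied out of OFS_RETURN unless it can
         * stay there.
         */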
2908
2909         if (instr->m_opcode == INSTR_STATE) {
2910             stmt.opcode = instr->m_opcode;
2911             if (instr->_m_ops[0])
2912                 stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2913             if (instr->_m_ops[1])
2914                 stmt.o2.u1 = instr->_m_ops[1]->codeAddress();
2915             stmt.o3.u1 = 0;
2916             code_push_statement(code, &stmt, instr->m_context);
2917             continue;
2918         }
2919
2920         stmt.opcode = instr->m_opcode;
2921         stmt.o1.u1 = 0;
2922         stmt.o2.u1 = 0;
2923         stmt.o3.u1 = 0;
2924
2925         /* This is the general order of operands */
2926         if (instr->_m_ops[0])
2927             stmt.o3.u1 = instr->_m_ops[0]->codeAddress();
2928
2929         if (instr->_m_ops[1])
2930             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2931
2932         if (instr->_m_ops[2])
2933             stmt.o2.u1 = instr->_m_ops[2]->codeAddress();
2934
2935         if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
2936         {
2937             stmt.o1.u1 = stmt.o3.u1;
2938             stmt.o3.u1 = 0;
2939         }
2940         else if ((stmt.opcode >= INSTR_STORE_F &&
2941                   stmt.opcode <= INSTR_STORE_FNC) ||
2942                  (stmt.opcode >= INSTR_STOREP_F &&
2943                   stmt.opcode <= INSTR_STOREP_FNC))
2944         {
2945             /* 2-operand instructions with A -> B */
2946             stmt.o2.u1 = stmt.o3.u1;
2947             stmt.o3.u1 = 0;
2948
2949             /* tiny optimization, don't output
2950              * STORE a, a
2951              */
2952             if (stmt.o2.u1 == stmt.o1.u1 &&
2953                 OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
2954             {
2955                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
2956                 continue;
2957             }
2958         }
2959         code_push_statement(code, &stmt, instr->m_context);
2960     }
2961     return true;
2962 }
2963
2964 static bool gen_function_code(code_t *code, ir_function *self)
2965 {
2966     ir_block *block;
2967     prog_section_statement_t stmt, *retst;
2968
2969     /* Starting from the entry point, we generate blocks "as they come"
2970      * for now. Dead blocks will obviously not be translated.
2971      */
2972     if (self->m_blocks.empty()) {
2973         irerror(self->m_context, "Function '%s' declared without body.", self->m_name.c_str());
2974         return false;
2975     }
2976
2977     block = self->m_blocks[0].get();
2978     if (block->m_generated)
2979         return true;
2980
2981     if (!gen_blocks_recursive(code, self, block)) {
2982         irerror(self->m_context, "failed to generate blocks for '%s'", self->m_name.c_str());
2983         return false;
2984     }
2985
2986     /* code_write and qcvm -disasm need to know that the function ends here */
2987     retst = &code->statements.back();
2988     if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) &&
2989         self->m_outtype == TYPE_VOID &&
2990         retst->opcode == INSTR_RETURN &&
2991         !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1)
2992     {
2993         retst->opcode = INSTR_DONE;
2994         ++opts_optimizationcount[OPTIM_VOID_RETURN];
2995     } else {
2996         lex_ctx_t last;
2997
2998         stmt.opcode = INSTR_DONE;
2999         stmt.o1.u1  = 0;
3000         stmt.o2.u1  = 0;
3001         stmt.o3.u1  = 0;
3002         last.line   = code->linenums.back();
3003         last.column = code->columnnums.back();
3004
3005         code_push_statement(code, &stmt, last);
3006     }
3007     return true;
3008 }
3009
3010 qcint_t ir_builder::filestring(const char *filename)
3011 {
3012     /* NOTE: filename pointers are copied, we never strdup them;
3013      * the stored names are compared to find an already registered string.
3014      */
3015     qcint_t  str;
3016
3017     for (size_t i = 0; i != m_filenames.size(); ++i) {
3018         if (!strcmp(m_filenames[i], filename))
3019             return i;
3020     }
3021
3022     str = code_genstring(m_code.get(), filename);
3023     m_filenames.push_back(filename);
3024     m_filestrings.push_back(str);
3025     return str;
3026 }
3027
3028 bool ir_builder::generateGlobalFunction(ir_value *global)
3029 {
3030     prog_section_function_t fun;
3031     ir_function            *irfun;
3032
3033     size_t i;
3034
3035     if (!global->m_hasvalue || (!global->m_constval.vfunc)) {
3036         irerror(global->m_context, "Invalid state of function-global: not constant: %s", global->m_name.c_str());
3037         return false;
3038     }
3039
3040     irfun = global->m_constval.vfunc;
3041     fun.name = global->m_code.name;
3042     fun.file = filestring(global->m_context.file);
3043     fun.profile = 0; /* always 0 */
3044     fun.nargs = vec_size(irfun->m_params);
3045     if (fun.nargs > 8)
3046         fun.nargs = 8;
3047
3048     for (i = 0; i < 8; ++i) {
3049         if ((int32_t)i >= fun.nargs)
3050             fun.argsize[i] = 0;
3051         else
3052             fun.argsize[i] = type_sizeof_[irfun->m_params[i]];
3053     }
3054
3055     fun.firstlocal = 0;
3056     fun.locals = irfun->m_allocated_locals;
3057
3058     if (irfun->m_builtin)
3059         fun.entry = irfun->m_builtin+1;
3060     else {
3061         irfun->m_code_function_def = m_code->functions.size();
3062         fun.entry = m_code->statements.size();
3063     }
3064
3065     m_code->functions.push_back(fun);
3066     return true;
3067 }
3068
3069 ir_value* ir_builder::generateExtparamProto()
3070 {
3071     char      name[128];
3072
3073     util_snprintf(name, sizeof(name), "EXTPARM#%i", (int)(m_extparam_protos.size()));
3074     ir_value *global = new ir_value(name, store_global, TYPE_VECTOR);
3075     m_extparam_protos.emplace_back(global);
3076
3077     return global;
3078 }
3079
3080 void ir_builder::generateExtparam()
3081 {
3082     prog_section_def_t def;
3083     ir_value          *global;
3084
3085     if (m_extparam_protos.size() < m_extparams.size()+1)
3086         global = generateExtparamProto();
3087     else
3088         global = m_extparam_protos[m_extparams.size()].get();
3089
3090     def.name = code_genstring(m_code.get(), global->m_name.c_str());
3091     def.type = TYPE_VECTOR;
3092     def.offset = m_code->globals.size();
3093
3094     m_code->defs.push_back(def);
3095
3096     global->setCodeAddress(def.offset);
3097
3098     m_code->globals.push_back(0);
3099     m_code->globals.push_back(0);
3100     m_code->globals.push_back(0);
3101
3102     m_extparams.emplace_back(global);
3103 }
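
/* Each extra parameter thus occupies a fresh 3-wide (vector-sized) slot in
 * the globals section, with a matching def entry named "EXTPARM#<n>"; callers
 * store into these slots (see the extparam handling in gen_blocks_recursive)
 * and callees copy them into their locals in gen_function_extparam_copy.
 */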
3104
3105 static bool gen_function_extparam_copy(code_t *code, ir_function *self)
3106 {
3107     ir_builder *ir = self->m_owner;
3108
3109     size_t numparams = vec_size(self->m_params);
3110     if (!numparams)
3111         return true;
3112
3113     prog_section_statement_t stmt;
3114     stmt.opcode = INSTR_STORE_F;
3115     stmt.o3.s1 = 0;
3116     for (size_t i = 8; i < numparams; ++i) {
3117         size_t ext = i - 8;
3118         if (ext >= ir->m_extparams.size())
3119             ir->generateExtparam();
3120
3121         ir_value *ep = ir->m_extparams[ext];
3122
3123         stmt.opcode = type_store_instr[self->m_locals[i]->m_vtype];
3124         if (self->m_locals[i]->m_vtype == TYPE_FIELD &&
3125             self->m_locals[i]->m_fieldtype == TYPE_VECTOR)
3126         {
3127             stmt.opcode = INSTR_STORE_V;
3128         }
3129         stmt.o1.u1 = ep->codeAddress();
3130         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3131         code_push_statement(code, &stmt, self->m_context);
3132     }
3133
3134     return true;
3135 }
3136
3137 static bool gen_function_varargs_copy(code_t *code, ir_function *self)
3138 {
3139     size_t i, ext, numparams, maxparams;
3140
3141     ir_builder *ir = self->m_owner;
3142     ir_value   *ep;
3143     prog_section_statement_t stmt;
3144
3145     numparams = vec_size(self->m_params);
3146     if (!numparams)
3147         return true;
3148
3149     stmt.opcode = INSTR_STORE_V;
3150     stmt.o3.s1 = 0;
3151     maxparams = numparams + self->m_max_varargs;
3152     for (i = numparams; i < maxparams; ++i) {
3153         if (i < 8) {
3154             stmt.o1.u1 = OFS_PARM0 + 3*i;
3155             stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3156             code_push_statement(code, &stmt, self->m_context);
3157             continue;
3158         }
3159         ext = i - 8;
3160         while (ext >= ir->m_extparams.size())
3161             ir->generateExtparam();
3162
3163         ep = ir->m_extparams[ext];
3164
3165         stmt.o1.u1 = ep->codeAddress();
3166         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3167         code_push_statement(code, &stmt, self->m_context);
3168     }
3169
3170     return true;
3171 }
3172
3173 bool ir_builder::generateFunctionLocals(ir_value *global)
3174 {
3175     prog_section_function_t *def;
3176     ir_function             *irfun;
3177     uint32_t                 firstlocal, firstglobal;
3178
3179     irfun = global->m_constval.vfunc;
3180     def   = &m_code->functions[irfun->m_code_function_def];
3181
3182     if (OPTS_OPTION_BOOL(OPTION_G) ||
3183         !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS)        ||
3184         (irfun->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3185     {
3186         firstlocal = def->firstlocal = m_code->globals.size();
3187     } else {
3188         firstlocal = def->firstlocal = m_first_common_local;
3189         ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
3190     }
3191
3192     firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? m_first_common_globaltemp : firstlocal);
3193
3194     for (size_t i = m_code->globals.size(); i < firstlocal + irfun->m_allocated_locals; ++i)
3195         m_code->globals.push_back(0);
3196
3197     for (auto& lp : irfun->m_locals) {
3198         ir_value *v = lp.get();
3199         if (v->m_locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
3200             v->setCodeAddress(firstlocal + v->m_code.local);
3201             if (!generateGlobal(v, true)) {
3202                 irerror(v->m_context, "failed to generate local %s", v->m_name.c_str());
3203                 return false;
3204             }
3205         }
3206         else
3207             v->setCodeAddress(firstglobal + v->m_code.local);
3208     }
3209     for (auto& vp : irfun->m_values) {
3210         ir_value *v = vp.get();
3211         if (v->m_callparam)
3212             continue;
3213         if (v->m_locked)
3214             v->setCodeAddress(firstlocal + v->m_code.local);
3215         else
3216             v->setCodeAddress(firstglobal + v->m_code.local);
3217     }
3218     return true;
3219 }
3220
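     /* Emit the body of a function global: set its entry point, lay out its
      * locals, emit the extparam/vararg copy prologue and then the statement
      * code. Builtins and bodiless function pointers generate nothing.
      */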
3221 bool ir_builder::generateGlobalFunctionCode(ir_value *global)
3222 {
3223     prog_section_function_t *fundef;
3224     ir_function             *irfun;
3225
3226     irfun = global->m_constval.vfunc;
3227     if (!irfun) {
3228         if (global->m_cvq == CV_NONE) {
3229             if (irwarning(global->m_context, WARN_IMPLICIT_FUNCTION_POINTER,
3230                           "function `%s` has no body and in QC implicitly becomes a function-pointer",
3231                           global->m_name.c_str()))
3232             {
3233                 /* Not bailing out just now. If this happens a lot you don't want to have
3234                  * to rerun gmqcc for each such function.
3235                  */
3236
3237                 /* return false; */
3238             }
3239         }
3240         /* this was a function pointer, don't generate code for those */
3241         return true;
3242     }
3243
3244     if (irfun->m_builtin)
3245         return true;
3246
3247     /*
3248      * If there is no definition and the function is erasable, we can skip
3249      * emitting it altogether.
3250      */
3251     if (global->m_flags & IR_FLAG_ERASABLE && irfun->m_code_function_def < 0) {
3252         return true;
3253     }
3254
3255     if (irfun->m_code_function_def < 0) {
3256         irerror(irfun->m_context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->m_name.c_str());
3257         return false;
3258     }
3259     fundef = &m_code->functions[irfun->m_code_function_def];
3260
3261     fundef->entry = m_code->statements.size();
3262     if (!generateFunctionLocals(global)) {
3263         irerror(irfun->m_context, "Failed to generate locals for function %s", irfun->m_name.c_str());
3264         return false;
3265     }
3266     if (!gen_function_extparam_copy(m_code.get(), irfun)) {
3267         irerror(irfun->m_context, "Failed to generate extparam-copy code for function %s", irfun->m_name.c_str());
3268         return false;
3269     }
3270     if (irfun->m_max_varargs && !gen_function_varargs_copy(m_code.get(), irfun)) {
3271         irerror(irfun->m_context, "Failed to generate vararg-copy code for function %s", irfun->m_name.c_str());
3272         return false;
3273     }
3274     if (!gen_function_code(m_code.get(), irfun)) {
3275         irerror(irfun->m_context, "Failed to generate code for function %s", irfun->m_name.c_str());
3276         return false;
3277     }
3278     return true;
3279 }
3280
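     /* Emit the three float component defs (name_x, name_y, name_z) for a
      * vector def; skipped for immediates ('#' names) and when the
      * SINGLE_VECTOR_DEFS flag is set.
      */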
3281 static void gen_vector_defs(code_t *code, prog_section_def_t def, const char *name)
3282 {
3283     char  *component;
3284     size_t len, i;
3285
3286     if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3287         return;
3288
3289     def.type = TYPE_FLOAT;
3290
3291     len = strlen(name);
3292
3293     component = (char*)mem_a(len+3);
3294     memcpy(component, name, len);
3295     len += 2;
3296     component[len-0] = 0;
3297     component[len-2] = '_';
3298
3299     component[len-1] = 'x';
3300
3301     for (i = 0; i < 3; ++i) {
3302         def.name = code_genstring(code, component);
3303         code->defs.push_back(def);
3304         def.offset++;
3305         component[len-1]++;
3306     }
3307
3308     mem_d(component);
3309 }
3310
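     /* Same as gen_vector_defs, but emits the three component entries for a
      * vector entity field.
      */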
3311 static void gen_vector_fields(code_t *code, prog_section_field_t fld, const char *name)
3312 {
3313     char  *component;
3314     size_t len, i;
3315
3316     if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3317         return;
3318
3319     fld.type = TYPE_FLOAT;
3320
3321     len = strlen(name);
3322
3323     component = (char*)mem_a(len+3);
3324     memcpy(component, name, len);
3325     len += 2;
3326     component[len-0] = 0;
3327     component[len-2] = '_';
3328
3329     component[len-1] = 'x';
3330
3331     for (i = 0; i < 3; ++i) {
3332         fld.name = code_genstring(code, component);
3333         code->fields.push_back(fld);
3334         fld.offset++;
3335         component[len-1]++;
3336     }
3337
3338     mem_d(component);
3339 }
3340
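     /* Emit the def and data for a single global. Locals only ever get a def
      * (and only with OPTION_G); full data generation below depends on the
      * value's type.
      */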
3341 bool ir_builder::generateGlobal(ir_value *global, bool islocal)
3342 {
3343     size_t             i;
3344     int32_t           *iptr;
3345     prog_section_def_t def;
3346     bool               pushdef = opts.optimizeoff;
3347
3348     /* we don't generate split-vectors */
3349     if (global->m_vtype == TYPE_VECTOR && (global->m_flags & IR_FLAG_SPLIT_VECTOR))
3350         return true;
3351
3352     def.type = global->m_vtype;
3353     def.offset = m_code->globals.size();
3354     def.name = 0;
3355     if (OPTS_OPTION_BOOL(OPTION_G) || !islocal)
3356     {
3357         pushdef = true;
3358
3359         /*
3360          * if the global is erasable and never read, skip emitting
3361          * it entirely.
3362          */
3363         if (global->m_flags & IR_FLAG_ERASABLE && global->m_reads.empty()) {
3364             return true;
3365         }
3366
3367         if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) &&
3368             !(global->m_flags & IR_FLAG_INCLUDE_DEF) &&
3369             (global->m_name[0] == '#' || global->m_cvq == CV_CONST))
3370         {
3371             pushdef = false;
3372         }
3373
3374         if (pushdef) {
3375             if (global->m_name[0] == '#') {
3376                 if (!m_str_immediate)
3377                     m_str_immediate = code_genstring(m_code.get(), "IMMEDIATE");
3378                 def.name = global->m_code.name = m_str_immediate;
3379             }
3380             else
3381                 def.name = global->m_code.name = code_genstring(m_code.get(), global->m_name.c_str());
3382         }
3383         else
3384             def.name   = 0;
3385         if (islocal) {
3386             def.offset = global->codeAddress();
3387             m_code->defs.push_back(def);
3388             if (global->m_vtype == TYPE_VECTOR)
3389                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3390             else if (global->m_vtype == TYPE_FIELD && global->m_fieldtype == TYPE_VECTOR)
3391                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3392             return true;
3393         }
3394     }
3395     if (islocal)
3396         return true;
3397
3398     switch (global->m_vtype)
3399     {
3400     case TYPE_VOID:
3401         if (0 == global->m_name.compare("end_sys_globals")) {
3402             // TODO: remember this point... all the defs before this one
3403             // should be checksummed and added to progdefs.h when we generate it.
3404         }
3405         else if (0 == global->m_name.compare("end_sys_fields")) {
3406             // TODO: same as above but for entity-fields rather than globals
3407         }
3408         else if(irwarning(global->m_context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`",
3409                           global->m_name.c_str()))
3410         {
3411             /* Not bailing out */
3412             /* return false; */
3413         }
3414         /* I'd argue setting it to 0 is sufficient, but maybe some depend on knowing how far
3415          * the system fields actually go? Though the engine knows this anyway...
3416          * Maybe this could be an -f option.
3417          * fteqcc creates data of size 1 for end_sys_*, so let's do the same.
3418          */
3419         global->setCodeAddress(m_code->globals.size());
3420         m_code->globals.push_back(0);
3421         /* Add the def */
3422         if (pushdef)
3423             m_code->defs.push_back(def);
3424         return true;
3425     case TYPE_POINTER:
3426         if (pushdef)
3427             m_code->defs.push_back(def);
3428         return gen_global_pointer(m_code.get(), global);
3429     case TYPE_FIELD:
3430         if (pushdef) {
3431             m_code->defs.push_back(def);
3432             if (global->m_fieldtype == TYPE_VECTOR)
3433                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3434         }
3435         return gen_global_field(m_code.get(), global);
3436     case TYPE_ENTITY:
3437         /* fall through */
3438     case TYPE_FLOAT:
3439     {
3440         global->setCodeAddress(m_code->globals.size());
3441         if (global->m_hasvalue) {
3442             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3443                 return true;
3444             iptr = (int32_t*)&global->m_constval.ivec[0];
3445             m_code->globals.push_back(*iptr);
3446         } else {
3447             m_code->globals.push_back(0);
3448         }
3449         if (!islocal && global->m_cvq != CV_CONST)
3450             def.type |= DEF_SAVEGLOBAL;
3451         if (pushdef)
3452             m_code->defs.push_back(def);
3453
3454         return global->m_code.globaladdr >= 0;
3455     }
3456     case TYPE_STRING:
3457     {
3458         global->setCodeAddress(m_code->globals.size());
3459         if (global->m_hasvalue) {
3460             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3461                 return true;
3462             uint32_t load = code_genstring(m_code.get(), global->m_constval.vstring);
3463             m_code->globals.push_back(load);
3464         } else {
3465             m_code->globals.push_back(0);
3466         }
3467         if (!islocal && global->m_cvq != CV_CONST)
3468             def.type |= DEF_SAVEGLOBAL;
3469         if (pushdef)
3470             m_code->defs.push_back(def);
3471         return global->m_code.globaladdr >= 0;
3472     }
3473     case TYPE_VECTOR:
3474     {
3475         size_t d;
3476         global->setCodeAddress(m_code->globals.size());
3477         if (global->m_hasvalue) {
3478             iptr = (int32_t*)&global->m_constval.ivec[0];
3479             m_code->globals.push_back(iptr[0]);
3480             if (global->m_code.globaladdr < 0)
3481                 return false;
3482             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3483                 m_code->globals.push_back(iptr[d]);
3484             }
3485         } else {
3486             m_code->globals.push_back(0);
3487             if (global->m_code.globaladdr < 0)
3488                 return false;
3489             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3490                 m_code->globals.push_back(0);
3491             }
3492         }
3493         if (!islocal && global->m_cvq != CV_CONST)
3494             def.type |= DEF_SAVEGLOBAL;
3495
3496         if (pushdef) {
3497             m_code->defs.push_back(def);
3498             def.type &= ~DEF_SAVEGLOBAL;
3499             gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3500         }
3501         return global->m_code.globaladdr >= 0;
3502     }
3503     case TYPE_FUNCTION:
3504         global->setCodeAddress(m_code->globals.size());
3505         if (!global->m_hasvalue) {
3506             m_code->globals.push_back(0);
3507             if (global->m_code.globaladdr < 0)
3508                 return false;
3509         } else {
3510             m_code->globals.push_back(m_code->functions.size());
3511             if (!generateGlobalFunction(global))
3512                 return false;
3513         }
3514         if (!islocal && global->m_cvq != CV_CONST)
3515             def.type |= DEF_SAVEGLOBAL;
3516         if (pushdef)
3517             m_code->defs.push_back(def);
3518         return true;
3519     case TYPE_VARIANT:
3520         /* assume biggest type */
3521         global->setCodeAddress(m_code->globals.size());
3522         m_code->globals.push_back(0);
3523         for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i)
3524             m_code->globals.push_back(0);
3525         return true;
3526     default:
3527         /* refuse to create structs, unions, arrays or any other fancy business. */
3528         irerror(global->m_context, "Invalid type for global variable `%s`: %s",
3529                 global->m_name.c_str(), type_name[global->m_vtype]);
3530         return false;
3531     }
3532 }
3533
3534 static GMQCC_INLINE void ir_builder_prepare_field(code_t *code, ir_value *field)
3535 {
3536     field->m_code.fieldaddr = code_alloc_field(code, type_sizeof_[field->m_fieldtype]);
3537 }
3538
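     /* Emit an entity field: a def (dot-prefixed under the gmqcc standard),
      * a field table entry, and a global holding the field offset.
      */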
3539 static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
3540 {
3541     prog_section_def_t def;
3542     prog_section_field_t fld;
3543
3544     (void)self;
3545
3546     def.type   = (uint16_t)field->m_vtype;
3547     def.offset = (uint16_t)self->m_code->globals.size();
3548
3549     /* create a global named the same as the field */
3550     if (OPTS_OPTION_U32(OPTION_STANDARD) == COMPILER_GMQCC) {
3551         /* in our standard, the global gets a dot prefix */
3552         size_t len = field->m_name.length();
3553         char name[1024];
3554
3555         /* we really don't want to have to allocate this, and 1024
3556          * bytes is more than enough for a variable/field name
3557          */
3558         if (len+2 >= sizeof(name)) {
3559             irerror(field->m_context, "invalid field name size: %u", (unsigned int)len);
3560             return false;
3561         }
3562
3563         name[0] = '.';
3564         memcpy(name+1, field->m_name.c_str(), len); // no strncpy - we used strlen above
3565         name[len+1] = 0;
3566
3567         def.name = code_genstring(self->m_code.get(), name);
3568         fld.name = def.name + 1; /* we reuse that string table entry */
3569     } else {
3570         /* in plain QC, there cannot be a global with the same name,
3571          * and so we also name the global the same.
3572          * FIXME: fteqcc should create such a global as well;
3573          * check whether it actually uses the same name. It probably does.
3574          */
3575         def.name = code_genstring(self->m_code.get(), field->m_name.c_str());
3576         fld.name = def.name;
3577     }
3578
3579     field->m_code.name = def.name;
3580
3581     self->m_code->defs.push_back(def);
3582
3583     fld.type = field->m_fieldtype;
3584
3585     if (fld.type == TYPE_VOID) {
3586         irerror(field->m_context, "field is missing a type: %s - don't know its size", field->m_name.c_str());
3587         return false;
3588     }
3589
3590     fld.offset = field->m_code.fieldaddr;
3591
3592     self->m_code->fields.push_back(fld);
3593
3594     field->setCodeAddress(self->m_code->globals.size());
3595     self->m_code->globals.push_back(fld.offset);
3596     if (fld.type == TYPE_VECTOR) {
3597         self->m_code->globals.push_back(fld.offset+1);
3598         self->m_code->globals.push_back(fld.offset+2);
3599     }
3600
3601     if (field->m_fieldtype == TYPE_VECTOR) {
3602         gen_vector_defs  (self->m_code.get(), def, field->m_name.c_str());
3603         gen_vector_fields(self->m_code.get(), fld, field->m_name.c_str());
3604     }
3605
3606     return field->m_code.globaladdr >= 0;
3607 }
3608
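     /* Collect constant float globals and float immediates so that split
      * vectors can reuse them as components instead of emitting new ones.
      */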
3609 static void ir_builder_collect_reusables(ir_builder *builder) {
3610     std::vector<ir_value*> reusables;
3611
3612     for (auto& gp : builder->m_globals) {
3613         ir_value *value = gp.get();
3614         if (value->m_vtype != TYPE_FLOAT || !value->m_hasvalue)
3615             continue;
3616         if (value->m_cvq == CV_CONST || (value->m_name.length() >= 1 && value->m_name[0] == '#'))
3617             reusables.emplace_back(value);
3618     }
3619     builder->m_const_floats = std::move(reusables);
3620 }
3621
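     /* Split a constant vector immediate that is only ever read as a call
      * parameter into three float components, reusing existing constant
      * floats where possible.
      */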
3622 static void ir_builder_split_vector(ir_builder *self, ir_value *vec) {
3623     ir_value* found[3] = { nullptr, nullptr, nullptr };
3624
3625     // must not be written to
3626     if (vec->m_writes.size())
3627         return;
3628     // must not be trying to access individual members
3629     if (vec->m_members[0] || vec->m_members[1] || vec->m_members[2])
3630         return;
3631     // should be actually used otherwise it won't be generated anyway
3632     if (vec->m_reads.empty())
3633         return;
3634     //size_t count = vec->m_reads.size();
3635     //if (!count)
3636     //    return;
3637
3638     // may only be used directly as function parameters; if we find any other use, bail out
3639     for (ir_instr *user : vec->m_reads) {
3640         // we only split vectors that are used directly as parameters to a call
3641         if ((user->m_opcode < INSTR_CALL0 || user->m_opcode > INSTR_CALL8) && user->m_opcode != VINSTR_NRCALL)
3642             return;
3643     }
3644
3645     vec->m_flags |= IR_FLAG_SPLIT_VECTOR;
3646
3647     // find existing floats making up the split
3648     for (ir_value *c : self->m_const_floats) {
3649         if (!found[0] && c->m_constval.vfloat == vec->m_constval.vvec.x)
3650             found[0] = c;
3651         if (!found[1] && c->m_constval.vfloat == vec->m_constval.vvec.y)
3652             found[1] = c;
3653         if (!found[2] && c->m_constval.vfloat == vec->m_constval.vvec.z)
3654             found[2] = c;
3655         if (found[0] && found[1] && found[2])
3656             break;
3657     }
3658
3659     // generate floats for not yet found components
3660     if (!found[0])
3661         found[0] = self->literalFloat(vec->m_constval.vvec.x, true);
3662     if (!found[1]) {
3663         if (vec->m_constval.vvec.y == vec->m_constval.vvec.x)
3664             found[1] = found[0];
3665         else
3666             found[1] = self->literalFloat(vec->m_constval.vvec.y, true);
3667     }
3668     if (!found[2]) {
3669         if (vec->m_constval.vvec.z == vec->m_constval.vvec.x)
3670             found[2] = found[0];
3671         else if (vec->m_constval.vvec.z == vec->m_constval.vvec.y)
3672             found[2] = found[1];
3673         else
3674             found[2] = self->literalFloat(vec->m_constval.vvec.z, true);
3675     }
3676
3677     // the .members array should be safe to use here
3678     vec->m_members[0] = found[0];
3679     vec->m_members[1] = found[1];
3680     vec->m_members[2] = found[2];
3681
3682     // register the readers for these floats
3683     found[0]->m_reads.insert(found[0]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3684     found[1]->m_reads.insert(found[1]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3685     found[2]->m_reads.insert(found[2]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3686 }
3687
3688 static void ir_builder_split_vectors(ir_builder *self) {
3689     // member values may be added to self->m_globals during this operation, but
3690     // no new vectors will be added; iterate by index because C++ iterators
3691     // would be invalidated by the insertions
3692     const size_t count = self->m_globals.size();
3693     for (size_t i = 0; i != count; ++i) {
3694         ir_value *v = self->m_globals[i].get();
3695         if (v->m_vtype != TYPE_VECTOR || !v->m_name.length() || v->m_name[0] != '#')
3696             continue;
3697         ir_builder_split_vector(self, v);
3698     }
3699 }
3700
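     /* Top-level code generation: optionally split vector parameters, lay
      * out fields and globals, reserve the nil/vinstr-temp/globaltemp/local
      * areas, emit function code, append a trailing DONE if needed and write
      * the output (plus the optional .lno file).
      */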
3701 bool ir_builder::generate(const char *filename)
3702 {
3703     prog_section_statement_t stmt;
3704     char  *lnofile = nullptr;
3705
3706     if (OPTS_FLAG(SPLIT_VECTOR_PARAMETERS)) {
3707         ir_builder_collect_reusables(this);
3708         if (!m_const_floats.empty())
3709             ir_builder_split_vectors(this);
3710     }
3711
3712     for (auto& fp : m_fields)
3713         ir_builder_prepare_field(m_code.get(), fp.get());
3714
3715     for (auto& gp : m_globals) {
3716         ir_value *global = gp.get();
3717         if (!generateGlobal(global, false)) {
3718             return false;
3719         }
3720         if (global->m_vtype == TYPE_FUNCTION) {
3721             ir_function *func = global->m_constval.vfunc;
3722             if (func && m_max_locals < func->m_allocated_locals &&
3723                 !(func->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3724             {
3725                 m_max_locals = func->m_allocated_locals;
3726             }
3727             if (func && m_max_globaltemps < func->m_globaltemps)
3728                 m_max_globaltemps = func->m_globaltemps;
3729         }
3730     }
3731
3732     for (auto& fp : m_fields) {
3733         if (!ir_builder_gen_field(this, fp.get()))
3734             return false;
3735     }
3736
3737     // generate nil
3738     m_nil->setCodeAddress(m_code->globals.size());
3739     m_code->globals.push_back(0);
3740     m_code->globals.push_back(0);
3741     m_code->globals.push_back(0);
3742
3743     // generate virtual-instruction temps
3744     for (size_t i = 0; i < IR_MAX_VINSTR_TEMPS; ++i) {
3745         m_vinstr_temp[i]->setCodeAddress(m_code->globals.size());
3746         m_code->globals.push_back(0);
3747         m_code->globals.push_back(0);
3748         m_code->globals.push_back(0);
3749     }
3750
3751     // generate global temps
3752     m_first_common_globaltemp = m_code->globals.size();
3753     m_code->globals.insert(m_code->globals.end(), m_max_globaltemps, 0);
3754     // FIXME:DELME:
3755     //for (size_t i = 0; i < m_max_globaltemps; ++i) {
3756     //    m_code->globals.push_back(0);
3757     //}
3758     // generate common locals
3759     m_first_common_local = m_code->globals.size();
3760     m_code->globals.insert(m_code->globals.end(), m_max_locals, 0);
3761     // FIXME:DELME:
3762     //for (i = 0; i < m_max_locals; ++i) {
3763     //    m_code->globals.push_back(0);
3764     //}
3765
3766     // generate function code
3767
3768     for (auto& gp : m_globals) {
3769         ir_value *global = gp.get();
3770         if (global->m_vtype == TYPE_FUNCTION) {
3771             if (!this->generateGlobalFunctionCode(global))
3772                 return false;
3773         }
3774     }
3775
3776     if (m_code->globals.size() >= 65536) {
3777         irerror(m_globals.back()->m_context,
3778             "This progs file would require more globals than the metadata can handle (%zu). Bailing out.",
3779             m_code->globals.size());
3780         return false;
3781     }
3782
3783     /* DP errors if the last instruction is not an INSTR_DONE. */
3784     if (m_code->statements.back().opcode != INSTR_DONE)
3785     {
3786         lex_ctx_t last;
3787
3788         stmt.opcode = INSTR_DONE;
3789         stmt.o1.u1  = 0;
3790         stmt.o2.u1  = 0;
3791         stmt.o3.u1  = 0;
3792         last.line   = m_code->linenums.back();
3793         last.column = m_code->columnnums.back();
3794
3795         code_push_statement(m_code.get(), &stmt, last);
3796     }
3797
3798     if (OPTS_OPTION_BOOL(OPTION_PP_ONLY))
3799         return true;
3800
3801     if (m_code->statements.size() != m_code->linenums.size()) {
3802         con_err("Linecounter wrong: %lu != %lu\n",
3803                 (unsigned long)m_code->statements.size(),
3804                 (unsigned long)m_code->linenums.size());
3805     } else if (OPTS_FLAG(LNO)) {
3806         char  *dot;
3807         size_t filelen = strlen(filename);
3808
3809         memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
3810         dot = strrchr(lnofile, '.');
3811         if (!dot) {
3812             vec_pop(lnofile);
3813         } else {
3814             vec_shrinkto(lnofile, dot - lnofile);
3815         }
3816         memcpy(vec_add(lnofile, 5), ".lno", 5);
3817     }
3818
3819     if (!code_write(m_code.get(), filename, lnofile)) {
3820         vec_free(lnofile);
3821         return false;
3822     }
3823
3824     vec_free(lnofile);
3825     return true;
3826 }
3827
3828 /***********************************************************************
3829  * IR debug dump functions
3830  */
3831
3832 #define IND_BUFSZ 1024
3833
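     /* Map a (possibly virtual) instruction opcode to a printable name for
      * the IR dump.
      */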
3834 static const char *qc_opname(int op)
3835 {
3836     if (op < 0) return "<INVALID>";
3837     if (op < VINSTR_END)
3838         return util_instr_str[op];
3839     switch (op) {
3840         case VINSTR_END:       return "END";
3841         case VINSTR_PHI:       return "PHI";
3842         case VINSTR_JUMP:      return "JUMP";
3843         case VINSTR_COND:      return "COND";
3844         case VINSTR_BITXOR:    return "BITXOR";
3845         case VINSTR_BITAND_V:  return "BITAND_V";
3846         case VINSTR_BITOR_V:   return "BITOR_V";
3847         case VINSTR_BITXOR_V:  return "BITXOR_V";
3848         case VINSTR_BITAND_VF: return "BITAND_VF";
3849         case VINSTR_BITOR_VF:  return "BITOR_VF";
3850         case VINSTR_BITXOR_VF: return "BITXOR_VF";
3851         case VINSTR_CROSS:     return "CROSS";
3852         case VINSTR_NEG_F:     return "NEG_F";
3853         case VINSTR_NEG_V:     return "NEG_V";
3854         default:               return "<UNK>";
3855     }
3856 }
3857
3858 void ir_builder::dump(int (*oprintf)(const char*, ...)) const
3859 {
3860     size_t i;
3861     char indent[IND_BUFSZ];
3862     indent[0] = '\t';
3863     indent[1] = 0;
3864
3865     oprintf("module %s\n", m_name.c_str());
3866     for (i = 0; i < m_globals.size(); ++i)
3867     {
3868         oprintf("global ");
3869         if (m_globals[i]->m_hasvalue)
3870             oprintf("%s = ", m_globals[i]->m_name.c_str());
3871         m_globals[i].get()->dump(oprintf);
3872         oprintf("\n");
3873     }
3874     for (i = 0; i < m_functions.size(); ++i)
3875         ir_function_dump(m_functions[i].get(), indent, oprintf);
3876     oprintf("endmodule %s\n", m_name.c_str());
3877 }
3878
3879 static const char *storenames[] = {
3880     "[global]", "[local]", "[param]", "[value]", "[return]"
3881 };
3882
3883 void ir_function_dump(ir_function *f, char *ind,
3884                       int (*oprintf)(const char*, ...))
3885 {
3886     size_t i;
3887     if (f->m_builtin != 0) {
3888         oprintf("%sfunction %s = builtin %i\n", ind, f->m_name.c_str(), -f->m_builtin);
3889         return;
3890     }
3891     oprintf("%sfunction %s\n", ind, f->m_name.c_str());
3892     util_strncat(ind, "\t", IND_BUFSZ-1);
3893     if (f->m_locals.size())
3894     {
3895         oprintf("%s%i locals:\n", ind, (int)f->m_locals.size());
3896         for (i = 0; i < f->m_locals.size(); ++i) {
3897             oprintf("%s\t", ind);
3898             f->m_locals[i].get()->dump(oprintf);
3899             oprintf("\n");
3900         }
3901     }
3902     oprintf("%sliferanges:\n", ind);
3903     for (i = 0; i < f->m_locals.size(); ++i) {
3904         const char *attr = "";
3905         size_t l, m;
3906         ir_value *v = f->m_locals[i].get();
3907         if (v->m_unique_life && v->m_locked)
3908             attr = "unique,locked ";
3909         else if (v->m_unique_life)
3910             attr = "unique ";
3911         else if (v->m_locked)
3912             attr = "locked ";
3913         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3914                 storenames[v->m_store],
3915                 attr, (v->m_callparam ? "callparam " : ""),
3916                 (int)v->m_code.local);
3917         if (v->m_life.empty())
3918             oprintf("[null]");
3919         for (l = 0; l < v->m_life.size(); ++l) {
3920             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3921         }
3922         oprintf("\n");
3923         for (m = 0; m < 3; ++m) {
3924             ir_value *vm = v->m_members[m];
3925             if (!vm)
3926                 continue;
3927             oprintf("%s\t%s: @%i ", ind, vm->m_name.c_str(), (int)vm->m_code.local);
3928             for (l = 0; l < vm->m_life.size(); ++l) {
3929                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3930             }
3931             oprintf("\n");
3932         }
3933     }
3934     for (i = 0; i < f->m_values.size(); ++i) {
3935         const char *attr = "";
3936         size_t l, m;
3937         ir_value *v = f->m_values[i].get();
3938         if (v->m_unique_life && v->m_locked)
3939             attr = "unique,locked ";
3940         else if (v->m_unique_life)
3941             attr = "unique ";
3942         else if (v->m_locked)
3943             attr = "locked ";
3944         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3945                 storenames[v->m_store],
3946                 attr, (v->m_callparam ? "callparam " : ""),
3947                 (int)v->m_code.local);
3948         if (v->m_life.empty())
3949             oprintf("[null]");
3950         for (l = 0; l < v->m_life.size(); ++l) {
3951             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3952         }
3953         oprintf("\n");
3954         for (m = 0; m < 3; ++m) {
3955             ir_value *vm = v->m_members[m];
3956             if (!vm)
3957                 continue;
3958             if (vm->m_unique_life && vm->m_locked)
3959                 attr = "unique,locked ";
3960             else if (vm->m_unique_life)
3961                 attr = "unique ";
3962             else if (vm->m_locked)
3963                 attr = "locked ";
3964             oprintf("%s\t%s: %s@%i ", ind, vm->m_name.c_str(), attr, (int)vm->m_code.local);
3965             for (l = 0; l < vm->m_life.size(); ++l) {
3966                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3967             }
3968             oprintf("\n");
3969         }
3970     }
3971     if (f->m_blocks.size())
3972     {
3973         oprintf("%slife passes: %i\n", ind, (int)f->m_run_id);
3974         for (i = 0; i < f->m_blocks.size(); ++i) {
3975             ir_block_dump(f->m_blocks[i].get(), ind, oprintf);
3976         }
3977
3978     }
3979     ind[strlen(ind)-1] = 0;
3980     oprintf("%sendfunction %s\n", ind, f->m_name.c_str());
3981 }
3982
3983 void ir_block_dump(ir_block* b, char *ind,
3984                    int (*oprintf)(const char*, ...))
3985 {
3986     size_t i;
3987     oprintf("%s:%s\n", ind, b->m_label.c_str());
3988     util_strncat(ind, "\t", IND_BUFSZ-1);
3989
3990     if (b->m_instr && b->m_instr[0])
3991         oprintf("%s (%i) [entry]\n", ind, (int)(b->m_instr[0]->m_eid-1));
3992     for (i = 0; i < vec_size(b->m_instr); ++i)
3993         ir_instr_dump(b->m_instr[i], ind, oprintf);
3994     ind[strlen(ind)-1] = 0;
3995 }
3996
3997 static void dump_phi(ir_instr *in, int (*oprintf)(const char*, ...))
3998 {
3999     oprintf("%s <- phi ", in->_m_ops[0]->m_name.c_str());
4000     for (auto &it : in->m_phi) {
4001         oprintf("([%s] : %s) ", it.from->m_label.c_str(),
4002                                 it.value->m_name.c_str());
4003     }
4004     oprintf("\n");
4005 }
4006
4007 void ir_instr_dump(ir_instr *in, char *ind,
4008                        int (*oprintf)(const char*, ...))
4009 {
4010     size_t i;
4011     const char *comma = nullptr;
4012
4013     oprintf("%s (%i) ", ind, (int)in->m_eid);
4014
4015     if (in->m_opcode == VINSTR_PHI) {
4016         dump_phi(in, oprintf);
4017         return;
4018     }
4019
4020     util_strncat(ind, "\t", IND_BUFSZ-1);
4021
4022     if (in->_m_ops[0] && (in->_m_ops[1] || in->_m_ops[2])) {
4023         in->_m_ops[0]->dump(oprintf);
4024         /* the outer condition already guarantees a source operand exists */
4025         oprintf(" <- ");
4026     }
4027     if (in->m_opcode == INSTR_CALL0 || in->m_opcode == VINSTR_NRCALL) {
4028         oprintf("CALL%i\t", (int)in->m_params.size());
4029     } else
4030         oprintf("%s\t", qc_opname(in->m_opcode));
4031
4032     if (in->_m_ops[0] && !(in->_m_ops[1] || in->_m_ops[2])) {
4033         in->_m_ops[0]->dump(oprintf);
4034         comma = ",\t";
4035     }
4036     else
4037     {
4038         for (i = 1; i != 3; ++i) {
4039             if (in->_m_ops[i]) {
4040                 if (comma)
4041                     oprintf(comma);
4042                 in->_m_ops[i]->dump(oprintf);
4043                 comma = ",\t";
4044             }
4045         }
4046     }
4047     if (in->m_bops[0]) {
4048         if (comma)
4049             oprintf(comma);
4050         oprintf("[%s]", in->m_bops[0]->m_label.c_str());
4051         comma = ",\t";
4052     }
4053     if (in->m_bops[1])
4054         oprintf("%s[%s]", comma, in->m_bops[1]->m_label.c_str());
4055     if (in->m_params.size()) {
4056         oprintf("\tparams: ");
4057         for (auto &it : in->m_params)
4058             oprintf("%s, ", it->m_name.c_str());
4059     }
4060     oprintf("\n");
4061     ind[strlen(ind)-1] = 0;
4062 }
4063
4064 static void ir_value_dump_string(const char *str, int (*oprintf)(const char*, ...))
4065 {
4066     oprintf("\"");
4067     for (; *str; ++str) {
4068         switch (*str) {
4069             case '\n': oprintf("\\n"); break;
4070             case '\r': oprintf("\\r"); break;
4071             case '\t': oprintf("\\t"); break;
4072             case '\v': oprintf("\\v"); break;
4073             case '\f': oprintf("\\f"); break;
4074             case '\b': oprintf("\\b"); break;
4075             case '\a': oprintf("\\a"); break;
4076             case '\\': oprintf("\\\\"); break;
4077             case '"': oprintf("\\\""); break;
4078             default: oprintf("%c", *str); break;
4079         }
4080     }
4081     oprintf("\"");
4082 }
4083
4084 void ir_value::dump(int (*oprintf)(const char*, ...)) const
4085 {
4086     if (m_hasvalue) {
4087         switch (m_vtype) {
4088             default:
4089             case TYPE_VOID:
4090                 oprintf("(void)");
4091                 break;
4092             case TYPE_FUNCTION:
4093                 oprintf("fn:%s", m_name.c_str());
4094                 break;
4095             case TYPE_FLOAT:
4096                 oprintf("%g", m_constval.vfloat);
4097                 break;
4098             case TYPE_VECTOR:
4099                 oprintf("'%g %g %g'",
4100                         m_constval.vvec.x,
4101                         m_constval.vvec.y,
4102                         m_constval.vvec.z);
4103                 break;
4104             case TYPE_ENTITY:
4105                 oprintf("(entity)");
4106                 break;
4107             case TYPE_STRING:
4108                 ir_value_dump_string(m_constval.vstring, oprintf);
4109                 break;
4110 #if 0
4111             case TYPE_INTEGER:
4112                 oprintf("%i", m_constval.vint);
4113                 break;
4114 #endif
4115             case TYPE_POINTER:
4116                 oprintf("&%s",
4117                     m_constval.vpointer->m_name.c_str());
4118                 break;
4119         }
4120     } else {
4121         oprintf("%s", m_name.c_str());
4122     }
4123 }
4124
4125 void ir_value::dumpLife(int (*oprintf)(const char*,...)) const
4126 {
4127     oprintf("Life of %12s:", m_name.c_str());
4128     for (size_t i = 0; i < m_life.size(); ++i)
4129     {
4130         oprintf(" + [%i, %i]\n", m_life[i].start, m_life[i].end);
4131     }
4132 }