1 #include <stdlib.h>
2 #include <string.h>
3
4 #include "gmqcc.h"
5 #include "ir.h"
6
7 /***********************************************************************
8  * Type sizes used at multiple points in the IR codegen
9  */
10
11 const char *type_name[TYPE_COUNT] = {
12     "void",
13     "string",
14     "float",
15     "vector",
16     "entity",
17     "field",
18     "function",
19     "pointer",
20     "integer",
21     "variant",
22     "struct",
23     "union",
24     "array",
25
26     "nil",
27     "<no-expression>"
28 };
29
30 static size_t type_sizeof_[TYPE_COUNT] = {
31     1, /* TYPE_VOID     */
32     1, /* TYPE_STRING   */
33     1, /* TYPE_FLOAT    */
34     3, /* TYPE_VECTOR   */
35     1, /* TYPE_ENTITY   */
36     1, /* TYPE_FIELD    */
37     1, /* TYPE_FUNCTION */
38     1, /* TYPE_POINTER  */
39     1, /* TYPE_INTEGER  */
40     3, /* TYPE_VARIANT  */
41     0, /* TYPE_STRUCT   */
42     0, /* TYPE_UNION    */
43     0, /* TYPE_ARRAY    */
44     0, /* TYPE_NIL      */
45     0, /* TYPE_NOEXPR   */
46 };
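/* Informal example of how these sizes are meant to be read: a TYPE_VECTOR value
 * occupies type_sizeof_[TYPE_VECTOR] == 3 consecutive globals (x, y, z), scalar
 * types such as float, string and entity occupy a single global, and the
 * aggregate/pseudo types report 0 since they have no fixed size at this level.
 */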
47
48 const uint16_t type_store_instr[TYPE_COUNT] = {
49     INSTR_STORE_F, /* should use I when having integer support */
50     INSTR_STORE_S,
51     INSTR_STORE_F,
52     INSTR_STORE_V,
53     INSTR_STORE_ENT,
54     INSTR_STORE_FLD,
55     INSTR_STORE_FNC,
56     INSTR_STORE_ENT, /* should use I */
57 #if 0
58     INSTR_STORE_I, /* integer type */
59 #else
60     INSTR_STORE_F,
61 #endif
62
63     INSTR_STORE_V, /* variant, should never be accessed */
64
65     VINSTR_END, /* struct */
66     VINSTR_END, /* union  */
67     VINSTR_END, /* array  */
68     VINSTR_END, /* nil    */
69     VINSTR_END, /* noexpr */
70 };
71
72 const uint16_t field_store_instr[TYPE_COUNT] = {
73     INSTR_STORE_FLD,
74     INSTR_STORE_FLD,
75     INSTR_STORE_FLD,
76     INSTR_STORE_V,
77     INSTR_STORE_FLD,
78     INSTR_STORE_FLD,
79     INSTR_STORE_FLD,
80     INSTR_STORE_FLD,
81 #if 0
82     INSTR_STORE_FLD, /* integer type */
83 #else
84     INSTR_STORE_FLD,
85 #endif
86
87     INSTR_STORE_V, /* variant, should never be accessed */
88
89     VINSTR_END, /* struct */
90     VINSTR_END, /* union  */
91     VINSTR_END, /* array  */
92     VINSTR_END, /* nil    */
93     VINSTR_END, /* noexpr */
94 };
95
96 const uint16_t type_storep_instr[TYPE_COUNT] = {
97     INSTR_STOREP_F, /* should use I when having integer support */
98     INSTR_STOREP_S,
99     INSTR_STOREP_F,
100     INSTR_STOREP_V,
101     INSTR_STOREP_ENT,
102     INSTR_STOREP_FLD,
103     INSTR_STOREP_FNC,
104     INSTR_STOREP_ENT, /* should use I */
105 #if 0
106     INSTR_STOREP_ENT, /* integer type */
107 #else
108     INSTR_STOREP_F,
109 #endif
110
111     INSTR_STOREP_V, /* variant, should never be accessed */
112
113     VINSTR_END, /* struct */
114     VINSTR_END, /* union  */
115     VINSTR_END, /* array  */
116     VINSTR_END, /* nil    */
117     VINSTR_END, /* noexpr */
118 };
119
120 const uint16_t type_eq_instr[TYPE_COUNT] = {
121     INSTR_EQ_F, /* should use I when having integer support */
122     INSTR_EQ_S,
123     INSTR_EQ_F,
124     INSTR_EQ_V,
125     INSTR_EQ_E,
126     INSTR_EQ_E, /* FLD has no comparison */
127     INSTR_EQ_FNC,
128     INSTR_EQ_E, /* should use I */
129 #if 0
130     INSTR_EQ_I,
131 #else
132     INSTR_EQ_F,
133 #endif
134
135     INSTR_EQ_V, /* variant, should never be accessed */
136
137     VINSTR_END, /* struct */
138     VINSTR_END, /* union  */
139     VINSTR_END, /* array  */
140     VINSTR_END, /* nil    */
141     VINSTR_END, /* noexpr */
142 };
143
144 const uint16_t type_ne_instr[TYPE_COUNT] = {
145     INSTR_NE_F, /* should use I when having integer support */
146     INSTR_NE_S,
147     INSTR_NE_F,
148     INSTR_NE_V,
149     INSTR_NE_E,
150     INSTR_NE_E, /* FLD has no comparison */
151     INSTR_NE_FNC,
152     INSTR_NE_E, /* should use I */
153 #if 0
154     INSTR_NE_I,
155 #else
156     INSTR_NE_F,
157 #endif
158
159     INSTR_NE_V, /* variant, should never be accessed */
160
161     VINSTR_END, /* struct */
162     VINSTR_END, /* union  */
163     VINSTR_END, /* array  */
164     VINSTR_END, /* nil    */
165     VINSTR_END, /* noexpr */
166 };
167
168 const uint16_t type_not_instr[TYPE_COUNT] = {
169     INSTR_NOT_F, /* should use I when having integer support */
170     VINSTR_END,  /* not to be used, depends on string related -f flags */
171     INSTR_NOT_F,
172     INSTR_NOT_V,
173     INSTR_NOT_ENT,
174     INSTR_NOT_ENT,
175     INSTR_NOT_FNC,
176     INSTR_NOT_ENT, /* should use I */
177 #if 0
178     INSTR_NOT_I, /* integer type */
179 #else
180     INSTR_NOT_F,
181 #endif
182
183     INSTR_NOT_V, /* variant, should never be accessed */
184
185     VINSTR_END, /* struct */
186     VINSTR_END, /* union  */
187     VINSTR_END, /* array  */
188     VINSTR_END, /* nil    */
189     VINSTR_END, /* noexpr */
190 };
191
192 /* protos */
193 static void            ir_function_dump(ir_function*, char *ind, int (*oprintf)(const char*,...));
194
195 static ir_value*       ir_block_create_general_instr(ir_block *self, lex_ctx_t, const char *label,
196                                                      int op, ir_value *a, ir_value *b, qc_type outype);
197 static bool GMQCC_WARN ir_block_create_store(ir_block*, lex_ctx_t, ir_value *target, ir_value *what);
198 static void            ir_block_dump(ir_block*, char *ind, int (*oprintf)(const char*,...));
199
200 static bool            ir_instr_op(ir_instr*, int op, ir_value *value, bool writing);
201 static void            ir_instr_dump(ir_instr* in, char *ind, int (*oprintf)(const char*,...));
202 /* error functions */
203
204 static void irerror(lex_ctx_t ctx, const char *msg, ...)
205 {
206     va_list ap;
207     va_start(ap, msg);
208     con_cvprintmsg(ctx, LVL_ERROR, "internal error", msg, ap);
209     va_end(ap);
210 }
211
212 static bool GMQCC_WARN irwarning(lex_ctx_t ctx, int warntype, const char *fmt, ...)
213 {
214     bool    r;
215     va_list ap;
216     va_start(ap, fmt);
217     r = vcompile_warning(ctx, warntype, fmt, ap);
218     va_end(ap);
219     return r;
220 }
221
222 /***********************************************************************
223  * Vector utility functions
224  */
225
226 static bool GMQCC_WARN vec_ir_value_find(std::vector<ir_value *> &vec, const ir_value *what, size_t *idx)
227 {
228     for (auto &it : vec) {
229         if (it != what)
230             continue;
231         if (idx)
232             *idx = &it - &vec[0];
233         return true;
234     }
235     return false;
236 }
237
238 static bool GMQCC_WARN vec_ir_block_find(ir_block **vec, ir_block *what, size_t *idx)
239 {
240     size_t i;
241     size_t len = vec_size(vec);
242     for (i = 0; i < len; ++i) {
243         if (vec[i] == what) {
244             if (idx) *idx = i;
245             return true;
246         }
247     }
248     return false;
249 }
250
251 static bool GMQCC_WARN vec_ir_instr_find(std::vector<ir_instr *> &vec, ir_instr *what, size_t *idx)
252 {
253     for (auto &it : vec) {
254         if (it != what)
255             continue;
256         if (idx)
257             *idx = &it - &vec[0];
258         return true;
259     }
260     return false;
261 }
262
263 /***********************************************************************
264  * IR Builder
265  */
266
267 static void ir_block_delete_quick(ir_block* self);
268 static void ir_instr_delete_quick(ir_instr *self);
269 static void ir_function_delete_quick(ir_function *self);
270
271 ir_builder::ir_builder(const std::string& modulename)
272 : m_name(modulename),
273   m_code(new code_t)
274 {
275     m_htglobals   = util_htnew(IR_HT_SIZE);
276     m_htfields    = util_htnew(IR_HT_SIZE);
277     m_htfunctions = util_htnew(IR_HT_SIZE);
278
279     m_nil = new ir_value("nil", store_value, TYPE_NIL);
280     m_nil->m_cvq = CV_CONST;
281
282     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
283         /* we write to them, but they're not supposed to be used outside the IR, so
284          * let's not allow the generation of ir_instrs which use these.
285          * So it's a constant noexpr.
286          */
287         m_vinstr_temp[i] = new ir_value("vinstr_temp", store_value, TYPE_NOEXPR);
288         m_vinstr_temp[i]->m_cvq = CV_CONST;
289     }
290 }
291
292 ir_builder::~ir_builder()
293 {
294     util_htdel(m_htglobals);
295     util_htdel(m_htfields);
296     util_htdel(m_htfunctions);
297     for (auto& f : m_functions)
298         ir_function_delete_quick(f.release());
299     m_functions.clear(); // delete them now before deleting the rest:
300
301     delete m_nil;
302
303     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
304         delete m_vinstr_temp[i];
305     }
306
307     m_extparams.clear();
308     m_extparam_protos.clear();
309 }
310
311 ir_function* ir_builder::createFunction(const std::string& name, qc_type outtype)
312 {
313     ir_function *fn = (ir_function*)util_htget(m_htfunctions, name.c_str());
314     if (fn)
315         return nullptr;
316
317     fn = new ir_function(this, outtype);
318     fn->m_name = name;
319     m_functions.emplace_back(fn);
320     util_htset(m_htfunctions, name.c_str(), fn);
321
322     fn->m_value = createGlobal(fn->m_name, TYPE_FUNCTION);
323     if (!fn->m_value) {
324         delete fn;
325         return nullptr;
326     }
327
328     fn->m_value->m_hasvalue = true;
329     fn->m_value->m_outtype = outtype;
330     fn->m_value->m_constval.vfunc = fn;
331     fn->m_value->m_context = fn->m_context;
332
333     return fn;
334 }
335
336 ir_value* ir_builder::createGlobal(const std::string& name, qc_type vtype)
337 {
338     ir_value *ve;
339
340     if (name[0] != '#')
341     {
342         ve = (ir_value*)util_htget(m_htglobals, name.c_str());
343         if (ve) {
344             return nullptr;
345         }
346     }
347
348     ve = new ir_value(std::string(name), store_global, vtype);
349     m_globals.emplace_back(ve);
350     util_htset(m_htglobals, name.c_str(), ve);
351     return ve;
352 }
353
354 ir_value* ir_builder::get_va_count()
355 {
356     if (m_reserved_va_count)
357         return m_reserved_va_count;
358     return (m_reserved_va_count = createGlobal("reserved:va_count", TYPE_FLOAT));
359 }
360
361 ir_value* ir_builder::createField(const std::string& name, qc_type vtype)
362 {
363     ir_value *ve = (ir_value*)util_htget(m_htfields, name.c_str());
364     if (ve) {
365         return nullptr;
366     }
367
368     ve = new ir_value(std::string(name), store_global, TYPE_FIELD);
369     ve->m_fieldtype = vtype;
370     m_fields.emplace_back(ve);
371     util_htset(m_htfields, name.c_str(), ve);
372     return ve;
373 }
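#if 0
/* Hedged usage sketch (kept disabled, not part of the build): roughly how a
 * front end might drive the builder API above. The function name and the
 * `ctx` argument (a lex_ctx_t as supplied by the lexer) are assumptions for
 * illustration only; error handling is omitted.
 */
static void ir_builder_usage_sketch(lex_ctx_t ctx)
{
    ir_builder   builder("example");
    ir_function *fn    = builder.createFunction("main", TYPE_FLOAT);
    ir_block    *entry = ir_function_create_block(ctx, fn, "entry");
    ir_value    *a     = builder.literalFloat(1.0f, true);
    ir_value    *b     = builder.literalFloat(2.0f, true);
    ir_value    *sum   = ir_block_create_binop(entry, ctx, "sum", INSTR_ADD_F, a, b);
    (void)!ir_block_create_return(entry, ctx, sum);
    (void)!ir_function_finalize(fn);
}
#endif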
374
375 /***********************************************************************
376  * IR Function
377  */
378
379 static bool ir_function_naive_phi(ir_function*);
380 static void ir_function_enumerate(ir_function*);
381 static bool ir_function_calculate_liferanges(ir_function*);
382 static bool ir_function_allocate_locals(ir_function*);
383
384 ir_function::ir_function(ir_builder* owner_, qc_type outtype_)
385 : m_owner(owner_),
386   m_name("<@unnamed>"),
387   m_outtype(outtype_)
388 {
389     m_context.file = "<@no context>";
390     m_context.line = 0;
391 }
392
393 ir_function::~ir_function()
394 {
395 }
396
397 static void ir_function_delete_quick(ir_function *self)
398 {
399     for (auto& b : self->m_blocks)
400         ir_block_delete_quick(b.release());
401     delete self;
402 }
403
404 static void ir_function_collect_value(ir_function *self, ir_value *v)
405 {
406     self->m_values.emplace_back(v);
407 }
408
409 ir_block* ir_function_create_block(lex_ctx_t ctx, ir_function *self, const char *label)
410 {
411     ir_block* bn = new ir_block(self, label ? std::string(label) : std::string());
412     bn->m_context = ctx;
413     self->m_blocks.emplace_back(bn);
414
415     if ((self->m_flags & IR_FLAG_BLOCK_COVERAGE) && self->m_owner->m_coverage_func)
416         (void)ir_block_create_call(bn, ctx, nullptr, self->m_owner->m_coverage_func, false);
417
418     return bn;
419 }
420
421 static bool instr_is_operation(uint16_t op)
422 {
423     return ( (op >= INSTR_MUL_F  && op <= INSTR_GT) ||
424              (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) ||
425              (op == INSTR_ADDRESS) ||
426              (op >= INSTR_NOT_F  && op <= INSTR_NOT_FNC) ||
427              (op >= INSTR_AND    && op <= INSTR_BITOR) ||
428              (op >= INSTR_CALL0  && op <= INSTR_CALL8) ||
429              (op >= VINSTR_BITAND_V && op <= VINSTR_NEG_V) );
430 }
431
432 static bool ir_function_pass_peephole(ir_function *self)
433 {
434     for (auto& bp : self->m_blocks) {
435         ir_block *block = bp.get();
436         for (size_t i = 0; i < vec_size(block->m_instr); ++i) {
437             ir_instr *inst;
438             inst = block->m_instr[i];
439
440             if (i >= 1 &&
441                 (inst->m_opcode >= INSTR_STORE_F &&
442                  inst->m_opcode <= INSTR_STORE_FNC))
443             {
444                 ir_instr *store;
445                 ir_instr *oper;
446                 ir_value *value;
447
448                 store = inst;
449
450                 oper  = block->m_instr[i-1];
451                 if (!instr_is_operation(oper->m_opcode))
452                     continue;
453
454                 /* Don't change semantics of MUL_VF in engines where these may not alias. */
455                 if (OPTS_FLAG(LEGACY_VECTOR_MATHS)) {
456                     if (oper->m_opcode == INSTR_MUL_VF && oper->_m_ops[2]->m_memberof == oper->_m_ops[1])
457                         continue;
458                     if (oper->m_opcode == INSTR_MUL_FV && oper->_m_ops[1]->m_memberof == oper->_m_ops[2])
459                         continue;
460                 }
461
462                 value = oper->_m_ops[0];
463
464                 /* only do it for SSA values */
465                 if (value->m_store != store_value)
466                     continue;
467
468                 /* don't optimize out the temp if it's used later again */
469                 if (value->m_reads.size() != 1)
470                     continue;
471
472                 /* The very next store must use this value */
473                 if (value->m_reads[0] != store)
474                     continue;
475
476                 /* And of course the store must _read_ from it, so it's in
477                  * OP 1 */
478                 if (store->_m_ops[1] != value)
479                     continue;
480
481                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
482                 (void)!ir_instr_op(oper, 0, store->_m_ops[0], true);
483
484                 vec_remove(block->m_instr, i, 1);
485                 delete store;
486             }
487             else if (inst->m_opcode == VINSTR_COND)
488             {
489                 /* A COND on a value produced by a NOT can drop
490                  * the NOT and swap its branch targets instead
491                  */
492                 while (true) {
493                     ir_block *tmp;
494                     size_t    inotid;
495                     ir_instr *inot;
496                     ir_value *value;
497                     value = inst->_m_ops[0];
498
499                     if (value->m_store != store_value || value->m_reads.size() != 1 || value->m_reads[0] != inst)
500                         break;
501
502                     inot = value->m_writes[0];
503                     if (inot->_m_ops[0] != value ||
504                         inot->m_opcode < INSTR_NOT_F ||
505                         inot->m_opcode > INSTR_NOT_FNC ||
506                         inot->m_opcode == INSTR_NOT_V || /* can't do these */
507                         inot->m_opcode == INSTR_NOT_S)
508                     {
509                         break;
510                     }
511
512                     /* count */
513                     ++opts_optimizationcount[OPTIM_PEEPHOLE];
514                     /* change operand */
515                     (void)!ir_instr_op(inst, 0, inot->_m_ops[1], false);
516                     /* remove NOT */
517                     tmp = inot->m_owner;
518                     for (inotid = 0; inotid < vec_size(tmp->m_instr); ++inotid) {
519                         if (tmp->m_instr[inotid] == inot)
520                             break;
521                     }
522                     if (inotid >= vec_size(tmp->m_instr)) {
523                         compile_error(inst->m_context, "sanity-check failed: failed to find instruction to optimize out");
524                         return false;
525                     }
526                     vec_remove(tmp->m_instr, inotid, 1);
527                     delete inot;
528                     /* swap ontrue/onfalse */
529                     tmp = inst->m_bops[0];
530                     inst->m_bops[0] = inst->m_bops[1];
531                     inst->m_bops[1] = tmp;
532                 }
533                 continue;
534             }
535         }
536     }
537
538     return true;
539 }
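/* Informal sketch of the two rewrites performed above (IR pseudo-listing):
 *
 *   ADD_F  %tmp, a, b            ADD_F  dest, a, b
 *   STORE  dest, %tmp      =>    (STORE removed)
 *
 * when %tmp is an SSA temp read exactly once, by the store that immediately
 * follows its defining operation; and
 *
 *   NOT_F  %t, x                 (NOT removed)
 *   COND   %t ? B1 : B2    =>    COND   x ? B2 : B1
 *
 * where the on-true/on-false blocks of the COND are swapped instead of
 * negating the value.
 */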
540
541 static bool ir_function_pass_tailrecursion(ir_function *self)
542 {
543     size_t p;
544
545     for (auto& bp : self->m_blocks) {
546         ir_block *block = bp.get();
547
548         ir_value *funcval;
549         ir_instr *ret, *call, *store = nullptr;
550
551         if (!block->m_final || vec_size(block->m_instr) < 2)
552             continue;
553
554         ret = block->m_instr[vec_size(block->m_instr)-1];
555         if (ret->m_opcode != INSTR_DONE && ret->m_opcode != INSTR_RETURN)
556             continue;
557
558         call = block->m_instr[vec_size(block->m_instr)-2];
559         if (call->m_opcode >= INSTR_STORE_F && call->m_opcode <= INSTR_STORE_FNC) {
560             /* account for the unoptimized
561              * CALL
562              * STORE %return, %tmp
563              * RETURN %tmp
564              * version
565              */
566             if (vec_size(block->m_instr) < 3)
567                 continue;
568
569             store = call;
570             call = block->m_instr[vec_size(block->m_instr)-3];
571         }
572
573         if (call->m_opcode < INSTR_CALL0 || call->m_opcode > INSTR_CALL8)
574             continue;
575
576         if (store) {
577             /* optimize out the STORE */
578             if (ret->_m_ops[0]   &&
579                 ret->_m_ops[0]   == store->_m_ops[0] &&
580                 store->_m_ops[1] == call->_m_ops[0])
581             {
582                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
583                 call->_m_ops[0] = store->_m_ops[0];
584                 vec_remove(block->m_instr, vec_size(block->m_instr) - 2, 1);
585                 delete store;
586             }
587             else
588                 continue;
589         }
590
591         if (!call->_m_ops[0])
592             continue;
593
594         funcval = call->_m_ops[1];
595         if (!funcval)
596             continue;
597         if (funcval->m_vtype != TYPE_FUNCTION || funcval->m_constval.vfunc != self)
598             continue;
599
600         /* now we have a CALL and a RET, check if it's a tailcall */
601         if (ret->_m_ops[0] && call->_m_ops[0] != ret->_m_ops[0])
602             continue;
603
604         ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
605         vec_shrinkby(block->m_instr, 2);
606
607         block->m_final = false; /* open it back up */
608
609         /* emit parameter-stores */
610         for (p = 0; p < call->m_params.size(); ++p) {
611             /* assert(call->params_count <= self->locals_count); */
612             if (!ir_block_create_store(block, call->m_context, self->m_locals[p].get(), call->m_params[p])) {
613                 irerror(call->m_context, "failed to create tailcall store instruction for parameter %i", (int)p);
614                 return false;
615             }
616         }
617         if (!ir_block_create_jump(block, call->m_context, self->m_blocks[0].get())) {
618             irerror(call->m_context, "failed to create tailcall jump");
619             return false;
620         }
621
622         delete call;
623         delete ret;
624     }
625
626     return true;
627 }
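/* Informal sketch of the tail-recursion rewrite above, for a self-call at the
 * end of a final block:
 *
 *   CALL2  self (p0, p1)           STORE  local0, p0
 *   STORE  %ret, %tmp        =>    STORE  local1, p1
 *   RETURN %tmp                    JUMP   <entry block>
 *
 * The optional STORE/RETURN pair is folded first, then CALL and RETURN are
 * dropped, the call's parameters are stored into the function's own locals
 * (in order), and control jumps back to block 0, turning the recursion into
 * a loop.
 */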
628
629 bool ir_function_finalize(ir_function *self)
630 {
631     if (self->m_builtin)
632         return true;
633
634     if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
635         if (!ir_function_pass_peephole(self)) {
636             irerror(self->m_context, "generic optimization pass broke something in `%s`", self->m_name.c_str());
637             return false;
638         }
639     }
640
641     if (OPTS_OPTIMIZATION(OPTIM_TAIL_RECURSION)) {
642         if (!ir_function_pass_tailrecursion(self)) {
643             irerror(self->m_context, "tail-recursion optimization pass broke something in `%s`", self->m_name.c_str());
644             return false;
645         }
646     }
647
648     if (!ir_function_naive_phi(self)) {
649         irerror(self->m_context, "internal error: ir_function_naive_phi failed");
650         return false;
651     }
652
653     for (auto& lp : self->m_locals) {
654         ir_value *v = lp.get();
655         if (v->m_vtype == TYPE_VECTOR ||
656             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
657         {
658             v->vectorMember(0);
659             v->vectorMember(1);
660             v->vectorMember(2);
661         }
662     }
663     for (auto& vp : self->m_values) {
664         ir_value *v = vp.get();
665         if (v->m_vtype == TYPE_VECTOR ||
666             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
667         {
668             v->vectorMember(0);
669             v->vectorMember(1);
670             v->vectorMember(2);
671         }
672     }
673
674     ir_function_enumerate(self);
675
676     if (!ir_function_calculate_liferanges(self))
677         return false;
678     if (!ir_function_allocate_locals(self))
679         return false;
680     return true;
681 }
682
683 ir_value* ir_function_create_local(ir_function *self, const std::string& name, qc_type vtype, bool param)
684 {
685     ir_value *ve;
686
687     if (param &&
688         !self->m_locals.empty() &&
689         self->m_locals.back()->m_store != store_param)
690     {
691         irerror(self->m_context, "cannot add parameters after adding locals");
692         return nullptr;
693     }
694
695     ve = new ir_value(std::string(name), (param ? store_param : store_local), vtype);
696     if (param)
697         ve->m_locked = true;
698     self->m_locals.emplace_back(ve);
699     return ve;
700 }
701
702 /***********************************************************************
703  * IR Block
704  */
705
706 ir_block::ir_block(ir_function* owner, const std::string& name)
707 : m_owner(owner),
708   m_label(name)
709 {
710     m_context.file = "<@no context>";
711     m_context.line = 0;
712 }
713
714 ir_block::~ir_block()
715 {
716     for (size_t i = 0; i != vec_size(m_instr); ++i)
717         delete m_instr[i];
718     vec_free(m_instr);
719     vec_free(m_entries);
720     vec_free(m_exits);
721 }
722
723 static void ir_block_delete_quick(ir_block* self)
724 {
725     size_t i;
726     for (i = 0; i != vec_size(self->m_instr); ++i)
727         ir_instr_delete_quick(self->m_instr[i]);
728     vec_free(self->m_instr);
729     delete self;
730 }
731
732 /***********************************************************************
733  * IR Instructions
734  */
735
736 ir_instr::ir_instr(lex_ctx_t ctx, ir_block* owner_, int op)
737 : m_opcode(op),
738   m_context(ctx),
739   m_owner(owner_)
740 {
741 }
742
743 ir_instr::~ir_instr()
744 {
745     // The following calls can only delete from
746     // vectors; we still want to delete this instruction,
747     // so ignore the return value. Because of the warn_unused_result attribute
748     // gcc does not honor a plain (void)foo(); cast to discard the result,
749     // so the (void)!foo(); idiom below is used instead.
750     for (auto &it : m_phi) {
751         size_t idx;
752         if (vec_ir_instr_find(it.value->m_writes, this, &idx))
753             it.value->m_writes.erase(it.value->m_writes.begin() + idx);
754         if (vec_ir_instr_find(it.value->m_reads, this, &idx))
755             it.value->m_reads.erase(it.value->m_reads.begin() + idx);
756     }
757     for (auto &it : m_params) {
758         size_t idx;
759         if (vec_ir_instr_find(it->m_writes, this, &idx))
760             it->m_writes.erase(it->m_writes.begin() + idx);
761         if (vec_ir_instr_find(it->m_reads, this, &idx))
762             it->m_reads.erase(it->m_reads.begin() + idx);
763     }
764     (void)!ir_instr_op(this, 0, nullptr, false);
765     (void)!ir_instr_op(this, 1, nullptr, false);
766     (void)!ir_instr_op(this, 2, nullptr, false);
767 }
768
769 static void ir_instr_delete_quick(ir_instr *self)
770 {
771     self->m_phi.clear();
772     self->m_params.clear();
773     self->_m_ops[0] = nullptr;
774     self->_m_ops[1] = nullptr;
775     self->_m_ops[2] = nullptr;
776     delete self;
777 }
778
779 static bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing)
780 {
781     if (v && v->m_vtype == TYPE_NOEXPR) {
782         irerror(self->m_context, "tried to use a NOEXPR value");
783         return false;
784     }
785
786     if (self->_m_ops[op]) {
787         size_t idx;
788         if (writing && vec_ir_instr_find(self->_m_ops[op]->m_writes, self, &idx))
789             self->_m_ops[op]->m_writes.erase(self->_m_ops[op]->m_writes.begin() + idx);
790         else if (vec_ir_instr_find(self->_m_ops[op]->m_reads, self, &idx))
791             self->_m_ops[op]->m_reads.erase(self->_m_ops[op]->m_reads.begin() + idx);
792     }
793     if (v) {
794         if (writing)
795             v->m_writes.push_back(self);
796         else
797             v->m_reads.push_back(self);
798     }
799     self->_m_ops[op] = v;
800     return true;
801 }
802
803 /***********************************************************************
804  * IR Value
805  */
806
807 void ir_value::setCodeAddress(int32_t gaddr)
808 {
809     m_code.globaladdr = gaddr;
810     if (m_members[0]) m_members[0]->m_code.globaladdr = gaddr;
811     if (m_members[1]) m_members[1]->m_code.globaladdr = gaddr;
812     if (m_members[2]) m_members[2]->m_code.globaladdr = gaddr;
813 }
814
815 int32_t ir_value::codeAddress() const
816 {
817     if (m_store == store_return)
818         return OFS_RETURN + m_code.addroffset;
819     return m_code.globaladdr + m_code.addroffset;
820 }
821
822 ir_value::ir_value(std::string&& name_, store_type store_, qc_type vtype_)
823     : m_name(move(name_))
824     , m_vtype(vtype_)
825     , m_store(store_)
826 {
827     m_fieldtype = TYPE_VOID;
828     m_outtype = TYPE_VOID;
829     m_flags = 0;
830
831     m_cvq          = CV_NONE;
832     m_hasvalue     = false;
833     m_context.file = "<@no context>";
834     m_context.line = 0;
835
836     memset(&m_constval, 0, sizeof(m_constval));
837     memset(&m_code,     0, sizeof(m_code));
838
839     m_members[0] = nullptr;
840     m_members[1] = nullptr;
841     m_members[2] = nullptr;
842     m_memberof = nullptr;
843
844     m_unique_life = false;
845     m_locked = false;
846     m_callparam  = false;
847 }
848
849 ir_value::ir_value(ir_function *owner, std::string&& name, store_type storetype, qc_type vtype)
850     : ir_value(move(name), storetype, vtype)
851 {
852     ir_function_collect_value(owner, this);
853 }
854
855 ir_value::~ir_value()
856 {
857     size_t i;
858     if (m_hasvalue) {
859         if (m_vtype == TYPE_STRING)
860             mem_d((void*)m_constval.vstring);
861     }
862     if (!(m_flags & IR_FLAG_SPLIT_VECTOR)) {
863         for (i = 0; i < 3; ++i) {
864             if (m_members[i])
865                 delete m_members[i];
866         }
867     }
868 }
869
870
871 /* helper function */
872 ir_value* ir_builder::literalFloat(float value, bool add_to_list) {
873     ir_value *v = new ir_value("#IMMEDIATE", store_global, TYPE_FLOAT);
874     v->m_flags |= IR_FLAG_ERASABLE;
875     v->m_hasvalue = true;
876     v->m_cvq = CV_CONST;
877     v->m_constval.vfloat = value;
878
879     m_globals.emplace_back(v);
880     if (add_to_list)
881         m_const_floats.emplace_back(v);
882     return v;
883 }
884
885 ir_value* ir_value::vectorMember(unsigned int member)
886 {
887     std::string name;
888     ir_value *m;
889     if (member >= 3)
890         return nullptr;
891
892     if (m_members[member])
893         return m_members[member];
894
895     if (!m_name.empty()) {
896         char member_name[3] = { '_', char('x' + member), 0 };
897         name = m_name + member_name;
898     }
899
900     if (m_vtype == TYPE_VECTOR)
901     {
902         m = new ir_value(move(name), m_store, TYPE_FLOAT);
903         if (!m)
904             return nullptr;
905         m->m_context = m_context;
906
907         m_members[member] = m;
908         m->m_code.addroffset = member;
909     }
910     else if (m_vtype == TYPE_FIELD)
911     {
912         if (m_fieldtype != TYPE_VECTOR)
913             return nullptr;
914         m = new ir_value(move(name), m_store, TYPE_FIELD);
915         if (!m)
916             return nullptr;
917         m->m_fieldtype = TYPE_VECTOR;
918         m->m_context = m_context;
919
920         m_members[member] = m;
921         m->m_code.addroffset = member;
922     }
923     else
924     {
925         irerror(m_context, "invalid member access on %s", m_name.c_str());
926         return nullptr;
927     }
928
929     m->m_memberof = this;
930     return m;
931 }
932
933 size_t ir_value::size() const {
934     if (m_vtype == TYPE_FIELD && m_fieldtype == TYPE_VECTOR)
935         return type_sizeof_[TYPE_VECTOR];
936     return type_sizeof_[m_vtype];
937 }
938
939 bool ir_value::setFloat(float f)
940 {
941     if (m_vtype != TYPE_FLOAT)
942         return false;
943     m_constval.vfloat = f;
944     m_hasvalue = true;
945     return true;
946 }
947
948 bool ir_value::setFunc(int f)
949 {
950     if (m_vtype != TYPE_FUNCTION)
951         return false;
952     m_constval.vint = f;
953     m_hasvalue = true;
954     return true;
955 }
956
957 bool ir_value::setVector(vec3_t v)
958 {
959     if (m_vtype != TYPE_VECTOR)
960         return false;
961     m_constval.vvec = v;
962     m_hasvalue = true;
963     return true;
964 }
965
966 bool ir_value::setField(ir_value *fld)
967 {
968     if (m_vtype != TYPE_FIELD)
969         return false;
970     m_constval.vpointer = fld;
971     m_hasvalue = true;
972     return true;
973 }
974
975 bool ir_value::setString(const char *str)
976 {
977     if (m_vtype != TYPE_STRING)
978         return false;
979     m_constval.vstring = util_strdupe(str);
980     m_hasvalue = true;
981     return true;
982 }
983
984 #if 0
985 bool ir_value::setInt(int i)
986 {
987     if (m_vtype != TYPE_INTEGER)
988         return false;
989     m_constval.vint = i;
990     m_hasvalue = true;
991     return true;
992 }
993 #endif
994
995 bool ir_value::lives(size_t at)
996 {
997     for (auto& l : m_life) {
998         if (l.start <= at && at <= l.end)
999             return true;
1000         if (l.start > at) /* since it's ordered */
1001             return false;
1002     }
1003     return false;
1004 }
1005
1006 bool ir_value::insertLife(size_t idx, ir_life_entry_t e)
1007 {
1008     m_life.insert(m_life.begin() + idx, e);
1009     return true;
1010 }
1011
1012 bool ir_value::setAlive(size_t s)
1013 {
1014     size_t i;
1015     const size_t vs = m_life.size();
1016     ir_life_entry_t *life_found = nullptr;
1017     ir_life_entry_t *before = nullptr;
1018     ir_life_entry_t new_entry;
1019
1020     /* Find the first range >= s */
1021     for (i = 0; i < vs; ++i)
1022     {
1023         before = life_found;
1024         life_found = &m_life[i];
1025         if (life_found->start > s)
1026             break;
1027     }
1028     /* nothing found? append */
1029     if (i == vs) {
1030         ir_life_entry_t e;
1031         if (life_found && life_found->end+1 == s)
1032         {
1033             /* previous life range can be merged in */
1034             life_found->end++;
1035             return true;
1036         }
1037         if (life_found && life_found->end >= s)
1038             return false;
1039         e.start = e.end = s;
1040         m_life.emplace_back(e);
1041         return true;
1042     }
1043     /* found */
1044     if (before)
1045     {
1046         if (before->end + 1 == s &&
1047             life_found->start - 1 == s)
1048         {
1049             /* merge */
1050             before->end = life_found->end;
1051             m_life.erase(m_life.begin()+i);
1052             return true;
1053         }
1054         if (before->end + 1 == s)
1055         {
1056             /* extend before */
1057             before->end++;
1058             return true;
1059         }
1060         /* already contained */
1061         if (before->end >= s)
1062             return false;
1063     }
1064     /* extend */
1065     if (life_found->start - 1 == s)
1066     {
1067         life_found->start--;
1068         return true;
1069     }
1070     /* insert a new entry */
1071     new_entry.start = new_entry.end = s;
1072     return insertLife(i, new_entry);
1073 }
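/* Worked example (informal): starting from m_life == { [3,5], [9,9] },
 *   setAlive(6) extends the first range in place -> { [3,6], [9,9] },
 *   marking 7 and then 8 as alive bridges the gap -> { [3,9] },
 *   setAlive(4) finds 4 already covered and returns false.
 */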
1074
1075 bool ir_value::mergeLife(const ir_value *other)
1076 {
1077     size_t i, myi;
1078
1079     if (other->m_life.empty())
1080         return true;
1081
1082     if (m_life.empty()) {
1083         m_life = other->m_life;
1084         return true;
1085     }
1086
1087     myi = 0;
1088     for (i = 0; i < other->m_life.size(); ++i)
1089     {
1090         const ir_life_entry_t &otherlife = other->m_life[i];
1091         while (true)
1092         {
1093             ir_life_entry_t *entry = &m_life[myi];
1094
1095             if (otherlife.end+1 < entry->start)
1096             {
1097                 /* adding an interval before entry */
1098                 if (!insertLife(myi, otherlife))
1099                     return false;
1100                 ++myi;
1101                 break;
1102             }
1103
1104             if (otherlife.start <  entry->start &&
1105                 otherlife.end+1 >= entry->start)
1106             {
1107                 /* starts earlier and overlaps */
1108                 entry->start = otherlife.start;
1109             }
1110
1111             if (otherlife.end   >  entry->end &&
1112                 otherlife.start <= entry->end+1)
1113             {
1114                 /* ends later and overlaps */
1115                 entry->end = otherlife.end;
1116             }
1117
1118             /* see if our change combines it with the next ranges */
1119             while (myi+1 < m_life.size() &&
1120                    entry->end+1 >= m_life[1+myi].start)
1121             {
1122                 /* overlaps with (myi+1) */
1123                 if (entry->end < m_life[1+myi].end)
1124                     entry->end = m_life[1+myi].end;
1125                 m_life.erase(m_life.begin() + (myi + 1));
1126                 entry = &m_life[myi];
1127             }
1128
1129             /* see if we're after the entry */
1130             if (otherlife.start > entry->end)
1131             {
1132                 ++myi;
1133                 /* append if we're at the end */
1134                 if (myi >= m_life.size()) {
1135                     m_life.emplace_back(otherlife);
1136                     break;
1137                 }
1138                 /* otherwise check the next range */
1139                 continue;
1140             }
1141             break;
1142         }
1143     }
1144     return true;
1145 }
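/* Worked example (informal): merging other->m_life == { [2,4], [10,12] } into
 * m_life == { [5,8] } first widens [5,8] to [2,8] (the ranges touch, 4+1 == 5)
 * and then appends [10,12], giving { [2,8], [10,12] }.
 */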
1146
1147 static bool ir_values_overlap(const ir_value *a, const ir_value *b)
1148 {
1149     /* For any life entry in A see if it overlaps with
1150      * any life entry in B.
1151      * Note that the life entries are ordered, so we can use a
1152      * more efficient algorithm here than naively translating the
1153      * statement above.
1154      */
1155
1156     const ir_life_entry_t *la, *lb, *enda, *endb;
1157
1158     /* first of all, if either has no life range, they cannot clash */
1159     if (a->m_life.empty() || b->m_life.empty())
1160         return false;
1161
1162     la = &a->m_life.front();
1163     lb = &b->m_life.front();
1164     enda = &a->m_life.back() + 1;
1165     endb = &b->m_life.back() + 1;
1166     while (true)
1167     {
1168         /* check if the entries overlap, for that,
1169          * both must start before the other one ends.
1170          */
1171         if (la->start < lb->end &&
1172             lb->start < la->end)
1173         {
1174             return true;
1175         }
1176
1177         /* entries are ordered
1178          * one entry is earlier than the other
1179          * that earlier entry will be moved forward
1180          */
1181         if (la->start < lb->start)
1182         {
1183             /* order: A B, move A forward
1184              * check if we hit the end with A
1185              */
1186             if (++la == enda)
1187                 break;
1188         }
1189         else /* if (lb->start < la->start)  actually <= */
1190         {
1191             /* order: B A, move B forward
1192              * check if we hit the end with B
1193              */
1194             if (++lb == endb)
1195                 break;
1196         }
1197     }
1198     return false;
1199 }
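/* Worked example (informal): A == { [1,4], [10,14] } and B == { [5,9] } never
 * satisfy la->start < lb->end && lb->start < la->end, so they do not clash and
 * may share a slot; with B == { [3,6] } the pair [1,4]/[3,6] overlaps (1 < 6
 * and 3 < 4) and the function returns true.
 */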
1200
1201 /***********************************************************************
1202  * IR main operations
1203  */
1204
1205 static bool ir_check_unreachable(ir_block *self)
1206 {
1207     /* The IR should never have to deal with unreachable code */
1208     if (!self->m_final/* || OPTS_FLAG(ALLOW_UNREACHABLE_CODE)*/)
1209         return true;
1210     irerror(self->m_context, "unreachable statement (%s)", self->m_label.c_str());
1211     return false;
1212 }
1213
1214 bool ir_block_create_store_op(ir_block *self, lex_ctx_t ctx, int op, ir_value *target, ir_value *what)
1215 {
1216     ir_instr *in;
1217     if (!ir_check_unreachable(self))
1218         return false;
1219
1220     if (target->m_store == store_value &&
1221         (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC))
1222     {
1223         irerror(self->m_context, "cannot store to an SSA value");
1224         irerror(self->m_context, "trying to store: %s <- %s", target->m_name.c_str(), what->m_name.c_str());
1225         irerror(self->m_context, "instruction: %s", util_instr_str[op]);
1226         return false;
1227     }
1228
1229     in = new ir_instr(ctx, self, op);
1230     if (!in)
1231         return false;
1232
1233     if (!ir_instr_op(in, 0, target, (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC)) ||
1234         !ir_instr_op(in, 1, what, false))
1235     {
1236         delete in;
1237         return false;
1238     }
1239     vec_push(self->m_instr, in);
1240     return true;
1241 }
1242
1243 bool ir_block_create_state_op(ir_block *self, lex_ctx_t ctx, ir_value *frame, ir_value *think)
1244 {
1245     ir_instr *in;
1246     if (!ir_check_unreachable(self))
1247         return false;
1248
1249     in = new ir_instr(ctx, self, INSTR_STATE);
1250     if (!in)
1251         return false;
1252
1253     if (!ir_instr_op(in, 0, frame, false) ||
1254         !ir_instr_op(in, 1, think, false))
1255     {
1256         delete in;
1257         return false;
1258     }
1259     vec_push(self->m_instr, in);
1260     return true;
1261 }
1262
1263 static bool ir_block_create_store(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1264 {
1265     int op = 0;
1266     qc_type vtype;
1267     if (target->m_vtype == TYPE_VARIANT)
1268         vtype = what->m_vtype;
1269     else
1270         vtype = target->m_vtype;
1271
1272 #if 0
1273     if      (vtype == TYPE_FLOAT   && what->m_vtype == TYPE_INTEGER)
1274         op = INSTR_CONV_ITOF;
1275     else if (vtype == TYPE_INTEGER && what->m_vtype == TYPE_FLOAT)
1276         op = INSTR_CONV_FTOI;
1277 #endif
1278         op = type_store_instr[vtype];
1279
1280     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1281         if (op == INSTR_STORE_FLD && what->m_fieldtype == TYPE_VECTOR)
1282             op = INSTR_STORE_V;
1283     }
1284
1285     return ir_block_create_store_op(self, ctx, op, target, what);
1286 }
1287
1288 bool ir_block_create_storep(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1289 {
1290     int op = 0;
1291     qc_type vtype;
1292
1293     if (target->m_vtype != TYPE_POINTER)
1294         return false;
1295
1296     /* storing through a pointer - the target is a pointer, so the type must be
1297      * inferred from the source
1298      */
1299     vtype = what->m_vtype;
1300
1301     op = type_storep_instr[vtype];
1302     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1303         if (op == INSTR_STOREP_FLD && what->m_fieldtype == TYPE_VECTOR)
1304             op = INSTR_STOREP_V;
1305     }
1306
1307     return ir_block_create_store_op(self, ctx, op, target, what);
1308 }
1309
1310 bool ir_block_create_return(ir_block *self, lex_ctx_t ctx, ir_value *v)
1311 {
1312     ir_instr *in;
1313     if (!ir_check_unreachable(self))
1314         return false;
1315
1316     self->m_final = true;
1317
1318     self->m_is_return = true;
1319     in = new ir_instr(ctx, self, INSTR_RETURN);
1320     if (!in)
1321         return false;
1322
1323     if (v && !ir_instr_op(in, 0, v, false)) {
1324         delete in;
1325         return false;
1326     }
1327
1328     vec_push(self->m_instr, in);
1329     return true;
1330 }
1331
1332 bool ir_block_create_if(ir_block *self, lex_ctx_t ctx, ir_value *v,
1333                         ir_block *ontrue, ir_block *onfalse)
1334 {
1335     ir_instr *in;
1336     if (!ir_check_unreachable(self))
1337         return false;
1338     self->m_final = true;
1339     /*in = new ir_instr(ctx, self, (v->m_vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/
1340     in = new ir_instr(ctx, self, VINSTR_COND);
1341     if (!in)
1342         return false;
1343
1344     if (!ir_instr_op(in, 0, v, false)) {
1345         delete in;
1346         return false;
1347     }
1348
1349     in->m_bops[0] = ontrue;
1350     in->m_bops[1] = onfalse;
1351
1352     vec_push(self->m_instr, in);
1353
1354     vec_push(self->m_exits, ontrue);
1355     vec_push(self->m_exits, onfalse);
1356     vec_push(ontrue->m_entries,  self);
1357     vec_push(onfalse->m_entries, self);
1358     return true;
1359 }
1360
1361 bool ir_block_create_jump(ir_block *self, lex_ctx_t ctx, ir_block *to)
1362 {
1363     ir_instr *in;
1364     if (!ir_check_unreachable(self))
1365         return false;
1366     self->m_final = true;
1367     in = new ir_instr(ctx, self, VINSTR_JUMP);
1368     if (!in)
1369         return false;
1370
1371     in->m_bops[0] = to;
1372     vec_push(self->m_instr, in);
1373
1374     vec_push(self->m_exits, to);
1375     vec_push(to->m_entries, self);
1376     return true;
1377 }
1378
1379 bool ir_block_create_goto(ir_block *self, lex_ctx_t ctx, ir_block *to)
1380 {
1381     self->m_owner->m_flags |= IR_FLAG_HAS_GOTO;
1382     return ir_block_create_jump(self, ctx, to);
1383 }
1384
1385 ir_instr* ir_block_create_phi(ir_block *self, lex_ctx_t ctx, const char *label, qc_type ot)
1386 {
1387     ir_value *out;
1388     ir_instr *in;
1389     if (!ir_check_unreachable(self))
1390         return nullptr;
1391     in = new ir_instr(ctx, self, VINSTR_PHI);
1392     if (!in)
1393         return nullptr;
1394     out = new ir_value(self->m_owner, label ? label : "", store_value, ot);
1395     if (!out) {
1396         delete in;
1397         return nullptr;
1398     }
1399     if (!ir_instr_op(in, 0, out, true)) {
1400         delete in;
1401         return nullptr;
1402     }
1403     vec_push(self->m_instr, in);
1404     return in;
1405 }
1406
1407 ir_value* ir_phi_value(ir_instr *self)
1408 {
1409     return self->_m_ops[0];
1410 }
1411
1412 void ir_phi_add(ir_instr* self, ir_block *b, ir_value *v)
1413 {
1414     ir_phi_entry_t pe;
1415
1416     if (!vec_ir_block_find(self->m_owner->m_entries, b, nullptr)) {
1417         // Must not be possible to cause this, otherwise the AST
1418         // is doing something wrong.
1419         irerror(self->m_context, "Invalid entry block for PHI");
1420         exit(EXIT_FAILURE);
1421     }
1422
1423     pe.value = v;
1424     pe.from = b;
1425     v->m_reads.push_back(self);
1426     self->m_phi.push_back(pe);
1427 }
1428
1429 /* call related code */
1430 ir_instr* ir_block_create_call(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *func, bool noreturn)
1431 {
1432     ir_value *out;
1433     ir_instr *in;
1434     if (!ir_check_unreachable(self))
1435         return nullptr;
1436     in = new ir_instr(ctx, self, (noreturn ? VINSTR_NRCALL : INSTR_CALL0));
1437     if (!in)
1438         return nullptr;
1439     if (noreturn) {
1440         self->m_final = true;
1441         self->m_is_return = true;
1442     }
1443     out = new ir_value(self->m_owner, label ? label : "", (func->m_outtype == TYPE_VOID) ? store_return : store_value, func->m_outtype);
1444     if (!out) {
1445         delete in;
1446         return nullptr;
1447     }
1448     if (!ir_instr_op(in, 0, out, true) ||
1449         !ir_instr_op(in, 1, func, false))
1450     {
1451         delete in;
1452         return nullptr;
1453     }
1454     vec_push(self->m_instr, in);
1455     /*
1456     if (noreturn) {
1457         if (!ir_block_create_return(self, ctx, nullptr)) {
1458             compile_error(ctx, "internal error: failed to generate dummy-return instruction");
1459             delete in;
1460             return nullptr;
1461         }
1462     }
1463     */
1464     return in;
1465 }
1466
1467 ir_value* ir_call_value(ir_instr *self)
1468 {
1469     return self->_m_ops[0];
1470 }
1471
1472 void ir_call_param(ir_instr* self, ir_value *v)
1473 {
1474     self->m_params.push_back(v);
1475     v->m_reads.push_back(self);
1476 }
1477
1478 /* binary op related code */
1479
1480 ir_value* ir_block_create_binop(ir_block *self, lex_ctx_t ctx,
1481                                 const char *label, int opcode,
1482                                 ir_value *left, ir_value *right)
1483 {
1484     qc_type ot = TYPE_VOID;
1485     switch (opcode) {
1486         case INSTR_ADD_F:
1487         case INSTR_SUB_F:
1488         case INSTR_DIV_F:
1489         case INSTR_MUL_F:
1490         case INSTR_MUL_V:
1491         case INSTR_AND:
1492         case INSTR_OR:
1493 #if 0
1494         case INSTR_AND_I:
1495         case INSTR_AND_IF:
1496         case INSTR_AND_FI:
1497         case INSTR_OR_I:
1498         case INSTR_OR_IF:
1499         case INSTR_OR_FI:
1500 #endif
1501         case INSTR_BITAND:
1502         case INSTR_BITOR:
1503         case VINSTR_BITXOR:
1504 #if 0
1505         case INSTR_SUB_S: /* -- offset of string as float */
1506         case INSTR_MUL_IF:
1507         case INSTR_MUL_FI:
1508         case INSTR_DIV_IF:
1509         case INSTR_DIV_FI:
1510         case INSTR_BITOR_IF:
1511         case INSTR_BITOR_FI:
1512         case INSTR_BITAND_FI:
1513         case INSTR_BITAND_IF:
1514         case INSTR_EQ_I:
1515         case INSTR_NE_I:
1516 #endif
1517             ot = TYPE_FLOAT;
1518             break;
1519 #if 0
1520         case INSTR_ADD_I:
1521         case INSTR_ADD_IF:
1522         case INSTR_ADD_FI:
1523         case INSTR_SUB_I:
1524         case INSTR_SUB_FI:
1525         case INSTR_SUB_IF:
1526         case INSTR_MUL_I:
1527         case INSTR_DIV_I:
1528         case INSTR_BITAND_I:
1529         case INSTR_BITOR_I:
1530         case INSTR_XOR_I:
1531         case INSTR_RSHIFT_I:
1532         case INSTR_LSHIFT_I:
1533             ot = TYPE_INTEGER;
1534             break;
1535 #endif
1536         case INSTR_ADD_V:
1537         case INSTR_SUB_V:
1538         case INSTR_MUL_VF:
1539         case INSTR_MUL_FV:
1540         case VINSTR_BITAND_V:
1541         case VINSTR_BITOR_V:
1542         case VINSTR_BITXOR_V:
1543         case VINSTR_BITAND_VF:
1544         case VINSTR_BITOR_VF:
1545         case VINSTR_BITXOR_VF:
1546         case VINSTR_CROSS:
1547 #if 0
1548         case INSTR_DIV_VF:
1549         case INSTR_MUL_IV:
1550         case INSTR_MUL_VI:
1551 #endif
1552             ot = TYPE_VECTOR;
1553             break;
1554 #if 0
1555         case INSTR_ADD_SF:
1556             ot = TYPE_POINTER;
1557             break;
1558 #endif
1559     /*
1560      * in the following default case the value of opcode can never be
1561      * 1-9 or 62-65: those opcodes are already handled by the cases above
1562      */
1563         default:
1564             /* ranges: */
1565             /* boolean operations result in floats */
1566
1567             /*
1568              * if "opcode >= 10" takes its true branch, opcode is at least 10;
1569              * if "opcode <= 23" takes its false branch, opcode is at least 24
1570              */
1571             if (opcode >= INSTR_EQ_F && opcode <= INSTR_GT)
1572                 ot = TYPE_FLOAT;
1573
1574             /*
1575              * At the disabled condition "opcode <= 23" below, opcode is already
1576              * known to be at least 24 and not one of {1-9, 62-65}, so that
1577              * condition can never be true.
1578              *
1579              * Thus the corresponding ot = TYPE_FLOAT assignment is unreachable.
1582              */
1583 #if 0
1584             else if (opcode >= INSTR_LE && opcode <= INSTR_GT)
1585                 ot = TYPE_FLOAT;
1586             else if (opcode >= INSTR_LE_I && opcode <= INSTR_EQ_FI)
1587                 ot = TYPE_FLOAT;
1588 #endif
1589             break;
1590     };
1591     if (ot == TYPE_VOID) {
1592         /* The AST or parser were supposed to check this! */
1593         return nullptr;
1594     }
1595
1596     return ir_block_create_general_instr(self, ctx, label, opcode, left, right, ot);
1597 }
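/* Informal examples of the result-type deduction above:
 *   ir_block_create_binop(b, ctx, "dot", INSTR_MUL_V, v1, v2) -> TYPE_FLOAT
 *   ir_block_create_binop(b, ctx, "sum", INSTR_ADD_V, v1, v2) -> TYPE_VECTOR
 *   comparison opcodes (INSTR_EQ_F .. INSTR_GT) fall through to the default
 *   branch and also yield TYPE_FLOAT results.
 */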
1598
1599 ir_value* ir_block_create_unary(ir_block *self, lex_ctx_t ctx,
1600                                 const char *label, int opcode,
1601                                 ir_value *operand)
1602 {
1603     qc_type ot = TYPE_FLOAT;
1604     switch (opcode) {
1605         case INSTR_NOT_F:
1606         case INSTR_NOT_V:
1607         case INSTR_NOT_S:
1608         case INSTR_NOT_ENT:
1609         case INSTR_NOT_FNC: /*
1610         case INSTR_NOT_I:   */
1611             ot = TYPE_FLOAT;
1612             break;
1613
1614         /*
1615          * Negation for virtual instructions is emulated as a subtraction from zero.
1616          * Thankfully the operand for 0 already exists, so we just source it from here.
1617          */
1618         case VINSTR_NEG_F:
1619             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_F, nullptr, operand, ot);
1620         case VINSTR_NEG_V:
1621             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_V, nullptr, operand, TYPE_VECTOR);
1622
1623         default:
1624             ot = operand->m_vtype;
1625             break;
1626     };
1627     if (ot == TYPE_VOID) {
1628         /* The AST or parser were supposed to check this! */
1629         return nullptr;
1630     }
1631
1632     /* let's use the general instruction creator and pass nullptr for OPB */
1633     return ir_block_create_general_instr(self, ctx, label, opcode, operand, nullptr, ot);
1634 }
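/* Informal example: ir_block_create_unary(b, ctx, "neg", VINSTR_NEG_F, x) emits
 * no dedicated negation instruction; as handled above it becomes an INSTR_SUB_F
 * whose first operand is left null (standing in for the zero value), i.e.
 * effectively 0 - x, and VINSTR_NEG_V does the same with INSTR_SUB_V.
 */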
1635
1636 static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t ctx, const char *label,
1637                                         int op, ir_value *a, ir_value *b, qc_type outype)
1638 {
1639     ir_instr *instr;
1640     ir_value *out;
1641
1642     out = new ir_value(self->m_owner, label ? label : "", store_value, outype);
1643     if (!out)
1644         return nullptr;
1645
1646     instr = new ir_instr(ctx, self, op);
1647     if (!instr) {
1648         return nullptr;
1649     }
1650
1651     if (!ir_instr_op(instr, 0, out, true) ||
1652         !ir_instr_op(instr, 1, a, false) ||
1653         !ir_instr_op(instr, 2, b, false) )
1654     {
1655         goto on_error;
1656     }
1657
1658     vec_push(self->m_instr, instr);
1659
1660     return out;
1661 on_error:
1662     delete instr;
1663     return nullptr;
1664 }
1665
1666 ir_value* ir_block_create_fieldaddress(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field)
1667 {
1668     ir_value *v;
1669
1670     /* Support for various pointer types is a todo, if so desired */
1671     if (ent->m_vtype != TYPE_ENTITY)
1672         return nullptr;
1673
1674     if (field->m_vtype != TYPE_FIELD)
1675         return nullptr;
1676
1677     v = ir_block_create_general_instr(self, ctx, label, INSTR_ADDRESS, ent, field, TYPE_POINTER);
1678     v->m_fieldtype = field->m_fieldtype;
1679     return v;
1680 }
1681
1682 ir_value* ir_block_create_load_from_ent(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field, qc_type outype)
1683 {
1684     int op;
1685     if (ent->m_vtype != TYPE_ENTITY)
1686         return nullptr;
1687
1688     /* at some point we could redirect for TYPE_POINTER... but that could lead to carelessness */
1689     if (field->m_vtype != TYPE_FIELD)
1690         return nullptr;
1691
1692     switch (outype)
1693     {
1694         case TYPE_FLOAT:    op = INSTR_LOAD_F;   break;
1695         case TYPE_VECTOR:   op = INSTR_LOAD_V;   break;
1696         case TYPE_STRING:   op = INSTR_LOAD_S;   break;
1697         case TYPE_FIELD:    op = INSTR_LOAD_FLD; break;
1698         case TYPE_ENTITY:   op = INSTR_LOAD_ENT; break;
1699         case TYPE_FUNCTION: op = INSTR_LOAD_FNC; break;
1700 #if 0
1701         case TYPE_POINTER: op = INSTR_LOAD_I;   break;
1702         case TYPE_INTEGER: op = INSTR_LOAD_I;   break;
1703 #endif
1704         default:
1705             irerror(self->m_context, "invalid type for ir_block_create_load_from_ent: %s", type_name[outype]);
1706             return nullptr;
1707     }
1708
1709     return ir_block_create_general_instr(self, ctx, label, op, ent, field, outype);
1710 }
1711
1712 /* PHI resolving breaks the SSA, and must thus be the last
1713  * step before life-range calculation.
1714  */
1715
1716 static bool ir_block_naive_phi(ir_block *self);
1717 bool ir_function_naive_phi(ir_function *self)
1718 {
1719     for (auto& b : self->m_blocks)
1720         if (!ir_block_naive_phi(b.get()))
1721             return false;
1722     return true;
1723 }
1724
1725 static bool ir_block_naive_phi(ir_block *self)
1726 {
1727     size_t i;
1728     /* FIXME: optionally, create_phi can add the phis
1729      * to a list so we don't need to loop through blocks
1730      * - anyway: "don't optimize YET"
1731      */
1732     for (i = 0; i < vec_size(self->m_instr); ++i)
1733     {
1734         ir_instr *instr = self->m_instr[i];
1735         if (instr->m_opcode != VINSTR_PHI)
1736             continue;
1737
1738         vec_remove(self->m_instr, i, 1);
1739         --i; /* NOTE: i+1 below */
1740
1741         for (auto &it : instr->m_phi) {
1742             ir_value *v = it.value;
1743             ir_block *b = it.from;
1744             if (v->m_store == store_value && v->m_reads.size() == 1 && v->m_writes.size() == 1) {
1745                 /* replace the value */
1746                 if (!ir_instr_op(v->m_writes[0], 0, instr->_m_ops[0], true))
1747                     return false;
1748             } else {
1749                 /* force a move instruction */
1750                 ir_instr *prevjump = vec_last(b->m_instr);
1751                 vec_pop(b->m_instr);
1752                 b->m_final = false;
1753                 instr->_m_ops[0]->m_store = store_global;
1754                 if (!ir_block_create_store(b, instr->m_context, instr->_m_ops[0], v))
1755                     return false;
1756                 instr->_m_ops[0]->m_store = store_value;
1757                 vec_push(b->m_instr, prevjump);
1758                 b->m_final = true;
1759             }
1760         }
1761         delete instr;
1762     }
1763     return true;
1764 }
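/* Informal sketch of the naive resolution above: a
 *
 *   %x = PHI [ %a from B1, %b from B2 ]
 *
 * is deleted; when an incoming value is a single-use SSA temp its defining
 * instruction is retargeted to write %x directly, otherwise the predecessor
 * block gets a STORE of the incoming value into %x inserted just before its
 * final jump.
 */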
1765
1766 /***********************************************************************
1767  * IR Temp allocation code
1768  * Propagating value life ranges by walking through the function backwards
1769  * until no more changes are made.
1770  * In theory one additional pass is needed for every level of loop nesting,
1771  * though this implementation might run an additional time for nested ifs.
1773  */
1774
1775 /* Enumerate instructions; their ids are used by the value life-ranges
1776  */
1777 static void ir_block_enumerate(ir_block *self, size_t *_eid)
1778 {
1779     size_t i;
1780     size_t eid = *_eid;
1781     for (i = 0; i < vec_size(self->m_instr); ++i)
1782     {
1783         self->m_instr[i]->m_eid = eid++;
1784     }
1785     *_eid = eid;
1786 }
1787
1788 /* Enumerate blocks and instructions.
1789  * The block-enumeration is unordered!
1790  * We do not really use the block enumeration; however,
1791  * the instruction enumeration is important for life-ranges.
1792  */
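/* For illustration: a function with blocks B0 (two instructions) and B1
 * (one instruction) is numbered
 *     B0: entry_id = 0, instruction eids 1 and 2
 *     B1: entry_id = 3, instruction eid  4
 * so every block entry point and every instruction has a unique position
 * that life ranges can refer to.
 */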
1793 void ir_function_enumerate(ir_function *self)
1794 {
1795     size_t instruction_id = 0;
1796     size_t block_eid = 0;
1797     for (auto& block : self->m_blocks)
1798     {
1799         /* each block now gets an additional "entry" instruction id
1800          * we can use to avoid point-life issues
1801          */
1802         block->m_entry_id = instruction_id;
1803         block->m_eid      = block_eid;
1804         ++instruction_id;
1805         ++block_eid;
1806
1807         ir_block_enumerate(block.get(), &instruction_id);
1808     }
1809 }
1810
1811 /* Local-value allocator
1812  * After finishing creating the liferange of all values used in a function
1813  * we can allocate their global-positions.
1814  * This is the counterpart to register-allocation in register machines.
1815  */
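/* For illustration: two locals whose life ranges do not overlap, say one
 * live for eids [2,5] and another live for eids [7,9], can share a single
 * slot, just like two virtual registers sharing one hardware register.
 * Values flagged with m_unique_life always receive a slot of their own.
 */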
1816 struct function_allocator {
1817     ir_value **locals;
1818     size_t *sizes;
1819     size_t *positions;
1820     bool *unique;
1821 };
1822
1823 static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
1824 {
1825     ir_value *slot;
1826     size_t vsize = var->size();
1827
1828     var->m_code.local = vec_size(alloc->locals);
1829
1830     slot = new ir_value("reg", store_global, var->m_vtype);
1831     if (!slot)
1832         return false;
1833
1834     if (!slot->mergeLife(var))
1835         goto localerror;
1836
1837     vec_push(alloc->locals, slot);
1838     vec_push(alloc->sizes, vsize);
1839     vec_push(alloc->unique, var->m_unique_life);
1840
1841     return true;
1842
1843 localerror:
1844     delete slot;
1845     return false;
1846 }
1847
1848 static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
1849 {
1850     size_t a;
1851     ir_value *slot;
1852
1853     if (v->m_unique_life)
1854         return function_allocator_alloc(alloc, v);
1855
1856     for (a = 0; a < vec_size(alloc->locals); ++a)
1857     {
1858         /* if it's reserved for a unique liferange: skip */
1859         if (alloc->unique[a])
1860             continue;
1861
1862         slot = alloc->locals[a];
1863
1864         /* never resize parameters
1865          * (this will be required later when overlapping temps + locals)
1866          */
1867         if (a < vec_size(self->m_params) &&
1868             alloc->sizes[a] < v->size())
1869         {
1870             continue;
1871         }
1872
1873         if (ir_values_overlap(v, slot))
1874             continue;
1875
1876         if (!slot->mergeLife(v))
1877             return false;
1878
1879         /* adjust size for this slot */
1880         if (alloc->sizes[a] < v->size())
1881             alloc->sizes[a] = v->size();
1882
1883         v->m_code.local = a;
1884         return true;
1885     }
1886     if (a >= vec_size(alloc->locals)) {
1887         if (!function_allocator_alloc(alloc, v))
1888             return false;
1889     }
1890     return true;
1891 }
1892
1893 bool ir_function_allocate_locals(ir_function *self)
1894 {
1895     bool   retval = true;
1896     size_t pos;
1897     bool   opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
1898
1899     function_allocator lockalloc, globalloc;
1900
1901     if (self->m_locals.empty() && self->m_values.empty())
1902         return true;
1903
1904     globalloc.locals    = nullptr;
1905     globalloc.sizes     = nullptr;
1906     globalloc.positions = nullptr;
1907     globalloc.unique    = nullptr;
1908     lockalloc.locals    = nullptr;
1909     lockalloc.sizes     = nullptr;
1910     lockalloc.positions = nullptr;
1911     lockalloc.unique    = nullptr;
1912
1913     size_t i;
1914     for (i = 0; i < self->m_locals.size(); ++i)
1915     {
1916         ir_value *v = self->m_locals[i].get();
1917         if ((self->m_flags & IR_FLAG_MASK_NO_LOCAL_TEMPS) || !OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) {
1918             v->m_locked      = true;
1919             v->m_unique_life = true;
1920         }
1921         else if (i >= vec_size(self->m_params))
1922             break;
1923         else
1924             v->m_locked = true; /* lock parameter locals */
1925         if (!function_allocator_alloc((v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1926             goto error;
1927     }
1928     for (; i < self->m_locals.size(); ++i)
1929     {
1930         ir_value *v = self->m_locals[i].get();
1931         if (v->m_life.empty())
1932             continue;
1933         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1934             goto error;
1935     }
1936
1937     /* Allocate a slot for any value that still exists */
1938     for (i = 0; i < self->m_values.size(); ++i)
1939     {
1940         ir_value *v = self->m_values[i].get();
1941
1942         if (v->m_life.empty())
1943             continue;
1944
1945         /* CALL optimization:
1946          * If the value is a parameter-temp: 1 write, 1 read from a CALL
1947          * and it's not "locked", write it to the OFS_PARM directly.
1948          */
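        /* For illustration: in `foo(bar())` the temp receiving bar()'s
         * return value is written once (by the CALL to bar) and read once
         * (as a parameter of the CALL to foo), so instead of giving it a
         * slot of its own we point it straight at OFS_PARM0..OFS_PARM7 (or
         * at an extparam proto for arguments past the eighth), saving the
         * copy into the parameter register.
         */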
1949         if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->m_locked && !v->m_unique_life) {
1950             if (v->m_reads.size() == 1 && v->m_writes.size() == 1 &&
1951                 (v->m_reads[0]->m_opcode == VINSTR_NRCALL ||
1952                  (v->m_reads[0]->m_opcode >= INSTR_CALL0 && v->m_reads[0]->m_opcode <= INSTR_CALL8)
1953                 )
1954                )
1955             {
1956                 size_t param;
1957                 ir_instr *call = v->m_reads[0];
1958                 if (!vec_ir_value_find(call->m_params, v, &param)) {
1959                     irerror(call->m_context, "internal error: unlocked parameter %s not found", v->m_name.c_str());
1960                     goto error;
1961                 }
1962                 ++opts_optimizationcount[OPTIM_CALL_STORES];
1963                 v->m_callparam = true;
1964                 if (param < 8)
1965                     v->setCodeAddress(OFS_PARM0 + 3*param);
1966                 else {
1967                     size_t nprotos = self->m_owner->m_extparam_protos.size();
1968                     ir_value *ep;
1969                     param -= 8;
1970                     if (nprotos > param)
1971                         ep = self->m_owner->m_extparam_protos[param].get();
1972                     else
1973                     {
1974                         ep = self->m_owner->generateExtparamProto();
1975                         while (++nprotos <= param)
1976                             ep = self->m_owner->generateExtparamProto();
1977                     }
1978                     ir_instr_op(v->m_writes[0], 0, ep, true);
1979                     call->m_params[param+8] = ep;
1980                 }
1981                 continue;
1982             }
1983             if (v->m_writes.size() == 1 && v->m_writes[0]->m_opcode == INSTR_CALL0) {
1984                 v->m_store = store_return;
1985                 if (v->m_members[0]) v->m_members[0]->m_store = store_return;
1986                 if (v->m_members[1]) v->m_members[1]->m_store = store_return;
1987                 if (v->m_members[2]) v->m_members[2]->m_store = store_return;
1988                 ++opts_optimizationcount[OPTIM_CALL_STORES];
1989                 continue;
1990             }
1991         }
1992
1993         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1994             goto error;
1995     }
1996
1997     if (!lockalloc.sizes && !globalloc.sizes) {
1998         goto cleanup;
1999     }
2000     vec_push(lockalloc.positions, 0);
2001     vec_push(globalloc.positions, 0);
2002
2003     /* Adjust slot positions based on sizes */
2004     if (lockalloc.sizes) {
2005         pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
2006         for (i = 1; i < vec_size(lockalloc.sizes); ++i)
2007         {
2008             pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
2009             vec_push(lockalloc.positions, pos);
2010         }
2011         self->m_allocated_locals = pos + vec_last(lockalloc.sizes);
2012     }
2013     if (globalloc.sizes) {
2014         pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
2015         for (i = 1; i < vec_size(globalloc.sizes); ++i)
2016         {
2017             pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
2018             vec_push(globalloc.positions, pos);
2019         }
2020         self->m_globaltemps = pos + vec_last(globalloc.sizes);
2021     }
2022
2023     /* Locals need to know their new position */
2024     for (auto& local : self->m_locals) {
2025         if (local->m_locked || !opt_gt)
2026             local->m_code.local = lockalloc.positions[local->m_code.local];
2027         else
2028             local->m_code.local = globalloc.positions[local->m_code.local];
2029     }
2030     /* Take over the actual slot positions on values */
2031     for (auto& value : self->m_values) {
2032         if (value->m_locked || !opt_gt)
2033             value->m_code.local = lockalloc.positions[value->m_code.local];
2034         else
2035             value->m_code.local = globalloc.positions[value->m_code.local];
2036     }
2037
2038     goto cleanup;
2039
2040 error:
2041     retval = false;
2042 cleanup:
2043     for (i = 0; i < vec_size(lockalloc.locals); ++i)
2044         delete lockalloc.locals[i];
2045     for (i = 0; i < vec_size(globalloc.locals); ++i)
2046         delete globalloc.locals[i];
2047     vec_free(globalloc.unique);
2048     vec_free(globalloc.locals);
2049     vec_free(globalloc.sizes);
2050     vec_free(globalloc.positions);
2051     vec_free(lockalloc.unique);
2052     vec_free(lockalloc.locals);
2053     vec_free(lockalloc.sizes);
2054     vec_free(lockalloc.positions);
2055     return retval;
2056 }
2057
2058 /* Get information about which operands
2059  * are read from or written to.
2060  */
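/* For illustration: the masks are per-operand bit flags, bit o referring to
 * _m_ops[o].  The default case (write = 1, read = 6) thus means "operand 0
 * is written, operands 1 and 2 are read", which fits the common
 * three-operand arithmetic instructions; the STOREP_* family reads all
 * three operands (read = 7) and writes through a pointer instead.
 */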
2061 static void ir_op_read_write(int op, size_t *read, size_t *write)
2062 {
2063     switch (op)
2064     {
2065     case VINSTR_JUMP:
2066     case INSTR_GOTO:
2067         *write = 0;
2068         *read = 0;
2069         break;
2070     case INSTR_IF:
2071     case INSTR_IFNOT:
2072 #if 0
2073     case INSTR_IF_S:
2074     case INSTR_IFNOT_S:
2075 #endif
2076     case INSTR_RETURN:
2077     case VINSTR_COND:
2078         *write = 0;
2079         *read = 1;
2080         break;
2081     case INSTR_STOREP_F:
2082     case INSTR_STOREP_V:
2083     case INSTR_STOREP_S:
2084     case INSTR_STOREP_ENT:
2085     case INSTR_STOREP_FLD:
2086     case INSTR_STOREP_FNC:
2087         *write = 0;
2088         *read  = 7;
2089         break;
2090     default:
2091         *write = 1;
2092         *read = 6;
2093         break;
2094     }
2095 }
2096
2097 static bool ir_block_living_add_instr(ir_block *self, size_t eid) {
2098     bool changed = false;
2099     for (auto &it : self->m_living)
2100         if (it->setAlive(eid))
2101             changed = true;
2102     return changed;
2103 }
2104
2105 static bool ir_block_living_lock(ir_block *self) {
2106     bool changed = false;
2107     for (auto &it : self->m_living) {
2108         if (it->m_locked)
2109             continue;
2110         it->m_locked = true;
2111         changed = true;
2112     }
2113     return changed;
2114 }
2115
2116 static bool ir_block_life_propagate(ir_block *self, bool *changed)
2117 {
2118     ir_instr *instr;
2119     ir_value *value;
2120     size_t i, o, p, mem;
2121     // bitmasks which operands are read from or written to
2122     size_t read, write;
2123
2124     self->m_living.clear();
2125
2126     p = vec_size(self->m_exits);
2127     for (i = 0; i < p; ++i) {
2128         ir_block *prev = self->m_exits[i];
2129         for (auto &it : prev->m_living)
2130             if (!vec_ir_value_find(self->m_living, it, nullptr))
2131                 self->m_living.push_back(it);
2132     }
2133
2134     i = vec_size(self->m_instr);
2135     while (i)
2136     { --i;
2137         instr = self->m_instr[i];
2138
2139         /* See which operands are read and write operands */
2140         ir_op_read_write(instr->m_opcode, &read, &write);
2141
2142         /* Go through the 3 main operands
2143          * writes first, then reads
2144          */
2145         for (o = 0; o < 3; ++o)
2146         {
2147             if (!instr->_m_ops[o]) /* no such operand */
2148                 continue;
2149
2150             value = instr->_m_ops[o];
2151
2152             /* We only care about locals */
2153             /* we also calculate parameter liferanges so that locals
2154              * can take up parameter slots */
2155             if (value->m_store != store_value &&
2156                 value->m_store != store_local &&
2157                 value->m_store != store_param)
2158                 continue;
2159
2160             /* write operands */
2161             /* When we write to a local, we consider it "dead" for the
2162              * remaining upper part of the function, since in SSA a value
2163              * can only be written once (== created)
2164              */
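            /* For illustration: scanning backwards, reaching the (single,
             * because of SSA) write marks the start of the live range at
             * this eid and removes the value from m_living, so instructions
             * above this one no longer extend its range.
             */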
2165             if (write & (1<<o))
2166             {
2167                 size_t idx;
2168                 bool in_living = vec_ir_value_find(self->m_living, value, &idx);
2169                 if (!in_living)
2170                 {
2171                     /* If the value isn't alive it hasn't been read before... */
2172                     /* TODO: See if the warning can be emitted during parsing or AST processing
2173                      * otherwise have warning printed here.
2174                      * IF printing a warning here: include filecontext_t,
2175                      * and make sure it's only printed once
2176                      * since this function is run multiple times.
2177                      */
2178                     /* con_err( "Value only written %s\n", value->m_name); */
2179                     if (value->setAlive(instr->m_eid))
2180                         *changed = true;
2181                 } else {
2182                     /* since 'living' won't contain it
2183                      * anymore, merge the life range here, since
2184                      * the catch-all merge at (A) below won't see it.
2185                      */
2186                     if (value->setAlive(instr->m_eid))
2187                         *changed = true;
2188                     // Then remove
2189                     self->m_living.erase(self->m_living.begin() + idx);
2190                 }
2191                 /* Removing a vector removes all members */
2192                 for (mem = 0; mem < 3; ++mem) {
2193                     if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], &idx)) {
2194                         if (value->m_members[mem]->setAlive(instr->m_eid))
2195                             *changed = true;
2196                         self->m_living.erase(self->m_living.begin() + idx);
2197                     }
2198                 }
2199                 /* Removing the last member removes the vector */
2200                 if (value->m_memberof) {
2201                     value = value->m_memberof;
2202                     for (mem = 0; mem < 3; ++mem) {
2203                         if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2204                             break;
2205                     }
2206                     if (mem == 3 && vec_ir_value_find(self->m_living, value, &idx)) {
2207                         if (value->setAlive(instr->m_eid))
2208                             *changed = true;
2209                         self->m_living.erase(self->m_living.begin() + idx);
2210                     }
2211                 }
2212             }
2213         }
2214
2215         /* These operations need a special case as they can break when using
2216          * same source and destination operand otherwise, as the engine may
2217          * read the source multiple times. */
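        /* For illustration: VINSTR_CROSS is later expanded into several
         * MUL_F / SUB_V statements which read the source vectors more than
         * once; if a source shared its slot with the destination, the first
         * partial write would clobber inputs still needed by later
         * statements.  Extending the source's life by one eid makes its
         * range overlap the destination's, so they never share a slot.
         */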
2218         if (instr->m_opcode == INSTR_MUL_VF ||
2219             instr->m_opcode == VINSTR_BITAND_VF ||
2220             instr->m_opcode == VINSTR_BITOR_VF ||
2221             instr->m_opcode == VINSTR_BITXOR ||
2222             instr->m_opcode == VINSTR_BITXOR_VF ||
2223             instr->m_opcode == VINSTR_BITXOR_V ||
2224             instr->m_opcode == VINSTR_CROSS)
2225         {
2226             value = instr->_m_ops[2];
2227             /* this source operand will get an additional lifetime */
2228             if (value->setAlive(instr->m_eid+1))
2229                 *changed = true;
2230             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2231                 *changed = true;
2232         }
2233
2234         if (instr->m_opcode == INSTR_MUL_FV ||
2235             instr->m_opcode == INSTR_LOAD_V ||
2236             instr->m_opcode == VINSTR_BITXOR ||
2237             instr->m_opcode == VINSTR_BITXOR_VF ||
2238             instr->m_opcode == VINSTR_BITXOR_V ||
2239             instr->m_opcode == VINSTR_CROSS)
2240         {
2241             value = instr->_m_ops[1];
2242             /* this source operand will get an additional lifetime */
2243             if (value->setAlive(instr->m_eid+1))
2244                 *changed = true;
2245             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2246                 *changed = true;
2247         }
2248
2249         for (o = 0; o < 3; ++o)
2250         {
2251             if (!instr->_m_ops[o]) /* no such operand */
2252                 continue;
2253
2254             value = instr->_m_ops[o];
2255
2256             /* We only care about locals */
2257             /* we also calculate parameter liferanges so that locals
2258              * can take up parameter slots */
2259             if (value->m_store != store_value &&
2260                 value->m_store != store_local &&
2261                 value->m_store != store_param)
2262                 continue;
2263
2264             /* read operands */
2265             if (read & (1<<o))
2266             {
2267                 if (!vec_ir_value_find(self->m_living, value, nullptr))
2268                     self->m_living.push_back(value);
2269                 /* reading adds the full vector */
2270                 if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2271                     self->m_living.push_back(value->m_memberof);
2272                 for (mem = 0; mem < 3; ++mem) {
2273                     if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2274                         self->m_living.push_back(value->m_members[mem]);
2275                 }
2276             }
2277         }
2278         /* PHI operands are always read operands */
2279         for (auto &it : instr->m_phi) {
2280             value = it.value;
2281             if (!vec_ir_value_find(self->m_living, value, nullptr))
2282                 self->m_living.push_back(value);
2283             /* reading adds the full vector */
2284             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2285                 self->m_living.push_back(value->m_memberof);
2286             for (mem = 0; mem < 3; ++mem) {
2287                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2288                     self->m_living.push_back(value->m_members[mem]);
2289             }
2290         }
2291
2292         /* on a call, all these values must be "locked" */
2293         if (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8) {
2294             if (ir_block_living_lock(self))
2295                 *changed = true;
2296         }
2297         /* call params are read operands too */
2298         for (auto &it : instr->m_params) {
2299             value = it;
2300             if (!vec_ir_value_find(self->m_living, value, nullptr))
2301                 self->m_living.push_back(value);
2302             /* reading adds the full vector */
2303             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2304                 self->m_living.push_back(value->m_memberof);
2305             for (mem = 0; mem < 3; ++mem) {
2306                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2307                     self->m_living.push_back(value->m_members[mem]);
2308             }
2309         }
2310
2311         /* (A) */
2312         if (ir_block_living_add_instr(self, instr->m_eid))
2313             *changed = true;
2314     }
2315     /* the "entry" instruction ID */
2316     if (ir_block_living_add_instr(self, self->m_entry_id))
2317         *changed = true;
2318
2319     return true;
2320 }
2321
2322 bool ir_function_calculate_liferanges(ir_function *self)
2323 {
2324     /* parameters live at 0 */
2325     for (size_t i = 0; i < vec_size(self->m_params); ++i)
2326         if (!self->m_locals[i].get()->setAlive(0))
2327             compile_error(self->m_context, "internal error: failed value-life merging");
2328
2329     bool changed;
2330     do {
2331         self->m_run_id++;
2332         changed = false;
2333         for (auto i = self->m_blocks.rbegin(); i != self->m_blocks.rend(); ++i)
2334             ir_block_life_propagate(i->get(), &changed);
2335     } while (changed);
2336
2337     if (self->m_blocks.size()) {
2338         ir_block *block = self->m_blocks[0].get();
2339         for (auto &it : block->m_living) {
2340             ir_value *v = it;
2341             if (v->m_store != store_local)
2342                 continue;
2343             if (v->m_vtype == TYPE_VECTOR)
2344                 continue;
2345             self->m_flags |= IR_FLAG_HAS_UNINITIALIZED;
2346             /* find the instruction reading from it */
2347             size_t s = 0;
2348             for (; s < v->m_reads.size(); ++s) {
2349                 if (v->m_reads[s]->m_eid == v->m_life[0].end)
2350                     break;
2351             }
2352             if (s < v->m_reads.size()) {
2353                 if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2354                               "variable `%s` may be used uninitialized in this function\n"
2355                               " -> %s:%i",
2356                               v->m_name.c_str(),
2357                               v->m_reads[s]->m_context.file, v->m_reads[s]->m_context.line)
2358                    )
2359                 {
2360                     return false;
2361                 }
2362                 continue;
2363             }
2364             if (v->m_memberof) {
2365                 ir_value *vec = v->m_memberof;
2366                 for (s = 0; s < vec->m_reads.size(); ++s) {
2367                     if (vec->m_reads[s]->m_eid == v->m_life[0].end)
2368                         break;
2369                 }
2370                 if (s < vec->m_reads.size()) {
2371                     if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2372                                   "variable `%s` may be used uninitialized in this function\n"
2373                                   " -> %s:%i",
2374                                   v->m_name.c_str(),
2375                                   vec->m_reads[s]->m_context.file, vec->m_reads[s]->m_context.line)
2376                        )
2377                     {
2378                         return false;
2379                     }
2380                     continue;
2381                 }
2382             }
2383             if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2384                           "variable `%s` may be used uninitialized in this function", v->m_name.c_str()))
2385             {
2386                 return false;
2387             }
2388         }
2389     }
2390     return true;
2391 }
2392
2393 /***********************************************************************
2394  * IR Code-Generation
2395  *
2396  * Since the IR has the convention of putting 'write' operands
2397  * at the beginning, we have to rotate the operands of instructions
2398  * properly in order to generate valid QCVM code.
2399  *
2400  * Having destinations at a fixed position is more convenient. In QC
2401  * this is *mostly* OPC, but FTE adds at least 2 instructions which
2402  * read from OPA and store to OPB rather than OPC, which is partially
2403  * the reason why the implementation of these instructions in
2404  * darkplaces has been delayed for so long.
2405  *
2406  * Breaking conventions is annoying...
2407  */
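/* For illustration: an IR instruction keeps its destination in _m_ops[0]
 * and its sources in _m_ops[1] and _m_ops[2].  A QCVM statement such as
 * ADD_F expects the sources in o1/o2 and the destination in o3, so the
 * general case in gen_blocks_recursive() emits
 *     o1 = ops[1], o2 = ops[2], o3 = ops[0]
 * while RETURN/DONE and the STORE/STOREP families re-shuffle afterwards
 * (STORE, for instance, takes its source in o1 and its destination in o2,
 * leaving o3 unused).
 */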
2408 static bool gen_global_field(code_t *code, ir_value *global)
2409 {
2410     if (global->m_hasvalue)
2411     {
2412         ir_value *fld = global->m_constval.vpointer;
2413         if (!fld) {
2414             irerror(global->m_context, "Invalid field constant with no field: %s", global->m_name.c_str());
2415             return false;
2416         }
2417
2418         /* copy the field's value */
2419         global->setCodeAddress(code->globals.size());
2420         code->globals.push_back(fld->m_code.fieldaddr);
2421         if (global->m_fieldtype == TYPE_VECTOR) {
2422             code->globals.push_back(fld->m_code.fieldaddr+1);
2423             code->globals.push_back(fld->m_code.fieldaddr+2);
2424         }
2425     }
2426     else
2427     {
2428         global->setCodeAddress(code->globals.size());
2429         code->globals.push_back(0);
2430         if (global->m_fieldtype == TYPE_VECTOR) {
2431             code->globals.push_back(0);
2432             code->globals.push_back(0);
2433         }
2434     }
2435     if (global->m_code.globaladdr < 0)
2436         return false;
2437     return true;
2438 }
2439
2440 static bool gen_global_pointer(code_t *code, ir_value *global)
2441 {
2442     if (global->m_hasvalue)
2443     {
2444         ir_value *target = global->m_constval.vpointer;
2445         if (!target) {
2446             irerror(global->m_context, "Invalid pointer constant: %s", global->m_name.c_str());
2447             /* null pointers point to the nullptr constant, which also
2448              * sits at address 0, but still has an ir_value of its own.
2449              */
2450             return false;
2451         }
2452
2453         /* Here, relocations ARE possible - in fteqcc-enhanced-qc:
2454          * void() foo; <- proto
2455          * void() *fooptr = &foo;
2456          * void() foo = { code }
2457          */
2458         if (!target->m_code.globaladdr) {
2459             /* FIXME: Check for the constant nullptr ir_value!
2460              * because then code.globaladdr being 0 is valid.
2461              */
2462             irerror(global->m_context, "FIXME: Relocation support");
2463             return false;
2464         }
2465
2466         global->setCodeAddress(code->globals.size());
2467         code->globals.push_back(target->m_code.globaladdr);
2468     }
2469     else
2470     {
2471         global->setCodeAddress(code->globals.size());
2472         code->globals.push_back(0);
2473     }
2474     if (global->m_code.globaladdr < 0)
2475         return false;
2476     return true;
2477 }
2478
2479 static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *block)
2480 {
2481     prog_section_statement_t stmt;
2482     ir_instr *instr;
2483     ir_block *target;
2484     ir_block *ontrue;
2485     ir_block *onfalse;
2486     size_t    stidx;
2487     size_t    i;
2488     int       j;
2489
2490     block->m_generated = true;
2491     block->m_code_start = code->statements.size();
2492     for (i = 0; i < vec_size(block->m_instr); ++i)
2493     {
2494         instr = block->m_instr[i];
2495
2496         if (instr->m_opcode == VINSTR_PHI) {
2497             irerror(block->m_context, "cannot generate virtual instruction (phi)");
2498             return false;
2499         }
2500
2501         if (instr->m_opcode == VINSTR_JUMP) {
2502             target = instr->m_bops[0];
2503             /* for unconditional jumps, if the target hasn't been generated
2504              * yet, we generate it right here.
2505              */
2506             if (!target->m_generated)
2507                 return gen_blocks_recursive(code, func, target);
2508
2509             /* otherwise we generate a jump instruction */
2510             stmt.opcode = INSTR_GOTO;
2511             stmt.o1.s1 = target->m_code_start - code->statements.size();
2512             stmt.o2.s1 = 0;
2513             stmt.o3.s1 = 0;
2514             if (stmt.o1.s1 != 1)
2515                 code_push_statement(code, &stmt, instr->m_context);
2516
2517             /* no further instructions can be in this block */
2518             return true;
2519         }
2520
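        /* Lowering sketch for VINSTR_BITXOR below: QC has no float XOR
         * instruction, so it is synthesized as (a | b) - (a & b), keeping
         * the AND result in the builder's m_vinstr_temp[0] scratch global.
         */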
2521         if (instr->m_opcode == VINSTR_BITXOR) {
2522             stmt.opcode = INSTR_BITOR;
2523             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2524             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2525             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2526             code_push_statement(code, &stmt, instr->m_context);
2527             stmt.opcode = INSTR_BITAND;
2528             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2529             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2530             stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2531             code_push_statement(code, &stmt, instr->m_context);
2532             stmt.opcode = INSTR_SUB_F;
2533             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2534             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2535             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2536             code_push_statement(code, &stmt, instr->m_context);
2537
2538             /* instruction generated */
2539             continue;
2540         }
2541
2542         if (instr->m_opcode == VINSTR_BITAND_V) {
2543             stmt.opcode = INSTR_BITAND;
2544             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2545             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2546             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2547             code_push_statement(code, &stmt, instr->m_context);
2548             ++stmt.o1.s1;
2549             ++stmt.o2.s1;
2550             ++stmt.o3.s1;
2551             code_push_statement(code, &stmt, instr->m_context);
2552             ++stmt.o1.s1;
2553             ++stmt.o2.s1;
2554             ++stmt.o3.s1;
2555             code_push_statement(code, &stmt, instr->m_context);
2556
2557             /* instruction generated */
2558             continue;
2559         }
2560
2561         if (instr->m_opcode == VINSTR_BITOR_V) {
2562             stmt.opcode = INSTR_BITOR;
2563             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2564             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2565             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2566             code_push_statement(code, &stmt, instr->m_context);
2567             ++stmt.o1.s1;
2568             ++stmt.o2.s1;
2569             ++stmt.o3.s1;
2570             code_push_statement(code, &stmt, instr->m_context);
2571             ++stmt.o1.s1;
2572             ++stmt.o2.s1;
2573             ++stmt.o3.s1;
2574             code_push_statement(code, &stmt, instr->m_context);
2575
2576             /* instruction generated */
2577             continue;
2578         }
2579
2580         if (instr->m_opcode == VINSTR_BITXOR_V) {
2581             for (j = 0; j < 3; ++j) {
2582                 stmt.opcode = INSTR_BITOR;
2583                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2584                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2585                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2586                 code_push_statement(code, &stmt, instr->m_context);
2587                 stmt.opcode = INSTR_BITAND;
2588                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2589                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2590                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2591                 code_push_statement(code, &stmt, instr->m_context);
2592             }
2593             stmt.opcode = INSTR_SUB_V;
2594             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2595             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2596             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2597             code_push_statement(code, &stmt, instr->m_context);
2598
2599             /* instruction generated */
2600             continue;
2601         }
2602
2603         if (instr->m_opcode == VINSTR_BITAND_VF) {
2604             stmt.opcode = INSTR_BITAND;
2605             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2606             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2607             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2608             code_push_statement(code, &stmt, instr->m_context);
2609             ++stmt.o1.s1;
2610             ++stmt.o3.s1;
2611             code_push_statement(code, &stmt, instr->m_context);
2612             ++stmt.o1.s1;
2613             ++stmt.o3.s1;
2614             code_push_statement(code, &stmt, instr->m_context);
2615
2616             /* instruction generated */
2617             continue;
2618         }
2619
2620         if (instr->m_opcode == VINSTR_BITOR_VF) {
2621             stmt.opcode = INSTR_BITOR;
2622             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2623             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2624             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2625             code_push_statement(code, &stmt, instr->m_context);
2626             ++stmt.o1.s1;
2627             ++stmt.o3.s1;
2628             code_push_statement(code, &stmt, instr->m_context);
2629             ++stmt.o1.s1;
2630             ++stmt.o3.s1;
2631             code_push_statement(code, &stmt, instr->m_context);
2632
2633             /* instruction generated */
2634             continue;
2635         }
2636
2637         if (instr->m_opcode == VINSTR_BITXOR_VF) {
2638             for (j = 0; j < 3; ++j) {
2639                 stmt.opcode = INSTR_BITOR;
2640                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2641                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2642                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2643                 code_push_statement(code, &stmt, instr->m_context);
2644                 stmt.opcode = INSTR_BITAND;
2645                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2646                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2647                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2648                 code_push_statement(code, &stmt, instr->m_context);
2649             }
2650             stmt.opcode = INSTR_SUB_V;
2651             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2652             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2653             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2654             code_push_statement(code, &stmt, instr->m_context);
2655
2656             /* instruction generated */
2657             continue;
2658         }
2659
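        /* Lowering sketch for VINSTR_CROSS below: each component j of the
         * result is a[(j+1)%3]*b[(j+2)%3] - a[(j+2)%3]*b[(j+1)%3]; the first
         * products go into the destination, the second into the scratch
         * vector m_vinstr_temp[0], and a final SUB_V yields the cross
         * product.
         */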
2660         if (instr->m_opcode == VINSTR_CROSS) {
2661             stmt.opcode = INSTR_MUL_F;
2662             for (j = 0; j < 3; ++j) {
2663                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 1) % 3;
2664                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 2) % 3;
2665                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2666                 code_push_statement(code, &stmt, instr->m_context);
2667                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 2) % 3;
2668                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 1) % 3;
2669                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2670                 code_push_statement(code, &stmt, instr->m_context);
2671             }
2672             stmt.opcode = INSTR_SUB_V;
2673             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2674             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2675             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2676             code_push_statement(code, &stmt, instr->m_context);
2677
2678             /* instruction generated */
2679             continue;
2680         }
2681
2682         if (instr->m_opcode == VINSTR_COND) {
2683             ontrue  = instr->m_bops[0];
2684             onfalse = instr->m_bops[1];
2685             /* TODO: have the AST signal which block should
2686              * come first: e.g. optimize IFs without ELSE...
2687              */
2688
2689             stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2690             stmt.o2.u1 = 0;
2691             stmt.o3.s1 = 0;
2692
2693             if (ontrue->m_generated) {
2694                 stmt.opcode = INSTR_IF;
2695                 stmt.o2.s1 = ontrue->m_code_start - code->statements.size();
2696                 if (stmt.o2.s1 != 1)
2697                     code_push_statement(code, &stmt, instr->m_context);
2698             }
2699             if (onfalse->m_generated) {
2700                 stmt.opcode = INSTR_IFNOT;
2701                 stmt.o2.s1 = onfalse->m_code_start - code->statements.size();
2702                 if (stmt.o2.s1 != 1)
2703                     code_push_statement(code, &stmt, instr->m_context);
2704             }
2705             if (!ontrue->m_generated) {
2706                 if (onfalse->m_generated)
2707                     return gen_blocks_recursive(code, func, ontrue);
2708             }
2709             if (!onfalse->m_generated) {
2710                 if (ontrue->m_generated)
2711                     return gen_blocks_recursive(code, func, onfalse);
2712             }
2713             /* neither ontrue nor onfalse exist */
2714             stmt.opcode = INSTR_IFNOT;
2715             if (!instr->m_likely) {
2716                 /* Honor the likelihood hint */
2717                 ir_block *tmp = onfalse;
2718                 stmt.opcode = INSTR_IF;
2719                 onfalse = ontrue;
2720                 ontrue = tmp;
2721             }
2722             stidx = code->statements.size();
2723             code_push_statement(code, &stmt, instr->m_context);
2724             /* on false we jump, so add ontrue-path */
2725             if (!gen_blocks_recursive(code, func, ontrue))
2726                 return false;
2727             /* fixup the jump address */
2728             code->statements[stidx].o2.s1 = code->statements.size() - stidx;
2729             /* generate onfalse path */
2730             if (onfalse->m_generated) {
2731                 /* fixup the jump address */
2732                 code->statements[stidx].o2.s1 = onfalse->m_code_start - stidx;
2733                 if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2734                     code->statements[stidx] = code->statements[stidx+1];
2735                     if (code->statements[stidx].o1.s1 < 0)
2736                         code->statements[stidx].o1.s1++;
2737                     code_pop_statement(code);
2738                 }
2739                 stmt.opcode = code->statements.back().opcode;
2740                 if (stmt.opcode == INSTR_GOTO ||
2741                     stmt.opcode == INSTR_IF ||
2742                     stmt.opcode == INSTR_IFNOT ||
2743                     stmt.opcode == INSTR_RETURN ||
2744                     stmt.opcode == INSTR_DONE)
2745                 {
2746                     /* no use jumping from here */
2747                     return true;
2748                 }
2749                 /* may have been generated in the previous recursive call */
2750                 stmt.opcode = INSTR_GOTO;
2751                 stmt.o1.s1 = onfalse->m_code_start - code->statements.size();
2752                 stmt.o2.s1 = 0;
2753                 stmt.o3.s1 = 0;
2754                 if (stmt.o1.s1 != 1)
2755                     code_push_statement(code, &stmt, instr->m_context);
2756                 return true;
2757             }
2758             else if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2759                 code->statements[stidx] = code->statements[stidx+1];
2760                 if (code->statements[stidx].o1.s1 < 0)
2761                     code->statements[stidx].o1.s1++;
2762                 code_pop_statement(code);
2763             }
2764             /* if not, generate now */
2765             return gen_blocks_recursive(code, func, onfalse);
2766         }
2767
2768         if ( (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8)
2769            || instr->m_opcode == VINSTR_NRCALL)
2770         {
2771             size_t p, first;
2772             ir_value *retvalue;
2773
2774             first = instr->m_params.size();
2775             if (first > 8)
2776                 first = 8;
2777             for (p = 0; p < first; ++p)
2778             {
2779                 ir_value *param = instr->m_params[p];
2780                 if (param->m_callparam)
2781                     continue;
2782
2783                 stmt.opcode = INSTR_STORE_F;
2784                 stmt.o3.u1 = 0;
2785
2786                 if (param->m_vtype == TYPE_FIELD)
2787                     stmt.opcode = field_store_instr[param->m_fieldtype];
2788                 else if (param->m_vtype == TYPE_NIL)
2789                     stmt.opcode = INSTR_STORE_V;
2790                 else
2791                     stmt.opcode = type_store_instr[param->m_vtype];
2792                 stmt.o1.u1 = param->codeAddress();
2793                 stmt.o2.u1 = OFS_PARM0 + 3 * p;
2794
2795                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2796                     /* fetch 3 separate floats */
2797                     stmt.opcode = INSTR_STORE_F;
2798                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2799                     code_push_statement(code, &stmt, instr->m_context);
2800                     stmt.o2.u1++;
2801                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2802                     code_push_statement(code, &stmt, instr->m_context);
2803                     stmt.o2.u1++;
2804                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2805                     code_push_statement(code, &stmt, instr->m_context);
2806                 }
2807                 else
2808                     code_push_statement(code, &stmt, instr->m_context);
2809             }
2810             /* Now handle extparams */
2811             first = instr->m_params.size();
2812             for (; p < first; ++p)
2813             {
2814                 ir_builder *ir = func->m_owner;
2815                 ir_value *param = instr->m_params[p];
2816                 ir_value *targetparam;
2817
2818                 if (param->m_callparam)
2819                     continue;
2820
2821                 if (p-8 >= ir->m_extparams.size())
2822                     ir->generateExtparam();
2823
2824                 targetparam = ir->m_extparams[p-8];
2825
2826                 stmt.opcode = INSTR_STORE_F;
2827                 stmt.o3.u1 = 0;
2828
2829                 if (param->m_vtype == TYPE_FIELD)
2830                     stmt.opcode = field_store_instr[param->m_fieldtype];
2831                 else if (param->m_vtype == TYPE_NIL)
2832                     stmt.opcode = INSTR_STORE_V;
2833                 else
2834                     stmt.opcode = type_store_instr[param->m_vtype];
2835                 stmt.o1.u1 = param->codeAddress();
2836                 stmt.o2.u1 = targetparam->codeAddress();
2837                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2838                     /* fetch 3 separate floats */
2839                     stmt.opcode = INSTR_STORE_F;
2840                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2841                     code_push_statement(code, &stmt, instr->m_context);
2842                     stmt.o2.u1++;
2843                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2844                     code_push_statement(code, &stmt, instr->m_context);
2845                     stmt.o2.u1++;
2846                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2847                     code_push_statement(code, &stmt, instr->m_context);
2848                 }
2849                 else
2850                     code_push_statement(code, &stmt, instr->m_context);
2851             }
2852
2853             stmt.opcode = INSTR_CALL0 + instr->m_params.size();
2854             if (stmt.opcode > INSTR_CALL8)
2855                 stmt.opcode = INSTR_CALL8;
2856             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2857             stmt.o2.u1 = 0;
2858             stmt.o3.u1 = 0;
2859             code_push_statement(code, &stmt, instr->m_context);
2860
2861             retvalue = instr->_m_ops[0];
2862             if (retvalue && retvalue->m_store != store_return &&
2863                 (retvalue->m_store == store_global || retvalue->m_life.size()))
2864             {
2865                 /* not to be kept in OFS_RETURN */
2866                 if (retvalue->m_vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS))
2867                     stmt.opcode = field_store_instr[retvalue->m_fieldtype];
2868                 else
2869                     stmt.opcode = type_store_instr[retvalue->m_vtype];
2870                 stmt.o1.u1 = OFS_RETURN;
2871                 stmt.o2.u1 = retvalue->codeAddress();
2872                 stmt.o3.u1 = 0;
2873                 code_push_statement(code, &stmt, instr->m_context);
2874             }
2875             continue;
2876         }
2877
2878         if (instr->m_opcode == INSTR_STATE) {
2879             stmt.opcode = instr->m_opcode;
2880             if (instr->_m_ops[0])
2881                 stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2882             if (instr->_m_ops[1])
2883                 stmt.o2.u1 = instr->_m_ops[1]->codeAddress();
2884             stmt.o3.u1 = 0;
2885             code_push_statement(code, &stmt, instr->m_context);
2886             continue;
2887         }
2888
2889         stmt.opcode = instr->m_opcode;
2890         stmt.o1.u1 = 0;
2891         stmt.o2.u1 = 0;
2892         stmt.o3.u1 = 0;
2893
2894         /* This is the general order of operands */
2895         if (instr->_m_ops[0])
2896             stmt.o3.u1 = instr->_m_ops[0]->codeAddress();
2897
2898         if (instr->_m_ops[1])
2899             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2900
2901         if (instr->_m_ops[2])
2902             stmt.o2.u1 = instr->_m_ops[2]->codeAddress();
2903
2904         if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
2905         {
2906             stmt.o1.u1 = stmt.o3.u1;
2907             stmt.o3.u1 = 0;
2908         }
2909         else if ((stmt.opcode >= INSTR_STORE_F &&
2910                   stmt.opcode <= INSTR_STORE_FNC) ||
2911                  (stmt.opcode >= INSTR_STOREP_F &&
2912                   stmt.opcode <= INSTR_STOREP_FNC))
2913         {
2914             /* 2-operand instructions with A -> B */
2915             stmt.o2.u1 = stmt.o3.u1;
2916             stmt.o3.u1 = 0;
2917
2918             /* tiny optimization, don't output
2919              * STORE a, a
2920              */
2921             if (stmt.o2.u1 == stmt.o1.u1 &&
2922                 OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
2923             {
2924                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
2925                 continue;
2926             }
2927         }
2928         code_push_statement(code, &stmt, instr->m_context);
2929     }
2930     return true;
2931 }
2932
2933 static bool gen_function_code(code_t *code, ir_function *self)
2934 {
2935     ir_block *block;
2936     prog_section_statement_t stmt, *retst;
2937
2938     /* Starting from entry point, we generate blocks "as they come"
2939      * for now. Dead blocks will obviously not be translated.
2940      */
2941     if (self->m_blocks.empty()) {
2942         irerror(self->m_context, "Function '%s' declared without body.", self->m_name.c_str());
2943         return false;
2944     }
2945
2946     block = self->m_blocks[0].get();
2947     if (block->m_generated)
2948         return true;
2949
2950     if (!gen_blocks_recursive(code, self, block)) {
2951         irerror(self->m_context, "failed to generate blocks for '%s'", self->m_name.c_str());
2952         return false;
2953     }
2954
2955     /* code_write and qcvm -disasm need to know that the function ends here */
2956     retst = &code->statements.back();
2957     if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) &&
2958         self->m_outtype == TYPE_VOID &&
2959         retst->opcode == INSTR_RETURN &&
2960         !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1)
2961     {
2962         retst->opcode = INSTR_DONE;
2963         ++opts_optimizationcount[OPTIM_VOID_RETURN];
2964     } else {
2965         lex_ctx_t last;
2966
2967         stmt.opcode = INSTR_DONE;
2968         stmt.o1.u1  = 0;
2969         stmt.o2.u1  = 0;
2970         stmt.o3.u1  = 0;
2971         last.line   = code->linenums.back();
2972         last.column = code->columnnums.back();
2973
2974         code_push_statement(code, &stmt, last);
2975     }
2976     return true;
2977 }
2978
2979 qcint_t ir_builder::filestring(const char *filename)
2980 {
2981     /* NOTE: filename pointers are copied, we never strdup them;
2982      * the lookup below compares the string contents to find a match.
2983      */
2984     qcint_t  str;
2985
2986     for (size_t i = 0; i != m_filenames.size(); ++i) {
2987         if (!strcmp(m_filenames[i], filename))
2988             return i;
2989     }
2990
2991     str = code_genstring(m_code.get(), filename);
2992     m_filenames.push_back(filename);
2993     m_filestrings.push_back(str);
2994     return str;
2995 }
2996
2997 bool ir_builder::generateGlobalFunction(ir_value *global)
2998 {
2999     prog_section_function_t fun;
3000     ir_function            *irfun;
3001
3002     size_t i;
3003
3004     if (!global->m_hasvalue || (!global->m_constval.vfunc)) {
3005         irerror(global->m_context, "Invalid state of function-global: not constant: %s", global->m_name.c_str());
3006         return false;
3007     }
3008
3009     irfun = global->m_constval.vfunc;
3010     fun.name = global->m_code.name;
3011     fun.file = filestring(global->m_context.file);
3012     fun.profile = 0; /* always 0 */
3013     fun.nargs = vec_size(irfun->m_params);
3014     if (fun.nargs > 8)
3015         fun.nargs = 8;
3016
3017     for (i = 0; i < 8; ++i) {
3018         if ((int32_t)i >= fun.nargs)
3019             fun.argsize[i] = 0;
3020         else
3021             fun.argsize[i] = type_sizeof_[irfun->m_params[i]];
3022     }
3023
3024     fun.firstlocal = 0;
3025     fun.locals = irfun->m_allocated_locals;
3026
3027     if (irfun->m_builtin)
3028         fun.entry = irfun->m_builtin+1;
3029     else {
3030         irfun->m_code_function_def = m_code->functions.size();
3031         fun.entry = m_code->statements.size();
3032     }
3033
3034     m_code->functions.push_back(fun);
3035     return true;
3036 }
3037
3038 ir_value* ir_builder::generateExtparamProto()
3039 {
3040     char      name[128];
3041
3042     util_snprintf(name, sizeof(name), "EXTPARM#%i", (int)(m_extparam_protos.size()));
3043     ir_value *global = new ir_value(name, store_global, TYPE_VECTOR);
3044     m_extparam_protos.emplace_back(global);
3045
3046     return global;
3047 }
3048
3049 void ir_builder::generateExtparam()
3050 {
3051     prog_section_def_t def;
3052     ir_value          *global;
3053
3054     if (m_extparam_protos.size() < m_extparams.size()+1)
3055         global = generateExtparamProto();
3056     else
3057         global = m_extparam_protos[m_extparams.size()].get();
3058
3059     def.name = code_genstring(m_code.get(), global->m_name.c_str());
3060     def.type = TYPE_VECTOR;
3061     def.offset = m_code->globals.size();
3062
3063     m_code->defs.push_back(def);
3064
3065     global->setCodeAddress(def.offset);
3066
3067     m_code->globals.push_back(0);
3068     m_code->globals.push_back(0);
3069     m_code->globals.push_back(0);
3070
3071     m_extparams.emplace_back(global);
3072 }
3073
3074 static bool gen_function_extparam_copy(code_t *code, ir_function *self)
3075 {
3076     ir_builder *ir = self->m_owner;
3077
3078     size_t numparams = vec_size(self->m_params);
3079     if (!numparams)
3080         return true;
3081
3082     prog_section_statement_t stmt;
3083     stmt.opcode = INSTR_STORE_F;
3084     stmt.o3.s1 = 0;
3085     for (size_t i = 8; i < numparams; ++i) {
3086         size_t ext = i - 8;
3087         if (ext >= ir->m_extparams.size())
3088             ir->generateExtparam();
3089
3090         ir_value *ep = ir->m_extparams[ext];
3091
3092         stmt.opcode = type_store_instr[self->m_locals[i]->m_vtype];
3093         if (self->m_locals[i]->m_vtype == TYPE_FIELD &&
3094             self->m_locals[i]->m_fieldtype == TYPE_VECTOR)
3095         {
3096             stmt.opcode = INSTR_STORE_V;
3097         }
3098         stmt.o1.u1 = ep->codeAddress();
3099         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3100         code_push_statement(code, &stmt, self->m_context);
3101     }
3102
3103     return true;
3104 }
3105
3106 static bool gen_function_varargs_copy(code_t *code, ir_function *self)
3107 {
3108     size_t i, ext, numparams, maxparams;
3109
3110     ir_builder *ir = self->m_owner;
3111     ir_value   *ep;
3112     prog_section_statement_t stmt;
3113
3114     numparams = vec_size(self->m_params);
3115     if (!numparams)
3116         return true;
3117
3118     stmt.opcode = INSTR_STORE_V;
3119     stmt.o3.s1 = 0;
3120     maxparams = numparams + self->m_max_varargs;
3121     for (i = numparams; i < maxparams; ++i) {
3122         if (i < 8) {
3123             stmt.o1.u1 = OFS_PARM0 + 3*i;
3124             stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3125             code_push_statement(code, &stmt, self->m_context);
3126             continue;
3127         }
3128         ext = i - 8;
3129         while (ext >= ir->m_extparams.size())
3130             ir->generateExtparam();
3131
3132         ep = ir->m_extparams[ext];
3133
3134         stmt.o1.u1 = ep->codeAddress();
3135         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3136         code_push_statement(code, &stmt, self->m_context);
3137     }
3138
3139     return true;
3140 }
3141
3142 bool ir_builder::generateFunctionLocals(ir_value *global)
3143 {
3144     prog_section_function_t *def;
3145     ir_function             *irfun;
3146     uint32_t                 firstlocal, firstglobal;
3147
3148     irfun = global->m_constval.vfunc;
3149     def   = &m_code->functions[0] + irfun->m_code_function_def;
3150
3151     if (OPTS_OPTION_BOOL(OPTION_G) ||
3152         !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS)        ||
3153         (irfun->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3154     {
3155         firstlocal = def->firstlocal = m_code->globals.size();
3156     } else {
3157         firstlocal = def->firstlocal = m_first_common_local;
3158         ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
3159     }
3160
3161     firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? m_first_common_globaltemp : firstlocal);
3162
3163     for (size_t i = m_code->globals.size(); i < firstlocal + irfun->m_allocated_locals; ++i)
3164         m_code->globals.push_back(0);
3165
3166     for (auto& lp : irfun->m_locals) {
3167         ir_value *v = lp.get();
3168         if (v->m_locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
3169             v->setCodeAddress(firstlocal + v->m_code.local);
3170             if (!generateGlobal(v, true)) {
3171                 irerror(v->m_context, "failed to generate local %s", v->m_name.c_str());
3172                 return false;
3173             }
3174         }
3175         else
3176             v->setCodeAddress(firstglobal + v->m_code.local);
3177     }
3178     for (auto& vp : irfun->m_values) {
3179         ir_value *v = vp.get();
3180         if (v->m_callparam)
3181             continue;
3182         if (v->m_locked)
3183             v->setCodeAddress(firstlocal + v->m_code.local);
3184         else
3185             v->setCodeAddress(firstglobal + v->m_code.local);
3186     }
3187     return true;
3188 }
3189
3190 bool ir_builder::generateGlobalFunctionCode(ir_value *global)
3191 {
3192     prog_section_function_t *fundef;
3193     ir_function             *irfun;
3194
3195     irfun = global->m_constval.vfunc;
3196     if (!irfun) {
3197         if (global->m_cvq == CV_NONE) {
3198             if (irwarning(global->m_context, WARN_IMPLICIT_FUNCTION_POINTER,
3199                           "function `%s` has no body and in QC implicitly becomes a function-pointer",
3200                           global->m_name.c_str()))
3201             {
3202                 /* Not bailing out just now. If this happens a lot you don't want to have
3203                  * to rerun gmqcc for each such function.
3204                  */
3205
3206                 /* return false; */
3207             }
3208         }
3209         /* this was a function pointer, don't generate code for those */
3210         return true;
3211     }
3212
3213     if (irfun->m_builtin)
3214         return true;
3215
3216     /*
3217      * If there is no definition and the function is erasable, we can skip
3218      * emitting it entirely.
3219      */
3220     if (global->m_flags & IR_FLAG_ERASABLE && irfun->m_code_function_def < 0) {
3221         return true;
3222     }
3223
3224     if (irfun->m_code_function_def < 0) {
3225         irerror(irfun->m_context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->m_name.c_str());
3226         return false;
3227     }
3228     fundef = &m_code->functions[irfun->m_code_function_def];
3229
3230     fundef->entry = m_code->statements.size();
3231     if (!generateFunctionLocals(global)) {
3232         irerror(irfun->m_context, "Failed to generate locals for function %s", irfun->m_name.c_str());
3233         return false;
3234     }
3235     if (!gen_function_extparam_copy(m_code.get(), irfun)) {
3236         irerror(irfun->m_context, "Failed to generate extparam-copy code for function %s", irfun->m_name.c_str());
3237         return false;
3238     }
3239     if (irfun->m_max_varargs && !gen_function_varargs_copy(m_code.get(), irfun)) {
3240         irerror(irfun->m_context, "Failed to generate vararg-copy code for function %s", irfun->m_name.c_str());
3241         return false;
3242     }
3243     if (!gen_function_code(m_code.get(), irfun)) {
3244         irerror(irfun->m_context, "Failed to generate code for function %s", irfun->m_name.c_str());
3245         return false;
3246     }
3247     return true;
3248 }
3249
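     /* For a vector def "<name>", emit three float defs "<name>_x", "<name>_y"
      * and "<name>_z" at consecutive offsets so the individual components stay
      * addressable by name (suppressed by OPTS_FLAG(SINGLE_VECTOR_DEFS)). */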
3250 static void gen_vector_defs(code_t *code, prog_section_def_t def, const char *name)
3251 {
3252     char  *component;
3253     size_t len, i;
3254
3255     if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3256         return;
3257
3258     def.type = TYPE_FLOAT;
3259
3260     len = strlen(name);
3261
3262     component = (char*)mem_a(len+3);
3263     memcpy(component, name, len);
3264     len += 2;
3265     component[len-0] = 0;
3266     component[len-2] = '_';
3267
3268     component[len-1] = 'x';
3269
3270     for (i = 0; i < 3; ++i) {
3271         def.name = code_genstring(code, component);
3272         code->defs.push_back(def);
3273         def.offset++;
3274         component[len-1]++;
3275     }
3276
3277     mem_d(component);
3278 }
3279
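     /* Same as gen_vector_defs, but emits the per-component entries into the
      * fields section for a vector-typed entity field. */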
3280 static void gen_vector_fields(code_t *code, prog_section_field_t fld, const char *name)
3281 {
3282     char  *component;
3283     size_t len, i;
3284
3285     if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3286         return;
3287
3288     fld.type = TYPE_FLOAT;
3289
3290     len = strlen(name);
3291
3292     component = (char*)mem_a(len+3);
3293     memcpy(component, name, len);
3294     len += 2;
3295     component[len-0] = 0;
3296     component[len-2] = '_';
3297
3298     component[len-1] = 'x';
3299
3300     for (i = 0; i < 3; ++i) {
3301         fld.name = code_genstring(code, component);
3302         code->fields.push_back(fld);
3303         fld.offset++;
3304         component[len-1]++;
3305     }
3306
3307     mem_d(component);
3308 }
3309
3310 bool ir_builder::generateGlobal(ir_value *global, bool islocal)
3311 {
3312     size_t             i;
3313     int32_t           *iptr;
3314     prog_section_def_t def;
3315     bool               pushdef = opts.optimizeoff;
3316
3317     /* we don't generate split-vectors */
3318     if (global->m_vtype == TYPE_VECTOR && (global->m_flags & IR_FLAG_SPLIT_VECTOR))
3319         return true;
3320
3321     def.type = global->m_vtype;
3322     def.offset = m_code->globals.size();
3323     def.name = 0;
3324     if (OPTS_OPTION_BOOL(OPTION_G) || !islocal)
3325     {
3326         pushdef = true;
3327
3328         /*
3329          * if the global is erasable and never read, skip
3330          * emitting it.
3331          */
3332         if (global->m_flags & IR_FLAG_ERASABLE && global->m_reads.empty()) {
3333             return true;
3334         }
3335
3336         if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) &&
3337             !(global->m_flags & IR_FLAG_INCLUDE_DEF) &&
3338             (global->m_name[0] == '#' || global->m_cvq == CV_CONST))
3339         {
3340             pushdef = false;
3341         }
3342
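             /* immediates are named "#..." internally; they all share a single
              * def name ("IMMEDIATE"), generated once and cached in
              * m_str_immediate */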
3343         if (pushdef) {
3344             if (global->m_name[0] == '#') {
3345                 if (!m_str_immediate)
3346                     m_str_immediate = code_genstring(m_code.get(), "IMMEDIATE");
3347                 def.name = global->m_code.name = m_str_immediate;
3348             }
3349             else
3350                 def.name = global->m_code.name = code_genstring(m_code.get(), global->m_name.c_str());
3351         }
3352         else
3353             def.name   = 0;
3354         if (islocal) {
3355             def.offset = global->codeAddress();
3356             m_code->defs.push_back(def);
3357             if (global->m_vtype == TYPE_VECTOR)
3358                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3359             else if (global->m_vtype == TYPE_FIELD && global->m_fieldtype == TYPE_VECTOR)
3360                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3361             return true;
3362         }
3363     }
3364     if (islocal)
3365         return true;
3366
3367     switch (global->m_vtype)
3368     {
3369     case TYPE_VOID:
3370         if (0 == global->m_name.compare("end_sys_globals")) {
3371             // TODO: remember this point... all the defs before this one
3372             // should be checksummed and added to progdefs.h when we generate it.
3373         }
3374         else if (0 == global->m_name.compare("end_sys_fields")) {
3375             // TODO: same as above, but for entity fields rather than globals
3376         }
3377         else if(irwarning(global->m_context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`",
3378                           global->m_name.c_str()))
3379         {
3380             /* Not bailing out */
3381             /* return false; */
3382         }
3383         /* I'd argue setting it to 0 is sufficient, but maybe some depend on knowing how far
3384          * the system fields actually go? Though the engine knows this anyway...
3385          * Maybe this could be an -f option.
3386          * fteqcc creates data of size 1 for end_sys_*, so let's do the same.
3387          */
3388         global->setCodeAddress(m_code->globals.size());
3389         m_code->globals.push_back(0);
3390         /* Add the def */
3391         if (pushdef)
3392             m_code->defs.push_back(def);
3393         return true;
3394     case TYPE_POINTER:
3395         if (pushdef)
3396             m_code->defs.push_back(def);
3397         return gen_global_pointer(m_code.get(), global);
3398     case TYPE_FIELD:
3399         if (pushdef) {
3400             m_code->defs.push_back(def);
3401             if (global->m_fieldtype == TYPE_VECTOR)
3402                 gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3403         }
3404         return gen_global_field(m_code.get(), global);
3405     case TYPE_ENTITY:
3406         /* fall through */
3407     case TYPE_FLOAT:
3408     {
3409         global->setCodeAddress(m_code->globals.size());
3410         if (global->m_hasvalue) {
3411             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3412                 return true;
3413             iptr = (int32_t*)&global->m_constval.ivec[0];
3414             m_code->globals.push_back(*iptr);
3415         } else {
3416             m_code->globals.push_back(0);
3417         }
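             /* DEF_SAVEGLOBAL marks a non-constant global so the engine writes
              * it into savegames; constants and locals don't need it */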
3418         if (!islocal && global->m_cvq != CV_CONST)
3419             def.type |= DEF_SAVEGLOBAL;
3420         if (pushdef)
3421             m_code->defs.push_back(def);
3422
3423         return global->m_code.globaladdr >= 0;
3424     }
3425     case TYPE_STRING:
3426     {
3427         global->setCodeAddress(m_code->globals.size());
3428         if (global->m_hasvalue) {
3429             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3430                 return true;
3431             uint32_t load = code_genstring(m_code.get(), global->m_constval.vstring);
3432             m_code->globals.push_back(load);
3433         } else {
3434             m_code->globals.push_back(0);
3435         }
3436         if (!islocal && global->m_cvq != CV_CONST)
3437             def.type |= DEF_SAVEGLOBAL;
3438         if (pushdef)
3439             m_code->defs.push_back(def);
3440         return global->m_code.globaladdr >= 0;
3441     }
3442     case TYPE_VECTOR:
3443     {
3444         size_t d;
3445         global->setCodeAddress(m_code->globals.size());
3446         if (global->m_hasvalue) {
3447             iptr = (int32_t*)&global->m_constval.ivec[0];
3448             m_code->globals.push_back(iptr[0]);
3449             if (global->m_code.globaladdr < 0)
3450                 return false;
3451             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3452                 m_code->globals.push_back(iptr[d]);
3453             }
3454         } else {
3455             m_code->globals.push_back(0);
3456             if (global->m_code.globaladdr < 0)
3457                 return false;
3458             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3459                 m_code->globals.push_back(0);
3460             }
3461         }
3462         if (!islocal && global->m_cvq != CV_CONST)
3463             def.type |= DEF_SAVEGLOBAL;
3464
3465         if (pushdef) {
3466             m_code->defs.push_back(def);
3467             def.type &= ~DEF_SAVEGLOBAL;
3468             gen_vector_defs(m_code.get(), def, global->m_name.c_str());
3469         }
3470         return global->m_code.globaladdr >= 0;
3471     }
3472     case TYPE_FUNCTION:
3473         global->setCodeAddress(m_code->globals.size());
3474         if (!global->m_hasvalue) {
3475             m_code->globals.push_back(0);
3476             if (global->m_code.globaladdr < 0)
3477                 return false;
3478         } else {
3479             m_code->globals.push_back(m_code->functions.size());
3480             if (!generateGlobalFunction(global))
3481                 return false;
3482         }
3483         if (!islocal && global->m_cvq != CV_CONST)
3484             def.type |= DEF_SAVEGLOBAL;
3485         if (pushdef)
3486             m_code->defs.push_back(def);
3487         return true;
3488     case TYPE_VARIANT:
3489         /* assume biggest type */
3490         global->setCodeAddress(m_code->globals.size());
3491         m_code->globals.push_back(0);
3492         for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i)
3493             m_code->globals.push_back(0);
3494         return true;
3495     default:
3496         /* refuse to create globals of any remaining type; no fancy business. */
3497         irerror(global->m_context, "Invalid type for global variable `%s`: %s",
3498                 global->m_name.c_str(), type_name[global->m_vtype]);
3499         return false;
3500     }
3501 }
3502
3503 static GMQCC_INLINE void ir_builder_prepare_field(code_t *code, ir_value *field)
3504 {
3505     field->m_code.fieldaddr = code_alloc_field(code, type_sizeof_[field->m_fieldtype]);
3506 }
3507
3508 static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
3509 {
3510     prog_section_def_t def;
3511     prog_section_field_t fld;
3512
3513     (void)self;
3514
3515     def.type   = (uint16_t)field->m_vtype;
3516     def.offset = (uint16_t)self->m_code->globals.size();
3517
3518     /* create a global named the same as the field */
3519     if (OPTS_OPTION_U32(OPTION_STANDARD) == COMPILER_GMQCC) {
3520         /* in our standard, the global gets a dot prefix */
3521         size_t len = field->m_name.length();
3522         char name[1024];
3523
3524         /* we really don't want to have to allocate this, and 1024
3525          * bytes is more than enough for a variable/field name
3526          */
3527         if (len+2 >= sizeof(name)) {
3528             irerror(field->m_context, "invalid field name size: %u", (unsigned int)len);
3529             return false;
3530         }
3531
3532         name[0] = '.';
3533         memcpy(name+1, field->m_name.c_str(), len); // no strncpy - we used strlen above
3534         name[len+1] = 0;
3535
3536         def.name = code_genstring(self->m_code.get(), name);
3537         fld.name = def.name + 1; /* we reuse that string table entry */
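             /* e.g. for a field "health" the def is named ".health" and the
              * field entry's name points one character past the dot in that
              * same string */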
3538     } else {
3539         /* in plain QC there cannot be a global with the same name,
3540          * so we also name the global the same as the field.
3541          * FIXME: fteqcc should create a global as well;
3542          * check whether it actually uses the same name. It probably does.
3543          */
3544         def.name = code_genstring(self->m_code.get(), field->m_name.c_str());
3545         fld.name = def.name;
3546     }
3547
3548     field->m_code.name = def.name;
3549
3550     self->m_code->defs.push_back(def);
3551
3552     fld.type = field->m_fieldtype;
3553
3554     if (fld.type == TYPE_VOID) {
3555         irerror(field->m_context, "field is missing a type: %s - don't know its size", field->m_name.c_str());
3556         return false;
3557     }
3558
3559     fld.offset = field->m_code.fieldaddr;
3560
3561     self->m_code->fields.push_back(fld);
3562
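         /* the global representing the field holds the field's offset as its
          * value; vector fields additionally get the two following offsets */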
3563     field->setCodeAddress(self->m_code->globals.size());
3564     self->m_code->globals.push_back(fld.offset);
3565     if (fld.type == TYPE_VECTOR) {
3566         self->m_code->globals.push_back(fld.offset+1);
3567         self->m_code->globals.push_back(fld.offset+2);
3568     }
3569
3570     if (field->m_fieldtype == TYPE_VECTOR) {
3571         gen_vector_defs  (self->m_code.get(), def, field->m_name.c_str());
3572         gen_vector_fields(self->m_code.get(), fld, field->m_name.c_str());
3573     }
3574
3575     return field->m_code.globaladdr >= 0;
3576 }
3577
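     /* Collect every float constant global (named constants and '#' immediates)
      * so the vector splitting below can reuse them instead of emitting new
      * float globals for each component. */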
3578 static void ir_builder_collect_reusables(ir_builder *builder) {
3579     std::vector<ir_value*> reusables;
3580
3581     for (auto& gp : builder->m_globals) {
3582         ir_value *value = gp.get();
3583         if (value->m_vtype != TYPE_FLOAT || !value->m_hasvalue)
3584             continue;
3585         if (value->m_cvq == CV_CONST || (value->m_name.length() >= 1 && value->m_name[0] == '#'))
3586             reusables.emplace_back(value);
3587     }
3588     builder->m_const_floats = std::move(reusables);
3589 }
3590
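     /* A constant vector that is never written, has no member accesses and is
      * only read as a direct call parameter can be "split": its components are
      * mapped onto (possibly shared) float constants and the vector global
      * itself is never emitted (see the IR_FLAG_SPLIT_VECTOR check in
      * generateGlobal). */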
3591 static void ir_builder_split_vector(ir_builder *self, ir_value *vec) {
3592     ir_value* found[3] = { nullptr, nullptr, nullptr };
3593
3594     // must not be written to
3595     if (vec->m_writes.size())
3596         return;
3597     // must not be trying to access individual members
3598     if (vec->m_members[0] || vec->m_members[1] || vec->m_members[2])
3599         return;
3600     // must actually be used, otherwise it won't be generated anyway
3601     if (vec->m_reads.empty())
3602         return;
3606
3607     // may only be used directly as a function parameter; if we find any other use, cancel
3608     for (ir_instr *user : vec->m_reads) {
3609         // we only split vectors that are used directly as call parameters
3610         if ((user->m_opcode < INSTR_CALL0 || user->m_opcode > INSTR_CALL8) && user->m_opcode != VINSTR_NRCALL)
3611             return;
3612     }
3613
3614     vec->m_flags |= IR_FLAG_SPLIT_VECTOR;
3615
3616     // find existing floats making up the split
3617     for (ir_value *c : self->m_const_floats) {
3618         if (!found[0] && c->m_constval.vfloat == vec->m_constval.vvec.x)
3619             found[0] = c;
3620         if (!found[1] && c->m_constval.vfloat == vec->m_constval.vvec.y)
3621             found[1] = c;
3622         if (!found[2] && c->m_constval.vfloat == vec->m_constval.vvec.z)
3623             found[2] = c;
3624         if (found[0] && found[1] && found[2])
3625             break;
3626     }
3627
3628     // generate floats for not yet found components
3629     if (!found[0])
3630         found[0] = self->literalFloat(vec->m_constval.vvec.x, true);
3631     if (!found[1]) {
3632         if (vec->m_constval.vvec.y == vec->m_constval.vvec.x)
3633             found[1] = found[0];
3634         else
3635             found[1] = self->literalFloat(vec->m_constval.vvec.y, true);
3636     }
3637     if (!found[2]) {
3638         if (vec->m_constval.vvec.z == vec->m_constval.vvec.x)
3639             found[2] = found[0];
3640         else if (vec->m_constval.vvec.z == vec->m_constval.vvec.y)
3641             found[2] = found[1];
3642         else
3643             found[2] = self->literalFloat(vec->m_constval.vvec.z, true);
3644     }
3645
3646     // the m_members array should be safe to use here
3647     vec->m_members[0] = found[0];
3648     vec->m_members[1] = found[1];
3649     vec->m_members[2] = found[2];
3650
3651     // register the readers for these floats
3652     found[0]->m_reads.insert(found[0]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3653     found[1]->m_reads.insert(found[1]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3654     found[2]->m_reads.insert(found[2]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3655 }
3656
3657 static void ir_builder_split_vectors(ir_builder *self) {
3658     // member values may be added to self->m_globals during this operation, but
3659     // no new vectors will be added; iterate by index because C++ iterators
3660     // would be invalidated
3661     const size_t count = self->m_globals.size();
3662     for (size_t i = 0; i != count; ++i) {
3663         ir_value *v = self->m_globals[i].get();
3664         if (v->m_vtype != TYPE_VECTOR || !v->m_name.length() || v->m_name[0] != '#')
3665             continue;
3666         ir_builder_split_vector(self, v);
3667     }
3668 }
3669
3670 bool ir_builder::generate(const char *filename)
3671 {
3672     prog_section_statement_t stmt;
3673     char  *lnofile = nullptr;
3674
3675     if (OPTS_FLAG(SPLIT_VECTOR_PARAMETERS)) {
3676         ir_builder_collect_reusables(this);
3677         if (!m_const_floats.empty())
3678             ir_builder_split_vectors(this);
3679     }
3680
3681     for (auto& fp : m_fields)
3682         ir_builder_prepare_field(m_code.get(), fp.get());
3683
3684     for (auto& gp : m_globals) {
3685         ir_value *global = gp.get();
3686         if (!generateGlobal(global, false)) {
3687             return false;
3688         }
3689         if (global->m_vtype == TYPE_FUNCTION) {
3690             ir_function *func = global->m_constval.vfunc;
3691             if (func && m_max_locals < func->m_allocated_locals &&
3692                 !(func->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3693             {
3694                 m_max_locals = func->m_allocated_locals;
3695             }
3696             if (func && m_max_globaltemps < func->m_globaltemps)
3697                 m_max_globaltemps = func->m_globaltemps;
3698         }
3699     }
3700
3701     for (auto& fp : m_fields) {
3702         if (!ir_builder_gen_field(this, fp.get()))
3703             return false;
3704     }
3705
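         // After the user globals, the shared tail of the globals section is
         // laid out in this order: nil (3 words), the IR_MAX_VINSTR_TEMPS
         // vector-sized vinstr temps, m_max_globaltemps shared temporaries,
         // and finally m_max_locals overlapped locals.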
3706     // generate nil
3707     m_nil->setCodeAddress(m_code->globals.size());
3708     m_code->globals.push_back(0);
3709     m_code->globals.push_back(0);
3710     m_code->globals.push_back(0);
3711
3712     // generate virtual-instruction temps
3713     for (size_t i = 0; i < IR_MAX_VINSTR_TEMPS; ++i) {
3714         m_vinstr_temp[i]->setCodeAddress(m_code->globals.size());
3715         m_code->globals.push_back(0);
3716         m_code->globals.push_back(0);
3717         m_code->globals.push_back(0);
3718     }
3719
3720     // generate global temps
3721     m_first_common_globaltemp = m_code->globals.size();
3722     m_code->globals.insert(m_code->globals.end(), m_max_globaltemps, 0);
3727     // generate common locals
3728     m_first_common_local = m_code->globals.size();
3729     m_code->globals.insert(m_code->globals.end(), m_max_locals, 0);
3734
3735     // generate function code
3736
3737     for (auto& gp : m_globals) {
3738         ir_value *global = gp.get();
3739         if (global->m_vtype == TYPE_FUNCTION) {
3740             if (!this->generateGlobalFunctionCode(global))
3741                 return false;
3742         }
3743     }
3744
3745     if (m_code->globals.size() >= 65536) {
3746         irerror(m_globals.back()->m_context,
3747             "This progs file would require more globals than the metadata can handle (%zu). Bailing out.",
3748             m_code->globals.size());
3749         return false;
3750     }
3751
3752     /* DarkPlaces (DP) errors out if the last instruction is not an INSTR_DONE. */
3753     if (m_code->statements.back().opcode != INSTR_DONE)
3754     {
3755         lex_ctx_t last;
3756
3757         stmt.opcode = INSTR_DONE;
3758         stmt.o1.u1  = 0;
3759         stmt.o2.u1  = 0;
3760         stmt.o3.u1  = 0;
3761         last.line   = m_code->linenums.back();
3762         last.column = m_code->columnnums.back();
3763
3764         code_push_statement(m_code.get(), &stmt, last);
3765     }
3766
3767     if (OPTS_OPTION_BOOL(OPTION_PP_ONLY))
3768         return true;
3769
3770     if (m_code->statements.size() != m_code->linenums.size()) {
3771         con_err("Line counter mismatch: %zu != %zu\n",
3772                 m_code->statements.size(),
3773                 m_code->linenums.size());
3774     } else if (OPTS_FLAG(LNO)) {
3775         char  *dot;
3776         size_t filelen = strlen(filename);
3777
3778         memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
3779         dot = strrchr(lnofile, '.');
3780         if (!dot) {
3781             vec_pop(lnofile);
3782         } else {
3783             vec_shrinkto(lnofile, dot - lnofile);
3784         }
3785         memcpy(vec_add(lnofile, 5), ".lno", 5);
3786     }
3787
3788     if (!code_write(m_code.get(), filename, lnofile)) {
3789         vec_free(lnofile);
3790         return false;
3791     }
3792
3793     vec_free(lnofile);
3794     return true;
3795 }
3796
3797 /***********************************************************************
3798  * IR debug dump functions
3799  */
3800
3801 #define IND_BUFSZ 1024
3802
3803 static const char *qc_opname(int op)
3804 {
3805     if (op < 0) return "<INVALID>";
3806     if (op < VINSTR_END)
3807         return util_instr_str[op];
3808     switch (op) {
3809         case VINSTR_END:       return "END";
3810         case VINSTR_PHI:       return "PHI";
3811         case VINSTR_JUMP:      return "JUMP";
3812         case VINSTR_COND:      return "COND";
3813         case VINSTR_BITXOR:    return "BITXOR";
3814         case VINSTR_BITAND_V:  return "BITAND_V";
3815         case VINSTR_BITOR_V:   return "BITOR_V";
3816         case VINSTR_BITXOR_V:  return "BITXOR_V";
3817         case VINSTR_BITAND_VF: return "BITAND_VF";
3818         case VINSTR_BITOR_VF:  return "BITOR_VF";
3819         case VINSTR_BITXOR_VF: return "BITXOR_VF";
3820         case VINSTR_CROSS:     return "CROSS";
3821         case VINSTR_NEG_F:     return "NEG_F";
3822         case VINSTR_NEG_V:     return "NEG_V";
3823         default:               return "<UNK>";
3824     }
3825 }
3826
3827 void ir_builder::dump(int (*oprintf)(const char*, ...)) const
3828 {
3829     size_t i;
3830     char indent[IND_BUFSZ];
3831     indent[0] = '\t';
3832     indent[1] = 0;
3833
3834     oprintf("module %s\n", m_name.c_str());
3835     for (i = 0; i < m_globals.size(); ++i)
3836     {
3837         oprintf("global ");
3838         if (m_globals[i]->m_hasvalue)
3839             oprintf("%s = ", m_globals[i]->m_name.c_str());
3840         m_globals[i].get()->dump(oprintf);
3841         oprintf("\n");
3842     }
3843     for (i = 0; i < m_functions.size(); ++i)
3844         ir_function_dump(m_functions[i].get(), indent, oprintf);
3845     oprintf("endmodule %s\n", m_name.c_str());
3846 }
3847
3848 static const char *storenames[] = {
3849     "[global]", "[local]", "[param]", "[value]", "[return]"
3850 };
3851
3852 void ir_function_dump(ir_function *f, char *ind,
3853                       int (*oprintf)(const char*, ...))
3854 {
3855     size_t i;
3856     if (f->m_builtin != 0) {
3857         oprintf("%sfunction %s = builtin %i\n", ind, f->m_name.c_str(), -f->m_builtin);
3858         return;
3859     }
3860     oprintf("%sfunction %s\n", ind, f->m_name.c_str());
3861     util_strncat(ind, "\t", IND_BUFSZ-1);
3862     if (f->m_locals.size())
3863     {
3864         oprintf("%s%i locals:\n", ind, (int)f->m_locals.size());
3865         for (i = 0; i < f->m_locals.size(); ++i) {
3866             oprintf("%s\t", ind);
3867             f->m_locals[i].get()->dump(oprintf);
3868             oprintf("\n");
3869         }
3870     }
3871     oprintf("%sliferanges:\n", ind);
3872     for (i = 0; i < f->m_locals.size(); ++i) {
3873         const char *attr = "";
3874         size_t l, m;
3875         ir_value *v = f->m_locals[i].get();
3876         if (v->m_unique_life && v->m_locked)
3877             attr = "unique,locked ";
3878         else if (v->m_unique_life)
3879             attr = "unique ";
3880         else if (v->m_locked)
3881             attr = "locked ";
3882         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3883                 storenames[v->m_store],
3884                 attr, (v->m_callparam ? "callparam " : ""),
3885                 (int)v->m_code.local);
3886         if (v->m_life.empty())
3887             oprintf("[null]");
3888         for (l = 0; l < v->m_life.size(); ++l) {
3889             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3890         }
3891         oprintf("\n");
3892         for (m = 0; m < 3; ++m) {
3893             ir_value *vm = v->m_members[m];
3894             if (!vm)
3895                 continue;
3896             oprintf("%s\t%s: @%i ", ind, vm->m_name.c_str(), (int)vm->m_code.local);
3897             for (l = 0; l < vm->m_life.size(); ++l) {
3898                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3899             }
3900             oprintf("\n");
3901         }
3902     }
3903     for (i = 0; i < f->m_values.size(); ++i) {
3904         const char *attr = "";
3905         size_t l, m;
3906         ir_value *v = f->m_values[i].get();
3907         if (v->m_unique_life && v->m_locked)
3908             attr = "unique,locked ";
3909         else if (v->m_unique_life)
3910             attr = "unique ";
3911         else if (v->m_locked)
3912             attr = "locked ";
3913         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3914                 storenames[v->m_store],
3915                 attr, (v->m_callparam ? "callparam " : ""),
3916                 (int)v->m_code.local);
3917         if (v->m_life.empty())
3918             oprintf("[null]");
3919         for (l = 0; l < v->m_life.size(); ++l) {
3920             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3921         }
3922         oprintf("\n");
3923         for (m = 0; m < 3; ++m) {
3924             ir_value *vm = v->m_members[m];
3925             if (!vm)
3926                 continue;
3927             if (vm->m_unique_life && vm->m_locked)
3928                 attr = "unique,locked ";
3929             else if (vm->m_unique_life)
3930                 attr = "unique ";
3931             else if (vm->m_locked)
3932                 attr = "locked ";
3933             oprintf("%s\t%s: %s@%i ", ind, vm->m_name.c_str(), attr, (int)vm->m_code.local);
3934             for (l = 0; l < vm->m_life.size(); ++l) {
3935                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3936             }
3937             oprintf("\n");
3938         }
3939     }
3940     if (f->m_blocks.size())
3941     {
3942         oprintf("%slife passes: %i\n", ind, (int)f->m_run_id);
3943         for (i = 0; i < f->m_blocks.size(); ++i) {
3944             ir_block_dump(f->m_blocks[i].get(), ind, oprintf);
3945         }
3946
3947     }
3948     ind[strlen(ind)-1] = 0;
3949     oprintf("%sendfunction %s\n", ind, f->m_name.c_str());
3950 }
3951
3952 void ir_block_dump(ir_block* b, char *ind,
3953                    int (*oprintf)(const char*, ...))
3954 {
3955     size_t i;
3956     oprintf("%s:%s\n", ind, b->m_label.c_str());
3957     util_strncat(ind, "\t", IND_BUFSZ-1);
3958
3959     if (b->m_instr && b->m_instr[0])
3960         oprintf("%s (%i) [entry]\n", ind, (int)(b->m_instr[0]->m_eid-1));
3961     for (i = 0; i < vec_size(b->m_instr); ++i)
3962         ir_instr_dump(b->m_instr[i], ind, oprintf);
3963     ind[strlen(ind)-1] = 0;
3964 }
3965
3966 static void dump_phi(ir_instr *in, int (*oprintf)(const char*, ...))
3967 {
3968     oprintf("%s <- phi ", in->_m_ops[0]->m_name.c_str());
3969     for (auto &it : in->m_phi) {
3970         oprintf("([%s] : %s) ", it.from->m_label.c_str(),
3971                                 it.value->m_name.c_str());
3972     }
3973     oprintf("\n");
3974 }
3975
3976 void ir_instr_dump(ir_instr *in, char *ind,
3977                        int (*oprintf)(const char*, ...))
3978 {
3979     size_t i;
3980     const char *comma = nullptr;
3981
3982     oprintf("%s (%i) ", ind, (int)in->m_eid);
3983
3984     if (in->m_opcode == VINSTR_PHI) {
3985         dump_phi(in, oprintf);
3986         return;
3987     }
3988
3989     util_strncat(ind, "\t", IND_BUFSZ-1);
3990
3991     if (in->_m_ops[0] && (in->_m_ops[1] || in->_m_ops[2])) {
3992         in->_m_ops[0]->dump(oprintf);
3993         if (in->_m_ops[1] || in->_m_ops[2])
3994             oprintf(" <- ");
3995     }
3996     if (in->m_opcode == INSTR_CALL0 || in->m_opcode == VINSTR_NRCALL) {
3997         oprintf("CALL%i\t", in->m_params.size());
3998     } else
3999         oprintf("%s\t", qc_opname(in->m_opcode));
4000
4001     if (in->_m_ops[0] && !(in->_m_ops[1] || in->_m_ops[2])) {
4002         in->_m_ops[0]->dump(oprintf);
4003         comma = ",\t";
4004     }
4005     else
4006     {
4007         for (i = 1; i != 3; ++i) {
4008             if (in->_m_ops[i]) {
4009                 if (comma)
4010                     oprintf(comma);
4011                 in->_m_ops[i]->dump(oprintf);
4012                 comma = ",\t";
4013             }
4014         }
4015     }
4016     if (in->m_bops[0]) {
4017         if (comma)
4018             oprintf(comma);
4019         oprintf("[%s]", in->m_bops[0]->m_label.c_str());
4020         comma = ",\t";
4021     }
4022     if (in->m_bops[1])
4023         oprintf("%s[%s]", comma, in->m_bops[1]->m_label.c_str());
4024     if (in->m_params.size()) {
4025         oprintf("\tparams: ");
4026         for (auto &it : in->m_params)
4027             oprintf("%s, ", it->m_name.c_str());
4028     }
4029     oprintf("\n");
4030     ind[strlen(ind)-1] = 0;
4031 }
4032
4033 static void ir_value_dump_string(const char *str, int (*oprintf)(const char*, ...))
4034 {
4035     oprintf("\"");
4036     for (; *str; ++str) {
4037         switch (*str) {
4038             case '\n': oprintf("\\n"); break;
4039             case '\r': oprintf("\\r"); break;
4040             case '\t': oprintf("\\t"); break;
4041             case '\v': oprintf("\\v"); break;
4042             case '\f': oprintf("\\f"); break;
4043             case '\b': oprintf("\\b"); break;
4044             case '\a': oprintf("\\a"); break;
4045             case '\\': oprintf("\\\\"); break;
4046             case '"': oprintf("\\\""); break;
4047             default: oprintf("%c", *str); break;
4048         }
4049     }
4050     oprintf("\"");
4051 }
4052
4053 void ir_value::dump(int (*oprintf)(const char*, ...)) const
4054 {
4055     if (m_hasvalue) {
4056         switch (m_vtype) {
4057             default:
4058             case TYPE_VOID:
4059                 oprintf("(void)");
4060                 break;
4061             case TYPE_FUNCTION:
4062                 oprintf("fn:%s", m_name.c_str());
4063                 break;
4064             case TYPE_FLOAT:
4065                 oprintf("%g", m_constval.vfloat);
4066                 break;
4067             case TYPE_VECTOR:
4068                 oprintf("'%g %g %g'",
4069                         m_constval.vvec.x,
4070                         m_constval.vvec.y,
4071                         m_constval.vvec.z);
4072                 break;
4073             case TYPE_ENTITY:
4074                 oprintf("(entity)");
4075                 break;
4076             case TYPE_STRING:
4077                 ir_value_dump_string(m_constval.vstring, oprintf);
4078                 break;
4079 #if 0
4080             case TYPE_INTEGER:
4081                 oprintf("%i", m_constval.vint);
4082                 break;
4083 #endif
4084             case TYPE_POINTER:
4085                 oprintf("&%s",
4086                     m_constval.vpointer->m_name.c_str());
4087                 break;
4088         }
4089     } else {
4090         oprintf("%s", m_name.c_str());
4091     }
4092 }
4093
4094 void ir_value::dumpLife(int (*oprintf)(const char*,...)) const
4095 {
4096     oprintf("Life of %12s:", m_name.c_str());
4097     for (size_t i = 0; i < m_life.size(); ++i)
4098     {
4099         oprintf(" + [%i, %i]\n", m_life[i].start, m_life[i].end);
4100     }
4101 }