1 #include <stdlib.h>
2 #include <string.h>
3
4 #include "gmqcc.h"
5 #include "ir.h"
6
7 /***********************************************************************
8  * Type sizes used at multiple points in the IR codegen
9  */
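/* The tables below are indexed by the qc_type / TYPE_* constants and must
 * stay in the same order as that enumeration (TYPE_COUNT entries each). */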
10
11 const char *type_name[TYPE_COUNT] = {
12     "void",
13     "string",
14     "float",
15     "vector",
16     "entity",
17     "field",
18     "function",
19     "pointer",
20     "integer",
21     "variant",
22     "struct",
23     "union",
24     "array",
25
26     "nil",
27     "<no-expression>"
28 };
29
30 static size_t type_sizeof_[TYPE_COUNT] = {
31     1, /* TYPE_VOID     */
32     1, /* TYPE_STRING   */
33     1, /* TYPE_FLOAT    */
34     3, /* TYPE_VECTOR   */
35     1, /* TYPE_ENTITY   */
36     1, /* TYPE_FIELD    */
37     1, /* TYPE_FUNCTION */
38     1, /* TYPE_POINTER  */
39     1, /* TYPE_INTEGER  */
40     3, /* TYPE_VARIANT  */
41     0, /* TYPE_STRUCT   */
42     0, /* TYPE_UNION    */
43     0, /* TYPE_ARRAY    */
44     0, /* TYPE_NIL      */
45     0, /* TYPE_NOEXPR   */
46 };
47
48 const uint16_t type_store_instr[TYPE_COUNT] = {
49     INSTR_STORE_F, /* should use I when integer support is added */
50     INSTR_STORE_S,
51     INSTR_STORE_F,
52     INSTR_STORE_V,
53     INSTR_STORE_ENT,
54     INSTR_STORE_FLD,
55     INSTR_STORE_FNC,
56     INSTR_STORE_ENT, /* should use I */
57 #if 0
58     INSTR_STORE_I, /* integer type */
59 #else
60     INSTR_STORE_F,
61 #endif
62
63     INSTR_STORE_V, /* variant, should never be accessed */
64
65     VINSTR_END, /* struct */
66     VINSTR_END, /* union  */
67     VINSTR_END, /* array  */
68     VINSTR_END, /* nil    */
69     VINSTR_END, /* noexpr */
70 };
71
72 const uint16_t field_store_instr[TYPE_COUNT] = {
73     INSTR_STORE_FLD,
74     INSTR_STORE_FLD,
75     INSTR_STORE_FLD,
76     INSTR_STORE_V,
77     INSTR_STORE_FLD,
78     INSTR_STORE_FLD,
79     INSTR_STORE_FLD,
80     INSTR_STORE_FLD,
81 #if 0
82     INSTR_STORE_FLD, /* integer type */
83 #else
84     INSTR_STORE_FLD,
85 #endif
86
87     INSTR_STORE_V, /* variant, should never be accessed */
88
89     VINSTR_END, /* struct */
90     VINSTR_END, /* union  */
91     VINSTR_END, /* array  */
92     VINSTR_END, /* nil    */
93     VINSTR_END, /* noexpr */
94 };
95
96 const uint16_t type_storep_instr[TYPE_COUNT] = {
97     INSTR_STOREP_F, /* should use I when integer support is added */
98     INSTR_STOREP_S,
99     INSTR_STOREP_F,
100     INSTR_STOREP_V,
101     INSTR_STOREP_ENT,
102     INSTR_STOREP_FLD,
103     INSTR_STOREP_FNC,
104     INSTR_STOREP_ENT, /* should use I */
105 #if 0
106     INSTR_STOREP_ENT, /* integer type */
107 #else
108     INSTR_STOREP_F,
109 #endif
110
111     INSTR_STOREP_V, /* variant, should never be accessed */
112
113     VINSTR_END, /* struct */
114     VINSTR_END, /* union  */
115     VINSTR_END, /* array  */
116     VINSTR_END, /* nil    */
117     VINSTR_END, /* noexpr */
118 };
119
120 const uint16_t type_eq_instr[TYPE_COUNT] = {
121     INSTR_EQ_F, /* should use I when integer support is added */
122     INSTR_EQ_S,
123     INSTR_EQ_F,
124     INSTR_EQ_V,
125     INSTR_EQ_E,
126     INSTR_EQ_E, /* FLD has no comparison */
127     INSTR_EQ_FNC,
128     INSTR_EQ_E, /* should use I */
129 #if 0
130     INSTR_EQ_I,
131 #else
132     INSTR_EQ_F,
133 #endif
134
135     INSTR_EQ_V, /* variant, should never be accessed */
136
137     VINSTR_END, /* struct */
138     VINSTR_END, /* union  */
139     VINSTR_END, /* array  */
140     VINSTR_END, /* nil    */
141     VINSTR_END, /* noexpr */
142 };
143
144 const uint16_t type_ne_instr[TYPE_COUNT] = {
145     INSTR_NE_F, /* should use I when integer support is added */
146     INSTR_NE_S,
147     INSTR_NE_F,
148     INSTR_NE_V,
149     INSTR_NE_E,
150     INSTR_NE_E, /* FLD has no comparison */
151     INSTR_NE_FNC,
152     INSTR_NE_E, /* should use I */
153 #if 0
154     INSTR_NE_I,
155 #else
156     INSTR_NE_F,
157 #endif
158
159     INSTR_NE_V, /* variant, should never be accessed */
160
161     VINSTR_END, /* struct */
162     VINSTR_END, /* union  */
163     VINSTR_END, /* array  */
164     VINSTR_END, /* nil    */
165     VINSTR_END, /* noexpr */
166 };
167
168 const uint16_t type_not_instr[TYPE_COUNT] = {
169     INSTR_NOT_F, /* should use I when integer support is added */
170     VINSTR_END,  /* not to be used; depends on string-related -f flags */
171     INSTR_NOT_F,
172     INSTR_NOT_V,
173     INSTR_NOT_ENT,
174     INSTR_NOT_ENT,
175     INSTR_NOT_FNC,
176     INSTR_NOT_ENT, /* should use I */
177 #if 0
178     INSTR_NOT_I, /* integer type */
179 #else
180     INSTR_NOT_F,
181 #endif
182
183     INSTR_NOT_V, /* variant, should never be accessed */
184
185     VINSTR_END, /* struct */
186     VINSTR_END, /* union  */
187     VINSTR_END, /* array  */
188     VINSTR_END, /* nil    */
189     VINSTR_END, /* noexpr */
190 };
191
192 /* protos */
193 static void            ir_function_dump(ir_function*, char *ind, int (*oprintf)(const char*,...));
194
195 static ir_value*       ir_block_create_general_instr(ir_block *self, lex_ctx_t, const char *label,
196                                                      int op, ir_value *a, ir_value *b, qc_type outype);
197 static bool GMQCC_WARN ir_block_create_store(ir_block*, lex_ctx_t, ir_value *target, ir_value *what);
198 static void            ir_block_dump(ir_block*, char *ind, int (*oprintf)(const char*,...));
199
200 static bool            ir_instr_op(ir_instr*, int op, ir_value *value, bool writing);
201 static void            ir_instr_dump(ir_instr* in, char *ind, int (*oprintf)(const char*,...));
202 /* error functions */
203
204 static void irerror(lex_ctx_t ctx, const char *msg, ...)
205 {
206     va_list ap;
207     va_start(ap, msg);
208     con_cvprintmsg(ctx, LVL_ERROR, "internal error", msg, ap);
209     va_end(ap);
210 }
211
212 static bool GMQCC_WARN irwarning(lex_ctx_t ctx, int warntype, const char *fmt, ...)
213 {
214     bool    r;
215     va_list ap;
216     va_start(ap, fmt);
217     r = vcompile_warning(ctx, warntype, fmt, ap);
218     va_end(ap);
219     return r;
220 }
221
222 /***********************************************************************
223  * Vector utility functions
224  */
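/* These helpers report the element's index as (&it - &vec[0]), which is
 * valid because std::vector storage is contiguous. */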
225
226 static bool GMQCC_WARN vec_ir_value_find(std::vector<ir_value *> &vec, const ir_value *what, size_t *idx)
227 {
228     for (auto &it : vec) {
229         if (it != what)
230             continue;
231         if (idx)
232             *idx = &it - &vec[0];
233         return true;
234     }
235     return false;
236 }
237
238 static bool GMQCC_WARN vec_ir_block_find(std::vector<ir_block *> &vec, ir_block *what, size_t *idx)
239 {
240     for (auto &it : vec) {
241         if (it != what)
242             continue;
243         if (idx)
244             *idx = &it - &vec[0];
245         return true;
246     }
247     return false;
248 }
249
250 static bool GMQCC_WARN vec_ir_instr_find(std::vector<ir_instr *> &vec, ir_instr *what, size_t *idx)
251 {
252     for (auto &it : vec) {
253         if (it != what)
254             continue;
255         if (idx)
256             *idx = &it - &vec[0];
257         return true;
258     }
259     return false;
260 }
261
262 /***********************************************************************
263  * IR Builder
264  */
265
266 static void ir_block_delete_quick(ir_block* self);
267 static void ir_instr_delete_quick(ir_instr *self);
268 static void ir_function_delete_quick(ir_function *self);
269
270 ir_builder::ir_builder(const std::string& modulename)
271 : m_name(modulename),
272   m_code(new code_t)
273 {
274     m_htglobals   = util_htnew(IR_HT_SIZE);
275     m_htfields    = util_htnew(IR_HT_SIZE);
276     m_htfunctions = util_htnew(IR_HT_SIZE);
277
278     m_nil = new ir_value("nil", store_value, TYPE_NIL);
279     m_nil->m_cvq = CV_CONST;
280
281     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
282         /* We write to these, but they are not supposed to be used outside the IR,
283          * so don't allow the generation of ir_instrs which use them:
284          * make each one a constant noexpr.
285          */
286         m_vinstr_temp[i] = new ir_value("vinstr_temp", store_value, TYPE_NOEXPR);
287         m_vinstr_temp[i]->m_cvq = CV_CONST;
288     }
289 }
290
291 ir_builder::~ir_builder()
292 {
293     util_htdel(m_htglobals);
294     util_htdel(m_htfields);
295     util_htdel(m_htfunctions);
296     for (auto& f : m_functions)
297         ir_function_delete_quick(f.release());
298     m_functions.clear(); // delete them now before deleting the rest:
299
300     delete m_nil;
301
302     for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
303         delete m_vinstr_temp[i];
304     }
305
306     m_extparams.clear();
307     m_extparam_protos.clear();
308 }
309
310 ir_function* ir_builder::createFunction(const std::string& name, qc_type outtype)
311 {
312     ir_function *fn = (ir_function*)util_htget(m_htfunctions, name.c_str());
313     if (fn)
314         return nullptr;
315
316     fn = new ir_function(this, outtype);
317     fn->m_name = name;
318     m_functions.emplace_back(fn);
319     util_htset(m_htfunctions, name.c_str(), fn);
320
321     fn->m_value = createGlobal(fn->m_name, TYPE_FUNCTION);
322     if (!fn->m_value) {
323         delete fn;
324         return nullptr;
325     }
326
327     fn->m_value->m_hasvalue = true;
328     fn->m_value->m_outtype = outtype;
329     fn->m_value->m_constval.vfunc = fn;
330     fn->m_value->m_context = fn->m_context;
331
332     return fn;
333 }
334
335 ir_value* ir_builder::createGlobal(const std::string& name, qc_type vtype)
336 {
337     ir_value *ve;
338
339     if (name[0] != '#')
340     {
341         ve = (ir_value*)util_htget(m_htglobals, name.c_str());
342         if (ve) {
343             return nullptr;
344         }
345     }
346
347     ve = new ir_value(std::string(name), store_global, vtype);
348     m_globals.emplace_back(ve);
349     util_htset(m_htglobals, name.c_str(), ve);
350     return ve;
351 }
352
353 ir_value* ir_builder::get_va_count()
354 {
355     if (m_reserved_va_count)
356         return m_reserved_va_count;
357     return (m_reserved_va_count = createGlobal("reserved:va_count", TYPE_FLOAT));
358 }
359
360 ir_value* ir_builder::createField(const std::string& name, qc_type vtype)
361 {
362     ir_value *ve = (ir_value*)util_htget(m_htfields, name.c_str());
363     if (ve) {
364         return nullptr;
365     }
366
367     ve = new ir_value(std::string(name), store_global, TYPE_FIELD);
368     ve->m_fieldtype = vtype;
369     m_fields.emplace_back(ve);
370     util_htset(m_htfields, name.c_str(), ve);
371     return ve;
372 }
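/* Typical use of the builder above (illustrative sketch only; the literal
 * module, function and field names are made up for this example):
 *
 *     ir_builder builder("progs.dat");
 *     ir_function *fn  = builder.createFunction("main", TYPE_VOID);
 *     ir_value    *g   = builder.createGlobal("self_global", TYPE_ENTITY);
 *     ir_value    *fld = builder.createField("health", TYPE_FLOAT);
 */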
373
374 /***********************************************************************
375  * IR Function
376  */
377
378 static bool ir_function_naive_phi(ir_function*);
379 static void ir_function_enumerate(ir_function*);
380 static bool ir_function_calculate_liferanges(ir_function*);
381 static bool ir_function_allocate_locals(ir_function*);
382
383 ir_function::ir_function(ir_builder* owner_, qc_type outtype_)
384 : m_owner(owner_),
385   m_name("<@unnamed>"),
386   m_outtype(outtype_)
387 {
388     m_context.file = "<@no context>";
389     m_context.line = 0;
390 }
391
392 ir_function::~ir_function()
393 {
394 }
395
396 static void ir_function_delete_quick(ir_function *self)
397 {
398     for (auto& b : self->m_blocks)
399         ir_block_delete_quick(b.release());
400     delete self;
401 }
402
403 static void ir_function_collect_value(ir_function *self, ir_value *v)
404 {
405     self->m_values.emplace_back(v);
406 }
407
408 ir_block* ir_function_create_block(lex_ctx_t ctx, ir_function *self, const char *label)
409 {
410     ir_block* bn = new ir_block(self, label ? std::string(label) : std::string());
411     bn->m_context = ctx;
412     self->m_blocks.emplace_back(bn);
413
414     if ((self->m_flags & IR_FLAG_BLOCK_COVERAGE) && self->m_owner->m_coverage_func)
415         (void)ir_block_create_call(bn, ctx, nullptr, self->m_owner->m_coverage_func, false);
416
417     return bn;
418 }
419
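/* Roughly: true for opcodes that produce a result in operand 0 (arithmetic,
 * comparisons, loads, ADDRESS, NOT, AND/OR, bit ops, calls and the vector
 * virtual instructions). The peephole pass below uses this to decide whether
 * a following STORE of that result can be folded away. */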
420 static bool instr_is_operation(uint16_t op)
421 {
422     return ( (op >= INSTR_MUL_F  && op <= INSTR_GT) ||
423              (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) ||
424              (op == INSTR_ADDRESS) ||
425              (op >= INSTR_NOT_F  && op <= INSTR_NOT_FNC) ||
426              (op >= INSTR_AND    && op <= INSTR_BITOR) ||
427              (op >= INSTR_CALL0  && op <= INSTR_CALL8) ||
428              (op >= VINSTR_BITAND_V && op <= VINSTR_NEG_V) );
429 }
430
431 static bool ir_function_pass_peephole(ir_function *self)
432 {
433     for (auto& bp : self->m_blocks) {
434         ir_block *block = bp.get();
435         for (size_t i = 0; i < block->m_instr.size(); ++i) {
436             ir_instr *inst;
437             inst = block->m_instr[i];
438
439             if (i >= 1 &&
440                 (inst->m_opcode >= INSTR_STORE_F &&
441                  inst->m_opcode <= INSTR_STORE_FNC))
442             {
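                /* Pattern folded here (illustrative sketch in rough QC-asm
                 * notation, not literal output):
                 *     OP      %tmp, a, b     ; any instr_is_operation() opcode
                 *     STORE_x dest, %tmp
                 * becomes
                 *     OP      dest, a, b
                 * provided %tmp is an SSA temp whose only read is this store.
                 */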
443                 ir_instr *store;
444                 ir_instr *oper;
445                 ir_value *value;
446
447                 store = inst;
448
449                 oper  = block->m_instr[i-1];
450                 if (!instr_is_operation(oper->m_opcode))
451                     continue;
452
453                 /* Don't change semantics of MUL_VF in engines where these may not alias. */
454                 if (OPTS_FLAG(LEGACY_VECTOR_MATHS)) {
455                     if (oper->m_opcode == INSTR_MUL_VF && oper->_m_ops[2]->m_memberof == oper->_m_ops[1])
456                         continue;
457                     if (oper->m_opcode == INSTR_MUL_FV && oper->_m_ops[1]->m_memberof == oper->_m_ops[2])
458                         continue;
459                 }
460
461                 value = oper->_m_ops[0];
462
463                 /* only do it for SSA values */
464                 if (value->m_store != store_value)
465                     continue;
466
467                 /* don't optimize out the temp if it's used later again */
468                 if (value->m_reads.size() != 1)
469                     continue;
470
471                 /* The very next store must use this value */
472                 if (value->m_reads[0] != store)
473                     continue;
474
475                 /* And of course the store must _read_ from it, so it's in
476                  * OP 1 */
477                 if (store->_m_ops[1] != value)
478                     continue;
479
480                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
481                 (void)!ir_instr_op(oper, 0, store->_m_ops[0], true);
482
483                 block->m_instr.erase(block->m_instr.begin() + i);
484                 delete store;
485             }
486             else if (inst->m_opcode == VINSTR_COND)
487             {
488                 /* If the value COND tests is the result of a NOT, the NOT
489                  * can be removed and COND's branch targets swapped.
490                  */
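                /* Illustrative sketch of the rewrite (rough notation):
                 *     %t = NOT_F a
                 *     COND %t ? Btrue : Bfalse
                 * becomes
                 *     COND a ? Bfalse : Btrue
                 * applied repeatedly while the operand keeps coming from a NOT.
                 */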
491                 while (true) {
492                     ir_block *tmp;
493                     size_t    inotid;
494                     ir_instr *inot;
495                     ir_value *value;
496                     value = inst->_m_ops[0];
497
498                     if (value->m_store != store_value || value->m_reads.size() != 1 || value->m_reads[0] != inst)
499                         break;
500
501                     inot = value->m_writes[0];
502                     if (inot->_m_ops[0] != value ||
503                         inot->m_opcode < INSTR_NOT_F ||
504                         inot->m_opcode > INSTR_NOT_FNC ||
505                         inot->m_opcode == INSTR_NOT_V || /* can't do these */
506                         inot->m_opcode == INSTR_NOT_S)
507                     {
508                         break;
509                     }
510
511                     /* count */
512                     ++opts_optimizationcount[OPTIM_PEEPHOLE];
513                     /* change operand */
514                     (void)!ir_instr_op(inst, 0, inot->_m_ops[1], false);
515                     /* remove NOT */
516                     tmp = inot->m_owner;
517                     for (inotid = 0; inotid < tmp->m_instr.size(); ++inotid) {
518                         if (tmp->m_instr[inotid] == inot)
519                             break;
520                     }
521                     if (inotid >= tmp->m_instr.size()) {
522                         compile_error(inst->m_context, "sanity-check failed: failed to find instruction to optimize out");
523                         return false;
524                     }
525                     tmp->m_instr.erase(tmp->m_instr.begin() + inotid);
526                     delete inot;
527                     /* swap ontrue/onfalse */
528                     tmp = inst->m_bops[0];
529                     inst->m_bops[0] = inst->m_bops[1];
530                     inst->m_bops[1] = tmp;
531                 }
532                 continue;
533             }
534         }
535     }
536
537     return true;
538 }
539
540 static bool ir_function_pass_tailrecursion(ir_function *self)
541 {
542     size_t p;
543
544     for (auto& bp : self->m_blocks) {
545         ir_block *block = bp.get();
546
547         ir_value *funcval;
548         ir_instr *ret, *call, *store = nullptr;
549
550         if (!block->m_final || block->m_instr.size() < 2)
551             continue;
552
553         ret = block->m_instr.back();
554         if (ret->m_opcode != INSTR_DONE && ret->m_opcode != INSTR_RETURN)
555             continue;
556
557         call = block->m_instr[block->m_instr.size()-2];
558         if (call->m_opcode >= INSTR_STORE_F && call->m_opcode <= INSTR_STORE_FNC) {
559             /* account for the unoptimized
560              * CALL
561              * STORE %return, %tmp
562              * RETURN %tmp
563              * version
564              */
565             if (block->m_instr.size() < 3)
566                 continue;
567
568             store = call;
569             call = block->m_instr[block->m_instr.size()-3];
570         }
571
572         if (call->m_opcode < INSTR_CALL0 || call->m_opcode > INSTR_CALL8)
573             continue;
574
575         if (store) {
576             /* optimize out the STORE */
577             if (ret->_m_ops[0]   &&
578                 ret->_m_ops[0]   == store->_m_ops[0] &&
579                 store->_m_ops[1] == call->_m_ops[0])
580             {
581                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
582                 call->_m_ops[0] = store->_m_ops[0];
583                 block->m_instr.erase(block->m_instr.end()-2);
584                 delete store;
585             }
586             else
587                 continue;
588         }
589
590         if (!call->_m_ops[0])
591             continue;
592
593         funcval = call->_m_ops[1];
594         if (!funcval)
595             continue;
596         if (funcval->m_vtype != TYPE_FUNCTION || funcval->m_constval.vfunc != self)
597             continue;
598
599         /* now we have a CALL and a RET, check if it's a tailcall */
600         if (ret->_m_ops[0] && call->_m_ops[0] != ret->_m_ops[0])
601             continue;
602
603         ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
604         block->m_instr.erase(block->m_instr.end()-2, block->m_instr.end());
605
606         block->m_final = false; /* open it back up */
607
608         /* emit parameter stores */
609         for (p = 0; p < call->m_params.size(); ++p) {
610             /* assert(call->params_count <= self->locals_count); */
611             if (!ir_block_create_store(block, call->m_context, self->m_locals[p].get(), call->m_params[p])) {
612                 irerror(call->m_context, "failed to create tailcall store instruction for parameter %i", (int)p);
613                 return false;
614             }
615         }
616         if (!ir_block_create_jump(block, call->m_context, self->m_blocks[0].get())) {
617             irerror(call->m_context, "failed to create tailcall jump");
618             return false;
619         }
620
621         delete call;
622         delete ret;
623     }
624
625     return true;
626 }
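/* Net effect of the pass above (illustrative sketch, rough notation):
 *     CALLn  self            ; self-call with its parameters
 *     RETURN %ret
 * at the end of a block is rewritten into one STORE per parameter followed
 * by a jump back to the function's entry block, turning tail recursion
 * into a loop.
 */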
627
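/* Finalization order, as implemented below: unused-variable warnings,
 * the peephole and tail-recursion passes (when enabled), naive PHI
 * lowering, vector-member expansion, instruction enumeration, life-range
 * calculation and local allocation. */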
628 bool ir_function_finalize(ir_function *self)
629 {
630     if (self->m_builtin)
631         return true;
632
633     for (auto& lp : self->m_locals) {
634         ir_value *v = lp.get();
635         if (v->m_reads.empty() && v->m_writes.size() && !(v->m_flags & IR_FLAG_NOREF)) {
636             // if it's a vector, check that all of its members are unused before
637             // claiming it's unused; otherwise skip the vector entirely
638             if (v->m_vtype == TYPE_VECTOR)
639             {
640                 size_t mask = (1 << 3) - 1, bits = 0;
641                 for (size_t i = 0; i < 3; i++)
642                     if (!v->m_members[i] || (v->m_members[i]->m_reads.empty()
643                         && v->m_members[i]->m_writes.size()))
644                         bits |= (1 << i);
645                 // all components are unused so just report the vector
646                 if (bits == mask && irwarning(v->m_context, WARN_UNUSED_VARIABLE,
647                     "unused variable: `%s`", v->m_name.c_str()))
648                     return false;
649                 else if (bits != mask)
650                     // individual components are unused so mention them
651                     for (size_t i = 0; i < 3; i++)
652                         if ((bits & (1 << i))
653                             && irwarning(v->m_context, WARN_UNUSED_COMPONENT,
654                                 "unused vector component: `%s.%c`", v->m_name.c_str(), "xyz"[i]))
655                             return false;
656             }
657             // just a standard variable
658             else if (irwarning(v->m_context, WARN_UNUSED_VARIABLE,
659                     "unused variable: `%s`", v->m_name.c_str())) return false;
660         }
661     }
662
663     if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
664         if (!ir_function_pass_peephole(self)) {
665             irerror(self->m_context, "generic optimization pass broke something in `%s`", self->m_name.c_str());
666             return false;
667         }
668     }
669
670     if (OPTS_OPTIMIZATION(OPTIM_TAIL_RECURSION)) {
671         if (!ir_function_pass_tailrecursion(self)) {
672             irerror(self->m_context, "tail-recursion optimization pass broke something in `%s`", self->m_name.c_str());
673             return false;
674         }
675     }
676
677     if (!ir_function_naive_phi(self)) {
678         irerror(self->m_context, "internal error: ir_function_naive_phi failed");
679         return false;
680     }
681
682     for (auto& lp : self->m_locals) {
683         ir_value *v = lp.get();
684         if (v->m_vtype == TYPE_VECTOR ||
685             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
686         {
687             v->vectorMember(0);
688             v->vectorMember(1);
689             v->vectorMember(2);
690         }
691     }
692     for (auto& vp : self->m_values) {
693         ir_value *v = vp.get();
694         if (v->m_vtype == TYPE_VECTOR ||
695             (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
696         {
697             v->vectorMember(0);
698             v->vectorMember(1);
699             v->vectorMember(2);
700         }
701     }
702
703     ir_function_enumerate(self);
704
705     if (!ir_function_calculate_liferanges(self))
706         return false;
707     if (!ir_function_allocate_locals(self))
708         return false;
709     return true;
710 }
711
712 ir_value* ir_function_create_local(ir_function *self, const std::string& name, qc_type vtype, bool param)
713 {
714     ir_value *ve;
715
716     if (param &&
717         !self->m_locals.empty() &&
718         self->m_locals.back()->m_store != store_param)
719     {
720         irerror(self->m_context, "cannot add parameters after adding locals");
721         return nullptr;
722     }
723
724     ve = new ir_value(std::string(name), (param ? store_param : store_local), vtype);
725     if (param)
726         ve->m_locked = true;
727     self->m_locals.emplace_back(ve);
728     return ve;
729 }
730
731 /***********************************************************************
732  * IR Block
733  */
734
735 ir_block::ir_block(ir_function* owner, const std::string& name)
736 : m_owner(owner),
737   m_label(name)
738 {
739     m_context.file = "<@no context>";
740     m_context.line = 0;
741 }
742
743 ir_block::~ir_block()
744 {
745     for (auto &i : m_instr)
746         delete i;
747 }
748
749 static void ir_block_delete_quick(ir_block* self)
750 {
751     for (auto &i : self->m_instr)
752         ir_instr_delete_quick(i);
753     self->m_instr.clear();
754     delete self;
755 }
756
757 /***********************************************************************
758  * IR Instructions
759  */
760
761 ir_instr::ir_instr(lex_ctx_t ctx, ir_block* owner_, int op)
762 : m_opcode(op),
763   m_context(ctx),
764   m_owner(owner_)
765 {
766 }
767
768 ir_instr::~ir_instr()
769 {
770     // The following calls can only delete from
771     // vectors; we still want to delete this instruction,
772     // so ignore the return value. Since gcc's warn_unused_result
773     // attribute is not silenced by an explicit (void)foo(),
774     // we improvise and use (void)!foo() instead.
775     for (auto &it : m_phi) {
776         size_t idx;
777         if (vec_ir_instr_find(it.value->m_writes, this, &idx))
778             it.value->m_writes.erase(it.value->m_writes.begin() + idx);
779         if (vec_ir_instr_find(it.value->m_reads, this, &idx))
780             it.value->m_reads.erase(it.value->m_reads.begin() + idx);
781     }
782     for (auto &it : m_params) {
783         size_t idx;
784         if (vec_ir_instr_find(it->m_writes, this, &idx))
785             it->m_writes.erase(it->m_writes.begin() + idx);
786         if (vec_ir_instr_find(it->m_reads, this, &idx))
787             it->m_reads.erase(it->m_reads.begin() + idx);
788     }
789     (void)!ir_instr_op(this, 0, nullptr, false);
790     (void)!ir_instr_op(this, 1, nullptr, false);
791     (void)!ir_instr_op(this, 2, nullptr, false);
792 }
793
794 static void ir_instr_delete_quick(ir_instr *self)
795 {
796     self->m_phi.clear();
797     self->m_params.clear();
798     self->_m_ops[0] = nullptr;
799     self->_m_ops[1] = nullptr;
800     self->_m_ops[2] = nullptr;
801     delete self;
802 }
803
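/* Set operand `op` of an instruction while keeping the affected values'
 * def/use lists (m_writes / m_reads) consistent; passing nullptr simply
 * detaches the current operand. */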
804 static bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing)
805 {
806     if (v && v->m_vtype == TYPE_NOEXPR) {
807         irerror(self->m_context, "tried to use a NOEXPR value");
808         return false;
809     }
810
811     if (self->_m_ops[op]) {
812         size_t idx;
813         if (writing && vec_ir_instr_find(self->_m_ops[op]->m_writes, self, &idx))
814             self->_m_ops[op]->m_writes.erase(self->_m_ops[op]->m_writes.begin() + idx);
815         else if (vec_ir_instr_find(self->_m_ops[op]->m_reads, self, &idx))
816             self->_m_ops[op]->m_reads.erase(self->_m_ops[op]->m_reads.begin() + idx);
817     }
818     if (v) {
819         if (writing)
820             v->m_writes.push_back(self);
821         else
822             v->m_reads.push_back(self);
823     }
824     self->_m_ops[op] = v;
825     return true;
826 }
827
828 /***********************************************************************
829  * IR Value
830  */
831
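/* A vector's members share their parent's global address; each member
 * stores its component index in m_code.addroffset (see codeAddress below),
 * so .y and .z end up at +1 and +2. */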
832 void ir_value::setCodeAddress(int32_t gaddr)
833 {
834     m_code.globaladdr = gaddr;
835     if (m_members[0]) m_members[0]->m_code.globaladdr = gaddr;
836     if (m_members[1]) m_members[1]->m_code.globaladdr = gaddr;
837     if (m_members[2]) m_members[2]->m_code.globaladdr = gaddr;
838 }
839
840 int32_t ir_value::codeAddress() const
841 {
842     if (m_store == store_return)
843         return OFS_RETURN + m_code.addroffset;
844     return m_code.globaladdr + m_code.addroffset;
845 }
846
847 ir_value::ir_value(std::string&& name_, store_type store_, qc_type vtype_)
848     : m_name(move(name_))
849     , m_vtype(vtype_)
850     , m_store(store_)
851 {
852     m_fieldtype = TYPE_VOID;
853     m_outtype = TYPE_VOID;
854     m_flags = 0;
855
856     m_cvq          = CV_NONE;
857     m_hasvalue     = false;
858     m_context.file = "<@no context>";
859     m_context.line = 0;
860
861     memset(&m_constval, 0, sizeof(m_constval));
862     memset(&m_code,     0, sizeof(m_code));
863
864     m_members[0] = nullptr;
865     m_members[1] = nullptr;
866     m_members[2] = nullptr;
867     m_memberof = nullptr;
868
869     m_unique_life = false;
870     m_locked = false;
871     m_callparam  = false;
872 }
873
874 ir_value::ir_value(ir_function *owner, std::string&& name, store_type storetype, qc_type vtype)
875     : ir_value(move(name), storetype, vtype)
876 {
877     ir_function_collect_value(owner, this);
878 }
879
880 ir_value::~ir_value()
881 {
882     size_t i;
883     if (m_hasvalue) {
884         if (m_vtype == TYPE_STRING)
885             mem_d((void*)m_constval.vstring);
886     }
887     if (!(m_flags & IR_FLAG_SPLIT_VECTOR)) {
888         for (i = 0; i < 3; ++i) {
889             if (m_members[i])
890                 delete m_members[i];
891         }
892     }
893 }
894
895
896 /*  helper function */
897 ir_value* ir_builder::literalFloat(float value, bool add_to_list) {
898     ir_value *v = new ir_value("#IMMEDIATE", store_global, TYPE_FLOAT);
899     v->m_flags |= IR_FLAG_ERASABLE;
900     v->m_hasvalue = true;
901     v->m_cvq = CV_CONST;
902     v->m_constval.vfloat = value;
903
904     m_globals.emplace_back(v);
905     if (add_to_list)
906         m_const_floats.emplace_back(v);
907     return v;
908 }
909
910 ir_value* ir_value::vectorMember(unsigned int member)
911 {
912     std::string name;
913     ir_value *m;
914     if (member >= 3)
915         return nullptr;
916
917     if (m_members[member])
918         return m_members[member];
919
920     if (!m_name.empty()) {
921         char member_name[3] = { '_', char('x' + member), 0 };
922         name = m_name + member_name;
923     }
924
925     if (m_vtype == TYPE_VECTOR)
926     {
927         m = new ir_value(move(name), m_store, TYPE_FLOAT);
928         if (!m)
929             return nullptr;
930         m->m_context = m_context;
931
932         m_members[member] = m;
933         m->m_code.addroffset = member;
934     }
935     else if (m_vtype == TYPE_FIELD)
936     {
937         if (m_fieldtype != TYPE_VECTOR)
938             return nullptr;
939         m = new ir_value(move(name), m_store, TYPE_FIELD);
940         if (!m)
941             return nullptr;
942         m->m_fieldtype = TYPE_FLOAT;
943         m->m_context = m_context;
944
945         m_members[member] = m;
946         m->m_code.addroffset = member;
947     }
948     else
949     {
950         irerror(m_context, "invalid member access on %s", m_name.c_str());
951         return nullptr;
952     }
953
954     m->m_memberof = this;
955     return m;
956 }
957
958 size_t ir_value::size() const {
959     if (m_vtype == TYPE_FIELD && m_fieldtype == TYPE_VECTOR)
960         return type_sizeof_[TYPE_VECTOR];
961     return type_sizeof_[m_vtype];
962 }
963
964 bool ir_value::setFloat(float f)
965 {
966     if (m_vtype != TYPE_FLOAT)
967         return false;
968     m_constval.vfloat = f;
969     m_hasvalue = true;
970     return true;
971 }
972
973 bool ir_value::setFunc(int f)
974 {
975     if (m_vtype != TYPE_FUNCTION)
976         return false;
977     m_constval.vint = f;
978     m_hasvalue = true;
979     return true;
980 }
981
982 bool ir_value::setVector(vec3_t v)
983 {
984     if (m_vtype != TYPE_VECTOR)
985         return false;
986     m_constval.vvec = v;
987     m_hasvalue = true;
988     return true;
989 }
990
991 bool ir_value::setField(ir_value *fld)
992 {
993     if (m_vtype != TYPE_FIELD)
994         return false;
995     m_constval.vpointer = fld;
996     m_hasvalue = true;
997     return true;
998 }
999
1000 bool ir_value::setString(const char *str)
1001 {
1002     if (m_vtype != TYPE_STRING)
1003         return false;
1004     m_constval.vstring = util_strdupe(str);
1005     m_hasvalue = true;
1006     return true;
1007 }
1008
1009 #if 0
1010 bool ir_value::setInt(int i)
1011 {
1012     if (m_vtype != TYPE_INTEGER)
1013         return false;
1014     m_constval.vint = i;
1015     m_hasvalue = true;
1016     return true;
1017 }
1018 #endif
1019
1020 bool ir_value::lives(size_t at)
1021 {
1022     for (auto& l : m_life) {
1023         if (l.start <= at && at <= l.end)
1024             return true;
1025         if (l.start > at) /* since it's ordered */
1026             return false;
1027     }
1028     return false;
1029 }
1030
1031 bool ir_value::insertLife(size_t idx, ir_life_entry_t e)
1032 {
1033     m_life.insert(m_life.begin() + idx, e);
1034     return true;
1035 }
1036
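/* Mark instruction id `s` as alive for this value. m_life is kept sorted
 * and non-overlapping: s either extends an adjacent range, bridges and
 * merges two ranges, or starts a new single-point range. */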
1037 bool ir_value::setAlive(size_t s)
1038 {
1039     size_t i;
1040     const size_t vs = m_life.size();
1041     ir_life_entry_t *life_found = nullptr;
1042     ir_life_entry_t *before = nullptr;
1043     ir_life_entry_t new_entry;
1044
1045     /* Find the first range >= s */
1046     for (i = 0; i < vs; ++i)
1047     {
1048         before = life_found;
1049         life_found = &m_life[i];
1050         if (life_found->start > s)
1051             break;
1052     }
1053     /* nothing found? append */
1054     if (i == vs) {
1055         ir_life_entry_t e;
1056         if (life_found && life_found->end+1 == s)
1057         {
1058             /* previous life range can be merged in */
1059             life_found->end++;
1060             return true;
1061         }
1062         if (life_found && life_found->end >= s)
1063             return false;
1064         e.start = e.end = s;
1065         m_life.emplace_back(e);
1066         return true;
1067     }
1068     /* found */
1069     if (before)
1070     {
1071         if (before->end + 1 == s &&
1072             life_found->start - 1 == s)
1073         {
1074             /* merge */
1075             before->end = life_found->end;
1076             m_life.erase(m_life.begin()+i);
1077             return true;
1078         }
1079         if (before->end + 1 == s)
1080         {
1081             /* extend before */
1082             before->end++;
1083             return true;
1084         }
1085         /* already contained */
1086         if (before->end >= s)
1087             return false;
1088     }
1089     /* extend */
1090     if (life_found->start - 1 == s)
1091     {
1092         life_found->start--;
1093         return true;
1094     }
1095     /* insert a new entry */
1096     new_entry.start = new_entry.end = s;
1097     return insertLife(i, new_entry);
1098 }
1099
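/* Merge another value's life ranges into this one: effectively the union
 * of two sorted interval lists, collapsing overlapping or adjacent entries
 * in place. */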
1100 bool ir_value::mergeLife(const ir_value *other)
1101 {
1102     size_t i, myi;
1103
1104     if (other->m_life.empty())
1105         return true;
1106
1107     if (m_life.empty()) {
1108         m_life = other->m_life;
1109         return true;
1110     }
1111
1112     myi = 0;
1113     for (i = 0; i < other->m_life.size(); ++i)
1114     {
1115         const ir_life_entry_t &otherlife = other->m_life[i];
1116         while (true)
1117         {
1118             ir_life_entry_t *entry = &m_life[myi];
1119
1120             if (otherlife.end+1 < entry->start)
1121             {
1122                 /* adding an interval before entry */
1123                 if (!insertLife(myi, otherlife))
1124                     return false;
1125                 ++myi;
1126                 break;
1127             }
1128
1129             if (otherlife.start <  entry->start &&
1130                 otherlife.end+1 >= entry->start)
1131             {
1132                 /* starts earlier and overlaps */
1133                 entry->start = otherlife.start;
1134             }
1135
1136             if (otherlife.end   >  entry->end &&
1137                 otherlife.start <= entry->end+1)
1138             {
1139                 /* ends later and overlaps */
1140                 entry->end = otherlife.end;
1141             }
1142
1143             /* see if our change combines it with the next ranges */
1144             while (myi+1 < m_life.size() &&
1145                    entry->end+1 >= m_life[1+myi].start)
1146             {
1147                 /* overlaps with (myi+1) */
1148                 if (entry->end < m_life[1+myi].end)
1149                     entry->end = m_life[1+myi].end;
1150                 m_life.erase(m_life.begin() + (myi + 1));
1151                 entry = &m_life[myi];
1152             }
1153
1154             /* see if we're after the entry */
1155             if (otherlife.start > entry->end)
1156             {
1157                 ++myi;
1158                 /* append if we're at the end */
1159                 if (myi >= m_life.size()) {
1160                     m_life.emplace_back(otherlife);
1161                     break;
1162                 }
1163                 /* otherwise check the next range */
1164                 continue;
1165             }
1166             break;
1167         }
1168     }
1169     return true;
1170 }
1171
1172 static bool ir_values_overlap(const ir_value *a, const ir_value *b)
1173 {
1174     /* For any life entry in A see if it overlaps with
1175      * any life entry in B.
1176      * Note that the life entries are ordered, so we can use a
1177      * more efficient algorithm than naively translating the
1178      * statement above.
1179      */
1180
1181     const ir_life_entry_t *la, *lb, *enda, *endb;
1182
1183     /* first of all, if either has no life range, they cannot clash */
1184     if (a->m_life.empty() || b->m_life.empty())
1185         return false;
1186
1187     la = &a->m_life.front();
1188     lb = &b->m_life.front();
1189     enda = &a->m_life.back() + 1;
1190     endb = &b->m_life.back() + 1;
1191     while (true)
1192     {
1193         /* check if the entries overlap, for that,
1194          * both must start before the other one ends.
1195          */
1196         if (la->start < lb->end &&
1197             lb->start < la->end)
1198         {
1199             return true;
1200         }
1201
1202         /* entries are ordered
1203          * one entry is earlier than the other
1204          * that earlier entry will be moved forward
1205          */
1206         if (la->start < lb->start)
1207         {
1208             /* order: A B, move A forward
1209              * check if we hit the end with A
1210              */
1211             if (++la == enda)
1212                 break;
1213         }
1214         else /* if (lb->start < la->start)  actually <= */
1215         {
1216             /* order: B A, move B forward
1217              * check if we hit the end with B
1218              */
1219             if (++lb == endb)
1220                 break;
1221         }
1222     }
1223     return false;
1224 }
1225
1226 /***********************************************************************
1227  * IR main operations
1228  */
1229
1230 static bool ir_check_unreachable(ir_block *self)
1231 {
1232     /* The IR should never have to deal with unreachable code */
1233     if (!self->m_final/* || OPTS_FLAG(ALLOW_UNREACHABLE_CODE)*/)
1234         return true;
1235     irerror(self->m_context, "unreachable statement (%s)", self->m_label.c_str());
1236     return false;
1237 }
1238
1239 bool ir_block_create_store_op(ir_block *self, lex_ctx_t ctx, int op, ir_value *target, ir_value *what)
1240 {
1241     ir_instr *in;
1242     if (!ir_check_unreachable(self))
1243         return false;
1244
1245     if (target->m_store == store_value &&
1246         (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC))
1247     {
1248         irerror(self->m_context, "cannot store to an SSA value");
1249         irerror(self->m_context, "trying to store: %s <- %s", target->m_name.c_str(), what->m_name.c_str());
1250         irerror(self->m_context, "instruction: %s", util_instr_str[op]);
1251         return false;
1252     }
1253
1254     in = new ir_instr(ctx, self, op);
1255     if (!in)
1256         return false;
1257
1258     if (!ir_instr_op(in, 0, target, (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC)) ||
1259         !ir_instr_op(in, 1, what, false))
1260     {
1261         delete in;
1262         return false;
1263     }
1264     self->m_instr.push_back(in);
1265     return true;
1266 }
1267
1268 bool ir_block_create_state_op(ir_block *self, lex_ctx_t ctx, ir_value *frame, ir_value *think)
1269 {
1270     ir_instr *in;
1271     if (!ir_check_unreachable(self))
1272         return false;
1273
1274     in = new ir_instr(ctx, self, INSTR_STATE);
1275     if (!in)
1276         return false;
1277
1278     if (!ir_instr_op(in, 0, frame, false) ||
1279         !ir_instr_op(in, 1, think, false))
1280     {
1281         delete in;
1282         return false;
1283     }
1284     self->m_instr.push_back(in);
1285     return true;
1286 }
1287
1288 static bool ir_block_create_store(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1289 {
1290     int op = 0;
1291     qc_type vtype;
1292     if (target->m_vtype == TYPE_VARIANT)
1293         vtype = what->m_vtype;
1294     else
1295         vtype = target->m_vtype;
1296
1297 #if 0
1298     if      (vtype == TYPE_FLOAT   && what->m_vtype == TYPE_INTEGER)
1299         op = INSTR_CONV_ITOF;
1300     else if (vtype == TYPE_INTEGER && what->m_vtype == TYPE_FLOAT)
1301         op = INSTR_CONV_FTOI;
1302 #endif
1303         op = type_store_instr[vtype];
1304
1305     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1306         if (op == INSTR_STORE_FLD && what->m_fieldtype == TYPE_VECTOR)
1307             op = INSTR_STORE_V;
1308     }
1309
1310     return ir_block_create_store_op(self, ctx, op, target, what);
1311 }
1312
1313 bool ir_block_create_storep(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1314 {
1315     int op = 0;
1316     qc_type vtype;
1317
1318     if (target->m_vtype != TYPE_POINTER)
1319         return false;
1320
1321     /* storing using pointer - target is a pointer, type must be
1322      * inferred from source
1323      */
1324     vtype = what->m_vtype;
1325
1326     op = type_storep_instr[vtype];
1327     if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1328         if (op == INSTR_STOREP_FLD && what->m_fieldtype == TYPE_VECTOR)
1329             op = INSTR_STOREP_V;
1330     }
1331
1332     return ir_block_create_store_op(self, ctx, op, target, what);
1333 }
1334
1335 bool ir_block_create_return(ir_block *self, lex_ctx_t ctx, ir_value *v)
1336 {
1337     ir_instr *in;
1338     if (!ir_check_unreachable(self))
1339         return false;
1340
1341     self->m_final = true;
1342
1343     self->m_is_return = true;
1344     in = new ir_instr(ctx, self, INSTR_RETURN);
1345     if (!in)
1346         return false;
1347
1348     if (v && !ir_instr_op(in, 0, v, false)) {
1349         delete in;
1350         return false;
1351     }
1352
1353     self->m_instr.push_back(in);
1354     return true;
1355 }
1356
1357 bool ir_block_create_if(ir_block *self, lex_ctx_t ctx, ir_value *v,
1358                         ir_block *ontrue, ir_block *onfalse)
1359 {
1360     ir_instr *in;
1361     if (!ir_check_unreachable(self))
1362         return false;
1363     self->m_final = true;
1364     /*in = new ir_instr(ctx, self, (v->m_vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/
1365     in = new ir_instr(ctx, self, VINSTR_COND);
1366     if (!in)
1367         return false;
1368
1369     if (!ir_instr_op(in, 0, v, false)) {
1370         delete in;
1371         return false;
1372     }
1373
1374     in->m_bops[0] = ontrue;
1375     in->m_bops[1] = onfalse;
1376
1377     self->m_instr.push_back(in);
1378
1379     self->m_exits.push_back(ontrue);
1380     self->m_exits.push_back(onfalse);
1381     ontrue->m_entries.push_back(self);
1382     onfalse->m_entries.push_back(self);
1383     return true;
1384 }
1385
1386 bool ir_block_create_jump(ir_block *self, lex_ctx_t ctx, ir_block *to)
1387 {
1388     ir_instr *in;
1389     if (!ir_check_unreachable(self))
1390         return false;
1391     self->m_final = true;
1392     in = new ir_instr(ctx, self, VINSTR_JUMP);
1393     if (!in)
1394         return false;
1395
1396     in->m_bops[0] = to;
1397     self->m_instr.push_back(in);
1398
1399     self->m_exits.push_back(to);
1400     to->m_entries.push_back(self);
1401     return true;
1402 }
1403
1404 bool ir_block_create_goto(ir_block *self, lex_ctx_t ctx, ir_block *to)
1405 {
1406     self->m_owner->m_flags |= IR_FLAG_HAS_GOTO;
1407     return ir_block_create_jump(self, ctx, to);
1408 }
1409
1410 ir_instr* ir_block_create_phi(ir_block *self, lex_ctx_t ctx, const char *label, qc_type ot)
1411 {
1412     ir_value *out;
1413     ir_instr *in;
1414     if (!ir_check_unreachable(self))
1415         return nullptr;
1416     in = new ir_instr(ctx, self, VINSTR_PHI);
1417     if (!in)
1418         return nullptr;
1419     out = new ir_value(self->m_owner, label ? label : "", store_value, ot);
1420     if (!out) {
1421         delete in;
1422         return nullptr;
1423     }
1424     if (!ir_instr_op(in, 0, out, true)) {
1425         delete in;
1426         return nullptr;
1427     }
1428     self->m_instr.push_back(in);
1429     return in;
1430 }
1431
1432 ir_value* ir_phi_value(ir_instr *self)
1433 {
1434     return self->_m_ops[0];
1435 }
1436
1437 void ir_phi_add(ir_instr* self, ir_block *b, ir_value *v)
1438 {
1439     ir_phi_entry_t pe;
1440
1441     if (!vec_ir_block_find(self->m_owner->m_entries, b, nullptr)) {
1442         // It must not be possible to reach this; otherwise the AST
1443         // is doing something wrong.
1444         irerror(self->m_context, "Invalid entry block for PHI");
1445         exit(EXIT_FAILURE);
1446     }
1447
1448     pe.value = v;
1449     pe.from = b;
1450     v->m_reads.push_back(self);
1451     self->m_phi.push_back(pe);
1452 }
1453
1454 /* call related code */
1455 ir_instr* ir_block_create_call(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *func, bool noreturn)
1456 {
1457     ir_value *out;
1458     ir_instr *in;
1459     if (!ir_check_unreachable(self))
1460         return nullptr;
1461     in = new ir_instr(ctx, self, (noreturn ? VINSTR_NRCALL : INSTR_CALL0));
1462     if (!in)
1463         return nullptr;
1464     if (noreturn) {
1465         self->m_final = true;
1466         self->m_is_return = true;
1467     }
1468     out = new ir_value(self->m_owner, label ? label : "", (func->m_outtype == TYPE_VOID) ? store_return : store_value, func->m_outtype);
1469     if (!out) {
1470         delete in;
1471         return nullptr;
1472     }
1473     if (!ir_instr_op(in, 0, out, true) ||
1474         !ir_instr_op(in, 1, func, false))
1475     {
1476         delete in;
1477         delete out;
1478         return nullptr;
1479     }
1480     self->m_instr.push_back(in);
1481     /*
1482     if (noreturn) {
1483         if (!ir_block_create_return(self, ctx, nullptr)) {
1484             compile_error(ctx, "internal error: failed to generate dummy-return instruction");
1485             delete in;
1486             return nullptr;
1487         }
1488     }
1489     */
1490     return in;
1491 }
1492
1493 ir_value* ir_call_value(ir_instr *self)
1494 {
1495     return self->_m_ops[0];
1496 }
1497
1498 void ir_call_param(ir_instr* self, ir_value *v)
1499 {
1500     self->m_params.push_back(v);
1501     v->m_reads.push_back(self);
1502 }
1503
1504 /* binary op related code */
1505
1506 ir_value* ir_block_create_binop(ir_block *self, lex_ctx_t ctx,
1507                                 const char *label, int opcode,
1508                                 ir_value *left, ir_value *right)
1509 {
1510     qc_type ot = TYPE_VOID;
1511     switch (opcode) {
1512         case INSTR_ADD_F:
1513         case INSTR_SUB_F:
1514         case INSTR_DIV_F:
1515         case INSTR_MUL_F:
1516         case INSTR_MUL_V:
1517         case INSTR_AND:
1518         case INSTR_OR:
1519 #if 0
1520         case INSTR_AND_I:
1521         case INSTR_AND_IF:
1522         case INSTR_AND_FI:
1523         case INSTR_OR_I:
1524         case INSTR_OR_IF:
1525         case INSTR_OR_FI:
1526 #endif
1527         case INSTR_BITAND:
1528         case INSTR_BITOR:
1529         case VINSTR_BITXOR:
1530 #if 0
1531         case INSTR_SUB_S: /* -- offset of string as float */
1532         case INSTR_MUL_IF:
1533         case INSTR_MUL_FI:
1534         case INSTR_DIV_IF:
1535         case INSTR_DIV_FI:
1536         case INSTR_BITOR_IF:
1537         case INSTR_BITOR_FI:
1538         case INSTR_BITAND_FI:
1539         case INSTR_BITAND_IF:
1540         case INSTR_EQ_I:
1541         case INSTR_NE_I:
1542 #endif
1543             ot = TYPE_FLOAT;
1544             break;
1545 #if 0
1546         case INSTR_ADD_I:
1547         case INSTR_ADD_IF:
1548         case INSTR_ADD_FI:
1549         case INSTR_SUB_I:
1550         case INSTR_SUB_FI:
1551         case INSTR_SUB_IF:
1552         case INSTR_MUL_I:
1553         case INSTR_DIV_I:
1554         case INSTR_BITAND_I:
1555         case INSTR_BITOR_I:
1556         case INSTR_XOR_I:
1557         case INSTR_RSHIFT_I:
1558         case INSTR_LSHIFT_I:
1559             ot = TYPE_INTEGER;
1560             break;
1561 #endif
1562         case INSTR_ADD_V:
1563         case INSTR_SUB_V:
1564         case INSTR_MUL_VF:
1565         case INSTR_MUL_FV:
1566         case VINSTR_BITAND_V:
1567         case VINSTR_BITOR_V:
1568         case VINSTR_BITXOR_V:
1569         case VINSTR_BITAND_VF:
1570         case VINSTR_BITOR_VF:
1571         case VINSTR_BITXOR_VF:
1572         case VINSTR_CROSS:
1573 #if 0
1574         case INSTR_DIV_VF:
1575         case INSTR_MUL_IV:
1576         case INSTR_MUL_VI:
1577 #endif
1578             ot = TYPE_VECTOR;
1579             break;
1580 #if 0
1581         case INSTR_ADD_SF:
1582             ot = TYPE_POINTER;
1583             break;
1584 #endif
1585     /*
1586      * after the following default case, the value of opcode can never
1587      * be 1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65
1588      */
1589         default:
1590             /* ranges: */
1591             /* boolean operations result in floats */
1592
1593             /*
1594              * opcode >= 10 takes true branch opcode is at least 10
1595              * opcode <= 23 takes false branch opcode is at least 24
1596              */
1597             if (opcode >= INSTR_EQ_F && opcode <= INSTR_GT)
1598                 ot = TYPE_FLOAT;
1599
1600             /*
1601              * At condition "opcode <= 23", the value of "opcode" must be
1602              * at least 24.
1603              * At condition "opcode <= 23", the value of "opcode" cannot be
1604              * equal to any of {1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65}.
1605              * The condition "opcode <= 23" cannot be true.
1606              *
1607              * Thus ot=2 (TYPE_FLOAT) can never be true
1608              */
1609 #if 0
1610             else if (opcode >= INSTR_LE && opcode <= INSTR_GT)
1611                 ot = TYPE_FLOAT;
1612             else if (opcode >= INSTR_LE_I && opcode <= INSTR_EQ_FI)
1613                 ot = TYPE_FLOAT;
1614 #endif
1615             break;
1616     };
1617     if (ot == TYPE_VOID) {
1618         /* The AST or parser were supposed to check this! */
1619         return nullptr;
1620     }
1621
1622     return ir_block_create_general_instr(self, ctx, label, opcode, left, right, ot);
1623 }
1624
1625 ir_value* ir_block_create_unary(ir_block *self, lex_ctx_t ctx,
1626                                 const char *label, int opcode,
1627                                 ir_value *operand)
1628 {
1629     qc_type ot = TYPE_FLOAT;
1630     switch (opcode) {
1631         case INSTR_NOT_F:
1632         case INSTR_NOT_V:
1633         case INSTR_NOT_S:
1634         case INSTR_NOT_ENT:
1635         case INSTR_NOT_FNC: /*
1636         case INSTR_NOT_I:   */
1637             ot = TYPE_FLOAT;
1638             break;
1639
1640         /*
1641          * Negation for virtual instructions is emulated with 0-value. Thankfully
1642          * the operand for 0 already exists so we just source it from here.
1643          */
1644         case VINSTR_NEG_F:
1645             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_F, nullptr, operand, ot);
1646         case VINSTR_NEG_V:
1647             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_V, self->m_owner->m_owner->m_nil, operand, TYPE_VECTOR);
1648
1649         default:
1650             ot = operand->m_vtype;
1651             break;
1652     };
1653     if (ot == TYPE_VOID) {
1654         /* The AST or parser were supposed to check this! */
1655         return nullptr;
1656     }
1657
1658     /* let's use the general instruction creator and pass nullptr for OPB */
1659     return ir_block_create_general_instr(self, ctx, label, opcode, operand, nullptr, ot);
1660 }
1661
1662 static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t ctx, const char *label,
1663                                         int op, ir_value *a, ir_value *b, qc_type outype)
1664 {
1665     ir_instr *instr;
1666     ir_value *out;
1667
1668     out = new ir_value(self->m_owner, label ? label : "", store_value, outype);
1669     if (!out)
1670         return nullptr;
1671
1672     instr = new ir_instr(ctx, self, op);
1673     if (!instr) {
1674         return nullptr;
1675     }
1676
1677     if (!ir_instr_op(instr, 0, out, true) ||
1678         !ir_instr_op(instr, 1, a, false) ||
1679         !ir_instr_op(instr, 2, b, false) )
1680     {
1681         goto on_error;
1682     }
1683
1684     self->m_instr.push_back(instr);
1685
1686     return out;
1687 on_error:
1688     delete instr;
1689     return nullptr;
1690 }
1691
1692 ir_value* ir_block_create_fieldaddress(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field)
1693 {
1694     ir_value *v;
1695
1696     /* Support for other pointer types can be added later if desired. */
1697     if (ent->m_vtype != TYPE_ENTITY)
1698         return nullptr;
1699
1700     if (field->m_vtype != TYPE_FIELD)
1701         return nullptr;
1702
1703     v = ir_block_create_general_instr(self, ctx, label, INSTR_ADDRESS, ent, field, TYPE_POINTER);
1704     v->m_fieldtype = field->m_fieldtype;
1705     return v;
1706 }
1707
1708 ir_value* ir_block_create_load_from_ent(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field, qc_type outype)
1709 {
1710     int op;
1711     if (ent->m_vtype != TYPE_ENTITY)
1712         return nullptr;
1713
1714     /* at some point we could redirect for TYPE_POINTER... but that could lead to carelessness */
1715     if (field->m_vtype != TYPE_FIELD)
1716         return nullptr;
1717
1718     switch (outype)
1719     {
1720         case TYPE_FLOAT:    op = INSTR_LOAD_F;   break;
1721         case TYPE_VECTOR:   op = INSTR_LOAD_V;   break;
1722         case TYPE_STRING:   op = INSTR_LOAD_S;   break;
1723         case TYPE_FIELD:    op = INSTR_LOAD_FLD; break;
1724         case TYPE_ENTITY:   op = INSTR_LOAD_ENT; break;
1725         case TYPE_FUNCTION: op = INSTR_LOAD_FNC; break;
1726 #if 0
1727         case TYPE_POINTER: op = INSTR_LOAD_I;   break;
1728         case TYPE_INTEGER: op = INSTR_LOAD_I;   break;
1729 #endif
1730         default:
1731             irerror(self->m_context, "invalid type for ir_block_create_load_from_ent: %s", type_name[outype]);
1732             return nullptr;
1733     }
1734
1735     return ir_block_create_general_instr(self, ctx, label, op, ent, field, outype);
1736 }
1737
1738 /* PHI resolving breaks the SSA, and must thus be the last
1739  * step before life-range calculation.
1740  */
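/* Sketch of what the naive lowering does: for
 *     %out = PHI [ %v1 from B1 ], [ %v2 from B2 ]
 * each incoming value is either renamed to %out (when it is a single-use,
 * single-write SSA temp) or stored into %out at the end of its source
 * block, just before that block's final jump. */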
1741
1742 static bool ir_block_naive_phi(ir_block *self);
1743 bool ir_function_naive_phi(ir_function *self)
1744 {
1745     for (auto& b : self->m_blocks)
1746         if (!ir_block_naive_phi(b.get()))
1747             return false;
1748     return true;
1749 }
1750
1751 static bool ir_block_naive_phi(ir_block *self)
1752 {
1753     size_t i;
1754     /* FIXME: optionally, create_phi can add the phis
1755      * to a list so we don't need to loop through blocks
1756      * - anyway: "don't optimize YET"
1757      */
1758     for (i = 0; i < self->m_instr.size(); ++i)
1759     {
1760         ir_instr *instr = self->m_instr[i];
1761         if (instr->m_opcode != VINSTR_PHI)
1762             continue;
1763
1764         self->m_instr.erase(self->m_instr.begin()+i);
1765         --i; /* NOTE: i+1 below */
1766
1767         for (auto &it : instr->m_phi) {
1768             ir_value *v = it.value;
1769             ir_block *b = it.from;
1770             if (v->m_store == store_value && v->m_reads.size() == 1 && v->m_writes.size() == 1) {
1771                 /* replace the value */
1772                 if (!ir_instr_op(v->m_writes[0], 0, instr->_m_ops[0], true))
1773                     return false;
1774             } else {
1775                 /* force a move instruction */
1776                 ir_instr *prevjump = b->m_instr.back();
1777                 b->m_instr.pop_back();
1778                 b->m_final = false;
1779                 instr->_m_ops[0]->m_store = store_global;
1780                 if (!ir_block_create_store(b, instr->m_context, instr->_m_ops[0], v))
1781                     return false;
1782                 instr->_m_ops[0]->m_store = store_value;
1783                 b->m_instr.push_back(prevjump);
1784                 b->m_final = true;
1785             }
1786         }
1787         delete instr;
1788     }
1789     return true;
1790 }
1791
1792 /***********************************************************************
1793  * IR Temp allocation code
1794  * Propagating value life-ranges by walking through the function backwards
1795  * until no more changes are made.
1796  * In theory this should need no more than one extra pass per level of
1797  * nested loops, though this implementation might run an additional
1798  * time for `if` nests.
1799  */
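/* Worked example (illustrative instruction eids on the left):
 *     10: x := a + b      ; write of x
 *     11: y := x * 2      ; read of x
 *     12: return y        ; x unused
 * Walking backwards, x enters 'living' at its read (eid 11) and leaves
 * it again at its write (eid 10), giving x the life-range [10, 11].
 */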
1800
1801 /* Enumerate instructions; the assigned IDs are used by the values' life-ranges
1802  */
1803 static void ir_block_enumerate(ir_block *self, size_t *_eid)
1804 {
1805     size_t eid = *_eid;
1806     for (auto &i : self->m_instr)
1807         i->m_eid = eid++;
1808     *_eid = eid;
1809 }
1810
1811 /* Enumerate blocks and instructions.
1812  * The block-enumeration is unordered!
1813  * We do not really use the block enumeration, however
1814  * the instruction enumeration is important for life-ranges.
1815  */
1816 void ir_function_enumerate(ir_function *self)
1817 {
1818     size_t instruction_id = 0;
1819     size_t block_eid = 0;
1820     for (auto& block : self->m_blocks)
1821     {
1822         /* each block now gets an additional "entry" instruction id
1823          * which we can use to avoid point-life issues
1824          */
1825         block->m_entry_id = instruction_id;
1826         block->m_eid      = block_eid;
1827         ++instruction_id;
1828         ++block_eid;
1829
1830         ir_block_enumerate(block.get(), &instruction_id);
1831     }
1832 }
1833
1834 /* Local-value allocator
1835  * After the life-ranges of all values used in a function have been
1836  * computed, we can allocate their global positions.
1837  * This is the counterpart to register-allocation in register machines.
1838  */
1839 struct function_allocator {
1840     std::vector<std::unique_ptr<ir_value>> locals;
1841     std::vector<size_t> sizes;
1842     std::vector<size_t> positions;
1843     std::vector<bool> unique;
1844 };
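/* Sketch of the reuse rule implemented below: two values may share a
 * slot if their life-ranges do not overlap and the slot is not reserved
 * for a unique life-range, e.g. (illustrative eids)
 *     a: [10,14], b: [16,20]  -> same slot
 *     a: [10,14], c: [12,18]  -> separate slots
 */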
1845
1846 static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
1847 {
1848     ir_value *slot;
1849     size_t vsize = var->size();
1850
1851     var->m_code.local = alloc->locals.size();
1852
1853     slot = new ir_value("reg", store_global, var->m_vtype);
1854     if (!slot)
1855         return false;
1856
1857     if (!slot->mergeLife(var))
1858         goto localerror;
1859
1860     alloc->locals.emplace_back(slot);
1861     alloc->sizes.push_back(vsize);
1862     alloc->unique.push_back(var->m_unique_life);
1863
1864     return true;
1865
1866 localerror:
1867     delete slot;
1868     return false;
1869 }
1870
1871 static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
1872 {
1873     size_t a;
1874
1875     if (v->m_unique_life)
1876         return function_allocator_alloc(alloc, v);
1877
1878     for (a = 0; a < alloc->locals.size(); ++a)
1879     {
1880         /* if it's reserved for a unique liferange: skip */
1881         if (alloc->unique[a])
1882             continue;
1883
1884         ir_value *slot = alloc->locals[a].get();
1885
1886         /* never resize parameters;
1887          * this will be required later when overlapping temps + locals
1888          */
1889         if (a < self->m_params.size() &&
1890             alloc->sizes[a] < v->size())
1891         {
1892             continue;
1893         }
1894
1895         if (ir_values_overlap(v, slot))
1896             continue;
1897
1898         if (!slot->mergeLife(v))
1899             return false;
1900
1901         /* adjust size for this slot */
1902         if (alloc->sizes[a] < v->size())
1903             alloc->sizes[a] = v->size();
1904
1905         v->m_code.local = a;
1906         return true;
1907     }
1908     if (a >= alloc->locals.size()) {
1909         if (!function_allocator_alloc(alloc, v))
1910             return false;
1911     }
1912     return true;
1913 }
1914
1915 bool ir_function_allocate_locals(ir_function *self)
1916 {
1917     size_t pos;
1918     bool   opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
1919
1920     function_allocator lockalloc, globalloc;
1921
1922     if (self->m_locals.empty() && self->m_values.empty())
1923         return true;
1924
1925     size_t i;
1926     for (i = 0; i < self->m_locals.size(); ++i)
1927     {
1928         ir_value *v = self->m_locals[i].get();
1929         if ((self->m_flags & IR_FLAG_MASK_NO_LOCAL_TEMPS) || !OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) {
1930             v->m_locked      = true;
1931             v->m_unique_life = true;
1932         }
1933         else if (i >= self->m_params.size())
1934             break;
1935         else
1936             v->m_locked = true; /* lock parameter locals */
1937         if (!function_allocator_alloc((v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1938             return false;
1939     }
1940     for (; i < self->m_locals.size(); ++i)
1941     {
1942         ir_value *v = self->m_locals[i].get();
1943         if (v->m_life.empty())
1944             continue;
1945         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1946             return false;
1947     }
1948
1949     /* Allocate a slot for any value that still exists */
1950     for (i = 0; i < self->m_values.size(); ++i)
1951     {
1952         ir_value *v = self->m_values[i].get();
1953
1954         if (v->m_life.empty())
1955             continue;
1956
1957         /* CALL optimization:
1958          * If the value is a parameter-temp: 1 write, 1 read from a CALL
1959          * and it's not "locked", write it to the OFS_PARM directly.
1960          */
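        /* Illustrative case: a temp written once by e.g. `t := x + 1` and
         * read only as the third argument of a CALL gets its write pointed
         * straight at OFS_PARM0 + 3*2, so no separate copy-to-parm store
         * needs to be emitted for it later.
         */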
1961         if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->m_locked && !v->m_unique_life) {
1962             if (v->m_reads.size() == 1 && v->m_writes.size() == 1 &&
1963                 (v->m_reads[0]->m_opcode == VINSTR_NRCALL ||
1964                  (v->m_reads[0]->m_opcode >= INSTR_CALL0 && v->m_reads[0]->m_opcode <= INSTR_CALL8)
1965                 )
1966                )
1967             {
1968                 size_t param;
1969                 ir_instr *call = v->m_reads[0];
1970                 if (!vec_ir_value_find(call->m_params, v, &param)) {
1971                     irerror(call->m_context, "internal error: unlocked parameter %s not found", v->m_name.c_str());
1972                     return false;
1973                 }
1974                 ++opts_optimizationcount[OPTIM_CALL_STORES];
1975                 v->m_callparam = true;
1976                 if (param < 8)
1977                     v->setCodeAddress(OFS_PARM0 + 3*param);
1978                 else {
1979                     size_t nprotos = self->m_owner->m_extparam_protos.size();
1980                     ir_value *ep;
1981                     param -= 8;
1982                     if (nprotos > param)
1983                         ep = self->m_owner->m_extparam_protos[param].get();
1984                     else
1985                     {
1986                         ep = self->m_owner->generateExtparamProto();
1987                         while (++nprotos <= param)
1988                             ep = self->m_owner->generateExtparamProto();
1989                     }
1990                     ir_instr_op(v->m_writes[0], 0, ep, true);
1991                     call->m_params[param+8] = ep;
1992                 }
1993                 continue;
1994             }
1995             if (v->m_writes.size() == 1 && v->m_writes[0]->m_opcode == INSTR_CALL0) {
1996                 v->m_store = store_return;
1997                 if (v->m_members[0]) v->m_members[0]->m_store = store_return;
1998                 if (v->m_members[1]) v->m_members[1]->m_store = store_return;
1999                 if (v->m_members[2]) v->m_members[2]->m_store = store_return;
2000                 ++opts_optimizationcount[OPTIM_CALL_STORES];
2001                 continue;
2002             }
2003         }
2004
2005         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
2006             return false;
2007     }
2008
2009     if (lockalloc.sizes.empty() && globalloc.sizes.empty())
2010         return true;
2011
2012     lockalloc.positions.push_back(0);
2013     globalloc.positions.push_back(0);
2014
2015     /* Adjust slot positions based on sizes */
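    /* (the positions are prefix sums of the sizes: e.g. sizes {1, 3, 1}
     * yield positions {0, 1, 4} and a total allocation of 5 globals) */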
2016     if (!lockalloc.sizes.empty()) {
2017         pos = (lockalloc.sizes.size() ? lockalloc.positions[0] : 0);
2018         for (i = 1; i < lockalloc.sizes.size(); ++i)
2019         {
2020             pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
2021             lockalloc.positions.push_back(pos);
2022         }
2023         self->m_allocated_locals = pos + lockalloc.sizes.back();
2024     }
2025     if (!globalloc.sizes.empty()) {
2026         pos = (globalloc.sizes.size() ? globalloc.positions[0] : 0);
2027         for (i = 1; i < globalloc.sizes.size(); ++i)
2028         {
2029             pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
2030             globalloc.positions.push_back(pos);
2031         }
2032         self->m_globaltemps = pos + globalloc.sizes.back();
2033     }
2034
2035     /* Locals need to know their new position */
2036     for (auto& local : self->m_locals) {
2037         if (local->m_locked || !opt_gt)
2038             local->m_code.local = lockalloc.positions[local->m_code.local];
2039         else
2040             local->m_code.local = globalloc.positions[local->m_code.local];
2041     }
2042     /* Take over the actual slot positions on values */
2043     for (auto& value : self->m_values) {
2044         if (value->m_locked || !opt_gt)
2045             value->m_code.local = lockalloc.positions[value->m_code.local];
2046         else
2047             value->m_code.local = globalloc.positions[value->m_code.local];
2048     }
2049
2050     return true;
2051 }
2052
2053 /* Get information about which operand
2054  * is read from, or written to.
2055  */
2056 static void ir_op_read_write(int op, size_t *read, size_t *write)
2057 {
2058     switch (op)
2059     {
2060     case VINSTR_JUMP:
2061     case INSTR_GOTO:
2062         *write = 0;
2063         *read = 0;
2064         break;
2065     case INSTR_IF:
2066     case INSTR_IFNOT:
2067 #if 0
2068     case INSTR_IF_S:
2069     case INSTR_IFNOT_S:
2070 #endif
2071     case INSTR_RETURN:
2072     case VINSTR_COND:
2073         *write = 0;
2074         *read = 1;
2075         break;
2076     case INSTR_STOREP_F:
2077     case INSTR_STOREP_V:
2078     case INSTR_STOREP_S:
2079     case INSTR_STOREP_ENT:
2080     case INSTR_STOREP_FLD:
2081     case INSTR_STOREP_FNC:
2082         *write = 0;
2083         *read  = 7;
2084         break;
2085     default:
2086         *write = 1;
2087         *read = 6;
2088         break;
2089     };
2090 }
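/* The masks are consumed one bit per operand: the default case above
 * (*write = 1, *read = 6) means operand 0 is written (bit 0) while
 * operands 1 and 2 are read (bits 1 and 2), matching the
 * `write & (1<<o)` and `read & (1<<o)` tests in the propagation loop.
 */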
2091
2092 static bool ir_block_living_add_instr(ir_block *self, size_t eid) {
2093     bool changed = false;
2094     for (auto &it : self->m_living)
2095         if (it->setAlive(eid))
2096             changed = true;
2097     return changed;
2098 }
2099
2100 static bool ir_block_living_lock(ir_block *self) {
2101     bool changed = false;
2102     for (auto &it : self->m_living) {
2103         if (it->m_locked)
2104             continue;
2105         it->m_locked = true;
2106         changed = true;
2107     }
2108     return changed;
2109 }
2110
2111 static bool ir_block_life_propagate(ir_block *self, bool *changed)
2112 {
2113     ir_instr *instr;
2114     ir_value *value;
2115     size_t i, o, mem;
2116     // bitmasks indicating which operands are read from or written to
2117     size_t read, write;
2118
2119     self->m_living.clear();
2120
2121     for (auto &prev : self->m_exits) {
2122         for (auto &it : prev->m_living)
2123             if (!vec_ir_value_find(self->m_living, it, nullptr))
2124                 self->m_living.push_back(it);
2125     }
2126
2127     i = self->m_instr.size();
2128     while (i)
2129     { --i;
2130         instr = self->m_instr[i];
2131
2132         /* See which operands are read and write operands */
2133         ir_op_read_write(instr->m_opcode, &read, &write);
2134
2135         /* Go through the 3 main operands
2136          * writes first, then reads
2137          */
2138         for (o = 0; o < 3; ++o)
2139         {
2140             if (!instr->_m_ops[o]) /* no such operand */
2141                 continue;
2142
2143             value = instr->_m_ops[o];
2144
2145             /* We only care about locals */
2146             /* we also calculate parameter liferanges so that locals
2147              * can take up parameter slots */
2148             if (value->m_store != store_value &&
2149                 value->m_store != store_local &&
2150                 value->m_store != store_param)
2151                 continue;
2152
2153             /* write operands */
2154             /* When we write to a local, we consider it "dead" for the
2155              * remaining upper part of the function, since in SSA a value
2156              * can only be written once (== created)
2157              */
2158             if (write & (1<<o))
2159             {
2160                 size_t idx;
2161                 bool in_living = vec_ir_value_find(self->m_living, value, &idx);
2162                 if (!in_living)
2163                 {
2164                     /* If the value isn't alive it hasn't been read before... */
2165                     /* TODO: See if the warning can be emitted during parsing or AST processing
2166                      * otherwise have warning printed here.
2167                      * IF printing a warning here: include filecontext_t,
2168                      * and make sure it's only printed once
2169                      * since this function is run multiple times.
2170                      */
2171                     /* con_err( "Value only written %s\n", value->m_name); */
2172                     if (value->setAlive(instr->m_eid))
2173                         *changed = true;
2174                 } else {
2175                     /* since 'living' won't contain it anymore,
2176                      * extend its life-range here, because the
2177                      * final step (A) below no longer sees it.
2178                      */
2179                     if (value->setAlive(instr->m_eid))
2180                         *changed = true;
2181                     // Then remove
2182                     self->m_living.erase(self->m_living.begin() + idx);
2183                 }
2184                 /* Removing a vector removes all members */
2185                 for (mem = 0; mem < 3; ++mem) {
2186                     if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], &idx)) {
2187                         if (value->m_members[mem]->setAlive(instr->m_eid))
2188                             *changed = true;
2189                         self->m_living.erase(self->m_living.begin() + idx);
2190                     }
2191                 }
2192                 /* Removing the last member removes the vector */
2193                 if (value->m_memberof) {
2194                     value = value->m_memberof;
2195                     for (mem = 0; mem < 3; ++mem) {
2196                         if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2197                             break;
2198                     }
2199                     if (mem == 3 && vec_ir_value_find(self->m_living, value, &idx)) {
2200                         if (value->setAlive(instr->m_eid))
2201                             *changed = true;
2202                         self->m_living.erase(self->m_living.begin() + idx);
2203                     }
2204                 }
2205             }
2206         }
2207
2208         /* These operations need a special case: they can break when the
2209          * same operand is used as both source and destination, as the
2210          * engine may read the source multiple times. */
2211         if (instr->m_opcode == INSTR_MUL_VF ||
2212             instr->m_opcode == VINSTR_BITAND_VF ||
2213             instr->m_opcode == VINSTR_BITOR_VF ||
2214             instr->m_opcode == VINSTR_BITXOR ||
2215             instr->m_opcode == VINSTR_BITXOR_VF ||
2216             instr->m_opcode == VINSTR_BITXOR_V ||
2217             instr->m_opcode == VINSTR_CROSS)
2218         {
2219             value = instr->_m_ops[2];
2220             /* the second source operand gets an additional lifetime */
2221             if (value->setAlive(instr->m_eid+1))
2222                 *changed = true;
2223             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2224                 *changed = true;
2225         }
2226
2227         if (instr->m_opcode == INSTR_MUL_FV ||
2228             instr->m_opcode == INSTR_LOAD_V ||
2229             instr->m_opcode == VINSTR_BITXOR ||
2230             instr->m_opcode == VINSTR_BITXOR_VF ||
2231             instr->m_opcode == VINSTR_BITXOR_V ||
2232             instr->m_opcode == VINSTR_CROSS)
2233         {
2234             value = instr->_m_ops[1];
2235             /* the first source operand gets an additional lifetime */
2236             if (value->setAlive(instr->m_eid+1))
2237                 *changed = true;
2238             if (value->m_memberof && value->m_memberof->setAlive(instr->m_eid+1))
2239                 *changed = true;
2240         }
2241
2242         for (o = 0; o < 3; ++o)
2243         {
2244             if (!instr->_m_ops[o]) /* no such operand */
2245                 continue;
2246
2247             value = instr->_m_ops[o];
2248
2249             /* We only care about locals */
2250             /* we also calculate parameter liferanges so that locals
2251              * can take up parameter slots */
2252             if (value->m_store != store_value &&
2253                 value->m_store != store_local &&
2254                 value->m_store != store_param)
2255                 continue;
2256
2257             /* read operands */
2258             if (read & (1<<o))
2259             {
2260                 if (!vec_ir_value_find(self->m_living, value, nullptr))
2261                     self->m_living.push_back(value);
2262                 /* reading adds the full vector */
2263                 if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2264                     self->m_living.push_back(value->m_memberof);
2265                 for (mem = 0; mem < 3; ++mem) {
2266                     if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2267                         self->m_living.push_back(value->m_members[mem]);
2268                 }
2269             }
2270         }
2271         /* PHI operands are always read operands */
2272         for (auto &it : instr->m_phi) {
2273             value = it.value;
2274             if (!vec_ir_value_find(self->m_living, value, nullptr))
2275                 self->m_living.push_back(value);
2276             /* reading adds the full vector */
2277             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2278                 self->m_living.push_back(value->m_memberof);
2279             for (mem = 0; mem < 3; ++mem) {
2280                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2281                     self->m_living.push_back(value->m_members[mem]);
2282             }
2283         }
2284
2285         /* on a call, all these values must be "locked" */
2286         if (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8) {
2287             if (ir_block_living_lock(self))
2288                 *changed = true;
2289         }
2290         /* call params are read operands too */
2291         for (auto &it : instr->m_params) {
2292             value = it;
2293             if (!vec_ir_value_find(self->m_living, value, nullptr))
2294                 self->m_living.push_back(value);
2295             /* reading adds the full vector */
2296             if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2297                 self->m_living.push_back(value->m_memberof);
2298             for (mem = 0; mem < 3; ++mem) {
2299                 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2300                     self->m_living.push_back(value->m_members[mem]);
2301             }
2302         }
2303
2304         /* (A) */
2305         if (ir_block_living_add_instr(self, instr->m_eid))
2306             *changed = true;
2307     }
2308     /* the "entry" instruction ID */
2309     if (ir_block_living_add_instr(self, self->m_entry_id))
2310         *changed = true;
2311
2312     return true;
2313 }
2314
2315 bool ir_function_calculate_liferanges(ir_function *self)
2316 {
2317     /* parameters live at 0 */
2318     for (size_t i = 0; i < self->m_params.size(); ++i)
2319         if (!self->m_locals[i].get()->setAlive(0))
2320             compile_error(self->m_context, "internal error: failed value-life merging");
2321
2322     bool changed;
2323     do {
2324         self->m_run_id++;
2325         changed = false;
2326         for (auto i = self->m_blocks.rbegin(); i != self->m_blocks.rend(); ++i)
2327             ir_block_life_propagate(i->get(), &changed);
2328     } while (changed);
2329
2330     if (self->m_blocks.size()) {
2331         ir_block *block = self->m_blocks[0].get();
2332         for (auto &it : block->m_living) {
2333             ir_value *v = it;
2334             if (v->m_store != store_local)
2335                 continue;
2336             if (v->m_vtype == TYPE_VECTOR)
2337                 continue;
2338             self->m_flags |= IR_FLAG_HAS_UNINITIALIZED;
2339             /* find the instruction reading from it */
2340             size_t s = 0;
2341             for (; s < v->m_reads.size(); ++s) {
2342                 if (v->m_reads[s]->m_eid == v->m_life[0].end)
2343                     break;
2344             }
2345             if (s < v->m_reads.size()) {
2346                 if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2347                               "variable `%s` may be used uninitialized in this function\n"
2348                               " -> %s:%i",
2349                               v->m_name.c_str(),
2350                               v->m_reads[s]->m_context.file, v->m_reads[s]->m_context.line)
2351                    )
2352                 {
2353                     return false;
2354                 }
2355                 continue;
2356             }
2357             if (v->m_memberof) {
2358                 ir_value *vec = v->m_memberof;
2359                 for (s = 0; s < vec->m_reads.size(); ++s) {
2360                     if (vec->m_reads[s]->m_eid == v->m_life[0].end)
2361                         break;
2362                 }
2363                 if (s < vec->m_reads.size()) {
2364                     if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2365                                   "variable `%s` may be used uninitialized in this function\n"
2366                                   " -> %s:%i",
2367                                   v->m_name.c_str(),
2368                                   vec->m_reads[s]->m_context.file, vec->m_reads[s]->m_context.line)
2369                        )
2370                     {
2371                         return false;
2372                     }
2373                     continue;
2374                 }
2375             }
2376             if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2377                           "variable `%s` may be used uninitialized in this function", v->m_name.c_str()))
2378             {
2379                 return false;
2380             }
2381         }
2382     }
2383     return true;
2384 }
2385
2386 /***********************************************************************
2387  * IR Code-Generation
2388  *
2389  * Since the IR has the convention of putting 'write' operands
2390  * at the beginning, we have to rotate the operands of instructions
2391  * properly in order to generate valid QCVM code.
2392  *
2393  * Having destinations at a fixed position is more convenient. In QC
2394  * this is *mostly* OPC, but FTE adds at least 2 instructions which
2395  * read from OPA and store to OPB rather than OPC, which is
2396  * partially the reason why the implementation of these instructions
2397  * in darkplaces has been delayed for so long.
2398  *
2399  * Breaking conventions is annoying...
2400  */
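/* Rough sketch of the rotation for a plain three-operand instruction
 * (e.g. ADD_F, not one of the FTE special cases): the IR stores
 * `dst = a + b` as _m_ops = { dst, a, b }, and the emitted statement
 * becomes o1 = a, o2 = b, o3 = dst; see the "general order of operands"
 * code near the end of gen_blocks_recursive().
 */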
2401 static bool gen_global_field(code_t *code, ir_value *global)
2402 {
2403     if (global->m_hasvalue)
2404     {
2405         ir_value *fld = global->m_constval.vpointer;
2406         if (!fld) {
2407             irerror(global->m_context, "Invalid field constant with no field: %s", global->m_name.c_str());
2408             return false;
2409         }
2410
2411         /* copy the field's value */
2412         global->setCodeAddress(code->globals.size());
2413         code->globals.push_back(fld->m_code.fieldaddr);
2414         if (global->m_fieldtype == TYPE_VECTOR) {
2415             code->globals.push_back(fld->m_code.fieldaddr+1);
2416             code->globals.push_back(fld->m_code.fieldaddr+2);
2417         }
2418     }
2419     else
2420     {
2421         global->setCodeAddress(code->globals.size());
2422         code->globals.push_back(0);
2423         if (global->m_fieldtype == TYPE_VECTOR) {
2424             code->globals.push_back(0);
2425             code->globals.push_back(0);
2426         }
2427     }
2428     if (global->m_code.globaladdr < 0)
2429         return false;
2430     return true;
2431 }
2432
2433 static bool gen_global_pointer(code_t *code, ir_value *global)
2434 {
2435     if (global->m_hasvalue)
2436     {
2437         ir_value *target = global->m_constval.vpointer;
2438         if (!target) {
2439             irerror(global->m_context, "Invalid pointer constant: %s", global->m_name.c_str());
2440             /* nullptr pointers are pointing to the nullptr constant, which also
2441              * sits at address 0, but still has an ir_value for itself.
2442              */
2443             return false;
2444         }
2445
2446         /* Here, relocations ARE possible - in fteqcc-enhanced-qc:
2447          * void() foo; <- proto
2448          * void() *fooptr = &foo;
2449          * void() foo = { code }
2450          */
2451         if (!target->m_code.globaladdr) {
2452             /* FIXME: Check for the constant nullptr ir_value!
2453              * because then code.globaladdr being 0 is valid.
2454              */
2455             irerror(global->m_context, "FIXME: Relocation support");
2456             return false;
2457         }
2458
2459         global->setCodeAddress(code->globals.size());
2460         code->globals.push_back(target->m_code.globaladdr);
2461     }
2462     else
2463     {
2464         global->setCodeAddress(code->globals.size());
2465         code->globals.push_back(0);
2466     }
2467     if (global->m_code.globaladdr < 0)
2468         return false;
2469     return true;
2470 }
2471
2472 static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *block)
2473 {
2474     prog_section_statement_t stmt;
2475     ir_instr *instr;
2476     ir_block *target;
2477     ir_block *ontrue;
2478     ir_block *onfalse;
2479     size_t    stidx;
2480     size_t    i;
2481     int       j;
2482
2483     block->m_generated = true;
2484     block->m_code_start = code->statements.size();
2485     for (i = 0; i < block->m_instr.size(); ++i)
2486     {
2487         instr = block->m_instr[i];
2488
2489         if (instr->m_opcode == VINSTR_PHI) {
2490             irerror(block->m_context, "cannot generate virtual instruction (phi)");
2491             return false;
2492         }
2493
2494         if (instr->m_opcode == VINSTR_JUMP) {
2495             target = instr->m_bops[0];
2496             /* for unconditional jumps, if the target hasn't been generated
2497              * yet, we generate it right here.
2498              */
2499             if (!target->m_generated)
2500                 return gen_blocks_recursive(code, func, target);
2501
2502             /* otherwise we generate a jump instruction */
2503             stmt.opcode = INSTR_GOTO;
2504             stmt.o1.s1 = target->m_code_start - code->statements.size();
2505             stmt.o2.s1 = 0;
2506             stmt.o3.s1 = 0;
2507             if (stmt.o1.s1 != 1)
2508                 code_push_statement(code, &stmt, instr->m_context);
2509
2510             /* no further instructions can be in this block */
2511             return true;
2512         }
2513
2514         if (instr->m_opcode == VINSTR_BITXOR) {
2515             stmt.opcode = INSTR_BITOR;
2516             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2517             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2518             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2519             code_push_statement(code, &stmt, instr->m_context);
2520             stmt.opcode = INSTR_BITAND;
2521             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2522             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2523             stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2524             code_push_statement(code, &stmt, instr->m_context);
2525             stmt.opcode = INSTR_SUB_F;
2526             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2527             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2528             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2529             code_push_statement(code, &stmt, instr->m_context);
2530
2531             /* instruction generated */
2532             continue;
2533         }
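        /* The lowering above relies on a ^ b == (a | b) - (a & b), which
         * holds for the integral float values QC's BITOR/BITAND operate on;
         * the VINSTR_BITXOR_V and VINSTR_BITXOR_VF cases below apply the
         * same identity component-wise.
         */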
2534
2535         if (instr->m_opcode == VINSTR_BITAND_V) {
2536             stmt.opcode = INSTR_BITAND;
2537             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2538             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2539             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2540             code_push_statement(code, &stmt, instr->m_context);
2541             ++stmt.o1.s1;
2542             ++stmt.o2.s1;
2543             ++stmt.o3.s1;
2544             code_push_statement(code, &stmt, instr->m_context);
2545             ++stmt.o1.s1;
2546             ++stmt.o2.s1;
2547             ++stmt.o3.s1;
2548             code_push_statement(code, &stmt, instr->m_context);
2549
2550             /* instruction generated */
2551             continue;
2552         }
2553
2554         if (instr->m_opcode == VINSTR_BITOR_V) {
2555             stmt.opcode = INSTR_BITOR;
2556             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2557             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2558             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2559             code_push_statement(code, &stmt, instr->m_context);
2560             ++stmt.o1.s1;
2561             ++stmt.o2.s1;
2562             ++stmt.o3.s1;
2563             code_push_statement(code, &stmt, instr->m_context);
2564             ++stmt.o1.s1;
2565             ++stmt.o2.s1;
2566             ++stmt.o3.s1;
2567             code_push_statement(code, &stmt, instr->m_context);
2568
2569             /* instruction generated */
2570             continue;
2571         }
2572
2573         if (instr->m_opcode == VINSTR_BITXOR_V) {
2574             for (j = 0; j < 3; ++j) {
2575                 stmt.opcode = INSTR_BITOR;
2576                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2577                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2578                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2579                 code_push_statement(code, &stmt, instr->m_context);
2580                 stmt.opcode = INSTR_BITAND;
2581                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2582                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
2583                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2584                 code_push_statement(code, &stmt, instr->m_context);
2585             }
2586             stmt.opcode = INSTR_SUB_V;
2587             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2588             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2589             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2590             code_push_statement(code, &stmt, instr->m_context);
2591
2592             /* instruction generated */
2593             continue;
2594         }
2595
2596         if (instr->m_opcode == VINSTR_BITAND_VF) {
2597             stmt.opcode = INSTR_BITAND;
2598             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2599             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2600             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2601             code_push_statement(code, &stmt, instr->m_context);
2602             ++stmt.o1.s1;
2603             ++stmt.o3.s1;
2604             code_push_statement(code, &stmt, instr->m_context);
2605             ++stmt.o1.s1;
2606             ++stmt.o3.s1;
2607             code_push_statement(code, &stmt, instr->m_context);
2608
2609             /* instruction generated */
2610             continue;
2611         }
2612
2613         if (instr->m_opcode == VINSTR_BITOR_VF) {
2614             stmt.opcode = INSTR_BITOR;
2615             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
2616             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2617             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2618             code_push_statement(code, &stmt, instr->m_context);
2619             ++stmt.o1.s1;
2620             ++stmt.o3.s1;
2621             code_push_statement(code, &stmt, instr->m_context);
2622             ++stmt.o1.s1;
2623             ++stmt.o3.s1;
2624             code_push_statement(code, &stmt, instr->m_context);
2625
2626             /* instruction generated */
2627             continue;
2628         }
2629
2630         if (instr->m_opcode == VINSTR_BITXOR_VF) {
2631             for (j = 0; j < 3; ++j) {
2632                 stmt.opcode = INSTR_BITOR;
2633                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2634                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2635                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2636                 code_push_statement(code, &stmt, instr->m_context);
2637                 stmt.opcode = INSTR_BITAND;
2638                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
2639                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
2640                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2641                 code_push_statement(code, &stmt, instr->m_context);
2642             }
2643             stmt.opcode = INSTR_SUB_V;
2644             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2645             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2646             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2647             code_push_statement(code, &stmt, instr->m_context);
2648
2649             /* instruction generated */
2650             continue;
2651         }
2652
2653         if (instr->m_opcode == VINSTR_CROSS) {
2654             stmt.opcode = INSTR_MUL_F;
2655             for (j = 0; j < 3; ++j) {
2656                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 1) % 3;
2657                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 2) % 3;
2658                 stmt.o3.s1 = instr->_m_ops[0]->codeAddress() + j;
2659                 code_push_statement(code, &stmt, instr->m_context);
2660                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 2) % 3;
2661                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 1) % 3;
2662                 stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
2663                 code_push_statement(code, &stmt, instr->m_context);
2664             }
2665             stmt.opcode = INSTR_SUB_V;
2666             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
2667             stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
2668             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
2669             code_push_statement(code, &stmt, instr->m_context);
2670
2671             /* instruction generated */
2672             continue;
2673         }
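        /* The loop above expands the usual cross product
         *     out[j] = a[(j+1)%3] * b[(j+2)%3] - a[(j+2)%3] * b[(j+1)%3]
         * with the second products collected in m_vinstr_temp[0] so that
         * the destination may safely alias one of the sources.
         */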
2674
2675         if (instr->m_opcode == VINSTR_COND) {
2676             ontrue  = instr->m_bops[0];
2677             onfalse = instr->m_bops[1];
2678             /* TODO: have the AST signal which block should
2679              * come first: eg. optimize IFs without ELSE...
2680              */
2681
2682             stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2683             stmt.o2.u1 = 0;
2684             stmt.o3.s1 = 0;
2685
2686             if (ontrue->m_generated) {
2687                 stmt.opcode = INSTR_IF;
2688                 stmt.o2.s1 = ontrue->m_code_start - code->statements.size();
2689                 if (stmt.o2.s1 != 1)
2690                     code_push_statement(code, &stmt, instr->m_context);
2691             }
2692             if (onfalse->m_generated) {
2693                 stmt.opcode = INSTR_IFNOT;
2694                 stmt.o2.s1 = onfalse->m_code_start - code->statements.size();
2695                 if (stmt.o2.s1 != 1)
2696                     code_push_statement(code, &stmt, instr->m_context);
2697             }
2698             if (!ontrue->m_generated) {
2699                 if (onfalse->m_generated)
2700                     return gen_blocks_recursive(code, func, ontrue);
2701             }
2702             if (!onfalse->m_generated) {
2703                 if (ontrue->m_generated)
2704                     return gen_blocks_recursive(code, func, onfalse);
2705             }
2706             /* neither ontrue nor onfalse has been generated yet */
2707             stmt.opcode = INSTR_IFNOT;
2708             if (!instr->m_likely) {
2709                 /* Honor the likelihood hint */
2710                 ir_block *tmp = onfalse;
2711                 stmt.opcode = INSTR_IF;
2712                 onfalse = ontrue;
2713                 ontrue = tmp;
2714             }
2715             stidx = code->statements.size();
2716             code_push_statement(code, &stmt, instr->m_context);
2717             /* on false we jump, so add ontrue-path */
2718             if (!gen_blocks_recursive(code, func, ontrue))
2719                 return false;
2720             /* fixup the jump address */
2721             code->statements[stidx].o2.s1 = code->statements.size() - stidx;
2722             /* generate onfalse path */
2723             if (onfalse->m_generated) {
2724                 /* fixup the jump address */
2725                 code->statements[stidx].o2.s1 = onfalse->m_code_start - stidx;
2726                 if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2727                     code->statements[stidx] = code->statements[stidx+1];
2728                     if (code->statements[stidx].o1.s1 < 0)
2729                         code->statements[stidx].o1.s1++;
2730                     code_pop_statement(code);
2731                 }
2732                 stmt.opcode = code->statements.back().opcode;
2733                 if (stmt.opcode == INSTR_GOTO ||
2734                     stmt.opcode == INSTR_IF ||
2735                     stmt.opcode == INSTR_IFNOT ||
2736                     stmt.opcode == INSTR_RETURN ||
2737                     stmt.opcode == INSTR_DONE)
2738                 {
2739                     /* no use jumping from here */
2740                     return true;
2741                 }
2742                 /* may have been generated in the previous recursive call */
2743                 stmt.opcode = INSTR_GOTO;
2744                 stmt.o1.s1 = onfalse->m_code_start - code->statements.size();
2745                 stmt.o2.s1 = 0;
2746                 stmt.o3.s1 = 0;
2747                 if (stmt.o1.s1 != 1)
2748                     code_push_statement(code, &stmt, instr->m_context);
2749                 return true;
2750             }
2751             else if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2752                 code->statements[stidx] = code->statements[stidx+1];
2753                 if (code->statements[stidx].o1.s1 < 0)
2754                     code->statements[stidx].o1.s1++;
2755                 code_pop_statement(code);
2756             }
2757             /* if not, generate now */
2758             return gen_blocks_recursive(code, func, onfalse);
2759         }
2760
2761         if ( (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8)
2762            || instr->m_opcode == VINSTR_NRCALL)
2763         {
2764             size_t p, first;
2765             ir_value *retvalue;
2766
2767             first = instr->m_params.size();
2768             if (first > 8)
2769                 first = 8;
2770             for (p = 0; p < first; ++p)
2771             {
2772                 ir_value *param = instr->m_params[p];
2773                 if (param->m_callparam)
2774                     continue;
2775
2776                 stmt.opcode = INSTR_STORE_F;
2777                 stmt.o3.u1 = 0;
2778
2779                 if (param->m_vtype == TYPE_FIELD)
2780                     stmt.opcode = field_store_instr[param->m_fieldtype];
2781                 else if (param->m_vtype == TYPE_NIL)
2782                     stmt.opcode = INSTR_STORE_V;
2783                 else
2784                     stmt.opcode = type_store_instr[param->m_vtype];
2785                 stmt.o1.u1 = param->codeAddress();
2786                 stmt.o2.u1 = OFS_PARM0 + 3 * p;
2787
2788                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2789                     /* fetch 3 separate floats */
2790                     stmt.opcode = INSTR_STORE_F;
2791                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2792                     code_push_statement(code, &stmt, instr->m_context);
2793                     stmt.o2.u1++;
2794                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2795                     code_push_statement(code, &stmt, instr->m_context);
2796                     stmt.o2.u1++;
2797                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2798                     code_push_statement(code, &stmt, instr->m_context);
2799                 }
2800                 else
2801                     code_push_statement(code, &stmt, instr->m_context);
2802             }
2803             /* Now handle extparams */
2804             first = instr->m_params.size();
2805             for (; p < first; ++p)
2806             {
2807                 ir_builder *ir = func->m_owner;
2808                 ir_value *param = instr->m_params[p];
2809                 ir_value *targetparam;
2810
2811                 if (param->m_callparam)
2812                     continue;
2813
2814                 if (p-8 >= ir->m_extparams.size())
2815                     ir->generateExtparam();
2816
2817                 targetparam = ir->m_extparams[p-8];
2818
2819                 stmt.opcode = INSTR_STORE_F;
2820                 stmt.o3.u1 = 0;
2821
2822                 if (param->m_vtype == TYPE_FIELD)
2823                     stmt.opcode = field_store_instr[param->m_fieldtype];
2824                 else if (param->m_vtype == TYPE_NIL)
2825                     stmt.opcode = INSTR_STORE_V;
2826                 else
2827                     stmt.opcode = type_store_instr[param->m_vtype];
2828                 stmt.o1.u1 = param->codeAddress();
2829                 stmt.o2.u1 = targetparam->codeAddress();
2830                 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2831                     /* fetch 3 separate floats */
2832                     stmt.opcode = INSTR_STORE_F;
2833                     stmt.o1.u1 = param->m_members[0]->codeAddress();
2834                     code_push_statement(code, &stmt, instr->m_context);
2835                     stmt.o2.u1++;
2836                     stmt.o1.u1 = param->m_members[1]->codeAddress();
2837                     code_push_statement(code, &stmt, instr->m_context);
2838                     stmt.o2.u1++;
2839                     stmt.o1.u1 = param->m_members[2]->codeAddress();
2840                     code_push_statement(code, &stmt, instr->m_context);
2841                 }
2842                 else
2843                     code_push_statement(code, &stmt, instr->m_context);
2844             }
2845
2846             stmt.opcode = INSTR_CALL0 + instr->m_params.size();
2847             if (stmt.opcode > INSTR_CALL8)
2848                 stmt.opcode = INSTR_CALL8;
2849             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2850             stmt.o2.u1 = 0;
2851             stmt.o3.u1 = 0;
2852             code_push_statement(code, &stmt, instr->m_context);
2853
2854             retvalue = instr->_m_ops[0];
2855             if (retvalue && retvalue->m_store != store_return &&
2856                 (retvalue->m_store == store_global || retvalue->m_life.size()))
2857             {
2858                 /* not to be kept in OFS_RETURN */
2859                 if (retvalue->m_vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS))
2860                     stmt.opcode = field_store_instr[retvalue->m_fieldtype];
2861                 else
2862                     stmt.opcode = type_store_instr[retvalue->m_vtype];
2863                 stmt.o1.u1 = OFS_RETURN;
2864                 stmt.o2.u1 = retvalue->codeAddress();
2865                 stmt.o3.u1 = 0;
2866                 code_push_statement(code, &stmt, instr->m_context);
2867             }
2868             continue;
2869         }
2870
2871         if (instr->m_opcode == INSTR_STATE) {
2872             stmt.opcode = instr->m_opcode;
2873             if (instr->_m_ops[0])
2874                 stmt.o1.u1 = instr->_m_ops[0]->codeAddress();
2875             if (instr->_m_ops[1])
2876                 stmt.o2.u1 = instr->_m_ops[1]->codeAddress();
2877             stmt.o3.u1 = 0;
2878             code_push_statement(code, &stmt, instr->m_context);
2879             continue;
2880         }
2881
2882         stmt.opcode = instr->m_opcode;
2883         stmt.o1.u1 = 0;
2884         stmt.o2.u1 = 0;
2885         stmt.o3.u1 = 0;
2886
2887         /* This is the general order of operands */
2888         if (instr->_m_ops[0])
2889             stmt.o3.u1 = instr->_m_ops[0]->codeAddress();
2890
2891         if (instr->_m_ops[1])
2892             stmt.o1.u1 = instr->_m_ops[1]->codeAddress();
2893
2894         if (instr->_m_ops[2])
2895             stmt.o2.u1 = instr->_m_ops[2]->codeAddress();
2896
2897         if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
2898         {
2899             stmt.o1.u1 = stmt.o3.u1;
2900             stmt.o3.u1 = 0;
2901         }
2902         else if ((stmt.opcode >= INSTR_STORE_F &&
2903                   stmt.opcode <= INSTR_STORE_FNC) ||
2904                  (stmt.opcode >= INSTR_STOREP_F &&
2905                   stmt.opcode <= INSTR_STOREP_FNC))
2906         {
2907             /* 2-operand instructions with A -> B */
2908             stmt.o2.u1 = stmt.o3.u1;
2909             stmt.o3.u1 = 0;
2910
2911             /* tiny optimization, don't output
2912              * STORE a, a
2913              */
2914             if (stmt.o2.u1 == stmt.o1.u1 &&
2915                 OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
2916             {
2917                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
2918                 continue;
2919             }
2920         }
2921         code_push_statement(code, &stmt, instr->m_context);
2922     }
2923     return true;
2924 }
2925
2926 static bool gen_function_code(code_t *code, ir_function *self)
2927 {
2928     ir_block *block;
2929     prog_section_statement_t stmt, *retst;
2930
2931     /* Starting from the entry point, we generate blocks "as they come"
2932      * for now. Dead blocks will obviously not be translated.
2933      */
2934     if (self->m_blocks.empty()) {
2935         irerror(self->m_context, "Function '%s' declared without body.", self->m_name.c_str());
2936         return false;
2937     }
2938
2939     block = self->m_blocks[0].get();
2940     if (block->m_generated)
2941         return true;
2942
2943     if (!gen_blocks_recursive(code, self, block)) {
2944         irerror(self->m_context, "failed to generate blocks for '%s'", self->m_name.c_str());
2945         return false;
2946     }
2947
2948     /* code_write and qcvm -disasm need to know that the function ends here */
2949     retst = &code->statements.back();
2950     if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) &&
2951         self->m_outtype == TYPE_VOID &&
2952         retst->opcode == INSTR_RETURN &&
2953         !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1)
2954     {
2955         retst->opcode = INSTR_DONE;
2956         ++opts_optimizationcount[OPTIM_VOID_RETURN];
2957     } else {
2958         lex_ctx_t last;
2959
2960         stmt.opcode = INSTR_DONE;
2961         stmt.o1.u1  = 0;
2962         stmt.o2.u1  = 0;
2963         stmt.o3.u1  = 0;
2964         last.line   = code->linenums.back();
2965         last.column = code->columnnums.back();
2966
2967         code_push_statement(code, &stmt, last);
2968     }
2969     return true;
2970 }
2971
2972 qcint_t ir_builder::filestring(const char *filename)
2973 {
2974     /* NOTE: filename pointers are copied, we never strdup them,
2975      * thus we can use pointer-comparison to find the string.
2976      */
2977     qcint_t  str;
2978
2979     for (size_t i = 0; i != m_filenames.size(); ++i) {
2980         if (!strcmp(m_filenames[i], filename))
2981             return i;
2982     }
2983
2984     str = code_genstring(m_code.get(), filename);
2985     m_filenames.push_back(filename);
2986     m_filestrings.push_back(str);
2987     return str;
2988 }
2989
2990 bool ir_builder::generateGlobalFunction(ir_value *global)
2991 {
2992     prog_section_function_t fun;
2993     ir_function            *irfun;
2994
2995     size_t i;
2996
2997     if (!global->m_hasvalue || (!global->m_constval.vfunc)) {
2998         irerror(global->m_context, "Invalid state of function-global: not constant: %s", global->m_name.c_str());
2999         return false;
3000     }
3001
3002     irfun = global->m_constval.vfunc;
3003     fun.name = global->m_code.name;
3004     fun.file = filestring(global->m_context.file);
3005     fun.profile = 0; /* always 0 */
3006     fun.nargs = irfun->m_params.size();
3007     if (fun.nargs > 8)
3008         fun.nargs = 8;
3009
3010     for (i = 0; i < 8; ++i) {
3011         if ((int32_t)i >= fun.nargs)
3012             fun.argsize[i] = 0;
3013         else
3014             fun.argsize[i] = type_sizeof_[irfun->m_params[i]];
3015     }
3016
3017     fun.firstlocal = 0;
3018     fun.locals = irfun->m_allocated_locals;
3019
3020     if (irfun->m_builtin)
3021         fun.entry = irfun->m_builtin+1;
3022     else {
3023         irfun->m_code_function_def = m_code->functions.size();
3024         fun.entry = m_code->statements.size();
3025     }
3026
3027     m_code->functions.push_back(fun);
3028     return true;
3029 }
3030
3031 ir_value* ir_builder::generateExtparamProto()
3032 {
3033     char      name[128];
3034
3035     util_snprintf(name, sizeof(name), "EXTPARM#%i", (int)(m_extparam_protos.size()));
3036     ir_value *global = new ir_value(name, store_global, TYPE_VECTOR);
3037     m_extparam_protos.emplace_back(global);
3038
3039     return global;
3040 }
3041
3042 void ir_builder::generateExtparam()
3043 {
3044     prog_section_def_t def;
3045     ir_value          *global;
3046
3047     if (m_extparam_protos.size() < m_extparams.size()+1)
3048         global = generateExtparamProto();
3049     else
3050         global = m_extparam_protos[m_extparams.size()].get();
3051
3052     def.name = code_genstring(m_code.get(), global->m_name.c_str());
3053     def.type = TYPE_VECTOR;
3054     def.offset = m_code->globals.size();
3055
3056     m_code->defs.push_back(def);
3057
3058     global->setCodeAddress(def.offset);
3059
3060     m_code->globals.push_back(0);
3061     m_code->globals.push_back(0);
3062     m_code->globals.push_back(0);
3063
3064     m_extparams.emplace_back(global);
3065 }
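/* Illustrative call shape: arguments beyond the eighth do not fit into
 * the eight OFS_PARM0-based slots, so argument 9 is stored into the
 * vector global EXTPARM#0, argument 10 into EXTPARM#1, and so on; on the
 * callee side gen_function_extparam_copy() copies them back into the
 * local parameter slots.
 */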
3066
3067 static bool gen_function_extparam_copy(code_t *code, ir_function *self)
3068 {
3069     ir_builder *ir = self->m_owner;
3070
3071     size_t numparams = self->m_params.size();
3072     if (!numparams)
3073         return true;
3074
3075     prog_section_statement_t stmt;
3076     stmt.opcode = INSTR_STORE_F;
3077     stmt.o3.s1 = 0;
3078     for (size_t i = 8; i < numparams; ++i) {
3079         size_t ext = i - 8;
3080         if (ext >= ir->m_extparams.size())
3081             ir->generateExtparam();
3082
3083         ir_value *ep = ir->m_extparams[ext];
3084
3085         stmt.opcode = type_store_instr[self->m_locals[i]->m_vtype];
3086         if (self->m_locals[i]->m_vtype == TYPE_FIELD &&
3087             self->m_locals[i]->m_fieldtype == TYPE_VECTOR)
3088         {
3089             stmt.opcode = INSTR_STORE_V;
3090         }
3091         stmt.o1.u1 = ep->codeAddress();
3092         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3093         code_push_statement(code, &stmt, self->m_context);
3094     }
3095
3096     return true;
3097 }
3098
3099 static bool gen_function_varargs_copy(code_t *code, ir_function *self)
3100 {
3101     size_t i, ext, numparams, maxparams;
3102
3103     ir_builder *ir = self->m_owner;
3104     ir_value   *ep;
3105     prog_section_statement_t stmt;
3106
3107     numparams = self->m_params.size();
3108     if (!numparams)
3109         return true;
3110
3111     stmt.opcode = INSTR_STORE_V;
3112     stmt.o3.s1 = 0;
3113     maxparams = numparams + self->m_max_varargs;
3114     for (i = numparams; i < maxparams; ++i) {
3115         if (i < 8) {
3116             stmt.o1.u1 = OFS_PARM0 + 3*i;
3117             stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3118             code_push_statement(code, &stmt, self->m_context);
3119             continue;
3120         }
3121         ext = i - 8;
3122         while (ext >= ir->m_extparams.size())
3123             ir->generateExtparam();
3124
3125         ep = ir->m_extparams[ext];
3126
3127         stmt.o1.u1 = ep->codeAddress();
3128         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
3129         code_push_statement(code, &stmt, self->m_context);
3130     }
3131
3132     return true;
3133 }
3134
3135 bool ir_builder::generateFunctionLocals(ir_value *global)
3136 {
3137     prog_section_function_t *def;
3138     ir_function             *irfun;
3139     uint32_t                 firstlocal, firstglobal;
3140
3141     irfun = global->m_constval.vfunc;
3142     def   = &m_code->functions[0] + irfun->m_code_function_def;
3143
3144     if (OPTS_OPTION_BOOL(OPTION_G) ||
3145         !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS)        ||
3146         (irfun->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3147     {
3148         firstlocal = def->firstlocal = m_code->globals.size();
3149     } else {
3150         firstlocal = def->firstlocal = m_first_common_local;
3151         ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
3152     }
3153
3154     firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? m_first_common_globaltemp : firstlocal);
3155
3156     for (size_t i = m_code->globals.size(); i < firstlocal + irfun->m_allocated_locals; ++i)
3157         m_code->globals.push_back(0);
3158
3159     for (auto& lp : irfun->m_locals) {
3160         ir_value *v = lp.get();
3161         if (v->m_locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
3162             v->setCodeAddress(firstlocal + v->m_code.local);
3163             if (!generateGlobal(v, true)) {
3164                 irerror(v->m_context, "failed to generate local %s", v->m_name.c_str());
3165                 return false;
3166             }
3167         }
3168         else
3169             v->setCodeAddress(firstglobal + v->m_code.local);
3170     }
3171     for (auto& vp : irfun->m_values) {
3172         ir_value *v = vp.get();
3173         if (v->m_callparam)
3174             continue;
3175         if (v->m_locked)
3176             v->setCodeAddress(firstlocal + v->m_code.local);
3177         else
3178             v->setCodeAddress(firstglobal + v->m_code.local);
3179     }
3180     return true;
3181 }
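/*
 * Placement summary: firstlocal is fresh global space when debugging, when
 * overlap-locals is disabled, or when the function is flagged no-overlap;
 * otherwise it is the shared m_first_common_local region. Locals that are
 * locked (or all of them when global-temps is off) are placed there with real
 * defs, locked temporaries are placed there without defs, and everything else
 * is pointed at firstglobal, i.e. the shared global-temp region when that
 * optimization is enabled and firstlocal otherwise.
 */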
3182
3183 bool ir_builder::generateGlobalFunctionCode(ir_value *global)
3184 {
3185     prog_section_function_t *fundef;
3186     ir_function             *irfun;
3187
3188     irfun = global->m_constval.vfunc;
3189     if (!irfun) {
3190         if (global->m_cvq == CV_NONE) {
3191             if (irwarning(global->m_context, WARN_IMPLICIT_FUNCTION_POINTER,
3192                           "function `%s` has no body and in QC implicitly becomes a function-pointer",
3193                           global->m_name.c_str()))
3194             {
3195                 /* Not bailing out just now. If this happens a lot, you don't want to have
3196                  * to rerun gmqcc for each such function.
3197                  */
3198
3199                 /* return false; */
3200             }
3201         }
3202         /* this was a function pointer, don't generate code for those */
3203         return true;
3204     }
3205
3206     if (irfun->m_builtin)
3207         return true;
3208
3209     /*
3210      * If there is no definition and the value is erasable, we can skip
3211      * emitting the function entirely.
3212      */
3213     if (global->m_flags & IR_FLAG_ERASABLE && irfun->m_code_function_def < 0) {
3214         return true;
3215     }
3216
3217     if (irfun->m_code_function_def < 0) {
3218         irerror(irfun->m_context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->m_name.c_str());
3219         return false;
3220     }
3221     fundef = &m_code->functions[irfun->m_code_function_def];
3222
3223     fundef->entry = m_code->statements.size();
3224     if (!generateFunctionLocals(global)) {
3225         irerror(irfun->m_context, "Failed to generate locals for function %s", irfun->m_name.c_str());
3226         return false;
3227     }
3228     if (!gen_function_extparam_copy(m_code.get(), irfun)) {
3229         irerror(irfun->m_context, "Failed to generate extparam-copy code for function %s", irfun->m_name.c_str());
3230         return false;
3231     }
3232     if (irfun->m_max_varargs && !gen_function_varargs_copy(m_code.get(), irfun)) {
3233         irerror(irfun->m_context, "Failed to generate vararg-copy code for function %s", irfun->m_name.c_str());
3234         return false;
3235     }
3236     if (!gen_function_code(m_code.get(), irfun)) {
3237         irerror(irfun->m_context, "Failed to generate code for function %s", irfun->m_name.c_str());
3238         return false;
3239     }
3240     return true;
3241 }
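/*
 * Note on ordering: fundef->entry is recorded before any statement is
 * emitted, so it ends up pointing at the extparam/vararg copy statements (if
 * any) that precede the body. The locals have to be laid out first because
 * those copy statements store into the freshly assigned local addresses.
 */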
3242
3243 static void gen_vector_defs(code_t *code, prog_section_def_t def, const char *name, int type)
3244 {
3245     char  *component;
3246     size_t len, i;
3247
3248     if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3249         return;
3250
3251     def.type = type;
3252
3253     len = strlen(name);
3254
3255     component = (char*)mem_a(len+3);
3256     memcpy(component, name, len);
3257     len += 2;
3258     component[len-0] = 0;
3259     component[len-2] = '_';
3260
3261     component[len-1] = 'x';
3262
3263     for (i = 0; i < 3; ++i) {
3264         def.name = code_genstring(code, component);
3265         code->defs.push_back(def);
3266         def.offset++;
3267         component[len-1]++;
3268     }
3269
3270     mem_d(component);
3271 }
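/*
 * e.g. for a vector def named "origin" at offset N this appends three
 * component defs "origin_x", "origin_y" and "origin_z" at offsets N, N+1 and
 * N+2; '#' immediates and builds with the SINGLE_VECTOR_DEFS flag skip this
 * entirely.
 */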
3272
3273 static void gen_vector_fields(code_t *code, prog_section_field_t fld, const char *name)
3274 {
3275     char  *component;
3276     size_t len, i;
3277
3278     if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3279         return;
3280
3281     fld.type = TYPE_FLOAT;
3282
3283     len = strlen(name);
3284
3285     component = (char*)mem_a(len+3);
3286     memcpy(component, name, len);
3287     len += 2;
3288     component[len-0] = 0;
3289     component[len-2] = '_';
3290
3291     component[len-1] = 'x';
3292
3293     for (i = 0; i < 3; ++i) {
3294         fld.name = code_genstring(code, component);
3295         code->fields.push_back(fld);
3296         fld.offset++;
3297         component[len-1]++;
3298     }
3299
3300     mem_d(component);
3301 }
3302
3303 bool ir_builder::generateGlobal(ir_value *global, bool islocal)
3304 {
3305     size_t             i;
3306     int32_t           *iptr;
3307     prog_section_def_t def;
3308     bool               pushdef = opts.optimizeoff;
3309
3310     /* we don't generate split-vectors */
3311     if (global->m_vtype == TYPE_VECTOR && (global->m_flags & IR_FLAG_SPLIT_VECTOR))
3312         return true;
3313
3314     def.type = global->m_vtype;
3315     def.offset = m_code->globals.size();
3316     def.name = 0;
3317     if (OPTS_OPTION_BOOL(OPTION_G) || !islocal)
3318     {
3319         pushdef = true;
3320
3321         /*
3322          * if we're erasable and the global isn't referenced, skip outputting
3323          * it entirely.
3324          */
3325         if (global->m_flags & IR_FLAG_ERASABLE && global->m_reads.empty()) {
3326             return true;
3327         }
3328
3329         if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) &&
3330             !(global->m_flags & IR_FLAG_INCLUDE_DEF) &&
3331             (global->m_name[0] == '#' || global->m_cvq == CV_CONST))
3332         {
3333             pushdef = false;
3334         }
3335
3336         if (pushdef) {
3337             if (global->m_name[0] == '#') {
3338                 if (!m_str_immediate)
3339                     m_str_immediate = code_genstring(m_code.get(), "IMMEDIATE");
3340                 def.name = global->m_code.name = m_str_immediate;
3341             }
3342             else
3343                 def.name = global->m_code.name = code_genstring(m_code.get(), global->m_name.c_str());
3344         }
3345         else
3346             def.name   = 0;
3347         if (islocal) {
3348             def.offset = global->codeAddress();
3349             m_code->defs.push_back(def);
3350             if (global->m_vtype == TYPE_VECTOR)
3351                 gen_vector_defs(m_code.get(), def, global->m_name.c_str(), TYPE_FLOAT);
3352             else if (global->m_vtype == TYPE_FIELD && global->m_fieldtype == TYPE_VECTOR)
3353                 gen_vector_defs(m_code.get(), def, global->m_name.c_str(), TYPE_FIELD);
3354             return true;
3355         }
3356     }
3357     if (islocal)
3358         return true;
3359
3360     switch (global->m_vtype)
3361     {
3362     case TYPE_VOID:
3363         if (0 == global->m_name.compare("end_sys_globals")) {
3364             // TODO: remember this point... all the defs before this one
3365             // should be checksummed and added to progdefs.h when we generate it.
3366         }
3367         else if (0 == global->m_name.compare("end_sys_fields")) {
3368             // TODO: same as above but for entity-fields rather than globals
3369         }
3370         else if (irwarning(global->m_context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`",
3371                           global->m_name.c_str()))
3372         {
3373             /* Not bailing out */
3374             /* return false; */
3375         }
3376         /* I'd argue setting it to 0 is sufficient, but maybe some depend on knowing how far
3377          * the system fields actually go? Though the engine knows this anyway...
3378          * Maybe this could be an -f option.
3379          * fteqcc creates data of size 1 for end_sys_*, so let's do the same.
3380          */
3381         global->setCodeAddress(m_code->globals.size());
3382         m_code->globals.push_back(0);
3383         /* Add the def */
3384         if (pushdef)
3385             m_code->defs.push_back(def);
3386         return true;
3387     case TYPE_POINTER:
3388         if (pushdef)
3389             m_code->defs.push_back(def);
3390         return gen_global_pointer(m_code.get(), global);
3391     case TYPE_FIELD:
3392         if (pushdef) {
3393             m_code->defs.push_back(def);
3394             if (global->m_fieldtype == TYPE_VECTOR)
3395                 gen_vector_defs(m_code.get(), def, global->m_name.c_str(), TYPE_FIELD);
3396         }
3397         return gen_global_field(m_code.get(), global);
3398     case TYPE_ENTITY:
3399         /* fall through */
3400     case TYPE_FLOAT:
3401     {
3402         global->setCodeAddress(m_code->globals.size());
3403         if (global->m_hasvalue) {
3404             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3405                 return true;
3406             iptr = (int32_t*)&global->m_constval.ivec[0];
3407             m_code->globals.push_back(*iptr);
3408         } else {
3409             m_code->globals.push_back(0);
3410         }
3411         if (!islocal && global->m_cvq != CV_CONST)
3412             def.type |= DEF_SAVEGLOBAL;
3413         if (pushdef)
3414             m_code->defs.push_back(def);
3415
3416         return global->m_code.globaladdr >= 0;
3417     }
3418     case TYPE_STRING:
3419     {
3420         global->setCodeAddress(m_code->globals.size());
3421         if (global->m_hasvalue) {
3422             if (global->m_cvq == CV_CONST && global->m_reads.empty())
3423                 return true;
3424             uint32_t load = code_genstring(m_code.get(), global->m_constval.vstring);
3425             m_code->globals.push_back(load);
3426         } else {
3427             m_code->globals.push_back(0);
3428         }
3429         if (!islocal && global->m_cvq != CV_CONST)
3430             def.type |= DEF_SAVEGLOBAL;
3431         if (pushdef)
3432             m_code->defs.push_back(def);
3433         return global->m_code.globaladdr >= 0;
3434     }
3435     case TYPE_VECTOR:
3436     {
3437         size_t d;
3438         global->setCodeAddress(m_code->globals.size());
3439         if (global->m_hasvalue) {
3440             iptr = (int32_t*)&global->m_constval.ivec[0];
3441             m_code->globals.push_back(iptr[0]);
3442             if (global->m_code.globaladdr < 0)
3443                 return false;
3444             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3445                 m_code->globals.push_back(iptr[d]);
3446             }
3447         } else {
3448             m_code->globals.push_back(0);
3449             if (global->m_code.globaladdr < 0)
3450                 return false;
3451             for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3452                 m_code->globals.push_back(0);
3453             }
3454         }
3455         if (!islocal && global->m_cvq != CV_CONST)
3456             def.type |= DEF_SAVEGLOBAL;
3457
3458         if (pushdef) {
3459             m_code->defs.push_back(def);
3460             def.type &= ~DEF_SAVEGLOBAL;
3461             gen_vector_defs(m_code.get(), def, global->m_name.c_str(), TYPE_FLOAT);
3462         }
3463         return global->m_code.globaladdr >= 0;
3464     }
3465     case TYPE_FUNCTION:
3466         global->setCodeAddress(m_code->globals.size());
3467         if (!global->m_hasvalue) {
3468             m_code->globals.push_back(0);
3469             if (global->m_code.globaladdr < 0)
3470                 return false;
3471         } else {
3472             m_code->globals.push_back(m_code->functions.size());
3473             if (!generateGlobalFunction(global))
3474                 return false;
3475         }
3476         if (!islocal && global->m_cvq != CV_CONST)
3477             def.type |= DEF_SAVEGLOBAL;
3478         if (pushdef)
3479             m_code->defs.push_back(def);
3480         return true;
3481     case TYPE_VARIANT:
3482         /* assume biggest type */
3483         global->setCodeAddress(m_code->globals.size());
3484         m_code->globals.push_back(0);
3485         for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i)
3486             m_code->globals.push_back(0);
3487         return true;
3488     default:
3489         /* refuse to create 'void' type or any other fancy business. */
3490         irerror(global->m_context, "Invalid type for global variable `%s`: %s",
3491                 global->m_name.c_str(), type_name[global->m_vtype]);
3492         return false;
3493     }
3494 }
3495
3496 static GMQCC_INLINE void ir_builder_prepare_field(code_t *code, ir_value *field)
3497 {
3498     field->m_code.fieldaddr = code_alloc_field(code, type_sizeof_[field->m_fieldtype]);
3499 }
3500
3501 static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
3502 {
3503     prog_section_def_t def;
3504     prog_section_field_t fld;
3505
3506     (void)self;
3507
3508     def.type   = (uint16_t)field->m_vtype;
3509     def.offset = (uint16_t)self->m_code->globals.size();
3510
3511     /* create a global named the same as the field */
3512     if (OPTS_OPTION_U32(OPTION_STANDARD) == COMPILER_GMQCC) {
3513         /* in our standard, the global gets a dot prefix */
3514         size_t len = field->m_name.length();
3515         char name[1024];
3516
3517         /* we really don't want to have to allocate this, and 1024
3518          * bytes is more than enough for a variable/field name
3519          */
3520         if (len+2 >= sizeof(name)) {
3521             irerror(field->m_context, "invalid field name size: %u", (unsigned int)len);
3522             return false;
3523         }
3524
3525         name[0] = '.';
3526         memcpy(name+1, field->m_name.c_str(), len); // no strncpy - we used strlen above
3527         name[len+1] = 0;
3528
3529         def.name = code_genstring(self->m_code.get(), name);
3530         fld.name = def.name + 1; /* we reuse that string table entry */
3531     } else {
3532         /* in plain QC, there cannot be a global with the same name,
3533          * so we also give the global the same name as the field.
3534          * FIXME: fteqcc should create a global as well;
3535          * check whether it actually uses the same name. It probably does.
3536          */
3537         def.name = code_genstring(self->m_code.get(), field->m_name.c_str());
3538         fld.name = def.name;
3539     }
3540
3541     field->m_code.name = def.name;
3542
3543     self->m_code->defs.push_back(def);
3544
3545     fld.type = field->m_fieldtype;
3546
3547     if (fld.type == TYPE_VOID) {
3548         irerror(field->m_context, "field is missing a type: %s - don't know its size", field->m_name.c_str());
3549         return false;
3550     }
3551
3552     fld.offset = field->m_code.fieldaddr;
3553
3554     self->m_code->fields.push_back(fld);
3555
3556     field->setCodeAddress(self->m_code->globals.size());
3557     self->m_code->globals.push_back(fld.offset);
3558     if (fld.type == TYPE_VECTOR) {
3559         self->m_code->globals.push_back(fld.offset+1);
3560         self->m_code->globals.push_back(fld.offset+2);
3561     }
3562
3563     if (field->m_fieldtype == TYPE_VECTOR) {
3564         gen_vector_defs  (self->m_code.get(), def, field->m_name.c_str(), TYPE_FIELD);
3565         gen_vector_fields(self->m_code.get(), fld, field->m_name.c_str());
3566     }
3567
3568     return field->m_code.globaladdr >= 0;
3569 }
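/*
 * As an illustration, a field named "health": under the gmqcc standard the
 * def is called ".health" while the field entry reuses the same string-table
 * entry one byte in, so it reads "health"; a global slot is then emitted
 * holding the field's address (three consecutive slots for vector fields).
 */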
3570
3571 static void ir_builder_collect_reusables(ir_builder *builder) {
3572     std::vector<ir_value*> reusables;
3573
3574     for (auto& gp : builder->m_globals) {
3575         ir_value *value = gp.get();
3576         if (value->m_vtype != TYPE_FLOAT || !value->m_hasvalue)
3577             continue;
3578         if (value->m_cvq == CV_CONST || (value->m_name.length() >= 1 && value->m_name[0] == '#'))
3579             reusables.emplace_back(value);
3580     }
3581     builder->m_const_floats = std::move(reusables);
3582 }
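/*
 * The floats collected here are what ir_builder_split_vector() below searches
 * first, so vector components can reuse existing constants instead of always
 * allocating new literal floats.
 */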
3583
3584 static void ir_builder_split_vector(ir_builder *self, ir_value *vec) {
3585     ir_value* found[3] = { nullptr, nullptr, nullptr };
3586
3587     // must not be written to
3588     if (vec->m_writes.size())
3589         return;
3590     // must not be trying to access individual members
3591     if (vec->m_members[0] || vec->m_members[1] || vec->m_members[2])
3592         return;
3593     // it has to actually be used, otherwise it won't be generated anyway
3594     if (vec->m_reads.empty())
3595         return;
3596     //size_t count = vec->m_reads.size();
3597     //if (!count)
3598     //    return;
3599
3600     // it may only be used directly as a function parameter, so if we find any other instruction, cancel
3601     for (ir_instr *user : vec->m_reads) {
3602         // we only split vectors that are used directly as parameters to a call
3603         if ((user->m_opcode < INSTR_CALL0 || user->m_opcode > INSTR_CALL8) && user->m_opcode != VINSTR_NRCALL)
3604             return;
3605     }
3606
3607     vec->m_flags |= IR_FLAG_SPLIT_VECTOR;
3608
3609     // find existing floats making up the split
3610     for (ir_value *c : self->m_const_floats) {
3611         if (!found[0] && c->m_constval.vfloat == vec->m_constval.vvec.x)
3612             found[0] = c;
3613         if (!found[1] && c->m_constval.vfloat == vec->m_constval.vvec.y)
3614             found[1] = c;
3615         if (!found[2] && c->m_constval.vfloat == vec->m_constval.vvec.z)
3616             found[2] = c;
3617         if (found[0] && found[1] && found[2])
3618             break;
3619     }
3620
3621     // generate floats for not yet found components
3622     if (!found[0])
3623         found[0] = self->literalFloat(vec->m_constval.vvec.x, true);
3624     if (!found[1]) {
3625         if (vec->m_constval.vvec.y == vec->m_constval.vvec.x)
3626             found[1] = found[0];
3627         else
3628             found[1] = self->literalFloat(vec->m_constval.vvec.y, true);
3629     }
3630     if (!found[2]) {
3631         if (vec->m_constval.vvec.z == vec->m_constval.vvec.x)
3632             found[2] = found[0];
3633         else if (vec->m_constval.vvec.z == vec->m_constval.vvec.y)
3634             found[2] = found[1];
3635         else
3636             found[2] = self->literalFloat(vec->m_constval.vvec.z, true);
3637     }
3638
3639     // the .members array should be safe to use here
3640     vec->m_members[0] = found[0];
3641     vec->m_members[1] = found[1];
3642     vec->m_members[2] = found[2];
3643
3644     // register the readers for these floats
3645     found[0]->m_reads.insert(found[0]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3646     found[1]->m_reads.insert(found[1]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3647     found[2]->m_reads.insert(found[2]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3648 }
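/*
 * Example: a constant vector immediate '1 0 1' used only as a call argument
 * gets IR_FLAG_SPLIT_VECTOR set and its members bound to const floats 1, 0
 * and 1, reusing a single float for the matching x and z components and
 * creating new literals only where no existing constant matches.
 */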
3649
3650 static void ir_builder_split_vectors(ir_builder *self) {
3651     // member values may be added to self->m_globals during this operation, but
3652     // no new vectors will be added, so we need to iterate via an index, as
3653     // C++ iterators would be invalidated
3654     const size_t count = self->m_globals.size();
3655     for (size_t i = 0; i != count; ++i) {
3656         ir_value *v = self->m_globals[i].get();
3657         if (v->m_vtype != TYPE_VECTOR || !v->m_name.length() || v->m_name[0] != '#')
3658             continue;
3659         ir_builder_split_vector(self, v);
3660     }
3661 }
3662
3663 bool ir_builder::generate(const char *filename)
3664 {
3665     prog_section_statement_t stmt;
3666     char  *lnofile = nullptr;
3667
3668     if (OPTS_FLAG(SPLIT_VECTOR_PARAMETERS)) {
3669         ir_builder_collect_reusables(this);
3670         if (!m_const_floats.empty())
3671             ir_builder_split_vectors(this);
3672     }
3673
3674     for (auto& fp : m_fields)
3675         ir_builder_prepare_field(m_code.get(), fp.get());
3676
3677     for (auto& gp : m_globals) {
3678         ir_value *global = gp.get();
3679         if (!generateGlobal(global, false)) {
3680             return false;
3681         }
3682         if (global->m_vtype == TYPE_FUNCTION) {
3683             ir_function *func = global->m_constval.vfunc;
3684             if (func && m_max_locals < func->m_allocated_locals &&
3685                 !(func->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3686             {
3687                 m_max_locals = func->m_allocated_locals;
3688             }
3689             if (func && m_max_globaltemps < func->m_globaltemps)
3690                 m_max_globaltemps = func->m_globaltemps;
3691         }
3692     }
3693
3694     for (auto& fp : m_fields) {
3695         if (!ir_builder_gen_field(this, fp.get()))
3696             return false;
3697     }
3698
3699     // generate nil
3700     m_nil->setCodeAddress(m_code->globals.size());
3701     m_code->globals.push_back(0);
3702     m_code->globals.push_back(0);
3703     m_code->globals.push_back(0);
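    // three zeroed slots, presumably so nil can be read as any type up to a
    // full vector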
3704
3705     // generate virtual-instruction temps
3706     for (size_t i = 0; i < IR_MAX_VINSTR_TEMPS; ++i) {
3707         m_vinstr_temp[i]->setCodeAddress(m_code->globals.size());
3708         m_code->globals.push_back(0);
3709         m_code->globals.push_back(0);
3710         m_code->globals.push_back(0);
3711     }
3712
3713     // generate global temps
3714     m_first_common_globaltemp = m_code->globals.size();
3715     m_code->globals.insert(m_code->globals.end(), m_max_globaltemps, 0);
3716     // FIXME:DELME:
3717     //for (size_t i = 0; i < m_max_globaltemps; ++i) {
3718     //    m_code->globals.push_back(0);
3719     //}
3720     // generate common locals
3721     m_first_common_local = m_code->globals.size();
3722     m_code->globals.insert(m_code->globals.end(), m_max_locals, 0);
3723     // FIXME:DELME:
3724     //for (i = 0; i < m_max_locals; ++i) {
3725     //    m_code->globals.push_back(0);
3726     //}
3727
3728     // generate function code
3729
3730     for (auto& gp : m_globals) {
3731         ir_value *global = gp.get();
3732         if (global->m_vtype == TYPE_FUNCTION) {
3733             if (!this->generateGlobalFunctionCode(global))
3734                 return false;
3735         }
3736     }
3737
3738     if (m_code->globals.size() >= 65536) {
3739         irerror(m_globals.back()->m_context,
3740             "This progs file would require more globals than the metadata can handle (%zu). Bailing out.",
3741             m_code->globals.size());
3742         return false;
3743     }
3744
3745     /* DP errors if the last instruction is not an INSTR_DONE. */
3746     if (m_code->statements.back().opcode != INSTR_DONE)
3747     {
3748         lex_ctx_t last;
3749
3750         stmt.opcode = INSTR_DONE;
3751         stmt.o1.u1  = 0;
3752         stmt.o2.u1  = 0;
3753         stmt.o3.u1  = 0;
3754         last.line   = m_code->linenums.back();
3755         last.column = m_code->columnnums.back();
3756
3757         code_push_statement(m_code.get(), &stmt, last);
3758     }
3759
3760     if (OPTS_OPTION_BOOL(OPTION_PP_ONLY))
3761         return true;
3762
3763     if (m_code->statements.size() != m_code->linenums.size()) {
3764         con_err("Linecounter wrong: %zu != %zu\n",
3765                 m_code->statements.size(),
3766                 m_code->linenums.size());
3767     } else if (OPTS_FLAG(LNO)) {
3768         char  *dot;
3769         size_t filelen = strlen(filename);
3770
3771         memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
3772         dot = strrchr(lnofile, '.');
3773         if (!dot) {
3774             vec_pop(lnofile);
3775         } else {
3776             vec_shrinkto(lnofile, dot - lnofile);
3777         }
3778         memcpy(vec_add(lnofile, 5), ".lno", 5);
3779     }
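    // e.g. an output filename of "progs.dat" produces an lno file named
    // "progs.lno"; a filename without an extension simply gets ".lno" appended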
3780
3781     if (!code_write(m_code.get(), filename, lnofile)) {
3782         vec_free(lnofile);
3783         return false;
3784     }
3785
3786     vec_free(lnofile);
3787     return true;
3788 }
3789
3790 /***********************************************************************
3791  * IR DEBUG Dump functions...
3792  */
3793
3794 #define IND_BUFSZ 1024
3795
3796 static const char *qc_opname(int op)
3797 {
3798     if (op < 0) return "<INVALID>";
3799     if (op < VINSTR_END)
3800         return util_instr_str[op];
3801     switch (op) {
3802         case VINSTR_END:       return "END";
3803         case VINSTR_PHI:       return "PHI";
3804         case VINSTR_JUMP:      return "JUMP";
3805         case VINSTR_COND:      return "COND";
3806         case VINSTR_BITXOR:    return "BITXOR";
3807         case VINSTR_BITAND_V:  return "BITAND_V";
3808         case VINSTR_BITOR_V:   return "BITOR_V";
3809         case VINSTR_BITXOR_V:  return "BITXOR_V";
3810         case VINSTR_BITAND_VF: return "BITAND_VF";
3811         case VINSTR_BITOR_VF:  return "BITOR_VF";
3812         case VINSTR_BITXOR_VF: return "BITXOR_VF";
3813         case VINSTR_CROSS:     return "CROSS";
3814         case VINSTR_NEG_F:     return "NEG_F";
3815         case VINSTR_NEG_V:     return "NEG_V";
3816         default:               return "<UNK>";
3817     }
3818 }
3819
3820 void ir_builder::dump(int (*oprintf)(const char*, ...)) const
3821 {
3822     size_t i;
3823     char indent[IND_BUFSZ];
3824     indent[0] = '\t';
3825     indent[1] = 0;
3826
3827     oprintf("module %s\n", m_name.c_str());
3828     for (i = 0; i < m_globals.size(); ++i)
3829     {
3830         oprintf("global ");
3831         if (m_globals[i]->m_hasvalue)
3832             oprintf("%s = ", m_globals[i]->m_name.c_str());
3833         m_globals[i].get()->dump(oprintf);
3834         oprintf("\n");
3835     }
3836     for (i = 0; i < m_functions.size(); ++i)
3837         ir_function_dump(m_functions[i].get(), indent, oprintf);
3838     oprintf("endmodule %s\n", m_name.c_str());
3839 }
3840
3841 static const char *storenames[] = {
3842     "[global]", "[local]", "[param]", "[value]", "[return]"
3843 };
3844
3845 void ir_function_dump(ir_function *f, char *ind,
3846                       int (*oprintf)(const char*, ...))
3847 {
3848     size_t i;
3849     if (f->m_builtin != 0) {
3850         oprintf("%sfunction %s = builtin %i\n", ind, f->m_name.c_str(), -f->m_builtin);
3851         return;
3852     }
3853     oprintf("%sfunction %s\n", ind, f->m_name.c_str());
3854     util_strncat(ind, "\t", IND_BUFSZ-1);
3855     if (f->m_locals.size())
3856     {
3857         oprintf("%s%i locals:\n", ind, (int)f->m_locals.size());
3858         for (i = 0; i < f->m_locals.size(); ++i) {
3859             oprintf("%s\t", ind);
3860             f->m_locals[i].get()->dump(oprintf);
3861             oprintf("\n");
3862         }
3863     }
3864     oprintf("%sliferanges:\n", ind);
3865     for (i = 0; i < f->m_locals.size(); ++i) {
3866         const char *attr = "";
3867         size_t l, m;
3868         ir_value *v = f->m_locals[i].get();
3869         if (v->m_unique_life && v->m_locked)
3870             attr = "unique,locked ";
3871         else if (v->m_unique_life)
3872             attr = "unique ";
3873         else if (v->m_locked)
3874             attr = "locked ";
3875         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3876                 storenames[v->m_store],
3877                 attr, (v->m_callparam ? "callparam " : ""),
3878                 (int)v->m_code.local);
3879         if (v->m_life.empty())
3880             oprintf("[null]");
3881         for (l = 0; l < v->m_life.size(); ++l) {
3882             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3883         }
3884         oprintf("\n");
3885         for (m = 0; m < 3; ++m) {
3886             ir_value *vm = v->m_members[m];
3887             if (!vm)
3888                 continue;
3889             oprintf("%s\t%s: @%i ", ind, vm->m_name.c_str(), (int)vm->m_code.local);
3890             for (l = 0; l < vm->m_life.size(); ++l) {
3891                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3892             }
3893             oprintf("\n");
3894         }
3895     }
3896     for (i = 0; i < f->m_values.size(); ++i) {
3897         const char *attr = "";
3898         size_t l, m;
3899         ir_value *v = f->m_values[i].get();
3900         if (v->m_unique_life && v->m_locked)
3901             attr = "unique,locked ";
3902         else if (v->m_unique_life)
3903             attr = "unique ";
3904         else if (v->m_locked)
3905             attr = "locked ";
3906         oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3907                 storenames[v->m_store],
3908                 attr, (v->m_callparam ? "callparam " : ""),
3909                 (int)v->m_code.local);
3910         if (v->m_life.empty())
3911             oprintf("[null]");
3912         for (l = 0; l < v->m_life.size(); ++l) {
3913             oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3914         }
3915         oprintf("\n");
3916         for (m = 0; m < 3; ++m) {
3917             ir_value *vm = v->m_members[m];
3918             if (!vm)
3919                 continue;
3920             if (vm->m_unique_life && vm->m_locked)
3921                 attr = "unique,locked ";
3922             else if (vm->m_unique_life)
3923                 attr = "unique ";
3924             else /* reset attr so members without flags don't inherit it */
3925                 attr = (vm->m_locked ? "locked " : "");
3926             oprintf("%s\t%s: %s@%i ", ind, vm->m_name.c_str(), attr, (int)vm->m_code.local);
3927             for (l = 0; l < vm->m_life.size(); ++l) {
3928                 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3929             }
3930             oprintf("\n");
3931         }
3932     }
3933     if (f->m_blocks.size())
3934     {
3935         oprintf("%slife passes: %i\n", ind, (int)f->m_run_id);
3936         for (i = 0; i < f->m_blocks.size(); ++i) {
3937             ir_block_dump(f->m_blocks[i].get(), ind, oprintf);
3938         }
3939
3940     }
3941     ind[strlen(ind)-1] = 0;
3942     oprintf("%sendfunction %s\n", ind, f->m_name.c_str());
3943 }
3944
3945 void ir_block_dump(ir_block* b, char *ind,
3946                    int (*oprintf)(const char*, ...))
3947 {
3948     oprintf("%s:%s\n", ind, b->m_label.c_str());
3949     util_strncat(ind, "\t", IND_BUFSZ-1);
3950
3951     if (!b->m_instr.empty() && b->m_instr[0])
3952         oprintf("%s (%i) [entry]\n", ind, (int)(b->m_instr[0]->m_eid-1));
3953     for (auto &i : b->m_instr)
3954         ir_instr_dump(i, ind, oprintf);
3955     ind[strlen(ind)-1] = 0;
3956 }
3957
3958 static void dump_phi(ir_instr *in, int (*oprintf)(const char*, ...))
3959 {
3960     oprintf("%s <- phi ", in->_m_ops[0]->m_name.c_str());
3961     for (auto &it : in->m_phi) {
3962         oprintf("([%s] : %s) ", it.from->m_label.c_str(),
3963                                 it.value->m_name.c_str());
3964     }
3965     oprintf("\n");
3966 }
3967
3968 void ir_instr_dump(ir_instr *in, char *ind,
3969                        int (*oprintf)(const char*, ...))
3970 {
3971     size_t i;
3972     const char *comma = nullptr;
3973
3974     oprintf("%s (%i) ", ind, (int)in->m_eid);
3975
3976     if (in->m_opcode == VINSTR_PHI) {
3977         dump_phi(in, oprintf);
3978         return;
3979     }
3980
3981     util_strncat(ind, "\t", IND_BUFSZ-1);
3982
3983     if (in->_m_ops[0] && (in->_m_ops[1] || in->_m_ops[2])) {
3984         in->_m_ops[0]->dump(oprintf);
3985         if (in->_m_ops[1] || in->_m_ops[2])
3986             oprintf(" <- ");
3987     }
3988     if (in->m_opcode == INSTR_CALL0 || in->m_opcode == VINSTR_NRCALL) {
3989         oprintf("CALL%i\t", in->m_params.size());
3990     } else
3991         oprintf("%s\t", qc_opname(in->m_opcode));
3992
3993     if (in->_m_ops[0] && !(in->_m_ops[1] || in->_m_ops[2])) {
3994         in->_m_ops[0]->dump(oprintf);
3995         comma = ",\t";
3996     }
3997     else
3998     {
3999         for (i = 1; i != 3; ++i) {
4000             if (in->_m_ops[i]) {
4001                 if (comma)
4002                     oprintf(comma);
4003                 in->_m_ops[i]->dump(oprintf);
4004                 comma = ",\t";
4005             }
4006         }
4007     }
4008     if (in->m_bops[0]) {
4009         if (comma)
4010             oprintf(comma);
4011         oprintf("[%s]", in->m_bops[0]->m_label.c_str());
4012         comma = ",\t";
4013     }
4014     if (in->m_bops[1])
4015         oprintf("%s[%s]", comma, in->m_bops[1]->m_label.c_str());
4016     if (in->m_params.size()) {
4017         oprintf("\tparams: ");
4018         for (auto &it : in->m_params)
4019             oprintf("%s, ", it->m_name.c_str());
4020     }
4021     oprintf("\n");
4022     ind[strlen(ind)-1] = 0;
4023 }
4024
4025 static void ir_value_dump_string(const char *str, int (*oprintf)(const char*, ...))
4026 {
4027     oprintf("\"");
4028     for (; *str; ++str) {
4029         switch (*str) {
4030             case '\n': oprintf("\\n"); break;
4031             case '\r': oprintf("\\r"); break;
4032             case '\t': oprintf("\\t"); break;
4033             case '\v': oprintf("\\v"); break;
4034             case '\f': oprintf("\\f"); break;
4035             case '\b': oprintf("\\b"); break;
4036             case '\a': oprintf("\\a"); break;
4037             case '\\': oprintf("\\\\"); break;
4038             case '"': oprintf("\\\""); break;
4039             default: oprintf("%c", *str); break;
4040         }
4041     }
4042     oprintf("\"");
4043 }
4044
4045 void ir_value::dump(int (*oprintf)(const char*, ...)) const
4046 {
4047     if (m_hasvalue) {
4048         switch (m_vtype) {
4049             default:
4050             case TYPE_VOID:
4051                 oprintf("(void)");
4052                 break;
4053             case TYPE_FUNCTION:
4054                 oprintf("fn:%s", m_name.c_str());
4055                 break;
4056             case TYPE_FLOAT:
4057                 oprintf("%g", m_constval.vfloat);
4058                 break;
4059             case TYPE_VECTOR:
4060                 oprintf("'%g %g %g'",
4061                         m_constval.vvec.x,
4062                         m_constval.vvec.y,
4063                         m_constval.vvec.z);
4064                 break;
4065             case TYPE_ENTITY:
4066                 oprintf("(entity)");
4067                 break;
4068             case TYPE_STRING:
4069                 ir_value_dump_string(m_constval.vstring, oprintf);
4070                 break;
4071 #if 0
4072             case TYPE_INTEGER:
4073                 oprintf("%i", m_constval.vint);
4074                 break;
4075 #endif
4076             case TYPE_POINTER:
4077                 oprintf("&%s",
4078                     m_constval.vpointer->m_name.c_str());
4079                 break;
4080         }
4081     } else {
4082         oprintf("%s", m_name.c_str());
4083     }
4084 }
4085
4086 void ir_value::dumpLife(int (*oprintf)(const char*,...)) const
4087 {
4088     oprintf("Life of %12s:", m_name.c_str());
4089     for (size_t i = 0; i < m_life.size(); ++i)
4090     {
4091         oprintf(" + [%i, %i]\n", m_life[i].start, m_life[i].end);
4092     }
4093 }