git.xonotic.org Git - xonotic/gmqcc.git/blobdiff - ir.cpp
diff --git a/ir.cpp b/ir.cpp
index 7ac84773830e0a148b19b1eca21268192e1fac4b..6eb17b0913dc06867094210855ec0fdcea591a2e 100644 (file)
--- a/ir.cpp
+++ b/ir.cpp
@@ -235,15 +235,14 @@ static bool GMQCC_WARN vec_ir_value_find(std::vector<ir_value *> &vec, const ir_
     return false;
 }
 
-static bool GMQCC_WARN vec_ir_block_find(ir_block **vec, ir_block *what, size_t *idx)
+static bool GMQCC_WARN vec_ir_block_find(std::vector<ir_block *> &vec, ir_block *what, size_t *idx)
 {
-    size_t i;
-    size_t len = vec_size(vec);
-    for (i = 0; i < len; ++i) {
-        if (vec[i] == what) {
-            if (idx) *idx = i;
-            return true;
-        }
+    for (auto &it : vec) {
+        if (it != what)
+            continue;
+        if (idx)
+            *idx = &it - &vec[0];
+        return true;
     }
     return false;
 }
@@ -314,7 +313,7 @@ ir_function* ir_builder::createFunction(const std::string& name, qc_type outtype
     if (fn)
         return nullptr;
 
-    fn = new ir_function(this, outtype);
+    fn = new ir_function(*this, outtype);
     fn->m_name = name;
     m_functions.emplace_back(fn);
     util_htset(m_htfunctions, name.c_str(), fn);
@@ -381,7 +380,7 @@ static void ir_function_enumerate(ir_function*);
 static bool ir_function_calculate_liferanges(ir_function*);
 static bool ir_function_allocate_locals(ir_function*);
 
-ir_function::ir_function(ir_builder* owner_, qc_type outtype_)
+ir_function::ir_function(ir_builder& owner_, qc_type outtype_)
 : m_owner(owner_),
   m_name("<@unnamed>"),
   m_outtype(outtype_)
@@ -412,8 +411,8 @@ ir_block* ir_function_create_block(lex_ctx_t ctx, ir_function *self, const char
     bn->m_context = ctx;
     self->m_blocks.emplace_back(bn);
 
-    if ((self->m_flags & IR_FLAG_BLOCK_COVERAGE) && self->m_owner->m_coverage_func)
-        (void)ir_block_create_call(bn, ctx, nullptr, self->m_owner->m_coverage_func, false);
+    if ((self->m_flags & IR_FLAG_BLOCK_COVERAGE) && self->m_owner.m_coverage_func)
+        (void)ir_block_create_call(bn, ctx, nullptr, self->m_owner.m_coverage_func, false);
 
     return bn;
 }
@@ -433,7 +432,7 @@ static bool ir_function_pass_peephole(ir_function *self)
 {
     for (auto& bp : self->m_blocks) {
         ir_block *block = bp.get();
-        for (size_t i = 0; i < vec_size(block->m_instr); ++i) {
+        for (size_t i = 0; i < block->m_instr.size(); ++i) {
             ir_instr *inst;
             inst = block->m_instr[i];
 
@@ -481,7 +480,7 @@ static bool ir_function_pass_peephole(ir_function *self)
                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
                 (void)!ir_instr_op(oper, 0, store->_m_ops[0], true);
 
-                vec_remove(block->m_instr, i, 1);
+                block->m_instr.erase(block->m_instr.begin() + i);
                 delete store;
             }
             else if (inst->m_opcode == VINSTR_COND)
@@ -515,15 +514,15 @@ static bool ir_function_pass_peephole(ir_function *self)
                     (void)!ir_instr_op(inst, 0, inot->_m_ops[1], false);
                     /* remove NOT */
                     tmp = inot->m_owner;
-                    for (inotid = 0; inotid < vec_size(tmp->m_instr); ++inotid) {
+                    for (inotid = 0; inotid < tmp->m_instr.size(); ++inotid) {
                         if (tmp->m_instr[inotid] == inot)
                             break;
                     }
-                    if (inotid >= vec_size(tmp->m_instr)) {
+                    if (inotid >= tmp->m_instr.size()) {
                         compile_error(inst->m_context, "sanity-check failed: failed to find instruction to optimize out");
                         return false;
                     }
-                    vec_remove(tmp->m_instr, inotid, 1);
+                    tmp->m_instr.erase(tmp->m_instr.begin() + inotid);
                     delete inot;
                     /* swap ontrue/onfalse */
                     tmp = inst->m_bops[0];
@@ -548,14 +547,14 @@ static bool ir_function_pass_tailrecursion(ir_function *self)
         ir_value *funcval;
         ir_instr *ret, *call, *store = nullptr;
 
-        if (!block->m_final || vec_size(block->m_instr) < 2)
+        if (!block->m_final || block->m_instr.size() < 2)
             continue;
 
-        ret = block->m_instr[vec_size(block->m_instr)-1];
+        ret = block->m_instr.back();
         if (ret->m_opcode != INSTR_DONE && ret->m_opcode != INSTR_RETURN)
             continue;
 
-        call = block->m_instr[vec_size(block->m_instr)-2];
+        call = block->m_instr[block->m_instr.size()-2];
         if (call->m_opcode >= INSTR_STORE_F && call->m_opcode <= INSTR_STORE_FNC) {
             /* account for the unoptimized
              * CALL
@@ -563,11 +562,11 @@ static bool ir_function_pass_tailrecursion(ir_function *self)
              * RETURN %tmp
              * version
              */
-            if (vec_size(block->m_instr) < 3)
+            if (block->m_instr.size() < 3)
                 continue;
 
             store = call;
-            call = block->m_instr[vec_size(block->m_instr)-3];
+            call = block->m_instr[block->m_instr.size()-3];
         }
 
         if (call->m_opcode < INSTR_CALL0 || call->m_opcode > INSTR_CALL8)
@@ -581,7 +580,7 @@ static bool ir_function_pass_tailrecursion(ir_function *self)
             {
                 ++opts_optimizationcount[OPTIM_PEEPHOLE];
                 call->_m_ops[0] = store->_m_ops[0];
-                vec_remove(block->m_instr, vec_size(block->m_instr) - 2, 1);
+                block->m_instr.erase(block->m_instr.end()-2);
                 delete store;
             }
             else
@@ -602,7 +601,7 @@ static bool ir_function_pass_tailrecursion(ir_function *self)
             continue;
 
         ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
-        vec_shrinkby(block->m_instr, 2);
+        block->m_instr.erase(block->m_instr.end()-2, block->m_instr.end());
 
         block->m_final = false; /* open it back up */
 
@@ -631,6 +630,36 @@ bool ir_function_finalize(ir_function *self)
     if (self->m_builtin)
         return true;
 
+    for (auto& lp : self->m_locals) {
+        ir_value *v = lp.get();
+        if (v->m_reads.empty() && v->m_writes.size() && !(v->m_flags & IR_FLAG_NOREF)) {
+            // if it's a vector, check that all its members are unused before
+            // claiming it's unused; otherwise skip the vector entirely
+            if (v->m_vtype == TYPE_VECTOR)
+            {
+                size_t mask = (1 << 3) - 1, bits = 0;
+                for (size_t i = 0; i < 3; i++)
+                    if (!v->m_members[i] || (v->m_members[i]->m_reads.empty()
+                        && v->m_members[i]->m_writes.size()))
+                        bits |= (1 << i);
+                // all components are unused so just report the vector
+                if (bits == mask && irwarning(v->m_context, WARN_UNUSED_VARIABLE,
+                    "unused variable: `%s`", v->m_name.c_str()))
+                    return false;
+                else if (bits != mask)
+                    // individual components are unused so mention them
+                    for (size_t i = 0; i < 3; i++)
+                        if ((bits & (1 << i))
+                            && irwarning(v->m_context, WARN_UNUSED_COMPONENT,
+                                "unused vector component: `%s.%c`", v->m_name.c_str(), "xyz"[i]))
+                            return false;
+            }
+            // just a standard variable
+            else if (irwarning(v->m_context, WARN_UNUSED_VARIABLE,
+                    "unused variable: `%s`", v->m_name.c_str())) return false;
+        }
+    }
+
     if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
         if (!ir_function_pass_peephole(self)) {
             irerror(self->m_context, "generic optimization pass broke something in `%s`", self->m_name.c_str());
@@ -713,19 +742,15 @@ ir_block::ir_block(ir_function* owner, const std::string& name)
 
 ir_block::~ir_block()
 {
-    for (size_t i = 0; i != vec_size(m_instr); ++i)
-        delete m_instr[i];
-    vec_free(m_instr);
-    vec_free(m_entries);
-    vec_free(m_exits);
+    for (auto &i : m_instr)
+        delete i;
 }
 
 static void ir_block_delete_quick(ir_block* self)
 {
-    size_t i;
-    for (i = 0; i != vec_size(self->m_instr); ++i)
-        ir_instr_delete_quick(self->m_instr[i]);
-    vec_free(self->m_instr);
+    for (auto &i : self->m_instr)
+        ir_instr_delete_quick(i);
+    self->m_instr.clear();
     delete self;
 }
 
@@ -1236,7 +1261,7 @@ bool ir_block_create_store_op(ir_block *self, lex_ctx_t ctx, int op, ir_value *t
         delete in;
         return false;
     }
-    vec_push(self->m_instr, in);
+    self->m_instr.push_back(in);
     return true;
 }
 
@@ -1256,7 +1281,7 @@ bool ir_block_create_state_op(ir_block *self, lex_ctx_t ctx, ir_value *frame, ir
         delete in;
         return false;
     }
-    vec_push(self->m_instr, in);
+    self->m_instr.push_back(in);
     return true;
 }
 
@@ -1325,7 +1350,7 @@ bool ir_block_create_return(ir_block *self, lex_ctx_t ctx, ir_value *v)
         return false;
     }
 
-    vec_push(self->m_instr, in);
+    self->m_instr.push_back(in);
     return true;
 }
 
@@ -1349,12 +1374,12 @@ bool ir_block_create_if(ir_block *self, lex_ctx_t ctx, ir_value *v,
     in->m_bops[0] = ontrue;
     in->m_bops[1] = onfalse;
 
-    vec_push(self->m_instr, in);
+    self->m_instr.push_back(in);
 
-    vec_push(self->m_exits, ontrue);
-    vec_push(self->m_exits, onfalse);
-    vec_push(ontrue->m_entries,  self);
-    vec_push(onfalse->m_entries, self);
+    self->m_exits.push_back(ontrue);
+    self->m_exits.push_back(onfalse);
+    ontrue->m_entries.push_back(self);
+    onfalse->m_entries.push_back(self);
     return true;
 }
 
@@ -1369,10 +1394,10 @@ bool ir_block_create_jump(ir_block *self, lex_ctx_t ctx, ir_block *to)
         return false;
 
     in->m_bops[0] = to;
-    vec_push(self->m_instr, in);
+    self->m_instr.push_back(in);
 
-    vec_push(self->m_exits, to);
-    vec_push(to->m_entries, self);
+    self->m_exits.push_back(to);
+    to->m_entries.push_back(self);
     return true;
 }
 
@@ -1400,7 +1425,7 @@ ir_instr* ir_block_create_phi(ir_block *self, lex_ctx_t ctx, const char *label,
         delete in;
         return nullptr;
     }
-    vec_push(self->m_instr, in);
+    self->m_instr.push_back(in);
     return in;
 }
 
@@ -1451,7 +1476,7 @@ ir_instr* ir_block_create_call(ir_block *self, lex_ctx_t ctx, const char *label,
         delete in;
         return nullptr;
     }
-    vec_push(self->m_instr, in);
+    self->m_instr.push_back(in);
     /*
     if (noreturn) {
         if (!ir_block_create_return(self, ctx, nullptr)) {
@@ -1618,7 +1643,7 @@ ir_value* ir_block_create_unary(ir_block *self, lex_ctx_t ctx,
         case VINSTR_NEG_F:
             return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_F, nullptr, operand, ot);
         case VINSTR_NEG_V:
-            return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_V, nullptr, operand, TYPE_VECTOR);
+            return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_V, self->m_owner->m_owner.m_nil, operand, TYPE_VECTOR);
 
         default:
             ot = operand->m_vtype;
@@ -1655,7 +1680,7 @@ static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t ctx, co
         goto on_error;
     }
 
-    vec_push(self->m_instr, instr);
+    self->m_instr.push_back(instr);
 
     return out;
 on_error:
@@ -1729,13 +1754,13 @@ static bool ir_block_naive_phi(ir_block *self)
      * to a list so we don't need to loop through blocks
      * - anyway: "don't optimize YET"
      */
-    for (i = 0; i < vec_size(self->m_instr); ++i)
+    for (i = 0; i < self->m_instr.size(); ++i)
     {
         ir_instr *instr = self->m_instr[i];
         if (instr->m_opcode != VINSTR_PHI)
             continue;
 
-        vec_remove(self->m_instr, i, 1);
+        self->m_instr.erase(self->m_instr.begin()+i);
         --i; /* NOTE: i+1 below */
 
         for (auto &it : instr->m_phi) {
@@ -1747,14 +1772,14 @@ static bool ir_block_naive_phi(ir_block *self)
                     return false;
             } else {
                 /* force a move instruction */
-                ir_instr *prevjump = vec_last(b->m_instr);
-                vec_pop(b->m_instr);
+                ir_instr *prevjump = b->m_instr.back();
+                b->m_instr.pop_back();
                 b->m_final = false;
                 instr->_m_ops[0]->m_store = store_global;
                 if (!ir_block_create_store(b, instr->m_context, instr->_m_ops[0], v))
                     return false;
                 instr->_m_ops[0]->m_store = store_value;
-                vec_push(b->m_instr, prevjump);
+                b->m_instr.push_back(prevjump);
                 b->m_final = true;
             }
         }
@@ -1776,12 +1801,9 @@ static bool ir_block_naive_phi(ir_block *self)
  */
 static void ir_block_enumerate(ir_block *self, size_t *_eid)
 {
-    size_t i;
     size_t eid = *_eid;
-    for (i = 0; i < vec_size(self->m_instr); ++i)
-    {
-        self->m_instr[i]->m_eid = eid++;
-    }
+    for (auto &i : self->m_instr)
+        i->m_eid = eid++;
     *_eid = eid;
 }
 
@@ -1814,10 +1836,10 @@ void ir_function_enumerate(ir_function *self)
  * This is the counterpart to register-allocation in register machines.
  */
 struct function_allocator {
-    ir_value **locals;
-    size_t *sizes;
-    size_t *positions;
-    bool *unique;
+    std::vector<std::unique_ptr<ir_value>> locals;
+    std::vector<size_t> sizes;
+    std::vector<size_t> positions;
+    std::vector<bool> unique;
 };
 
 static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
@@ -1825,7 +1847,7 @@ static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
     ir_value *slot;
     size_t vsize = var->size();
 
-    var->m_code.local = vec_size(alloc->locals);
+    var->m_code.local = alloc->locals.size();
 
     slot = new ir_value("reg", store_global, var->m_vtype);
     if (!slot)
@@ -1834,9 +1856,9 @@ static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
     if (!slot->mergeLife(var))
         goto localerror;
 
-    vec_push(alloc->locals, slot);
-    vec_push(alloc->sizes, vsize);
-    vec_push(alloc->unique, var->m_unique_life);
+    alloc->locals.emplace_back(slot);
+    alloc->sizes.push_back(vsize);
+    alloc->unique.push_back(var->m_unique_life);
 
     return true;
 
@@ -1848,23 +1870,22 @@ localerror:
 static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
 {
     size_t a;
-    ir_value *slot;
 
     if (v->m_unique_life)
         return function_allocator_alloc(alloc, v);
 
-    for (a = 0; a < vec_size(alloc->locals); ++a)
+    for (a = 0; a < alloc->locals.size(); ++a)
     {
         /* if it's reserved for a unique liferange: skip */
         if (alloc->unique[a])
             continue;
 
-        slot = alloc->locals[a];
+        ir_value *slot = alloc->locals[a].get();
 
         /* never resize parameters
          * will be required later when overlapping temps + locals
          */
-        if (a < vec_size(self->m_params) &&
+        if (a < self->m_params.size() &&
             alloc->sizes[a] < v->size())
         {
             continue;
@@ -1883,7 +1904,7 @@ static bool ir_function_allocator_assign(ir_function *self, function_allocator *
         v->m_code.local = a;
         return true;
     }
-    if (a >= vec_size(alloc->locals)) {
+    if (a >= alloc->locals.size()) {
         if (!function_allocator_alloc(alloc, v))
             return false;
     }
@@ -1892,7 +1913,6 @@ static bool ir_function_allocator_assign(ir_function *self, function_allocator *
 
 bool ir_function_allocate_locals(ir_function *self)
 {
-    bool   retval = true;
     size_t pos;
     bool   opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
 
@@ -1901,15 +1921,6 @@ bool ir_function_allocate_locals(ir_function *self)
     if (self->m_locals.empty() && self->m_values.empty())
         return true;
 
-    globalloc.locals    = nullptr;
-    globalloc.sizes     = nullptr;
-    globalloc.positions = nullptr;
-    globalloc.unique    = nullptr;
-    lockalloc.locals    = nullptr;
-    lockalloc.sizes     = nullptr;
-    lockalloc.positions = nullptr;
-    lockalloc.unique    = nullptr;
-
     size_t i;
     for (i = 0; i < self->m_locals.size(); ++i)
     {
@@ -1918,12 +1929,12 @@ bool ir_function_allocate_locals(ir_function *self)
             v->m_locked      = true;
             v->m_unique_life = true;
         }
-        else if (i >= vec_size(self->m_params))
+        else if (i >= self->m_params.size())
             break;
         else
             v->m_locked = true; /* lock parameters locals */
         if (!function_allocator_alloc((v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
-            goto error;
+            return false;
     }
     for (; i < self->m_locals.size(); ++i)
     {
@@ -1931,7 +1942,7 @@ bool ir_function_allocate_locals(ir_function *self)
         if (v->m_life.empty())
             continue;
         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
-            goto error;
+            return false;
     }
 
     /* Allocate a slot for any value that still exists */
@@ -1957,23 +1968,23 @@ bool ir_function_allocate_locals(ir_function *self)
                 ir_instr *call = v->m_reads[0];
                 if (!vec_ir_value_find(call->m_params, v, &param)) {
                     irerror(call->m_context, "internal error: unlocked parameter %s not found", v->m_name.c_str());
-                    goto error;
+                    return false;
                 }
                 ++opts_optimizationcount[OPTIM_CALL_STORES];
                 v->m_callparam = true;
                 if (param < 8)
                     v->setCodeAddress(OFS_PARM0 + 3*param);
                 else {
-                    size_t nprotos = self->m_owner->m_extparam_protos.size();
+                    size_t nprotos = self->m_owner.m_extparam_protos.size();
                     ir_value *ep;
                     param -= 8;
                     if (nprotos > param)
-                        ep = self->m_owner->m_extparam_protos[param].get();
+                        ep = self->m_owner.m_extparam_protos[param].get();
                     else
                     {
-                        ep = self->m_owner->generateExtparamProto();
+                        ep = self->m_owner.generateExtparamProto();
                         while (++nprotos <= param)
-                            ep = self->m_owner->generateExtparamProto();
+                            ep = self->m_owner.generateExtparamProto();
                     }
                     ir_instr_op(v->m_writes[0], 0, ep, true);
                     call->m_params[param+8] = ep;
@@ -1991,33 +2002,33 @@ bool ir_function_allocate_locals(ir_function *self)
         }
 
         if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
-            goto error;
+            return false;
     }
 
-    if (!lockalloc.sizes && !globalloc.sizes) {
-        goto cleanup;
-    }
-    vec_push(lockalloc.positions, 0);
-    vec_push(globalloc.positions, 0);
+    if (lockalloc.sizes.empty() && globalloc.sizes.empty())
+        return true;
+
+    lockalloc.positions.push_back(0);
+    globalloc.positions.push_back(0);
 
     /* Adjust slot positions based on sizes */
-    if (lockalloc.sizes) {
-        pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
-        for (i = 1; i < vec_size(lockalloc.sizes); ++i)
+    if (!lockalloc.sizes.empty()) {
+        pos = (lockalloc.sizes.size() ? lockalloc.positions[0] : 0);
+        for (i = 1; i < lockalloc.sizes.size(); ++i)
         {
             pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
-            vec_push(lockalloc.positions, pos);
+            lockalloc.positions.push_back(pos);
         }
-        self->m_allocated_locals = pos + vec_last(lockalloc.sizes);
+        self->m_allocated_locals = pos + lockalloc.sizes.back();
     }
-    if (globalloc.sizes) {
-        pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
-        for (i = 1; i < vec_size(globalloc.sizes); ++i)
+    if (!globalloc.sizes.empty()) {
+        pos = (globalloc.sizes.size() ? globalloc.positions[0] : 0);
+        for (i = 1; i < globalloc.sizes.size(); ++i)
         {
             pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
-            vec_push(globalloc.positions, pos);
+            globalloc.positions.push_back(pos);
         }
-        self->m_globaltemps = pos + vec_last(globalloc.sizes);
+        self->m_globaltemps = pos + globalloc.sizes.back();
     }
 
     /* Locals need to know their new position */
@@ -2035,24 +2046,7 @@ bool ir_function_allocate_locals(ir_function *self)
             value->m_code.local = globalloc.positions[value->m_code.local];
     }
 
-    goto cleanup;
-
-error:
-    retval = false;
-cleanup:
-    for (i = 0; i < vec_size(lockalloc.locals); ++i)
-        delete lockalloc.locals[i];
-    for (i = 0; i < vec_size(globalloc.locals); ++i)
-        delete globalloc.locals[i];
-    vec_free(globalloc.unique);
-    vec_free(globalloc.locals);
-    vec_free(globalloc.sizes);
-    vec_free(globalloc.positions);
-    vec_free(lockalloc.unique);
-    vec_free(lockalloc.locals);
-    vec_free(lockalloc.sizes);
-    vec_free(lockalloc.positions);
-    return retval;
+    return true;
 }
 
 /* Get information about which operand
@@ -2117,21 +2111,19 @@ static bool ir_block_life_propagate(ir_block *self, bool *changed)
 {
     ir_instr *instr;
     ir_value *value;
-    size_t i, o, p, mem;
+    size_t i, o, mem;
     // bitmasks which operands are read from or written to
     size_t read, write;
 
     self->m_living.clear();
 
-    p = vec_size(self->m_exits);
-    for (i = 0; i < p; ++i) {
-        ir_block *prev = self->m_exits[i];
+    for (auto &prev : self->m_exits) {
         for (auto &it : prev->m_living)
             if (!vec_ir_value_find(self->m_living, it, nullptr))
                 self->m_living.push_back(it);
     }
 
-    i = vec_size(self->m_instr);
+    i = self->m_instr.size();
     while (i)
     { --i;
         instr = self->m_instr[i];
@@ -2322,7 +2314,7 @@ static bool ir_block_life_propagate(ir_block *self, bool *changed)
 bool ir_function_calculate_liferanges(ir_function *self)
 {
     /* parameters live at 0 */
-    for (size_t i = 0; i < vec_size(self->m_params); ++i)
+    for (size_t i = 0; i < self->m_params.size(); ++i)
         if (!self->m_locals[i].get()->setAlive(0))
             compile_error(self->m_context, "internal error: failed value-life merging");
 
@@ -2489,7 +2481,7 @@ static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *bloc
 
     block->m_generated = true;
     block->m_code_start = code->statements.size();
-    for (i = 0; i < vec_size(block->m_instr); ++i)
+    for (i = 0; i < block->m_instr.size(); ++i)
     {
         instr = block->m_instr[i];
 
@@ -2527,11 +2519,11 @@ static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *bloc
             stmt.opcode = INSTR_BITAND;
             stmt.o1.s1 = instr->_m_ops[1]->codeAddress();
             stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
-            stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
+            stmt.o3.s1 = func->m_owner.m_vinstr_temp[0]->codeAddress();
             code_push_statement(code, &stmt, instr->m_context);
             stmt.opcode = INSTR_SUB_F;
             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
-            stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
+            stmt.o2.s1 = func->m_owner.m_vinstr_temp[0]->codeAddress();
             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
             code_push_statement(code, &stmt, instr->m_context);
 
@@ -2587,12 +2579,12 @@ static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *bloc
                 stmt.opcode = INSTR_BITAND;
                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + j;
-                stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
+                stmt.o3.s1 = func->m_owner.m_vinstr_temp[0]->codeAddress() + j;
                 code_push_statement(code, &stmt, instr->m_context);
             }
             stmt.opcode = INSTR_SUB_V;
             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
-            stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
+            stmt.o2.s1 = func->m_owner.m_vinstr_temp[0]->codeAddress();
             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
             code_push_statement(code, &stmt, instr->m_context);
 
@@ -2644,12 +2636,12 @@ static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *bloc
                 stmt.opcode = INSTR_BITAND;
                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + j;
                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress();
-                stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
+                stmt.o3.s1 = func->m_owner.m_vinstr_temp[0]->codeAddress() + j;
                 code_push_statement(code, &stmt, instr->m_context);
             }
             stmt.opcode = INSTR_SUB_V;
             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
-            stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
+            stmt.o2.s1 = func->m_owner.m_vinstr_temp[0]->codeAddress();
             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
             code_push_statement(code, &stmt, instr->m_context);
 
@@ -2666,12 +2658,12 @@ static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *bloc
                 code_push_statement(code, &stmt, instr->m_context);
                 stmt.o1.s1 = instr->_m_ops[1]->codeAddress() + (j + 2) % 3;
                 stmt.o2.s1 = instr->_m_ops[2]->codeAddress() + (j + 1) % 3;
-                stmt.o3.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress() + j;
+                stmt.o3.s1 = func->m_owner.m_vinstr_temp[0]->codeAddress() + j;
                 code_push_statement(code, &stmt, instr->m_context);
             }
             stmt.opcode = INSTR_SUB_V;
             stmt.o1.s1 = instr->_m_ops[0]->codeAddress();
-            stmt.o2.s1 = func->m_owner->m_vinstr_temp[0]->codeAddress();
+            stmt.o2.s1 = func->m_owner.m_vinstr_temp[0]->codeAddress();
             stmt.o3.s1 = instr->_m_ops[0]->codeAddress();
             code_push_statement(code, &stmt, instr->m_context);
 
@@ -2811,17 +2803,17 @@ static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *bloc
             first = instr->m_params.size();
             for (; p < first; ++p)
             {
-                ir_builder *ir = func->m_owner;
+                ir_builder &ir = func->m_owner;
                 ir_value *param = instr->m_params[p];
                 ir_value *targetparam;
 
                 if (param->m_callparam)
                     continue;
 
-                if (p-8 >= ir->m_extparams.size())
-                    ir->generateExtparam();
+                if (p-8 >= ir.m_extparams.size())
+                    ir.generateExtparam();
 
-                targetparam = ir->m_extparams[p-8];
+                targetparam = ir.m_extparams[p-8];
 
                 stmt.opcode = INSTR_STORE_F;
                 stmt.o3.u1 = 0;
@@ -3010,7 +3002,7 @@ bool ir_builder::generateGlobalFunction(ir_value *global)
     fun.name = global->m_code.name;
     fun.file = filestring(global->m_context.file);
     fun.profile = 0; /* always 0 */
-    fun.nargs = vec_size(irfun->m_params);
+    fun.nargs = irfun->m_params.size();
     if (fun.nargs > 8)
         fun.nargs = 8;
 
@@ -3073,9 +3065,9 @@ void ir_builder::generateExtparam()
 
 static bool gen_function_extparam_copy(code_t *code, ir_function *self)
 {
-    ir_builder *ir = self->m_owner;
+    ir_builder &ir = self->m_owner;
 
-    size_t numparams = vec_size(self->m_params);
+    size_t numparams = self->m_params.size();
     if (!numparams)
         return true;
 
@@ -3084,10 +3076,10 @@ static bool gen_function_extparam_copy(code_t *code, ir_function *self)
     stmt.o3.s1 = 0;
     for (size_t i = 8; i < numparams; ++i) {
         size_t ext = i - 8;
-        if (ext >= ir->m_extparams.size())
-            ir->generateExtparam();
+        if (ext >= ir.m_extparams.size())
+            ir.generateExtparam();
 
-        ir_value *ep = ir->m_extparams[ext];
+        ir_value *ep = ir.m_extparams[ext];
 
         stmt.opcode = type_store_instr[self->m_locals[i]->m_vtype];
         if (self->m_locals[i]->m_vtype == TYPE_FIELD &&
@@ -3107,11 +3099,11 @@ static bool gen_function_varargs_copy(code_t *code, ir_function *self)
 {
     size_t i, ext, numparams, maxparams;
 
-    ir_builder *ir = self->m_owner;
+    ir_builder &ir = self->m_owner;
     ir_value   *ep;
     prog_section_statement_t stmt;
 
-    numparams = vec_size(self->m_params);
+    numparams = self->m_params.size();
     if (!numparams)
         return true;
 
@@ -3126,10 +3118,10 @@ static bool gen_function_varargs_copy(code_t *code, ir_function *self)
             continue;
         }
         ext = i - 8;
-        while (ext >= ir->m_extparams.size())
-            ir->generateExtparam();
+        while (ext >= ir.m_extparams.size())
+            ir.generateExtparam();
 
-        ep = ir->m_extparams[ext];
+        ep = ir.m_extparams[ext];
 
         stmt.o1.u1 = ep->codeAddress();
         stmt.o2.u1 = self->m_locals[i].get()->codeAddress();
@@ -3952,14 +3944,13 @@ void ir_function_dump(ir_function *f, char *ind,
 void ir_block_dump(ir_block* b, char *ind,
                    int (*oprintf)(const char*, ...))
 {
-    size_t i;
     oprintf("%s:%s\n", ind, b->m_label.c_str());
     util_strncat(ind, "\t", IND_BUFSZ-1);
 
-    if (b->m_instr && b->m_instr[0])
+    if (!b->m_instr.empty() && b->m_instr[0])
         oprintf("%s (%i) [entry]\n", ind, (int)(b->m_instr[0]->m_eid-1));
-    for (i = 0; i < vec_size(b->m_instr); ++i)
-        ir_instr_dump(b->m_instr[i], ind, oprintf);
+    for (auto &i : b->m_instr)
+        ir_instr_dump(i, ind, oprintf);
     ind[strlen(ind)-1] = 0;
 }