return false;
}
-static bool GMQCC_WARN vec_ir_block_find(ir_block **vec, ir_block *what, size_t *idx)
+// Linear search for `what` in `vec`; returns true if found and, when `idx`
+// is non-null, stores the element's position there.
+static bool GMQCC_WARN vec_ir_block_find(std::vector<ir_block *> &vec, ir_block *what, size_t *idx)
{
- size_t i;
- size_t len = vec_size(vec);
- for (i = 0; i < len; ++i) {
- if (vec[i] == what) {
- if (idx) *idx = i;
- return true;
- }
+ for (auto &it : vec) {
+ if (it != what)
+ continue;
+ if (idx)
+ // recover the index from the element's address; &vec[0] is only
+ // evaluated when the loop ran, i.e. vec is non-empty
+ *idx = &it - &vec[0];
+ return true;
}
return false;
}
{
for (auto& bp : self->m_blocks) {
ir_block *block = bp.get();
- for (size_t i = 0; i < vec_size(block->m_instr); ++i) {
+ for (size_t i = 0; i < block->m_instr.size(); ++i) {
ir_instr *inst;
inst = block->m_instr[i];
++opts_optimizationcount[OPTIM_PEEPHOLE];
(void)!ir_instr_op(oper, 0, store->_m_ops[0], true);
- vec_remove(block->m_instr, i, 1);
+ block->m_instr.erase(block->m_instr.begin() + i);
delete store;
}
else if (inst->m_opcode == VINSTR_COND)
(void)!ir_instr_op(inst, 0, inot->_m_ops[1], false);
/* remove NOT */
tmp = inot->m_owner;
- for (inotid = 0; inotid < vec_size(tmp->m_instr); ++inotid) {
+ for (inotid = 0; inotid < tmp->m_instr.size(); ++inotid) {
if (tmp->m_instr[inotid] == inot)
break;
}
- if (inotid >= vec_size(tmp->m_instr)) {
+ if (inotid >= tmp->m_instr.size()) {
compile_error(inst->m_context, "sanity-check failed: failed to find instruction to optimize out");
return false;
}
- vec_remove(tmp->m_instr, inotid, 1);
+ tmp->m_instr.erase(tmp->m_instr.begin() + inotid);
delete inot;
/* swap ontrue/onfalse */
tmp = inst->m_bops[0];
ir_value *funcval;
ir_instr *ret, *call, *store = nullptr;
- if (!block->m_final || vec_size(block->m_instr) < 2)
+ if (!block->m_final || block->m_instr.size() < 2)
continue;
- ret = block->m_instr[vec_size(block->m_instr)-1];
+ ret = block->m_instr.back();
if (ret->m_opcode != INSTR_DONE && ret->m_opcode != INSTR_RETURN)
continue;
- call = block->m_instr[vec_size(block->m_instr)-2];
+ call = block->m_instr[block->m_instr.size()-2];
if (call->m_opcode >= INSTR_STORE_F && call->m_opcode <= INSTR_STORE_FNC) {
/* account for the unoptimized
* CALL
* RETURN %tmp
* version
*/
- if (vec_size(block->m_instr) < 3)
+ if (block->m_instr.size() < 3)
continue;
store = call;
- call = block->m_instr[vec_size(block->m_instr)-3];
+ call = block->m_instr[block->m_instr.size()-3];
}
if (call->m_opcode < INSTR_CALL0 || call->m_opcode > INSTR_CALL8)
{
++opts_optimizationcount[OPTIM_PEEPHOLE];
call->_m_ops[0] = store->_m_ops[0];
- vec_remove(block->m_instr, vec_size(block->m_instr) - 2, 1);
+ block->m_instr.erase(block->m_instr.end()-2);
delete store;
}
else
continue;
++opts_optimizationcount[OPTIM_TAIL_RECURSION];
- vec_shrinkby(block->m_instr, 2);
+ block->m_instr.erase(block->m_instr.end()-2, block->m_instr.end());
block->m_final = false; /* open it back up */
// claiming it's unused, otherwise skip the vector entierly
if (v->m_vtype == TYPE_VECTOR)
{
- size_t mask = (1 << 0) | (1 << 1) | (1 << 2), bits = 0;
+ size_t mask = (1 << 3) - 1, bits = 0;
for (size_t i = 0; i < 3; i++)
if (!v->m_members[i] || (v->m_members[i]->m_reads.empty()
&& v->m_members[i]->m_writes.size()))
ir_block::~ir_block()
{
- for (size_t i = 0; i != vec_size(m_instr); ++i)
- delete m_instr[i];
- vec_free(m_instr);
- vec_free(m_entries);
- vec_free(m_exits);
+ // The block owns its instructions and must delete them; m_entries and
+ // m_exits hold non-owning pointers, so std::vector's own destructor is
+ // now enough for them (the old vec_free calls are gone).
+ for (auto &i : m_instr)
+ delete i;
}
static void ir_block_delete_quick(ir_block* self)
{
- size_t i;
- for (i = 0; i != vec_size(self->m_instr); ++i)
- ir_instr_delete_quick(self->m_instr[i]);
- vec_free(self->m_instr);
+ // Quick-delete each instruction, then clear the vector so that
+ // ~ir_block (run by `delete self` below) does not delete them again.
+ for (auto &i : self->m_instr)
+ ir_instr_delete_quick(i);
+ self->m_instr.clear();
delete self;
}
delete in;
return false;
}
- vec_push(self->m_instr, in);
+ self->m_instr.push_back(in);
return true;
}
delete in;
return false;
}
- vec_push(self->m_instr, in);
+ self->m_instr.push_back(in);
return true;
}
return false;
}
- vec_push(self->m_instr, in);
+ self->m_instr.push_back(in);
return true;
}
in->m_bops[0] = ontrue;
in->m_bops[1] = onfalse;
- vec_push(self->m_instr, in);
+ self->m_instr.push_back(in);
- vec_push(self->m_exits, ontrue);
- vec_push(self->m_exits, onfalse);
- vec_push(ontrue->m_entries, self);
- vec_push(onfalse->m_entries, self);
+ self->m_exits.push_back(ontrue);
+ self->m_exits.push_back(onfalse);
+ ontrue->m_entries.push_back(self);
+ onfalse->m_entries.push_back(self);
return true;
}
return false;
in->m_bops[0] = to;
- vec_push(self->m_instr, in);
+ self->m_instr.push_back(in);
- vec_push(self->m_exits, to);
- vec_push(to->m_entries, self);
+ self->m_exits.push_back(to);
+ to->m_entries.push_back(self);
return true;
}
delete in;
return nullptr;
}
- vec_push(self->m_instr, in);
+ self->m_instr.push_back(in);
return in;
}
delete in;
return nullptr;
}
- vec_push(self->m_instr, in);
+ self->m_instr.push_back(in);
/*
if (noreturn) {
if (!ir_block_create_return(self, ctx, nullptr)) {
goto on_error;
}
- vec_push(self->m_instr, instr);
+ self->m_instr.push_back(instr);
return out;
on_error:
* to a list so we don't need to loop through blocks
* - anyway: "don't optimize YET"
*/
- for (i = 0; i < vec_size(self->m_instr); ++i)
+ for (i = 0; i < self->m_instr.size(); ++i)
{
ir_instr *instr = self->m_instr[i];
if (instr->m_opcode != VINSTR_PHI)
continue;
- vec_remove(self->m_instr, i, 1);
+ self->m_instr.erase(self->m_instr.begin()+i);
--i; /* NOTE: i+1 below */
for (auto &it : instr->m_phi) {
return false;
} else {
/* force a move instruction */
- ir_instr *prevjump = vec_last(b->m_instr);
- vec_pop(b->m_instr);
+ ir_instr *prevjump = b->m_instr.back();
+ b->m_instr.pop_back();
b->m_final = false;
instr->_m_ops[0]->m_store = store_global;
if (!ir_block_create_store(b, instr->m_context, instr->_m_ops[0], v))
return false;
instr->_m_ops[0]->m_store = store_value;
- vec_push(b->m_instr, prevjump);
+ b->m_instr.push_back(prevjump);
b->m_final = true;
}
}
*/
static void ir_block_enumerate(ir_block *self, size_t *_eid)
{
- size_t i;
size_t eid = *_eid;
- for (i = 0; i < vec_size(self->m_instr); ++i)
- {
- self->m_instr[i]->m_eid = eid++;
- }
+ // Assign sequential ids to every instruction, continuing from *_eid,
+ // and hand the next free id back to the caller.
+ for (auto &i : self->m_instr)
+ i->m_eid = eid++;
*_eid = eid;
}
* This is the counterpart to register-allocation in register machines.
*/
struct function_allocator {
- ir_value **locals;
- size_t *sizes;
- size_t *positions;
- bool *unique;
+ // unique_ptr makes the allocator own its slot values, replacing the
+ // manual delete loops / vec_free calls of the old cleanup path.
+ std::vector<std::unique_ptr<ir_value>> locals;
+ std::vector<size_t> sizes;
+ std::vector<size_t> positions;
+ std::vector<bool> unique;
};
static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
ir_value *slot;
size_t vsize = var->size();
- var->m_code.local = vec_size(alloc->locals);
+ var->m_code.local = alloc->locals.size();
slot = new ir_value("reg", store_global, var->m_vtype);
if (!slot)
if (!slot->mergeLife(var))
goto localerror;
- vec_push(alloc->locals, slot);
- vec_push(alloc->sizes, vsize);
- vec_push(alloc->unique, var->m_unique_life);
+ alloc->locals.emplace_back(slot);
+ alloc->sizes.push_back(vsize);
+ alloc->unique.push_back(var->m_unique_life);
return true;
static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
{
size_t a;
- ir_value *slot;
if (v->m_unique_life)
return function_allocator_alloc(alloc, v);
- for (a = 0; a < vec_size(alloc->locals); ++a)
+ for (a = 0; a < alloc->locals.size(); ++a)
{
/* if it's reserved for a unique liferange: skip */
if (alloc->unique[a])
continue;
- slot = alloc->locals[a];
+ ir_value *slot = alloc->locals[a].get();
/* never resize parameters
* will be required later when overlapping temps + locals
v->m_code.local = a;
return true;
}
- if (a >= vec_size(alloc->locals)) {
+ if (a >= alloc->locals.size()) {
if (!function_allocator_alloc(alloc, v))
return false;
}
bool ir_function_allocate_locals(ir_function *self)
{
- bool retval = true;
size_t pos;
bool opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
if (self->m_locals.empty() && self->m_values.empty())
return true;
- globalloc.locals = nullptr;
- globalloc.sizes = nullptr;
- globalloc.positions = nullptr;
- globalloc.unique = nullptr;
- lockalloc.locals = nullptr;
- lockalloc.sizes = nullptr;
- lockalloc.positions = nullptr;
- lockalloc.unique = nullptr;
-
size_t i;
for (i = 0; i < self->m_locals.size(); ++i)
{
else
v->m_locked = true; /* lock parameters locals */
if (!function_allocator_alloc((v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
- goto error;
+ return false;
}
for (; i < self->m_locals.size(); ++i)
{
if (v->m_life.empty())
continue;
if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
- goto error;
+ return false;
}
/* Allocate a slot for any value that still exists */
ir_instr *call = v->m_reads[0];
if (!vec_ir_value_find(call->m_params, v, ¶m)) {
irerror(call->m_context, "internal error: unlocked parameter %s not found", v->m_name.c_str());
- goto error;
+ return false;
}
++opts_optimizationcount[OPTIM_CALL_STORES];
v->m_callparam = true;
}
if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
- goto error;
+ return false;
}
- if (!lockalloc.sizes && !globalloc.sizes) {
- goto cleanup;
- }
- vec_push(lockalloc.positions, 0);
- vec_push(globalloc.positions, 0);
+ if (lockalloc.sizes.empty() && globalloc.sizes.empty())
+ return true;
+
+ lockalloc.positions.push_back(0);
+ globalloc.positions.push_back(0);
/* Adjust slot positions based on sizes */
- if (lockalloc.sizes) {
- pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
- for (i = 1; i < vec_size(lockalloc.sizes); ++i)
+ if (!lockalloc.sizes.empty()) {
+ pos = (lockalloc.sizes.size() ? lockalloc.positions[0] : 0);
+ for (i = 1; i < lockalloc.sizes.size(); ++i)
{
pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
- vec_push(lockalloc.positions, pos);
+ lockalloc.positions.push_back(pos);
}
- self->m_allocated_locals = pos + vec_last(lockalloc.sizes);
+ self->m_allocated_locals = pos + lockalloc.sizes.back();
}
- if (globalloc.sizes) {
- pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
- for (i = 1; i < vec_size(globalloc.sizes); ++i)
+ if (!globalloc.sizes.empty()) {
+ pos = (globalloc.sizes.size() ? globalloc.positions[0] : 0);
+ for (i = 1; i < globalloc.sizes.size(); ++i)
{
pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
- vec_push(globalloc.positions, pos);
+ globalloc.positions.push_back(pos);
}
- self->m_globaltemps = pos + vec_last(globalloc.sizes);
+ self->m_globaltemps = pos + globalloc.sizes.back();
}
/* Locals need to know their new position */
value->m_code.local = globalloc.positions[value->m_code.local];
}
- goto cleanup;
-
-error:
- retval = false;
-cleanup:
- for (i = 0; i < vec_size(lockalloc.locals); ++i)
- delete lockalloc.locals[i];
- for (i = 0; i < vec_size(globalloc.locals); ++i)
- delete globalloc.locals[i];
- vec_free(globalloc.unique);
- vec_free(globalloc.locals);
- vec_free(globalloc.sizes);
- vec_free(globalloc.positions);
- vec_free(lockalloc.unique);
- vec_free(lockalloc.locals);
- vec_free(lockalloc.sizes);
- vec_free(lockalloc.positions);
- return retval;
+ return true;
}
/* Get information about which operand
{
ir_instr *instr;
ir_value *value;
- size_t i, o, p, mem;
+ size_t i, o, mem;
// bitmasks which operands are read from or written to
size_t read, write;
self->m_living.clear();
- p = vec_size(self->m_exits);
- for (i = 0; i < p; ++i) {
- ir_block *prev = self->m_exits[i];
+ for (auto &prev : self->m_exits) {
for (auto &it : prev->m_living)
if (!vec_ir_value_find(self->m_living, it, nullptr))
self->m_living.push_back(it);
}
- i = vec_size(self->m_instr);
+ i = self->m_instr.size();
while (i)
{ --i;
instr = self->m_instr[i];
block->m_generated = true;
block->m_code_start = code->statements.size();
- for (i = 0; i < vec_size(block->m_instr); ++i)
+ for (i = 0; i < block->m_instr.size(); ++i)
{
instr = block->m_instr[i];
void ir_block_dump(ir_block* b, char *ind,
int (*oprintf)(const char*, ...))
{
- size_t i;
oprintf("%s:%s\n", ind, b->m_label.c_str());
util_strncat(ind, "\t", IND_BUFSZ-1);
- if (b->m_instr && b->m_instr[0])
+ // empty() replaces the old NULL-vector check; the element test is kept
+ // so a stored null instruction pointer is still skipped
+ if (!b->m_instr.empty() && b->m_instr[0])
oprintf("%s (%i) [entry]\n", ind, (int)(b->m_instr[0]->m_eid-1));
- for (i = 0; i < vec_size(b->m_instr); ++i)
- ir_instr_dump(b->m_instr[i], ind, oprintf);
+ for (auto &i : b->m_instr)
+ ir_instr_dump(i, ind, oprintf);
ind[strlen(ind)-1] = 0;
}