+static bool instr_is_operation(uint16_t op)
+{
+ return ( (op >= INSTR_MUL_F && op <= INSTR_GT) ||
+ (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) ||
+ (op == INSTR_ADDRESS) ||
+ (op >= INSTR_NOT_F && op <= INSTR_NOT_FNC) ||
+ (op >= INSTR_AND && op <= INSTR_BITOR) );
+}
+
/*
 * Minor peephole pass over a function's blocks: fold the two-instruction
 * pattern
 *     OP    %tmp, ...        (any opcode accepted by instr_is_operation)
 *     STORE dest, %tmp
 * into a single OP that writes to dest directly, provided %tmp is an
 * SSA temporary (store_value) read exactly once — by that very STORE.
 *
 * Returns true unconditionally; this pass has no failure mode.
 */
bool ir_function_pass_minor(ir_function *self)
{
    size_t b;

    for (b = 0; b < vec_size(self->blocks); ++b) {
        size_t i;
        ir_block *block = self->blocks[b];

        /* the pattern needs at least two instructions */
        if (vec_size(block->instr) < 2)
            continue;

        /* start at 1: instr[i] is the candidate STORE, instr[i-1] the OP */
        for (i = 1; i < vec_size(block->instr); ++i) {
            ir_instr *store;
            ir_instr *oper;
            ir_value *value;

            store = block->instr[i];
            if (store->opcode < INSTR_STORE_F ||
                store->opcode > INSTR_STORE_FNC)
            {
                continue;
            }

            oper = block->instr[i-1];
            if (!instr_is_operation(oper->opcode))
                continue;

            /* the operation's output temporary (operand 0) */
            value = oper->_ops[0];

            /* only do it for SSA values */
            if (value->store != store_value)
                continue;

            /* don't optimize out the temp if it's used later again */
            if (vec_size(value->reads) != 1)
                continue;

            /* The very next store must use this value */
            if (value->reads[0] != store)
                continue;

            /* And of course the store must _read_ from it, so it's in
             * OP 1 */
            if (store->_ops[1] != value)
                continue;

            ++optimization_count[OPTIM_PEEPHOLE];
            /* retarget the operation's output to the store's destination,
             * then remove and free the now-redundant STORE */
            oper->_ops[0] = store->_ops[0];

            vec_remove(block->instr, i, 1);
            ir_instr_delete(store);
        }
    }

    return true;
}
+
/*
 * Tail-recursion pass: when a block ends in a CALL to this very function
 * whose result is immediately RETURNed (optionally through one STORE of
 * the return value), replace the CALL+RETURN with stores of the call's
 * parameters into the function's locals followed by a jump back to the
 * entry block (self->blocks[0]).
 *
 * Returns false only if emitting a replacement store or jump fails;
 * otherwise true.
 */
bool ir_function_pass_tailcall(ir_function *self)
{
    size_t b, p;

    for (b = 0; b < vec_size(self->blocks); ++b) {
        ir_value *funcval;
        ir_instr *ret, *call, *store = NULL;
        ir_block *block = self->blocks[b];

        /* only finalized blocks with at least CALL+RETURN can qualify */
        if (!block->final || vec_size(block->instr) < 2)
            continue;

        /* last instruction must be a return of some kind */
        ret = block->instr[vec_size(block->instr)-1];
        if (ret->opcode != INSTR_DONE && ret->opcode != INSTR_RETURN)
            continue;

        call = block->instr[vec_size(block->instr)-2];
        if (call->opcode >= INSTR_STORE_F && call->opcode <= INSTR_STORE_FNC) {
            /* account for the unoptimized
             * CALL
             * STORE %return, %tmp
             * RETURN %tmp
             * version
             */
            if (vec_size(block->instr) < 3)
                continue;

            store = call;
            call = block->instr[vec_size(block->instr)-3];
        }

        if (call->opcode < INSTR_CALL0 || call->opcode > INSTR_CALL8)
            continue;

        if (store) {
            /* optimize out the STORE: only when the RETURN reads the
             * STORE's destination and the STORE reads the CALL's result */
            if (ret->_ops[0] &&
                ret->_ops[0] == store->_ops[0] &&
                store->_ops[1] == call->_ops[0])
            {
                ++optimization_count[OPTIM_PEEPHOLE];
                call->_ops[0] = store->_ops[0];
                vec_remove(block->instr, vec_size(block->instr) - 2, 1);
                ir_instr_delete(store);
            }
            else
                continue;
        }

        /* the call must produce a result value to compare against */
        if (!call->_ops[0])
            continue;

        /* operand 1 is the callee; it must be this very function for
         * the call to be tail-*recursive* */
        funcval = call->_ops[1];
        if (!funcval)
            continue;
        if (funcval->vtype != TYPE_FUNCTION || funcval->constval.vfunc != self)
            continue;

        /* now we have a CALL and a RET, check if it's a tailcall */
        if (ret->_ops[0] && call->_ops[0] != ret->_ops[0])
            continue;

        ++optimization_count[OPTIM_TAIL_RECURSION];
        /* drop CALL and RETURN from the tail of the block... */
        vec_shrinkby(block->instr, 2);

        block->final = false; /* open it back up */

        /* emit parameter-stores: copy each call argument into the
         * corresponding local before looping */
        for (p = 0; p < vec_size(call->params); ++p) {
            /* assert(call->params_count <= self->locals_count); */
            if (!ir_block_create_store(block, call->context, self->locals[p], call->params[p])) {
                irerror(call->context, "failed to create tailcall store instruction for parameter %i", (int)p);
                return false;
            }
        }
        /* ...then jump back to the function's entry block */
        if (!ir_block_create_jump(block, call->context, self->blocks[0])) {
            irerror(call->context, "failed to create tailcall jump");
            return false;
        }

        ir_instr_delete(call);
        ir_instr_delete(ret);
    }

    return true;
}
+