* stage constant folding, where, with the help of the AST, operator
* usages can be constant folded. Then there is the constant folding
* in the IR, for things like eliding if statements.
- *
+ *
* This file is thus split into two parts.
*/
/*
* Implementation of basic vector math for vec3_t, for trivial constant
* folding.
- *
+ *
* TODO: gcc/clang hinting for autovectorization
*/
static GMQCC_INLINE vec3_t vec3_add(vec3_t a, vec3_t b) {
return out;
}
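+/*
+ * Bitwise helpers operate component-wise: each float component is
+ * truncated to qcint_t, the integer operation is applied, and the
+ * result is converted back to qcfloat_t.
+ */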
+static GMQCC_INLINE vec3_t vec3_or(vec3_t a, vec3_t b) {
+ vec3_t out;
+ out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b.x));
+ out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b.y));
+ out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b.z));
+ return out;
+}
+
+static GMQCC_INLINE vec3_t vec3_orvf(vec3_t a, qcfloat_t b) {
+ vec3_t out;
+ out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b));
+ out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b));
+ out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b));
+ return out;
+}
+
+static GMQCC_INLINE vec3_t vec3_and(vec3_t a, vec3_t b) {
+ vec3_t out;
+ out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b.x));
+ out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b.y));
+ out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b.z));
+ return out;
+}
+
+static GMQCC_INLINE vec3_t vec3_andvf(vec3_t a, qcfloat_t b) {
+ vec3_t out;
+ out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b));
+ out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b));
+ out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b));
+ return out;
+}
+
static GMQCC_INLINE vec3_t vec3_xor(vec3_t a, vec3_t b) {
vec3_t out;
out.x = (qcfloat_t)(((qcint_t)a.x) ^ ((qcint_t)b.x));
return out;
}
+static GMQCC_INLINE vec3_t vec3_not(vec3_t a) {
+ vec3_t out;
+ out.x = (qcfloat_t)(~((qcint_t)a.x));
+ out.y = (qcfloat_t)(~((qcint_t)a.y));
+ out.z = (qcfloat_t)(~((qcint_t)a.z));
+ return out;
+}
+
static GMQCC_INLINE qcfloat_t vec3_mulvv(vec3_t a, vec3_t b) {
return (a.x * b.x + a.y * b.y + a.z * b.z);
}
return (a.x && a.y && a.z);
}
+static GMQCC_INLINE vec3_t vec3_cross(vec3_t a, vec3_t b) {
+ vec3_t out;
+ out.x = a.y * b.z - a.z * b.y;
+ out.y = a.z * b.x - a.x * b.z;
+ out.z = a.x * b.y - a.y * b.x;
+ return out;
+}
+
static lex_ctx_t fold_ctx(fold_t *fold) {
lex_ctx_t ctx;
if (fold->parser->lex)
return !!v->constval.vfloat;
case TYPE_INTEGER:
return !!v->constval.vint;
- case TYPE_VECTOR:
+ case TYPE_VECTOR:
if (OPTS_FLAG(CORRECT_LOGIC))
return vec3_pbool(v->constval.vvec);
return !!(v->constval.vvec.x);
((ast_expression*)(X))->vtype != TYPE_FUNCTION)
#define fold_can_2(X, Y) (fold_can_1(X) && fold_can_1(Y))
+#define fold_can_div(X) (fold_immvalue_float(X) != 0.0f)
#define fold_immvalue_float(E) ((E)->constval.vfloat)
#define fold_immvalue_vector(E) ((E)->constval.vvec)
#define fold_immvalue_string(E) ((E)->constval.vstring)
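+
+/*
+ * Constant-folded division or modulo by zero yields +infinity; prefer the
+ * C99 INFINITY macro when the libc provides it, otherwise fall back to
+ * evaluating 1.0 / 0.0.
+ */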
+#ifdef INFINITY
+# define fold_infinity_float INFINITY
+#else
+# define fold_infinity_float (1.0 / 0.0)
+#endif /*! INFINITY */
+
+#define fold_infinity_vector \
+ vec3_create( \
+ fold_infinity_float, \
+ fold_infinity_float, \
+ fold_infinity_float \
+ )
+
fold_t *fold_init(parser_t *parser) {
fold_t *fold = (fold_t*)mem_a(sizeof(fold_t));
fold->parser = parser;
(void)fold_constgen_float (fold, 0.0f);
(void)fold_constgen_float (fold, 1.0f);
(void)fold_constgen_float (fold, -1.0f);
+ (void)fold_constgen_float (fold, fold_infinity_float); /* +inf */
(void)fold_constgen_vector(fold, vec3_create(0.0f, 0.0f, 0.0f));
+ (void)fold_constgen_vector(fold, vec3_create(-1.0f, -1.0f, -1.0f));
+ (void)fold_constgen_vector(fold, fold_infinity_vector); /* +inf */
return fold;
}
/*
* vector-component constant folding works by matching the component sets
* to eliminate expensive operations on whole-vectors (3 components at runtime).
- * to achive this effect in a clean manner this function generalizes the
+ * to achieve this effect in a clean manner this function generalizes the
* values through the use of a set parameter, which is used as an indexing method
* for creating the elided ast binary expression.
*
* vec.z is the same as set[2]-'x': when set[2] is 'z', 'z'-'x' results in a
* literal value of 2. Using this 2, we know that taking the address of vec->x (float)
* and indexing it with this literal will yield the immediate address of that component.
- *
+ *
* Of course more work needs to be done to generate the correct index for the ast_member_new
* call, which is no problem: set[0]-'x' suffices for that job.
*/
out = (ast_expression*)ast_member_new(fold_ctx(fold), (ast_expression*)sel, set[0]-'x', NULL);
out->node.keep = false;
((ast_member*)out)->rvalue = true;
- if (x != -1)
+ if (x != -1.0f)
return (ast_expression*)ast_binary_new(fold_ctx(fold), INSTR_MUL_F, fold_constgen_float(fold, x), out);
}
return NULL;
static GMQCC_INLINE ast_expression *fold_op_div(fold_t *fold, ast_value *a, ast_value *b) {
if (isfloat(a)) {
- if (fold_can_2(a, b))
- return fold_constgen_float(fold, fold_immvalue_float(a) / fold_immvalue_float(b));
+ if (fold_can_2(a, b)) {
+ if (fold_can_div(b))
+ return fold_constgen_float(fold, fold_immvalue_float(a) / fold_immvalue_float(b));
+ else
+ return (ast_expression*)fold->imm_float[3]; /* inf */
+ } else if (fold_can_1(b)) {
+ return (ast_expression*)ast_binary_new(
+ fold_ctx(fold),
+ INSTR_MUL_F,
+ (ast_expression*)a,
+ fold_constgen_float(fold, 1.0f / fold_immvalue_float(b))
+ );
+ }
} else if (isvector(a)) {
- if (fold_can_2(a, b))
- return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(a), 1.0f / fold_immvalue_float(b)));
- else {
+ if (fold_can_2(a, b)) {
+ if (fold_can_div(b)) {
+ return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(a), 1.0f / fold_immvalue_float(b)));
+            } else {
+ return (ast_expression*)fold->imm_vector[2]; /* inf */
+ }
+ } else {
return (ast_expression*)ast_binary_new(
fold_ctx(fold),
INSTR_MUL_VF,
}
static GMQCC_INLINE ast_expression *fold_op_mod(fold_t *fold, ast_value *a, ast_value *b) {
- if (fold_can_2(a, b))
- return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) % ((qcint_t)fold_immvalue_float(b))));
+ if (fold_can_2(a, b)) {
+ if (fold_can_div(b))
+ return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) % ((qcint_t)fold_immvalue_float(b))));
+ else
+ return (ast_expression*)fold->imm_float[3]; /* inf */
+ }
return NULL;
}
static GMQCC_INLINE ast_expression *fold_op_bor(fold_t *fold, ast_value *a, ast_value *b) {
- if (fold_can_2(a, b))
- return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) | ((qcint_t)fold_immvalue_float(b))));
+ if (isfloat(a)) {
+ if (fold_can_2(a, b))
+ return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) | ((qcint_t)fold_immvalue_float(b))));
+ } else {
+ if (isvector(b)) {
+ if (fold_can_2(a, b))
+ return fold_constgen_vector(fold, vec3_or(fold_immvalue_vector(a), fold_immvalue_vector(b)));
+ } else {
+ if (fold_can_2(a, b))
+ return fold_constgen_vector(fold, vec3_orvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
+ }
+ }
return NULL;
}
static GMQCC_INLINE ast_expression *fold_op_band(fold_t *fold, ast_value *a, ast_value *b) {
- if (fold_can_2(a, b))
- return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) & ((qcint_t)fold_immvalue_float(b))));
+ if (isfloat(a)) {
+ if (fold_can_2(a, b))
+ return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) & ((qcint_t)fold_immvalue_float(b))));
+ } else {
+ if (isvector(b)) {
+ if (fold_can_2(a, b))
+ return fold_constgen_vector(fold, vec3_and(fold_immvalue_vector(a), fold_immvalue_vector(b)));
+ } else {
+ if (fold_can_2(a, b))
+ return fold_constgen_vector(fold, vec3_andvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
+ }
+ }
return NULL;
}
return (ast_expression*)b;
} else {
return fold_constgen_float (
- fold,
+ fold,
((expr) ? (fold_immediate_true(fold, a) || fold_immediate_true(fold, b))
: (fold_immediate_true(fold, a) && fold_immediate_true(fold, b)))
? 1
}
static GMQCC_INLINE ast_expression *fold_op_bnot(fold_t *fold, ast_value *a) {
- if (fold_can_1(a))
- return fold_constgen_float(fold, ~((qcint_t)fold_immvalue_float(a)));
+ if (isfloat(a)) {
+ if (fold_can_1(a))
+ return fold_constgen_float(fold, ~((qcint_t)fold_immvalue_float(a)));
+ } else {
+ if (isvector(a)) {
+ if (fold_can_1(a))
+ return fold_constgen_vector(fold, vec3_not(fold_immvalue_vector(a)));
+ }
+ }
+ return NULL;
+}
+
+static GMQCC_INLINE ast_expression *fold_op_cross(fold_t *fold, ast_value *a, ast_value *b) {
+ if (fold_can_2(a, b))
+ return fold_constgen_vector(fold, vec3_cross(fold_immvalue_vector(a), fold_immvalue_vector(b)));
return NULL;
}
fold_op_case(2, ('!', '='), cmp, (fold, a, b, true));
fold_op_case(2, ('=', '='), cmp, (fold, a, b, false));
fold_op_case(2, ('~', 'P'), bnot, (fold, a));
+ fold_op_case(2, ('>', '<'), cross, (fold, a, b));
}
#undef fold_op_case
compile_error(fold_ctx(fold), "internal error: attempted to constant-fold for unsupported operator");
return NULL;
}
+/*
+ * Constant folding for compiler intrinsics: a similar approach to operator
+ * folding, primarily individual folding functions for each intrinsic,
+ * and a generic selection function.
+ */
+static GMQCC_INLINE ast_expression *fold_intrin_mod(fold_t *fold, ast_value *lhs, ast_value *rhs) {
+ return fold_constgen_float(
+ fold,
+ fmodf(
+ fold_immvalue_float(lhs),
+ fold_immvalue_float(rhs)
+ )
+ );
+}
+
+static GMQCC_INLINE ast_expression *fold_intrin_pow(fold_t *fold, ast_value *lhs, ast_value *rhs) {
+ return fold_constgen_float(
+ fold,
+ powf(
+ fold_immvalue_float(lhs),
+ fold_immvalue_float(rhs)
+ )
+ );
+}
+
+static GMQCC_INLINE ast_expression *fold_intrin_exp(fold_t *fold, ast_value *value) {
+ return fold_constgen_float(fold, exp(fold_immvalue_float(value)));
+}
+
+static GMQCC_INLINE ast_expression *fold_intrin_isnan(fold_t *fold, ast_value *value) {
+    return fold_constgen_float(fold, isnan(fold_immvalue_float(value)) ? 1.0f : 0.0f);
+}
+
+static GMQCC_INLINE ast_expression *fold_intrin_fabs(fold_t *fold, ast_value *value) {
+ return fold_constgen_float(fold, fabs(fold_immvalue_float(value)));
+}
+
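+/*
+ * Generic selection: dispatches on the intrinsic name and returns NULL
+ * for intrinsics that are not handled here.
+ */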
+ast_expression *fold_intrin(fold_t *fold, const char *intrin, ast_expression **arg) {
+ if (!strcmp(intrin, "mod")) return fold_intrin_mod (fold, (ast_value*)arg[0], (ast_value*)arg[1]);
+ if (!strcmp(intrin, "pow")) return fold_intrin_pow (fold, (ast_value*)arg[0], (ast_value*)arg[1]);
+ if (!strcmp(intrin, "exp")) return fold_intrin_exp (fold, (ast_value*)arg[0]);
+ if (!strcmp(intrin, "isnan")) return fold_intrin_isnan(fold, (ast_value*)arg[0]);
+ if (!strcmp(intrin, "fabs")) return fold_intrin_fabs (fold, (ast_value*)arg[0]);
+
+ return NULL;
+}
+
/*
* These are all the actual constant folding methods that happen in between
* the AST/IR stage of the compiler, i.e. eliminating branches for const
* expressions, which is the only supported thing so far. We undefine the
* testing macros here because an ir_value is different from an ast_value.
*/
+#undef expect
#undef isfloat
#undef isstring
#undef isvector
/*#define isstring(X) ((X)->vtype == TYPE_STRING)*/
/*#define isvector(X) ((X)->vtype == TYPE_VECTOR)*/
#define fold_immvalue_float(X) ((X)->constval.vfloat)
-/*#define fold_immvalue_vector(X) ((X)->constval.vvec)*/
+#define fold_immvalue_vector(X) ((X)->constval.vvec)
/*#define fold_immvalue_string(X) ((X)->constval.vstring)*/
#define fold_can_1(X) ((X)->hasvalue && (X)->cvq == CV_CONST)
/*#define fold_can_2(X,Y) (fold_can_1(X) && fold_can_1(Y))*/
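+
+/*
+ * Superfluous-operation peephole: elides binary operations whose right
+ * operand is an identity constant, e.g. 'a * 1', 'a / 1', 'a + 0',
+ * 'a - 0', and the vector equivalents 'v * '1 1 1'' and 'v + '0 0 0''.
+ * Returns the left operand unchanged when the fold applies, NULL otherwise.
+ */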
+ast_expression *fold_superfluous(ast_expression *left, ast_expression *right, int op) {
+ ast_value *load;
+
+    if (!ast_istype(right, ast_value) || !fold_can_1((load = (ast_value*)right)))
+ return NULL;
+
+ switch (op) {
+ case INSTR_MUL_F:
+ case INSTR_DIV_F:
+ if (fold_immvalue_float(load) == 1.0f) {
+ ++opts_optimizationcount[OPTIM_PEEPHOLE];
+ return (ast_expression*)left;
+ }
+ break;
+
+        case INSTR_ADD_F:
+        case INSTR_SUB_F:
+            if (fold_immvalue_float(load) == 0.0f) {
+                ++opts_optimizationcount[OPTIM_PEEPHOLE];
+                return (ast_expression*)left;
+            }
+            break;
+
+ case INSTR_MUL_V:
+ if (vec3_cmp(fold_immvalue_vector(load), vec3_create(1, 1, 1))) {
+ ++opts_optimizationcount[OPTIM_PEEPHOLE];
+ return (ast_expression*)left;
+ }
+ break;
+        case INSTR_ADD_V:
+        case INSTR_SUB_V:
+            if (vec3_cmp(fold_immvalue_vector(load), vec3_create(0, 0, 0))) {
+                ++opts_optimizationcount[OPTIM_PEEPHOLE];
+                return (ast_expression*)left;
+            }
+            break;
+ }
+
+ return NULL;
+}
+
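+/*
+ * Branch elimination for compile-time-constant conditions: when the
+ * condition folds to a known constant float and constant-fold DCE is
+ * enabled, only the taken path is generated and the untaken branch is
+ * dropped. Returns -1 when nothing was done.
+ */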
-int fold_cond(ir_value *condval, ast_function *func, ast_ifthen *branch) {
+static GMQCC_INLINE int fold_cond(ir_value *condval, ast_function *func, ast_ifthen *branch) {
if (isfloat(condval) && fold_can_1(condval) && OPTS_OPTIMIZATION(OPTIM_CONST_FOLD_DCE)) {
ast_expression_codegen *cgen;
ir_block *elide;
ir_value *dummy;
- bool istrue = (fold_immvalue_float(condval) == 1.0f && branch->on_true);
+ bool istrue = (fold_immvalue_float(condval) != 0.0f && branch->on_true);
bool isfalse = (fold_immvalue_float(condval) == 0.0f && branch->on_false);
ast_expression *path = (istrue) ? branch->on_true :
(isfalse) ? branch->on_false : NULL;
- if (!path)
- return false;
+ if (!path) {
+ /*
+         * No path to take implies the condition evaluated as if(0) with no
+         * else block, so eliminate all of the code.
+ */
+ ++opts_optimizationcount[OPTIM_CONST_FOLD_DCE];
+ return true;
+ }
+
if (!(elide = ir_function_create_block(ast_ctx(branch), func->ir_func, ast_function_label(func, ((istrue) ? "ontrue" : "onfalse")))))
return false;
if (!(*(cgen = path->codegen))((ast_expression*)path, func, false, &dummy))
}
return -1; /* nothing done */
}
+
+int fold_cond_ternary(ir_value *condval, ast_function *func, ast_ternary *branch) {
+ return fold_cond(condval, func, (ast_ifthen*)branch);
+}
+
+int fold_cond_ifthen(ir_value *condval, ast_function *func, ast_ifthen *branch) {
+ return fold_cond(condval, func, branch);
+}