+
+/*
+ * Constant folding for compiler intrinsics: a similar approach to operator
+ * folding, primarily an individual function for each intrinsic to fold,
+ * and a generic selection function.
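+ *
+ * For example, a call such as pow(2, 4) with constant arguments can be
+ * replaced outright by the immediate constant 16.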
+ */
+static GMQCC_INLINE ast_expression *fold_intrin_isfinite(fold_t *fold, ast_value *a) {
+ return fold_constgen_float(fold, isfinite(fold_immvalue_float(a)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_isinf(fold_t *fold, ast_value *a) {
+ return fold_constgen_float(fold, isinf(fold_immvalue_float(a)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_isnan(fold_t *fold, ast_value *a) {
+ return fold_constgen_float(fold, isnan(fold_immvalue_float(a)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_isnormal(fold_t *fold, ast_value *a) {
+ return fold_constgen_float(fold, isnormal(fold_immvalue_float(a)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_signbit(fold_t *fold, ast_value *a) {
+ return fold_constgen_float(fold, signbit(fold_immvalue_float(a)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_acosh(fold_t *fold, ast_value *a) {
+ return fold_constgen_float(fold, acoshf(fold_immvalue_float(a)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_asinh(fold_t *fold, ast_value *a) {
+ return fold_constgen_float(fold, asinhf(fold_immvalue_float(a)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_atanh(fold_t *fold, ast_value *a) {
+ return fold_constgen_float(fold, atanhf(fold_immvalue_float(a)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_exp(fold_t *fold, ast_value *a) {
+ return fold_constgen_float(fold, expf(fold_immvalue_float(a)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_exp2(fold_t *fold, ast_value *a) {
+ return fold_constgen_float(fold, exp2f(fold_immvalue_float(a)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_expm1(fold_t *fold, ast_value *a) {
+ return fold_constgen_float(fold, expm1f(fold_immvalue_float(a)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_mod(fold_t *fold, ast_value *lhs, ast_value *rhs) {
+ return fold_constgen_float(fold, fmodf(fold_immvalue_float(lhs), fold_immvalue_float(rhs)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_pow(fold_t *fold, ast_value *lhs, ast_value *rhs) {
+ return fold_constgen_float(fold, powf(fold_immvalue_float(lhs), fold_immvalue_float(rhs)));
+}
+static GMQCC_INLINE ast_expression *fold_intrin_fabs(fold_t *fold, ast_value *a) {
+ return fold_constgen_float(fold, fabsf(fold_immvalue_float(a)));
+}
+
+ast_expression *fold_intrin(fold_t *fold, const char *intrin, ast_expression **arg) {
+ ast_expression *ret = NULL;
+ ast_value *a = (ast_value*)arg[0];
+ ast_value *b = (ast_value*)arg[1];
+
+ if (!strcmp(intrin, "isfinite")) ret = fold_intrin_isfinite(fold, a);
+ if (!strcmp(intrin, "isinf")) ret = fold_intrin_isinf(fold, a);
+ if (!strcmp(intrin, "isnan")) ret = fold_intrin_isnan(fold, a);
+ if (!strcmp(intrin, "isnormal")) ret = fold_intrin_isnormal(fold, a);
+ if (!strcmp(intrin, "signbit")) ret = fold_intrin_signbit(fold, a);
+ if (!strcmp(intrin, "acosh")) ret = fold_intirn_acosh(fold, a);
+ if (!strcmp(intrin, "asinh")) ret = fold_intrin_asinh(fold, a);
+ if (!strcmp(intrin, "atanh")) ret = fold_intrin_atanh(fold, a);
+ if (!strcmp(intrin, "exp")) ret = fold_intrin_exp(fold, a);
+ if (!strcmp(intrin, "exp2")) ret = fold_intrin_exp2(fold, a);
+ if (!strcmp(intrin, "expm1")) ret = fold_intrin_expm1(fold, a);
+ if (!strcmp(intrin, "mod")) ret = fold_intrin_mod(fold, a, b);
+ if (!strcmp(intrin, "pow")) ret = fold_intrin_pow(fold, a, b);
+ if (!strcmp(intrin, "fabs")) ret = fold_intrin_fabs(fold, a);
+
+ if (ret)
+ ++opts_optimizationcount[OPTIM_CONST_FOLD];
+
+ return ret;
+}
+
+/*
+ * These are all the actual constant folding methods that happen in between
+ * the AST/IR stage of the compiler, i.e. eliminating branches for constant
+ * expressions, which is the only thing supported so far. We undefine the
+ * testing macros here because an ir_value is different from an ast_value.
+ */
+#undef expect
+#undef isfloat
+#undef isstring
+#undef isvector
+#undef fold_immvalue_float
+#undef fold_immvalue_string
+#undef fold_immvalue_vector
+#undef fold_can_1
+#undef fold_can_2
+
+#define isfloat(X) ((X)->vtype == TYPE_FLOAT)
+/*#define isstring(X) ((X)->vtype == TYPE_STRING)*/
+/*#define isvector(X) ((X)->vtype == TYPE_VECTOR)*/
+#define fold_immvalue_float(X) ((X)->constval.vfloat)
+#define fold_immvalue_vector(X) ((X)->constval.vvec)
+/*#define fold_immvalue_string(X) ((X)->constval.vstring)*/
+#define fold_can_1(X) ((X)->hasvalue && (X)->cvq == CV_CONST)
+/*#define fold_can_2(X,Y) (fold_can_1(X) && fold_can_1(Y))*/
+
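+/*
+ * Peephole elimination of superfluous binary operations on an identity
+ * element, e.g. (A * 1), (A / 1), (A + 0) and (A - 0), as well as the
+ * vector equivalents. Returns the surviving operand, or NULL when no
+ * elimination applies.
+ */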
+static ast_expression *fold_superfluous(ast_expression *left, ast_expression *right, int op) {
+ ast_expression *swapped = NULL; /* using this as bool */
+ ast_value *load;
+
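+ /*
+ * Canonicalize the constant operand to the right-hand side, recording
+ * that a swap took place so the non-commutative operations (SUB, DIV)
+ * can refuse to fold below: (1 / A) is not the same as (A / 1).
+ */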
+ if (!ast_istype(right, ast_value) || !fold_can_1((load = (ast_value*)right))) {
+ swapped = left;
+ left = right;
+ right = swapped;
+ }
+
+ if (!ast_istype(right, ast_value) || !fold_can_1((load = (ast_value*)right)))
+ return NULL;
+
+ switch (op) {
+ case INSTR_DIV_F:
+ if (swapped)
+ return NULL;
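+ /* fall through */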
+ case INSTR_MUL_F:
+ if (fold_immvalue_float(load) == 1.0f) {
+ ++opts_optimizationcount[OPTIM_PEEPHOLE];
+ ast_unref(right);
+ return left;
+ }
+ break;
+
+ case INSTR_SUB_F:
+ if (swapped)
+ return NULL;
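+ /* fall through */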
+ case INSTR_ADD_F:
+ if (fold_immvalue_float(load) == 0.0f) {
+ ++opts_optimizationcount[OPTIM_PEEPHOLE];
+ ast_unref(right);
+ return left;
+ }
+ break;
+
+ case INSTR_MUL_V:
+ if (vec3_cmp(fold_immvalue_vector(load), vec3_create(1, 1, 1))) {
+ ++opts_optimizationcount[OPTIM_PEEPHOLE];
+ ast_unref(right);
+ return left;
+ }
+ break;
+
+ case INSTR_SUB_V:
+ if (swapped)
+ return NULL;
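+ /* fall through */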
+ case INSTR_ADD_V:
+ if (vec3_cmp(fold_immvalue_vector(load), vec3_create(0, 0, 0))) {
+ ++opts_optimizationcount[OPTIM_PEEPHOLE];
+ ast_unref(right);
+ return left;
+ }
+ break;
+ }
+
+ return NULL;
+}
+
+ast_expression *fold_binary(lex_ctx_t ctx, int op, ast_expression *left, ast_expression *right) {
+ ast_expression *ret = fold_superfluous(left, right, op);
+ if (ret)
+ return ret;
+ return (ast_expression*)ast_binary_new(ctx, op, left, right);
+}
+
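+/*
+ * Fold a branch on a constant condition by generating only the taken
+ * path. Returns true when the branch was eliminated, false when code
+ * generation failed, and -1 when the condition is not foldable
+ * (nothing done).
+ */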
+static GMQCC_INLINE int fold_cond(ir_value *condval, ast_function *func, ast_ifthen *branch) {
+ if (isfloat(condval) && fold_can_1(condval) && OPTS_OPTIMIZATION(OPTIM_CONST_FOLD_DCE)) {
+ ast_expression_codegen *cgen;
+ ir_block *elide;
+ ir_value *dummy;
+ bool istrue = (fold_immvalue_float(condval) != 0.0f && branch->on_true);
+ bool isfalse = (fold_immvalue_float(condval) == 0.0f && branch->on_false);
+ ast_expression *path = (istrue) ? branch->on_true :
+ (isfalse) ? branch->on_false : NULL;
+ if (!path) {
+ /*
+ * No path to take implies the conditional is if(0) with no else
+ * block, so eliminate all of the code.
+ */
+ ++opts_optimizationcount[OPTIM_CONST_FOLD_DCE];
+ return true;
+ }
+
+ if (!(elide = ir_function_create_block(ast_ctx(branch), func->ir_func, ast_function_label(func, ((istrue) ? "ontrue" : "onfalse")))))
+ return false;
+ if (!(*(cgen = path->codegen))((ast_expression*)path, func, false, &dummy))
+ return false;
+ if (!ir_block_create_jump(func->curblock, ast_ctx(branch), elide))
+ return false;
+ /*
+ * The branch has now been eliminated and the block for the constant
+ * path has been expanded into the function's current block.
+ */
+ func->curblock = elide;
+ ++opts_optimizationcount[OPTIM_CONST_FOLD_DCE];
+ return true;
+ }
+ return -1; /* nothing done */
+}
+
+int fold_cond_ternary(ir_value *condval, ast_function *func, ast_ternary *branch) {
+ return fold_cond(condval, func, (ast_ifthen*)branch);
+}
+
+int fold_cond_ifthen(ir_value *condval, ast_function *func, ast_ifthen *branch) {
+ return fold_cond(condval, func, branch);
+}