2 * Copyright (C) 2012, 2013, 2014
5 * Permission is hereby granted, free of charge, to any person obtaining a copy of
6 * this software and associated documentation files (the "Software"), to deal in
7 * the Software without restriction, including without limitation the rights to
8 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is furnished to do
10 * so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in all
13 * copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 #define FOLD_STRING_UNTRANSLATE_HTSIZE 1024
30 #define FOLD_STRING_DOTRANSLATE_HTSIZE 1024
32 /* The rounding and tininess-detection options used when checking arithmetic exceptions */
33 #define FOLD_ROUNDING SFLOAT_ROUND_NEAREST_EVEN
34 #define FOLD_TINYNESS SFLOAT_TBEFORE
37 * The constant folder is also responsible for validating whether constant
38 * expressions produce valid results. We cannot trust the FPU for these
39 * exceptions because setting FPU control words might not work. Systems can
40 * set and enforce FPU modes of operation, and it's also valid for a libc to
41 * simply ignore FPU exceptions, as glibc does on ARM CPUs. We implement
42 * some trivial, IEEE 754 conformant functions which emulate those
43 * operations. This is an entirely optional compiler feature which shouldn't
44 * be enabled for anything other than performing strict passes on constant
45 * expressions, since it's quite slow.
47 typedef uint32_t sfloat_t;
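/*
 * An sfloat_t holds the raw bits of an IEEE 754 binary32 value: one sign
 * bit (31), eight exponent bits (30..23) and 23 fraction bits (22..0).
 * For example 1.0f is 0x3F800000 (sign 0, exponent 0x7F, fraction 0). Host
 * floats are reinterpreted through unions before being handed to these
 * routines.
 */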
58 SFLOAT_UNDERFLOW = 16,
60 } sfloat_exceptionflags_t;
63 SFLOAT_ROUND_NEAREST_EVEN,
67 } sfloat_roundingmode_t;
75 sfloat_roundingmode_t roundingmode;
76 sfloat_exceptionflags_t exceptionflags;
77 sfloat_tdetect_t tiny;
80 /* Count of leading zero bits before the most-significant 1 bit. */
82 /* MSVC has an intrinsic for this */
83 static GMQCC_INLINE uint32_t sfloat_clz(uint32_t x) {
85 _BitScanForward(&r, x);
88 # define SFLOAT_CLZ(X, SUB) \
89 (sfloat_clz((X)) - (SUB))
90 #elif defined(__GNUC__) || defined(__CLANG__)
91 /* Clang and GCC have a builtin for this */
92 # define SFLOAT_CLZ(X, SUB) \
93 (__builtin_clz((X)) - (SUB))
96 static GMQCC_INLINE uint32_t sfloat_popcnt(uint32_t x) {
97 x -= ((x >> 1) & 0x55555555);
98 x = (((x >> 2) & 0x33333333) + (x & 0x33333333));
99 x = (((x >> 4) + x) & 0x0F0F0F0F);
102 return x & 0x0000003F;
104 static GMQCC_INLINE uint32_t sfloat_clz(uint32_t x) {
110 return 32 - sfloat_popcnt(x);
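/*
 * This fallback relies on the usual smearing trick: once x has been OR'd
 * with its own right shifts, every bit below the most-significant 1 is set,
 * so 32 minus the population count is the leading-zero count. For example
 * clz(0x00800000) == 32 - 24 == 8.
 */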
112 # define SFLOAT_CLZ(X, SUB) \
113 (sfloat_clz((X)) - (SUB))
116 /* The default (quiet) NaN value */
117 #define SFLOAT_NAN 0xFFC00000
119 #define SFLOAT_ISNAN(A) \
120 (0xFF000000 < (uint32_t)((A) << 1))
121 /* Test if signaling NaN */
122 #define SFLOAT_ISSNAN(A) \
123 (((((A) >> 22) & 0x1FF) == 0x1FE) && ((A) & 0x003FFFFF))
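/*
 * A NaN has an all-ones exponent and a non-zero fraction; the left shift in
 * SFLOAT_ISNAN discards the sign bit, so 0x7FC00000 and 0xFFC00000 both
 * qualify while infinities (zero fraction) do not. A signaling NaN
 * additionally has bit 22 clear, e.g. 0x7F800001, which is exactly what
 * SFLOAT_ISSNAN tests for.
 */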
124 /* Raise exception */
125 #define SFLOAT_RAISE(STATE, FLAGS) \
126 ((STATE)->exceptionflags |= (FLAGS))
128 * Shifts `A' right by `COUNT' bits; any non-zero bits shifted out are
129 * "jammed" into the least-significant bit. SIZE is the operand width.
131 #define SFLOAT_SHIFT(SIZE, A, COUNT, Z) \
132 *(Z) = ((COUNT) == 0) \
134 : (((COUNT) < (SIZE)) \
135 ? ((A) >> (COUNT)) | (((A) << ((-(COUNT)) & ((SIZE) - 1))) != 0) \
137 /* Extract fractional component */
138 #define SFLOAT_EXTRACT_FRAC(X) \
139 ((uint32_t)((X) & 0x007FFFFF))
140 /* Extract exponent component */
141 #define SFLOAT_EXTRACT_EXP(X) \
142 ((int16_t)((X) >> 23) & 0xFF)
143 /* Extract sign bit */
144 #define SFLOAT_EXTRACT_SIGN(X) \
146 /* Normalize a subnormal */
147 #define SFLOAT_SUBNORMALIZE(SA, Z, SZ) \
148 (void)(*(SZ) = (SA) << SFLOAT_CLZ((SA), 8), *(Z) = 1 - SFLOAT_CLZ((SA), 8))
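/*
 * A subnormal has no implicit integer bit, so the stored fraction is
 * shifted left until its leading 1 reaches the implicit-bit position
 * (bit 23); the bias of 8 accounts for the eight bits above the fraction
 * in a 32-bit word. The exponent becomes 1 minus that shift, preserving
 * the value.
 */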
150 * Pack sign, exponent and significand and produce a float.
152 * Integer portions of the significand are added to the exponent. The
153 * exponent input should be one less than the desired result exponent
154 * whenever the significand is normalized, since a normalized significand
155 * always has an integer portion of value one.
157 #define SFLOAT_PACK(SIGN, EXP, SIG) \
158 (sfloat_t)((((uint32_t)(SIGN)) << 31) + (((uint32_t)(EXP)) << 23) + (SIG))
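/*
 * A small round-trip example (no rounding involved):
 *   SFLOAT_PACK(0, 0x7F, 0)         == 0x3F800000   (1.0f)
 *   SFLOAT_EXTRACT_SIGN(0x3F800000) == 0
 *   SFLOAT_EXTRACT_EXP(0x3F800000)  == 0x7F
 *   SFLOAT_EXTRACT_FRAC(0x3F800000) == 0
 */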
160 /* Propagate a NaN. If either operand is a signaling NaN then raise invalid */
161 static sfloat_t sfloat_propagate_nan(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
162 bool isnan_a = SFLOAT_ISNAN(a);
163 bool issnan_a = SFLOAT_ISSNAN(a);
164 bool isnan_b = SFLOAT_ISNAN(b);
165 bool issnan_b = SFLOAT_ISSNAN(b);
170 if (issnan_a | issnan_b)
171 SFLOAT_RAISE(state, SFLOAT_INVALID);
175 return isnan_b ? b : a;
176 } else if (isnan_a) {
177 if (issnan_b | !isnan_b)
180 if ((uint32_t)(a << 1) < (uint32_t)(b << 1)) return b;
181 if ((uint32_t)(b << 1) < (uint32_t)(a << 1)) return a;
182 return (a < b) ? a : b;
188 static sfloat_t SFLOAT_PACK_round(sfloat_state_t *state, bool sign_z, int16_t exp_z, uint32_t sig_z) {
189 sfloat_roundingmode_t mode = state->roundingmode;
190 bool even = !!(mode == SFLOAT_ROUND_NEAREST_EVEN);
191 unsigned char increment = 0x40;
192 unsigned char bits = sig_z & 0x7F;
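/*
 * The low seven bits of sig_z are guard bits below the final unit in the
 * last place; 0x40 is exactly half of that ULP, so adding it implements
 * round-to-nearest. The mode checks below swap in a different increment
 * for the directed rounding modes (0 for round-to-zero, and presumably
 * 0x7F or 0 for round-up/round-down depending on the sign).
 */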
195 if (mode == SFLOAT_ROUND_TO_ZERO)
200 if (mode == SFLOAT_ROUND_UP)
203 if (mode == SFLOAT_ROUND_DOWN)
209 if (0xFD <= (uint16_t)exp_z) {
210 if ((0xFD < exp_z) || ((exp_z == 0xFD) && ((int32_t)(sig_z + increment) < 0))) {
211 SFLOAT_RAISE(state, SFLOAT_OVERFLOW | SFLOAT_INEXACT);
212 return SFLOAT_PACK(sign_z, 0xFF, 0) - (increment == 0);
215 /* Check for underflow */
216 bool tiny = (state->tiny == SFLOAT_TBEFORE) || (exp_z < -1) || (sig_z + increment < 0x80000000);
217 SFLOAT_SHIFT(32, sig_z, -exp_z, &sig_z);
221 SFLOAT_RAISE(state, SFLOAT_UNDERFLOW);
226 * The significand has its binary point between bits 30 and 29, 7 bits to
227 * the left of the usual place. This shifted significand must be normalized
228 * or smaller; if it isn't, the exponent must be zero, in which case no
229 * rounding occurs since the result will be a subnormal.
232 SFLOAT_RAISE(state, SFLOAT_INEXACT);
233 sig_z = (sig_z + increment) >> 7;
234 sig_z &= ~(((bits ^ 0x40) == 0) & even);
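/*
 * Ties-to-even: when the discarded guard bits are exactly half an ULP
 * (bits == 0x40) and the mode is round-to-nearest-even, the mask above
 * clears the least-significant result bit so ties land on an even
 * significand.
 */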
237 return SFLOAT_PACK(sign_z, exp_z, sig_z);
240 /* Normalize, then round and pack */
241 static sfloat_t SFLOAT_PACK_normal(sfloat_state_t *state, bool sign_z, int16_t exp_z, uint32_t sig_z) {
242 unsigned char c = SFLOAT_CLZ(sig_z, 1);
243 return SFLOAT_PACK_round(state, sign_z, exp_z - c, sig_z << c);
246 static sfloat_t sfloat_add_impl(sfloat_state_t *state, sfloat_t a, sfloat_t b, bool sign_z) {
247 int16_t exp_a = SFLOAT_EXTRACT_EXP(a);
248 int16_t exp_b = SFLOAT_EXTRACT_EXP(b);
250 int16_t exp_d = exp_a - exp_b;
251 uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a) << 6;
252 uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b) << 6;
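/*
 * The 23-bit fractions are pre-shifted left by 6 so the alignment shift
 * and the final rounding have guard bits to work with; the implicit
 * integer bits are re-added explicitly later (the 0x40000000 term below
 * accounts for both leading 1s in the equal-exponent path).
 */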
257 return sig_a ? sfloat_propagate_nan(state, a, b) : a;
262 SFLOAT_SHIFT(32, sig_b, exp_d, &sig_b);
264 } else if (exp_d < 0) {
266 return sig_b ? sfloat_propagate_nan(state, a, b) : SFLOAT_PACK(sign_z, 0xFF, 0);
271 SFLOAT_SHIFT(32, sig_a, -exp_d, &sig_a);
275 return (sig_a | sig_b) ? sfloat_propagate_nan(state, a, b) : a;
277 return SFLOAT_PACK(sign_z, 0, (sig_a + sig_b) >> 6);
278 sig_z = 0x40000000 + sig_a + sig_b;
283 sig_z = (sig_a + sig_b) << 1;
285 if ((int32_t)sig_z < 0) {
286 sig_z = sig_a + sig_b;
290 return SFLOAT_PACK_round(state, sign_z, exp_z, sig_z);
293 static sfloat_t sfloat_sub_impl(sfloat_state_t *state, sfloat_t a, sfloat_t b, bool sign_z) {
294 int16_t exp_a = SFLOAT_EXTRACT_EXP(a);
295 int16_t exp_b = SFLOAT_EXTRACT_EXP(b);
297 int16_t exp_d = exp_a - exp_b;
298 uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a) << 7;
299 uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b) << 7;
302 if (0 < exp_d) goto exp_greater_a;
303 if (exp_d < 0) goto exp_greater_b;
307 return sfloat_propagate_nan(state, a, b);
308 SFLOAT_RAISE(state, SFLOAT_INVALID);
315 if (sig_b < sig_a) goto greater_a;
316 if (sig_a < sig_b) goto greater_b;
318 return SFLOAT_PACK(state->roundingmode == SFLOAT_ROUND_DOWN, 0, 0);
322 return (sig_b) ? sfloat_propagate_nan(state, a, b) : SFLOAT_PACK(sign_z ^ 1, 0xFF, 0);
327 SFLOAT_SHIFT(32, sig_a, -exp_d, &sig_a);
330 sig_z = sig_b - sig_a;
337 return (sig_a) ? sfloat_propagate_nan(state, a, b) : a;
342 SFLOAT_SHIFT(32, sig_b, exp_d, &sig_b);
345 sig_z = sig_a - sig_b;
350 return SFLOAT_PACK_normal(state, sign_z, exp_z, sig_z);
353 static GMQCC_INLINE sfloat_t sfloat_add(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
354 bool sign_a = SFLOAT_EXTRACT_SIGN(a);
355 bool sign_b = SFLOAT_EXTRACT_SIGN(b);
356 return (sign_a == sign_b) ? sfloat_add_impl(state, a, b, sign_a)
357 : sfloat_sub_impl(state, a, b, sign_a);
360 static GMQCC_INLINE sfloat_t sfloat_sub(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
361 bool sign_a = SFLOAT_EXTRACT_SIGN(a);
362 bool sign_b = SFLOAT_EXTRACT_SIGN(b);
363 return (sign_a == sign_b) ? sfloat_sub_impl(state, a, b, sign_a)
364 : sfloat_add_impl(state, a, b, sign_a);
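/*
 * Since the representation is sign-magnitude, adding operands of opposite
 * sign is really a magnitude subtraction and vice versa, so both entry
 * points merely compare the sign bits and dispatch to the shared helpers.
 */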
367 static sfloat_t sfloat_mul(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
368 int16_t exp_a = SFLOAT_EXTRACT_EXP(a);
369 int16_t exp_b = SFLOAT_EXTRACT_EXP(b);
371 uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a);
372 uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b);
374 uint64_t sig_z64 = 0;
375 bool sign_a = SFLOAT_EXTRACT_SIGN(a);
376 bool sign_b = SFLOAT_EXTRACT_SIGN(b);
377 bool sign_z = sign_a ^ sign_b;
380 if (sig_a || ((exp_b == 0xFF) && sig_b))
381 return sfloat_propagate_nan(state, a, b);
382 if ((exp_b | sig_b) == 0) {
383 SFLOAT_RAISE(state, SFLOAT_INVALID);
386 return SFLOAT_PACK(sign_z, 0xFF, 0);
390 return sfloat_propagate_nan(state, a, b);
391 if ((exp_a | sig_a) == 0) {
392 SFLOAT_RAISE(state, SFLOAT_INVALID);
395 return SFLOAT_PACK(sign_z, 0xFF, 0);
399 return SFLOAT_PACK(sign_z, 0, 0);
400 SFLOAT_SUBNORMALIZE(sig_a, &exp_a, &sig_a);
404 return SFLOAT_PACK(sign_z, 0, 0);
405 SFLOAT_SUBNORMALIZE(sig_b, &exp_b, &sig_b);
407 exp_z = exp_a + exp_b - 0x7F;
408 sig_a = (sig_a | 0x00800000) << 7;
409 sig_b = (sig_b | 0x00800000) << 8;
410 SFLOAT_SHIFT(64, ((uint64_t)sig_a) * sig_b, 32, &sig_z64);
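/*
 * The significands, with their implicit 1 restored, are scaled to occupy
 * bits 30..7 and 31..8 respectively, so the 64-bit product keeps every
 * result bit; shifting right by 32 with jamming folds the low half into a
 * sticky bit, and the check below normalizes by one more bit when needed
 * before rounding.
 */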
412 if (0 <= (int32_t)(sig_z << 1)) {
416 return SFLOAT_PACK_round(state, sign_z, exp_z, sig_z);
419 static sfloat_t sfloat_div(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
420 int16_t exp_a = SFLOAT_EXTRACT_EXP(a);
421 int16_t exp_b = SFLOAT_EXTRACT_EXP(b);
423 uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a);
424 uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b);
426 bool sign_a = SFLOAT_EXTRACT_SIGN(a);
427 bool sign_b = SFLOAT_EXTRACT_SIGN(b);
428 bool sign_z = sign_a ^ sign_b;
432 return sfloat_propagate_nan(state, a, b);
435 return sfloat_propagate_nan(state, a, b);
436 SFLOAT_RAISE(state, SFLOAT_INVALID);
439 return SFLOAT_PACK(sign_z, 0xFF, 0);
442 return (sig_b) ? sfloat_propagate_nan(state, a, b) : SFLOAT_PACK(sign_z, 0, 0);
445 if ((exp_a | sig_a) == 0) {
446 SFLOAT_RAISE(state, SFLOAT_INVALID);
449 SFLOAT_RAISE(state, SFLOAT_DIVBYZERO);
450 return SFLOAT_PACK(sign_z, 0xFF, 0);
452 SFLOAT_SUBNORMALIZE(sig_b, &exp_b, &sig_b);
456 return SFLOAT_PACK(sign_z, 0, 0);
457 SFLOAT_SUBNORMALIZE(sig_a, &exp_a, &sig_a);
459 exp_z = exp_a - exp_b + 0x7D;
460 sig_a = (sig_a | 0x00800000) << 7;
461 sig_b = (sig_b | 0x00800000) << 8;
462 if (sig_b <= (sig_a + sig_a)) {
466 sig_z = (((uint64_t)sig_a) << 32) / sig_b;
467 if ((sig_z & 0x3F) == 0)
468 sig_z |= ((uint64_t)sig_b * sig_z != ((uint64_t)sig_a) << 32);
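/*
 * Even when the quotient's guard bits come out as zero the division may
 * still have been inexact, so the check above ORs in a sticky bit whenever
 * sig_b * sig_z fails to reproduce the scaled dividend exactly.
 */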
469 return SFLOAT_PACK_round(state, sign_z, exp_z, sig_z);
472 static GMQCC_INLINE void sfloat_check(lex_ctx_t ctx, sfloat_state_t *state, const char *vec) {
473 /* Exception comes from vector component */
475 if (state->exceptionflags & SFLOAT_DIVBYZERO)
476 compile_error(ctx, "division by zero in `%s' component", vec);
477 if (state->exceptionflags & SFLOAT_INVALID)
478 compile_error(ctx, "undefined (inf) in `%s' component", vec);
479 if (state->exceptionflags & SFLOAT_OVERFLOW)
480 compile_error(ctx, "arithmetic overflow in `%s' component", vec);
481 if (state->exceptionflags & SFLOAT_UNDERFLOW)
482 compile_error(ctx, "arithmetic underflow in `%s' component", vec);
485 if (state->exceptionflags & SFLOAT_DIVBYZERO)
486 compile_error(ctx, "division by zero");
487 if (state->exceptionflags & SFLOAT_INVALID)
488 compile_error(ctx, "undefined (inf)");
489 if (state->exceptionflags & SFLOAT_OVERFLOW)
490 compile_error(ctx, "arithmetic overflow");
491 if (state->exceptionflags & SFLOAT_UNDERFLOW)
492 compile_error(ctx, "arithmetic underflow");
496 * There are two stages to constant folding in GMQCC: parse-stage constant
497 * folding, where, with the help of the AST, operator usages can be
498 * constant folded; and constant folding in the IR, where things like
499 * eliding if statements can occur.
501 * This file is thus split into two parts.
504 #define isfloat(X) (((ast_expression*)(X))->vtype == TYPE_FLOAT)
505 #define isvector(X) (((ast_expression*)(X))->vtype == TYPE_VECTOR)
506 #define isstring(X) (((ast_expression*)(X))->vtype == TYPE_STRING)
507 #define isfloats(X,Y) (isfloat (X) && isfloat (Y))
510 * Implementation of basic vector math for vec3_t, for trivial constant
513 * TODO: gcc/clang hinting for autovectorization
529 sfloat_state_t state[3];
532 static GMQCC_INLINE vec3_soft_t vec3_soft_convert(vec3_t vec) {
540 static GMQCC_INLINE bool vec3_soft_exception(vec3_soft_state_t *vstate, size_t index) {
541 sfloat_exceptionflags_t flags = vstate->state[index].exceptionflags;
542 if (flags & SFLOAT_DIVBYZERO) return true;
543 if (flags & SFLOAT_INVALID) return true;
544 if (flags & SFLOAT_OVERFLOW) return true;
545 if (flags & SFLOAT_UNDERFLOW) return true;
549 static GMQCC_INLINE void vec3_soft_eval(vec3_soft_state_t *state,
550 sfloat_t (*callback)(sfloat_state_t *, sfloat_t, sfloat_t),
554 vec3_soft_t sa = vec3_soft_convert(a);
555 vec3_soft_t sb = vec3_soft_convert(b);
556 callback(&state->state[0], sa.x.s, sb.x.s);
557 if (vec3_soft_exception(state, 0)) state->faults |= VEC_COMP_X;
558 callback(&state->state[1], sa.y.s, sb.y.s);
559 if (vec3_soft_exception(state, 1)) state->faults |= VEC_COMP_Y;
560 callback(&state->state[2], sa.z.s, sb.z.s);
561 if (vec3_soft_exception(state, 2)) state->faults |= VEC_COMP_Z;
564 static GMQCC_INLINE void vec3_check_except(vec3_t a,
567 sfloat_t (*callback)(sfloat_state_t *, sfloat_t, sfloat_t))
569 vec3_soft_state_t state;
570 state.state[0].exceptionflags = 0;
571 state.state[0].roundingmode = FOLD_ROUNDING;
572 state.state[0].tiny = FOLD_TINYNESS;
573 state.state[2] = state.state[1] = state.state[0]; /* copy the initialized state into the other two components */
575 if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS))
578 vec3_soft_eval(&state, callback, a, b);
579 if (state.faults & VEC_COMP_X) sfloat_check(ctx, &state.state[0], "x");
580 if (state.faults & VEC_COMP_Y) sfloat_check(ctx, &state.state[1], "y");
581 if (state.faults & VEC_COMP_Z) sfloat_check(ctx, &state.state[2], "z");
584 static GMQCC_INLINE vec3_t vec3_add(lex_ctx_t ctx, vec3_t a, vec3_t b) {
586 vec3_check_except(a, b, ctx, &sfloat_add);
593 static GMQCC_INLINE vec3_t vec3_sub(lex_ctx_t ctx, vec3_t a, vec3_t b) {
595 vec3_check_except(a, b, ctx, &sfloat_sub);
602 static GMQCC_INLINE vec3_t vec3_neg(vec3_t a) {
610 static GMQCC_INLINE vec3_t vec3_or(vec3_t a, vec3_t b) {
612 out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b.x));
613 out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b.y));
614 out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b.z));
618 static GMQCC_INLINE vec3_t vec3_orvf(vec3_t a, qcfloat_t b) {
620 out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b));
621 out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b));
622 out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b));
626 static GMQCC_INLINE vec3_t vec3_and(vec3_t a, vec3_t b) {
628 out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b.x));
629 out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b.y));
630 out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b.z));
634 static GMQCC_INLINE vec3_t vec3_andvf(vec3_t a, qcfloat_t b) {
636 out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b));
637 out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b));
638 out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b));
642 static GMQCC_INLINE vec3_t vec3_xor(vec3_t a, vec3_t b) {
644 out.x = (qcfloat_t)(((qcint_t)a.x) ^ ((qcint_t)b.x));
645 out.y = (qcfloat_t)(((qcint_t)a.y) ^ ((qcint_t)b.y));
646 out.z = (qcfloat_t)(((qcint_t)a.z) ^ ((qcint_t)b.z));
650 static GMQCC_INLINE vec3_t vec3_xorvf(vec3_t a, qcfloat_t b) {
652 out.x = (qcfloat_t)(((qcint_t)a.x) ^ ((qcint_t)b));
653 out.y = (qcfloat_t)(((qcint_t)a.y) ^ ((qcint_t)b));
654 out.z = (qcfloat_t)(((qcint_t)a.z) ^ ((qcint_t)b));
658 static GMQCC_INLINE vec3_t vec3_not(vec3_t a) {
666 static GMQCC_INLINE qcfloat_t vec3_mulvv(vec3_t a, vec3_t b) {
667 return (a.x * b.x + a.y * b.y + a.z * b.z);
670 static GMQCC_INLINE vec3_t vec3_mulvf(vec3_t a, qcfloat_t b) {
678 static GMQCC_INLINE bool vec3_cmp(vec3_t a, vec3_t b) {
684 static GMQCC_INLINE vec3_t vec3_create(float x, float y, float z) {
692 static GMQCC_INLINE qcfloat_t vec3_notf(vec3_t a) {
693 return (!a.x && !a.y && !a.z);
696 static GMQCC_INLINE bool vec3_pbool(vec3_t a) {
697 return (a.x || a.y || a.z);
700 static GMQCC_INLINE vec3_t vec3_cross(vec3_t a, vec3_t b) {
702 out.x = a.y * b.z - a.z * b.y;
703 out.y = a.z * b.x - a.x * b.z;
704 out.z = a.x * b.y - a.y * b.x;
708 static lex_ctx_t fold_ctx(fold_t *fold) {
710 if (fold->parser->lex)
711 return parser_ctx(fold->parser);
713 memset(&ctx, 0, sizeof(ctx));
717 static GMQCC_INLINE bool fold_immediate_true(fold_t *fold, ast_value *v) {
718 switch (v->expression.vtype) {
720 return !!v->constval.vfloat;
722 return !!v->constval.vint;
724 if (OPTS_FLAG(CORRECT_LOGIC))
725 return vec3_pbool(v->constval.vvec);
726 return !!(v->constval.vvec.x);
728 if (!v->constval.vstring)
730 if (OPTS_FLAG(TRUE_EMPTY_STRINGS))
732 return !!v->constval.vstring[0];
734 compile_error(fold_ctx(fold), "internal error: fold_immediate_true on invalid type");
737 return !!v->constval.vfunc;
740 /* Handy macros to determine if an ast_value can be constant folded. */
741 #define fold_can_1(X) \
742 (ast_istype(((ast_expression*)(X)), ast_value) && (X)->hasvalue && ((X)->cvq == CV_CONST) && \
743 ((ast_expression*)(X))->vtype != TYPE_FUNCTION)
745 #define fold_can_2(X, Y) (fold_can_1(X) && fold_can_1(Y))
747 #define fold_immvalue_float(E) ((E)->constval.vfloat)
748 #define fold_immvalue_vector(E) ((E)->constval.vvec)
749 #define fold_immvalue_string(E) ((E)->constval.vstring)
751 fold_t *fold_init(parser_t *parser) {
752 fold_t *fold = (fold_t*)mem_a(sizeof(fold_t));
753 fold->parser = parser;
754 fold->imm_float = NULL;
755 fold->imm_vector = NULL;
756 fold->imm_string = NULL;
757 fold->imm_string_untranslate = util_htnew(FOLD_STRING_UNTRANSLATE_HTSIZE);
758 fold->imm_string_dotranslate = util_htnew(FOLD_STRING_DOTRANSLATE_HTSIZE);
761 * prime the tables with common constant values at constant
764 (void)fold_constgen_float (fold, 0.0f, false);
765 (void)fold_constgen_float (fold, 1.0f, false);
766 (void)fold_constgen_float (fold, -1.0f, false);
767 (void)fold_constgen_float (fold, 2.0f, false);
769 (void)fold_constgen_vector(fold, vec3_create(0.0f, 0.0f, 0.0f));
770 (void)fold_constgen_vector(fold, vec3_create(-1.0f, -1.0f, -1.0f));
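/*
 * The priming order above is relied upon later: imm_float[0..3] hold 0.0,
 * 1.0, -1.0 and 2.0 respectively (fold_op_lteqgt, for instance, returns
 * index 2, 0 or 1 for less, equal or greater), and imm_vector[0] and [1]
 * hold '0 0 0' and '-1 -1 -1'.
 */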
775 bool fold_generate(fold_t *fold, ir_builder *ir) {
776 /* generate globals for immediate folded values */
780 for (i = 0; i < vec_size(fold->imm_float); ++i)
781 if (!ast_global_codegen ((cur = fold->imm_float[i]), ir, false)) goto err;
782 for (i = 0; i < vec_size(fold->imm_vector); ++i)
783 if (!ast_global_codegen((cur = fold->imm_vector[i]), ir, false)) goto err;
784 for (i = 0; i < vec_size(fold->imm_string); ++i)
785 if (!ast_global_codegen((cur = fold->imm_string[i]), ir, false)) goto err;
790 con_out("failed to generate global %s\n", cur->name);
791 ir_builder_delete(ir);
795 void fold_cleanup(fold_t *fold) {
798 for (i = 0; i < vec_size(fold->imm_float); ++i) ast_delete(fold->imm_float[i]);
799 for (i = 0; i < vec_size(fold->imm_vector); ++i) ast_delete(fold->imm_vector[i]);
800 for (i = 0; i < vec_size(fold->imm_string); ++i) ast_delete(fold->imm_string[i]);
802 vec_free(fold->imm_float);
803 vec_free(fold->imm_vector);
804 vec_free(fold->imm_string);
806 util_htdel(fold->imm_string_untranslate);
807 util_htdel(fold->imm_string_dotranslate);
812 ast_expression *fold_constgen_float(fold_t *fold, qcfloat_t value, bool inexact) {
813 ast_value *out = NULL;
816 for (i = 0; i < vec_size(fold->imm_float); i++) {
817 if (!memcmp(&fold->imm_float[i]->constval.vfloat, &value, sizeof(qcfloat_t)))
818 return (ast_expression*)fold->imm_float[i];
821 out = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_FLOAT);
823 out->hasvalue = true;
824 out->inexact = inexact;
825 out->constval.vfloat = value;
827 vec_push(fold->imm_float, out);
829 return (ast_expression*)out;
832 ast_expression *fold_constgen_vector(fold_t *fold, vec3_t value) {
836 for (i = 0; i < vec_size(fold->imm_vector); i++) {
837 if (vec3_cmp(fold->imm_vector[i]->constval.vvec, value))
838 return (ast_expression*)fold->imm_vector[i];
841 out = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_VECTOR);
843 out->hasvalue = true;
844 out->constval.vvec = value;
846 vec_push(fold->imm_vector, out);
848 return (ast_expression*)out;
851 ast_expression *fold_constgen_string(fold_t *fold, const char *str, bool translate) {
852 hash_table_t *table = (translate) ? fold->imm_string_untranslate : fold->imm_string_dotranslate;
853 ast_value *out = NULL;
854 size_t hash = util_hthash(table, str);
856 if ((out = (ast_value*)util_htgeth(table, str, hash)))
857 return (ast_expression*)out;
861 util_snprintf(name, sizeof(name), "dotranslate_%lu", (unsigned long)(fold->parser->translated++));
862 out = ast_value_new(parser_ctx(fold->parser), name, TYPE_STRING);
863 out->expression.flags |= AST_FLAG_INCLUDE_DEF; /* def needs to be included for translatables */
865 out = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_STRING);
868 out->hasvalue = true;
870 out->constval.vstring = parser_strdup(str);
872 vec_push(fold->imm_string, out);
873 util_htseth(table, str, hash, out);
875 return (ast_expression*)out;
879 static GMQCC_INLINE ast_expression *fold_op_mul_vec(fold_t *fold, vec3_t vec, ast_value *sel, const char *set) {
881 * Vector-component constant folding works by matching the component sets
882 * to eliminate expensive operations on whole vectors (3 components at runtime).
883 * To achieve this effect in a clean manner this function generalizes the
884 * values through the use of a set parameter, which is used as an indexing method
885 * for creating the elided ast binary expression.
887 * Consider 'n 0 0', where y and z need to be tested for 0 and x is
888 * used as the value in a binary operation generating an INSTR_MUL instruction.
889 * To accomplish the indexing of the correct component value we use set[0], set[1], set[2]
890 * for x, y, z, where those entries hold the characters 'x', 'y', 'z'. Because
891 * of how ASCII works we can easily delineate:
892 * vec.z is the same as set[2]-'x' when set[2] is 'z'; 'z'-'x' results in a
893 * literal value of 2. Using this 2, we know that taking the address of vec.x (float)
894 * and indexing it with this literal will yield the immediate address of that component.
896 * Of course more work needs to be done to generate the correct index for the ast_member_new
897 * call, which is no problem: set[0]-'x' suffices for that job.
899 qcfloat_t x = (&vec.x)[set[0]-'x'];
900 qcfloat_t y = (&vec.x)[set[1]-'x'];
901 qcfloat_t z = (&vec.x)[set[2]-'x'];
905 ++opts_optimizationcount[OPTIM_VECTOR_COMPONENTS];
906 out = (ast_expression*)ast_member_new(fold_ctx(fold), (ast_expression*)sel, set[0]-'x', NULL);
907 out->node.keep = false;
908 ((ast_member*)out)->rvalue = true;
910 return (ast_expression*)ast_binary_new(fold_ctx(fold), INSTR_MUL_F, fold_constgen_float(fold, x, false), out);
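/*
 * Example: folding v * '0 3 0' with the "yxz" set reads x = 3 out of the
 * constant's y slot, while the other two components must test as zero for
 * the fold to apply; the whole vector multiply then collapses into a single
 * INSTR_MUL_F of the member expression v.y with the constant 3.
 */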
916 static GMQCC_INLINE ast_expression *fold_op_neg(fold_t *fold, ast_value *a) {
919 return fold_constgen_float(fold, -fold_immvalue_float(a), false);
920 } else if (isvector(a)) {
922 return fold_constgen_vector(fold, vec3_neg(fold_immvalue_vector(a)));
927 static GMQCC_INLINE ast_expression *fold_op_not(fold_t *fold, ast_value *a) {
930 return fold_constgen_float(fold, !fold_immvalue_float(a), false);
931 } else if (isvector(a)) {
933 return fold_constgen_float(fold, vec3_notf(fold_immvalue_vector(a)), false);
934 } else if (isstring(a)) {
936 if (OPTS_FLAG(TRUE_EMPTY_STRINGS))
937 return fold_constgen_float(fold, !fold_immvalue_string(a), false);
939 return fold_constgen_float(fold, !fold_immvalue_string(a) || !*fold_immvalue_string(a), false);
945 static bool fold_check_except_float(sfloat_t (*callback)(sfloat_state_t *, sfloat_t, sfloat_t),
954 if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS) && !OPTS_WARN(WARN_INEXACT_COMPARES))
957 s.roundingmode = FOLD_ROUNDING;
958 s.tiny = FOLD_TINYNESS;
959 s.exceptionflags = 0;
960 ca.f = fold_immvalue_float(a);
961 cb.f = fold_immvalue_float(b);
963 callback(&s, ca.s, cb.s);
964 if (s.exceptionflags == 0)
967 if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS))
968 goto inexact_possible;
970 sfloat_check(fold_ctx(fold), &s, NULL);
973 return s.exceptionflags & SFLOAT_INEXACT;
976 static bool fold_check_inexact_float(fold_t *fold, ast_value *a, ast_value *b) {
977 lex_ctx_t ctx = fold_ctx(fold);
978 if (!OPTS_WARN(WARN_INEXACT_COMPARES))
980 if (!a->inexact && !b->inexact)
982 return compile_warning(ctx, WARN_INEXACT_COMPARES, "inexact value in comparison");
985 static GMQCC_INLINE ast_expression *fold_op_add(fold_t *fold, ast_value *a, ast_value *b) {
987 if (fold_can_2(a, b)) {
988 bool inexact = fold_check_except_float(&sfloat_add, fold, a, b);
989 return fold_constgen_float(fold, fold_immvalue_float(a) + fold_immvalue_float(b), inexact);
991 } else if (isvector(a)) {
992 if (fold_can_2(a, b))
993 return fold_constgen_vector(fold, vec3_add(fold_ctx(fold),
994 fold_immvalue_vector(a),
995 fold_immvalue_vector(b)));
1000 static GMQCC_INLINE ast_expression *fold_op_sub(fold_t *fold, ast_value *a, ast_value *b) {
1002 if (fold_can_2(a, b)) {
1003 bool inexact = fold_check_except_float(&sfloat_sub, fold, a, b);
1004 return fold_constgen_float(fold, fold_immvalue_float(a) - fold_immvalue_float(b), inexact);
1006 } else if (isvector(a)) {
1007 if (fold_can_2(a, b))
1008 return fold_constgen_vector(fold, vec3_sub(fold_ctx(fold),
1009 fold_immvalue_vector(a),
1010 fold_immvalue_vector(b)));
1015 static GMQCC_INLINE ast_expression *fold_op_mul(fold_t *fold, ast_value *a, ast_value *b) {
1018 if (fold_can_2(a, b))
1019 return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(b), fold_immvalue_float(a)));
1021 if (fold_can_2(a, b)) {
1022 bool inexact = fold_check_except_float(&sfloat_mul, fold, a, b);
1023 return fold_constgen_float(fold, fold_immvalue_float(a) * fold_immvalue_float(b), inexact);
1026 } else if (isvector(a)) {
1028 if (fold_can_2(a, b))
1029 return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
1031 if (fold_can_2(a, b)) {
1032 return fold_constgen_float(fold, vec3_mulvv(fold_immvalue_vector(a), fold_immvalue_vector(b)), false);
1033 } else if (OPTS_OPTIMIZATION(OPTIM_VECTOR_COMPONENTS) && fold_can_1(a)) {
1034 ast_expression *out;
1035 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "xyz"))) return out;
1036 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "yxz"))) return out;
1037 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "zxy"))) return out;
1038 } else if (OPTS_OPTIMIZATION(OPTIM_VECTOR_COMPONENTS) && fold_can_1(b)) {
1039 ast_expression *out;
1040 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(b), a, "xyz"))) return out;
1041 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(b), a, "yxz"))) return out;
1042 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(b), a, "zxy"))) return out;
1049 static GMQCC_INLINE ast_expression *fold_op_div(fold_t *fold, ast_value *a, ast_value *b) {
1051 if (fold_can_2(a, b)) {
1052 bool inexact = fold_check_except_float(&sfloat_div, fold, a, b);
1053 return fold_constgen_float(fold, fold_immvalue_float(a) / fold_immvalue_float(b), inexact);
1054 } else if (fold_can_1(b)) {
1055 return (ast_expression*)ast_binary_new(
1059 fold_constgen_float(fold, 1.0f / fold_immvalue_float(b), false)
1062 } else if (isvector(a)) {
1063 if (fold_can_2(a, b)) {
1064 return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(a), 1.0f / fold_immvalue_float(b)));
1066 return (ast_expression*)ast_binary_new(
1071 ? (ast_expression*)fold_constgen_float(fold, 1.0f / fold_immvalue_float(b), false)
1072 : (ast_expression*)ast_binary_new(
1075 (ast_expression*)fold->imm_float[1],
1084 static GMQCC_INLINE ast_expression *fold_op_mod(fold_t *fold, ast_value *a, ast_value *b) {
1085 return (fold_can_2(a, b))
1086 ? fold_constgen_float(fold, fmod(fold_immvalue_float(a), fold_immvalue_float(b)), false)
1090 static GMQCC_INLINE ast_expression *fold_op_bor(fold_t *fold, ast_value *a, ast_value *b) {
1092 if (fold_can_2(a, b))
1093 return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) | ((qcint_t)fold_immvalue_float(b))), false);
1096 if (fold_can_2(a, b))
1097 return fold_constgen_vector(fold, vec3_or(fold_immvalue_vector(a), fold_immvalue_vector(b)));
1099 if (fold_can_2(a, b))
1100 return fold_constgen_vector(fold, vec3_orvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
1106 static GMQCC_INLINE ast_expression *fold_op_band(fold_t *fold, ast_value *a, ast_value *b) {
1108 if (fold_can_2(a, b))
1109 return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) & ((qcint_t)fold_immvalue_float(b))), false);
1112 if (fold_can_2(a, b))
1113 return fold_constgen_vector(fold, vec3_and(fold_immvalue_vector(a), fold_immvalue_vector(b)));
1115 if (fold_can_2(a, b))
1116 return fold_constgen_vector(fold, vec3_andvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
1122 static GMQCC_INLINE ast_expression *fold_op_xor(fold_t *fold, ast_value *a, ast_value *b) {
1124 if (fold_can_2(a, b))
1125 return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) ^ ((qcint_t)fold_immvalue_float(b))), false);
1127 if (fold_can_2(a, b)) {
1129 return fold_constgen_vector(fold, vec3_xor(fold_immvalue_vector(a), fold_immvalue_vector(b)));
1131 return fold_constgen_vector(fold, vec3_xorvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
1137 static GMQCC_INLINE ast_expression *fold_op_lshift(fold_t *fold, ast_value *a, ast_value *b) {
1138 if (fold_can_2(a, b) && isfloats(a, b))
1139 return fold_constgen_float(fold, (qcfloat_t)floorf(fold_immvalue_float(a) * powf(2.0f, fold_immvalue_float(b))), false);
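/*
 * Shifts on floats are emulated with scaling: (8 << 2) folds to
 * floorf(8 * 2^2) == 32 here, and (9 >> 1) folds to floorf(9 / 2^1) == 4
 * in fold_op_rshift below.
 */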
1143 static GMQCC_INLINE ast_expression *fold_op_rshift(fold_t *fold, ast_value *a, ast_value *b) {
1144 if (fold_can_2(a, b) && isfloats(a, b))
1145 return fold_constgen_float(fold, (qcfloat_t)floorf(fold_immvalue_float(a) / powf(2.0f, fold_immvalue_float(b))), false);
1149 static GMQCC_INLINE ast_expression *fold_op_andor(fold_t *fold, ast_value *a, ast_value *b, float expr) {
1150 if (fold_can_2(a, b)) {
1151 if (OPTS_FLAG(PERL_LOGIC)) {
1153 return (fold_immediate_true(fold, a)) ? (ast_expression*)a : (ast_expression*)b;
1155 return (fold_immediate_true(fold, a)) ? (ast_expression*)b : (ast_expression*)a;
1157 return fold_constgen_float (
1159 ((expr) ? (fold_immediate_true(fold, a) || fold_immediate_true(fold, b))
1160 : (fold_immediate_true(fold, a) && fold_immediate_true(fold, b)))
1170 static GMQCC_INLINE ast_expression *fold_op_tern(fold_t *fold, ast_value *a, ast_value *b, ast_value *c) {
1171 if (fold_can_1(a)) {
1172 return fold_immediate_true(fold, a)
1173 ? (ast_expression*)b
1174 : (ast_expression*)c;
1179 static GMQCC_INLINE ast_expression *fold_op_exp(fold_t *fold, ast_value *a, ast_value *b) {
1180 if (fold_can_2(a, b))
1181 return fold_constgen_float(fold, (qcfloat_t)powf(fold_immvalue_float(a), fold_immvalue_float(b)), false);
1185 static GMQCC_INLINE ast_expression *fold_op_lteqgt(fold_t *fold, ast_value *a, ast_value *b) {
1186 if (fold_can_2(a,b)) {
1187 fold_check_inexact_float(fold, a, b);
1188 if (fold_immvalue_float(a) < fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[2];
1189 if (fold_immvalue_float(a) == fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[0];
1190 if (fold_immvalue_float(a) > fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[1];
1195 static GMQCC_INLINE ast_expression *fold_op_ltgt(fold_t *fold, ast_value *a, ast_value *b, bool lt) {
1196 if (fold_can_2(a, b)) {
1197 fold_check_inexact_float(fold, a, b);
1198 return (lt) ? (ast_expression*)fold->imm_float[!!(fold_immvalue_float(a) < fold_immvalue_float(b))]
1199 : (ast_expression*)fold->imm_float[!!(fold_immvalue_float(a) > fold_immvalue_float(b))];
1204 static GMQCC_INLINE ast_expression *fold_op_cmp(fold_t *fold, ast_value *a, ast_value *b, bool ne) {
1205 if (fold_can_2(a, b)) {
1206 if (isfloat(a) && isfloat(b)) {
1207 float la = fold_immvalue_float(a);
1208 float lb = fold_immvalue_float(b);
1209 fold_check_inexact_float(fold, a, b);
1210 return (ast_expression*)fold->imm_float[!(ne ? la == lb : la != lb)];
1211 } if (isvector(a) && isvector(b)) {
1212 vec3_t la = fold_immvalue_vector(a);
1213 vec3_t lb = fold_immvalue_vector(b);
1214 return (ast_expression*)fold->imm_float[!(ne ? vec3_cmp(la, lb) : !vec3_cmp(la, lb))];
1220 static GMQCC_INLINE ast_expression *fold_op_bnot(fold_t *fold, ast_value *a) {
1223 return fold_constgen_float(fold, -1-fold_immvalue_float(a), false);
1227 return fold_constgen_vector(fold, vec3_not(fold_immvalue_vector(a)));
1233 static GMQCC_INLINE ast_expression *fold_op_cross(fold_t *fold, ast_value *a, ast_value *b) {
1234 if (fold_can_2(a, b))
1235 return fold_constgen_vector(fold, vec3_cross(fold_immvalue_vector(a), fold_immvalue_vector(b)));
1239 ast_expression *fold_op(fold_t *fold, const oper_info *info, ast_expression **opexprs) {
1240 ast_value *a = (ast_value*)opexprs[0];
1241 ast_value *b = (ast_value*)opexprs[1];
1242 ast_value *c = (ast_value*)opexprs[2];
1243 ast_expression *e = NULL;
1245 /* can a fold operation be applied to this operator usage? */
1249 switch(info->operands) {
1250 case 3: if(!c) return NULL;
1251 case 2: if(!b) return NULL;
1254 compile_error(fold_ctx(fold), "internal error: fold_op no operands to fold\n");
1260 * We could use a boolean and a default case, but ironically gcc produces
1261 * invalid, broken assembly from that construct. clang/tcc get it right,
1262 * yet interestingly refuse to compile this to a jump table when written
1263 * that way. This also happens to be the most efficient method, since you
1264 * get per-case granularity: the pointer check happens only in the case
1265 * that needs it, as opposed to the default method, which would need a
1266 * boolean and a pointer check afterwards.
1268 #define fold_op_case(ARGS, ARGS_OPID, OP, ARGS_FOLD) \
1269 case opid##ARGS ARGS_OPID: \
1270 if ((e = fold_op_##OP ARGS_FOLD)) { \
1271 ++opts_optimizationcount[OPTIM_CONST_FOLD]; \
1276 fold_op_case(2, ('-', 'P'), neg, (fold, a));
1277 fold_op_case(2, ('!', 'P'), not, (fold, a));
1278 fold_op_case(1, ('+'), add, (fold, a, b));
1279 fold_op_case(1, ('-'), sub, (fold, a, b));
1280 fold_op_case(1, ('*'), mul, (fold, a, b));
1281 fold_op_case(1, ('/'), div, (fold, a, b));
1282 fold_op_case(1, ('%'), mod, (fold, a, b));
1283 fold_op_case(1, ('|'), bor, (fold, a, b));
1284 fold_op_case(1, ('&'), band, (fold, a, b));
1285 fold_op_case(1, ('^'), xor, (fold, a, b));
1286 fold_op_case(1, ('<'), ltgt, (fold, a, b, true));
1287 fold_op_case(1, ('>'), ltgt, (fold, a, b, false));
1288 fold_op_case(2, ('<', '<'), lshift, (fold, a, b));
1289 fold_op_case(2, ('>', '>'), rshift, (fold, a, b));
1290 fold_op_case(2, ('|', '|'), andor, (fold, a, b, true));
1291 fold_op_case(2, ('&', '&'), andor, (fold, a, b, false));
1292 fold_op_case(2, ('?', ':'), tern, (fold, a, b, c));
1293 fold_op_case(2, ('*', '*'), exp, (fold, a, b));
1294 fold_op_case(3, ('<','=','>'), lteqgt, (fold, a, b));
1295 fold_op_case(2, ('!', '='), cmp, (fold, a, b, true));
1296 fold_op_case(2, ('=', '='), cmp, (fold, a, b, false));
1297 fold_op_case(2, ('~', 'P'), bnot, (fold, a));
1298 fold_op_case(2, ('>', '<'), cross, (fold, a, b));
1301 compile_error(fold_ctx(fold), "internal error: attempted to constant-fold for unsupported operator");
1306 * Constant folding for compiler intrinsics; a similar approach to operator
1307 * folding, primarily: individual functions for each intrinsic to fold,
1308 * and a generic selection function.
1310 static GMQCC_INLINE ast_expression *fold_intrin_isfinite(fold_t *fold, ast_value *a) {
1311 return fold_constgen_float(fold, isfinite(fold_immvalue_float(a)), false);
1313 static GMQCC_INLINE ast_expression *fold_intrin_isinf(fold_t *fold, ast_value *a) {
1314 return fold_constgen_float(fold, isinf(fold_immvalue_float(a)), false);
1316 static GMQCC_INLINE ast_expression *fold_intrin_isnan(fold_t *fold, ast_value *a) {
1317 return fold_constgen_float(fold, isnan(fold_immvalue_float(a)), false);
1319 static GMQCC_INLINE ast_expression *fold_intrin_isnormal(fold_t *fold, ast_value *a) {
1320 return fold_constgen_float(fold, isnormal(fold_immvalue_float(a)), false);
1322 static GMQCC_INLINE ast_expression *fold_intrin_signbit(fold_t *fold, ast_value *a) {
1323 return fold_constgen_float(fold, signbit(fold_immvalue_float(a)), false);
1325 static GMQCC_INLINE ast_expression *fold_intirn_acosh(fold_t *fold, ast_value *a) {
1326 return fold_constgen_float(fold, acoshf(fold_immvalue_float(a)), false);
1328 static GMQCC_INLINE ast_expression *fold_intrin_asinh(fold_t *fold, ast_value *a) {
1329 return fold_constgen_float(fold, asinhf(fold_immvalue_float(a)), false);
1331 static GMQCC_INLINE ast_expression *fold_intrin_atanh(fold_t *fold, ast_value *a) {
1332 return fold_constgen_float(fold, (float)atanh(fold_immvalue_float(a)), false);
1334 static GMQCC_INLINE ast_expression *fold_intrin_exp(fold_t *fold, ast_value *a) {
1335 return fold_constgen_float(fold, expf(fold_immvalue_float(a)), false);
1337 static GMQCC_INLINE ast_expression *fold_intrin_exp2(fold_t *fold, ast_value *a) {
1338 return fold_constgen_float(fold, exp2f(fold_immvalue_float(a)), false);
1340 static GMQCC_INLINE ast_expression *fold_intrin_expm1(fold_t *fold, ast_value *a) {
1341 return fold_constgen_float(fold, expm1f(fold_immvalue_float(a)), false);
1343 static GMQCC_INLINE ast_expression *fold_intrin_mod(fold_t *fold, ast_value *lhs, ast_value *rhs) {
1344 return fold_constgen_float(fold, fmodf(fold_immvalue_float(lhs), fold_immvalue_float(rhs)), false);
1346 static GMQCC_INLINE ast_expression *fold_intrin_pow(fold_t *fold, ast_value *lhs, ast_value *rhs) {
1347 return fold_constgen_float(fold, powf(fold_immvalue_float(lhs), fold_immvalue_float(rhs)), false);
1349 static GMQCC_INLINE ast_expression *fold_intrin_fabs(fold_t *fold, ast_value *a) {
1350 return fold_constgen_float(fold, fabsf(fold_immvalue_float(a)), false);
1354 ast_expression *fold_intrin(fold_t *fold, const char *intrin, ast_expression **arg) {
1355 ast_expression *ret = NULL;
1356 ast_value *a = (ast_value*)arg[0];
1357 ast_value *b = (ast_value*)arg[1];
1359 if (!strcmp(intrin, "isfinite")) ret = fold_intrin_isfinite(fold, a);
1360 if (!strcmp(intrin, "isinf")) ret = fold_intrin_isinf(fold, a);
1361 if (!strcmp(intrin, "isnan")) ret = fold_intrin_isnan(fold, a);
1362 if (!strcmp(intrin, "isnormal")) ret = fold_intrin_isnormal(fold, a);
1363 if (!strcmp(intrin, "signbit")) ret = fold_intrin_signbit(fold, a);
1364 if (!strcmp(intrin, "acosh")) ret = fold_intirn_acosh(fold, a);
1365 if (!strcmp(intrin, "asinh")) ret = fold_intrin_asinh(fold, a);
1366 if (!strcmp(intrin, "atanh")) ret = fold_intrin_atanh(fold, a);
1367 if (!strcmp(intrin, "exp")) ret = fold_intrin_exp(fold, a);
1368 if (!strcmp(intrin, "exp2")) ret = fold_intrin_exp2(fold, a);
1369 if (!strcmp(intrin, "expm1")) ret = fold_intrin_expm1(fold, a);
1370 if (!strcmp(intrin, "mod")) ret = fold_intrin_mod(fold, a, b);
1371 if (!strcmp(intrin, "pow")) ret = fold_intrin_pow(fold, a, b);
1372 if (!strcmp(intrin, "fabs")) ret = fold_intrin_fabs(fold, a);
1375 ++opts_optimizationcount[OPTIM_CONST_FOLD];
1381 * These are all the actual constant folding methods that happen in between
1382 * the AST and IR stages of the compiler, i.e. eliminating branches for const
1383 * expressions, which is the only supported thing so far. We undefine the
1384 * testing macros here because an ir_value is different from an ast_value.
1390 #undef fold_immvalue_float
1391 #undef fold_immvalue_string
1392 #undef fold_immvalue_vector
1396 #define isfloat(X) ((X)->vtype == TYPE_FLOAT)
1397 /*#define isstring(X) ((X)->vtype == TYPE_STRING)*/
1398 /*#define isvector(X) ((X)->vtype == TYPE_VECTOR)*/
1399 #define fold_immvalue_float(X) ((X)->constval.vfloat)
1400 #define fold_immvalue_vector(X) ((X)->constval.vvec)
1401 /*#define fold_immvalue_string(X) ((X)->constval.vstring)*/
1402 #define fold_can_1(X) ((X)->hasvalue && (X)->cvq == CV_CONST)
1403 /*#define fold_can_2(X,Y) (fold_can_1(X) && fold_can_1(Y))*/
1405 static ast_expression *fold_superfluous(ast_expression *left, ast_expression *right, int op) {
1406 ast_expression *swapped = NULL; /* using this as bool */
1409 if (!ast_istype(right, ast_value) || !fold_can_1((load = (ast_value*)right))) {
1415 if (!ast_istype(right, ast_value) || !fold_can_1((load = (ast_value*)right)))
1423 if (fold_immvalue_float(load) == 1.0f) {
1424 ++opts_optimizationcount[OPTIM_PEEPHOLE];
1435 if (fold_immvalue_float(load) == 0.0f) {
1436 ++opts_optimizationcount[OPTIM_PEEPHOLE];
1443 if (vec3_cmp(fold_immvalue_vector(load), vec3_create(1, 1, 1))) {
1444 ++opts_optimizationcount[OPTIM_PEEPHOLE];
1454 if (vec3_cmp(fold_immvalue_vector(load), vec3_create(0, 0, 0))) {
1455 ++opts_optimizationcount[OPTIM_PEEPHOLE];
1465 ast_expression *fold_binary(lex_ctx_t ctx, int op, ast_expression *left, ast_expression *right) {
1466 ast_expression *ret = fold_superfluous(left, right, op);
1469 return (ast_expression*)ast_binary_new(ctx, op, left, right);
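/*
 * fold_binary first gives fold_superfluous a chance to elide operations
 * against the identity constants checked above (1.0, 0.0, '1 1 1' and
 * '0 0 0'), so that the untouched operand can be returned directly; only
 * when that peephole does not apply is a fresh ast_binary node built.
 */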
1472 static GMQCC_INLINE int fold_cond(ir_value *condval, ast_function *func, ast_ifthen *branch) {
1473 if (isfloat(condval) && fold_can_1(condval) && OPTS_OPTIMIZATION(OPTIM_CONST_FOLD_DCE)) {
1474 ast_expression_codegen *cgen;
1477 bool istrue = (fold_immvalue_float(condval) != 0.0f && branch->on_true);
1478 bool isfalse = (fold_immvalue_float(condval) == 0.0f && branch->on_false);
1479 ast_expression *path = (istrue) ? branch->on_true :
1480 (isfalse) ? branch->on_false : NULL;
1483 * No path to take implies that the evaluation is if(0) and there
1484 * is no else block, so eliminate all the code.
1486 ++opts_optimizationcount[OPTIM_CONST_FOLD_DCE];
1490 if (!(elide = ir_function_create_block(ast_ctx(branch), func->ir_func, ast_function_label(func, ((istrue) ? "ontrue" : "onfalse")))))
1492 if (!(*(cgen = path->codegen))((ast_expression*)path, func, false, &dummy))
1494 if (!ir_block_create_jump(func->curblock, ast_ctx(branch), elide))
1497 * now the branch has been eliminated and the correct block for the constant evaluation
1498 * is expanded into the current block for the function.
1500 func->curblock = elide;
1501 ++opts_optimizationcount[OPTIM_CONST_FOLD_DCE];
1504 return -1; /* nothing done */
1507 int fold_cond_ternary(ir_value *condval, ast_function *func, ast_ternary *branch) {
1508 return fold_cond(condval, func, (ast_ifthen*)branch);
1511 int fold_cond_ifthen(ir_value *condval, ast_function *func, ast_ifthen *branch) {
1512 return fold_cond(condval, func, branch);