/*
 * Copyright (C) 2012, 2013, 2014
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is furnished to do
 * so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <string.h>
#include <math.h>

#include "ast.h"
#include "parser.h"

#define FOLD_STRING_UNTRANSLATE_HTSIZE 1024
#define FOLD_STRING_DOTRANSLATE_HTSIZE 1024

/* The options to use for inexact and arithmetic exceptions */
#define FOLD_ROUNDING SFLOAT_ROUND_NEAREST_EVEN
#define FOLD_TINYNESS SFLOAT_TBEFORE

/*
 * Comparing float values is an unsafe operation when the operands to the
 * comparison are floating point values that are inexact. For instance 1/3 is an
 * inexact value. The FPU is meant to raise exceptions when these sorts of things
 * happen, including division by zero, underflows and overflows. The C standard
 * library provides us with the <fenv.h> header to gain access to the floating-
 * point environment and lets us set the rounding mode and check for these exceptions.
 * The problem is that the C standard allows an implementation to leave these
 * functions stubbed out and does not require that they be implemented. Furthermore,
 * depending on the implementation, there is no control over the FPU at all. This
 * is an IEEE 754 conforming implementation in software to compensate.
 */
typedef uint32_t sfloat_t;

typedef union {
    qcfloat_t f;
    sfloat_t  s;
} sfloat_cast_t;

/* Exception flags (distinct bits so they can be OR'd together) */
typedef enum {
    SFLOAT_NOEXCEPT  = 0,
    SFLOAT_INVALID   = 1,
    SFLOAT_DIVBYZERO = 2,
    SFLOAT_OVERFLOW  = 4,
    SFLOAT_INEXACT   = 8,
    SFLOAT_UNDERFLOW = 16
} sfloat_exceptionflags_t;

typedef enum {
    SFLOAT_ROUND_NEAREST_EVEN,
    SFLOAT_ROUND_TO_ZERO,
    SFLOAT_ROUND_DOWN,
    SFLOAT_ROUND_UP
} sfloat_roundingmode_t;

/* Whether tininess is detected before or after rounding */
typedef enum {
    SFLOAT_TAFTER,
    SFLOAT_TBEFORE
} sfloat_tdetect_t;

typedef struct {
    sfloat_roundingmode_t   roundingmode;
    sfloat_exceptionflags_t exceptionflags;
    sfloat_tdetect_t        tiny;
} sfloat_state_t;

/* Counts the number of leading zero bits before the most significant one bit. */
#ifdef _MSC_VER
/* MSVC has an intrinsic for this */
    static GMQCC_INLINE uint32_t sfloat_clz(uint32_t x) {
        unsigned long r = 0;
        /* _BitScanReverse finds the index of the most significant set bit */
        _BitScanReverse(&r, x);
        return 31 - r;
    }
#   define SFLOAT_CLZ(X, SUB) \
        (sfloat_clz((X)) - (SUB))
#elif defined(__GNUC__) || defined(__CLANG__)
/* Clang and GCC have a builtin for this */
#   define SFLOAT_CLZ(X, SUB) \
        (__builtin_clz((X)) - (SUB))
#else
/* Fallback in portable C */
    static GMQCC_INLINE uint32_t sfloat_popcnt(uint32_t x) {
        x -= ((x >> 1) & 0x55555555);
        x  = (((x >> 2) & 0x33333333) + (x & 0x33333333));
        x  = (((x >> 4) + x) & 0x0F0F0F0F);
        x += x >> 8;
        x += x >> 16;
        return x & 0x0000003F;
    }
    static GMQCC_INLINE uint32_t sfloat_clz(uint32_t x) {
        /* smear the highest set bit downwards, then count the set bits */
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        return 32 - sfloat_popcnt(x);
    }
#   define SFLOAT_CLZ(X, SUB) \
        (sfloat_clz((X)) - (SUB))
#endif

/* The value of a NaN */
#define SFLOAT_NAN 0xFFFFFFFF
/* Test if NaN */
#define SFLOAT_ISNAN(A) \
    (0xFF000000 < (uint32_t)((A) << 1))
/* Test if signaling NaN */
#define SFLOAT_ISSNAN(A) \
    (((((A) >> 22) & 0x1FF) == 0x1FE) && ((A) & 0x003FFFFF))
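
/*
 * Illustrative only: the quiet NaN 0x7FC00000 satisfies SFLOAT_ISNAN, since
 * (0x7FC00000 << 1) == 0xFF800000 exceeds 0xFF000000, but not SFLOAT_ISSNAN.
 * The signaling pattern 0x7F800001 satisfies both: its exponent plus top
 * mantissa bit form 0x1FE and the remaining mantissa bits are non-zero.
 */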

/* Raise exception */
#define SFLOAT_RAISE(STATE, FLAGS) \
    ((STATE)->exceptionflags = (sfloat_exceptionflags_t)((STATE)->exceptionflags | (FLAGS)))

/*
 * Shifts `A' right by the number of bits given in `COUNT'. If any non-zero bits
 * are shifted off they are forced into the least significant bit of the result
 * by setting it to one. As a result of this, the value of `COUNT' can be
 * arbitrarily large; if `COUNT' is greater than 32, the result will be either
 * zero or one, depending on whether `A' is zero or non-zero. The result is
 * stored into the value pointed at by `Z'.
 */
#define SFLOAT_SHIFT(SIZE, A, COUNT, Z)                                       \
    *(Z) = ((COUNT) == 0)                                                     \
        ? (A)                                                                 \
        : (((COUNT) < (SIZE))                                                 \
            ? ((A) >> (COUNT)) | (((A) << ((-(COUNT)) & ((SIZE) - 1))) != 0)  \
            : ((A) != 0))
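
/*
 * Illustrative only (not part of the original file): a quick demonstration
 * of the jamming shift above.
 */
#if 0
static void sfloat_shift_example(void) {
    uint32_t z;
    SFLOAT_SHIFT(32, 0x80000001u, 4,  &z); /* z == 0x08000001: the lost low bit is jammed into bit 0 */
    SFLOAT_SHIFT(32, 0x80000000u, 40, &z); /* COUNT >= SIZE:   z == 1 because A is non-zero */
}
#endif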

/* Extract fractional component */
#define SFLOAT_EXTRACT_FRAC(X) \
    ((uint32_t)((X) & 0x007FFFFF))
/* Extract exponent component */
#define SFLOAT_EXTRACT_EXP(X) \
    ((int16_t)(((X) >> 23) & 0xFF))
/* Extract sign bit */
#define SFLOAT_EXTRACT_SIGN(X) \
    ((X) >> 31)

/*
 * Normalizes the subnormal value represented by the denormalized significand
 * `SA'. The normalized exponent and significand are stored at the locations
 * pointed at by `Z' and `SZ' respectively.
 */
#define SFLOAT_SUBNORMALIZE(SA, Z, SZ) \
    (void)(*(SZ) = (SA) << SFLOAT_CLZ((SA), 8), *(Z) = 1 - SFLOAT_CLZ((SA), 8))

/*
 * Packs the sign `SIGN', exponent `EXP' and significand `SIG' into the value
 * giving the result.
 *
 * After the shifting into their proper positions, the fields are added together
 * to form the result. This means any integer portion of `SIG' will be added
 * to the exponent. Similarly, because a properly normalized significand will
 * always have an integer portion equal to one, the exponent input `EXP' should
 * be one less than the desired result exponent whenever the significand input
 * `SIG' is a complete, normalized significand.
 */
#define SFLOAT_PACK(SIGN, EXP, SIG) \
    (sfloat_t)((((uint32_t)(SIGN)) << 31) + (((uint32_t)(EXP)) << 23) + (SIG))
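
/*
 * Illustrative only: 1.0f is encoded as 0x3F800000, so SFLOAT_EXTRACT_SIGN
 * gives 0, SFLOAT_EXTRACT_EXP gives 0x7F and SFLOAT_EXTRACT_FRAC gives 0;
 * SFLOAT_PACK(0, 0x7F, 0) rebuilds exactly 0x3F800000.
 */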

/*
 * Takes two values `a' and `b', one of which is a NaN, and returns the appropriate
 * NaN result. If either `a' or `b' is a signaling NaN then an invalid exception is
 * raised.
 */
static sfloat_t sfloat_propagate_nan(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
    bool isnan_a  = SFLOAT_ISNAN(a);
    bool issnan_a = SFLOAT_ISSNAN(a);
    bool isnan_b  = SFLOAT_ISNAN(b);
    bool issnan_b = SFLOAT_ISSNAN(b);

    /* quiet the NaNs before propagating one of them */
    a |= 0x00400000;
    b |= 0x00400000;

    if (issnan_a | issnan_b)
        SFLOAT_RAISE(state, SFLOAT_INVALID);
    if (isnan_a)
        return (issnan_a & isnan_b) ? b : a;
    return b;
}

/*
 * Takes an abstract value having sign `sign_z', exponent `exp_z', and significand
 * `sig_z' and returns the appropriate value corresponding to the abstract input.
 *
 * The abstract value is simply rounded and packed into the format. If the abstract
 * input cannot be represented exactly an inexact exception is raised. If the
 * abstract input is too large, the overflow and inexact exceptions are both raised
 * and an infinity or maximal finite value is returned. If the abstract value is
 * too small, the value is rounded to a subnormal and the underflow and inexact
 * exceptions are only raised if the value cannot be represented exactly with
 * the subnormal format.
 *
 * The input significand `sig_z' has its binary point between bits 30 and 29,
 * which is seven bits to the left of its usual location. The shifted significand
 * must be normalized or smaller than this. If it's not normalized then the exponent
 * `exp_z' must be zero; in that case, the result returned is a subnormal number
 * and it must not require rounding. In the more usual case where the significand
 * is normalized, the exponent must be one less than the *true* exponent.
 *
 * The handling of underflow and overflow is otherwise in alignment with IEC/IEEE.
 */
static sfloat_t SFLOAT_PACK_round(sfloat_state_t *state, bool sign_z, int16_t exp_z, uint32_t sig_z) {
    sfloat_roundingmode_t mode      = state->roundingmode;
    bool                  even      = !!(mode == SFLOAT_ROUND_NEAREST_EVEN);
    unsigned char         increment = 0x40;
    unsigned char         bits      = sig_z & 0x7F;

    if (!even) {
        if (mode == SFLOAT_ROUND_TO_ZERO)
            increment = 0;
        else {
            increment = 0x7F;
            if (sign_z) {
                if (mode == SFLOAT_ROUND_UP)
                    increment = 0;
            } else {
                if (mode == SFLOAT_ROUND_DOWN)
                    increment = 0;
            }
        }
    }

    if (0xFD <= (uint16_t)exp_z) {
        if ((0xFD < exp_z) || ((exp_z == 0xFD) && ((int32_t)(sig_z + increment) < 0))) {
            SFLOAT_RAISE(state, SFLOAT_OVERFLOW | SFLOAT_INEXACT);
            return SFLOAT_PACK(sign_z, 0xFF, 0) - (increment == 0);
        }
        if (exp_z < 0) {
            /* Check for underflow */
            bool tiny = (state->tiny == SFLOAT_TBEFORE) || (exp_z < -1) || (sig_z + increment < 0x80000000);
            SFLOAT_SHIFT(32, sig_z, -exp_z, &sig_z);
            exp_z = 0;
            bits  = sig_z & 0x7F;
            if (tiny && bits)
                SFLOAT_RAISE(state, SFLOAT_UNDERFLOW);
        }
    }

    if (bits)
        SFLOAT_RAISE(state, SFLOAT_INEXACT);
    sig_z = (sig_z + increment) >> 7;
    sig_z &= ~(((bits ^ 0x40) == 0) & even);
    if (!sig_z)
        exp_z = 0;
    return SFLOAT_PACK(sign_z, exp_z, sig_z);
}
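
/*
 * Illustrative only: with round-to-nearest-even the 7 guard bits below the
 * final significand are resolved by adding 0x40 (half a unit in the last
 * place) and shifting right by 7; an exact tie (guard bits == 0x40) then has
 * its low result bit cleared by the `sig_z &= ~(((bits ^ 0x40) == 0) & even)'
 * step, which is what makes ties round to an even significand.
 */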

/*
 * Takes an abstract value having sign `sign_z', exponent `exp_z' and significand
 * `sig_z' and returns the appropriate value corresponding to the abstract input.
 * This function is exactly like `PACK_round' except the significand does not have
 * to be normalized.
 *
 * Bit 31 of the significand must be zero and the exponent must be one less than
 * the *true* exponent.
 */
static sfloat_t SFLOAT_PACK_normal(sfloat_state_t *state, bool sign_z, int16_t exp_z, uint32_t sig_z) {
    unsigned char c = SFLOAT_CLZ(sig_z, 1);
    return SFLOAT_PACK_round(state, sign_z, exp_z - c, sig_z << c);
}

/*
 * Returns the result of adding the absolute values of `a' and `b'. The sign
 * `sign_z' is ignored if the result is a NaN.
 */
static sfloat_t sfloat_add_impl(sfloat_state_t *state, sfloat_t a, sfloat_t b, bool sign_z) {
    int16_t  exp_a = SFLOAT_EXTRACT_EXP(a);
    int16_t  exp_b = SFLOAT_EXTRACT_EXP(b);
    int16_t  exp_z = 0;
    int16_t  exp_d = exp_a - exp_b;
    uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a) << 6;
    uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b) << 6;
    uint32_t sig_z = 0;

    if (0 < exp_d) {
        if (exp_a == 0xFF)
            return sig_a ? sfloat_propagate_nan(state, a, b) : a;
        if (exp_b == 0)
            --exp_d;
        else
            sig_b |= 0x20000000;
        SFLOAT_SHIFT(32, sig_b, exp_d, &sig_b);
        exp_z = exp_a;
    } else if (exp_d < 0) {
        if (exp_b == 0xFF)
            return sig_b ? sfloat_propagate_nan(state, a, b) : SFLOAT_PACK(sign_z, 0xFF, 0);
        if (exp_a == 0)
            ++exp_d;
        else
            sig_a |= 0x20000000;
        SFLOAT_SHIFT(32, sig_a, -exp_d, &sig_a);
        exp_z = exp_b;
    } else {
        if (exp_a == 0xFF)
            return (sig_a | sig_b) ? sfloat_propagate_nan(state, a, b) : a;
        if (exp_a == 0)
            return SFLOAT_PACK(sign_z, 0, (sig_a + sig_b) >> 6);
        sig_z = 0x40000000 + sig_a + sig_b;
        exp_z = exp_a;
        goto end;
    }

    /* the implicit one bit only needs to be added into the sum once */
    sig_a |= 0x20000000;
    sig_z  = (sig_a + sig_b) << 1;
    --exp_z;
    if ((int32_t)sig_z < 0) {
        sig_z = sig_a + sig_b;
        ++exp_z;
    }
end:
    return SFLOAT_PACK_round(state, sign_z, exp_z, sig_z);
}

/*
 * Returns the result of subtracting the absolute values of `a' and `b'. If the
 * sign `sign_z' is one, the difference is negated before being returned. The
 * sign is ignored if the result is a NaN.
 */
static sfloat_t sfloat_sub_impl(sfloat_state_t *state, sfloat_t a, sfloat_t b, bool sign_z) {
    int16_t  exp_a = SFLOAT_EXTRACT_EXP(a);
    int16_t  exp_b = SFLOAT_EXTRACT_EXP(b);
    int16_t  exp_z = 0;
    int16_t  exp_d = exp_a - exp_b;
    uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a) << 7;
    uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b) << 7;
    uint32_t sig_z = 0;

    if (0 < exp_d) goto exp_greater_a;
    if (exp_d < 0) goto exp_greater_b;

    if (exp_a == 0xFF) {
        if (sig_a | sig_b)
            return sfloat_propagate_nan(state, a, b);
        SFLOAT_RAISE(state, SFLOAT_INVALID);
        return SFLOAT_NAN;
    }
    if (exp_a == 0)
        exp_a = exp_b = 1;

    if (sig_b < sig_a) goto greater_a;
    if (sig_a < sig_b) goto greater_b;

    /* exact zero: round-down mode yields -0, every other mode +0 */
    return SFLOAT_PACK(state->roundingmode == SFLOAT_ROUND_DOWN, 0, 0);

exp_greater_b:
    if (exp_b == 0xFF)
        return (sig_b) ? sfloat_propagate_nan(state, a, b) : SFLOAT_PACK(sign_z ^ 1, 0xFF, 0);
    if (exp_a == 0)
        ++exp_d;
    else
        sig_a |= 0x40000000;
    SFLOAT_SHIFT(32, sig_a, -exp_d, &sig_a);
    sig_b |= 0x40000000;
greater_b:
    sig_z   = sig_b - sig_a;
    exp_z   = exp_b;
    sign_z ^= 1;
    goto end;

exp_greater_a:
    if (exp_a == 0xFF)
        return (sig_a) ? sfloat_propagate_nan(state, a, b) : a;
    if (exp_b == 0)
        --exp_d;
    else
        sig_b |= 0x40000000;
    SFLOAT_SHIFT(32, sig_b, exp_d, &sig_b);
    sig_a |= 0x40000000;
greater_a:
    sig_z = sig_a - sig_b;
    exp_z = exp_a;
end:
    --exp_z;
    return SFLOAT_PACK_normal(state, sign_z, exp_z, sig_z);
}

static GMQCC_INLINE sfloat_t sfloat_add(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
    bool sign_a = SFLOAT_EXTRACT_SIGN(a);
    bool sign_b = SFLOAT_EXTRACT_SIGN(b);
    return (sign_a == sign_b) ? sfloat_add_impl(state, a, b, sign_a)
                              : sfloat_sub_impl(state, a, b, sign_a);
}

static GMQCC_INLINE sfloat_t sfloat_sub(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
    bool sign_a = SFLOAT_EXTRACT_SIGN(a);
    bool sign_b = SFLOAT_EXTRACT_SIGN(b);
    return (sign_a == sign_b) ? sfloat_sub_impl(state, a, b, sign_a)
                              : sfloat_add_impl(state, a, b, sign_a);
}
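
/*
 * Illustrative only (not part of the original file): the wrappers above
 * dispatch on the sign bits so the magnitude-only implementations can be
 * shared. For example 1.0f + -2.0f has differing signs, so sfloat_add routes
 * it through sfloat_sub_impl, which computes |1.0| - |2.0| and flips the
 * result sign, yielding -1.0f.
 */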

static sfloat_t sfloat_mul(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
    int16_t  exp_a = SFLOAT_EXTRACT_EXP(a);
    int16_t  exp_b = SFLOAT_EXTRACT_EXP(b);
    int16_t  exp_z = 0;
    uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a);
    uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b);
    uint32_t sig_z = 0;
    uint64_t sig_z64 = 0;
    bool     sign_a = SFLOAT_EXTRACT_SIGN(a);
    bool     sign_b = SFLOAT_EXTRACT_SIGN(b);
    bool     sign_z = sign_a ^ sign_b;

    if (exp_a == 0xFF) {
        if (sig_a || ((exp_b == 0xFF) && sig_b))
            return sfloat_propagate_nan(state, a, b);
        if ((exp_b | sig_b) == 0) {
            /* inf * 0 is invalid */
            SFLOAT_RAISE(state, SFLOAT_INVALID);
            return SFLOAT_NAN;
        }
        return SFLOAT_PACK(sign_z, 0xFF, 0);
    }
    if (exp_b == 0xFF) {
        if (sig_b)
            return sfloat_propagate_nan(state, a, b);
        if ((exp_a | sig_a) == 0) {
            SFLOAT_RAISE(state, SFLOAT_INVALID);
            return SFLOAT_NAN;
        }
        return SFLOAT_PACK(sign_z, 0xFF, 0);
    }
    if (exp_a == 0) {
        if (sig_a == 0)
            return SFLOAT_PACK(sign_z, 0, 0);
        SFLOAT_SUBNORMALIZE(sig_a, &exp_a, &sig_a);
    }
    if (exp_b == 0) {
        if (sig_b == 0)
            return SFLOAT_PACK(sign_z, 0, 0);
        SFLOAT_SUBNORMALIZE(sig_b, &exp_b, &sig_b);
    }

    exp_z = exp_a + exp_b - 0x7F;
    sig_a = (sig_a | 0x00800000) << 7;
    sig_b = (sig_b | 0x00800000) << 8;
    SFLOAT_SHIFT(64, ((uint64_t)sig_a) * sig_b, 32, &sig_z64);
    sig_z = sig_z64;
    if (0 <= (int32_t)(sig_z << 1)) {
        sig_z <<= 1;
        --exp_z;
    }
    return SFLOAT_PACK_round(state, sign_z, exp_z, sig_z);
}

static sfloat_t sfloat_div(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
    int16_t  exp_a = SFLOAT_EXTRACT_EXP(a);
    int16_t  exp_b = SFLOAT_EXTRACT_EXP(b);
    int16_t  exp_z = 0;
    uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a);
    uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b);
    uint32_t sig_z = 0;
    bool     sign_a = SFLOAT_EXTRACT_SIGN(a);
    bool     sign_b = SFLOAT_EXTRACT_SIGN(b);
    bool     sign_z = sign_a ^ sign_b;

    if (exp_a == 0xFF) {
        if (sig_a)
            return sfloat_propagate_nan(state, a, b);
        if (exp_b == 0xFF) {
            if (sig_b)
                return sfloat_propagate_nan(state, a, b);
            /* inf / inf is invalid */
            SFLOAT_RAISE(state, SFLOAT_INVALID);
            return SFLOAT_NAN;
        }
        return SFLOAT_PACK(sign_z, 0xFF, 0);
    }
    if (exp_b == 0xFF)
        return (sig_b) ? sfloat_propagate_nan(state, a, b) : SFLOAT_PACK(sign_z, 0, 0);
    if (exp_b == 0) {
        if (sig_b == 0) {
            if ((exp_a | sig_a) == 0) {
                /* 0 / 0 is invalid rather than a division by zero */
                SFLOAT_RAISE(state, SFLOAT_INVALID);
                return SFLOAT_NAN;
            }
            SFLOAT_RAISE(state, SFLOAT_DIVBYZERO);
            return SFLOAT_PACK(sign_z, 0xFF, 0);
        }
        SFLOAT_SUBNORMALIZE(sig_b, &exp_b, &sig_b);
    }
    if (exp_a == 0) {
        if (sig_a == 0)
            return SFLOAT_PACK(sign_z, 0, 0);
        SFLOAT_SUBNORMALIZE(sig_a, &exp_a, &sig_a);
    }

    exp_z = exp_a - exp_b + 0x7D;
    sig_a = (sig_a | 0x00800000) << 7;
    sig_b = (sig_b | 0x00800000) << 8;
    if (sig_b <= (sig_a + sig_a)) {
        sig_a >>= 1;
        ++exp_z;
    }
    sig_z = (((uint64_t)sig_a) << 32) / sig_b;
    if ((sig_z & 0x3F) == 0)
        sig_z |= ((uint64_t)sig_b * sig_z != ((uint64_t)sig_a) << 32);
    return SFLOAT_PACK_round(state, sign_z, exp_z, sig_z);
}

static sfloat_t sfloat_neg(sfloat_state_t *state, sfloat_t a) {
    sfloat_cast_t neg;
    neg.f = -1;
    /* unary negation implemented as multiplication by -1 */
    return sfloat_mul(state, a, neg.s);
}

static GMQCC_INLINE void sfloat_check(lex_ctx_t ctx, sfloat_state_t *state, const char *vec) {
    /* Exception comes from a vector component */
    if (vec) {
        if (state->exceptionflags & SFLOAT_DIVBYZERO)
            compile_error(ctx, "division by zero in `%s' component", vec);
        if (state->exceptionflags & SFLOAT_INVALID)
            compile_error(ctx, "undefined (inf) in `%s' component", vec);
        if (state->exceptionflags & SFLOAT_OVERFLOW)
            compile_error(ctx, "arithmetic overflow in `%s' component", vec);
        if (state->exceptionflags & SFLOAT_UNDERFLOW)
            compile_error(ctx, "arithmetic underflow in `%s' component", vec);
        return;
    }
    if (state->exceptionflags & SFLOAT_DIVBYZERO)
        compile_error(ctx, "division by zero");
    if (state->exceptionflags & SFLOAT_INVALID)
        compile_error(ctx, "undefined (inf)");
    if (state->exceptionflags & SFLOAT_OVERFLOW)
        compile_error(ctx, "arithmetic overflow");
    if (state->exceptionflags & SFLOAT_UNDERFLOW)
        compile_error(ctx, "arithmetic underflow");
}

static GMQCC_INLINE void sfloat_init(sfloat_state_t *state) {
    state->exceptionflags = SFLOAT_NOEXCEPT;
    state->roundingmode   = FOLD_ROUNDING;
    state->tiny           = FOLD_TINYNESS;
}
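
/*
 * Illustrative usage sketch (not part of the original file): drive one
 * operation through the soft-float layer and inspect the resulting flags.
 * 1.0f/3.0f has a recurring binary fraction, so aligning it against 1.0f
 * for the addition discards bits and raises SFLOAT_INEXACT.
 */
#if 0
static void sfloat_usage_example(void) {
    sfloat_state_t state;
    sfloat_cast_t  a, b;
    sfloat_init(&state);
    a.f = 1.0f;
    b.f = 1.0f / 3.0f;
    (void)sfloat_add(&state, a.s, b.s);
    if (state.exceptionflags & SFLOAT_INEXACT) {
        /* the sum required rounding */
    }
}
#endif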

/*
 * There are two stages to constant folding in GMQCC: there is parse-time
 * constant folding, where, with the help of the AST, operator usages can
 * be constant folded; then there is constant folding in the IR, for things
 * like eliding if statements on constant expressions.
 *
 * This file is thus split into two parts.
 */

#define isfloat(X)      (((ast_expression*)(X))->vtype == TYPE_FLOAT)
#define isvector(X)     (((ast_expression*)(X))->vtype == TYPE_VECTOR)
#define isstring(X)     (((ast_expression*)(X))->vtype == TYPE_STRING)
#define isarray(X)      (((ast_expression*)(X))->vtype == TYPE_ARRAY)
#define isfloats(X,Y)   (isfloat (X) && isfloat (Y))

/*
 * Implementation of basic vector math for vec3_t, for trivial constant
 * folding.
 *
 * TODO: gcc/clang hinting for autovectorization
 */
typedef enum {
    VEC_COMP_X = 1 << 0,
    VEC_COMP_Y = 1 << 1,
    VEC_COMP_Z = 1 << 2
} vec3_comp_t;

typedef struct {
    sfloat_cast_t x;
    sfloat_cast_t y;
    sfloat_cast_t z;
} vec3_soft_t;

typedef struct {
    vec3_comp_t    faults;
    sfloat_state_t state[3];
} vec3_soft_state_t;

static GMQCC_INLINE vec3_soft_t vec3_soft_convert(vec3_t vec) {
    vec3_soft_t soft;
    soft.x.f = vec.x;
    soft.y.f = vec.y;
    soft.z.f = vec.z;
    return soft;
}

static GMQCC_INLINE bool vec3_soft_exception(vec3_soft_state_t *vstate, size_t index) {
    sfloat_exceptionflags_t flags = vstate->state[index].exceptionflags;
    if (flags & SFLOAT_DIVBYZERO) return true;
    if (flags & SFLOAT_INVALID)   return true;
    if (flags & SFLOAT_OVERFLOW)  return true;
    if (flags & SFLOAT_UNDERFLOW) return true;
    return false;
}

static GMQCC_INLINE void vec3_soft_eval(vec3_soft_state_t *state,
                                        sfloat_t (*callback)(sfloat_state_t *, sfloat_t, sfloat_t),
                                        vec3_t a,
                                        vec3_t b)
{
    vec3_soft_t sa = vec3_soft_convert(a);
    vec3_soft_t sb = vec3_soft_convert(b);
    callback(&state->state[0], sa.x.s, sb.x.s);
    if (vec3_soft_exception(state, 0)) state->faults = (vec3_comp_t)(state->faults | VEC_COMP_X);
    callback(&state->state[1], sa.y.s, sb.y.s);
    if (vec3_soft_exception(state, 1)) state->faults = (vec3_comp_t)(state->faults | VEC_COMP_Y);
    callback(&state->state[2], sa.z.s, sb.z.s);
    if (vec3_soft_exception(state, 2)) state->faults = (vec3_comp_t)(state->faults | VEC_COMP_Z);
}

static GMQCC_INLINE void vec3_check_except(vec3_t a,
                                           vec3_t b,
                                           lex_ctx_t ctx,
                                           sfloat_t (*callback)(sfloat_state_t *, sfloat_t, sfloat_t))
{
    vec3_soft_state_t state;

    if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS))
        return;

    state.faults = (vec3_comp_t)0;
    sfloat_init(&state.state[0]);
    sfloat_init(&state.state[1]);
    sfloat_init(&state.state[2]);

    vec3_soft_eval(&state, callback, a, b);
    if (state.faults & VEC_COMP_X) sfloat_check(ctx, &state.state[0], "x");
    if (state.faults & VEC_COMP_Y) sfloat_check(ctx, &state.state[1], "y");
    if (state.faults & VEC_COMP_Z) sfloat_check(ctx, &state.state[2], "z");
}

static GMQCC_INLINE vec3_t vec3_add(lex_ctx_t ctx, vec3_t a, vec3_t b) {
    vec3_t out;
    vec3_check_except(a, b, ctx, &sfloat_add);
    out.x = a.x + b.x;
    out.y = a.y + b.y;
    out.z = a.z + b.z;
    return out;
}

static GMQCC_INLINE vec3_t vec3_sub(lex_ctx_t ctx, vec3_t a, vec3_t b) {
    vec3_t out;
    vec3_check_except(a, b, ctx, &sfloat_sub);
    out.x = a.x - b.x;
    out.y = a.y - b.y;
    out.z = a.z - b.z;
    return out;
}

static GMQCC_INLINE vec3_t vec3_neg(lex_ctx_t ctx, vec3_t a) {
    vec3_t         out;
    sfloat_cast_t  v[3];
    sfloat_state_t s[3];

    if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS))
        goto end;

    v[0].f = a.x;
    v[1].f = a.y;
    v[2].f = a.z;

    sfloat_init(&s[0]);
    sfloat_init(&s[1]);
    sfloat_init(&s[2]);

    sfloat_neg(&s[0], v[0].s);
    sfloat_neg(&s[1], v[1].s);
    sfloat_neg(&s[2], v[2].s);

    sfloat_check(ctx, &s[0], NULL);
    sfloat_check(ctx, &s[1], NULL);
    sfloat_check(ctx, &s[2], NULL);

end:
    out.x = -a.x;
    out.y = -a.y;
    out.z = -a.z;
    return out;
}

static GMQCC_INLINE vec3_t vec3_or(vec3_t a, vec3_t b) {
    vec3_t out;
    out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b.x));
    out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b.y));
    out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b.z));
    return out;
}

static GMQCC_INLINE vec3_t vec3_orvf(vec3_t a, qcfloat_t b) {
    vec3_t out;
    out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b));
    out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b));
    out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b));
    return out;
}

static GMQCC_INLINE vec3_t vec3_and(vec3_t a, vec3_t b) {
    vec3_t out;
    out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b.x));
    out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b.y));
    out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b.z));
    return out;
}

static GMQCC_INLINE vec3_t vec3_andvf(vec3_t a, qcfloat_t b) {
    vec3_t out;
    out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b));
    out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b));
    out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b));
    return out;
}

static GMQCC_INLINE vec3_t vec3_xor(vec3_t a, vec3_t b) {
    vec3_t out;
    out.x = (qcfloat_t)(((qcint_t)a.x) ^ ((qcint_t)b.x));
    out.y = (qcfloat_t)(((qcint_t)a.y) ^ ((qcint_t)b.y));
    out.z = (qcfloat_t)(((qcint_t)a.z) ^ ((qcint_t)b.z));
    return out;
}

static GMQCC_INLINE vec3_t vec3_xorvf(vec3_t a, qcfloat_t b) {
    vec3_t out;
    out.x = (qcfloat_t)(((qcint_t)a.x) ^ ((qcint_t)b));
    out.y = (qcfloat_t)(((qcint_t)a.y) ^ ((qcint_t)b));
    out.z = (qcfloat_t)(((qcint_t)a.z) ^ ((qcint_t)b));
    return out;
}

static GMQCC_INLINE vec3_t vec3_not(vec3_t a) {
    vec3_t out;
    out.x = -1-a.x;
    out.y = -1-a.y;
    out.z = -1-a.z;
    return out;
}

static GMQCC_INLINE qcfloat_t vec3_mulvv(lex_ctx_t ctx, vec3_t a, vec3_t b) {
    vec3_soft_t    sa;
    vec3_soft_t    sb;
    sfloat_state_t s[5];
    sfloat_t       r[5];

    if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS))
        goto end;

    sa = vec3_soft_convert(a);
    sb = vec3_soft_convert(b);

    sfloat_init(&s[0]);
    sfloat_init(&s[1]);
    sfloat_init(&s[2]);
    sfloat_init(&s[3]);
    sfloat_init(&s[4]);

    r[0] = sfloat_mul(&s[0], sa.x.s, sb.x.s);
    r[1] = sfloat_mul(&s[1], sa.y.s, sb.y.s);
    r[2] = sfloat_mul(&s[2], sa.z.s, sb.z.s);
    r[3] = sfloat_add(&s[3], r[0],   r[1]);
    r[4] = sfloat_add(&s[4], r[3],   r[2]);

    sfloat_check(ctx, &s[0], NULL);
    sfloat_check(ctx, &s[1], NULL);
    sfloat_check(ctx, &s[2], NULL);
    sfloat_check(ctx, &s[3], NULL);
    sfloat_check(ctx, &s[4], NULL);

end:
    return (a.x * b.x + a.y * b.y + a.z * b.z);
}

static GMQCC_INLINE vec3_t vec3_mulvf(lex_ctx_t ctx, vec3_t a, qcfloat_t b) {
    vec3_t         out;
    vec3_soft_t    sa;
    sfloat_cast_t  sb;
    sfloat_state_t s[3];

    if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS))
        goto end;

    sa   = vec3_soft_convert(a);
    sb.f = b;

    sfloat_init(&s[0]);
    sfloat_init(&s[1]);
    sfloat_init(&s[2]);

    sfloat_mul(&s[0], sa.x.s, sb.s);
    sfloat_mul(&s[1], sa.y.s, sb.s);
    sfloat_mul(&s[2], sa.z.s, sb.s);

    sfloat_check(ctx, &s[0], "x");
    sfloat_check(ctx, &s[1], "y");
    sfloat_check(ctx, &s[2], "z");

end:
    out.x = a.x * b;
    out.y = a.y * b;
    out.z = a.z * b;
    return out;
}

static GMQCC_INLINE bool vec3_cmp(vec3_t a, vec3_t b) {
    return a.x == b.x &&
           a.y == b.y &&
           a.z == b.z;
}

static GMQCC_INLINE vec3_t vec3_create(float x, float y, float z) {
    vec3_t out;
    out.x = x;
    out.y = y;
    out.z = z;
    return out;
}

static GMQCC_INLINE qcfloat_t vec3_notf(vec3_t a) {
    return (!a.x && !a.y && !a.z);
}

static GMQCC_INLINE bool vec3_pbool(vec3_t a) {
    return (a.x || a.y || a.z);
}

static GMQCC_INLINE vec3_t vec3_cross(lex_ctx_t ctx, vec3_t a, vec3_t b) {
    vec3_t         out;
    vec3_soft_t    sa;
    vec3_soft_t    sb;
    sfloat_state_t s[9];
    sfloat_t       r[9];

    if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS))
        goto end;

    sa = vec3_soft_convert(a);
    sb = vec3_soft_convert(b);

    sfloat_init(&s[0]);
    sfloat_init(&s[1]);
    sfloat_init(&s[2]);
    sfloat_init(&s[3]);
    sfloat_init(&s[4]);
    sfloat_init(&s[5]);
    sfloat_init(&s[6]);
    sfloat_init(&s[7]);
    sfloat_init(&s[8]);

    r[0] = sfloat_mul(&s[0], sa.y.s, sb.z.s);
    r[1] = sfloat_mul(&s[1], sa.z.s, sb.y.s);
    r[2] = sfloat_mul(&s[2], sa.z.s, sb.x.s);
    r[3] = sfloat_mul(&s[3], sa.x.s, sb.z.s);
    r[4] = sfloat_mul(&s[4], sa.x.s, sb.y.s);
    r[5] = sfloat_mul(&s[5], sa.y.s, sb.x.s);
    r[6] = sfloat_sub(&s[6], r[0],   r[1]);
    r[7] = sfloat_sub(&s[7], r[2],   r[3]);
    r[8] = sfloat_sub(&s[8], r[4],   r[5]);

    sfloat_check(ctx, &s[0], NULL);
    sfloat_check(ctx, &s[1], NULL);
    sfloat_check(ctx, &s[2], NULL);
    sfloat_check(ctx, &s[3], NULL);
    sfloat_check(ctx, &s[4], NULL);
    sfloat_check(ctx, &s[5], NULL);
    sfloat_check(ctx, &s[6], "x");
    sfloat_check(ctx, &s[7], "y");
    sfloat_check(ctx, &s[8], "z");

end:
    out.x = a.y * b.z - a.z * b.y;
    out.y = a.z * b.x - a.x * b.z;
    out.z = a.x * b.y - a.y * b.x;
    return out;
}

static lex_ctx_t fold_ctx(fold_t *fold) {
    lex_ctx_t ctx;
    if (fold->parser->lex)
        return parser_ctx(fold->parser);

    memset(&ctx, 0, sizeof(ctx));
    return ctx;
}

static GMQCC_INLINE bool fold_immediate_true(fold_t *fold, ast_value *v) {
    switch (v->expression.vtype) {
        case TYPE_FLOAT:
            return !!v->constval.vfloat;
        case TYPE_INTEGER:
            return !!v->constval.vint;
        case TYPE_VECTOR:
            if (OPTS_FLAG(CORRECT_LOGIC))
                return vec3_pbool(v->constval.vvec);
            return !!(v->constval.vvec.x);
        case TYPE_STRING:
            if (!v->constval.vstring)
                return false;
            if (OPTS_FLAG(TRUE_EMPTY_STRINGS))
                return true;
            return !!v->constval.vstring[0];
        default:
            compile_error(fold_ctx(fold), "internal error: fold_immediate_true on invalid type");
            break;
    }
    return !!v->constval.vfunc;
}

/* Handy macros to determine if an ast_value can be constant folded. */
#define fold_can_1(X)  \
    (ast_istype(((ast_expression*)(X)), ast_value) && (X)->hasvalue && ((X)->cvq == CV_CONST) && \
                ((ast_expression*)(X))->vtype != TYPE_FUNCTION)

#define fold_can_2(X, Y) (fold_can_1(X) && fold_can_1(Y))

#define fold_immvalue_float(E)  ((E)->constval.vfloat)
#define fold_immvalue_vector(E) ((E)->constval.vvec)
#define fold_immvalue_string(E) ((E)->constval.vstring)

fold_t *fold_init(parser_t *parser) {
    fold_t *fold                 = (fold_t*)mem_a(sizeof(fold_t));
    fold->parser                 = parser;
    fold->imm_float              = NULL;
    fold->imm_vector             = NULL;
    fold->imm_string             = NULL;
    fold->imm_string_untranslate = util_htnew(FOLD_STRING_UNTRANSLATE_HTSIZE);
    fold->imm_string_dotranslate = util_htnew(FOLD_STRING_DOTRANSLATE_HTSIZE);

    /*
     * prime the tables with common constant values at constant
     * folding time.
     */
    (void)fold_constgen_float (fold,  0.0f, false);
    (void)fold_constgen_float (fold,  1.0f, false);
    (void)fold_constgen_float (fold, -1.0f, false);
    (void)fold_constgen_float (fold,  2.0f, false);

    (void)fold_constgen_vector(fold, vec3_create(0.0f, 0.0f, 0.0f));
    (void)fold_constgen_vector(fold, vec3_create(-1.0f, -1.0f, -1.0f));

    return fold;
}

bool fold_generate(fold_t *fold, ir_builder *ir) {
    /* generate globals for immediate folded values */
    size_t     i;
    ast_value *cur = NULL;

    for (i = 0; i < vec_size(fold->imm_float);  ++i)
        if (!ast_global_codegen((cur = fold->imm_float[i]), ir, false)) goto err;
    for (i = 0; i < vec_size(fold->imm_vector); ++i)
        if (!ast_global_codegen((cur = fold->imm_vector[i]), ir, false)) goto err;
    for (i = 0; i < vec_size(fold->imm_string); ++i)
        if (!ast_global_codegen((cur = fold->imm_string[i]), ir, false)) goto err;

    return true;

err:
    con_out("failed to generate global %s\n", cur->name);
    ir_builder_delete(ir);
    return false;
}

void fold_cleanup(fold_t *fold) {
    size_t i;

    for (i = 0; i < vec_size(fold->imm_float);  ++i) ast_delete(fold->imm_float[i]);
    for (i = 0; i < vec_size(fold->imm_vector); ++i) ast_delete(fold->imm_vector[i]);
    for (i = 0; i < vec_size(fold->imm_string); ++i) ast_delete(fold->imm_string[i]);

    vec_free(fold->imm_float);
    vec_free(fold->imm_vector);
    vec_free(fold->imm_string);

    util_htdel(fold->imm_string_untranslate);
    util_htdel(fold->imm_string_dotranslate);

    mem_d(fold);
}

ast_expression *fold_constgen_float(fold_t *fold, qcfloat_t value, bool inexact) {
    ast_value *out = NULL;
    size_t     i;

    /* memcmp, not ==, so that -0.0f and 0.0f stay distinct immediates */
    for (i = 0; i < vec_size(fold->imm_float); i++) {
        if (!memcmp(&fold->imm_float[i]->constval.vfloat, &value, sizeof(qcfloat_t)))
            return (ast_expression*)fold->imm_float[i];
    }

    out                  = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_FLOAT);
    out->cvq             = CV_CONST;
    out->hasvalue        = true;
    out->inexact         = inexact;
    out->constval.vfloat = value;

    vec_push(fold->imm_float, out);

    return (ast_expression*)out;
}

ast_expression *fold_constgen_vector(fold_t *fold, vec3_t value) {
    ast_value *out = NULL;
    size_t     i;

    for (i = 0; i < vec_size(fold->imm_vector); i++) {
        if (vec3_cmp(fold->imm_vector[i]->constval.vvec, value))
            return (ast_expression*)fold->imm_vector[i];
    }

    out                = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_VECTOR);
    out->cvq           = CV_CONST;
    out->hasvalue      = true;
    out->constval.vvec = value;

    vec_push(fold->imm_vector, out);

    return (ast_expression*)out;
}

ast_expression *fold_constgen_string(fold_t *fold, const char *str, bool translate) {
    hash_table_t *table = (translate) ? fold->imm_string_untranslate : fold->imm_string_dotranslate;
    ast_value    *out   = NULL;
    size_t        hash  = util_hthash(table, str);

    if ((out = (ast_value*)util_htgeth(table, str, hash)))
        return (ast_expression*)out;

    if (translate) {
        char name[32];
        util_snprintf(name, sizeof(name), "dotranslate_%lu", (unsigned long)(fold->parser->translated++));
        out = ast_value_new(parser_ctx(fold->parser), name, TYPE_STRING);
        out->expression.flags |= AST_FLAG_INCLUDE_DEF; /* def needs to be included for translatables */
    } else {
        out = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_STRING);
    }

    out->cvq              = CV_CONST;
    out->hasvalue         = true;
    out->constval.vstring = parser_strdup(str);

    vec_push(fold->imm_string, out);
    util_htseth(table, str, hash, out);

    return (ast_expression*)out;
}

/*
 * A union of function pointers lets the same implementation check
 * exceptions for both unary and binary folds.
 */
typedef union {
    void     (*callback)(void);
    sfloat_t (*binary)(sfloat_state_t *, sfloat_t, sfloat_t);
    sfloat_t (*unary)(sfloat_state_t *, sfloat_t);
} float_check_callback_t;

static bool fold_check_except_float_impl(void (*callback)(void),
                                         fold_t    *fold,
                                         ast_value *a,
                                         ast_value *b)
{
    float_check_callback_t call;
    sfloat_state_t         s;
    sfloat_cast_t          ca;
    sfloat_cast_t          cb;

    if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS) && !OPTS_WARN(WARN_INEXACT_COMPARES))
        return false;

    call.callback = callback;
    sfloat_init(&s);
    ca.f = fold_immvalue_float(a);
    if (b) {
        cb.f = fold_immvalue_float(b);
        call.binary(&s, ca.s, cb.s);
    } else {
        call.unary(&s, ca.s);
    }

    if (s.exceptionflags == 0)
        return false;

    if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS))
        goto inexact_possible;

    sfloat_check(fold_ctx(fold), &s, NULL);

inexact_possible:
    return s.exceptionflags & SFLOAT_INEXACT;
}

#define fold_check_except_float(CALLBACK, FOLD, A, B) \
    fold_check_except_float_impl(((void (*)(void))(CALLBACK)), (FOLD), (A), (B))

static bool fold_check_inexact_float(fold_t *fold, ast_value *a, ast_value *b) {
    lex_ctx_t ctx = fold_ctx(fold);
    if (!OPTS_WARN(WARN_INEXACT_COMPARES))
        return false;
    if (!a->inexact && !b->inexact)
        return false;
    return compile_warning(ctx, WARN_INEXACT_COMPARES, "inexact value in comparison");
}

static GMQCC_INLINE ast_expression *fold_op_mul_vec(fold_t *fold, vec3_t vec, ast_value *sel, const char *set) {
    /*
     * vector-component constant folding works by matching the component sets
     * to eliminate expensive operations on whole-vectors (3 components at runtime).
     * To achieve this effect in a clean manner this function generalizes the
     * values through the use of a set parameter, which is used as an indexing method
     * for creating the elided ast binary expression.
     *
     * Consider 'n 0 0' where y and z need to be tested for 0, and x is
     * used as the value in a binary operation generating an INSTR_MUL instruction.
     * To accomplish the indexing of the correct component value we use set[0], set[1], set[2]
     * as x, y, z, where the values of those operations return 'x', 'y', 'z'. Because
     * of how ASCII works we can easily delineate:
     * vec.z is the same as set[2]-'x' for when set[2] is 'z'; 'z'-'x' results in a
     * literal value of 2. Using this 2, we know that taking the address of vec->x (float)
     * and indexing it with this literal will yield the immediate address of that component.
     * An example of the trick is given after this function.
     *
     * Of course more work needs to be done to generate the correct index for the ast_member_new
     * call, which is no problem: set[0]-'x' suffices for that job.
     */
    qcfloat_t x = (&vec.x)[set[0]-'x'];
    qcfloat_t y = (&vec.x)[set[1]-'x'];
    qcfloat_t z = (&vec.x)[set[2]-'x'];

    if (!y && !z) {
        ast_expression *out;
        ++opts_optimizationcount[OPTIM_VECTOR_COMPONENTS];
        out = (ast_expression*)ast_member_new(fold_ctx(fold), (ast_expression*)sel, set[0]-'x', NULL);
        out->node.keep = false;
        ((ast_member*)out)->rvalue = true;

        return (ast_expression*)ast_binary_new(fold_ctx(fold), INSTR_MUL_F, fold_constgen_float(fold, x, false), out);
    }
    return NULL;
}
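
/*
 * Illustrative only (not part of the original file): with set = "yxz",
 * set[0]-'x' is 'y'-'x' == 1, so (&vec.x)[1] reads vec.y and ast_member_new
 * selects member 1 of `sel'. The call sites in fold_op_mul below try "xyz",
 * "yxz" and "zxy", giving each component a turn as the single live value.
 */
#if 0
static void fold_op_mul_vec_example(void) {
    vec3_t    v     = vec3_create(1, 2, 3);
    qcfloat_t first = (&v.x)["yxz"[0]-'x']; /* == v.y == 2 */
    (void)first;
}
#endif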

static GMQCC_INLINE ast_expression *fold_op_neg(fold_t *fold, ast_value *a) {
    if (isfloat(a)) {
        if (fold_can_1(a)) {
            /* Negation can produce inexact as well */
            bool inexact = fold_check_except_float(&sfloat_neg, fold, a, NULL);
            return fold_constgen_float(fold, -fold_immvalue_float(a), inexact);
        }
    } else if (isvector(a)) {
        if (fold_can_1(a))
            return fold_constgen_vector(fold, vec3_neg(fold_ctx(fold), fold_immvalue_vector(a)));
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_not(fold_t *fold, ast_value *a) {
    if (isfloat(a)) {
        if (fold_can_1(a))
            return fold_constgen_float(fold, !fold_immvalue_float(a), false);
    } else if (isvector(a)) {
        if (fold_can_1(a))
            return fold_constgen_float(fold, vec3_notf(fold_immvalue_vector(a)), false);
    } else if (isstring(a)) {
        if (fold_can_1(a)) {
            if (OPTS_FLAG(TRUE_EMPTY_STRINGS))
                return fold_constgen_float(fold, !fold_immvalue_string(a), false);
            else
                return fold_constgen_float(fold, !fold_immvalue_string(a) || !*fold_immvalue_string(a), false);
        }
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_add(fold_t *fold, ast_value *a, ast_value *b) {
    if (isfloat(a)) {
        if (fold_can_2(a, b)) {
            bool inexact = fold_check_except_float(&sfloat_add, fold, a, b);
            return fold_constgen_float(fold, fold_immvalue_float(a) + fold_immvalue_float(b), inexact);
        }
    } else if (isvector(a)) {
        if (fold_can_2(a, b))
            return fold_constgen_vector(fold, vec3_add(fold_ctx(fold),
                                                       fold_immvalue_vector(a),
                                                       fold_immvalue_vector(b)));
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_sub(fold_t *fold, ast_value *a, ast_value *b) {
    if (isfloat(a)) {
        if (fold_can_2(a, b)) {
            bool inexact = fold_check_except_float(&sfloat_sub, fold, a, b);
            return fold_constgen_float(fold, fold_immvalue_float(a) - fold_immvalue_float(b), inexact);
        }
    } else if (isvector(a)) {
        if (fold_can_2(a, b))
            return fold_constgen_vector(fold, vec3_sub(fold_ctx(fold),
                                                       fold_immvalue_vector(a),
                                                       fold_immvalue_vector(b)));
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_mul(fold_t *fold, ast_value *a, ast_value *b) {
    if (isfloat(a)) {
        if (isvector(b)) {
            if (fold_can_2(a, b))
                return fold_constgen_vector(fold, vec3_mulvf(fold_ctx(fold), fold_immvalue_vector(b), fold_immvalue_float(a)));
        } else {
            if (fold_can_2(a, b)) {
                bool inexact = fold_check_except_float(&sfloat_mul, fold, a, b);
                return fold_constgen_float(fold, fold_immvalue_float(a) * fold_immvalue_float(b), inexact);
            }
        }
    } else if (isvector(a)) {
        if (isfloat(b)) {
            if (fold_can_2(a, b))
                return fold_constgen_vector(fold, vec3_mulvf(fold_ctx(fold), fold_immvalue_vector(a), fold_immvalue_float(b)));
        } else {
            if (fold_can_2(a, b)) {
                return fold_constgen_float(fold, vec3_mulvv(fold_ctx(fold), fold_immvalue_vector(a), fold_immvalue_vector(b)), false);
            } else if (OPTS_OPTIMIZATION(OPTIM_VECTOR_COMPONENTS) && fold_can_1(a)) {
                ast_expression *out;
                if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "xyz"))) return out;
                if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "yxz"))) return out;
                if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "zxy"))) return out;
            } else if (OPTS_OPTIMIZATION(OPTIM_VECTOR_COMPONENTS) && fold_can_1(b)) {
                ast_expression *out;
                if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(b), a, "xyz"))) return out;
                if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(b), a, "yxz"))) return out;
                if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(b), a, "zxy"))) return out;
            }
        }
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_div(fold_t *fold, ast_value *a, ast_value *b) {
    if (isfloat(a)) {
        if (fold_can_2(a, b)) {
            bool inexact = fold_check_except_float(&sfloat_div, fold, a, b);
            return fold_constgen_float(fold, fold_immvalue_float(a) / fold_immvalue_float(b), inexact);
        } else if (fold_can_1(b)) {
            /* replace the division with a multiplication by the reciprocal */
            return (ast_expression*)ast_binary_new(
                fold_ctx(fold),
                INSTR_MUL_F,
                (ast_expression*)a,
                fold_constgen_float(fold, 1.0f / fold_immvalue_float(b), false)
            );
        }
    } else if (isvector(a)) {
        if (fold_can_2(a, b)) {
            return fold_constgen_vector(fold, vec3_mulvf(fold_ctx(fold), fold_immvalue_vector(a), 1.0f / fold_immvalue_float(b)));
        } else {
            return (ast_expression*)ast_binary_new(
                fold_ctx(fold),
                INSTR_MUL_VF,
                (ast_expression*)a,
                (fold_can_1(b))
                    ? (ast_expression*)fold_constgen_float(fold, 1.0f / fold_immvalue_float(b), false)
                    : (ast_expression*)ast_binary_new(
                        fold_ctx(fold),
                        INSTR_DIV_F,
                        (ast_expression*)fold->imm_float[1],
                        (ast_expression*)b
                      )
            );
        }
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_mod(fold_t *fold, ast_value *a, ast_value *b) {
    return (fold_can_2(a, b))
                ? fold_constgen_float(fold, fmodf(fold_immvalue_float(a), fold_immvalue_float(b)), false)
                : NULL;
}

static GMQCC_INLINE ast_expression *fold_op_bor(fold_t *fold, ast_value *a, ast_value *b) {
    if (isfloat(a)) {
        if (fold_can_2(a, b))
            return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) | ((qcint_t)fold_immvalue_float(b))), false);
    } else {
        if (isvector(b)) {
            if (fold_can_2(a, b))
                return fold_constgen_vector(fold, vec3_or(fold_immvalue_vector(a), fold_immvalue_vector(b)));
        } else {
            if (fold_can_2(a, b))
                return fold_constgen_vector(fold, vec3_orvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
        }
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_band(fold_t *fold, ast_value *a, ast_value *b) {
    if (isfloat(a)) {
        if (fold_can_2(a, b))
            return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) & ((qcint_t)fold_immvalue_float(b))), false);
    } else {
        if (isvector(b)) {
            if (fold_can_2(a, b))
                return fold_constgen_vector(fold, vec3_and(fold_immvalue_vector(a), fold_immvalue_vector(b)));
        } else {
            if (fold_can_2(a, b))
                return fold_constgen_vector(fold, vec3_andvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
        }
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_xor(fold_t *fold, ast_value *a, ast_value *b) {
    if (isfloat(a)) {
        if (fold_can_2(a, b))
            return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) ^ ((qcint_t)fold_immvalue_float(b))), false);
    } else {
        if (fold_can_2(a, b)) {
            if (isvector(b))
                return fold_constgen_vector(fold, vec3_xor(fold_immvalue_vector(a), fold_immvalue_vector(b)));
            else
                return fold_constgen_vector(fold, vec3_xorvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
        }
    }
    return NULL;
}
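
/*
 * Illustrative only: QuakeC has no integer type, so the bitwise folds above
 * truncate their float operands to qcint_t first and convert the result
 * back; 5.9 | 3.0 therefore folds to (qcfloat_t)(5 | 3) == 7.0f.
 */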

static GMQCC_INLINE ast_expression *fold_op_lshift(fold_t *fold, ast_value *a, ast_value *b) {
    if (fold_can_2(a, b) && isfloats(a, b))
        return fold_constgen_float(fold, (qcfloat_t)floorf(fold_immvalue_float(a) * powf(2.0f, fold_immvalue_float(b))), false);
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_rshift(fold_t *fold, ast_value *a, ast_value *b) {
    if (fold_can_2(a, b) && isfloats(a, b))
        return fold_constgen_float(fold, (qcfloat_t)floorf(fold_immvalue_float(a) / powf(2.0f, fold_immvalue_float(b))), false);
    return NULL;
}
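
/*
 * Illustrative only: QuakeC floats have no native shift, so the folds above
 * emulate shifting with powf; 3 << 2 folds as floorf(3 * powf(2, 2)) == 12,
 * and 7 >> 1 folds as floorf(7 / powf(2, 1)) == 3.
 */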

static GMQCC_INLINE ast_expression *fold_op_andor(fold_t *fold, ast_value *a, ast_value *b, float expr) {
    if (fold_can_2(a, b)) {
        if (OPTS_FLAG(PERL_LOGIC)) {
            if (expr)
                return (fold_immediate_true(fold, a)) ? (ast_expression*)a : (ast_expression*)b;
            else
                return (fold_immediate_true(fold, a)) ? (ast_expression*)b : (ast_expression*)a;
        } else {
            return fold_constgen_float (
                fold,
                ((expr) ? (fold_immediate_true(fold, a) || fold_immediate_true(fold, b))
                        : (fold_immediate_true(fold, a) && fold_immediate_true(fold, b)))
                            ? 1
                            : 0,
                false
            );
        }
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_tern(fold_t *fold, ast_value *a, ast_value *b, ast_value *c) {
    if (fold_can_1(a)) {
        return fold_immediate_true(fold, a)
                    ? (ast_expression*)b
                    : (ast_expression*)c;
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_exp(fold_t *fold, ast_value *a, ast_value *b) {
    if (fold_can_2(a, b))
        return fold_constgen_float(fold, (qcfloat_t)powf(fold_immvalue_float(a), fold_immvalue_float(b)), false);
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_lteqgt(fold_t *fold, ast_value *a, ast_value *b) {
    if (fold_can_2(a,b)) {
        fold_check_inexact_float(fold, a, b);
        if (fold_immvalue_float(a) <  fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[2]; /* -1 */
        if (fold_immvalue_float(a) == fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[0]; /*  0 */
        if (fold_immvalue_float(a) >  fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[1]; /*  1 */
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_ltgt(fold_t *fold, ast_value *a, ast_value *b, bool lt) {
    if (fold_can_2(a, b)) {
        fold_check_inexact_float(fold, a, b);
        return (lt) ? (ast_expression*)fold->imm_float[!!(fold_immvalue_float(a) < fold_immvalue_float(b))]
                    : (ast_expression*)fold->imm_float[!!(fold_immvalue_float(a) > fold_immvalue_float(b))];
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_cmp(fold_t *fold, ast_value *a, ast_value *b, bool ne) {
    if (fold_can_2(a, b)) {
        if (isfloat(a) && isfloat(b)) {
            float la = fold_immvalue_float(a);
            float lb = fold_immvalue_float(b);
            fold_check_inexact_float(fold, a, b);
            return (ast_expression*)fold->imm_float[!(ne ? la == lb : la != lb)];
        } else if (isvector(a) && isvector(b)) {
            vec3_t la = fold_immvalue_vector(a);
            vec3_t lb = fold_immvalue_vector(b);
            return (ast_expression*)fold->imm_float[!(ne ? vec3_cmp(la, lb) : !vec3_cmp(la, lb))];
        }
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_bnot(fold_t *fold, ast_value *a) {
    if (isfloat(a)) {
        if (fold_can_1(a))
            /* two's complement identity: ~x == -1-x, e.g. ~5 folds to -6 */
            return fold_constgen_float(fold, -1-fold_immvalue_float(a), false);
    } else {
        if (isvector(a)) {
            if (fold_can_1(a))
                return fold_constgen_vector(fold, vec3_not(fold_immvalue_vector(a)));
        }
    }
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_cross(fold_t *fold, ast_value *a, ast_value *b) {
    if (fold_can_2(a, b))
        return fold_constgen_vector(fold, vec3_cross(fold_ctx(fold),
                                                     fold_immvalue_vector(a),
                                                     fold_immvalue_vector(b)));
    return NULL;
}

static GMQCC_INLINE ast_expression *fold_op_length(fold_t *fold, ast_value *a) {
    if (fold_can_1(a) && isstring(a))
        return fold_constgen_float(fold, strlen(fold_immvalue_string(a)), false);
    if (isarray(a))
        return fold_constgen_float(fold, vec_size(a->initlist), false);
    return NULL;
}

ast_expression *fold_op(fold_t *fold, const oper_info *info, ast_expression **opexprs) {
    ast_value      *a = (ast_value*)opexprs[0];
    ast_value      *b = (ast_value*)opexprs[1];
    ast_value      *c = (ast_value*)opexprs[2];
    ast_expression *e = NULL;

    /* can a fold operation be applied to this operator usage? */
    if (!info->folds)
        return NULL;

    switch (info->operands) {
        case 3: if (!c) return NULL; /* fall through */
        case 2: if (!b) return NULL; /* fall through */
        case 1:
            if (!a) {
                compile_error(fold_ctx(fold), "internal error: fold_op no operands to fold\n");
                return NULL;
            }
    }

    /*
     * we could use a boolean and default case but ironically gcc produces
     * invalid broken assembly from that operation. clang/tcc get it right,
     * but interestingly ignore compiling this to a jump-table when I do that;
     * this happens to be the most efficient method, since you have per-level
     * granularity on the pointer check happening only for the case you check
     * it in. Opposed to the default method which would involve a boolean and
     * pointer check afterwards.
     */
#define fold_op_case(ARGS, ARGS_OPID, OP, ARGS_FOLD)    \
    case opid##ARGS ARGS_OPID:                          \
        if ((e = fold_op_##OP ARGS_FOLD)) {             \
            ++opts_optimizationcount[OPTIM_CONST_FOLD]; \
        }                                               \
        return e

    switch (info->id) {
        fold_op_case(2, ('-', 'P'),      neg,    (fold, a));
        fold_op_case(2, ('!', 'P'),      not,    (fold, a));
        fold_op_case(1, ('+'),           add,    (fold, a, b));
        fold_op_case(1, ('-'),           sub,    (fold, a, b));
        fold_op_case(1, ('*'),           mul,    (fold, a, b));
        fold_op_case(1, ('/'),           div,    (fold, a, b));
        fold_op_case(1, ('%'),           mod,    (fold, a, b));
        fold_op_case(1, ('|'),           bor,    (fold, a, b));
        fold_op_case(1, ('&'),           band,   (fold, a, b));
        fold_op_case(1, ('^'),           xor,    (fold, a, b));
        fold_op_case(1, ('<'),           ltgt,   (fold, a, b, true));
        fold_op_case(1, ('>'),           ltgt,   (fold, a, b, false));
        fold_op_case(2, ('<', '<'),      lshift, (fold, a, b));
        fold_op_case(2, ('>', '>'),      rshift, (fold, a, b));
        fold_op_case(2, ('|', '|'),      andor,  (fold, a, b, true));
        fold_op_case(2, ('&', '&'),      andor,  (fold, a, b, false));
        fold_op_case(2, ('?', ':'),      tern,   (fold, a, b, c));
        fold_op_case(2, ('*', '*'),      exp,    (fold, a, b));
        fold_op_case(3, ('<','=','>'),   lteqgt, (fold, a, b));
        fold_op_case(2, ('!', '='),      cmp,    (fold, a, b, true));
        fold_op_case(2, ('=', '='),      cmp,    (fold, a, b, false));
        fold_op_case(2, ('~', 'P'),      bnot,   (fold, a));
        fold_op_case(2, ('>', '<'),      cross,  (fold, a, b));
        fold_op_case(3, ('l', 'e', 'n'), length, (fold, a));
    }

    compile_error(fold_ctx(fold), "internal error: attempted to constant-fold for unsupported operator");
    return NULL;
}

/*
 * Constant folding for compiler intrinsics; a similar approach to operator
 * folding, primarily: individual functions for each intrinsic to fold,
 * and a generic selection function.
 */
static GMQCC_INLINE ast_expression *fold_intrin_isfinite(fold_t *fold, ast_value *a) {
    return fold_constgen_float(fold, isfinite(fold_immvalue_float(a)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_isinf(fold_t *fold, ast_value *a) {
    return fold_constgen_float(fold, isinf(fold_immvalue_float(a)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_isnan(fold_t *fold, ast_value *a) {
    return fold_constgen_float(fold, isnan(fold_immvalue_float(a)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_isnormal(fold_t *fold, ast_value *a) {
    return fold_constgen_float(fold, isnormal(fold_immvalue_float(a)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_signbit(fold_t *fold, ast_value *a) {
    return fold_constgen_float(fold, signbit(fold_immvalue_float(a)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_acosh(fold_t *fold, ast_value *a) {
    return fold_constgen_float(fold, acoshf(fold_immvalue_float(a)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_asinh(fold_t *fold, ast_value *a) {
    return fold_constgen_float(fold, asinhf(fold_immvalue_float(a)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_atanh(fold_t *fold, ast_value *a) {
    return fold_constgen_float(fold, (float)atanh(fold_immvalue_float(a)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_exp(fold_t *fold, ast_value *a) {
    return fold_constgen_float(fold, expf(fold_immvalue_float(a)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_exp2(fold_t *fold, ast_value *a) {
    return fold_constgen_float(fold, exp2f(fold_immvalue_float(a)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_expm1(fold_t *fold, ast_value *a) {
    return fold_constgen_float(fold, expm1f(fold_immvalue_float(a)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_mod(fold_t *fold, ast_value *lhs, ast_value *rhs) {
    return fold_constgen_float(fold, fmodf(fold_immvalue_float(lhs), fold_immvalue_float(rhs)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_pow(fold_t *fold, ast_value *lhs, ast_value *rhs) {
    return fold_constgen_float(fold, powf(fold_immvalue_float(lhs), fold_immvalue_float(rhs)), false);
}
static GMQCC_INLINE ast_expression *fold_intrin_fabs(fold_t *fold, ast_value *a) {
    return fold_constgen_float(fold, fabsf(fold_immvalue_float(a)), false);
}

ast_expression *fold_intrin(fold_t *fold, const char *intrin, ast_expression **arg) {
    ast_expression *ret = NULL;
    ast_value      *a   = (ast_value*)arg[0];
    ast_value      *b   = (ast_value*)arg[1];

    if (!strcmp(intrin, "isfinite")) ret = fold_intrin_isfinite(fold, a);
    if (!strcmp(intrin, "isinf"))    ret = fold_intrin_isinf(fold, a);
    if (!strcmp(intrin, "isnan"))    ret = fold_intrin_isnan(fold, a);
    if (!strcmp(intrin, "isnormal")) ret = fold_intrin_isnormal(fold, a);
    if (!strcmp(intrin, "signbit"))  ret = fold_intrin_signbit(fold, a);
    if (!strcmp(intrin, "acosh"))    ret = fold_intrin_acosh(fold, a);
    if (!strcmp(intrin, "asinh"))    ret = fold_intrin_asinh(fold, a);
    if (!strcmp(intrin, "atanh"))    ret = fold_intrin_atanh(fold, a);
    if (!strcmp(intrin, "exp"))      ret = fold_intrin_exp(fold, a);
    if (!strcmp(intrin, "exp2"))     ret = fold_intrin_exp2(fold, a);
    if (!strcmp(intrin, "expm1"))    ret = fold_intrin_expm1(fold, a);
    if (!strcmp(intrin, "mod"))      ret = fold_intrin_mod(fold, a, b);
    if (!strcmp(intrin, "pow"))      ret = fold_intrin_pow(fold, a, b);
    if (!strcmp(intrin, "fabs"))     ret = fold_intrin_fabs(fold, a);

    if (ret)
        ++opts_optimizationcount[OPTIM_CONST_FOLD];

    return ret;
}

/*
 * These are all the actual constant folding methods that happen in between
 * the AST/IR stage of the compiler, i.e. eliminating branches for constant
 * expressions, which is the only supported thing so far. We undefine the
 * testing macros here because an ir_value is different than an ast_value.
 */
#undef isfloat
#undef isstring
#undef isvector
#undef fold_immvalue_float
#undef fold_immvalue_string
#undef fold_immvalue_vector
#undef fold_can_1
#undef fold_can_2

#define isfloat(X)                ((X)->vtype == TYPE_FLOAT)
/*#define isstring(X)             ((X)->vtype == TYPE_STRING)*/
/*#define isvector(X)             ((X)->vtype == TYPE_VECTOR)*/
#define fold_immvalue_float(X)    ((X)->constval.vfloat)
#define fold_immvalue_vector(X)   ((X)->constval.vvec)
/*#define fold_immvalue_string(X) ((X)->constval.vstring)*/
#define fold_can_1(X)             ((X)->hasvalue && (X)->cvq == CV_CONST)
/*#define fold_can_2(X,Y)         (fold_can_1(X) && fold_can_1(Y))*/

static ast_expression *fold_superfluous(ast_expression *left, ast_expression *right, int op) {
    ast_expression *swapped = NULL; /* using this as bool */
    ast_value      *load;

    /* if the right operand is not a foldable constant, try the left one */
    if (!ast_istype(right, ast_value) || !fold_can_1((load = (ast_value*)right))) {
        swapped = left;
        left    = right;
        right   = swapped;
    }

    if (!ast_istype(right, ast_value) || !fold_can_1((load = (ast_value*)right)))
        return NULL;

    switch (op) {
        case INSTR_DIV_F:
            if (swapped) return NULL; /* 1/x cannot be reduced to x */
        case INSTR_MUL_F:
            if (fold_immvalue_float(load) == 1.0f) {
                ++opts_optimizationcount[OPTIM_PEEPHOLE];
                ast_unref(right);
                return left;
            }
            break;

        case INSTR_SUB_F:
            if (swapped) return NULL;
        case INSTR_ADD_F:
            if (fold_immvalue_float(load) == 0.0f) {
                ++opts_optimizationcount[OPTIM_PEEPHOLE];
                ast_unref(right);
                return left;
            }
            break;

        case INSTR_MUL_V:
            if (vec3_cmp(fold_immvalue_vector(load), vec3_create(1, 1, 1))) {
                ++opts_optimizationcount[OPTIM_PEEPHOLE];
                ast_unref(right);
                return left;
            }
            break;

        case INSTR_SUB_V:
            if (swapped) return NULL;
        case INSTR_ADD_V:
            if (vec3_cmp(fold_immvalue_vector(load), vec3_create(0, 0, 0))) {
                ++opts_optimizationcount[OPTIM_PEEPHOLE];
                ast_unref(right);
                return left;
            }
            break;
    }

    return NULL;
}

ast_expression *fold_binary(lex_ctx_t ctx, int op, ast_expression *left, ast_expression *right) {
    ast_expression *ret = fold_superfluous(left, right, op);
    if (ret)
        return ret;
    return (ast_expression*)ast_binary_new(ctx, op, left, right);
}

static GMQCC_INLINE int fold_cond(ir_value *condval, ast_function *func, ast_ifthen *branch) {
    if (isfloat(condval) && fold_can_1(condval) && OPTS_OPTIMIZATION(OPTIM_CONST_FOLD_DCE)) {
        ast_expression_codegen *cgen;
        ir_block               *elide;
        ir_value               *dummy;
        bool istrue  = (fold_immvalue_float(condval) != 0.0f && branch->on_true);
        bool isfalse = (fold_immvalue_float(condval) == 0.0f && branch->on_false);
        ast_expression *path = (istrue)  ? branch->on_true  :
                               (isfalse) ? branch->on_false : NULL;
        if (!path) {
            /*
             * no path to take implies that the evaluation is if(0) and there
             * is no else block. so eliminate all the code.
             */
            ++opts_optimizationcount[OPTIM_CONST_FOLD_DCE];
            return true;
        }

        if (!(elide = ir_function_create_block(ast_ctx(branch), func->ir_func, ast_function_label(func, ((istrue) ? "ontrue" : "onfalse")))))
            return false;
        if (!(*(cgen = path->codegen))((ast_expression*)path, func, false, &dummy))
            return false;
        if (!ir_block_create_jump(func->curblock, ast_ctx(branch), elide))
            return false;
        /*
         * now the branch has been eliminated and the correct block for the constant evaluation
         * is expanded into the current block for the function.
         */
        func->curblock = elide;
        ++opts_optimizationcount[OPTIM_CONST_FOLD_DCE];
        return true;
    }
    return -1; /* nothing done */
}
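
/*
 * Illustrative only: given a constant condition such as
 *
 *     if (0) { a(); } else { b(); }
 *
 * fold_cond above never generates the dead `a()' branch; it creates a fresh
 * block, emits only the codegen of `b()' into it, jumps there, and counts a
 * single OPTIM_CONST_FOLD_DCE hit.
 */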

int fold_cond_ternary(ir_value *condval, ast_function *func, ast_ternary *branch) {
    return fold_cond(condval, func, (ast_ifthen*)branch);
}

int fold_cond_ifthen(ir_value *condval, ast_function *func, ast_ifthen *branch) {
    return fold_cond(condval, func, branch);
}