2 * Copyright (C) 2012, 2013
6 * Permission is hereby granted, free of charge, to any person obtaining a copy of
7 * this software and associated documentation files (the "Software"), to deal in
8 * the Software without restriction, including without limitation the rights to
9 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
10 * of the Software, and to permit persons to whom the Software is furnished to do
11 * so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28 /* TODO: remove globals ... */
    /* Allocation-tracking header fields (the struct's opening and remaining
     * fields are not visible in this chunk): doubly-linked list of live
     * allocations. */
    struct memblock_t *next; /* next tracked allocation  */
    struct memblock_t *prev; /* previous tracked allocation */

/* Head of the global list of live tracked allocations (see TODO above:
 * remove globals). */
static struct memblock_t *mem_start = NULL;
/*
 * Tracked allocation (backing for mem_a): allocates `byte` user bytes plus a
 * memblock_t bookkeeping header and links the header onto the global
 * mem_start list. `line`/`file` record the call site for leak reporting.
 * Returns a pointer to the user data area (just past the header), or NULL on
 * malloc failure.
 */
void *util_memory_a(size_t byte, unsigned int line, const char *file) {
    struct memblock_t *info = (struct memblock_t*)malloc(sizeof(struct memblock_t) + byte);
    /* NOTE(review): info+1 is evaluated before the NULL check below; pointer
     * arithmetic on a null pointer is undefined behavior — this should be
     * computed after the check. */
    void *data = (void*)(info+1);
    if (!info) return NULL;
    /* push header onto the head of the live-allocation list */
    info->next = mem_start;
    /* NOTE(review): unguarded dereference — crashes on the very first
     * allocation unless an `if (mem_start)` guard exists in lines not
     * visible here; confirm against the full file. */
    mem_start->prev = info;
    /* NOTE(review): %u / %08X do not match size_t / pointer arguments;
     * %zu and %p would be the conforming forms — confirm util_debug's
     * format handling. */
    util_debug("MEM", "allocation: % 8u (bytes) address 0x%08X @ %s:%u\n", byte, data, file, line);
/*
 * Tracked release (backing for mem_d): recovers the memblock_t header that
 * immediately precedes the user pointer `ptrn`, logs the release, and
 * unlinks the header from the global allocation list.
 */
void util_memory_d(void *ptrn, unsigned int line, const char *file) {
    struct memblock_t *info = NULL;
    /* the bookkeeping header sits one memblock_t before the user pointer */
    info = ((struct memblock_t*)ptrn - 1);
    util_debug("MEM", "released: % 8u (bytes) address 0x%08X @ %s:%u\n", info->byte, ptrn, file, line);
    /* unlink from the doubly-linked list.
     * NOTE(review): these dereferences require null guards for the list
     * head/tail (info->prev / info->next may be NULL) — guards are not
     * visible in this chunk; confirm they exist in the full file. */
    info->prev->next = info->next;
    info->next->prev = info->prev;
    if (info == mem_start)
        mem_start = info->next;
/*
 * Tracked realloc (backing for mem_r). Mirrors realloc semantics:
 * a NULL `ptrn` degenerates to a fresh allocation; otherwise a new block is
 * allocated, old data copied, and the old header unlinked and accounted for.
 */
void *util_memory_r(void *ptrn, size_t byte, unsigned int line, const char *file) {
    struct memblock_t *oldinfo = NULL;
    struct memblock_t *newinfo;
    /* NULL input: behaves as plain allocation (guard condition not visible
     * in this chunk — presumably `if (!ptrn)`) */
        return util_memory_a(byte, line, file);
    /* zero size: behaves as a release (guard presumably `if (!byte)`) */
        util_memory_d(ptrn, line, file);
    /* recover the old bookkeeping header */
    oldinfo = ((struct memblock_t*)ptrn - 1);
    newinfo = ((struct memblock_t*)malloc(sizeof(struct memblock_t) + byte));
    util_debug("MEM", "reallocation: % 8u -> %u (bytes) address 0x%08X -> 0x%08X @ %s:%u\n", oldinfo->byte, byte, ptrn, (void*)(newinfo+1), file, line);
    /* allocation failure path: release the old block */
        util_memory_d(oldinfo+1, line, file);
    /* NOTE(review): copies the full old size even when shrinking — this
     * overflows the new block whenever byte < oldinfo->byte; should copy
     * the minimum of the two sizes. */
    memcpy(newinfo+1, oldinfo+1, oldinfo->byte);
    /* unlink the old header from the live list.
     * NOTE(review): as in util_memory_d, null guards for prev/next are not
     * visible here — confirm. */
    oldinfo->prev->next = oldinfo->next;
    oldinfo->next->prev = oldinfo->prev;
    if (oldinfo == mem_start)
        mem_start = oldinfo->next;
    /* initialize and link the new header at the list head */
    newinfo->line = line;
    newinfo->byte = byte;
    newinfo->file = file;
    newinfo->prev = NULL;
    newinfo->next = mem_start;
    /* NOTE(review): unguarded mem_start dereference (see util_memory_a) */
    mem_start->prev = newinfo;
    /* adjust aggregate byte accounting */
    mem_ab -= oldinfo->byte;
    mem_ab += newinfo->byte;
/*
 * Report memory statistics: walks the live-allocation list reporting every
 * still-tracked (leaked) block, then prints aggregate totals. Output goes
 * through util_debug, so it is gated on the memchk option.
 */
void util_meminfo() {
    struct memblock_t *info;
    /* anything still on the list at shutdown is a leak */
    for (info = mem_start; info; info = info->next) {
        util_debug("MEM", "lost: % 8u (bytes) at %s:%u\n",
    /* aggregate summary (counters are file-scope globals) */
    util_debug("MEM", "Memory information:\n\
    Total allocations: %llu\n\
    Total deallocations: %llu\n\
    Total allocated: %llu (bytes)\n\
    Total deallocated: %llu (bytes)\n\
    Leaks found: lost %llu (bytes) in %d allocations\n",
171 * Some string utility functions, because strdup uses malloc, and we want
172 * to track all memory (without replacing malloc).
/*
 * strdup replacement that allocates through mem_a so the copy participates
 * in leak tracking. Note: for an empty string strlen() is 0, so the
 * condition short-circuits and no copy is made (the function presumably
 * returns NULL in that case — confirm in the full file).
 */
char *util_strdup(const char *s) {
    if ((len = strlen(s)) && (ptr = (char*)mem_a(len+1))) {
/*
 * Debug logger: prints "[AREA] " followed by the formatted message.
 * Messages tagged "MEM" are suppressed unless the memchk option is on.
 */
void util_debug(const char *area, const char *ms, ...) {
    if (!strcmp(area, "MEM") && !opts.memchk)
    con_out ("[%s] ", area);
203 * only required if big endian .. otherwise no need to swap
206 #if PLATFORM_BYTE_ORDER == GMQCC_BYTE_ORDER_BIG
/* Byte-swap 16-bit words in place (single rotate by 8). The d[l] indexing
 * suggests the enclosing loop (not visible here) counts l down to zero. */
static GMQCC_INLINE void util_swap16(uint16_t *d, size_t l) {
    d[l] = (d[l] << 8) | (d[l] >> 8);
/* Byte-swap 32-bit words in place: first swap adjacent byte pairs, then the
 * two 16-bit halves. */
static GMQCC_INLINE void util_swap32(uint32_t *d, size_t l) {
    v = ((d[l] << 8) & 0xFF00FF00) | ((d[l] >> 8) & 0x00FF00FF);
    d[l] = (v << 16) | (v >> 16);
/* Some strange systems don't like constants that big, AND don't recognize a ULL
 * suffix, so let's go the safe way
/* Byte-swap 64-bit words: swap bytes, then 16-bit groups, then 32-bit
 * halves. Declared on a uint32_t pointer because of the constant-size
 * issue mentioned above. */
static GMQCC_INLINE void util_swap64(uint32_t *d, size_t l) {
    /* NOTE(review): d[l] is uint32_t, so `d[l] << 8` and `v << 32` only
     * make sense if v is a 64-bit type (declaration not visible here);
     * if v is 32-bit, the high masks are dead and `<< 32` is UB — confirm
     * against the full file. */
    v = ((d[l] << 8) & 0xFF00FF00FF00FF00) | ((d[l] >> 8) & 0x00FF00FF00FF00FF);
    v = ((v << 16) & 0xFFFF0000FFFF0000) | ((v >> 16) & 0x0000FFFF0000FFFF);
    d[l] = (v << 32) | (v >> 32);
    /* fragment of the fallback variant (its header is not visible here):
     * walks 32-bit words two at a time */
    for (i = 0; i < l; i += 2) {
/*
 * Swap the endianness of `length` elements of width `typesize` in _data,
 * dispatching to the width-specific swappers. On little-endian builds the
 * whole call compiles to a no-op; with PLATFORM_BYTE_ORDER == -1 the byte
 * order is detected at runtime by type-punning an int through a char*.
 */
void util_endianswap(void *_data, size_t length, unsigned int typesize) {
#   if PLATFORM_BYTE_ORDER == -1 /* runtime check */
    if (*((char*)&typesize))
    /* prevent unused warnings */
#   if PLATFORM_BYTE_ORDER == GMQCC_BYTE_ORDER_LITTLE
    /* NOTE(review): length is shifted down by the element width here —
     * confirm whether `length` is a byte count (then this is correct) or an
     * element count (then the shifts drop elements). */
        util_swap16((uint16_t*)_data, length>>1);
        util_swap32((uint32_t*)_data, length>>2);
        util_swap64((uint32_t*)_data, length>>3);
    default: abort(); /* unsupported element width — fail loudly */
 * CRC algorithms vary in the width of the polynomial, the value of said polynomial,
 * the initial value used for the register, whether the bits of each byte are reflected
 * before being processed, whether the algorithm itself feeds input bytes through the
 * register or XORs them with a byte from one end and then straight into the table, as
 * well as (but not limited to the idea of reflected versions) whether the final register
 * value becomes reversed, and finally whether the value itself is used to XOR the final
 * register value. As such you can already imagine how painfully annoying CRCs are;
 * of course we stand to target Quake, which expects its certain set of rules for proper
 * calculation of a CRC.
 * In most traditional CRC algorithms one uses a reflected table-driven method where a
 * value or register is reflected if its bits are swapped around its center. For example:
 * the bits 0101 are the 4-bit reflection of 1010, and respectively 0011 would be the
 * reflection of 1100. Quake however expects a NON-reflected CRC on the output, but still
 * requires a final XOR on the values (0xFFFF and 0x0000); this is a standard CCITT CRC-16,
 * which I, respectfully as a programmer, don't agree with.
 * So now you know what we target, and why we target it, despite how unsettling it may
 * seem; but that is what Quake seems to request.
/* Standard CCITT CRC-16 lookup table for polynomial 0x1021 (256 entries). */
static const uint16_t util_crc16_table[] = {
    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5,
    0x60C6, 0x70E7, 0x8108, 0x9129, 0xA14A, 0xB16B,
    0xC18C, 0xD1AD, 0xE1CE, 0xF1EF, 0x1231, 0x0210,
    0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
    0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C,
    0xF3FF, 0xE3DE, 0x2462, 0x3443, 0x0420, 0x1401,
    0x64E6, 0x74C7, 0x44A4, 0x5485, 0xA56A, 0xB54B,
    0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
    0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6,
    0x5695, 0x46B4, 0xB75B, 0xA77A, 0x9719, 0x8738,
    0xF7DF, 0xE7FE, 0xD79D, 0xC7BC, 0x48C4, 0x58E5,
    0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
    0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969,
    0xA90A, 0xB92B, 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96,
    0x1A71, 0x0A50, 0x3A33, 0x2A12, 0xDBFD, 0xCBDC,
    0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
    0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03,
    0x0C60, 0x1C41, 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD,
    0xAD2A, 0xBD0B, 0x8D68, 0x9D49, 0x7E97, 0x6EB6,
    0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
    0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A,
    0x9F59, 0x8F78, 0x9188, 0x81A9, 0xB1CA, 0xA1EB,
    0xD10C, 0xC12D, 0xF14E, 0xE16F, 0x1080, 0x00A1,
    0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
    0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C,
    0xE37F, 0xF35E, 0x02B1, 0x1290, 0x22F3, 0x32D2,
    0x4235, 0x5214, 0x6277, 0x7256, 0xB5EA, 0xA5CB,
    0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
    0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447,
    0x5424, 0x4405, 0xA7DB, 0xB7FA, 0x8799, 0x97B8,
    0xE75F, 0xF77E, 0xC71D, 0xD73C, 0x26D3, 0x36F2,
    0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
    0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9,
    0xB98A, 0xA9AB, 0x5844, 0x4865, 0x7806, 0x6827,
    0x18C0, 0x08E1, 0x3882, 0x28A3, 0xCB7D, 0xDB5C,
    0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
    0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0,
    0x2AB3, 0x3A92, 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D,
    0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9, 0x7C26, 0x6C07,
    0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
    0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA,
    0x8FD9, 0x9FF8, 0x6E17, 0x7E36, 0x4E55, 0x5E74,
    0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
};

/*
 * Non-reflected, table-driven CRC-16 (CCITT polynomial 0x1021), as Quake
 * expects. `current` is the running register (pass 0xFFFF to start a
 * CCITT-FALSE computation), `k` the input bytes, `len` the byte count.
 * Returns the updated register, so the call can be chained over multiple
 * buffers. Input bytes are taken as unsigned char so high-bit characters
 * index the table correctly regardless of char signedness.
 */
uint16_t util_crc16(uint16_t current, const char *k, size_t len) {
    uint16_t h = current;
    for (; len; --len, ++k)
        h = util_crc16_table[(h >> 8) ^ ((unsigned char)*k)] ^ (uint16_t)(h << 8);
    return h;
}
/* Reflective variation (for reference only). In the full source this second
 * definition is expected to be preprocessed out; as visible here it would
 * clash with util_crc16 above — confirm the surrounding #if 0 guard. */
uint16_t util_crc16(const char *k, int len, const short clamp) {
    /* NOTE(review): (uint16_t)0xFFFFFFFF simply truncates to 0xFFFF —
     * misleading but harmless. */
    register uint16_t h= (uint16_t)0xFFFFFFFF;
    for (; len; --len, ++k)
        h = util_crc16_table[(h^((unsigned char)*k))&0xFF]^(h>>8);
/*
 * Convert a command-line style name to its macro/command form:
 * '-' becomes '_' and lowercase letters are uppercased, copying at most
 * outsz-1 characters and always NUL-terminating `out`.
 * Returns the number of characters written (excluding the terminator).
 * Fix: <ctype.h> classifiers require an argument representable as
 * unsigned char — passing a plain (possibly negative) char is undefined
 * behavior, so the argument is cast explicitly.
 */
size_t util_strtocmd(const char *in, char *out, size_t outsz) {
    size_t sz = 1;
    for (; *in && sz < outsz; ++in, ++out, ++sz)
        *out = (*in == '-') ? '_'
             : (isalpha((unsigned char)*in) && !isupper((unsigned char)*in))
                 ? *in + 'A' - 'a'
                 : *in;
    *out = 0;
    return sz - 1;
}
/*
 * Inverse of util_strtocmd: '_' becomes '-' and uppercase letters are
 * lowercased, copying at most outsz-1 characters and always
 * NUL-terminating `out`. Returns the number of characters written
 * (excluding the terminator).
 * Fix: cast to unsigned char before the <ctype.h> classifiers — passing a
 * plain (possibly negative) char is undefined behavior.
 */
size_t util_strtononcmd(const char *in, char *out, size_t outsz) {
    size_t sz = 1;
    for (; *in && sz < outsz; ++in, ++out, ++sz)
        *out = (*in == '_') ? '-'
             : (isalpha((unsigned char)*in) && isupper((unsigned char)*in))
                 ? *in + 'a' - 'A'
                 : *in;
    *out = 0;
    return sz - 1;
}
/* TODO: rewrite ... when I redo the ve cleanup */
/*
 * Growth helper behind the vec_* macros: ensures vector *a has room for `i`
 * more elements of size `s`. The vector_t metadata header lives immediately
 * before the user-visible array, so *a is adjusted past it on return.
 */
void _util_vec_grow(void **a, size_t i, size_t s) {
    vector_t *d = vec_meta(*a);
    /* existing vector: double capacity plus the requested extra;
     * first allocation: i+1 slots */
    size_t m = *a ? 2 * d->allocated +i : i+1;
    /* NOTE(review): p is dereferenced below without a visible NULL check —
     * confirm mem_r failure handling in the full file. */
    void *p = mem_r((*a ? d : NULL), s * m + sizeof(vector_t));
    /* fresh vector starts empty */
        ((vector_t*)p)->used = 0;
    /* hand the caller the data area just past the metadata header */
    *a = (vector_t*)p + 1;
    vec_meta(*a)->allocated = m;
389 * Hash table for generic data, based on dynamic memory allocations
390 * all around. This is the internal interface, please look for
391 * EXPOSED INTERFACE comment below
/* One hashtable entry: a key/value pair plus the chain link used for
 * collision resolution by separate chaining. */
typedef struct hash_node_t {
    char *key; /* the key for this node in table */
    void *value; /* pointer to the data as void* */
    struct hash_node_t *next; /* next node (linked list) */
/*
 * MurmurHash2-style string hash folded to a bucket index in [0, ht->size).
 * 0x5BD1E995 and the 24-bit shift are the standard MurmurHash2 mixing
 * constants; 0x1EF0 acts as a fixed seed XORed with the key length.
 */
GMQCC_INLINE size_t util_hthash(hash_table_t *ht, const char *key) {
    const uint32_t mix = 0x5BD1E995;
    const uint32_t rot = 24;
    size_t size = strlen(key);
    uint32_t hash = 0x1EF0 /* LICRC TAB */ ^ size;
    const unsigned char *data = (const unsigned char*)key;
    /* NOTE(review): unaligned, type-punned 32-bit load — violates strict
     * aliasing and may fault on alignment-strict targets; memcpy into a
     * uint32_t is the portable form. */
    alias = *(uint32_t*)data;
    alias ^= alias >> rot;
    /* fold in the trailing (size % 4) bytes — fallthrough is intentional */
    case 3: hash ^= data[2] << 16; /* fallthrough */
    case 2: hash ^= data[1] << 8; /* fallthrough */
    case 1: hash ^= data[0];
    /* reduce the 32-bit hash to a bucket index */
    return (size_t) (hash % ht->size);
/*
 * Allocate a key/value node for the hashtable; the key is duplicated so the
 * table owns its own copy. Returns NULL (in code not visible here) when
 * either allocation fails.
 */
hash_node_t *_util_htnewpair(const char *key, void *value) {
    if (!(node = (hash_node_t*)mem_a(sizeof(hash_node_t))))
    /* key duplication failed: the node allocated above must be released */
    if (!(node->key = util_strdup(key))) {
452 * EXPOSED INTERFACE for the hashtable implementation
453 * util_htnew(size) -- to make a new hashtable
454 * util_htset(table, key, value, sizeof(value)) -- to set something in the table
455 * util_htget(table, key) -- to get something from the table
456 * util_htdel(table) -- to delete the table
/*
 * Create a hashtable with `size` buckets. Returns NULL when either the
 * table object or the bucket array cannot be allocated; buckets start
 * zeroed (empty chains).
 */
hash_table_t *util_htnew(size_t size) {
    hash_table_t *hashtable = NULL;
    if (!(hashtable = (hash_table_t*)mem_a(sizeof(hash_table_t))))
    /* bucket array failed: release the table object allocated above */
    if (!(hashtable->table = (hash_node_t**)mem_a(sizeof(hash_node_t*) * size))) {
    hashtable->size = size;
    memset(hashtable->table, 0, sizeof(hash_node_t*) * size);
/*
 * Insert or replace key -> value in bucket `bin` (caller supplies the
 * precomputed bucket index). Chains are kept in strcmp order so the walk
 * can stop early; an exact match replaces in place, otherwise a new node
 * is spliced in at its sorted position.
 */
void util_htseth(hash_table_t *ht, const char *key, size_t bin, void *value) {
    hash_node_t *newnode = NULL;
    hash_node_t *next = NULL;
    hash_node_t *last = NULL;
    next = ht->table[bin];
    /* advance while existing keys sort strictly before ours */
    while (next && next->key && strcmp(key, next->key) > 0)
        last = next, next = next->next;
    /* already in table, do a replace */
    if (next && next->key && strcmp(key, next->key) == 0) {
    /* not found: create a fresh pair.
     * NOTE(review): newnode is dereferenced below without a visible NULL
     * check — confirm _util_htnewpair failure handling. */
    newnode = _util_htnewpair(key, value);
    if (next == ht->table[bin]) {
        /* new sorted head of the chain */
        newnode->next = next;
        ht->table[bin] = newnode;
        /* append at the chain tail */
        last->next = newnode;
        /* splice into the middle, after `last` */
        newnode->next = next;
        last->next = newnode;
/* Convenience wrapper: hash the key and insert via util_htseth. */
void util_htset(hash_table_t *ht, const char *key, void *value) {
    util_htseth(ht, key, util_hthash(ht, key), value);
/*
 * Look up `key` in bucket `bin`. Exploits the sorted chains maintained by
 * util_htseth: the walk stops as soon as stored keys no longer sort before
 * ours. Returns the stored value, or NULL (below) when absent.
 */
void *util_htgeth(hash_table_t *ht, const char *key, size_t bin) {
    hash_node_t *pair = ht->table[bin];
    while (pair && pair->key && strcmp(key, pair->key) > 0)
    /* either ran past the sorted position or hit an exact mismatch */
    if (!pair || !pair->key || strcmp(key, pair->key) != 0)
/* Convenience wrapper: hash the key and look it up via util_htgeth. */
void *util_htget(hash_table_t *ht, const char *key) {
    return util_htgeth(ht, key, util_hthash(ht, key));
/*
 * String-table specific lookup: besides exact matches, a key that is a
 * suffix of a stored key also matches, with the stored value offset by
 * where the suffix begins (values appear to be string-section offsets —
 * confirm against the caller).
 */
void *code_util_str_htgeth(hash_table_t *ht, const char *key, size_t bin) {
    keylen = strlen(key);
    pair = ht->table[bin];
    while (pair && pair->key) {
        len = strlen(pair->key);
        /* exact comparison first */
        cmp = strcmp(key, pair->key);
        /* then compare against the tail of the stored key (suffix match) */
        cmp = strcmp(key, pair->key + len - keylen);
            /* adjust the stored value by the suffix's byte offset */
            uintptr_t up = (uintptr_t)pair->value;
/*
 * Free all allocated data in a hashtable: every node of every bucket chain,
 * and (in lines not visible here) the bucket array and the table object
 * themselves.
 */
void util_htdel(hash_table_t *ht) {
    for (; i < ht->size; i++) {
        hash_node_t *n = ht->table[i];
585 * A basic implementation of a hash-set. Unlike a hashtable, a hash
586 * set doesn't maintain key-value pairs. It simply maintains a key
587 * that can be set, removed, and checked for.
589 * See EXPOSED interface comment below
591 #define GMQCC_HASHSET_PRIME0 0x0049
592 #define GMQCC_HASHSET_PRIME1 0x1391
594 static int util_hsput(hash_set_t *set, void *item) {
595 size_t hash = (size_t)item; /* shouldn't drop the bits */
598 /* a == 0 || a == 1 */
602 iter = set->mask & (GMQCC_HASHSET_PRIME0 * hash);
604 /* while (set->items[iter] != 0 && set->items[iter] != 1) */
605 while (!(set->items[iter] >> 1)) {
606 if (set->items[iter] == hash)
609 iter = set->mask & (iter + GMQCC_HASHSET_PRIME1);
613 set->items[iter] = hash;
/*
 * Rehash the set once the load factor crosses 0.85: allocate a larger item
 * array and reinsert every live entry from the old one (tombstones are
 * rejected by util_hsput's sentinel guard and thus dropped).
 */
static void util_hsupdate(hash_set_t *set) {
    /* time to rehash?
     * NOTE(review): the casts are inconsistent — total compared as float
     * against a size_t-truncated threshold; an integer comparison such as
     * (total * 100 >= capacity * 85) would be exact and clearer. */
    if ((float)set->total >= (size_t)((double)set->capacity * 0.85)) {
        set->capacity = (size_t)(1 << set->bits);
        set->mask = set->capacity - 1;
        /* NOTE(review): allocation result used unchecked; the assert below
         * is commented out. */
        set->items = mem_a(set->capacity * sizeof(size_t));
        /*assert(set->items);*/
        /*
         * Reinsertion loop: shouldn't be slow; if so, unroll it a little
         * (it shouldn't be, though).
         */
        for (itr = 0; itr < end; itr++)
            util_hsput(set, (void*)old[itr]);
/*
 * EXPOSED interface: all of these functions are exposed to the outside
 * for use. The stuff above is static because it's the "internal" mechanics
 * for synchronizing the set for updating, and putting data into the set.
 */
/* Add `item` to the set; delegates to util_hsput, with the rehash check
 * (util_hsupdate) following in lines not visible here. */
int util_hsadd(hash_set_t *set, void *item) {
    int run = util_hsput(set, item); /* inlined */
/* remove item in set */
/*
 * Open-addressed removal: probe with the same PRIME0/PRIME1 sequence used
 * on insertion. A removed slot is marked with the tombstone value 1 (not 0)
 * so later probe chains are not broken; probing stops only at a truly
 * empty (0) slot.
 */
int util_hsrem(hash_set_t *set, void *item) {
    size_t hash = (size_t)item;
    size_t iter = set->mask & (GMQCC_HASHSET_PRIME0 * hash);
    while (set->items[iter]) {
        if (set->items[iter] == hash) {
            set->items[iter] = 1; /* tombstone, preserves probe chains */
        iter = set->mask & (iter + GMQCC_HASHSET_PRIME1);
/* check if item is set */
/*
 * Membership test: same probe sequence as insertion; steps over tombstones
 * and other keys, stopping only at a truly empty (0) slot.
 */
int util_hshas(hash_set_t *set, void *item) {
    size_t hash = (size_t)item;
    size_t iter = set->mask & (GMQCC_HASHSET_PRIME0 * hash);
    while (set->items[iter]) {
        if (set->items[iter] == hash)
        iter = set->mask & (iter + GMQCC_HASHSET_PRIME1);
/*
 * Allocate an empty hash set with the default capacity (1 << set->bits);
 * mask is capacity-1 so probe indices wrap with a single AND (capacity is
 * always a power of two). Returns NULL on allocation failure of the set
 * object itself.
 */
hash_set_t *util_hsnew(void) {
    if (!(set = mem_a(sizeof(hash_set_t))))
    set->capacity = (size_t)(1 << set->bits);
    set->mask = set->capacity - 1;
    /* NOTE(review): items allocation is not checked here */
    set->items = mem_a(set->capacity * sizeof(size_t));
/* Destroy a set created by util_hsnew (the item array and set object are
 * freed in lines not visible here). */
void util_hsdel(hash_set_t *set) {
720 #undef GMQCC_HASHSET_PRIME0
721 #undef GMQCC_HASHSET_PRIME1
725 * Portable implementation of vasprintf/asprintf. Assumes vsnprintf
726 * exists, otherwise compiler error.
728 * TODO: fix for MSVC ....
/*
 * Portable vasprintf: formats into a freshly allocated (mem_a-tracked)
 * buffer stored in *dat and returns the formatted length (negative on
 * failure). Two code paths: an MSVC path using _vscprintf/_vsnprintf and a
 * conforming-vsnprintf path for everything else.
 */
int util_vasprintf(char **dat, const char *fmt, va_list args) {
    /*
     * For Visual Studio, _vsnprintf doesn't tell you the length of a
     * formatted string if it overflows. However there is an MSVC
     * intrinsic (which is documented wrong) called _vscprintf which
     * will return the required amount to allocate.
     */
    if ((len = _vscprintf(fmt, args)) < 0) {
    tmp = mem_a(len + 1);
    if ((ret = _vsnprintf(tmp, len+1, fmt, args)) != len) {
    /*
     * For everything else we have a decent conforming vsnprintf that
     * returns the number of bytes needed. We try a short stack buffer
     * first, since that avoids a second vsnprintf call when the result
     * fits. Note: formatting consumes the va_list, hence the copy `cpy`.
     */
    len = vsnprintf(buf, sizeof(buf), fmt, cpy);
    /* result fit in the stack buffer: just duplicate it */
    if (len < (int)sizeof(buf)) {
        *dat = util_strdup(buf);
    /* not large enough ... format again into an exact-size allocation */
    tmp = mem_a(len + 1);
    if ((ret = vsnprintf(tmp, len + 1, fmt, args)) != len) {
    /* never reached ... */
/* asprintf built on util_vasprintf: formats into *ret and returns the
 * formatted length (negative on failure). */
int util_asprintf(char **ret, const char *fmt, ...) {
    read = util_vasprintf(ret, fmt, args);
799 * Implementation of the Mersenne twister PRNG (pseudo random numer
800 * generator). Implementation of MT19937. Has a period of 2^19937-1
801 * which is a Mersenne Prime (hence the name).
803 * Implemented from specification and original paper:
804 * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/ARTICLES/mt.pdf
806 * This code is placed in the public domain by me personally
807 * (Dale Weiler, a.k.a graphitemaster).
/* MT19937 parameters: 397 is the "middle word" offset M from the paper;
 * MT_SPACE is N - M. */
#define MT_PERIOD 397
#define MT_SPACE (MT_SIZE - MT_PERIOD)
/* generator state vector and current read index into it */
static uint32_t mt_state[MT_SIZE];
static size_t mt_index = 0;
/*
 * Regenerate the full MT19937 state vector (one "twist"). The loop has
 * been unrolled; the original paper and implementation call for:
 *
 *   for (register unsigned i = 0; i < MT_SIZE; ++i) {
 *       register uint32_t load;
 *       load  = (0x80000000 & mt_state[i]);                 // most significant bit
 *       load |= (0x7FFFFFFF & mt_state[(i + 1) % MT_SIZE]); // low 31 bits
 *       mt_state[i] = mt_state[(i + MT_PERIOD) % MT_SIZE] ^ (load >> 1);
 *       if (load & 1) mt_state[i] ^= 0x9908B0DF;
 *   }
 *
 * This is essentially a waste: two modulus operations and a branch executed
 * every iteration over [0, MT_SIZE). Please see
 * http://www.quadibloc.com/crypto/co4814.htm for more information on how
 * this trick works.
 */
static GMQCC_INLINE void mt_generate() {
    /* matrix[load & 1] replaces the per-iteration branch: index 0 is the
     * identity, index 1 the twist constant. */
    static const uint32_t matrix[2] = {
    /*
     * `register` here nudges the compiler to keep these counters in CPU
     * registers (they index mt_state on every line below).
     */
    /*
     * The first loop covers [0, MT_SPACE) (226 iterations), as opposed to
     * the full [0, MT_SIZE).
     */
    for (i = 0; i < MT_SPACE; ++i) {
        /* NOTE(review): 0x7FFFFFF on the next two y-lines is missing an F —
         * every other mask in this function is 0x7FFFFFFF (the low-31-bit
         * mask); as written bit 30 of the neighbor word is dropped, which
         * corrupts the generator state. Confirm and fix. */
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i + MT_PERIOD] ^ (y >> 1) ^ matrix[y & 1];
        i ++; /* loop unroll */
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i + MT_PERIOD] ^ (y >> 1) ^ matrix[y & 1];
    /*
     * Middle section, unrolled 11x (evenly dividing the remaining 396
     * iterations); the feedback word is now behind the cursor (i - MT_SPACE).
     */
    while (i < MT_SIZE - 1) {
        /*
         * Expanded manually 11 times — no macros required; this all fits
         * in the CPU cache.
         */
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i - MT_SPACE] ^ (y >> 1) ^ matrix[y & 1];
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i - MT_SPACE] ^ (y >> 1) ^ matrix[y & 1];
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i - MT_SPACE] ^ (y >> 1) ^ matrix[y & 1];
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i - MT_SPACE] ^ (y >> 1) ^ matrix[y & 1];
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i - MT_SPACE] ^ (y >> 1) ^ matrix[y & 1];
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i - MT_SPACE] ^ (y >> 1) ^ matrix[y & 1];
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i - MT_SPACE] ^ (y >> 1) ^ matrix[y & 1];
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i - MT_SPACE] ^ (y >> 1) ^ matrix[y & 1];
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i - MT_SPACE] ^ (y >> 1) ^ matrix[y & 1];
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i - MT_SPACE] ^ (y >> 1) ^ matrix[y & 1];
        y = (0x80000000 & mt_state[i]) | (0x7FFFFFFF & mt_state[i + 1]);
        mt_state[i] = mt_state[i - MT_SPACE] ^ (y >> 1) ^ matrix[y & 1];
    /* final word: i == MT_SIZE - 1 (mt_state[623]), wrapping around.
     * NOTE(review): the low-bits operand below is mt_state[MT_SIZE - 1];
     * the reference MT19937 wraps to mt_state[0] here — confirm and fix. */
    y = (0x80000000 & mt_state[MT_SIZE - 1]) | (0x7FFFFFFF & mt_state[MT_SIZE - 1]);
    mt_state[MT_SIZE - 1] = mt_state[MT_PERIOD - 1] ^ (y >> 1) ^ matrix[y & 1];
/*
 * Seed the MT19937 state from a single 32-bit value: mt_state[0] is set
 * from `value` (in a line not visible here), then each subsequent word is
 * derived from its predecessor with an LCG step.
 */
void util_seed(uint32_t value) {
    /*
     * We seed mt_state with an LCG (linear congruential generator).
     * We're operating exactly on m = 2^32, so there is no need to mask.
     *
     * The multiplier of choice is 0x6C078965 (matching the code below),
     * also known as the Borosh-Niederreiter multiplier for modulus 2^32.
     * More can be read about this in Knuth's TAOCP Volume 2, page 106.
     *
     * If you don't own TAOCP something is wrong with you :-) .. so I
     * also provided a link to the original paper by Borosh and
     * Niederreiter: "Optimal Multipliers for Pseudo-Random Number
     * Generation by the Linear Congruential Method" (1983).
     * http://en.wikipedia.org/wiki/Linear_congruential_generator
     *
     * From said page:
     * "A common Mersenne twister implementation, interestingly enough,
     * used an LCG to generate seed data."
     *
     * The data we're operating on is 32 bits for the mt_state array, so
     * there is no masking required with 0xFFFFFFFF.
     */
    for (i = 1; i < MT_SIZE; ++i)
        mt_state[i] = 0x6C078965 * (mt_state[i - 1] ^ mt_state[i - 1] >> 30) + i;
944 uint32_t util_rand() {
948 * This is inlined with any sane compiler (I checked)
949 * for some reason though, SubC seems to be generating invalid
950 * code when it inlines this.
955 y = mt_state[mt_index];
957 /* Standard tempering */
958 y ^= y >> 11; /* +7 */
959 y ^= y << 7 & 0x9D2C5680; /* +4 */
960 y ^= y << 15 & 0xEFC60000; /* -4 */
961 y ^= y >> 18; /* -7 */
963 if(++mt_index == MT_SIZE)