2 * Copyright (C) 2012, 2013
6 * Permission is hereby granted, free of charge, to any person obtaining a copy of
7 * this software and associated documentation files (the "Software"), to deal in
8 * the Software without restriction, including without limitation the rights to
9 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
10 * of the Software, and to permit persons to whom the Software is furnished to do
11 * so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * For the valgrind integration of our allocator. This allows us to have
32 * more `accurate` valgrind output for our allocator, and also secures the
33 * possible underflows (where one could obtain access to the redzone that
34 * represents info about that allocation).
37 # include <valgrind/valgrind.h>
38 # include <valgrind/memcheck.h>
40 # define VALGRIND_MALLOCLIKE_BLOCK(PTR, ALLOC_SIZE, REDZONE_SIZE, ZEROED)
41 # define VALGRIND_FREELIKE_BLOCK(PTR, REDZONE_SIZE)
42 # define VALGRIND_MAKE_MEM_DEFINED(PTR, REDZONE_SIZE)
43 # define VALGRIND_MAKE_MEM_NOACCESS(PTR, REDZONE_SIZE)
47 * GMQCC performs tons of allocations, constructions, and craziness
48 * all around. When trying to optimize systems, or just get fancy
49 * statistics out of the compiler, it's often a printf mess. This file
50 * implements the statistics system of the compiler, i.e. the allocator
51 * we use to track allocations, and other systems of interest.
/*
 * Per-allocation bookkeeping header. Every tracked allocation is prefixed
 * with one of these, and all live headers form a doubly-linked list whose
 * head is stat_mem_block_root (see below). Additional fields (file, line,
 * size -- read by stat_mem_deallocate/stat_dump_mem_leaks) are declared on
 * lines not visible in this view; confirm against the full source.
 */
55 typedef struct stat_mem_block_s {
59     struct stat_mem_block_s *next; /* next live allocation in the global list */
60     struct stat_mem_block_s *prev; /* previous live allocation */
/*
 * NOTE(review): the elided lines between here and the closing brace below
 * end stat_mem_block_s and open a second struct -- the size-table entry
 * (its key/value fields are written by stat_size_put). The closing line
 * below belongs to that second typedef.
 */
66 } stat_size_entry_t, **stat_size_table_t;
/*
 * Global statistics state. The stat_mem_* counters are maintained by
 * stat_mem_allocate/stat_mem_deallocate/stat_mem_reallocate; the
 * stat_used_*/stat_type_*/stat_size_* values are maintained by the vector
 * and hashtable helpers and reported by the dump functions at the bottom
 * of this file.
 */
68 static uint64_t stat_mem_allocated = 0; /* bytes currently accounted as allocated */
69 static uint64_t stat_mem_deallocated = 0; /* bytes accounted as freed */
70 static uint64_t stat_mem_allocated_total = 0; /* number of allocations ever made */
71 static uint64_t stat_mem_deallocated_total = 0; /* number of frees ever made */
72 static uint64_t stat_mem_high = 0; /* current live byte count (alloc - free) */
73 static uint64_t stat_mem_peak = 0; /* high-water mark of stat_mem_high */
74 static uint64_t stat_mem_strdups = 0; /* total bytes duplicated by stat_mem_strdup */
75 static uint64_t stat_used_strdups = 0; /* number of stat_mem_strdup calls */
76 static uint64_t stat_used_vectors = 0;
77 static uint64_t stat_used_hashtables = 0; /* number of hashtables created */
78 static uint64_t stat_type_vectors = 0; /* number of distinct vector element sizes */
79 static uint64_t stat_type_hashtables = 0; /* number of distinct hashtable sizes */
80 static stat_size_table_t stat_size_vectors = NULL; /* size histogram for vectors */
81 static stat_size_table_t stat_size_hashtables = NULL; /* size histogram for hashtables */
82 static stat_mem_block_t *stat_mem_block_root = NULL; /* head of the live-allocation list */
85 * A tiny size_t key-value hashtable for tracking vector and hashtable
86 * sizes. We can use it for other things too, if we need to. This is
87 * very TIGHT, and efficient in terms of space though.
/*
 * Allocate a new size table: an array of ST_SIZE entry pointers,
 * zero-initialized so every slot starts empty. The closing of the
 * memset()/return expression is on lines elided from this view.
 */
89 static stat_size_table_t stat_size_new(void) {
90     return (stat_size_table_t)memset(
91         mem_a(sizeof(stat_size_entry_t*) * ST_SIZE),
92         0, ST_SIZE * sizeof(stat_size_entry_t*)
/*
 * Destroy a size table: free each occupied entry slot. The declaration of
 * the loop index `i` and the free of the table array itself are on lines
 * elided from this view -- presumably `size_t i = 0;` and `mem_d(table);`;
 * confirm against the full source.
 */
96 static void stat_size_del(stat_size_table_t table) {
98     for (; i < ST_SIZE; i++) if(table[i]) mem_d(table[i]);
/*
 * Look up `key` in the table using open addressing with linear probing.
 * Stops at the first empty slot or the slot holding `key`; the return of
 * table[hash] is on a line elided from this view. NOTE(review): the probe
 * loop does not terminate if the table is completely full and the key is
 * absent -- presumably the table is kept sparse by its callers; confirm.
 */
102 static stat_size_entry_t *stat_size_get(stat_size_table_t table, size_t key) {
103     size_t hash = (key % ST_SIZE);
104     while (table[hash] && table[hash]->key != key)
105         hash = (hash + 1) % ST_SIZE;
/*
 * Insert key/value into the table, probing linearly for a free slot.
 * NOTE(review): if the probe stops on a slot that already holds `key`,
 * the existing entry pointer is overwritten by a fresh allocation (leak);
 * callers appear to call stat_size_get first and only put on a miss --
 * confirm that invariant holds at every call site.
 */
108 static void stat_size_put(stat_size_table_t table, size_t key, size_t value) {
109     size_t hash = (key % ST_SIZE);
110     while (table[hash] && table[hash]->key != key)
111         hash = (hash + 1) % ST_SIZE;
112     table[hash] = (stat_size_entry_t*)mem_a(sizeof(stat_size_entry_t));
113     table[hash]->key = key;
114     table[hash]->value = value;
118 * A basic header of information wrapper allocator. Simply stores
119 * information as a header, returns the memory + 1 past it, can be
120 * retrieved again with - 1. Where type is stat_mem_block_t*.
/*
 * Tracked allocator: malloc a stat_mem_block_t header plus `size` user
 * bytes, link the header at the head of the live-allocation list, update
 * the global counters, and hand back the memory just past the header.
 * The header field writes (size/line/file/prev) and the `return data;`
 * are on lines elided from this view.
 */
122 void *stat_mem_allocate(size_t size, size_t line, const char *file) {
123     stat_mem_block_t *info = (stat_mem_block_t*)malloc(sizeof(stat_mem_block_t) + size);
/* user data lives immediately after the header */
124     void *data = (void*)(info + 1);
/* malloc failure path; the action taken is on an elided line (likely returns NULL) */
126     if(GMQCC_UNLIKELY(!info))
/* push onto the head of the global doubly-linked list */
133     info->next = stat_mem_block_root;
135     /* likely since it only happens once */
136     if (GMQCC_LIKELY(stat_mem_block_root != NULL)) {
/* the old root's header is a valgrind redzone: open it, patch prev, close it */
137         VALGRIND_MAKE_MEM_DEFINED(stat_mem_block_root, sizeof(stat_mem_block_t));
138         stat_mem_block_root->prev = info;
139         VALGRIND_MAKE_MEM_NOACCESS(stat_mem_block_root, sizeof(stat_mem_block_t));
142     stat_mem_block_root = info;
/* bookkeeping: current bytes, live high-water, allocation count */
143     stat_mem_allocated += size;
144     stat_mem_high += size;
145     stat_mem_allocated_total ++;
147     if (stat_mem_high > stat_mem_peak)
148         stat_mem_peak = stat_mem_high;
/* tell valgrind about the user region, with the header acting as redzone */
150     VALGRIND_MALLOCLIKE_BLOCK(data, size, sizeof(stat_mem_block_t), 0);
/*
 * Tracked free: recover the header in front of `ptr`, unlink it from the
 * live-allocation list, update counters, and release it. The actual
 * free(info) call and the neighbor-NULL checks around the prev/next
 * patching are on lines elided from this view.
 */
154 void stat_mem_deallocate(void *ptr) {
155     stat_mem_block_t *info = NULL;
/* free(NULL) semantics: ignore a null pointer (action on an elided line) */
157     if (GMQCC_UNLIKELY(!ptr))
/* the header sits immediately before the user pointer */
160     info = ((stat_mem_block_t*)ptr - 1);
163      * we need access to the redzone that represents the info block
166     VALGRIND_MAKE_MEM_DEFINED(info, sizeof(stat_mem_block_t));
168     stat_mem_deallocated += info->size;
169     stat_mem_high -= info->size;
170     stat_mem_deallocated_total ++;
/* unlink: patch the previous node's next pointer (guarded by an elided null check) */
173     /* just need access for a short period */
174     VALGRIND_MAKE_MEM_DEFINED(info->prev, sizeof(stat_mem_block_t));
175     info->prev->next = info->next;
176     /* don't need access anymore */
177     VALGRIND_MAKE_MEM_NOACCESS(info->prev, sizeof(stat_mem_block_t));
/* unlink: patch the next node's prev pointer (guarded by an elided null check) */
180     /* just need access for a short period */
181     VALGRIND_MAKE_MEM_DEFINED(info->next, sizeof(stat_mem_block_t));
182     info->next->prev = info->prev;
183     /* don't need access anymore */
184     VALGRIND_MAKE_MEM_NOACCESS(info->next, sizeof(stat_mem_block_t));
/* freeing the list head moves the root forward */
188     if (info == stat_mem_block_root)
189         stat_mem_block_root = info->next;
192     VALGRIND_MAKE_MEM_NOACCESS(info, sizeof(stat_mem_block_t));
193     VALGRIND_FREELIKE_BLOCK(ptr, sizeof(stat_mem_block_t));
/*
 * Tracked realloc: allocate a fresh header+data block, copy the old user
 * data, unlink the old header from the live list, link the new one at the
 * head, and fix up the byte counters. Mirrors glibc realloc semantics:
 * NULL ptr behaves as malloc, size 0 behaves as free. The free(oldinfo),
 * the return statements, and the null guards around the prev/next patching
 * are on lines elided from this view.
 */
196 void *stat_mem_reallocate(void *ptr, size_t size, size_t line, const char *file) {
197     stat_mem_block_t *oldinfo = NULL;
198     stat_mem_block_t *newinfo;
/* realloc(NULL, n) == malloc(n) */
200     if (GMQCC_UNLIKELY(!ptr))
201         return stat_mem_allocate(size, line, file);
203     /* stay consistent with glibc */
204     if (GMQCC_UNLIKELY(!size)) {
205         stat_mem_deallocate(ptr);
209     oldinfo = ((stat_mem_block_t*)ptr - 1);
210     newinfo = ((stat_mem_block_t*)malloc(sizeof(stat_mem_block_t) + size));
/* on allocation failure, release the old block (diverges from glibc, which
 * keeps the old block valid on failure) */
212     if (GMQCC_UNLIKELY(!newinfo)) {
213         stat_mem_deallocate(ptr);
217     VALGRIND_MALLOCLIKE_BLOCK(newinfo + 1, size, sizeof(stat_mem_block_t), 0);
219     /* we need access to the old info redzone */
220     VALGRIND_MAKE_MEM_DEFINED(oldinfo, sizeof(stat_mem_block_t));
/* NOTE(review): copies oldinfo->size bytes even when shrinking -- writes
 * past the new block if size < oldinfo->size; confirm whether an elided
 * line clamps the copy length. */
222     memcpy(newinfo+1, oldinfo+1, oldinfo->size);
/* unlink the old header (null guards for prev/next are on elided lines) */
225     /* just need access for a short period */
226     VALGRIND_MAKE_MEM_DEFINED(oldinfo->prev, sizeof(stat_mem_block_t));
227     oldinfo->prev->next = oldinfo->next;
228     /* don't need access anymore */
229     VALGRIND_MAKE_MEM_NOACCESS(oldinfo->prev, sizeof(stat_mem_block_t));
233     /* just need access for a short period */
234     VALGRIND_MAKE_MEM_DEFINED(oldinfo->next, sizeof(stat_mem_block_t));
235     oldinfo->next->prev = oldinfo->prev;
236     /* don't need access anymore */
237     VALGRIND_MAKE_MEM_NOACCESS(oldinfo->next, sizeof(stat_mem_block_t));
241     if (oldinfo == stat_mem_block_root)
242         stat_mem_block_root = oldinfo->next;
244     /* we need access to the redzone for the newinfo block */
245     VALGRIND_MAKE_MEM_DEFINED(newinfo, sizeof(stat_mem_block_t));
/* populate the replacement header and push it onto the list head */
247     newinfo->line = line;
248     newinfo->size = size;
249     newinfo->file = file;
250     newinfo->prev = NULL;
251     newinfo->next = stat_mem_block_root;
254      * likely since the only time there is no root is when it's
255      * being initialized first.
257     if (GMQCC_LIKELY(stat_mem_block_root != NULL)) {
258         /* we need access to the root */
259         VALGRIND_MAKE_MEM_DEFINED(stat_mem_block_root, sizeof(stat_mem_block_t));
260         stat_mem_block_root->prev = newinfo;
262         VALGRIND_MAKE_MEM_NOACCESS(stat_mem_block_root, sizeof(stat_mem_block_t));
265     stat_mem_block_root = newinfo;
/* swap the old block's bytes out of the counters and the new block's in */
266     stat_mem_allocated -= oldinfo->size;
267     stat_mem_high -= oldinfo->size;
268     stat_mem_allocated += newinfo->size;
269     stat_mem_high += newinfo->size;
272      * we're finished with the redzones, lets kill the access
275     VALGRIND_MAKE_MEM_NOACCESS(newinfo, sizeof(stat_mem_block_t));
276     VALGRIND_MAKE_MEM_NOACCESS(oldinfo, sizeof(stat_mem_block_t));
278     if (stat_mem_high > stat_mem_peak)
279         stat_mem_peak = stat_mem_high;
282     VALGRIND_FREELIKE_BLOCK(ptr, sizeof(stat_mem_block_t));
287 * strdup does its own malloc, but we need to track that malloc. We don't
288 * want to overwrite malloc though; in fact, we can't really hook it at all
289 * without library-specific assumptions. So we re-implement strdup.
/*
 * Tracked strdup replacement: duplicate `src` through stat_mem_allocate so
 * the copy is accounted for. `empty` controls whether zero-length strings
 * are duplicated or skipped. The declarations of `len`/`ptr`, the strlen
 * computation, the NUL termination, and the return are on lines elided
 * from this view.
 */
291 char *stat_mem_strdup(const char *src, size_t line, const char *file, bool empty) {
/* duplicate when the string is non-empty, or unconditionally when empty
 * duplicates are explicitly allowed; +1 reserves space for the terminator */
299     if (((!empty) ? len : true) && (ptr = (char*)stat_mem_allocate(len + 1, line, file))) {
300         memcpy(ptr, src, len);
/* strdup statistics: call count and total duplicated bytes */
304     stat_used_strdups ++;
305     stat_mem_strdups += len;
310 * The reallocate function for resizing vectors.
/*
 * Grow a vector's backing store. `*a` points just past a vector_t meta
 * header; `i` is the number of additional elements needed and `s` the
 * element size. Doubles the current capacity plus `i`. Also records the
 * element size in the stat_size_vectors histogram. The declarations of
 * m/p, the branch between the realloc and first-allocation paths, and the
 * histogram-increment on a hit are on lines elided from this view.
 */
312 void _util_vec_grow(void **a, size_t i, size_t s) {
313     vector_t *d = vec_meta(*a);
315     stat_size_entry_t *e = NULL;
/* growth policy: double the capacity, plus the requested headroom */
319     m = 2 * d->allocated + i;
/* existing vector: resize header + elements in place */
320     p = mem_r(d, s * m + sizeof(vector_t));
/* first allocation path (guard on an elided line): fresh header, zero used */
323     p = mem_a(s * m + sizeof(vector_t));
324     ((vector_t*)p)->used = 0;
/* lazily create the element-size histogram */
328     if (!stat_size_vectors)
329         stat_size_vectors = stat_size_new();
331     if ((e = stat_size_get(stat_size_vectors, s))) {
334         stat_size_put(stat_size_vectors, s, 1); /* start off with 1 */
/* hand back the data region just past the meta header */
338     *a = (vector_t*)p + 1;
339     vec_meta(*a)->allocated = m;
343 * Hash table for generic data, based on dynamic memory allocations
344 * all around. This is the internal interface, please look for
345 * EXPOSED INTERFACE comment below
/*
 * One key/value pair in a hashtable bucket; collisions chain through
 * `next`. The closing `} hash_node_t;` line is elided from this view.
 */
347 typedef struct hash_node_t {
348     char *key; /* the key for this node in table */
349     void *value; /* pointer to the data as void* */
350     struct hash_node_t *next; /* next node (linked list) */
354 * This is a patched version of the Murmur2 hashing function to use
355 * a proper pre-mix and post-mix setup. In fact this is Murmur3 for
356 * the most part just reinvented.
358 * Murmur 2 contains an inner loop such as:
371 * The two u32s that form the key are the same value x (pulled from data)
372 * this premix stage will perform the same results for both values. Unrolled
373 * this produces just:
383 * This appears to be fine, except what happens when m == 1? Well, x
384 * cancels out entirely, leaving just:
389 * So all keys hash to the same value, but how often does m == 1?
390 * Well, it turns out testing x for all possible values yields only
391 * 172,013,942 unique results instead of 2^32. So nearly ~4.6 bits
392 * are cancelled out on average!
394 * This means we have a 14.5% (rounded) chance of colliding more, which
395 * results in another bucket/chain for the hashtable.
397 * We fix it by upgrading the pre- and post-mix systems to align with murmur
401 #define GMQCC_ROTL32(X, R) (((X) << (R)) | ((X) >> (32 - (R))))
402 GMQCC_INLINE size_t util_hthash(hash_table_t *ht, const char *key) {
403 const unsigned char *data = (const unsigned char *)key;
404 const size_t len = strlen(key);
405 const size_t block = len / 4;
406 const uint32_t mask1 = 0xCC9E2D51;
407 const uint32_t mask2 = 0x1B873593;
408 const uint32_t *blocks = (const uint32_t*)(data + block * 4);
409 const unsigned char *tail = (const unsigned char *)(data + block * 4);
413 uint32_t h = 0x1EF0 ^ len;
415 for (i = -((int)block); i; i++) {
418 k = GMQCC_ROTL32(k, 15);
421 h = GMQCC_ROTL32(h, 13);
422 h = h * 5 + 0xE6546B64;
434 k = GMQCC_ROTL32(k, 15);
446 return (size_t) (h % ht->size);
450 /* We keep the old for reference */
/*
 * Original Murmur2-style hash, retained for comparison with the patched
 * version above. NOTE(review): a second definition of util_hthash can only
 * compile if this one is preprocessor-disabled (e.g. inside #if 0 / #else)
 * -- the guard lines are elided from this view; confirm.
 */
451 GMQCC_INLINE size_t util_hthash(hash_table_t *ht, const char *key) {
452     const uint32_t mix = 0x5BD1E995;
453     const uint32_t rot = 24;
454     size_t size = strlen(key);
455     uint32_t hash = 0x1EF0 /* LICRC TAB */ ^ size;
457     const unsigned char *data = (const unsigned char*)key;
/* load 4 bytes little-endian and pre-mix (the loop framing is elided) */
460     alias = (data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24));
462     alias ^= alias >> rot;
/* tail bytes: classic Murmur2 fallthrough switch */
473     case 3: hash ^= data[2] << 16;
474     case 2: hash ^= data[1] << 8;
475     case 1: hash ^= data[0];
483     return (size_t) (hash % ht->size);
/*
 * Allocate a new key/value node for a hashtable bucket. The `node`
 * declaration, failure returns, cleanup on strdup failure, value
 * assignment, and final return are on lines elided from this view.
 */
487 static hash_node_t *_util_htnewpair(const char *key, void *value) {
489     if (!(node = (hash_node_t*)mem_a(sizeof(hash_node_t))))
/* the node owns a duplicate of the key */
492     if (!(node->key = util_strdupe(key))) {
504 * EXPOSED INTERFACE for the hashtable implementation
505 * util_htnew(size) -- to make a new hashtable
506 * util_htset(table, key, value, sizeof(value)) -- to set something in the table
507 * util_htget(table, key) -- to get something from the table
508 * util_htdel(table) -- to delete the table
/*
 * Create a hashtable with `size` buckets. Also records `size` in the
 * stat_size_hashtables histogram for the statistics dump. The NULL-return
 * failure paths, the histogram increment on a hit, and the final return
 * are on lines elided from this view.
 */
510 hash_table_t *util_htnew(size_t size) {
511     hash_table_t *hashtable = NULL;
512     stat_size_entry_t *find = NULL;
/* lazily create the size histogram */
517     if (!stat_size_hashtables)
518         stat_size_hashtables = stat_size_new();
520     if (!(hashtable = (hash_table_t*)mem_a(sizeof(hash_table_t))))
/* bucket-array allocation failure: cleanup on elided lines */
523     if (!(hashtable->table = (hash_node_t**)mem_a(sizeof(hash_node_t*) * size))) {
/* histogram: bump existing size entry (elided) or start a new one at 1 */
528     if ((find = stat_size_get(stat_size_hashtables, size)))
531     stat_type_hashtables++;
532     stat_size_put(stat_size_hashtables, size, 1);
535     hashtable->size = size;
/* all buckets start empty */
536     memset(hashtable->table, 0, sizeof(hash_node_t*) * size);
538     stat_used_hashtables++;
/*
 * Insert or replace `key` -> `value` in bucket `bin` (a precomputed hash).
 * Chains are kept sorted by strcmp order, so the walk can stop at the
 * first node that does not compare less than `key`. The replace body and
 * the branch structure between the three insertion positions (head,
 * tail, middle) are partially on elided lines.
 */
542 void util_htseth(hash_table_t *ht, const char *key, size_t bin, void *value) {
543     hash_node_t *newnode = NULL;
544     hash_node_t *next = NULL;
545     hash_node_t *last = NULL;
547     next = ht->table[bin];
/* walk the sorted chain; `last` trails one node behind `next` */
549     while (next && next->key && strcmp(key, next->key) > 0)
550         last = next, next = next->next;
552     /* already in table, do a replace */
553     if (next && next->key && strcmp(key, next->key) == 0) {
556     /* not found, grow a pair man :P */
557     newnode = _util_htnewpair(key, value);
/* insert at the head of the chain */
558     if (next == ht->table[bin]) {
559         newnode->next = next;
560         ht->table[bin] = newnode;
/* append at the tail (condition on an elided line) */
562         last->next = newnode;
/* splice into the middle, between last and next */
564         newnode->next = next;
565         last->next = newnode;
/* Convenience wrapper: hash the key, then insert via util_htseth. */
570 void util_htset(hash_table_t *ht, const char *key, void *value) {
571     util_htseth(ht, key, util_hthash(ht, key), value);
/*
 * Look up `key` in bucket `bin` (a precomputed hash). Exploits the sorted
 * chain: stop at the first node not less than `key`. The chain-advance
 * statement and the NULL / pair->value returns are on elided lines.
 */
574 void *util_htgeth(hash_table_t *ht, const char *key, size_t bin) {
575     hash_node_t *pair = ht->table[bin];
577     while (pair && pair->key && strcmp(key, pair->key) > 0)
/* miss: ran off the chain or stopped on a different key */
580     if (!pair || !pair->key || strcmp(key, pair->key) != 0)
/* Convenience wrapper: hash the key, then look up via util_htgeth. */
586 void *util_htget(hash_table_t *ht, const char *key) {
587     return util_htgeth(ht, key, util_hthash(ht, key));
/*
 * Specialized lookup used by the code generator's string pooling: besides
 * exact matches it also matches when `key` is a suffix of a stored key,
 * in which case the returned value is offset accordingly (the uintptr_t
 * arithmetic below). Declarations of keylen/len/cmp/pair, the loop
 * advance, and the return statements are on lines elided from this view.
 */
590 void *code_util_str_htgeth(hash_table_t *ht, const char *key, size_t bin);
591 void *code_util_str_htgeth(hash_table_t *ht, const char *key, size_t bin) {
596     keylen = strlen(key);
598     pair = ht->table[bin];
599     while (pair && pair->key) {
600         len = strlen(pair->key);
/* exact-match test (guarded by a length comparison on an elided line) */
606         cmp = strcmp(key, pair->key);
/* suffix test: compare key against the tail of the stored key */
614         cmp = strcmp(key, pair->key + len - keylen);
/* suffix hit: adjust the stored value by the offset into the pooled string */
616             uintptr_t up = (uintptr_t)pair->value;
626 * Free all allocated data in a hashtable, this is quite the amount
/*
 * Free every node in every bucket, invoking `callback` on each stored
 * value when supplied. The per-node free loop, the `i` declaration, and
 * the free of the table itself are on lines elided from this view.
 */
629 void util_htrem(hash_table_t *ht, void (*callback)(void *data)) {
632     for (; i < ht->size; ++i) {
633         hash_node_t *n = ht->table[i];
/*
 * Remove the node for `key` from bucket `bin`, calling `cb` on its value
 * if supplied. Walks via a pointer-to-pointer so unlinking needs no
 * special head case. The `tmp` assignment, the unlink/free statements,
 * and the cb invocation are on lines elided from this view.
 */
653 void util_htrmh(hash_table_t *ht, const char *key, size_t bin, void (*cb)(void*)) {
654     hash_node_t **pair = &ht->table[bin];
/* advance through the sorted chain until key could no longer appear */
657     while (*pair && (*pair)->key && strcmp(key, (*pair)->key) > 0)
658         pair = &(*pair)->next;
/* not present: nothing to remove (return on an elided line) */
661     if (!tmp || !tmp->key || strcmp(key, tmp->key) != 0)
/* Convenience wrapper: hash the key, then remove via util_htrmh. */
672 void util_htrm(hash_table_t *ht, const char *key, void (*cb)(void*)) {
673     util_htrmh(ht, key, util_hthash(ht, key), cb);
/* Destroy the whole table with no per-value callback. */
676 void util_htdel(hash_table_t *ht) {
677     util_htrem(ht, NULL);
681 * The following functions below implement printing / dumping of statistical
/*
 * Hex+ASCII dump of a leaked allocation's user data, `cols` bytes per row
 * (rows are padded out to a full column width). The i/j declarations, the
 * non-printable-placeholder branch, and the closing of the ASCII column
 * are on lines elided from this view.
 */
684 static void stat_dump_mem_contents(stat_mem_block_t *memory, uint16_t cols) {
/* iterate over the data rounded up to a whole number of rows */
686     for (i = 0; i < memory->size + ((memory->size % cols) ? (cols - memory->size % cols) : 0); i++) {
687         if (i % cols == 0) con_out(" 0x%06X: ", i);
/* hex column: real bytes only; padding cells handled on elided lines */
688         if (i < memory->size) con_out("%02X " , 0xFF & ((unsigned char*)(memory + 1))[i]);
/* at the end of each row, re-walk it to print the ASCII column */
691         if ((uint16_t)(i % cols) == (cols - 1)) {
692             for (j = i - (cols - 1); j <= i; j++) {
696                 : (util_isprint(((unsigned char*)(memory + 1))[j]))
697                 ? 0xFF & ((unsigned char*)(memory + 1)) [j]
/*
 * Report every allocation still on the live list as a leak, printing its
 * origin (file:line, size -- argument lines partially elided) and, when
 * the memdump option is enabled (guard elided), its contents. Headers are
 * valgrind redzones, so each is opened before reading and re-sealed after.
 */
706 static void stat_dump_mem_leaks(void) {
707     stat_mem_block_t *info;
708     /* we need access to the root for this */
709     VALGRIND_MAKE_MEM_DEFINED(stat_mem_block_root, sizeof(stat_mem_block_t));
710     for (info = stat_mem_block_root; info; info = info->next) {
711         /* we need access to the block */
712         VALGRIND_MAKE_MEM_DEFINED(info, sizeof(stat_mem_block_t));
713         con_out("lost: %u (bytes) at %s:%u\n",
719         stat_dump_mem_contents(info, OPTS_OPTION_U16(OPTION_MEMDUMPCOLS));
722          * we're finished with the access, the redzone should be marked
723          * inaccesible so that invalid read/writes that could 'step-into'
724          * those redzones will show up as invalid read/writes in valgrind.
726         VALGRIND_MAKE_MEM_NOACCESS(info, sizeof(stat_mem_block_t));
728     VALGRIND_MAKE_MEM_NOACCESS(stat_mem_block_root, sizeof(stat_mem_block_t));
/*
 * Print the global memory counters as a summary table. The whole body is
 * one con_out call whose format string spans continuation lines -- do not
 * break it up. NOTE(review): uint64_t arguments are printed with %llu;
 * that assumes unsigned long long is 64-bit here (PRIu64 would be exact)
 * -- confirm against the project's supported platforms.
 */
731 static void stat_dump_mem_info(void) {
732     con_out("Memory Information:\n\
733     Total allocations:   %llu\n\
734     Total deallocations: %llu\n\
735     Total allocated:     %f (MB)\n\
736     Total deallocated:   %f (MB)\n\
737     Total peak memory:   %f (MB)\n\
738     Total leaked memory: %f (MB) in %llu allocations\n",
739         stat_mem_allocated_total,
740         stat_mem_deallocated_total,
741         (float)(stat_mem_allocated) / 1048576.0f,
742         (float)(stat_mem_deallocated) / 1048576.0f,
743         (float)(stat_mem_peak) / 1048576.0f,
744         (float)(stat_mem_allocated - stat_mem_deallocated) / 1048576.0f,
745         stat_mem_allocated_total - stat_mem_deallocated_total
/*
 * Print every occupied entry of a size histogram using the caller's format
 * string (expects three unsigned args: row index, key, value), and, when
 * `size` is non-NULL (guard elided), accumulate key*value into it so the
 * caller can report total bytes. The early-return guard for a NULL table
 * and the j increment are on lines elided from this view.
 */
749 static void stat_dump_stats_table(stat_size_table_t table, const char *string, uint64_t *size) {
755     for (i = 0, j = 1; i < ST_SIZE; i++) {
756         stat_size_entry_t *entry;
/* skip empty slots */
758         if (!(entry = table[i]))
761         con_out(string, (unsigned)j, (unsigned)entry->key, (unsigned)entry->value);
/* optional byte-total accumulation for the caller */
765             *size += entry->key * entry->value;
/*
 * NOTE(review): this is the interior of the top-level statistics dump
 * routine; its signature and several argument/closing lines are elided
 * from this view. When memcheck or statistics output is requested it
 * prints the vector/strdup/hashtable summaries and per-size tables, then
 * unconditionally tears down the histograms and emits the memory info
 * and leak dumps according to the debug/memcheck options.
 */
770     if (OPTS_OPTION_BOOL(OPTION_MEMCHK) ||
771         OPTS_OPTION_BOOL(OPTION_STATISTICS)) {
774         con_out("Memory Statistics:\n\
775     Total vectors allocated:       %llu\n\
776     Total string duplicates:       %llu\n\
777     Total string duplicate memory: %f (MB)\n\
778     Total hashtables allocated:    %llu\n\
779     Total unique vector sizes:     %llu\n",
782             (float)(stat_mem_strdups) / 1048576.0f,
783             stat_used_hashtables,
/* per-element-size vector table; accumulates total vector bytes (arg elided) */
787         stat_dump_stats_table (
789             " %2u| # of %5u byte vectors: %u\n",
794             "    Total unique hashtable sizes: %llu\n",
/* per-bucket-count hashtable table (no byte accumulation visible) */
798         stat_dump_stats_table (
799             stat_size_hashtables,
800             " %2u| # of %5u element hashtables: %u\n",
805             "    Total vector memory:          %f (MB)\n\n",
806             (float)(mem) / 1048576.0f
/* release the histograms now that they have been reported */
810     if (stat_size_vectors)
811         stat_size_del(stat_size_vectors);
812     if (stat_size_hashtables)
813         stat_size_del(stat_size_hashtables);
815     if (OPTS_OPTION_BOOL(OPTION_DEBUG) ||
816         OPTS_OPTION_BOOL(OPTION_MEMCHK))
817         stat_dump_mem_info();
/* leak report only in debug mode */
819     if (OPTS_OPTION_BOOL(OPTION_DEBUG))
820         stat_dump_mem_leaks();