qboolean mem_bigendian = false;
void *mem_mutex = NULL;
+// divVerent: enables file backed malloc using mmap to conserve swap space (instead of malloc)
+#ifndef FILE_BACKED_MALLOC
+# define FILE_BACKED_MALLOC 0
+#endif
+
// LordHavoc: enables our own low-level allocator (instead of malloc)
-#define MEMCLUMPING 0
-#define MEMCLUMPING_FREECLUMPS 0
+#ifndef MEMCLUMPING
+# define MEMCLUMPING 0
+#endif
+#ifndef MEMCLUMPING_FREECLUMPS
+# define MEMCLUMPING_FREECLUMPS 0
+#endif
#if MEMCLUMPING
// smallest unit we care about is this many bytes
#define MEMUNIT 128
// try to do 32MB clumps, but overhead eats into this
-#define MEMWANTCLUMPSIZE (1<<27)
+#ifndef MEMWANTCLUMPSIZE
+# define MEMWANTCLUMPSIZE (1<<27)
+#endif
// give malloc padding so we can't waste most of a page at the end
#define MEMCLUMPSIZE (MEMWANTCLUMPSIZE - MEMWANTCLUMPSIZE/MEMUNIT/32 - 128)
#define MEMBITS (MEMCLUMPSIZE / MEMUNIT)
void Mem_PrintStats(void);
void Mem_PrintList(size_t minallocationsize);
+#if FILE_BACKED_MALLOC
+// Route malloc/free through a file-backed mmap so big allocations are
+// charged to an unlinked temp file rather than swap space.
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+// Header placed in front of every allocation so mmap_free knows how many
+// bytes to munmap.
+typedef struct mmap_data_s
+{
+	size_t len;
+}
+mmap_data_t;
+// Allocate 'size' usable bytes backed by an anonymous-by-unlink temp file.
+// Returns NULL on any failure, otherwise a pointer just past the header.
+static void *mmap_malloc(size_t size)
+{
+	char vabuf[MAX_OSPATH + 1];
+	char *tmpdir = getenv("TEMP");
+	mmap_data_t *data;
+	int fd;
+	size += sizeof(mmap_data_t); // reserve room for the length header
+	dpsnprintf(vabuf, sizeof(vabuf), "%s/darkplaces.XXXXXX", tmpdir ? tmpdir : "/tmp");
+	fd = mkstemp(vabuf);
+	if(fd < 0)
+		return NULL;
+	// the file must actually be this large, or touching the mapping SIGBUSes
+	if(ftruncate(fd, (off_t)size) < 0)
+	{
+		close(fd);
+		unlink(vabuf);
+		return NULL;
+	}
+	data = (mmap_data_t *) mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd, 0);
+	close(fd); // the mapping keeps the file contents alive
+	unlink(vabuf); // file is reclaimed once the mapping goes away
+	if(data == MAP_FAILED) // mmap signals failure with MAP_FAILED, not NULL
+		return NULL;
+	data->len = size;
+	return (void *) (data + 1);
+}
+// Release a block obtained from mmap_malloc; NULL is a no-op.
+static void mmap_free(void *mem)
+{
+	mmap_data_t *data;
+	if(!mem)
+		return;
+	data = ((mmap_data_t *) mem) - 1; // step back to the hidden header
+	munmap(data, data->len);
+}
+#define malloc mmap_malloc
+#define free mmap_free
+#endif
+
#if MEMCLUMPING != 2
// some platforms have a malloc that returns NULL but succeeds later
// (Windows growing its swapfile for example)
return NULL;
}
if (pool == NULL)
- Sys_Error("Mem_Alloc: pool == NULL (alloc at %s:%i)", filename, fileline);
+ {
+ if(olddata)
+ pool = ((memheader_t *)((unsigned char *) olddata - sizeof(memheader_t)))->pool;
+ else
+ Sys_Error("Mem_Alloc: pool == NULL (alloc at %s:%i)", filename, fileline);
+ }
if (mem_mutex)
Thread_LockMutex(mem_mutex);
if (developer_memory.integer)
memset(l, 0, sizeof(*l));
}
-// VorteX: hacked Mem_ExpandableArray_AllocRecord, it does allocate record at certain index
-void *Mem_ExpandableArray_AllocRecordAtIndex(memexpandablearray_t *l, size_t index)
-{
- size_t j;
- if (index >= l->numarrays)
- {
- if (l->numarrays == l->maxarrays)
- {
- memexpandablearray_array_t *oldarrays = l->arrays;
- l->maxarrays = max(l->maxarrays * 2, 128);
- l->arrays = (memexpandablearray_array_t*) Mem_Alloc(l->mempool, l->maxarrays * sizeof(*l->arrays));
- if (oldarrays)
- {
- memcpy(l->arrays, oldarrays, l->numarrays * sizeof(*l->arrays));
- Mem_Free(oldarrays);
- }
- }
- l->arrays[index].numflaggedrecords = 0;
- l->arrays[index].data = (unsigned char *) Mem_Alloc(l->mempool, (l->recordsize + 1) * l->numrecordsperarray);
- l->arrays[index].allocflags = l->arrays[index].data + l->recordsize * l->numrecordsperarray;
- l->numarrays++;
- }
- if (l->arrays[index].numflaggedrecords < l->numrecordsperarray)
- {
- for (j = 0;j < l->numrecordsperarray;j++)
- {
- if (!l->arrays[index].allocflags[j])
- {
- l->arrays[index].allocflags[j] = true;
- l->arrays[index].numflaggedrecords++;
- memset(l->arrays[index].data + l->recordsize * j, 0, l->recordsize);
- return (void *)(l->arrays[index].data + l->recordsize * j);
- }
- }
- }
- return NULL;
-}
-
void *Mem_ExpandableArray_AllocRecord(memexpandablearray_t *l)
{
size_t i, j;
u.s = 0x100;
mem_bigendian = u.b[0] != 0;
- if (Thread_HasThreads())
- mem_mutex = Thread_CreateMutex();
-
sentinel_seed = rand();
poolchain = NULL;
tempmempool = Mem_AllocPool("Temporary Memory", POOLFLAG_TEMP, NULL);
zonemempool = Mem_AllocPool("Zone", 0, NULL);
+
+ if (Thread_HasThreads())
+ mem_mutex = Thread_CreateMutex();
}
void Memory_Shutdown (void)