// avoid reusing the same buffer objects on consecutive frames
#define R_BUFFERDATA_CYCLE 3

// One dynamic buffer generation in a singly linked per-frame list; newer
// generations are pushed on the head and obsolete ones hang off ->purge until
// the frame cycle comes around again and they can be destroyed safely.
typedef struct r_bufferdata_buffer_s
{
	struct r_bufferdata_buffer_s *purge; // older buffer to free on next frame
	size_t size; // how much usable space
	size_t current; // how much space in use
	r_meshbuffer_t *buffer; // the buffer itself
}
r_bufferdata_buffer_t;

static int r_bufferdata_cycle = 0; // incremented and wrapped each frame
// list head per frame cycle and per buffer type (vertex/index16/index32/uniform)
static r_bufferdata_buffer_t *r_bufferdata_buffer[R_BUFFERDATA_CYCLE][R_BUFFERDATA_COUNT];
+
+/// frees all dynamic buffers
+void R_BufferData_Reset(void)
+{
+ int cycle, type;
+ r_bufferdata_buffer_t **p, *mem;
+ for (cycle = 0;cycle < R_BUFFERDATA_CYCLE;cycle++)
+ {
+ for (type = 0;type < R_BUFFERDATA_COUNT;type++)
+ {
+ // free all buffers
+ p = &r_bufferdata_buffer[cycle][type];
+ while (*p)
+ {
+ mem = *p;
+ *p = (*p)->purge;
+ if (mem->buffer)
+ R_Mesh_DestroyMeshBuffer(mem->buffer);
+ Mem_Free(mem);
+ }
+ }
+ }
+}
+
// resize buffer as needed (this actually makes a new one, the old one will be recycled next frame)
static void R_BufferData_Resize(r_bufferdata_type_t type, qboolean mustgrow, size_t minsize)
{
	r_bufferdata_buffer_t *mem = r_bufferdata_buffer[r_bufferdata_cycle][type];
	size_t size;
	float newvalue = r_buffermegs[type].value;

	// increase the cvar if we have to (but only if we already have a mem)
	if (mustgrow && mem)
		newvalue *= 2.0f;
	// clamp before the growth loop so an out-of-range cvar value starts sane
	// (clamped again below after growing)
	newvalue = bound(0.25f, newvalue, 256.0f);
	// keep doubling until the configured megabyte count can hold minsize bytes
	while (newvalue * 1024*1024 < minsize)
		newvalue *= 2.0f;

	// clamp the cvar to valid range
	newvalue = bound(0.25f, newvalue, 256.0f);
	// write the grown/clamped value back so the user sees the effective size
	if (r_buffermegs[type].value != newvalue)
		Cvar_SetValueQuick(&r_buffermegs[type], newvalue);

	// calculate size in bytes (hard floor 128KB, hard ceiling 256MB)
	size = (size_t)(newvalue * 1024*1024);
	size = bound(131072, size, 256*1024*1024);

	// allocate a new buffer if the size is different (purge old one later)
	// or if we were told we must grow the buffer
	if (!mem || mem->size != size || mustgrow)
	{
		mem = (r_bufferdata_buffer_t *)Mem_Alloc(r_main_mempool, sizeof(*mem));
		mem->size = size;
		mem->current = 0;
		// create the GPU-side buffer with per-type flags
		// (isindex16, isuniform, isdynamic, isindex-ish flags per engine API
		//  — NOTE(review): confirm argument meanings against R_Mesh_CreateMeshBuffer)
		if (type == R_BUFFERDATA_VERTEX)
			mem->buffer = R_Mesh_CreateMeshBuffer(NULL, mem->size, "dynamicbuffervertex", false, false, true, false);
		else if (type == R_BUFFERDATA_INDEX16)
			mem->buffer = R_Mesh_CreateMeshBuffer(NULL, mem->size, "dynamicbufferindex16", true, false, true, true);
		else if (type == R_BUFFERDATA_INDEX32)
			mem->buffer = R_Mesh_CreateMeshBuffer(NULL, mem->size, "dynamicbufferindex32", true, false, true, false);
		else if (type == R_BUFFERDATA_UNIFORM)
			mem->buffer = R_Mesh_CreateMeshBuffer(NULL, mem->size, "dynamicbufferuniform", false, true, true, false);
		// push the new buffer on the head; the old head will be freed by
		// R_BufferData_NewFrame when this cycle comes around again
		mem->purge = r_bufferdata_buffer[r_bufferdata_cycle][type];
		r_bufferdata_buffer[r_bufferdata_cycle][type] = mem;
	}
}
+
+void R_BufferData_NewFrame(void)
+{
+ int type;
+ r_bufferdata_buffer_t **p, *mem;
+ // cycle to the next frame's buffers
+ r_bufferdata_cycle = (r_bufferdata_cycle + 1) % R_BUFFERDATA_CYCLE;
+ // if we ran out of space on the last time we used these buffers, free the old memory now
+ for (type = 0;type < R_BUFFERDATA_COUNT;type++)
+ {
+ if (r_bufferdata_buffer[r_bufferdata_cycle][type])
+ {
+ R_BufferData_Resize((r_bufferdata_type_t)type, false, 131072);
+ // free all but the head buffer, this is how we recycle obsolete
+ // buffers after they are no longer in use
+ p = &r_bufferdata_buffer[r_bufferdata_cycle][type]->purge;
+ while (*p)
+ {
+ mem = *p;
+ *p = (*p)->purge;
+ if (mem->buffer)
+ R_Mesh_DestroyMeshBuffer(mem->buffer);
+ Mem_Free(mem);
+ }
+ // reset the current offset
+ r_bufferdata_buffer[r_bufferdata_cycle][type]->current = 0;
+ }
+ }
+}
+
+r_meshbuffer_t *R_BufferData_Store(size_t datasize, const void *data, r_bufferdata_type_t type, int *returnbufferoffset)
+{
+ r_bufferdata_buffer_t *mem;
+ int offset = 0;
+ int padsize;
+
+ *returnbufferoffset = 0;
+
+ // align size to a byte boundary appropriate for the buffer type, this
+ // makes all allocations have aligned start offsets
+ if (type == R_BUFFERDATA_UNIFORM)
+ padsize = (datasize + r_uniformbufferalignment - 1) & ~(r_uniformbufferalignment - 1);
+ else
+ padsize = (datasize + 15) & ~15;
+
+ // if we ran out of space in this buffer we must allocate a new one
+ if (!r_bufferdata_buffer[r_bufferdata_cycle][type] || r_bufferdata_buffer[r_bufferdata_cycle][type]->current + padsize > r_bufferdata_buffer[r_bufferdata_cycle][type]->size)
+ R_BufferData_Resize(type, true, padsize);
+
+ // if the resize did not give us enough memory, fail
+ if (!r_bufferdata_buffer[r_bufferdata_cycle][type] || r_bufferdata_buffer[r_bufferdata_cycle][type]->current + padsize > r_bufferdata_buffer[r_bufferdata_cycle][type]->size)
+ Sys_Error("R_BufferData_Store: failed to create a new buffer of sufficient size\n");
+
+ mem = r_bufferdata_buffer[r_bufferdata_cycle][type];
+ offset = (int)mem->current;
+ mem->current += padsize;
+
+ // upload the data to the buffer at the chosen offset
+ if (offset == 0)
+ R_Mesh_UpdateMeshBuffer(mem->buffer, NULL, mem->size, false, 0);
+ R_Mesh_UpdateMeshBuffer(mem->buffer, data, datasize, true, offset);
+
+ // count the usage for stats
+ r_refdef.stats[r_stat_bufferdatacurrent_vertex + type] = max(r_refdef.stats[r_stat_bufferdatacurrent_vertex + type], (int)mem->current);
+ r_refdef.stats[r_stat_bufferdatasize_vertex + type] = max(r_refdef.stats[r_stat_bufferdatasize_vertex + type], (int)mem->size);
+
+ // return the buffer offset
+ *returnbufferoffset = offset;
+
+ return mem->buffer;
+}
+
+//==================================================================================
+