Removed extras directory
This commit is contained in:
parent
7743af853a
commit
043fa4cba7
5 changed files with 109 additions and 77 deletions
|
@ -228,7 +228,7 @@ endif
|
|||
# Define include paths for required headers: INCLUDE_PATHS
|
||||
# NOTE: Some external/extras libraries could be required (stb, easings...)
|
||||
#------------------------------------------------------------------------------------------------
|
||||
INCLUDE_PATHS = -I. -I$(RAYLIB_PATH)/src -I$(RAYLIB_PATH)/src/external -I$(RAYLIB_PATH)/src/extras
|
||||
INCLUDE_PATHS = -I. -I$(RAYLIB_PATH)/src -I$(RAYLIB_PATH)/src/external
|
||||
|
||||
# Define additional directories containing required header files
|
||||
ifeq ($(PLATFORM),PLATFORM_DESKTOP)
|
||||
|
|
|
@ -239,7 +239,7 @@ endif
|
|||
|
||||
# Define include paths for required headers
|
||||
# NOTE: Some external/extras libraries could be required (stb, easings...)
|
||||
INCLUDE_PATHS = -I. -I$(RAYLIB_PATH)/src -I$(RAYLIB_PATH)/src/external -I$(RAYLIB_PATH)/src/extras
|
||||
INCLUDE_PATHS = -I. -I$(RAYLIB_PATH)/src -I$(RAYLIB_PATH)/src/external
|
||||
|
||||
# Define additional directories containing required header files
|
||||
ifeq ($(PLATFORM),PLATFORM_RPI)
|
||||
|
|
|
@ -19,8 +19,8 @@ parse:
|
|||
./raylib_parser -i ../src/raylib.h -o raylib_api.$(EXTENSION) -f $(FORMAT) -d RLAPI
|
||||
./raylib_parser -i ../src/raymath.h -o raymath_api.$(EXTENSION) -f $(FORMAT) -d RMAPI
|
||||
./raylib_parser -i ../src/rlgl.h -o rlgl_api.$(EXTENSION) -f $(FORMAT) -d RLAPI -t "RLGL IMPLEMENTATION"
|
||||
./raylib_parser -i ../src/extras/easings.h -o easings_api.$(EXTENSION) -f $(FORMAT) -d EASEDEF
|
||||
./raylib_parser -i ../src/extras/rmem.h -o rmem_api.$(EXTENSION) -f $(FORMAT) -d RMEMAPI -t "RMEM IMPLEMENTATION"
|
||||
./raylib_parser -i ../src/easings.h -o easings_api.$(EXTENSION) -f $(FORMAT) -d EASEDEF
|
||||
./raylib_parser -i ../src/rmem.h -o rmem_api.$(EXTENSION) -f $(FORMAT) -d RMEMAPI -t "RMEM IMPLEMENTATION"
|
||||
./raylib_parser -i ../physac.h -o physac_api.$(EXTENSION) -f $(FORMAT) -d PHYSACDEF -t "PHYSAC IMPLEMENTATION"
|
||||
./raylib_parser -i ../raygui.h -o raygui_api.$(EXTENSION) -f $(FORMAT) -d RAYGUIAPI -t "RAYGUI IMPLEMENTATION"
|
||||
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
/**********************************************************************************************
|
||||
*
|
||||
* rmem - raylib memory pool and objects pool
|
||||
* rmem v1.3 - raylib memory pool and objects pool
|
||||
*
|
||||
* A quick, efficient, and minimal free list and arena-based allocator
|
||||
*
|
||||
* PURPOSE:
|
||||
* - A quicker, efficient memory allocator alternative to 'malloc' and friends.
|
||||
* - Reduce the possibilities of memory leaks for beginner developers using Raylib.
|
||||
* - A quicker, efficient memory allocator alternative to 'malloc()' and friends.
|
||||
* - Reduce the possibilities of memory leaks for beginner developers using raylib.
|
||||
* - Being able to flexibly range check memory if necessary.
|
||||
*
|
||||
* CONFIGURATION:
|
||||
|
@ -16,6 +16,17 @@
|
|||
* If not defined, the library is in header only mode and can be included in other headers
|
||||
* or source files without problems. But only ONE file should hold the implementation.
|
||||
*
|
||||
* CHANGELOG:
|
||||
*
|
||||
* v1.0: First version
|
||||
* v1.1: Bug patches for the mempool and addition of object pool
|
||||
* v1.2: Addition of bidirectional arena
|
||||
* v1.3: Several changes:
|
||||
*                    Optimizations of allocators
|
||||
* Renamed 'Stack' to 'Arena'
|
||||
* Replaced certain define constants with an anonymous enum
|
||||
* Refactored MemPool to no longer require active or deferred defragging
|
||||
*
|
||||
*
|
||||
* LICENSE: zlib/libpng
|
||||
*
|
||||
|
@ -61,7 +72,13 @@
|
|||
// Types and Structures Definition
|
||||
//----------------------------------------------------------------------------------
|
||||
|
||||
// Memory Pool
|
||||
enum {
|
||||
MEMPOOL_BUCKET_SIZE = 8,
|
||||
MEMPOOL_BUCKET_BITS = (sizeof(uintptr_t) >> 1) + 1,
|
||||
MEM_SPLIT_THRESHOLD = sizeof(uintptr_t) * 4
|
||||
};
|
||||
|
||||
// Memory pool node
|
||||
typedef struct MemNode MemNode;
|
||||
struct MemNode {
|
||||
size_t size;
|
||||
|
@ -74,33 +91,25 @@ typedef struct AllocList {
|
|||
size_t len;
|
||||
} AllocList;
|
||||
|
||||
// Arena allocator.
|
||||
// Arena allocator
|
||||
typedef struct Arena {
|
||||
uintptr_t mem, offs;
|
||||
size_t size;
|
||||
} Arena;
|
||||
|
||||
|
||||
enum {
|
||||
MEMPOOL_BUCKET_SIZE = 8,
|
||||
MEMPOOL_BUCKET_BITS = (sizeof(uintptr_t) >> 1) + 1,
|
||||
MEM_SPLIT_THRESHOLD = sizeof(uintptr_t) * 4
|
||||
};
|
||||
|
||||
// Memory pool
|
||||
typedef struct MemPool {
|
||||
AllocList large, buckets[MEMPOOL_BUCKET_SIZE];
|
||||
Arena arena;
|
||||
} MemPool;
|
||||
|
||||
|
||||
// Object Pool
|
||||
// Object pool
|
||||
typedef struct ObjPool {
|
||||
uintptr_t mem, offs;
|
||||
size_t objSize, freeBlocks, memSize;
|
||||
} ObjPool;
|
||||
|
||||
|
||||
// Double-Ended Stack aka Deque
|
||||
// Double-ended stack (aka Deque)
|
||||
typedef struct BiStack {
|
||||
uintptr_t mem, front, back;
|
||||
size_t size;
|
||||
|
@ -166,9 +175,8 @@ RMEMAPI intptr_t BiStackMargins(BiStack destack);
|
|||
|
||||
#if defined(RMEM_IMPLEMENTATION)
|
||||
|
||||
#include <stdio.h> // Required for:
|
||||
#include <stdlib.h> // Required for:
|
||||
#include <string.h> // Required for:
|
||||
#include <stdlib.h> // Required for: malloc(), calloc(), free()
|
||||
#include <string.h> // Required for: memset(), memcpy(), memmove()
|
||||
|
||||
//----------------------------------------------------------------------------------
|
||||
// Defines and Macros
|
||||
|
@ -201,18 +209,21 @@ static MemNode *__SplitMemNode(MemNode *const node, const size_t bytes)
|
|||
MemNode *const r = ( MemNode* )(n + (node->size - bytes));
|
||||
node->size -= bytes;
|
||||
r->size = bytes;
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static void __InsertMemNodeBefore(AllocList *const list, MemNode *const insert, MemNode *const curr)
|
||||
{
|
||||
insert->next = curr;
|
||||
|
||||
if (curr->prev==NULL) list->head = insert;
|
||||
else
|
||||
{
|
||||
insert->prev = curr->prev;
|
||||
curr->prev->next = insert;
|
||||
}
|
||||
|
||||
curr->prev = insert;
|
||||
}
|
||||
|
||||
|
@ -220,10 +231,9 @@ static void __ReplaceMemNode(MemNode *const old, MemNode *const replace)
|
|||
{
|
||||
replace->prev = old->prev;
|
||||
replace->next = old->next;
|
||||
if( old->prev != NULL )
|
||||
old->prev->next = replace;
|
||||
if( old->next != NULL )
|
||||
old->next->prev = replace;
|
||||
|
||||
if (old->prev != NULL) old->prev->next = replace;
|
||||
if (old->next != NULL) old->next->prev = replace;
|
||||
}
|
||||
|
||||
|
||||
|
@ -244,7 +254,9 @@ static MemNode *__RemoveMemNode(AllocList *const list, MemNode *const node)
|
|||
if (list->tail != NULL) list->tail->next = NULL;
|
||||
else list->head = NULL;
|
||||
}
|
||||
|
||||
list->len--;
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
|
@ -253,10 +265,12 @@ static MemNode *__FindMemNode(AllocList *const list, const size_t bytes)
|
|||
for (MemNode *node = list->head; node != NULL; node = node->next)
|
||||
{
|
||||
if (node->size < bytes) continue;
|
||||
// close in size - reduce fragmentation by not splitting.
|
||||
|
||||
// Close in size - reduce fragmentation by not splitting
|
||||
else if (node->size <= bytes + MEM_SPLIT_THRESHOLD) return __RemoveMemNode(list, node);
|
||||
else return __SplitMemNode(node, bytes);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -276,24 +290,29 @@ static void __InsertMemNode(MemPool *const mempool, AllocList *const list, MemNo
|
|||
mempool->arena.offs += iter->size;
|
||||
__RemoveMemNode(list, iter);
|
||||
iter = list->head;
|
||||
if (iter == NULL) {
|
||||
|
||||
if (iter == NULL)
|
||||
{
|
||||
list->head = node;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const uintptr_t inode = (uintptr_t)node;
|
||||
const uintptr_t iiter = (uintptr_t)iter;
|
||||
const uintptr_t iter_end = iiter + iter->size;
|
||||
const uintptr_t node_end = inode + node->size;
|
||||
|
||||
if (iter == node) return;
|
||||
else if (iter < node)
|
||||
{
|
||||
// node was coalesced prior.
|
||||
if (iter_end > inode) return;
|
||||
else if (iter_end==inode && !is_bucket)
|
||||
else if ((iter_end == inode) && !is_bucket)
|
||||
{
|
||||
// if we can coalesce, do so.
|
||||
iter->size += node->size;
|
||||
|
||||
return;
|
||||
}
|
||||
else if (iter->next == NULL)
|
||||
|
@ -302,6 +321,7 @@ static void __InsertMemNode(MemPool *const mempool, AllocList *const list, MemNo
|
|||
iter->next = node;
|
||||
node->prev = iter;
|
||||
list->len++;
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -309,7 +329,7 @@ static void __InsertMemNode(MemPool *const mempool, AllocList *const list, MemNo
|
|||
{
|
||||
// Address sort, lowest to highest aka ascending order.
|
||||
if (iiter < node_end) return;
|
||||
else if (iter==list->head && !is_bucket)
|
||||
else if ((iter == list->head) && !is_bucket)
|
||||
{
|
||||
if (iter_end == inode) iter->size += node->size;
|
||||
else if (node_end == iiter)
|
||||
|
@ -327,9 +347,10 @@ static void __InsertMemNode(MemPool *const mempool, AllocList *const list, MemNo
|
|||
list->head = node;
|
||||
list->len++;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
else if (iter_end==inode && !is_bucket)
|
||||
else if ((iter_end == inode) && !is_bucket)
|
||||
{
|
||||
// if we can coalesce, do so.
|
||||
iter->size += node->size;
|
||||
|
@ -359,12 +380,14 @@ MemPool CreateMemPool(const size_t size)
|
|||
{
|
||||
// Align the mempool size to at least the size of an alloc node.
|
||||
uint8_t *const restrict buf = malloc(size*sizeof *buf);
|
||||
|
||||
if (buf==NULL) return mempool;
|
||||
else
|
||||
{
|
||||
mempool.arena.size = size;
|
||||
mempool.arena.mem = (uintptr_t)buf;
|
||||
mempool.arena.offs = mempool.arena.mem + mempool.arena.size;
|
||||
|
||||
return mempool;
|
||||
}
|
||||
}
|
||||
|
@ -373,12 +396,14 @@ MemPool CreateMemPool(const size_t size)
|
|||
MemPool CreateMemPoolFromBuffer(void *const restrict buf, const size_t size)
|
||||
{
|
||||
MemPool mempool = { 0 };
|
||||
|
||||
if ((size == 0) || (buf == NULL) || (size <= sizeof(MemNode))) return mempool;
|
||||
else
|
||||
{
|
||||
mempool.arena.size = size;
|
||||
mempool.arena.mem = (uintptr_t)buf;
|
||||
mempool.arena.offs = mempool.arena.mem + mempool.arena.size;
|
||||
|
||||
return mempool;
|
||||
}
|
||||
}
|
||||
|
@ -441,6 +466,7 @@ void *MemPoolAlloc(MemPool *const mempool, const size_t size)
|
|||
// --------------
|
||||
new_mem->next = new_mem->prev = NULL;
|
||||
uint8_t *const restrict final_mem = (uint8_t *)new_mem + sizeof *new_mem;
|
||||
|
||||
return memset(final_mem, 0, new_mem->size - sizeof *new_mem);
|
||||
}
|
||||
}
|
||||
|
@ -448,7 +474,7 @@ void *MemPoolAlloc(MemPool *const mempool, const size_t size)
|
|||
void *MemPoolRealloc(MemPool *const restrict mempool, void *const ptr, const size_t size)
|
||||
{
|
||||
if (size > mempool->arena.size) return NULL;
|
||||
// NULL ptr should make this work like regular Allocation.
|
||||
// NULL ptr should make this work like regular Allocation
|
||||
else if (ptr == NULL) return MemPoolAlloc(mempool, size);
|
||||
else if ((uintptr_t)ptr - sizeof(MemNode) < mempool->arena.mem) return NULL;
|
||||
else
|
||||
|
@ -456,12 +482,14 @@ void *MemPoolRealloc(MemPool *const restrict mempool, void *const ptr, const siz
|
|||
MemNode *const node = (MemNode *)((uint8_t *)ptr - sizeof *node);
|
||||
const size_t NODE_SIZE = sizeof *node;
|
||||
uint8_t *const resized_block = MemPoolAlloc(mempool, size);
|
||||
|
||||
if (resized_block == NULL) return NULL;
|
||||
else
|
||||
{
|
||||
MemNode *const resized = (MemNode *)(resized_block - sizeof *resized);
|
||||
memmove(resized_block, ptr, (node->size > resized->size)? (resized->size - NODE_SIZE) : (node->size - NODE_SIZE));
|
||||
MemPoolFree(mempool, ptr);
|
||||
|
||||
return resized_block;
|
||||
}
|
||||
}
|
||||
|
@ -470,6 +498,7 @@ void *MemPoolRealloc(MemPool *const restrict mempool, void *const ptr, const siz
|
|||
void MemPoolFree(MemPool *const restrict mempool, void *const ptr)
|
||||
{
|
||||
const uintptr_t p = (uintptr_t)ptr;
|
||||
|
||||
if ((ptr == NULL) || (p - sizeof(MemNode) < mempool->arena.mem)) return;
|
||||
else
|
||||
{
|
||||
|
@ -522,11 +551,13 @@ void MemPoolReset(MemPool *const mempool)
|
|||
{
|
||||
mempool->large.head = mempool->large.tail = NULL;
|
||||
mempool->large.len = 0;
|
||||
|
||||
for (size_t i = 0; i < MEMPOOL_BUCKET_SIZE; i++)
|
||||
{
|
||||
mempool->buckets[i].head = mempool->buckets[i].tail = NULL;
|
||||
mempool->buckets[i].len = 0;
|
||||
}
|
||||
|
||||
mempool->arena.offs = mempool->arena.mem + mempool->arena.size;
|
||||
}
|
||||
|
||||
|
@ -537,11 +568,13 @@ void MemPoolReset(MemPool *const mempool)
|
|||
ObjPool CreateObjPool(const size_t objsize, const size_t len)
|
||||
{
|
||||
ObjPool objpool = { 0 };
|
||||
|
||||
if ((len == 0) || (objsize == 0)) return objpool;
|
||||
else
|
||||
{
|
||||
const size_t aligned_size = __AlignSize(objsize, sizeof(size_t));
|
||||
uint8_t *const restrict buf = calloc(len, aligned_size);
|
||||
|
||||
if (buf == NULL) return objpool;
|
||||
objpool.objSize = aligned_size;
|
||||
objpool.memSize = objpool.freeBlocks = len;
|
||||
|
@ -562,8 +595,9 @@ ObjPool CreateObjPoolFromBuffer(void *const restrict buf, const size_t objsize,
|
|||
{
|
||||
ObjPool objpool = { 0 };
|
||||
|
||||
// If the object size isn't large enough to align to a size_t, then we can't use it.
|
||||
// If the object size isn't large enough to align to a size_t, then we can't use it
|
||||
const size_t aligned_size = __AlignSize(objsize, sizeof(size_t));
|
||||
|
||||
if ((buf == NULL) || (len == 0) || (objsize < sizeof(size_t)) || (objsize*len != aligned_size*len)) return objpool;
|
||||
else
|
||||
{
|
||||
|
@ -589,6 +623,7 @@ void DestroyObjPool(ObjPool *const restrict objpool)
|
|||
{
|
||||
void *const restrict ptr = (void *)objpool->mem;
|
||||
free(ptr);
|
||||
|
||||
*objpool = (ObjPool){ 0 };
|
||||
}
|
||||
}
|
||||
|
@ -603,9 +638,10 @@ void *ObjPoolAlloc(ObjPool *const objpool)
|
|||
size_t *const restrict block = (size_t *)objpool->offs;
|
||||
objpool->freeBlocks--;
|
||||
|
||||
// after allocating, we set head to the address of the index that *Head holds.
|
||||
// After allocating, we set head to the address of the index that *Head holds.
|
||||
// Head = &pool[*Head * pool.objsize];
|
||||
objpool->offs = (objpool->freeBlocks != 0)? objpool->mem + (*block*objpool->objSize) : 0;
|
||||
|
||||
return memset(block, 0, objpool->objSize);
|
||||
}
|
||||
else return NULL;
|
||||
|
@ -614,6 +650,7 @@ void *ObjPoolAlloc(ObjPool *const objpool)
|
|||
void ObjPoolFree(ObjPool *const restrict objpool, void *const ptr)
|
||||
{
|
||||
uintptr_t block = (uintptr_t)ptr;
|
||||
|
||||
if ((ptr == NULL) || (block < objpool->mem) || (block > objpool->mem + objpool->memSize*objpool->objSize)) return;
|
||||
else
|
||||
{
|
||||
|
@ -641,9 +678,11 @@ void ObjPoolCleanUp(ObjPool *const restrict objpool, void **const restrict ptrre
|
|||
//----------------------------------------------------------------------------------
|
||||
// Module Functions Definition - Double-Ended Stack
|
||||
//----------------------------------------------------------------------------------
|
||||
|
||||
BiStack CreateBiStack(const size_t len)
|
||||
{
|
||||
BiStack destack = { 0 };
|
||||
|
||||
if (len == 0) return destack;
|
||||
|
||||
uint8_t *const buf = malloc(len*sizeof *buf);
|
||||
|
@ -652,18 +691,21 @@ BiStack CreateBiStack(const size_t len)
|
|||
destack.mem = (uintptr_t)buf;
|
||||
destack.front = destack.mem;
|
||||
destack.back = destack.mem + len;
|
||||
|
||||
return destack;
|
||||
}
|
||||
|
||||
BiStack CreateBiStackFromBuffer(void *const buf, const size_t len)
|
||||
{
|
||||
BiStack destack = { 0 };
|
||||
if (len == 0 || buf == NULL) return destack;
|
||||
|
||||
if ((len == 0) || (buf == NULL)) return destack;
|
||||
else
|
||||
{
|
||||
destack.size = len;
|
||||
destack.mem = destack.front = (uintptr_t)buf;
|
||||
destack.back = destack.mem + len;
|
||||
|
||||
return destack;
|
||||
}
|
||||
}
|
||||
|
@ -691,6 +733,7 @@ void *BiStackAllocFront(BiStack *const restrict destack, const size_t len)
|
|||
{
|
||||
uint8_t *const restrict ptr = (uint8_t *)destack->front;
|
||||
destack->front += ALIGNED_LEN;
|
||||
|
||||
return ptr;
|
||||
}
|
||||
}
|
||||
|
@ -708,6 +751,7 @@ void *BiStackAllocBack(BiStack *const restrict destack, const size_t len)
|
|||
{
|
||||
destack->back -= ALIGNED_LEN;
|
||||
uint8_t *const restrict ptr = (uint8_t *)destack->back;
|
||||
|
||||
return ptr;
|
||||
}
|
||||
}
|
||||
|
@ -737,15 +781,3 @@ inline intptr_t BiStackMargins(const BiStack destack)
|
|||
}
|
||||
|
||||
#endif // RMEM_IMPLEMENTATION
|
||||
|
||||
/*******
|
||||
* Changelog
|
||||
* v1.0: First Creation.
|
||||
* v1.1: bug patches for the mempool and addition of object pool.
|
||||
* v1.2: addition of bidirectional arena.
|
||||
* v1.3:
|
||||
* optimizations of allocators.
|
||||
* renamed 'Stack' to 'Arena'.
|
||||
* replaced certain define constants with an anonymous enum.
|
||||
* refactored MemPool to no longer require active or deferred defragging.
|
||||
********/
|
Loading…
Add table
Add a link
Reference in a new issue