Merge branch 'yalloc' into 'master'
Add upstream yalloc allocator

See merge request simulant/GLdc!65
Commit 8ee28fdaac

@@ -374,6 +374,10 @@ void _glKosThrowError(GLenum error, const char *function);
 void _glKosPrintError();
 GLubyte _glKosHasError();
 
+GLuint _glFreeTextureMemory();
+GLuint _glUsedTextureMemory();
+GLuint _glFreeContiguousTextureMemory();
+
 #define PVR_VERTEX_BUF_SIZE 2560 * 256
 #define MAX_TEXTURE_UNITS 2
 #define MAX_LIGHTS 8

GL/state.c (12 lines changed)

@@ -696,6 +696,16 @@ void APIENTRY glGetIntegerv(GLenum pname, GLint *params) {
             params[i] = COMPRESSED_FORMATS[i];
         }
     } break;
+    case GL_TEXTURE_FREE_MEMORY_ATI:
+    case GL_FREE_TEXTURE_MEMORY_KOS:
+        *params = _glFreeTextureMemory();
+        break;
+    case GL_USED_TEXTURE_MEMORY_KOS:
+        *params = _glUsedTextureMemory();
+        break;
+    case GL_FREE_CONTIGUOUS_TEXTURE_MEMORY_KOS:
+        *params = _glFreeContiguousTextureMemory();
+        break;
     default:
         _glKosThrowError(GL_INVALID_ENUM, __func__);
         _glKosPrintError();

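With these cases in place an application can poll the allocator statistics through the standard query entry point. A minimal sketch (illustration only, not part of this patch; note that GLdc writes a single byte count here, whereas the ATI_meminfo spec describes a four-element array in KB):

    GLint free_bytes = 0, used_bytes = 0, largest_block = 0;
    glGetIntegerv(GL_FREE_TEXTURE_MEMORY_KOS, &free_bytes);
    glGetIntegerv(GL_USED_TEXTURE_MEMORY_KOS, &used_bytes);
    glGetIntegerv(GL_FREE_CONTIGUOUS_TEXTURE_MEMORY_KOS, &largest_block);
    printf("VRAM: %d free, %d used, largest free block %d\n",
           free_bytes, used_bytes, largest_block);
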
@@ -715,7 +725,7 @@ const GLubyte *glGetString(GLenum name) {
         return (const GLubyte*) "1.2 (partial) - GLdc 1.1";
 
     case GL_EXTENSIONS:
-        return (const GLubyte*) "GL_ARB_framebuffer_object, GL_ARB_multitexture, GL_ARB_texture_rg, GL_EXT_paletted_texture, GL_EXT_shared_texture_palette, GL_KOS_multiple_shared_palette, GL_ARB_vertex_array_bgra, GL_ARB_vertex_type_2_10_10_10_rev";
+        return (const GLubyte*) "GL_ARB_framebuffer_object, GL_ARB_multitexture, GL_ARB_texture_rg, GL_EXT_paletted_texture, GL_EXT_shared_texture_palette, GL_KOS_multiple_shared_palette, GL_ARB_vertex_array_bgra, GL_ARB_vertex_type_2_10_10_10_rev, GL_KOS_texture_memory_management, GL_ATI_meminfo";
     }
 
     return (const GLubyte*) "GL_KOS_ERROR: ENUM Unsupported\n";

GL/texture.c (80 lines changed)

@@ -9,8 +9,15 @@
 #include "config.h"
 #include "../include/glext.h"
 #include "../include/glkos.h"
+
+#include "yalloc/yalloc.h"
+
 #include <kos/string.h>
 
+/* We always leave this amount of vram unallocated to prevent
+ * issues with the allocator */
+#define PVR_MEM_BUFFER_SIZE (64 * 1024)
+
 #define CLAMP_U (1<<1)
 #define CLAMP_V (1<<0)

@@ -26,6 +33,24 @@ static GLboolean BANKS_USED[4]; // Each time a 256 colour bank is used, this is
 static GLboolean SUBBANKS_USED[4][16]; // 4 counts of the used 16 colour banks within the 256 ones
 static GLenum INTERNAL_PALETTE_FORMAT = GL_RGBA4;
 
+static void* YALLOC_BASE = NULL;
+static size_t YALLOC_SIZE = 0;
+
+static void* yalloc_alloc_and_defrag(size_t size) {
+    void* ret = yalloc_alloc(YALLOC_BASE, size);
+
+    if(!ret) {
+        /* Tried to allocate, but out of room, let's try defragging
+         * and repeating the alloc */
+        glDefragmentTextureMemory_KOS();
+        ret = yalloc_alloc(YALLOC_BASE, size);
+    }
+
+    assert(ret && "Out of PVR memory!");
+
+    return ret;
+}
+
 static TexturePalette* _initTexturePalette() {
     TexturePalette* palette = (TexturePalette*) malloc(sizeof(TexturePalette));
     assert(palette);

@@ -307,6 +332,11 @@ GLubyte _glInitTextures() {
 
     memset((void*) BANKS_USED, 0x0, sizeof(BANKS_USED));
     memset((void*) SUBBANKS_USED, 0x0, sizeof(SUBBANKS_USED));
 
+    size_t vram_free = pvr_mem_available();
+    YALLOC_SIZE = vram_free - PVR_MEM_BUFFER_SIZE; /* Take all but 64kb VRAM */
+    YALLOC_BASE = pvr_mem_malloc(YALLOC_SIZE);
+    yalloc_init(YALLOC_BASE, YALLOC_SIZE);
+
     return 1;
 }

@@ -393,7 +423,7 @@ void APIENTRY glDeleteTextures(GLsizei n, GLuint *textures) {
     }
 
     if(txr->data) {
-        pvr_mem_free(txr->data);
+        yalloc_free(YALLOC_BASE, txr->data);
         txr->data = NULL;
     }

@@ -594,13 +624,15 @@ void APIENTRY glCompressedTexImage2DARB(GLenum target,
     active->isCompressed = GL_TRUE;
 
     /* Odds are slim new data is same size as old, so free always */
-    if(active->data)
-        pvr_mem_free(active->data);
+    if(active->data) {
+        yalloc_free(YALLOC_BASE, active->data);
+    }
 
-    active->data = pvr_mem_malloc(imageSize);
+    active->data = yalloc_alloc_and_defrag(imageSize);
 
-    if(data)
+    if(data) {
         sq_cpy(active->data, data, imageSize);
+    }
 }
 
 static GLint _cleanInternalFormat(GLint internalFormat) {

@@ -897,7 +929,7 @@ GLboolean _glIsMipmapComplete(const TextureObject* obj) {
     }
 
     GLsizei i = 0;
-    for(; i < obj->mipmapCount; ++i) {
+    for(; i < (GLubyte) obj->mipmapCount; ++i) {
         if((obj->mipmap & (1 << i)) == 0) {
             return GL_FALSE;
         }

@@ -924,13 +956,13 @@ void _glAllocateSpaceForMipmaps(TextureObject* active) {
     memcpy(temp, active->data, size);
 
     /* Free the PVR data */
-    pvr_mem_free(active->data);
+    yalloc_free(YALLOC_BASE, active->data);
     active->data = NULL;
 
     /* Figure out how much room to allocate for mipmaps */
     GLuint bytes = _glGetMipmapDataSize(active);
 
-    active->data = pvr_mem_malloc(bytes);
+    active->data = yalloc_alloc_and_defrag(bytes);
 
     /* If there was existing data, then copy it where it should go */
     memcpy(_glGetMipmapLocation(active, 0), temp, size);

@@ -1048,7 +1080,7 @@ void APIENTRY glTexImage2D(GLenum target, GLint level, GLint internalFormat,
            active->height != height ||
            active->color != pvr_format) {
         /* changed - free old texture memory */
-        pvr_mem_free(active->data);
+        yalloc_free(YALLOC_BASE, active->data);
         active->data = NULL;
         active->mipmap = 0;
         active->mipmapCount = 0;

@@ -1087,7 +1119,7 @@ void APIENTRY glTexImage2D(GLenum target, GLint level, GLint internalFormat,
         /* If we're uploading a mipmap level, we need to allocate the full amount of space */
         _glAllocateSpaceForMipmaps(active);
     } else {
-        active->data = pvr_mem_malloc(active->baseDataSize);
+        active->data = yalloc_alloc_and_defrag(active->baseDataSize);
     }
 
     assert(active->data);

@@ -1527,3 +1559,31 @@ GLAPI void APIENTRY glReadPixels(GLint x, GLint y, GLsizei width, GLsizei height
     _GL_UNUSED(pixels);
     assert(0 && "Not Implemented");
 }
+
+GLuint _glFreeTextureMemory() {
+    return yalloc_count_free(YALLOC_BASE);
+}
+
+GLuint _glUsedTextureMemory() {
+    return YALLOC_SIZE - _glFreeTextureMemory();
+}
+
+GLuint _glFreeContiguousTextureMemory() {
+    return yalloc_count_continuous(YALLOC_BASE);
+}
+
+GLAPI GLvoid APIENTRY glDefragmentTextureMemory_KOS(void) {
+    yalloc_defrag_start(YALLOC_BASE);
+
+    GLuint id;
+
+    /* Replace all texture pointers */
+    for(id = 0; id < MAX_TEXTURE_COUNT; id++){
+        if(glIsTexture(id)){
+            TextureObject* txr = (TextureObject*) named_array_get(&TEXTURE_OBJECTS, id);
+            txr->data = yalloc_defrag_address(YALLOC_BASE, txr->data);
+        }
+    }
+
+    yalloc_defrag_commit(YALLOC_BASE);
+}

GL/version.h (deleted)

@@ -1,3 +0,0 @@
-#pragma once
-
-#define GLDC_VERSION "1.2.0alpha"

GL/yalloc/LICENSE (new file, 21 lines)

MIT License

Copyright (c) [year] [fullname]

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

GL/yalloc/README.md (new file, 158 lines)

# Summary

yalloc is a memory efficient allocator which is intended for embedded
applications that only have a low amount of RAM and want to maximize its
utilization. Properties of the allocator:

- pools can be up to 128k
- user data is 32bit aligned
- 4 bytes overhead per allocation
- supports defragmentation
- uses a free list for first fit allocation strategy (most recently freed
  blocks are used first)
- extensively tested (see section below)
- MIT license

# Defragmentation

This feature was the initial motivation for this implementation. Especially
when dealing with highly memory-constrained environments, fragmenting memory
pools can be annoying. For this reason this implementation supports
defragmentation, which moves all allocated blocks into a contiguous range at the
beginning of the pool, leaving a maximized free range at the end.

As there is no garbage collector or other runtime system involved that updates
the references, the application must do so. This is done in three steps:

1. yalloc_defrag_start() is called. This calculates the new
   post-defragmentation addresses for all allocations, but otherwise leaves
   the allocations untouched.

2. yalloc_defrag_address() is called by the application for every pointer that
   points to an allocation. It returns the post-defragmentation address for
   the allocation. The application must update all its relevant pointers this
   way. Care must be taken not to dereference the moved pointers yet. If the
   application works with hierarchical data then this can easily be done by
   updating the pointers bottom-up (first the leaves, then their parents).

3. yalloc_defrag_commit() is called to finally perform the defragmentation.
   All allocated blocks are moved to their post-defragmentation address and
   the application can continue using the pool the normal way.

It is up to the application when (and if) it performs defragmentation. One
strategy would be to delay it until an allocation failure. Another approach
would be to perform the defragmentation regularly when there is nothing else to
do.

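A minimal sketch of the three-step protocol (illustration only, assuming a pool with a single live allocation referenced by one pointer):

    #include "yalloc.h"
    #include <stdint.h>

    static uint32_t pool_buf[4096];          /* hypothetical 16k, 32bit-aligned pool */

    void example(void)
    {
        yalloc_init(pool_buf, sizeof(pool_buf));
        void * a = yalloc_alloc(pool_buf, 100);

        /* ... more allocs/frees fragment the pool ... */

        yalloc_defrag_start(pool_buf);            /* 1: compute new addresses   */
        a = yalloc_defrag_address(pool_buf, a);   /* 2: translate every pointer */
        yalloc_defrag_commit(pool_buf);           /* 3: move the blocks         */

        /* a is valid again; the free space is now one contiguous block */
    }
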
# Configurable Defines

INTERNAL_VALIDATE

If this is not defined on the compiler commandline it will be defined as 0 if
NDEBUG is defined and otherwise as 1. If you want to disable internal
validation when NDEBUG is not defined then define INTERNAL_VALIDATE as 0 on the
compiler commandline.

If it is nonzero the heap will be validated via a bunch of assert() calls at
the end of every function that modifies the heap. This has roughly O(N*M)
overhead where N is the number of allocated blocks and M the number of free
blocks in a heap. For applications with enough live allocations this will get
significant.

YALLOC_VALGRIND

If this is defined in yalloc.c and NVALGRIND is not defined then
valgrind/memcheck.h is included and the allocator functions tell valgrind
about the pool and the allocations, and make the block headers inaccessible outside
of yalloc functions. This allows valgrind to detect a lot of the accidents that
can happen when dealing with dynamic memory. This also adds some overhead for every
yalloc call because most of them will "unprotect" the internal structure on
entry and "protect" it again (marking it as inaccessible for valgrind) before
returning.

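Note that in the copy vendored here the guard is actually spelled YALLOC_INTERNAL_VALIDATE (see yalloc_internals.h below), so with a GCC-style toolchain, disabling validation in a debug build would look along the lines of (illustrative command, not from this patch):

    cc -c -DYALLOC_INTERNAL_VALIDATE=0 yalloc.c
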
# Tests

The tests rely on internal validation of the pool (see INTERNAL_VALIDATE) to
check that no assumptions about the internal structure of the pool are
violated. They additionally check for correctness of observations that can be
made by using the public functions of the allocator (like checking if user data
stays unmodified). There are a few different scripts that run tests:

- run_coverage.sh runs a bunch of testfunctions that are carefully crafted to
  cover all code paths. Coverage data is generated by clang and a summary is
  shown at the end of the test.

- run_valgrind.sh tests if the valgrind integration is working as expected,
  runs the functions from the coverage test and some randomly generated
  testcases under valgrind.

- run_libfuzzer.sh uses libfuzzer from clang to generate interesting testcases
  and runs them in multiple jobs in parallel for 10 seconds. It also generates
  coverage data at the end (it always got 100% coverage in my testruns).

All tests exit with 0 and print "All fine!" at the end if there were no
errors. Coverage deficits are not counted as errors, so you have to look at the
summary (they should show 100% coverage!).

# Implementation Details

The headers and the user data are 32bit aligned. Headers have two 16bit fields
where the high 15 bits represent offsets (relative to the pool's address) to the
previous/next block. The macros HDR_PTR() and HDR_OFFSET() are used to
translate an offset to an address and back. The 32bit alignment is exploited to
allow pools of up to 128k with those 15 significant bits.
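As an illustration of that encoding (a sketch, not code from this patch): block addresses are 32bit aligned, so a block's byte offset from the pool start is even; storing it shifted right by one keeps the value within 15 bits and leaves the low bit free for a flag:

    /* hypothetical helpers mirroring HDR_OFFSET()/HDR_PTR() */
    static uint16_t encode(char * pool, char * block) {
        return (uint16_t)((block - pool) >> 1);          /* low bit stays 0 */
    }
    static char * decode(char * pool, uint16_t offset) {
        return pool + (size_t)((offset & ~1u) << 1);     /* mask out the flag bit */
    }
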
A pool is always occupied by non-overlapping blocks that link to their
previous/next block in address order via the prev/next field of Header.

Free blocks are always joined: No two free blocks will ever be neighbors.

Free blocks have an additional header of the same structure. This additional
header is used to build a list of free blocks (independent of their address
order).

yalloc_free() will insert the freed block at the front of the free list.
yalloc_alloc() searches that list front to back and takes the first block that
is big enough to satisfy the allocation.

There is always a Header at the front and at the end of the pool. The Header at
the end is degenerate: It is marked as "used" but has no next block (which is
usually used to determine the size of a block).

The prev-field of the very first block in the pool has special meaning: It
points to the first free block in the pool. Or, if the pool is currently
defragmenting (after yalloc_defrag_start() and before yalloc_defrag_commit()),
it points to the last header of the pool. This state can be recognized by checking
if it points to an empty block (normal pool state) or a used block
(defragmentation in progress). This logic can be seen in
yalloc_defrag_in_progress().

The lowest bits of next/prev have special meaning:

- low bit of prev is set for free blocks

- low bit of next is set for blocks with 32bit padding after the user data.
  This is needed when a block is allocated from a free block that leaves only
  4 free bytes after the user data... which is not enough to insert a
  free-header (which needs 8 bytes). The padding will be reclaimed when
  that block is freed or when the pool is defragmented. The predicate
  isPadded() can be used to test if a block is padded. Free blocks are never
  padded.

The predicate isNil() can be used to test if an offset points nowhere (it tests
if all 15 high bits of an offset are 1). The constant NIL has all but the
lowest bit set. It is used to set offsets to point to nowhere, and in some
places it is used to mask out the actual address bits of an offset. This should
be kept in mind when modifying the code and updating prev/next: Think carefully
about whether you have to preserve the low bit when updating an offset!

Defragmentation is done in two phases: First the user calls
yalloc_defrag_start(). This will put the pool in a special state where no
alloc/free calls are allowed. In this state the prev-fields of the used blocks
have a special meaning: They store the offset that the block will have after
defragmentation has finished. This information is used by yalloc_defrag_address(),
which can be called by the application to query the new addresses for its
allocations. After the application has updated all its pointers it must call
yalloc_defrag_commit(), which moves all used blocks into contiguous space at the
beginning of the pool, leaving one maximized free block at the end.

GL/yalloc/yalloc.c (new file, 802 lines)

#include "yalloc.h"
#include "yalloc_internals.h"

#include <assert.h>
#include <string.h>

#define ALIGN(num, align) (((num) + ((align) - 1)) & ~((align) - 1))

#if defined(YALLOC_VALGRIND) && !defined(NVALGRIND)
# define USE_VALGRIND 1
#else
# define USE_VALGRIND 0
#endif

#if USE_VALGRIND
# include <valgrind/memcheck.h>
#else
# define VALGRIND_MAKE_MEM_UNDEFINED(p, s) ((void)0)
# define VALGRIND_MAKE_MEM_DEFINED(p, s) ((void)0)
# define VALGRIND_MAKE_MEM_NOACCESS(p, s) ((void)0)
# define VALGRIND_CREATE_MEMPOOL(pool, rz, z) ((void)0)
# define VALGRIND_MEMPOOL_ALLOC(pool, p, s) ((void)0)
# define VALGRIND_MEMPOOL_FREE(pool, p) ((void)0)
# define VALGRIND_MEMPOOL_CHANGE(pool, a, b, s) ((void)0)
#endif

#define MARK_NEW_FREE_HDR(p) VALGRIND_MAKE_MEM_UNDEFINED(p, sizeof(Header) * 2)
#define MARK_NEW_HDR(p) VALGRIND_MAKE_MEM_UNDEFINED(p, sizeof(Header))
#define PROTECT_HDR(p) VALGRIND_MAKE_MEM_NOACCESS(p, sizeof(Header))
#define PROTECT_FREE_HDR(p) VALGRIND_MAKE_MEM_NOACCESS(p, sizeof(Header) * 2)
#define UNPROTECT_HDR(p) VALGRIND_MAKE_MEM_DEFINED(p, sizeof(Header))
#define UNPROTECT_FREE_HDR(p) VALGRIND_MAKE_MEM_DEFINED(p, sizeof(Header) * 2)


#if USE_VALGRIND
static void _unprotect_pool(void * pool)
{
  Header * cur = (Header*)pool;
  for (;;)
  {
    UNPROTECT_HDR(cur);
    if (isFree(cur))
      UNPROTECT_HDR(cur + 1);

    if (isNil(cur->next))
      break;

    cur = HDR_PTR(cur->next);
  }
}

static void _protect_pool(void * pool)
{
  Header * cur = (Header*)pool;
  while (cur)
  {
    Header * next = isNil(cur->next) ? NULL : HDR_PTR(cur->next);

    if (isFree(cur))
      VALGRIND_MAKE_MEM_NOACCESS(cur, (char*)next - (char*)cur);
    else
      PROTECT_HDR(cur);

    cur = next;
  }
}
#define assert_is_pool(pool) assert(VALGRIND_MEMPOOL_EXISTS(pool));

#else

static void _unprotect_pool(void * pool){(void)pool;}
static void _protect_pool(void * pool){(void)pool;}
#define assert_is_pool(pool) ((void)0)
#endif

// internal version that does not unprotect/protect the pool
static int _yalloc_defrag_in_progress(void * pool)
{
  // defragmentation is indicated by a free list with one entry: the last block of the pool, which has its "free"-bit cleared.
  Header * p = (Header*)pool;
  if (isNil(p->prev))
    return 0;

  return !(HDR_PTR(p->prev)->prev & 1);
}

int yalloc_defrag_in_progress(void * pool)
{
  _unprotect_pool(pool);
  int ret = _yalloc_defrag_in_progress(pool);
  _protect_pool(pool);
  return ret;
}

#if YALLOC_INTERNAL_VALIDATE

static size_t _count_free_list_occurences(Header * pool, Header * blk)
{
  int n = 0;
  if (!isNil(pool->prev))
  {
    Header * cur = HDR_PTR(pool->prev);
    for (;;)
    {
      if (cur == blk)
        ++n;

      if (isNil(cur[1].next))
        break;

      cur = HDR_PTR(cur[1].next);
    }
  }
  return n;
}

static size_t _count_addr_list_occurences(Header * pool, Header * blk)
{
  size_t n = 0;
  Header * cur = pool;
  for (;;)
  {
    if (cur == blk)
      ++n;

    if (isNil(cur->next))
      break;

    cur = HDR_PTR(cur->next);
  }
  return n;
}

static void _validate_user_ptr(void * pool, void * p)
{
  Header * hdr = (Header*)p - 1;
  size_t n = _count_addr_list_occurences((Header*)pool, hdr);
  assert(n == 1 && !isFree(hdr));
}

/**
Validates if all the invariants of a pool are intact.

This is very expensive when there are enough blocks in the heap (quadratic complexity!).
*/
static void _yalloc_validate(void * pool_)
{
  Header * pool = (Header*)pool_;
  Header * cur = pool;

  assert(!isNil(pool->next)); // there must always be at least two blocks: a free/used one and the final block at the end

  if (_yalloc_defrag_in_progress(pool))
  {
    Header * prevUsed = NULL;
    while (!isNil(cur->next))
    {
      if (!isFree(cur))
      { // it is a used block
        Header * newAddr = cur == pool ? pool : HDR_PTR(cur->prev);
        assert(newAddr <= cur);
        assert(newAddr >= pool);

        if (prevUsed)
        {
          Header * prevNewAddr = prevUsed == pool ? pool : HDR_PTR(prevUsed->prev);
          size_t prevBruttoSize = (char*)HDR_PTR(prevUsed->next) - (char*)prevUsed;
          if (isPadded(prevUsed))
            prevBruttoSize -= 4; // remove padding
          assert((char*)newAddr == (char*)prevNewAddr + prevBruttoSize);
        }
        else
        {
          assert(newAddr == pool);
        }

        prevUsed = cur;
      }

      cur = HDR_PTR(cur->next);
    }

    assert(cur == HDR_PTR(pool->prev)); // the free-list should point to the last block
    assert(!isFree(cur)); // the last block must not be free
  }
  else
  {
    Header * prev = NULL;

    // iterate blocks in address order
    for (;;)
    {
      if (prev)
      {
        Header * x = HDR_PTR(cur->prev);
        assert(x == prev);
      }

      int n = _count_free_list_occurences(pool, cur);
      if (isFree(cur))
      { // it is a free block
        assert(n == 1);
        assert(!isPadded(cur)); // free blocks must have a zero padding-bit

        if (prev)
        {
          assert(!isFree(prev)); // free blocks must not be direct neighbours
        }
      }
      else
      {
        assert(n == 0);
      }

      if (isNil(cur->next))
        break;

      Header * next = HDR_PTR(cur->next);
      assert((char*)next >= (char*)cur + sizeof(Header) * 2);
      prev = cur;
      cur = next;
    }

    assert(isNil(cur->next));

    if (!isNil(pool->prev))
    {
      // iterate free-list
      Header * f = HDR_PTR(pool->prev);
      assert(isNil(f[1].prev));
      for (;;)
      {
        assert(isFree(f)); // must be free

        int n = _count_addr_list_occurences(pool, f);
        assert(n == 1);

        if (isNil(f[1].next))
          break;

        f = HDR_PTR(f[1].next);
      }
    }
  }
}

#else
static void _yalloc_validate(void * pool){(void)pool;}
static void _validate_user_ptr(void * pool, void * p){(void)pool; (void)p;}
#endif

int yalloc_init(void * pool, size_t size)
{
  if (size > MAX_POOL_SIZE)
    return -1;

  // TODO: Error when pool is not properly aligned

  // TODO: Error when size is not a multiple of the alignment?
  while (size % sizeof(Header))
    --size;

  if(size < sizeof(Header) * 3)
    return -1;

  VALGRIND_CREATE_MEMPOOL(pool, 0, 0);

  Header * first = (Header*)pool;
  Header * last = (Header*)((char*)pool + size) - 1;

  MARK_NEW_FREE_HDR(first);
  MARK_NEW_HDR(first);

  first->prev = HDR_OFFSET(first) | 1;
  first->next = HDR_OFFSET(last);
  first[1].prev = NIL;
  first[1].next = NIL;

  last->prev = HDR_OFFSET(first);
  last->next = NIL;

  _unprotect_pool(pool);
  _yalloc_validate(pool);
  _protect_pool(pool);
  return 0;
}

void yalloc_deinit(void * pool)
{
#if USE_VALGRIND
  VALGRIND_DESTROY_MEMPOOL(pool);

  Header * last = (Header*)pool;
  UNPROTECT_HDR(last);
  while (!isNil(last->next))
  {
    Header * next = HDR_PTR(last->next);
    UNPROTECT_HDR(next);
    last = next;
  }

  VALGRIND_MAKE_MEM_UNDEFINED(pool, (char*)(last + 1) - (char*)pool);
#else
  (void)pool;
#endif
}

void * yalloc_alloc(void * pool, size_t size)
{
  assert_is_pool(pool);
  _unprotect_pool(pool);
  assert(!_yalloc_defrag_in_progress(pool));
  _yalloc_validate(pool);
  if (!size)
  {
    _protect_pool(pool);
    return NULL;
  }

  Header * root = (Header*)pool;
  if (isNil(root->prev))
  {
    _protect_pool(pool);
    return NULL; /* no free block, no chance to allocate anything */ // TODO: Just read up which C standard supports single line comments and then fucking use them!
  }

  /* round up to alignment */
  size = ALIGN(size, 64);

  size_t bruttoSize = size + sizeof(Header);
  Header * prev = NULL;
  Header * cur = HDR_PTR(root->prev);
  for (;;)
  {
    size_t curSize = (char*)HDR_PTR(cur->next) - (char*)cur; /* size of the block, including its header */

    if (curSize >= bruttoSize) // it is big enough
    {
      // take action for unused space in the free block
      if (curSize >= bruttoSize + sizeof(Header) * 2)
      { // the leftover space is big enough to make it a free block
        // Build a free block from the unused space and insert it into the list of free blocks after the current free block
        Header * tail = (Header*)((char*)cur + bruttoSize);
        MARK_NEW_FREE_HDR(tail);

        // update address-order-list
        tail->next = cur->next;
        tail->prev = HDR_OFFSET(cur) | 1;
        HDR_PTR(cur->next)->prev = HDR_OFFSET(tail); // NOTE: We know the next block is used because free blocks are never neighbours. So we don't have to care about the lower bit which would be set for the prev of a free block.
        cur->next = HDR_OFFSET(tail);

        // update list of free blocks
        tail[1].next = cur[1].next;
        // NOTE: tail[1].prev is updated in the common path below (assignment to "HDR_PTR(cur[1].next)[1].prev")

        if (!isNil(cur[1].next))
          HDR_PTR(cur[1].next)[1].prev = HDR_OFFSET(tail);
        cur[1].next = HDR_OFFSET(tail);
      }
      else if (curSize > bruttoSize)
      { // there will be unused space, but not enough to insert a free header
        internal_assert(curSize - bruttoSize == sizeof(Header)); // unused space must be enough to build a free-block or it should be exactly the size of a Header
        cur->next |= 1; // set marker for "has unused trailing space"
      }
      else
      {
        internal_assert(curSize == bruttoSize);
      }

      cur->prev &= NIL; // clear marker for "is a free block"

      // remove from linked list of free blocks
      if (prev)
        prev[1].next = cur[1].next;
      else
      {
        uint32_t freeBit = isFree(root);
        root->prev = (cur[1].next & NIL) | freeBit;
      }

      if (!isNil(cur[1].next))
        HDR_PTR(cur[1].next)[1].prev = prev ? HDR_OFFSET(prev) : NIL;

      _yalloc_validate(pool);
      VALGRIND_MEMPOOL_ALLOC(pool, cur + 1, size);
      _protect_pool(pool);
      return cur + 1; // return address after the header
    }

    if (isNil(cur[1].next))
      break;

    prev = cur;
    cur = HDR_PTR(cur[1].next);
  }

  _yalloc_validate(pool);
  _protect_pool(pool);
  return NULL;
}

// Removes a block from the free-list and moves the pool's first-free-block pointer to its successor if it pointed to that block.
static void unlink_from_free_list(Header * pool, Header * blk)
{
  // update the pool's pointer to the first block in the free list if necessary
  if (isNil(blk[1].prev))
  { // the block is the first in the free-list
    // make the pool's first-free-pointer point to the next in the free list
    uint32_t freeBit = isFree(pool);
    pool->prev = (blk[1].next & NIL) | freeBit;
  }
  else
    HDR_PTR(blk[1].prev)[1].next = blk[1].next;

  if (!isNil(blk[1].next))
    HDR_PTR(blk[1].next)[1].prev = blk[1].prev;
}

size_t yalloc_block_size(void * pool, void * p)
{
  Header * a = (Header*)p - 1;
  UNPROTECT_HDR(a);
  Header * b = HDR_PTR(a->next);
  size_t payloadSize = (char*)b - (char*)p;
  if (isPadded(a))
    payloadSize -= sizeof(Header);
  PROTECT_HDR(a);
  return payloadSize;
}

void yalloc_free(void * pool_, void * p)
{
  assert_is_pool(pool_);
  assert(!yalloc_defrag_in_progress(pool_));
  if (!p)
    return;

  _unprotect_pool(pool_);

  Header * pool = (Header*)pool_;
  Header * cur = (Header*)p - 1;

  // get pointers to previous/next block in address order
  Header * prev = cur == pool || isNil(cur->prev) ? NULL : HDR_PTR(cur->prev);
  Header * next = isNil(cur->next) ? NULL : HDR_PTR(cur->next);

  int prevFree = prev && isFree(prev);
  int nextFree = next && isFree(next);

#if USE_VALGRIND
  {
    unsigned errs = VALGRIND_COUNT_ERRORS;
    VALGRIND_MEMPOOL_FREE(pool, p);
    if (VALGRIND_COUNT_ERRORS > errs)
    { // early exit if the free was invalid (so we get a valgrind error and don't mess up the pool, which is helpful for testing if invalid frees are detected by valgrind)
      _protect_pool(pool_);
      return;
    }
  }
#endif

  _validate_user_ptr(pool_, p);

  if (prevFree && nextFree)
  { // the freed block has two free neighbors
    unlink_from_free_list(pool, prev);
    unlink_from_free_list(pool, next);

    // join prev, cur and next
    prev->next = next->next;
    HDR_PTR(next->next)->prev = cur->prev;

    // prev is now the block we want to push onto the free-list
    cur = prev;
  }
  else if (prevFree)
  {
    unlink_from_free_list(pool, prev);

    // join prev and cur
    prev->next = cur->next;
    HDR_PTR(cur->next)->prev = cur->prev;

    // prev is now the block we want to push onto the free-list
    cur = prev;
  }
  else if (nextFree)
  {
    unlink_from_free_list(pool, next);

    // join cur and next
    cur->next = next->next;
    HDR_PTR(next->next)->prev = next->prev & NIL;
  }

  // if there is a previous block and that block has padding then we want to grow the new free block into that padding
  if (cur != pool && !isNil(cur->prev))
  { // there is a previous block
    Header * left = HDR_PTR(cur->prev);
    if (isPadded(left))
    { // the previous block has padding, so extend the current block to consume it
      Header * grown = cur - 1;
      MARK_NEW_HDR(grown);
      grown->next = cur->next;
      grown->prev = cur->prev;
      left->next = HDR_OFFSET(grown);
      if (!isNil(cur->next))
        HDR_PTR(cur->next)->prev = HDR_OFFSET(grown);

      cur = grown;
    }
  }

  cur->prev |= 1; // it becomes a free block
  cur->next &= NIL; // reset padding-bit
  UNPROTECT_HDR(cur + 1);
  cur[1].prev = NIL; // it will be the first free block in the free list, so it has no prevFree

  if (!isNil(pool->prev))
  { // the free-list was already non-empty
    HDR_PTR(pool->prev)[1].prev = HDR_OFFSET(cur); // make the first entry in the free list point back to the new free block (it will become the first one)
    cur[1].next = pool->prev; // the next free block is the first of the old free-list
  }
  else
    cur[1].next = NIL; // free-list was empty, so there is no successor

  VALGRIND_MAKE_MEM_NOACCESS(cur + 2, (char*)HDR_PTR(cur->next) - (char*)(cur + 2));

  // now the freed block is the first in the free-list

  // update the offset to the first element of the free list
  uint32_t freeBit = isFree(pool); // remember the free-bit of the offset
  pool->prev = HDR_OFFSET(cur) | freeBit; // update the offset and restore the free-bit
  _yalloc_validate(pool);
  _protect_pool(pool);
}

size_t yalloc_count_free(void * pool_)
{
  assert_is_pool(pool_);
  _unprotect_pool(pool_);
  assert(!_yalloc_defrag_in_progress(pool_));
  Header * pool = (Header*)pool_;
  size_t bruttoFree = 0;
  Header * cur = pool;

  _yalloc_validate(pool);

  for (;;)
  {
    if (isFree(cur))
    { // it is a free block
      bruttoFree += (char*)HDR_PTR(cur->next) - (char*)cur;
    }
    else
    { // it is a used block
      if (isPadded(cur))
      { // the used block is padded
        bruttoFree += sizeof(Header);
      }
    }

    if (isNil(cur->next))
      break;

    cur = HDR_PTR(cur->next);
  }

  _protect_pool(pool);

  if (bruttoFree < sizeof(Header))
  {
    internal_assert(!bruttoFree); // free space should always be a multiple of sizeof(Header)
    return 0;
  }

  return bruttoFree - sizeof(Header);
}

size_t yalloc_count_continuous(void * pool_)
{
  assert_is_pool(pool_);
  _unprotect_pool(pool_);
  assert(!_yalloc_defrag_in_progress(pool_));
  Header * pool = (Header*)pool_;
  size_t largestFree = 0;
  Header * cur = pool;

  _yalloc_validate(pool);

  for (;;)
  {
    if (isFree(cur))
    { // it is a free block
      size_t temp = (uintptr_t)HDR_PTR(cur->next) - (uintptr_t)cur;
      if(temp > largestFree)
        largestFree = temp;
    }

    if (isNil(cur->next))
      break;

    cur = HDR_PTR(cur->next);
  }

  _protect_pool(pool);

  if (largestFree < sizeof(Header))
  {
    internal_assert(!largestFree); // free space should always be a multiple of sizeof(Header)
    return 0;
  }

  return largestFree - sizeof(Header);
}

void * yalloc_first_used(void * pool)
{
  assert_is_pool(pool);
  _unprotect_pool(pool);
  Header * blk = (Header*)pool;
  while (!isNil(blk->next))
  {
    if (!isFree(blk))
    {
      _protect_pool(pool);
      return blk + 1;
    }

    blk = HDR_PTR(blk->next);
  }

  _protect_pool(pool);
  return NULL;
}

void * yalloc_next_used(void * pool, void * p)
{
  assert_is_pool(pool);
  _unprotect_pool(pool);
  _validate_user_ptr(pool, p);
  Header * prev = (Header*)p - 1;
  assert(!isNil(prev->next)); // the last block should never end up as input to this function (because it is not user-visible)

  Header * blk = HDR_PTR(prev->next);
  while (!isNil(blk->next))
  {
    if (!isFree(blk))
    {
      _protect_pool(pool);
      return blk + 1;
    }

    blk = HDR_PTR(blk->next);
  }

  _protect_pool(pool);
  return NULL;
}

void yalloc_defrag_start(void * pool_)
{
  assert_is_pool(pool_);
  _unprotect_pool(pool_);
  assert(!_yalloc_defrag_in_progress(pool_));
  Header * pool = (Header*)pool_;

  // iterate over all blocks in address order and store the post-defragment address of used blocks in their "prev" field
  size_t end = 0; // offset for the next used block
  Header * blk = (Header*)pool;
  for (; !isNil(blk->next); blk = HDR_PTR(blk->next))
  {
    if (!isFree(blk))
    { // it is a used block
      blk->prev = end >> 1;
      internal_assert((char*)HDR_PTR(blk->prev) == (char*)pool + end);

      size_t bruttoSize = (char*)HDR_PTR(blk->next) - (char*)blk;

      if (isPadded(blk))
      { // the block is padded
        bruttoSize -= sizeof(Header);
      }

      end += bruttoSize;
      internal_assert(end % sizeof(Header) == 0);
    }
  }

  // blk is now the last block (the dummy "used" block at the end of the pool)
  internal_assert(isNil(blk->next));
  internal_assert(!isFree(blk));

  // mark the pool as "defragmentation in progress"
  uint32_t freeBit = isFree(pool);
  pool->prev = (HDR_OFFSET(blk) & NIL) | freeBit;

  _yalloc_validate(pool);
  internal_assert(yalloc_defrag_in_progress(pool));
  _protect_pool(pool);
}

void * yalloc_defrag_address(void * pool_, void * p)
{
  assert_is_pool(pool_);
  assert(yalloc_defrag_in_progress(pool_));
  if (!p)
    return NULL;

  Header * pool = (Header*)pool_;

  _unprotect_pool(pool);
  _validate_user_ptr(pool_, p);

  if (pool + 1 == p)
    return pool + 1; // "prev" of the first block points to the last used block to mark the pool as "defragmentation in progress"

  Header * blk = (Header*)p - 1;

  void * defragP = HDR_PTR(blk->prev) + 1;

  _protect_pool(pool);
  return defragP;
}

void yalloc_defrag_commit(void * pool_)
{
  assert_is_pool(pool_);
  _unprotect_pool(pool_);
  assert(_yalloc_defrag_in_progress(pool_));
  Header * pool = (Header*)pool_;

  // iterate over all blocks in address order and move them
  size_t end = 0; // offset for the next used block
  Header * blk = pool;
  Header * lastUsed = NULL;
  while (!isNil(blk->next))
  {
    if (!isFree(blk))
    { // it is a used block
      size_t bruttoSize = (char*)HDR_PTR(blk->next) - (char*)blk;

      if (isPadded(blk))
      { // the block is padded
        bruttoSize -= sizeof(Header);
      }

      Header * next = HDR_PTR(blk->next);

      blk->prev = lastUsed ? HDR_OFFSET(lastUsed) : NIL;
      blk->next = (end + bruttoSize) >> 1;

      lastUsed = (Header*)((char*)pool + end);
      VALGRIND_MAKE_MEM_UNDEFINED(lastUsed, (char*)blk - (char*)lastUsed);
      memmove(lastUsed, blk, bruttoSize);
      VALGRIND_MEMPOOL_CHANGE(pool, blk + 1, lastUsed + 1, bruttoSize - sizeof(Header));

      end += bruttoSize;
      blk = next;
    }
    else
      blk = HDR_PTR(blk->next);
  }

  // blk is now the last block (the dummy "used" block at the end of the pool)
  internal_assert(isNil(blk->next));
  internal_assert(!isFree(blk));

  if (lastUsed)
  {
    Header * gap = HDR_PTR(lastUsed->next);
    if (gap == blk)
    { // there is no gap
      pool->prev = NIL; // the free list is empty
      blk->prev = HDR_OFFSET(lastUsed);
    }
    else if (blk - gap > 1)
    { // the gap is big enough for a free Header

      // set up a free list that contains the gap as its only element
      gap->prev = HDR_OFFSET(lastUsed) | 1;
      gap->next = HDR_OFFSET(blk);
      gap[1].prev = NIL;
      gap[1].next = NIL;
      pool->prev = blk->prev = HDR_OFFSET(gap);
    }
    else
    { // there is a gap, but it is too small to be used as a free-list node, so just make it padding of the last used block
      lastUsed->next = HDR_OFFSET(blk) | 1;
      pool->prev = NIL;
      blk->prev = HDR_OFFSET(lastUsed);
    }
  }
  else
  { // the pool is empty
    pool->prev = 1;
  }

  internal_assert(!_yalloc_defrag_in_progress(pool));
  _yalloc_validate(pool);
  _protect_pool(pool);
}

GL/yalloc/yalloc.h (new file, 176 lines)

/**
@file

API of the yalloc allocator.
*/

#ifndef YALLOC_H
#define YALLOC_H

#include <stddef.h>

/**
Maximum supported pool size. yalloc_init() will fail for larger pools.
*/
#define MAX_POOL_SIZE ((2 << 24) - 4)

/**
Creates a pool inside a given buffer.

Pools must be deinitialized with yalloc_deinit() when they are no longer needed.

@param pool The starting address of the pool. It must have at least 16bit
alignment (internal structure uses 16bit integers). Allocations are placed at
32bit boundaries starting from this address, so if the user data should be
32bit aligned then this address has to be 32bit aligned. Typically an address
of static memory, or an array on the stack is used if the pool is only used
temporarily.
@param size Size of the pool.
@return 0 on success, nonzero if the size is not supported.
*/
int yalloc_init(void * pool, size_t size);

/**
Deinitializes the buffer that is used by the pool and makes it available for other use.

The content of the buffer is undefined after this.

@param pool The starting address of an initialized pool.
*/
void yalloc_deinit(void * pool);

/**
Allocates a block of memory from a pool.

This function mimics malloc().

The pool must not be in the "defragmenting" state when this function is called.

@param pool The starting address of an initialized pool.
@param size Number of bytes to allocate.
@return Allocated buffer or \c NULL if there was no free range that could serve
the allocation. See @ref yalloc_defrag_start() for a way to remove
fragmentation which may cause allocations to fail even when there is enough
space in total.
*/
void * yalloc_alloc(void * pool, size_t size);

/**
Returns an allocation to a pool.

This function mimics free().

The pool must not be in the "defragmenting" state when this function is called.

@param pool The starting address of the initialized pool the allocation comes from.
@param p An address that was returned from yalloc_alloc() of the same pool.
*/
void yalloc_free(void * pool, void * p);

/**
Returns the maximum size of a successful allocation (assuming a completely unfragmented heap).

After defragmentation the first allocation with the returned size is guaranteed to succeed.

@param pool The starting address of an initialized pool.
@return Number of bytes that can be allocated (assuming the pool is defragmented).
*/
size_t yalloc_count_free(void * pool);

/**
Returns the size of the largest continuous free area.

@param pool The starting address of an initialized pool.
@return Number of free bytes that exist continuously.
*/
size_t yalloc_count_continuous(void * pool_);

/**
Queries the usable size of an allocated block.

@param pool The starting address of the initialized pool the allocation comes from.
@param p An address that was returned from yalloc_alloc() of the same pool.
@return Size of the memory block. This is the size passed to @ref yalloc_alloc() rounded up to 4.
*/
size_t yalloc_block_size(void * pool, void * p);

/**
Finds the first (in address order) allocation of a pool.

@param pool The starting address of an initialized pool.
@return Address of the allocation with the lowest address inside the pool (this is
what @ref yalloc_alloc() returned), or \c NULL if there is no used block.
*/
void * yalloc_first_used(void * pool);

/**
Given a pointer to an allocation, finds the next (in address order) used block of a pool.

@param pool The starting address of the initialized pool the allocation comes from.
@param p Pointer to an allocation in that pool, typically comes from a previous
call to @ref yalloc_first_used()
*/
void * yalloc_next_used(void * pool, void * p);

/**
Starts defragmentation for a pool.

Allocations will stay where they are. But the pool is put into the "defragmenting"
state (see @ref yalloc_defrag_in_progress()).

The pool must not be in the "defragmenting" state when this function is called.
The pool is put into the "defragmenting" state by this function.

@param pool The starting address of an initialized pool.
*/
void yalloc_defrag_start(void * pool);

/**
Returns the address that an allocation will have after @ref yalloc_defrag_commit() is called.

The pool must be in the "defragmenting" state when this function is called.

@param pool The starting address of the initialized pool the allocation comes from.
@param p Pointer to an allocation in that pool.
@return The address the allocation will have after @ref yalloc_defrag_commit() is called.
*/
void * yalloc_defrag_address(void * pool, void * p);

/**
Finishes the defragmentation.

The content of all allocations in the pool will be moved to the address that
was reported by @ref yalloc_defrag_address(). The pool will then have only one
free block. This means that a subsequent <tt>yalloc_alloc(pool, yalloc_count_free(pool))</tt>
will succeed.

The pool must be in the "defragmenting" state when this function is called. The
pool is put back into its normal state by this function.

@param pool The starting address of an initialized pool.
*/
void yalloc_defrag_commit(void * pool);

/**
Tells if the pool is in the "defragmenting" state (after a @ref yalloc_defrag_start() and before a @ref yalloc_defrag_commit()).

@param pool The starting address of an initialized pool.
@return Nonzero if the pool is currently in the "defragmenting" state.
*/
int yalloc_defrag_in_progress(void * pool);


/**
Helper function that dumps the state of the pool to stdout.

This function is only available if built with <tt>yalloc_dump.c</tt>. This
function only exists for debugging purposes and can be ignored by normal users
that are not interested in the internal structure of the implementation.

@param pool The starting address of an initialized pool.
@param name A string that is used as "Title" for the output.
*/
void yalloc_dump(void * pool, char * name);


#endif // YALLOC_H

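Taken together, a minimal lifecycle for this API looks roughly like the following (a sketch under assumed names; the buffer size is arbitrary):

    #include "yalloc.h"
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pool[1024];                     /* 4k pool, 32bit aligned */

    int main(void)
    {
        if (yalloc_init(pool, sizeof(pool)) != 0)   /* returns 0 on success */
            return 1;

        void * p = yalloc_alloc(pool, 100);
        if (p)
            printf("usable bytes: %u\n", (unsigned)yalloc_block_size(pool, p));

        yalloc_free(pool, p);
        yalloc_deinit(pool);
        return 0;
    }
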
GL/yalloc/yalloc_dump.c (new file, 39 lines)

#include "yalloc_internals.h"

#include <stdio.h>

static void printOffset(void * pool, char * name, uint16_t offset)
{
  if (isNil(offset))
    printf("  %s: nil\n", name);
  else
    printf("  %s: %td\n", name, (char*)HDR_PTR(offset) - (char*)pool);
}

void yalloc_dump(void * pool, char * name)
{
  printf("---- %s ----\n", name);
  Header * cur = (Header*)pool;
  for (;;)
  {
    printf(isFree(cur) ? "%td: free @%p\n" : "%td: used @%p\n", (char*)cur - (char*)pool, cur);
    printOffset(pool, cur == pool ? "first free" : "prev", cur->prev);
    printOffset(pool, "next", cur->next);
    if (isFree(cur))
    {
      printOffset(pool, "prevFree", cur[1].prev);
      printOffset(pool, "nextFree", cur[1].next);
    }
    else
      printf("  payload includes padding: %i\n", isPadded(cur));

    if (isNil(cur->next))
      break;

    printf("  %td bytes payload\n", (char*)HDR_PTR(cur->next) - (char*)cur - sizeof(Header));

    cur = HDR_PTR(cur->next);
  }

  fflush(stdout);
}

GL/yalloc/yalloc_internals.h (new file, 59 lines)

#ifndef YALLOC_INTERNALS_H
#define YALLOC_INTERNALS_H

#include <stdint.h>

typedef struct
{
  uint32_t prev; // low bit set if free
  uint32_t next; // for used blocks: low bit set if unused header at the end
} Header;

// NOTE: We have 32bit aligned data and 16bit offsets where the lowest bit is used as flag. So we remove the low bit and shift by 1 to address 128k bytes with the 15 significant offset bits.

#define NIL 0xFFFFFFFEu

// return Header-address for a prev/next
#define HDR_PTR(offset) ((Header*)((char*)pool + (((offset) & NIL)<<1)))

// return a prev/next for a Header-address
#define HDR_OFFSET(blockPtr) ((uint32_t)(((char*)blockPtr - (char*)pool) >> 1))

#ifndef YALLOC_INTERNAL_VALIDATE
# ifdef NDEBUG
#  define YALLOC_INTERNAL_VALIDATE 0
# else
#  define YALLOC_INTERNAL_VALIDATE 1
# endif
#endif


/*
internal_assert() is used in some places to check internal expectations.
Activate this if you modify the code to detect problems as early as possible.
In other cases this should be deactivated.
*/
#if 0
#define internal_assert assert
#else
#define internal_assert(condition)((void) 0)
#endif

// detects offsets that point nowhere
static inline int isNil(uint32_t offset)
{
  return (offset | 1) == 0xFFFFFFFF;
}

static inline int isFree(Header * hdr)
{
  return hdr->prev & 1;
}

static inline int isPadded(Header * hdr)
{
  return hdr->next & 1;
}


#endif // YALLOC_INTERNALS_H

Makefile (1 line changed)

@@ -8,6 +8,7 @@
 TARGET = libGLdc.a
 OBJS = GL/draw.o GL/flush.o GL/framebuffer.o GL/immediate.o GL/lighting.o GL/state.o GL/texture.o GL/glu.o GL/version.h
 OBJS += GL/matrix.o GL/fog.o GL/error.o GL/clip.o containers/stack.o containers/named_array.o containers/aligned_vector.o GL/profiler.o
+OBJS += GL/yalloc/yalloc.o
 
 SUBDIRS =

include/glext.h (5 lines added)

@@ -205,6 +205,11 @@ GLAPI void APIENTRY glCompressedTexImage2DARB(GLenum target,
 #define GL_TEXTURE_LOD_BIAS_EXT 0x8501
 #endif /* GL_EXT_texture_lod_bias */
 
+/* ATI_meminfo */
+#define GL_VBO_FREE_MEMORY_ATI 0x87FB
+#define GL_TEXTURE_FREE_MEMORY_ATI 0x87FC
+#define GL_RENDERBUFFER_FREE_MEMORY_ATI 0x87FD
+
 __END_DECLS
 
 #endif /* !__GL_GLEXT_H */

include/glkos.h (7 lines added)

@@ -111,5 +111,12 @@ GLAPI void APIENTRY glKosSwapBuffers();
 /* Pass to glTexParameteri to set the shared bank */
 #define GL_SHARED_TEXTURE_BANK_KOS 0xEF00
 
+/* Memory allocation extension (GL_KOS_texture_memory_management) */
+GLAPI GLvoid APIENTRY glDefragmentTextureMemory_KOS(void);
+
+#define GL_FREE_TEXTURE_MEMORY_KOS 0xEF01
+#define GL_USED_TEXTURE_MEMORY_KOS 0xEF02
+#define GL_FREE_CONTIGUOUS_TEXTURE_MEMORY_KOS 0xEF03
+
 __END_DECLS

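From the application side, the extension might be driven like this (illustrative sketch; required_bytes is a hypothetical size the caller knows):

    GLint largest = 0;
    glGetIntegerv(GL_FREE_CONTIGUOUS_TEXTURE_MEMORY_KOS, &largest);
    if ((GLuint) largest < required_bytes)
        glDefragmentTextureMemory_KOS();   /* GLdc re-points its own TextureObject data internally */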