Merge branch 'texture-refactor' into 'master'

Drastically refactor glTexImage2D

See merge request simulant/GLdc!107

commit f0d799d14f

@@ -1,5 +1,6 @@
 stages:
   - build
+  - test

 build:sh4-gcc:
   stage: build
@@ -17,11 +18,28 @@ build:sh4-gcc:

 build:x86-gcc:
   stage: build
-  image: fedora:34
+  image: fedora:38
   before_script:
-    - sudo dnf install -y cmake gcc gcc-c++ SDL2-devel glibc-devel pkgconf-pkg-config glibc-devel.i686 SDL2-devel.i686
+    - sudo dnf install -y cmake gcc gcc-c++ SDL2.i686 SDL2-devel.x86_64 glibc-devel glibc-devel.i686 SDL2-devel.i686 pkgconf-pkg-config.i686 pkgconf-pkg-config.x86_64
   script:
     - mkdir builddir
     - cd builddir
     - cmake -DCMAKE_BUILD_TYPE=Release ..
     - make
+  artifacts:
+    paths:
+      - builddir/tests/gldc_tests
+
+test:x86-gcc:
+  stage: test
+  image: fedora:38
+  dependencies:
+    - build:x86-gcc
+  before_script:
+    - sudo dnf install -y cmake gcc gcc-c++ SDL2.i686 SDL2-devel glibc-devel pkgconf-pkg-config glibc-devel.i686 SDL2-devel.i686 pkgconf-pkg-config.i686
+  script:
+    - cd builddir/tests/
+    - SDL_VIDEODRIVER=dummy ./gldc_tests --junit-xml=report.xml
+  artifacts:
+    reports:
+      junit: builddir/tests/report.xml
@@ -54,10 +54,10 @@ else()
     set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -ffast-math")
 endif()

-set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -O3 -mpretend-cmove -fexpensive-optimizations -fomit-frame-pointer -finline-functions")
+set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -O3 -fexpensive-optimizations -fomit-frame-pointer -finline-functions")
 set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -std=c++14 -O3 -g0 -s -fomit-frame-pointer -fstrict-aliasing")

-set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 -mpretend-cmove -fexpensive-optimizations -fomit-frame-pointer -finline-functions")
+set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 -fexpensive-optimizations -fomit-frame-pointer -finline-functions")
 set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -std=c++14 -O3 -fomit-frame-pointer -fstrict-aliasing")

 set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -g -Wall -Wextra")
@@ -80,7 +80,7 @@ set(
     GL/state.c
     GL/texture.c
     GL/util.c
-    GL/yalloc/yalloc.c
+    GL/alloc/alloc.c
     ${CMAKE_CURRENT_BINARY_DIR}/version.c
 )

@@ -175,6 +175,8 @@ function(gen_sample sample)
     endif()
 endfunction()

+add_subdirectory(tests)
+
 gen_sample(blend_test samples/blend_test/main.c)
 gen_sample(depth_funcs samples/depth_funcs/main.c)
 gen_sample(depth_funcs_alpha_testing samples/depth_funcs_alpha_testing/main.c samples/depth_funcs_alpha_testing/gl_png.c)
@@ -206,12 +208,13 @@ gen_sample(zclip_trianglestrip samples/zclip_trianglestrip/main.c)
 gen_sample(scissor samples/scissor/main.c)
 gen_sample(polymark samples/polymark/main.c)
 gen_sample(cubes samples/cubes/main.cpp)

 gen_sample(zclip_test tests/zclip/main.cpp)

 if(PLATFORM_DREAMCAST)
     gen_sample(trimark samples/trimark/main.c)
     gen_sample(quadmark samples/quadmark/main.c samples/profiler.c)
+    gen_sample(prof_texture_upload samples/prof_texture_upload/main.c samples/profiler.c)
 else()
     gen_sample(quadmark samples/quadmark/main.c)
+    gen_sample(prof_texture_upload samples/prof_texture_upload/main.c)
 endif()
GL/alloc/alloc.c (new file, 534 lines)
@@ -0,0 +1,534 @@
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>

#include "alloc.h"


/* This allocator is designed so that ideally all allocations larger
 * than 2k, fall on a 2k boundary. Smaller allocations will
 * never cross a 2k boundary.
 *
 * House keeping is stored in RAM to avoid reading back from the
 * VRAM to check for usage. Headers can't be easily stored in the
 * blocks anyway as they have to be 2k aligned (so you'd need to
 * store them in reverse or something)
 *
 * Defragmenting the pool will move larger allocations first, then
 * smaller ones, recursively until you tell it to stop, or until things
 * stop moving.
 *
 * The maximum pool size is 8M, made up of:
 *
 * - 4096 blocks of 2k
 * - each with 8 sub-blocks of 256 bytes
 *
 * Why?
 *
 * The PVR performs better if textures don't cross 2K memory
 * addresses, so we try to avoid that. Obviously we can't
 * if the allocation is > 2k, but in that case we can at least
 * align with 2k and the VQ codebook (which is usually 2k) will
 * be in its own page.
 *
 * The smallest PVR texture allowed is 8x8 at 16 bit (so 128 bytes)
 * but we're unlikely to use too many of those, so having a min sub-block
 * size of 256 should be OK (a 16x16 image is 512, so two sub-blocks).
 *
 * We could go down to 128 bytes if wastage is an issue, but then we have
 * to store double the number of usage markers.
 *
 * FIXME:
 *
 * - Only operates on one pool (ignores what you pass)
 */

#include <assert.h>
#include <stdio.h>

#define EIGHT_MEG (8 * 1024 * 1024)
#define TWO_KILOBYTES (2 * 1024)
#define BLOCK_COUNT (EIGHT_MEG / TWO_KILOBYTES)

#define ALLOC_DEBUG 0
#if ALLOC_DEBUG
#define DBG_MSG(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
#else
#define DBG_MSG(fmt, ...) do {} while (0)
#endif


static inline intptr_t round_up(intptr_t n, int multiple)
{
    if((n % multiple) == 0) {
        return n;
    }

    assert(multiple);
    return ((n + multiple - 1) / multiple) * multiple;
}

struct AllocEntry {
    void* pointer;
    size_t size;
    struct AllocEntry* next;
};


typedef struct {
    /* This is a usage bitmask for each block. A block
     * is divided into 8 x 256 byte subblocks. If a block
     * is entirely used, it's value will be 255, if
     * it's entirely free then it will be 0.
     */
    uint8_t block_usage[BLOCK_COUNT];
    uint8_t* pool;          // Pointer to the memory pool
    size_t pool_size;       // Size of the memory pool
    uint8_t* base_address;  // First 2k aligned address in the pool
    size_t block_count;     // Number of 2k blocks in the pool

    /* It's frustrating that we need to do this dynamically
     * but we need to know the size allocated when we free()...
     * we could store it statically but it would take 64k if we had
     * an array of block_index -> block size where there would be 2 ** 32
     * entries of 16 bit block sizes. The drawback (aside the memory usage)
     * would be that we won't be able to order by size, so defragging will
     * take much more time.*/
    struct AllocEntry* allocations;
} PoolHeader;


static PoolHeader pool_header = {
    {0}, NULL, 0, NULL, 0, NULL
};

void* alloc_base_address(void* pool) {
    (void) pool;
    return pool_header.base_address;
}

size_t alloc_block_count(void* pool) {
    (void) pool;
    return pool_header.block_count;
}

static inline void* calc_address(
    uint8_t* block_usage_iterator,
    int bit_offset,
    size_t required_subblocks,
    size_t* start_subblock_out
) {
    uintptr_t offset = (block_usage_iterator - pool_header.block_usage) * 8;
    offset += (bit_offset + 1);
    offset -= required_subblocks;

    if(start_subblock_out) {
        *start_subblock_out = offset;
    }

    return pool_header.base_address + (offset * 256);
}

void* alloc_next_available_ex(void* pool, size_t required_size, size_t* start_subblock, size_t* required_subblocks);

void* alloc_next_available(void* pool, size_t required_size) {
    return alloc_next_available_ex(pool, required_size, NULL, NULL);
}

void* alloc_next_available_ex(void* pool, size_t required_size, size_t* start_subblock_out, size_t* required_subblocks_out) {
    (void) pool;

    uint8_t* it = pool_header.block_usage;
    uint32_t required_subblocks = (required_size / 256);
    if(required_size % 256) required_subblocks += 1;

    /* Anything gte to 2048 must be aligned to a 2048 boundary */
    bool requires_alignment = required_size >= 2048;

    if(required_subblocks_out) {
        *required_subblocks_out = required_subblocks;
    }

    /* This is a fallback option. If while we're searching we find a possible slot
     * but it's not aligned, or it's straddling a 2k boundary, then we store
     * it here and if we reach the end of the search and find nothing better
     * we use this instead */
    uint8_t* poor_option = NULL;
    size_t poor_start_subblock = 0;

    uint32_t found_subblocks = 0;
    uint32_t found_poor_subblocks = 0;

    for(size_t j = 0; j < pool_header.block_count; ++j, ++it) {
        /* We just need to find enough consecutive blocks */
        if(found_subblocks < required_subblocks) {
            uint8_t t = *it;

            /* Optimisation only. Skip over full blocks */
            if(t == 255) {
                found_subblocks = 0;
                found_poor_subblocks = 0;
            } else {
                /* Now let's see how many consecutive blocks we can find */
                for(int i = 0; i < 8; ++i) {
                    if((t & 0x80) == 0) {
                        bool block_overflow = (
                            required_size < 2048 && found_subblocks > 0 && i == 0
                        );

                        bool reset_subblocks = (
                            (requires_alignment && found_subblocks == 0 && i != 0) ||
                            block_overflow
                        );

                        if(reset_subblocks) {
                            // Ignore this subblock, because we want the first subblock to be aligned
                            // at a 2048 boundary and this one isn't (i != 0)
                            found_subblocks = 0;
                        } else {
                            found_subblocks++;
                        }

                        /* If we reset the subblocks due to an overflow, we still
                         * want to count this free subblock in our count */
                        if(block_overflow) {
                            found_subblocks++;
                        }

                        found_poor_subblocks++;

                        if(found_subblocks >= required_subblocks) {
                            /* We found space! Now calculate the address */
                            return calc_address(it, i, required_subblocks, start_subblock_out);
                        }

                        if(!poor_option && (found_poor_subblocks >= required_subblocks)) {
                            poor_option = calc_address(it, i, required_subblocks, &poor_start_subblock);
                        }

                    } else {
                        found_subblocks = 0;
                        found_poor_subblocks = 0;
                    }

                    t <<= 1;
                }
            }
        }
    }

    if(poor_option) {
        if(start_subblock_out) {
            *start_subblock_out = poor_start_subblock;
        }

        return poor_option;
    } else {
        return NULL;
    }
}

int alloc_init(void* pool, size_t size) {
    (void) pool;

    if(pool_header.pool) {
        return -1;
    }

    if(size > EIGHT_MEG) {  // FIXME: >= ?
        return -1;
    }

    uint8_t* p = (uint8_t*) pool;

    memset(pool_header.block_usage, 0, BLOCK_COUNT);
    pool_header.pool = pool;
    pool_header.pool_size = size;

    intptr_t base_address = (intptr_t) pool_header.pool;
    base_address = round_up(base_address, 2048);

    pool_header.base_address = (uint8_t*) base_address;
    pool_header.block_count = ((p + size) - pool_header.base_address) / 2048;
    pool_header.allocations = NULL;

    assert(((uintptr_t) pool_header.base_address) % 2048 == 0);

    return 0;
}

void alloc_shutdown(void* pool) {
    (void) pool;

    if(!pool_header.pool) {
        return;
    }

    struct AllocEntry* it = pool_header.allocations;
    while(it) {
        struct AllocEntry* next = it->next;
        free(it);
        it = next;
    }

    memset(&pool_header, 0, sizeof(pool_header));
    pool_header.pool = NULL;
}

static inline uint32_t size_to_subblock_count(size_t size) {
    uint32_t required_subblocks = (size / 256);
    if(size % 256) required_subblocks += 1;
    return required_subblocks;
}

static inline uint32_t subblock_from_pointer(void* p) {
    uint8_t* ptr = (uint8_t*) p;
    return (ptr - pool_header.base_address) / 256;
}

static inline void block_and_offset_from_subblock(size_t sb, size_t* b, uint8_t* off) {
    *b = sb / 8;
    *off = (sb % 8);
}

void* alloc_malloc(void* pool, size_t size) {
    DBG_MSG("Allocating: %d\n", size);

    size_t start_subblock, required_subblocks;
    void* ret = alloc_next_available_ex(pool, size, &start_subblock, &required_subblocks);

    if(ret) {
        size_t block;
        uint8_t offset;

        block_and_offset_from_subblock(start_subblock, &block, &offset);

        uint8_t mask = 0;

        DBG_MSG("Alloc: size: %d, rs: %d, sb: %d, b: %d, off: %d\n", size, required_subblocks, start_subblock, start_subblock / 8, start_subblock % 8);

        /* Toggle any bits for the first block */
        int c = (required_subblocks < 8) ? required_subblocks : 8;
        for(int i = 0; i < c; ++i) {
            mask |= (1 << (7 - (offset + i)));
            required_subblocks--;
        }

        if(mask) {
            pool_header.block_usage[block++] |= mask;
        }

        /* Fill any full blocks in the middle of the allocation */
        while(required_subblocks > 8) {
            pool_header.block_usage[block++] = 255;
            required_subblocks -= 8;
        }

        /* Fill out any trailing subblocks */
        mask = 0;
        for(size_t i = 0; i < required_subblocks; ++i) {
            mask |= (1 << (7 - i));
        }

        if(mask) {
            pool_header.block_usage[block++] |= mask;
        }

        /* Insert allocations in the list by size descending so that when we
         * defrag we can move the larger blocks before the smaller ones without
         * much effort */
        struct AllocEntry* new_entry = (struct AllocEntry*) malloc(sizeof(struct AllocEntry));
        new_entry->pointer = ret;
        new_entry->size = size;
        new_entry->next = NULL;

        struct AllocEntry* it = pool_header.allocations;
        struct AllocEntry* last = NULL;

        if(!it) {
            pool_header.allocations = new_entry;
        } else {
            while(it) {
                if(it->size < size) {
                    if(last) {
                        last->next = new_entry;
                    } else {
                        pool_header.allocations = new_entry;
                    }

                    new_entry->next = it;
                    break;
                } else if(!it->next) {
                    it->next = new_entry;
                    new_entry->next = NULL;
                    break;
                }

                last = it;
                it = it->next;
            }
        }
    }

    DBG_MSG("Alloc done\n");

    return ret;
}

static void alloc_release_blocks(struct AllocEntry* it) {
    size_t used_subblocks = size_to_subblock_count(it->size);
    size_t subblock = subblock_from_pointer(it->pointer);
    size_t block;
    uint8_t offset;
    block_and_offset_from_subblock(subblock, &block, &offset);

    uint8_t mask = 0;

    DBG_MSG("Free: size: %d, us: %d, sb: %d, off: %d\n", it->size, used_subblocks, block, offset);

    /* Wipe out any leading subblocks */
    int c = (used_subblocks < 8) ? used_subblocks : 8;
    for(int i = 0; i < c; ++i) {
        mask |= (1 << (7 - (offset + i)));
        used_subblocks--;
    }

    if(mask) {
        pool_header.block_usage[block++] &= ~mask;
    }

    /* Clear any full blocks in the middle of the allocation */
    while(used_subblocks > 8) {
        pool_header.block_usage[block++] = 0;
        used_subblocks -= 8;
    }

    /* Wipe out any trailing subblocks */
    mask = 0;
    for(size_t i = 0; i < used_subblocks; ++i) {
        mask |= (1 << (7 - i));
    }

    if(mask) {
        pool_header.block_usage[block++] &= ~mask;
    }
}

void alloc_free(void* pool, void* p) {
    (void) pool;

    struct AllocEntry* it = pool_header.allocations;
    struct AllocEntry* last = NULL;
    while(it) {
        if(it->pointer == p) {
            alloc_release_blocks(it);

            if(last) {
                last->next = it->next;
            } else {
                assert(it == pool_header.allocations);
                pool_header.allocations = it->next;
            }

            DBG_MSG("Freed: size: %d, us: %d, sb: %d, off: %d\n", it->size, used_subblocks, block, offset);
            free(it);
            break;
        }

        last = it;
        it = it->next;
    }

    DBG_MSG("Free done\n");
}

void alloc_run_defrag(void* pool, defrag_address_move callback, int max_iterations, void* user_data) {

    for(int i = 0; i < max_iterations; ++i) {
        bool move_occurred = false;

        struct AllocEntry* it = pool_header.allocations;

        if(!it) {
            return;
        }

        while(it) {
            void* potential_dest = alloc_next_available(pool, it->size);
            if(potential_dest < it->pointer) {
                potential_dest = alloc_malloc(pool, it->size);
                memcpy(potential_dest, it->pointer, it->size);

                /* Mark this block as now free, but don't fiddle with the
                 * allocation list */
                alloc_release_blocks(it);

                callback(it->pointer, potential_dest, user_data);

                it->pointer = potential_dest;
                move_occurred = true;
            }

            it = it->next;
        }

        if(!move_occurred) {
            return;
        }
    }
}

static inline uint8_t count_ones(uint8_t byte) {
    static const uint8_t NIBBLE_LOOKUP [16] = {
        0, 1, 1, 2, 1, 2, 2, 3,
        1, 2, 2, 3, 2, 3, 3, 4
    };
    return NIBBLE_LOOKUP[byte & 0x0F] + NIBBLE_LOOKUP[byte >> 4];
}

size_t alloc_count_free(void* pool) {
    (void) pool;

    uint8_t* it = pool_header.block_usage;
    uint8_t* end = it + pool_header.block_count;

    size_t total_free = 0;

    while(it < end) {
        total_free += count_ones(*it) * 256;
        ++it;
    }

    return total_free;
}

size_t alloc_count_continuous(void* pool) {
    (void) pool;

    size_t largest_block = 0;

    uint8_t* it = pool_header.block_usage;
    uint8_t* end = it + pool_header.block_count;

    size_t current_block = 0;
    while(it < end) {
        uint8_t t = *it++;
        if(!t) {
            current_block += 2048;
        } else {
            for(int i = 7; i >= 0; --i) {
                bool bitset = (t & (1 << i));
                if(bitset) {
                    current_block += (7 - i) * 256;
                    if(largest_block < current_block) {
                        largest_block = current_block;
                        current_block = 0;
                    }
                }
            }
        }
    }

    return largest_block;
}
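Editor's note: the 2 KiB block / 256-byte subblock scheme described in the comment at the top of alloc.c is easiest to check with concrete numbers. The snippet below is an illustration only (it is not part of the commit); it simply re-derives the arithmetic that size_to_subblock_count() and the first-block mask loop in alloc_malloc() perform:

```c
#include <stdio.h>

/* One byte of block_usage tracks one 2 KiB block; each bit is a
 * 256-byte subblock (bit 7 = first subblock of the block). */
static unsigned subblocks_for(unsigned long size) {
    return (unsigned)((size + 255) / 256);   /* round up to 256-byte subblocks */
}

int main(void) {
    /* 600 bytes -> 3 subblocks; being < 2048 it must merely avoid
     * straddling a 2 KiB block boundary. */
    printf("600B  -> %u subblocks\n", subblocks_for(600));

    /* 5000 bytes -> 20 subblocks (2.5 blocks); being >= 2048 the search in
     * alloc_next_available_ex() requires the first subblock to sit on a
     * 2 KiB boundary (i == 0). */
    printf("5000B -> %u subblocks\n", subblocks_for(5000));

    /* 3 subblocks allocated at offset 0 of a block set the usage mask
     * 0b11100000; a fully used block reads 0xFF, a fully free one 0x00. */
    unsigned char mask = (1u << 7) | (1u << 6) | (1u << 5);
    printf("mask  -> 0x%02X\n", mask);
    return 0;
}
```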
GL/alloc/alloc.h (new file, 29 lines)
@@ -0,0 +1,29 @@
#pragma once

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

#ifdef __cplusplus
extern "C" {
#endif

int alloc_init(void* pool, size_t size);
void alloc_shutdown(void* pool);

void *alloc_malloc(void* pool, size_t size);
void alloc_free(void* pool, void* p);

typedef void (defrag_address_move)(void*, void*, void*);
void alloc_run_defrag(void* pool, defrag_address_move callback, int max_iterations, void* user_data);

size_t alloc_count_free(void* pool);
size_t alloc_count_continuous(void* pool);

void* alloc_next_available(void* pool, size_t required_size);
void* alloc_base_address(void* pool);
size_t alloc_block_count(void* pool);

#ifdef __cplusplus
}
#endif
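Editor's note: the header above is the whole public surface of the new allocator. A minimal usage sketch, assuming an ordinary 8 MiB heap buffer standing in for PVR VRAM (the pool pointer is currently ignored internally, as the FIXME in alloc.c notes):

```c
#include <stdio.h>
#include <stdlib.h>
#include "alloc.h"

/* Called by alloc_run_defrag() once per allocation it relocates. */
static void on_moved(void* from, void* to, void* user_data) {
    (void) user_data;
    printf("moved %p -> %p\n", from, to);
}

int main(void) {
    /* Stand-in for the 8 MiB of texture memory the allocator is built around. */
    void* vram = malloc(8 * 1024 * 1024);

    alloc_init(vram, 8 * 1024 * 1024);

    void* a = alloc_malloc(vram, 512);         /* two 256-byte subblocks      */
    void* b = alloc_malloc(vram, 64 * 1024);   /* >= 2 KiB, so 2 KiB aligned  */

    printf("free bytes: %u, largest run: %u\n",
           (unsigned) alloc_count_free(vram),
           (unsigned) alloc_count_continuous(vram));

    alloc_free(vram, a);
    alloc_run_defrag(vram, on_moved, 5, NULL); /* compact towards the base    */

    alloc_free(vram, b);
    alloc_shutdown(vram);
    free(vram);
    return 0;
}
```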
GL/draw.c (24 changed lines)
@@ -78,7 +78,7 @@ static void _readVertexData3f3f(const GLubyte* __restrict__ in, GLubyte* __restr

 // 10:10:10:2REV format
 static void _readVertexData1i3f(const GLubyte* in, GLubyte* out) {
-    const static float MULTIPLIER = 1.0f / 1023.0f;
+    static const float MULTIPLIER = 1.0f / 1023.0f;

     GLfloat* output = (GLfloat*) out;

@@ -585,7 +585,6 @@ static void _readPositionData(ReadDiffuseFunc func, const GLuint first, const GL
     const GLubyte* vptr = ((GLubyte*) ATTRIB_POINTERS.vertex.ptr + (first * vstride));

     float pos[3];
-    float w = 0.0f;

     ITERATE(count) {
         PREFETCH(vptr + vstride);
@@ -726,9 +725,7 @@ typedef struct {
 } Float2;

 static const Float3 F3Z = {0.0f, 0.0f, 1.0f};
-static const Float3 F3ZERO = {0.0f, 0.0f, 0.0f};
 static const Float2 F2ZERO = {0.0f, 0.0f};
-static const uint32_t U4ONE = ~0;

 static void generateElementsFastPath(
     SubmissionTarget* target, const GLsizei first, const GLuint count,
@@ -910,24 +907,6 @@ static void transform(SubmissionTarget* target) {
     TransformVertices(vertex, target->count);
 }

-static void mat_transform3(const float* xyz, const float* xyzOut, const uint32_t count, const uint32_t inStride, const uint32_t outStride) {
-    const uint8_t* dataIn = (const uint8_t*) xyz;
-    uint8_t* dataOut = (uint8_t*) xyzOut;
-
-    ITERATE(count) {
-        const float* in = (const float*) dataIn;
-        float* out = (float*) dataOut;
-
-        TransformVec3NoMod(
-            in,
-            out
-        );
-
-        dataIn += inStride;
-        dataOut += outStride;
-    }
-}
-
 static void mat_transform_normal3(const float* xyz, const float* xyzOut, const uint32_t count, const uint32_t inStride, const uint32_t outStride) {
     const uint8_t* dataIn = (const uint8_t*) xyz;
     uint8_t* dataOut = (uint8_t*) xyzOut;
@@ -1224,7 +1203,6 @@ GL_FORCE_INLINE void submitVertices(GLenum mode, GLsizei first, GLuint count, GL
     target->header_offset = vector_size;
     target->start_offset = target->header_offset + (header_required ? 1 : 0);

-    gl_assert(target->header_offset >= 0);
     gl_assert(target->start_offset >= target->header_offset);
     gl_assert(target->count);

GL/flush.c (26 changed lines)
@@ -46,10 +46,22 @@ void APIENTRY glKosInitConfig(GLdcConfig* config) {
     config->initial_pt_capacity = 512 * 3;
     config->initial_tr_capacity = 1024 * 3;
     config->initial_immediate_capacity = 1024 * 3;
-    config->internal_palette_format = GL_RGBA8;
+
+    // RGBA4444 is the fastest general format - 8888 will cause a perf issue
+    config->internal_palette_format = GL_RGBA4;
+
+    config->texture_twiddle = GL_TRUE;
 }

+static bool _initialized = false;
+
 void APIENTRY glKosInitEx(GLdcConfig* config) {
+    if(_initialized) {
+        return;
+    }
+
+    _initialized = true;
+
     TRACE();

     printf("\nWelcome to GLdc! Git revision: %s\n\n", GLDC_VERSION);
@@ -70,6 +82,10 @@ void APIENTRY glKosInitEx(GLdcConfig* config) {

     _glInitTextures();

+    if(config->texture_twiddle) {
+        glEnable(GL_TEXTURE_TWIDDLE_KOS);
+    }
+
     OP_LIST.list_type = GPU_LIST_OP_POLY;
     PT_LIST.list_type = GPU_LIST_PT_POLY;
     TR_LIST.list_type = GPU_LIST_TR_POLY;
@@ -83,6 +99,12 @@ void APIENTRY glKosInitEx(GLdcConfig* config) {
     aligned_vector_reserve(&TR_LIST.vector, config->initial_tr_capacity);
 }

+void APIENTRY glKosShutdown() {
+    aligned_vector_clear(&OP_LIST.vector);
+    aligned_vector_clear(&PT_LIST.vector);
+    aligned_vector_clear(&TR_LIST.vector);
+}
+
 void APIENTRY glKosInit() {
     GLdcConfig config;
     glKosInitConfig(&config);
@@ -117,4 +139,4 @@ void APIENTRY glKosSwapBuffers() {
     aligned_vector_clear(&TR_LIST.vector);

     _glApplyScissor(true);
 }
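Editor's note: the new glKosInitConfig() defaults together with the glKosInitEx()/glKosShutdown() pair shown above suggest the following initialisation pattern. This is a sketch, not code from the commit; the include path is assumed and only the GLdcConfig fields visible in this diff are touched (everything else keeps whatever glKosInitConfig() sets):

```c
#include "GL/glkos.h"   /* assumed GLdc include path for the glKos* API */

int main(int argc, char** argv) {
    (void) argc; (void) argv;

    GLdcConfig config;
    glKosInitConfig(&config);

    /* Defaults after this commit: GL_RGBA4 palettes (faster than 8888)
     * and texture twiddling enabled. Override before init if required. */
    config.internal_palette_format = GL_RGBA8;
    config.texture_twiddle = GL_FALSE;

    glKosInitEx(&config);   /* a second call now returns early */

    /* ... render frames, calling glKosSwapBuffers() each frame ... */

    glKosShutdown();        /* new in this commit: clears the vertex lists */
    return 0;
}
```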
@@ -12,7 +12,7 @@
 #define CLIP_DEBUG 0
 #define ZNEAR_CLIPPING_ENABLED 1

-static size_t AVAILABLE_VRAM = 16 * 1024 * 1024;
+static size_t AVAILABLE_VRAM = 8 * 1024 * 1024;
 static Matrix4x4 MATRIX;

 static SDL_Window* WINDOW = NULL;
@@ -33,6 +33,10 @@ static VideoMode vid_mode = {
 AlignedVector vbuffer;

 void InitGPU(_Bool autosort, _Bool fsaa) {
+
+    // 32-bit SDL has trouble with the wayland driver for some reason
+    setenv("SDL_VIDEODRIVER", "x11", 1);
+
     SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS);

     WINDOW = SDL_CreateWindow(
@@ -48,7 +48,8 @@ void TransformVec3NoMod(const float* v, float* ret);

 /* Transform a 3-element normal using the stored matrix (w == 0)*/
 static inline void TransformNormalNoMod(const float* xIn, float* xOut) {
+    (void) xIn;
+    (void) xOut;
 }

 void TransformVertices(Vertex* vertices, const int count);
@@ -164,7 +164,9 @@ typedef struct {
     GLboolean isCompressed;
     GLboolean isPaletted;
     //50
-    GLubyte padding[14]; // Pad to 64-bytes
+    GLenum internalFormat;
+    //54
+    GLubyte padding[10]; // Pad to 64-bytes
 } __attribute__((aligned(32))) TextureObject;

 typedef struct {
@@ -376,6 +378,9 @@ extern GLubyte ACTIVE_TEXTURE;
 extern GLboolean TEXTURES_ENABLED[];

 GLubyte _glGetActiveTexture();
+GLint _glGetTextureInternalFormat();
+GLboolean _glGetTextureTwiddle();
+void _glSetTextureTwiddle(GLboolean v);

 GLuint _glGetActiveClientTexture();
 TexturePalette* _glGetSharedPalette(GLshort bank);
GL/state.c (12 changed lines)
@@ -494,7 +494,11 @@ GLAPI void APIENTRY glEnable(GLenum cap) {
             GPUState.is_dirty = GL_TRUE;
         }
     break;
+    case GL_TEXTURE_TWIDDLE_KOS:
+        _glSetTextureTwiddle(GL_TRUE);
+    break;
     default:
+        _glKosThrowError(GL_INVALID_VALUE, __func__);
         break;
     }
 }
@@ -596,7 +600,11 @@ GLAPI void APIENTRY glDisable(GLenum cap) {
             GPUState.is_dirty = GL_TRUE;
         }
     break;
+    case GL_TEXTURE_TWIDDLE_KOS:
+        _glSetTextureTwiddle(GL_FALSE);
+    break;
     default:
+        _glKosThrowError(GL_INVALID_VALUE, __func__);
         break;
     }
 }
@@ -977,6 +985,10 @@ void APIENTRY glGetIntegerv(GLenum pname, GLint *params) {
         case GL_FREE_CONTIGUOUS_TEXTURE_MEMORY_KOS:
             *params = _glFreeContiguousTextureMemory();
         break;
+        case GL_TEXTURE_INTERNAL_FORMAT_KOS:
+            *params = _glGetTextureInternalFormat();
+        break;
+
         default:
             _glKosThrowError(GL_INVALID_ENUM, __func__);
         break;
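Editor's note: the new cap and query above are plain glEnable/glGetIntegerv extensions. A short sketch of how an application might use them; the *_KOS enums are the ones this diff introduces, the include paths are assumed, and the rest is ordinary GL usage:

```c
#include <stdio.h>
#include "GL/gl.h"      /* assumed GLdc include paths; adjust to your setup */
#include "GL/glkos.h"

static void upload_untwiddled_texture(const void* pixels, int w, int h) {
    /* Twiddling defaults to enabled after this commit; switch it off around
     * an upload that must stay linear, then switch it back on. */
    glDisable(GL_TEXTURE_TWIDDLE_KOS);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, pixels);
    glEnable(GL_TEXTURE_TWIDDLE_KOS);

    /* New query: the internal format GLdc actually chose for the
     * currently bound texture. */
    GLint fmt = 0;
    glGetIntegerv(GL_TEXTURE_INTERNAL_FORMAT_KOS, &fmt);
    printf("internal format: 0x%x\n", (unsigned) fmt);
}
```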
GL/texture.c (1070 changed lines)
File diff suppressed because it is too large.
Deleted file (21 lines)
@@ -1,21 +0,0 @@
MIT License

Copyright (c) [year] [fullname]

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@ -1,158 +0,0 @@
|
||||||
# Summary
|
|
||||||
|
|
||||||
yalloc is a memory efficient allocator which is intended for embedded
|
|
||||||
applications that only have a low amount of RAM and want to maximize its
|
|
||||||
utilization. Properties of the allocator:
|
|
||||||
|
|
||||||
- pools can be up to 128k
|
|
||||||
- user data is 32bit aligned
|
|
||||||
- 4 bytes overhead per allocation
|
|
||||||
- supports defragmentation
|
|
||||||
- uses a free list for first fit allocation strategy (most recently freed
|
|
||||||
blocks are used first)
|
|
||||||
- extensively tested (see section below)
|
|
||||||
- MIT license
|
|
||||||
|
|
||||||
# Defragmentation
|
|
||||||
|
|
||||||
This feature was the initial motivation for this implementation. Especially
|
|
||||||
when dealing with highly memory constrained environments fragmenting memory
|
|
||||||
pools can be annoying. For this reason this implementation supports
|
|
||||||
defragmentation which moves all allocated blocks into a contiguous range at the
|
|
||||||
beginning of the pool, leaving a maximized free range at the end.
|
|
||||||
|
|
||||||
As there is no garbage collector or other runtime system involved that updates
|
|
||||||
the references, the application must do so. This is done in three steps:
|
|
||||||
|
|
||||||
1. yalloc_defrag_start() is called. This calculates the new
|
|
||||||
post-defragmentation-addresses for all allocations, but otherwise leaves
|
|
||||||
the allocations untouched.
|
|
||||||
|
|
||||||
2. yalloc_defrag_address() is called by the application for every pointer that
|
|
||||||
points to an allocation. It returns the post-defragmentation-address for
|
|
||||||
the allocation. The application must update all its relevant pointers this
|
|
||||||
way. Care must be taken not not yet dereference that moved pointers. If the
|
|
||||||
application works with hierarchical data then this can easily be done by
|
|
||||||
updating the pointers button up (first the leafs then their parents).
|
|
||||||
|
|
||||||
3. yalloc_defrag_commit() is called to finally perform the defragmentation.
|
|
||||||
All allocated blocks are moved to their post-defragmentation-address and
|
|
||||||
the application can continue using the pool the normal way.
|
|
||||||
|
|
||||||
It is up to the application when (and if) it performs defragmentation. One
|
|
||||||
strategy would be to delay it until an allocation failure. Another approach
|
|
||||||
would be to perform the defragmentation regularly when there is nothing else to
|
|
||||||
do.
|
|
||||||
|
|
||||||
# Configurable Defines
|
|
||||||
|
|
||||||
INTERNAL_VALIDATE
|
|
||||||
|
|
||||||
If this is not defined on the compiler commandline it will be defined as 0 if
|
|
||||||
NDEBUG is defined and otherwise as 1. If you want to disable internal
|
|
||||||
validation when NDEBUG is not defined then define INERNAL_VALIDATE as 0 on the
|
|
||||||
compiler commandline.
|
|
||||||
|
|
||||||
If it is nonzero the heap will be validated via a bunch of assert() calls at
|
|
||||||
the end of every function that modifies the heap. This has roughly O(N*M)
|
|
||||||
overhead where N is the number of allocated blocks and M the number of free
|
|
||||||
blocks in a heap. For applications with enough live allocations this will get
|
|
||||||
significant.
|
|
||||||
|
|
||||||
YALLOC_VALGRIND
|
|
||||||
|
|
||||||
If this is defined in yalloc.c and NVALGRIND is not defined then
|
|
||||||
valgrind/memcheck.h is included and the the allocator functions tell valgrind
|
|
||||||
about the pool, the allocations and makes the block headers inaccessible outside
|
|
||||||
of yalloc-functions. This allows valgrind to detect a lot of the accidents that
|
|
||||||
can happen when dealing dynamic memory. This also adds some overhead for every
|
|
||||||
yalloc-call because most of them will "unprotect" the internal structure on
|
|
||||||
entry and "protect" it again (marking it as inaccessible for valgrind) before
|
|
||||||
returning.
|
|
||||||
|
|
||||||
# Tests
|
|
||||||
|
|
||||||
The tests rely on internal validation of the pool (see INTERNAL_VALIDATE) to
|
|
||||||
check that no assumptions about the internal structure of the pool are
|
|
||||||
violated. They additionally check for correctness of observations that can be
|
|
||||||
made by using the public functions of the allocator (like checking if user data
|
|
||||||
stays unmodified). There are a few different scripts that run tests:
|
|
||||||
|
|
||||||
- run_coverage.sh runs a bunch of testfunctions that are carefully crafted to
|
|
||||||
cover all code paths. Coverage data is generated by clang and a summary is
|
|
||||||
shown at the end of the test.
|
|
||||||
|
|
||||||
- run_valgrind.sh tests if the valgrind integration is working as expected,
|
|
||||||
runs the functions from the coverage test and some randomly generated
|
|
||||||
testcases under valgrind.
|
|
||||||
|
|
||||||
- run_libfuzzer.sh uses libfuzzer from clang to generate interesting testcases
|
|
||||||
and runs them in multiple jobs in parallel for 10 seconds. It also generates
|
|
||||||
coverage data at the end (it always got 100% coverage in my testruns).
|
|
||||||
|
|
||||||
All tests exit with 0 and print "All fine!" at the end if there where no
|
|
||||||
errors. Coverage deficits are not counted as error, so you have to look at the
|
|
||||||
summary (they should show 100% coverage!).
|
|
||||||
|
|
||||||
|
|
||||||
# Implementation Details
|
|
||||||
|
|
||||||
The Headers and the user data are 32bit aligned. Headers have two 16bit fields
|
|
||||||
where the high 15 bits represent offsets (relative to the pools address) to the
|
|
||||||
previous/next block. The macros HDR_PTR() and HDR_OFFSET() are used to
|
|
||||||
translate an offset to an address and back. The 32bit alignment is exploited to
|
|
||||||
allow pools of up to 128k with that 15 significant bits.
|
|
||||||
|
|
||||||
A pool is always occupied by non-overlapping blocks that link to their
|
|
||||||
previous/next block in address order via the prev/next field of Header.
|
|
||||||
|
|
||||||
Free blocks are always joined: No two free blocks will ever be neighbors.
|
|
||||||
|
|
||||||
Free blocks have an additional header of the same structure. This additional
|
|
||||||
header is used to build a list of free blocks (independent of their address
|
|
||||||
order).
|
|
||||||
|
|
||||||
yalloc_free() will insert the freed block to the front of the free list.
|
|
||||||
yalloc_alloc() searches that list front to back and takes the first block that
|
|
||||||
is big enough to satisfy the allocation.
|
|
||||||
|
|
||||||
There is always a Header at the front and at the end of the pool. The Header at
|
|
||||||
the end is degenerate: It is marked as "used" but has no next block (which is
|
|
||||||
usually used to determine the size of a block).
|
|
||||||
|
|
||||||
The prev-field of the very first block in the pool has special meaning: It
|
|
||||||
points to the first free block in the pool. Or, if the pool is currently
|
|
||||||
defragmenting (after yalloc_defrag_start() and before yalloc_defrag_commit()),
|
|
||||||
points to the last header of the pool. This state can be recognized by checking
|
|
||||||
if it points to an empty block (normal pool state) or a used block
|
|
||||||
(defragmentation in progress). This logic can be seen in
|
|
||||||
yalloc_defrag_in_progress().
|
|
||||||
|
|
||||||
The lowest bit of next/prev have special meaning:
|
|
||||||
|
|
||||||
- low bit of prev is set for free blocks
|
|
||||||
|
|
||||||
- low bit of next is set for blocks with 32bit padding after the user data.
|
|
||||||
This is needed when a block is allocated from a free block that leaves only
|
|
||||||
4 free bytes after the user data... which is not enough to insert a
|
|
||||||
free-header (which is needs 8 bytes). The padding will be reclaimed when
|
|
||||||
that block is freed or when the pool is defragmented. The predicate
|
|
||||||
isPadded() can be used to test if a block is padded. Free blocks are never
|
|
||||||
padded.
|
|
||||||
|
|
||||||
The predicate isNil() can be used to test if an offset points nowhere (it tests
|
|
||||||
if all 15 high bits of an offset are 1). The constant NIL has all but the
|
|
||||||
lowest bit set. It is used to set offsets to point to nowhere, and in some
|
|
||||||
places it is used to mask out the actual address bits of an offset. This should
|
|
||||||
be kept in mind when modifying the code and updating prev/next: Think carefully
|
|
||||||
if you have to preserve the low bit when updating an offset!
|
|
||||||
|
|
||||||
Defragmentation is done in two phases: First the user calls
|
|
||||||
yalloc_defrag_start(). This will put the pool in a special state where no
|
|
||||||
alloc/free-calls are allowed. In this state the prev-fields of the used blocks
|
|
||||||
have a special meaning: They store the offset that the block will have after
|
|
||||||
defragmentation finished. This information is used by yalloc_defrag_address()
|
|
||||||
which can be called by the application to query the new addresses for its
|
|
||||||
allocations. After the application has updated all its pointers it must call
|
|
||||||
yalloc_defrag_commit() which moves all used blocks in contiguous space at the
|
|
||||||
beginning of the pool, leaving one maximized free block at the end.
|
|
|
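Editor's note: the three-step protocol in the deleted README above is easier to see as code. A minimal sketch, not taken from the repository; the exact parameter lists of yalloc_defrag_start/address/commit are assumed (pool, plus the pointer for the address query) based on the README's description:

```c
#include <stddef.h>
#include "yalloc.h"

/* Documented three-step defragmentation: start -> translate every live
 * pointer -> commit. `ptrs` is every pointer the application holds into
 * the pool. */
static void defrag_pool(void* pool, void** ptrs, size_t count) {
    yalloc_defrag_start(pool);                    /* 1. compute new addresses */

    for(size_t i = 0; i < count; ++i) {
        /* 2. translate, but do not dereference the result yet */
        ptrs[i] = yalloc_defrag_address(pool, ptrs[i]);
    }

    yalloc_defrag_commit(pool);                   /* 3. blocks move; the
                                                     pointers above are valid */
}
```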
@ -1,803 +0,0 @@
|
||||||
#include "yalloc.h"
|
|
||||||
#include "yalloc_internals.h"
|
|
||||||
|
|
||||||
#include <assert.h>
|
|
||||||
#include <string.h>
|
|
||||||
|
|
||||||
#define ALIGN(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
|
|
||||||
|
|
||||||
#if defined(YALLOC_VALGRIND) && !defined(NVALGRIND)
|
|
||||||
# define USE_VALGRIND 1
|
|
||||||
#else
|
|
||||||
# define USE_VALGRIND 0
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if USE_VALGRIND
|
|
||||||
# include <valgrind/memcheck.h>
|
|
||||||
#else
|
|
||||||
# define VALGRIND_MAKE_MEM_UNDEFINED(p, s) ((void)0)
|
|
||||||
# define VALGRIND_MAKE_MEM_DEFINED(p, s) ((void)0)
|
|
||||||
# define VALGRIND_MAKE_MEM_NOACCESS(p, s) ((void)0)
|
|
||||||
# define VALGRIND_CREATE_MEMPOOL(pool, rz, z) ((void)0)
|
|
||||||
# define VALGRIND_MEMPOOL_ALLOC(pool, p, s) ((void)0)
|
|
||||||
# define VALGRIND_MEMPOOL_FREE(pool, p) ((void)0)
|
|
||||||
# define VALGRIND_MEMPOOL_CHANGE(pool, a, b, s) ((void)0)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define MARK_NEW_FREE_HDR(p) VALGRIND_MAKE_MEM_UNDEFINED(p, sizeof(Header) * 2)
|
|
||||||
#define MARK_NEW_HDR(p) VALGRIND_MAKE_MEM_UNDEFINED(p, sizeof(Header))
|
|
||||||
#define PROTECT_HDR(p) VALGRIND_MAKE_MEM_NOACCESS(p, sizeof(Header))
|
|
||||||
#define PROTECT_FREE_HDR(p) VALGRIND_MAKE_MEM_NOACCESS(p, sizeof(Header) * 2)
|
|
||||||
#define UNPROTECT_HDR(p) VALGRIND_MAKE_MEM_DEFINED(p, sizeof(Header))
|
|
||||||
#define UNPROTECT_FREE_HDR(p) VALGRIND_MAKE_MEM_DEFINED(p, sizeof(Header) * 2)
|
|
||||||
|
|
||||||
|
|
||||||
#if USE_VALGRIND
|
|
||||||
static void _unprotect_pool(void * pool)
|
|
||||||
{
|
|
||||||
Header * cur = (Header*)pool;
|
|
||||||
for (;;)
|
|
||||||
{
|
|
||||||
UNPROTECT_HDR(cur);
|
|
||||||
if (isFree(cur))
|
|
||||||
UNPROTECT_HDR(cur + 1);
|
|
||||||
|
|
||||||
if (isNil(cur->next))
|
|
||||||
break;
|
|
||||||
|
|
||||||
cur = HDR_PTR(cur->next);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void _protect_pool(void * pool)
|
|
||||||
{
|
|
||||||
Header * cur = (Header*)pool;
|
|
||||||
while (cur)
|
|
||||||
{
|
|
||||||
Header * next = isNil(cur->next) ? NULL : HDR_PTR(cur->next);
|
|
||||||
|
|
||||||
if (isFree(cur))
|
|
||||||
VALGRIND_MAKE_MEM_NOACCESS(cur, (char*)next - (char*)cur);
|
|
||||||
else
|
|
||||||
PROTECT_HDR(cur);
|
|
||||||
|
|
||||||
cur = next;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#define assert_is_pool(pool) assert(VALGRIND_MEMPOOL_EXISTS(pool));
|
|
||||||
|
|
||||||
#else
|
|
||||||
|
|
||||||
static void _unprotect_pool(void * pool){(void)pool;}
|
|
||||||
static void _protect_pool(void * pool){(void)pool;}
|
|
||||||
#define assert_is_pool(pool) ((void)0)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
// internal version that does not unprotect/protect the pool
|
|
||||||
static int _yalloc_defrag_in_progress(void * pool)
|
|
||||||
{
|
|
||||||
// fragmentation is indicated by a free list with one entry: the last block of the pool, which has its "free"-bit cleared.
|
|
||||||
Header * p = (Header*)pool;
|
|
||||||
if (isNil(p->prev))
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
return !(HDR_PTR(p->prev)->prev & 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
int yalloc_defrag_in_progress(void * pool)
|
|
||||||
{
|
|
||||||
_unprotect_pool(pool);
|
|
||||||
int ret = _yalloc_defrag_in_progress(pool);
|
|
||||||
_protect_pool(pool);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
#if YALLOC_INTERNAL_VALIDATE
|
|
||||||
|
|
||||||
static size_t _count_free_list_occurences(Header * pool, Header * blk)
|
|
||||||
{
|
|
||||||
int n = 0;
|
|
||||||
if (!isNil(pool->prev))
|
|
||||||
{
|
|
||||||
Header * cur = HDR_PTR(pool->prev);
|
|
||||||
for (;;)
|
|
||||||
{
|
|
||||||
if (cur == blk)
|
|
||||||
++n;
|
|
||||||
|
|
||||||
if (isNil(cur[1].next))
|
|
||||||
break;
|
|
||||||
|
|
||||||
cur = HDR_PTR(cur[1].next);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n;
|
|
||||||
}
|
|
||||||
|
|
||||||
static size_t _count_addr_list_occurences(Header * pool, Header * blk)
|
|
||||||
{
|
|
||||||
size_t n = 0;
|
|
||||||
Header * cur = pool;
|
|
||||||
for (;;)
|
|
||||||
{
|
|
||||||
if (cur == blk)
|
|
||||||
++n;
|
|
||||||
|
|
||||||
if (isNil(cur->next))
|
|
||||||
break;
|
|
||||||
|
|
||||||
cur = HDR_PTR(cur->next);
|
|
||||||
}
|
|
||||||
return n;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void _validate_user_ptr(void * pool, void * p)
|
|
||||||
{
|
|
||||||
Header * hdr = (Header*)p - 1;
|
|
||||||
size_t n = _count_addr_list_occurences((Header*)pool, hdr);
|
|
||||||
assert(n == 1 && !isFree(hdr));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
Validates if all the invariants of a pool are intact.
|
|
||||||
|
|
||||||
This is very expensive when there are enough blocks in the heap (quadratic complexity!).
|
|
||||||
*/
|
|
||||||
static void _yalloc_validate(void * pool_)
|
|
||||||
{
|
|
||||||
Header * pool = (Header*)pool_;
|
|
||||||
Header * cur = pool;
|
|
||||||
|
|
||||||
assert(!isNil(pool->next)); // there must always be at least two blocks: a free/used one and the final block at the end
|
|
||||||
|
|
||||||
if (_yalloc_defrag_in_progress(pool))
|
|
||||||
{
|
|
||||||
Header * prevUsed = NULL;
|
|
||||||
while (!isNil(cur->next))
|
|
||||||
{
|
|
||||||
if (!isFree(cur))
|
|
||||||
{ // it is a used block
|
|
||||||
Header * newAddr = cur == pool ? pool : HDR_PTR(cur->prev);
|
|
||||||
assert(newAddr <= cur);
|
|
||||||
assert(newAddr >= pool);
|
|
||||||
|
|
||||||
if (prevUsed)
|
|
||||||
{
|
|
||||||
Header * prevNewAddr = prevUsed == pool ? pool : HDR_PTR(prevUsed->prev);
|
|
||||||
size_t prevBruttoSize = (char*)HDR_PTR(prevUsed->next) - (char*)prevUsed;
|
|
||||||
if (isPadded(prevUsed))
|
|
||||||
prevBruttoSize -= 4; // remove padding
|
|
||||||
assert((char*)newAddr == (char*)prevNewAddr + prevBruttoSize);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
assert(newAddr == pool);
|
|
||||||
}
|
|
||||||
|
|
||||||
prevUsed = cur;
|
|
||||||
}
|
|
||||||
|
|
||||||
cur = HDR_PTR(cur->next);
|
|
||||||
}
|
|
||||||
|
|
||||||
assert(cur == HDR_PTR(pool->prev)); // the free-list should point to the last block
|
|
||||||
assert(!isFree(cur)); // the last block must not be free
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
Header * prev = NULL;
|
|
||||||
|
|
||||||
// iterate blocks in address order
|
|
||||||
for (;;)
|
|
||||||
{
|
|
||||||
if (prev)
|
|
||||||
{
|
|
||||||
Header * x = HDR_PTR(cur->prev);
|
|
||||||
assert(x == prev);
|
|
||||||
}
|
|
||||||
|
|
||||||
int n = _count_free_list_occurences(pool, cur);
|
|
||||||
if (isFree(cur))
|
|
||||||
{ // it is a free block
|
|
||||||
assert(n == 1);
|
|
||||||
assert(!isPadded(cur)); // free blocks must have a zero padding-bit
|
|
||||||
|
|
||||||
if (prev)
|
|
||||||
{
|
|
||||||
assert(!isFree(prev)); // free blocks must not be direct neighbours
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
assert(n == 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (isNil(cur->next))
|
|
||||||
break;
|
|
||||||
|
|
||||||
Header * next = HDR_PTR(cur->next);
|
|
||||||
assert((char*)next >= (char*)cur + sizeof(Header) * 2);
|
|
||||||
prev = cur;
|
|
||||||
cur = next;
|
|
||||||
}
|
|
||||||
|
|
||||||
assert(isNil(cur->next));
|
|
||||||
|
|
||||||
if (!isNil(pool->prev))
|
|
||||||
{
|
|
||||||
// iterate free-list
|
|
||||||
Header * f = HDR_PTR(pool->prev);
|
|
||||||
assert(isNil(f[1].prev));
|
|
||||||
for (;;)
|
|
||||||
{
|
|
||||||
assert(isFree(f)); // must be free
|
|
||||||
|
|
||||||
int n = _count_addr_list_occurences(pool, f);
|
|
||||||
assert(n == 1);
|
|
||||||
|
|
||||||
if (isNil(f[1].next))
|
|
||||||
break;
|
|
||||||
|
|
||||||
f = HDR_PTR(f[1].next);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#else
|
|
||||||
static void _yalloc_validate(void * pool){(void)pool;}
|
|
||||||
static void _validate_user_ptr(void * pool, void * p){(void)pool; (void)p;}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
int yalloc_init(void * pool, size_t size)
|
|
||||||
{
|
|
||||||
if (size > MAX_POOL_SIZE)
|
|
||||||
return -1;
|
|
||||||
|
|
||||||
// TODO: Error when pool is not properly aligned
|
|
||||||
|
|
||||||
// TODO: Error when size is not a multiple of the alignment?
|
|
||||||
while (size % sizeof(Header))
|
|
||||||
--size;
|
|
||||||
|
|
||||||
if(size < sizeof(Header) * 3)
|
|
||||||
return -1;
|
|
||||||
|
|
||||||
VALGRIND_CREATE_MEMPOOL(pool, 0, 0);
|
|
||||||
|
|
||||||
Header * first = (Header*)pool;
|
|
||||||
Header * last = (Header*)((char*)pool + size) - 1;
|
|
||||||
|
|
||||||
MARK_NEW_FREE_HDR(first);
|
|
||||||
MARK_NEW_HDR(first);
|
|
||||||
|
|
||||||
first->prev = HDR_OFFSET(first) | 1;
|
|
||||||
first->next = HDR_OFFSET(last);
|
|
||||||
first[1].prev = NIL;
|
|
||||||
first[1].next = NIL;
|
|
||||||
|
|
||||||
last->prev = HDR_OFFSET(first);
|
|
||||||
last->next = NIL;
|
|
||||||
|
|
||||||
_unprotect_pool(pool);
|
|
||||||
_yalloc_validate(pool);
|
|
||||||
_protect_pool(pool);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void yalloc_deinit(void * pool)
|
|
||||||
{
|
|
||||||
#if USE_VALGRIND
|
|
||||||
VALGRIND_DESTROY_MEMPOOL(pool);
|
|
||||||
|
|
||||||
Header * last = (Header*)pool;
|
|
||||||
UNPROTECT_HDR(last);
|
|
||||||
while (!isNil(last->next))
|
|
||||||
{
|
|
||||||
Header * next = HDR_PTR(last->next);
|
|
||||||
UNPROTECT_HDR(next);
|
|
||||||
last = next;
|
|
||||||
}
|
|
||||||
|
|
||||||
VALGRIND_MAKE_MEM_UNDEFINED(pool, (char*)(last + 1) - (char*)pool);
|
|
||||||
#else
|
|
||||||
(void)pool;
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void * yalloc_alloc(void * pool, size_t size)
|
|
||||||
{
|
|
||||||
assert_is_pool(pool);
|
|
||||||
_unprotect_pool(pool);
|
|
||||||
assert(!_yalloc_defrag_in_progress(pool));
|
|
||||||
_yalloc_validate(pool);
|
|
||||||
if (!size)
|
|
||||||
{
|
|
||||||
_protect_pool(pool);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
Header * root = (Header*)pool;
|
|
||||||
if (isNil(root->prev))
|
|
||||||
{
|
|
||||||
_protect_pool(pool);
|
|
||||||
return NULL; /* no free block, no chance to allocate anything */ // TODO: Just read up which C standard supports single line comments and then fucking use them!
|
|
||||||
}
|
|
||||||
|
|
||||||
/* round up to alignment */
|
|
||||||
size = ALIGN(size, 32);
|
|
||||||
|
|
||||||
size_t bruttoSize = size + sizeof(Header);
|
|
||||||
Header * prev = NULL;
|
|
||||||
Header * cur = HDR_PTR(root->prev);
|
|
||||||
for (;;)
|
|
||||||
{
|
|
||||||
size_t curSize = (char*)HDR_PTR(cur->next) - (char*)cur; /* size of the block, including its header */
|
|
||||||
|
|
||||||
if (curSize >= bruttoSize) // it is big enough
|
|
||||||
{
|
|
||||||
// take action for unused space in the free block
|
|
||||||
if (curSize >= bruttoSize + sizeof(Header) * 2)
|
|
||||||
{ // the leftover space is big enough to make it a free block
|
|
||||||
// Build a free block from the unused space and insert it into the list of free blocks after the current free block
|
|
||||||
Header * tail = (Header*)((char*)cur + bruttoSize);
|
|
||||||
MARK_NEW_FREE_HDR(tail);
|
|
||||||
|
|
||||||
// update address-order-list
|
|
||||||
tail->next = cur->next;
|
|
||||||
tail->prev = HDR_OFFSET(cur) | 1;
|
|
||||||
HDR_PTR(cur->next)->prev = HDR_OFFSET(tail); // NOTE: We know the next block is used because free blocks are never neighbours. So we don't have to care about the lower bit which would be set for the prev of a free block.
|
|
||||||
cur->next = HDR_OFFSET(tail);
|
|
||||||
|
|
||||||
// update list of free blocks
|
|
||||||
tail[1].next = cur[1].next;
|
|
||||||
// NOTE: tail[1].prev is updated in the common path below (assignment to "HDR_PTR(cur[1].next)[1].prev")
|
|
||||||
|
|
||||||
if (!isNil(cur[1].next))
|
|
||||||
HDR_PTR(cur[1].next)[1].prev = HDR_OFFSET(tail);
|
|
||||||
cur[1].next = HDR_OFFSET(tail);
|
|
||||||
}
|
|
||||||
else if (curSize > bruttoSize)
|
|
||||||
{ // there will be unused space, but not enough to insert a free header
|
|
||||||
internal_assert(curSize - bruttoSize == sizeof(Header)); // unused space must be enough to build a free-block or it should be exactly the size of a Header
|
|
||||||
cur->next |= 1; // set marker for "has unused trailing space"
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
internal_assert(curSize == bruttoSize);
|
|
||||||
}
|
|
||||||
|
|
||||||
cur->prev &= NIL; // clear marker for "is a free block"
|
|
||||||
|
|
||||||
// remove from linked list of free blocks
|
|
||||||
if (prev)
|
|
||||||
prev[1].next = cur[1].next;
|
|
||||||
else
|
|
||||||
{
|
|
||||||
uint32_t freeBit = isFree(root);
|
|
||||||
root->prev = (cur[1].next & NIL) | freeBit;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!isNil(cur[1].next))
|
|
||||||
HDR_PTR(cur[1].next)[1].prev = prev ? HDR_OFFSET(prev) : NIL;
|
|
||||||
|
|
||||||
_yalloc_validate(pool);
|
|
||||||
VALGRIND_MEMPOOL_ALLOC(pool, cur + 1, size);
|
|
||||||
_protect_pool(pool);
|
|
||||||
return cur + 1; // return address after the header
|
|
||||||
}
|
|
||||||
|
|
||||||
if (isNil(cur[1].next))
|
|
||||||
break;
|
|
||||||
|
|
||||||
prev = cur;
|
|
||||||
cur = HDR_PTR(cur[1].next);
|
|
||||||
}
|
|
||||||
|
|
||||||
_yalloc_validate(pool);
|
|
||||||
_protect_pool(pool);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Removes a block from the free-list and moves the pool's first-free-block pointer to its successor if it pointed to that block.
|
|
||||||
static void unlink_from_free_list(Header * pool, Header * blk)
|
|
||||||
{
|
|
||||||
// update the pools pointer to the first block in the free list if necessary
|
|
||||||
if (isNil(blk[1].prev))
|
|
||||||
{ // the block is the first in the free-list
|
|
||||||
// make the pools first-free-pointer point to the next in the free list
|
|
||||||
uint32_t freeBit = isFree(pool);
|
|
||||||
pool->prev = (blk[1].next & NIL) | freeBit;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
HDR_PTR(blk[1].prev)[1].next = blk[1].next;
|
|
||||||
|
|
||||||
if (!isNil(blk[1].next))
|
|
||||||
HDR_PTR(blk[1].next)[1].prev = blk[1].prev;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t yalloc_block_size(void * pool, void * p)
|
|
||||||
{
|
|
||||||
Header * a = (Header*)p - 1;
|
|
||||||
UNPROTECT_HDR(a);
|
|
||||||
Header * b = HDR_PTR(a->next);
|
|
||||||
size_t payloadSize = (char*)b - (char*)p;
|
|
||||||
if (isPadded(a))
|
|
||||||
payloadSize -= sizeof(Header);
|
|
||||||
PROTECT_HDR(a);
|
|
||||||
return payloadSize;
|
|
||||||
}
|
|
||||||
|
|
||||||
void yalloc_free(void * pool_, void * p)
|
|
||||||
{
|
|
||||||
assert_is_pool(pool_);
|
|
||||||
assert(!yalloc_defrag_in_progress(pool_));
|
|
||||||
if (!p)
|
|
||||||
return;
|
|
||||||
|
|
||||||
_unprotect_pool(pool_);
|
|
||||||
|
|
||||||
Header * pool = (Header*)pool_;
|
|
||||||
Header * cur = (Header*)p - 1;
|
|
||||||
|
|
||||||
// get pointers to previous/next block in address order
|
|
||||||
Header * prev = cur == pool || isNil(cur->prev) ? NULL : HDR_PTR(cur->prev);
|
|
||||||
Header * next = isNil(cur->next) ? NULL : HDR_PTR(cur->next);
|
|
||||||
|
|
||||||
int prevFree = prev && isFree(prev);
|
|
||||||
int nextFree = next && isFree(next);
|
|
||||||
|
|
||||||
#if USE_VALGRIND
|
|
||||||
{
|
|
||||||
unsigned errs = VALGRIND_COUNT_ERRORS;
|
|
||||||
VALGRIND_MEMPOOL_FREE(pool, p);
|
|
||||||
if (VALGRIND_COUNT_ERRORS > errs)
|
|
||||||
{ // early exit if the free was invalid (so we get a valgrind error and don't mess up the pool, which is helpful for testing if invalid frees are detected by valgrind)
|
|
||||||
_protect_pool(pool_);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
_validate_user_ptr(pool_, p);
|
|
||||||
|
|
||||||
if (prevFree && nextFree)
|
|
||||||
{ // the freed block has two free neighbors
|
|
||||||
unlink_from_free_list(pool, prev);
|
|
||||||
unlink_from_free_list(pool, next);
|
|
||||||
|
|
||||||
// join prev, cur and next
|
|
||||||
prev->next = next->next;
|
|
||||||
HDR_PTR(next->next)->prev = cur->prev;
|
|
||||||
|
|
||||||
// prev is now the block we want to push onto the free-list
|
|
||||||
cur = prev;
|
|
||||||
}
|
|
||||||
else if (prevFree)
|
|
||||||
{
|
|
||||||
unlink_from_free_list(pool, prev);
|
|
||||||
|
|
||||||
// join prev and cur
|
|
||||||
prev->next = cur->next;
|
|
||||||
HDR_PTR(cur->next)->prev = cur->prev;
|
|
||||||
|
|
||||||
// prev is now the block we want to push onto the free-list
|
|
||||||
cur = prev;
|
|
||||||
}
|
|
||||||
else if (nextFree)
|
|
||||||
{
|
|
||||||
unlink_from_free_list(pool, next);
|
|
||||||
|
|
||||||
// join cur and next
|
|
||||||
cur->next = next->next;
|
|
||||||
HDR_PTR(next->next)->prev = next->prev & NIL;
|
|
||||||
}
|
|
||||||
|
|
||||||
// if there is a previous block and that block has padding then we want to grow the new free block into that padding
|
|
||||||
if (cur != pool && !isNil(cur->prev))
|
|
||||||
{ // there is a previous block
|
|
||||||
Header * left = HDR_PTR(cur->prev);
|
|
||||||
if (isPadded(left))
|
|
||||||
{ // the previous block has padding, so extend the current free block to consume the padding
|
|
||||||
Header * grown = cur - 1;
|
|
||||||
MARK_NEW_HDR(grown);
|
|
||||||
grown->next = cur->next;
|
|
||||||
grown->prev = cur->prev;
|
|
||||||
left->next = HDR_OFFSET(grown);
|
|
||||||
if (!isNil(cur->next))
|
|
||||||
HDR_PTR(cur->next)->prev = HDR_OFFSET(grown);
|
|
||||||
|
|
||||||
cur = grown;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
cur->prev |= 1; // it becomes a free block
|
|
||||||
cur->next &= NIL; // reset padding-bit
|
|
||||||
UNPROTECT_HDR(cur + 1);
|
|
||||||
cur[1].prev = NIL; // it will be the first free block in the free list, so it has no prevFree
|
|
||||||
|
|
||||||
if (!isNil(pool->prev))
|
|
||||||
{ // the free-list was already non-empty
|
|
||||||
HDR_PTR(pool->prev)[1].prev = HDR_OFFSET(cur); // make the first entry in the free list point back to the new free block (it will become the first one)
|
|
||||||
cur[1].next = pool->prev; // the next free block is the first of the old free-list
|
|
||||||
}
|
|
||||||
else
|
|
||||||
cur[1].next = NIL; // free-list was empty, so there is no successor
|
|
||||||
|
|
||||||
VALGRIND_MAKE_MEM_NOACCESS(cur + 2, (char*)HDR_PTR(cur->next) - (char*)(cur + 2));
|
|
||||||
|
|
||||||
// now the freed block is the first in the free-list
|
|
||||||
|
|
||||||
// update the offset to the first element of the free list
|
|
||||||
uint32_t freeBit = isFree(pool); // remember the free-bit of the offset
|
|
||||||
pool->prev = HDR_OFFSET(cur) | freeBit; // update the offset and restore the free-bit
|
|
||||||
_yalloc_validate(pool);
|
|
||||||
_protect_pool(pool);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t yalloc_count_free(void * pool_)
|
|
||||||
{
|
|
||||||
assert_is_pool(pool_);
|
|
||||||
_unprotect_pool(pool_);
|
|
||||||
assert(!_yalloc_defrag_in_progress(pool_));
|
|
||||||
Header * pool = (Header*)pool_;
|
|
||||||
size_t bruttoFree = 0;
|
|
||||||
Header * cur = pool;
|
|
||||||
|
|
||||||
_yalloc_validate(pool);
|
|
||||||
|
|
||||||
for (;;)
|
|
||||||
{
|
|
||||||
if (isFree(cur))
|
|
||||||
{ // it is a free block
|
|
||||||
bruttoFree += (char*)HDR_PTR(cur->next) - (char*)cur;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{ // it is a used block
|
|
||||||
if (isPadded(cur))
|
|
||||||
{ // the used block is padded
|
|
||||||
bruttoFree += sizeof(Header);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (isNil(cur->next))
|
|
||||||
break;
|
|
||||||
|
|
||||||
cur = HDR_PTR(cur->next);
|
|
||||||
}
|
|
||||||
|
|
||||||
_protect_pool(pool);
|
|
||||||
|
|
||||||
if (bruttoFree < sizeof(Header))
|
|
||||||
{
|
|
||||||
internal_assert(!bruttoFree); // free space should always be a multiple of sizeof(Header)
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
return bruttoFree - sizeof(Header);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t yalloc_count_continuous(void * pool_)
|
|
||||||
{
|
|
||||||
assert_is_pool(pool_);
|
|
||||||
_unprotect_pool(pool_);
|
|
||||||
assert(!_yalloc_defrag_in_progress(pool_));
|
|
||||||
Header * pool = (Header*)pool_;
|
|
||||||
size_t largestFree = 0;
|
|
||||||
Header * cur = pool;
|
|
||||||
|
|
||||||
_yalloc_validate(pool);
|
|
||||||
|
|
||||||
for (;;)
|
|
||||||
{
|
|
||||||
if (isFree(cur))
|
|
||||||
{ // it is a free block
|
|
||||||
size_t temp = (uintptr_t)HDR_PTR(cur->next) - (uintptr_t)cur;
|
|
||||||
if(temp > largestFree)
|
|
||||||
largestFree = temp;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (isNil(cur->next))
|
|
||||||
break;
|
|
||||||
|
|
||||||
cur = HDR_PTR(cur->next);
|
|
||||||
}
|
|
||||||
|
|
||||||
_protect_pool(pool);
|
|
||||||
|
|
||||||
if (largestFree < sizeof(Header))
|
|
||||||
{
|
|
||||||
internal_assert(!largestFree); // free space should always be a multiple of sizeof(Header)
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
return largestFree - sizeof(Header);
|
|
||||||
}
|
|
||||||
|
|
||||||
void * yalloc_first_used(void * pool)
|
|
||||||
{
|
|
||||||
assert_is_pool(pool);
|
|
||||||
_unprotect_pool(pool);
|
|
||||||
Header * blk = (Header*)pool;
|
|
||||||
while (!isNil(blk->next))
|
|
||||||
{
|
|
||||||
if (!isFree(blk))
|
|
||||||
{
|
|
||||||
_protect_pool(pool);
|
|
||||||
return blk + 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
blk = HDR_PTR(blk->next);
|
|
||||||
}
|
|
||||||
|
|
||||||
_protect_pool(pool);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
void * yalloc_next_used(void * pool, void * p)
|
|
||||||
{
|
|
||||||
assert_is_pool(pool);
|
|
||||||
_unprotect_pool(pool);
|
|
||||||
_validate_user_ptr(pool, p);
|
|
||||||
Header * prev = (Header*)p - 1;
|
|
||||||
assert(!isNil(prev->next)); // the last block should never end up as input to this function (because it is not user-visible)
|
|
||||||
|
|
||||||
Header * blk = HDR_PTR(prev->next);
|
|
||||||
while (!isNil(blk->next))
|
|
||||||
{
|
|
||||||
if (!isFree(blk))
|
|
||||||
{
|
|
||||||
_protect_pool(pool);
|
|
||||||
return blk + 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
blk = HDR_PTR(blk->next);
|
|
||||||
}
|
|
||||||
|
|
||||||
_protect_pool(pool);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
void yalloc_defrag_start(void * pool_)
|
|
||||||
{
|
|
||||||
assert_is_pool(pool_);
|
|
||||||
_unprotect_pool(pool_);
|
|
||||||
assert(!_yalloc_defrag_in_progress(pool_));
|
|
||||||
Header * pool = (Header*)pool_;
|
|
||||||
|
|
||||||
// iterate over all blocks in address order and store the post-defragment address of used blocks in their "prev" field
|
|
||||||
size_t end = 0; // offset for the next used block
|
|
||||||
Header * blk = (Header*)pool;
|
|
||||||
for (; !isNil(blk->next); blk = HDR_PTR(blk->next))
|
|
||||||
{
|
|
||||||
if (!isFree(blk))
|
|
||||||
{ // it is a used block
|
|
||||||
blk->prev = end >> 1;
|
|
||||||
internal_assert((char*)HDR_PTR(blk->prev) == (char*)pool + end);
|
|
||||||
|
|
||||||
size_t bruttoSize = (char*)HDR_PTR(blk->next) - (char*)blk;
|
|
||||||
|
|
||||||
if (isPadded(blk))
|
|
||||||
{ // the block is padded
|
|
||||||
bruttoSize -= sizeof(Header);
|
|
||||||
}
|
|
||||||
|
|
||||||
end += bruttoSize;
|
|
||||||
internal_assert(end % sizeof(Header) == 0);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// blk is now the last block (the dummy "used" block at the end of the pool)
|
|
||||||
internal_assert(isNil(blk->next));
|
|
||||||
internal_assert(!isFree(blk));
|
|
||||||
|
|
||||||
// mark the pool as "defragmentation in progress"
|
|
||||||
uint32_t freeBit = isFree(pool);
|
|
||||||
pool->prev = (HDR_OFFSET(blk) & NIL) | freeBit;
|
|
||||||
|
|
||||||
_yalloc_validate(pool);
|
|
||||||
internal_assert(yalloc_defrag_in_progress(pool));
|
|
||||||
_protect_pool(pool);
|
|
||||||
}
|
|
||||||
|
|
||||||
void * yalloc_defrag_address(void * pool_, void * p)
|
|
||||||
{
|
|
||||||
assert_is_pool(pool_);
|
|
||||||
assert(yalloc_defrag_in_progress(pool_));
|
|
||||||
if (!p)
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
Header * pool = (Header*)pool_;
|
|
||||||
|
|
||||||
_unprotect_pool(pool);
|
|
||||||
_validate_user_ptr(pool_, p);
|
|
||||||
|
|
||||||
if (pool + 1 == p)
|
|
||||||
return pool + 1; // "prev" of the first block points to the last used block to mark the pool as "defragmentation in progress"
|
|
||||||
|
|
||||||
Header * blk = (Header*)p - 1;
|
|
||||||
|
|
||||||
void * defragP = HDR_PTR(blk->prev) + 1;
|
|
||||||
|
|
||||||
_protect_pool(pool);
|
|
||||||
return defragP;
|
|
||||||
}
|
|
||||||
|
|
||||||
void yalloc_defrag_commit(void * pool_)
|
|
||||||
{
|
|
||||||
assert_is_pool(pool_);
|
|
||||||
_unprotect_pool(pool_);
|
|
||||||
assert(_yalloc_defrag_in_progress(pool_));
|
|
||||||
Header * pool = (Header*)pool_;
|
|
||||||
|
|
||||||
// iterate over all blocks in address order and move them
|
|
||||||
size_t end = 0; // offset for the next used block
|
|
||||||
Header * blk = pool;
|
|
||||||
Header * lastUsed = NULL;
|
|
||||||
while (!isNil(blk->next))
|
|
||||||
{
|
|
||||||
if (!isFree(blk))
|
|
||||||
{ // it is a used block
|
|
||||||
size_t bruttoSize = (char*)HDR_PTR(blk->next) - (char*)blk;
|
|
||||||
|
|
||||||
if (isPadded(blk))
|
|
||||||
{ // the block is padded
|
|
||||||
bruttoSize -= sizeof(Header);
|
|
||||||
}
|
|
||||||
|
|
||||||
Header * next = HDR_PTR(blk->next);
|
|
||||||
|
|
||||||
blk->prev = lastUsed ? HDR_OFFSET(lastUsed) : NIL;
|
|
||||||
blk->next = (end + bruttoSize) >> 1;
|
|
||||||
|
|
||||||
lastUsed = (Header*)((char*)pool + end);
|
|
||||||
VALGRIND_MAKE_MEM_UNDEFINED(lastUsed, (char*)blk - (char*)lastUsed);
|
|
||||||
memmove(lastUsed, blk, bruttoSize);
|
|
||||||
VALGRIND_MEMPOOL_CHANGE(pool, blk + 1, lastUsed + 1, bruttoSize - sizeof(Header));
|
|
||||||
|
|
||||||
end += bruttoSize;
|
|
||||||
blk = next;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
blk = HDR_PTR(blk->next);
|
|
||||||
}
|
|
||||||
|
|
||||||
// blk is now the last block (the dummy "used" block at the end of the pool)
|
|
||||||
internal_assert(isNil(blk->next));
|
|
||||||
internal_assert(!isFree(blk));
|
|
||||||
|
|
||||||
if (lastUsed)
|
|
||||||
{
|
|
||||||
Header * gap = HDR_PTR(lastUsed->next);
|
|
||||||
if (gap == blk)
|
|
||||||
{ // there is no gap
|
|
||||||
pool->prev = NIL; // the free list is empty
|
|
||||||
blk->prev = HDR_OFFSET(lastUsed);
|
|
||||||
}
|
|
||||||
else if (blk - gap > 1)
|
|
||||||
{ // the gap is big enough for a free Header
|
|
||||||
|
|
||||||
// set a free list that contains the gap as only element
|
|
||||||
gap->prev = HDR_OFFSET(lastUsed) | 1;
|
|
||||||
gap->next = HDR_OFFSET(blk);
|
|
||||||
gap[1].prev = NIL;
|
|
||||||
gap[1].next = NIL;
|
|
||||||
pool->prev = blk->prev = HDR_OFFSET(gap);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{ // there is a gap, but it is too small to be used as free-list-node, so just make it padding of the last used block
|
|
||||||
lastUsed->next = HDR_OFFSET(blk) | 1;
|
|
||||||
pool->prev = NIL;
|
|
||||||
blk->prev = HDR_OFFSET(lastUsed);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{ // the pool is empty
|
|
||||||
pool->prev = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
internal_assert(!_yalloc_defrag_in_progress(pool));
|
|
||||||
_yalloc_validate(pool);
|
|
||||||
_protect_pool(pool);
|
|
||||||
}
|
|
|
@ -1,176 +0,0 @@
|
||||||
/**
|
|
||||||
@file
|
|
||||||
|
|
||||||
API of the yalloc allocator.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef YALLOC_H
|
|
||||||
#define YALLOC_H
|
|
||||||
|
|
||||||
#include <stddef.h>
|
|
||||||
|
|
||||||
/**
|
|
||||||
Maximum supported pool size. yalloc_init() will fail for larger pools.
|
|
||||||
*/
|
|
||||||
#define MAX_POOL_SIZE ((2 << 24) - 4)
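For reference, that evaluates to (2 << 24) - 4 = 33,554,428 bytes, i.e. just under 32 MiB.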
|
|
||||||
|
|
||||||
/**
|
|
||||||
Creates a pool inside a given buffer.
|
|
||||||
|
|
||||||
Pools must be deinitialized with yalloc_deinit() when they are no longer needed.
|
|
||||||
|
|
||||||
@param pool The starting address of the pool. It must be at least 4-byte
aligned (the internal structure uses 32-bit integers). Allocations are placed
at 32-byte boundaries relative to this address, so if the user data should be
32-byte aligned then this address has to be 32-byte aligned as well. Typically
an address of static memory, or an array on the stack is used if the pool is
only used temporarily.
|
|
||||||
@param size Size of the pool.
|
|
||||||
@return 0 on success, nonzero if the size is not supported.
|
|
||||||
*/
|
|
||||||
int yalloc_init(void * pool, size_t size);
|
|
||||||
|
|
||||||
/**
|
|
||||||
Deinitializes the buffer that is used by the pool and makes it available for other use.
|
|
||||||
|
|
||||||
The content of the buffer is undefined after this.
|
|
||||||
|
|
||||||
@param pool The starting address of an initialized pool.
|
|
||||||
*/
|
|
||||||
void yalloc_deinit(void * pool);
|
|
||||||
|
|
||||||
/**
|
|
||||||
Allocates a block of memory from a pool.
|
|
||||||
|
|
||||||
This function mimics malloc().
|
|
||||||
|
|
||||||
The pool must not be in the "defragmenting" state when this function is called.
|
|
||||||
|
|
||||||
@param pool The starting address of an initialized pool.
|
|
||||||
@param size Number of bytes to allocate.
|
|
||||||
@return Allocated buffer or \c NULL if there was no free range that could serve
|
|
||||||
the allocation. See @ref yalloc_defrag_start() for a way to remove
|
|
||||||
fragmentation which may cause allocations to fail even when there is enough
|
|
||||||
space in total.
|
|
||||||
*/
|
|
||||||
void * yalloc_alloc(void * pool, size_t size);
|
|
||||||
|
|
||||||
/**
|
|
||||||
Returns an allocation to a pool.
|
|
||||||
|
|
||||||
This function mimics free().
|
|
||||||
|
|
||||||
The pool must not be in the "defragmenting" state when this function is called.
|
|
||||||
|
|
||||||
@param pool The starting address of the initialized pool the allocation comes from.
|
|
||||||
@param p An address that was returned from yalloc_alloc() of the same pool.
|
|
||||||
*/
|
|
||||||
void yalloc_free(void * pool, void * p);
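Taken together, the calls above mimic the familiar malloc()/free() pattern, just scoped to a caller-provided buffer. A minimal sketch of the (now-removed) yalloc API; the buffer name, size and alignment attribute are illustrative assumptions, not part of the API:

#include "yalloc.h"

static char pool_buf[16 * 1024] __attribute__((aligned(32))); /* hypothetical pool storage */

void pool_example(void)
{
  if (yalloc_init(pool_buf, sizeof(pool_buf)) != 0)
    return;                               /* unsupported size */

  void * p = yalloc_alloc(pool_buf, 100); /* behaves like malloc() */
  if (p)
    yalloc_free(pool_buf, p);             /* behaves like free() */

  yalloc_deinit(pool_buf);                /* buffer is reusable afterwards */
}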
|
|
||||||
|
|
||||||
/**
|
|
||||||
Returns the maximum size of a successful allocation (assuming a completely unfragmented heap).
|
|
||||||
|
|
||||||
After defragmentation the first allocation with the returned size is guaranteed to succeed.
|
|
||||||
|
|
||||||
@param pool The starting address of an initialized pool.
|
|
||||||
@return Number of bytes that can be allocated (assuming the pool is defragmented).
|
|
||||||
*/
|
|
||||||
size_t yalloc_count_free(void * pool);
|
|
||||||
|
|
||||||
/**
|
|
||||||
Returns the maximum continuous free area.
|
|
||||||
|
|
||||||
@param pool The starting address of an initialized pool.
|
|
||||||
@return Number of free bytes that exist continuously.
|
|
||||||
*/
|
|
||||||
size_t yalloc_count_continuous(void * pool_);
|
|
||||||
|
|
||||||
/**
|
|
||||||
Queries the usable size of an allocated block.
|
|
||||||
|
|
||||||
@param pool The starting address of the initialized pool the allocation comes from.
|
|
||||||
@param p An address that was returned from yalloc_alloc() of the same pool.
|
|
||||||
@return Size of the memory block. This is the size passed to @ref yalloc_alloc() rounded up to the allocation granularity (32 bytes in this version).
|
|
||||||
*/
|
|
||||||
size_t yalloc_block_size(void * pool, void * p);
|
|
||||||
|
|
||||||
/**
|
|
||||||
Finds the first (in address order) allocation of a pool.
|
|
||||||
|
|
||||||
@param pool The starting address of an initialized pool.
|
|
||||||
@return Address of the allocation the lowest address inside the pool (this is
|
|
||||||
what @ref yalloc_alloc() returned), or \c NULL if there is no used block.
|
|
||||||
*/
|
|
||||||
void * yalloc_first_used(void * pool);
|
|
||||||
|
|
||||||
/**
|
|
||||||
Given a pointer to an allocation finds the next (in address order) used block of a pool.
|
|
||||||
|
|
||||||
@param pool The starting address of the initialized pool the allocation comes from.
|
|
||||||
@param p Pointer to an allocation in that pool, typically comes from a previous
|
|
||||||
call to @ref yalloc_first_used()
|
|
||||||
*/
|
|
||||||
void * yalloc_next_used(void * pool, void * p);
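Together, yalloc_first_used() and yalloc_next_used() allow walking every live allocation in address order. A short sketch under the same assumptions as above; yalloc_next_used() returns NULL after the last used block, matching the implementation earlier in this diff:

void visit_used_blocks(void * pool)
{
  for (void * p = yalloc_first_used(pool); p; p = yalloc_next_used(pool, p))
  {
    size_t usable = yalloc_block_size(pool, p); /* usable payload size of this allocation */
    (void) usable;                              /* ...inspect or copy the allocation here... */
  }
}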
|
|
||||||
|
|
||||||
/**
|
|
||||||
Starts defragmentation for a pool.
|
|
||||||
|
|
||||||
Allocations will stay where they are, but the pool is put in the "defragmenting"
|
|
||||||
state (see @ref yalloc_defrag_in_progress()).
|
|
||||||
|
|
||||||
The pool must not be in the "defragmenting" state when this function is called.
|
|
||||||
The pool is put into the "defragmenting" state by this function.
|
|
||||||
|
|
||||||
@param pool The starting address of an initialized pool.
|
|
||||||
*/
|
|
||||||
void yalloc_defrag_start(void * pool);
|
|
||||||
|
|
||||||
/**
|
|
||||||
Returns the address that an allocation will have after @ref yalloc_defrag_commit() is called.
|
|
||||||
|
|
||||||
The pool must be in the "defragmenting" state when this function is called.
|
|
||||||
|
|
||||||
@param pool The starting address of the initialized pool the allocation comes from.
|
|
||||||
@param p Pointer to an allocation in that pool.
|
|
||||||
@return The address the allocation will have after @ref yalloc_defrag_commit() is called.
|
|
||||||
*/
|
|
||||||
void * yalloc_defrag_address(void * pool, void * p);
|
|
||||||
|
|
||||||
/**
|
|
||||||
Finishes the defragmentation.
|
|
||||||
|
|
||||||
The content of all allocations in the pool will be moved to the address that
|
|
||||||
was reported by @ref yalloc_defrag_address(). The pool will then have only one
|
|
||||||
free block. This means that a <tt>yalloc_alloc(pool, yalloc_count_free(pool))</tt>
|
|
||||||
will succeed.
|
|
||||||
|
|
||||||
The pool must be in the "defragmenting" state when this function is called. The
|
|
||||||
pool is put back to normal state by this function.
|
|
||||||
|
|
||||||
@param pool The starting address of an initialized pool.
|
|
||||||
*/
|
|
||||||
void yalloc_defrag_commit(void * pool);
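The defragmentation API is therefore a three-step protocol: start, translate every pointer the application still holds, then commit. A hedged sketch, assuming the caller tracks its allocations in an array named ptrs (illustrative, not part of the API):

void defrag_pool(void * pool, void ** ptrs, size_t count)
{
  yalloc_defrag_start(pool);

  /* Nothing has moved yet: ask for each allocation's post-defrag address
   * and rewrite our own references first. */
  for (size_t i = 0; i < count; ++i)
    ptrs[i] = yalloc_defrag_address(pool, ptrs[i]);

  /* Now the payloads are moved and the pool ends up with a single free block. */
  yalloc_defrag_commit(pool);
}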
|
|
||||||
|
|
||||||
/**
|
|
||||||
Tells if the pool is in the "defragmenting" state (after a @ref yalloc_defrag_start() and before a @ref yalloc_defrag_commit()).
|
|
||||||
|
|
||||||
@param pool The starting address of an initialized pool.
|
|
||||||
@return Nonzero if the pool is currently in the "defragmenting" state.
|
|
||||||
*/
|
|
||||||
int yalloc_defrag_in_progress(void * pool);
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
|
||||||
Helper function that dumps the state of the pool to stdout.
|
|
||||||
|
|
||||||
This function is only available if built with <tt>yalloc_dump.c</tt>. This
|
|
||||||
function only exists for debugging purposes and can be ignored by normal users
|
|
||||||
that are not interested in the internal structure of the implementation.
|
|
||||||
|
|
||||||
@param pool The starting address of an initialized pool.
|
|
||||||
@param name A string that is used as "Title" for the output.
|
|
||||||
*/
|
|
||||||
void yalloc_dump(void * pool, char * name);
|
|
||||||
|
|
||||||
|
|
||||||
#endif // YALLOC_H
|
|
|
@ -1,39 +0,0 @@
|
||||||
#include "yalloc_internals.h"
|
|
||||||
|
|
||||||
#include <stdio.h>
|
|
||||||
|
|
||||||
static void printOffset(void * pool, char * name, uint16_t offset)
|
|
||||||
{
|
|
||||||
if (isNil(offset))
|
|
||||||
printf(" %s: nil\n", name);
|
|
||||||
else
|
|
||||||
printf(" %s: %td\n", name, (char*)HDR_PTR(offset) - (char*)pool);
|
|
||||||
}
|
|
||||||
|
|
||||||
void yalloc_dump(void * pool, char * name)
|
|
||||||
{
|
|
||||||
printf("---- %s ----\n", name);
|
|
||||||
Header * cur = (Header*)pool;
|
|
||||||
for (;;)
|
|
||||||
{
|
|
||||||
printf(isFree(cur) ? "%td: free @%p\n" : "%td: used @%p\n", (char*)cur - (char*)pool, cur);
|
|
||||||
printOffset(pool, cur == pool ? "first free" : "prev", cur->prev);
|
|
||||||
printOffset(pool, "next", cur->next);
|
|
||||||
if (isFree(cur))
|
|
||||||
{
|
|
||||||
printOffset(pool, "prevFree", cur[1].prev);
|
|
||||||
printOffset(pool, "nextFree", cur[1].next);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
printf(" payload includes padding: %i\n", isPadded(cur));
|
|
||||||
|
|
||||||
if (isNil(cur->next))
|
|
||||||
break;
|
|
||||||
|
|
||||||
printf(" %td bytes payload\n", (char*)HDR_PTR(cur->next) - (char*)cur - sizeof(Header));
|
|
||||||
|
|
||||||
cur = HDR_PTR(cur->next);
|
|
||||||
}
|
|
||||||
|
|
||||||
fflush(stdout);
|
|
||||||
}
|
|
|
@ -1,63 +0,0 @@
|
||||||
#ifndef YALLOC_INTERNALS_H
#define YALLOC_INTERNALS_H

#include <stdint.h>

typedef struct
{
  uint32_t prev; // low bit set if free
  uint32_t next; // for used blocks: low bit set if unused header at the end

  /* We need user data to be 32-byte aligned, so the header needs
   * to be 32 bytes in size (as user data follows the header) */
  uint8_t padding[32 - (sizeof(uint32_t) * 2)];
} Header;

// NOTE: Data is 32-byte aligned and prev/next hold 32bit offsets whose lowest bit is used as a flag. The stored value is the byte offset shifted right by one, so the remaining bits easily cover the maximum pool size.

#define NIL 0xFFFFFFFEu

// return Header-address for a prev/next
#define HDR_PTR(offset) ((Header*)((char*)pool + (((offset) & NIL)<<1)))

// return a prev/next for a Header-address
#define HDR_OFFSET(blockPtr) ((uint32_t)(((char*)blockPtr - (char*)pool) >> 1))

#ifndef YALLOC_INTERNAL_VALIDATE
# ifdef NDEBUG
#  define YALLOC_INTERNAL_VALIDATE 0
# else
#  define YALLOC_INTERNAL_VALIDATE 1
# endif
#endif


/*
internal_assert() is used in some places to check internal expectations.
Activate this if you modify the code to detect problems as early as possible.
In other cases this should be deactivated.
*/
#if 0
#define internal_assert assert
#else
#define internal_assert(condition) ((void) 0)
#endif

// detects offsets that point nowhere
static inline int isNil(uint32_t offset)
{
  return (offset | 1) == 0xFFFFFFFF;
}

static inline int isFree(Header * hdr)
{
  return hdr->prev & 1;
}

static inline int isPadded(Header * hdr)
{
  return hdr->next & 1;
}


#endif // YALLOC_INTERNALS_H
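As a reference for how the offset encoding above round-trips (a sketch only; note that HDR_PTR and HDR_OFFSET deliberately capture a local variable named pool):

static int offset_round_trips(void * pool, Header * blk)
{
  uint32_t off = HDR_OFFSET(blk); /* byte offset halved; low bit left free for the free/padded flags */
  return HDR_PTR(off) == blk;     /* masking the low bit and shifting back recovers the header address */
}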
|
|
|
@ -310,7 +310,7 @@ __BEGIN_DECLS
|
||||||
#define GL_4_BYTES 0x1409
|
#define GL_4_BYTES 0x1409
|
||||||
|
|
||||||
/* ErrorCode */
|
/* ErrorCode */
|
||||||
#define GL_NO_ERROR 0
|
#define GL_NO_ERROR ((GLenum) 0)
|
||||||
#define GL_INVALID_ENUM 0x0500
|
#define GL_INVALID_ENUM 0x0500
|
||||||
#define GL_INVALID_VALUE 0x0501
|
#define GL_INVALID_VALUE 0x0501
|
||||||
#define GL_INVALID_OPERATION 0x0502
|
#define GL_INVALID_OPERATION 0x0502
|
||||||
|
@ -371,6 +371,31 @@ __BEGIN_DECLS
|
||||||
#define GL_RGBA 0x1908
|
#define GL_RGBA 0x1908
|
||||||
#define GL_LUMINANCE 0x1909
|
#define GL_LUMINANCE 0x1909
|
||||||
#define GL_LUMINANCE_ALPHA 0x190A
|
#define GL_LUMINANCE_ALPHA 0x190A
|
||||||
|
|
||||||
|
#define GL_R3_G3_B2 0x2A10
|
||||||
|
|
||||||
|
#define GL_ALPHA4 0x803B
|
||||||
|
#define GL_ALPHA8 0x803C
|
||||||
|
#define GL_ALPHA12 0x803D
|
||||||
|
#define GL_ALPHA16 0x803E
|
||||||
|
|
||||||
|
#define GL_LUMINANCE4 0x803F
|
||||||
|
#define GL_LUMINANCE8 0x8040
|
||||||
|
#define GL_LUMINANCE12 0x8041
|
||||||
|
#define GL_LUMINANCE16 0x8042
|
||||||
|
|
||||||
|
#define GL_LUMINANCE4_ALPHA4 0x8043
|
||||||
|
#define GL_LUMINANCE6_ALPHA2 0x8044
|
||||||
|
#define GL_LUMINANCE8_ALPHA8 0x8045
|
||||||
|
#define GL_LUMINANCE12_ALPHA4 0x8046
|
||||||
|
#define GL_LUMINANCE12_ALPHA12 0x8047
|
||||||
|
#define GL_LUMINANCE16_ALPHA16 0x8048
|
||||||
|
|
||||||
|
#define GL_INTENSITY4 0x804A
|
||||||
|
#define GL_INTENSITY8 0x804B
|
||||||
|
#define GL_INTENSITY12 0x804C
|
||||||
|
#define GL_INTENSITY16 0x804D
|
||||||
|
|
||||||
#define GL_BGRA 0x80E1
|
#define GL_BGRA 0x80E1
|
||||||
#define GL_INTENSITY 0x8049
|
#define GL_INTENSITY 0x8049
|
||||||
#define GL_RGB4 0x804F
|
#define GL_RGB4 0x804F
|
||||||
|
@ -387,6 +412,14 @@ __BEGIN_DECLS
|
||||||
#define GL_RGBA12 0x805A
|
#define GL_RGBA12 0x805A
|
||||||
#define GL_RGBA16 0x805B
|
#define GL_RGBA16 0x805B
|
||||||
|
|
||||||
|
#define GL_R8 0x8229
|
||||||
|
#define GL_RG8 0x822B
|
||||||
|
#define GL_RG 0x8227
|
||||||
|
#define GL_R16 0x822A
|
||||||
|
#define GL_RG16 0x822C
|
||||||
|
#define GL_COMPRESSED_RED 0x8225
|
||||||
|
#define GL_COMPRESSED_RG 0x8226
|
||||||
|
|
||||||
/* Polygons */
|
/* Polygons */
|
||||||
#define GL_POINT 0x1B00
|
#define GL_POINT 0x1B00
|
||||||
#define GL_LINE 0x1B01
|
#define GL_LINE 0x1B01
|
||||||
|
|
|
@ -35,8 +35,6 @@ extern const char* GLDC_VERSION;
|
||||||
|
|
||||||
#define GL_NEARZ_CLIPPING_KOS 0xEEFA
|
#define GL_NEARZ_CLIPPING_KOS 0xEEFA
|
||||||
|
|
||||||
#define GL_UNSIGNED_BYTE_TWID_KOS 0xEEFB
|
|
||||||
|
|
||||||
|
|
||||||
/* Initialize the GL pipeline. GL will initialize the PVR. */
|
/* Initialize the GL pipeline. GL will initialize the PVR. */
|
||||||
GLAPI void APIENTRY glKosInit();
|
GLAPI void APIENTRY glKosInit();
|
||||||
|
@ -57,6 +55,13 @@ typedef struct {
|
||||||
GLuint initial_pt_capacity;
|
GLuint initial_pt_capacity;
|
||||||
GLuint initial_immediate_capacity;
|
GLuint initial_immediate_capacity;
|
||||||
|
|
||||||
|
/* Default: True
|
||||||
|
*
|
||||||
|
* Whether glTexImage should automatically twiddle textures
|
||||||
|
* if the internal format is a generic format (e.g. GL_RGB).
|
||||||
|
* this is the same as calling glEnable(GL_TEXTURE_TWIDDLE_KOS)
|
||||||
|
* on boot */
|
||||||
|
GLboolean texture_twiddle;
|
||||||
} GLdcConfig;
|
} GLdcConfig;
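As a usage sketch of the new field (mirroring what the new unit tests later in this merge request do), twiddling can be turned off for generic formats at init time instead of via glEnable/glDisable:

#include <GL/glkos.h>

void init_gldc_without_twiddling(void)
{
    GLdcConfig config;
    glKosInitConfig(&config);           /* fill the struct with defaults */
    config.texture_twiddle = GL_FALSE;  /* uploads of generic formats stay linear */
    glKosInitEx(&config);
}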
|
||||||
|
|
||||||
|
|
||||||
|
@ -87,7 +92,7 @@ GLAPI void APIENTRY glKosInitConfig(GLdcConfig* config);
|
||||||
*/
|
*/
|
||||||
GLAPI void APIENTRY glKosInitEx(GLdcConfig* config);
|
GLAPI void APIENTRY glKosInitEx(GLdcConfig* config);
|
||||||
GLAPI void APIENTRY glKosSwapBuffers();
|
GLAPI void APIENTRY glKosSwapBuffers();
|
||||||
|
GLAPI void APIENTRY glKosShutdown();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* CUSTOM EXTENSION multiple_shared_palette_KOS
|
* CUSTOM EXTENSION multiple_shared_palette_KOS
|
||||||
|
@ -186,12 +191,28 @@ GLAPI void APIENTRY glKosSwapBuffers();
|
||||||
/* Memory allocation extension (GL_KOS_texture_memory_management) */
|
/* Memory allocation extension (GL_KOS_texture_memory_management) */
|
||||||
GLAPI GLvoid APIENTRY glDefragmentTextureMemory_KOS(void);
|
GLAPI GLvoid APIENTRY glDefragmentTextureMemory_KOS(void);
|
||||||
|
|
||||||
|
/* glGet extensions */
|
||||||
#define GL_FREE_TEXTURE_MEMORY_KOS 0xEF3D
|
#define GL_FREE_TEXTURE_MEMORY_KOS 0xEF3D
|
||||||
#define GL_USED_TEXTURE_MEMORY_KOS 0xEF3E
|
#define GL_USED_TEXTURE_MEMORY_KOS 0xEF3E
|
||||||
#define GL_FREE_CONTIGUOUS_TEXTURE_MEMORY_KOS 0xEF3F
|
#define GL_FREE_CONTIGUOUS_TEXTURE_MEMORY_KOS 0xEF3F
|
||||||
|
|
||||||
//for palette internal format (glfcConfig)
|
//for palette internal format (glfcConfig)
|
||||||
#define GL_RGB565_KOS 0xEF40
|
#define GL_RGB565_KOS 0xEF40
|
||||||
|
#define GL_ARGB4444_KOS 0xEF41
|
||||||
|
#define GL_ARGB1555_KOS 0xEF42
|
||||||
|
#define GL_RGB565_TWID_KOS 0xEF43
|
||||||
|
#define GL_ARGB4444_TWID_KOS 0xEF44
|
||||||
|
#define GL_ARGB1555_TWID_KOS 0xEF45
|
||||||
|
#define GL_COLOR_INDEX8_TWID_KOS 0xEF46
|
||||||
|
#define GL_COLOR_INDEX4_TWID_KOS 0xEF47
|
||||||
|
#define GL_RGB_TWID_KOS 0xEF48
|
||||||
|
#define GL_RGBA_TWID_KOS 0xEF49
|
||||||
|
|
||||||
|
/* glGet extensions */
|
||||||
|
#define GL_TEXTURE_INTERNAL_FORMAT_KOS 0xEF50
|
||||||
|
|
||||||
|
/* If enabled, will twiddle texture uploads where possible */
|
||||||
|
#define GL_TEXTURE_TWIDDLE_KOS 0xEF51
|
||||||
|
|
||||||
__END_DECLS
|
__END_DECLS
|
||||||
|
|
||||||
|
|
|
@ -145,7 +145,7 @@ int check_start() {
|
||||||
|
|
||||||
void DrawCube(float x, float z) {
|
void DrawCube(float x, float z) {
|
||||||
static float pos = 0.0f;
|
static float pos = 0.0f;
|
||||||
const static float radius = 30.0f;
|
static const float radius = 30.0f;
|
||||||
|
|
||||||
pos += 0.001f;
|
pos += 0.001f;
|
||||||
|
|
||||||
|
|
|
@ -53,10 +53,10 @@ void LoadGLTextures() {
|
||||||
|
|
||||||
// 2d texture, level of detail 0 (normal), 3 components (red, green, blue), x size from image, y size from image,
|
// 2d texture, level of detail 0 (normal), 3 components (red, green, blue), x size from image, y size from image,
|
||||||
// border 0 (normal), rgb color data, unsigned byte data, and finally the data itself.
|
// border 0 (normal), rgb color data, unsigned byte data, and finally the data itself.
|
||||||
glTexImage2D(GL_TEXTURE_2D, 0, 3, image1->sizeX, image1->sizeY, 0, GL_RGB, GL_UNSIGNED_BYTE, image1->data);
|
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image1->sizeX, image1->sizeY, 0, GL_RGB, GL_UNSIGNED_BYTE, image1->data);
|
||||||
|
|
||||||
free(image1);
|
free(image1);
|
||||||
};
|
}
|
||||||
|
|
||||||
/* A general OpenGL initialization function. Sets all of the initial parameters. */
|
/* A general OpenGL initialization function. Sets all of the initial parameters. */
|
||||||
void InitGL(int Width, int Height) // We call this right after our OpenGL window is created.
|
void InitGL(int Width, int Height) // We call this right after our OpenGL window is created.
|
||||||
|
@ -74,7 +74,7 @@ void InitGL(int Width, int Height) // We call this right after our OpenG
|
||||||
|
|
||||||
gluPerspective(45.0f,(GLfloat)Width/(GLfloat)Height,0.1f,100.0f); // Calculate The Aspect Ratio Of The Window
|
gluPerspective(45.0f,(GLfloat)Width/(GLfloat)Height,0.1f,100.0f); // Calculate The Aspect Ratio Of The Window
|
||||||
|
|
||||||
glMatrixMode(GL_MODELVIEW);
|
glMatrixMode(GL_MODELVIEW);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* The function called when our window is resized (which shouldn't happen, because we're fullscreen) */
|
/* The function called when our window is resized (which shouldn't happen, because we're fullscreen) */
|
||||||
|
|
Binary file not shown. (Image resized: 192 KiB before, 96 KiB after.)
|
@ -132,7 +132,7 @@ void LoadGLTextures() {
|
||||||
|
|
||||||
// 2d texture, level of detail 0 (normal), 3 components (red, green, blue), x size from image, y size from image,
|
// 2d texture, level of detail 0 (normal), 3 components (red, green, blue), x size from image, y size from image,
|
||||||
// border 0 (normal), rgb color data, unsigned byte data, and finally the data itself.
|
// border 0 (normal), rgb color data, unsigned byte data, and finally the data itself.
|
||||||
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX8_EXT, image1->width, image1->height, 0, GL_COLOR_INDEX, GL_UNSIGNED_BYTE_TWID_KOS, image1->data);
|
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX8_EXT, image1->width, image1->height, 0, GL_COLOR_INDEX8_TWID_KOS, GL_UNSIGNED_BYTE, image1->data);
|
||||||
glGenerateMipmapEXT(GL_TEXTURE_2D);
|
glGenerateMipmapEXT(GL_TEXTURE_2D);
|
||||||
|
|
||||||
free(image1);
|
free(image1);
|
||||||
|
|
|
@ -254,6 +254,8 @@ int BMP_Infos(FILE *pFile, uint32_t *width, uint32_t *height)
|
||||||
*width = (uint32_t)BmpInfoHeader.Width;
|
*width = (uint32_t)BmpInfoHeader.Width;
|
||||||
*height = (uint32_t)BmpInfoHeader.Height;
|
*height = (uint32_t)BmpInfoHeader.Height;
|
||||||
|
|
||||||
|
fseek(pFile, BmpInfoHeader.Size + 14, SEEK_SET);
|
||||||
|
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -270,6 +272,7 @@ int BMP_GetPalette(FILE *pFile)
|
||||||
bitCount = BmpInfoHeader.ClrImportant * sizeof(RGB_QUAD);
|
bitCount = BmpInfoHeader.ClrImportant * sizeof(RGB_QUAD);
|
||||||
|
|
||||||
if (fread(BmpRgbQuad, 1, bitCount, pFile) != bitCount){
|
if (fread(BmpRgbQuad, 1, bitCount, pFile) != bitCount){
|
||||||
|
fprintf(stderr, "Failed to read palette: %d\n", bitCount);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -281,6 +284,8 @@ int BMP_GetPalette(FILE *pFile)
|
||||||
}
|
}
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fprintf(stderr, "BitCount: %d\n", BmpInfoHeader.BitCount);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -346,7 +351,7 @@ int LoadPalettedBMP(const char* filename, Image* image)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!BMP_GetPalette(fp)) {
|
if (!BMP_GetPalette(fp)) {
|
||||||
printf("Only 16c BMP are supported for this sample");
|
printf("Only 16c BMP are supported for this sample\n");
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -429,7 +434,7 @@ void LoadGLTextures() {
|
||||||
#ifndef USE_16C_PALETTE
|
#ifndef USE_16C_PALETTE
|
||||||
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX8_EXT, image1.width, image1.height, 0, GL_COLOR_INDEX, GL_UNSIGNED_BYTE, image1.data);
|
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX8_EXT, image1.width, image1.height, 0, GL_COLOR_INDEX, GL_UNSIGNED_BYTE, image1.data);
|
||||||
#else
|
#else
|
||||||
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX4_EXT, image1.width, image1.height, 0, GL_COLOR_INDEX, GL_UNSIGNED_BYTE, image1.data);
|
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX4_EXT, image1.width, image1.height, 0, GL_COLOR_INDEX4_EXT, GL_UNSIGNED_BYTE, image1.data);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
glBindTexture(GL_TEXTURE_2D, textures[1]); // 2d texture (x and y size)
|
glBindTexture(GL_TEXTURE_2D, textures[1]); // 2d texture (x and y size)
|
||||||
|
@ -444,7 +449,7 @@ void LoadGLTextures() {
|
||||||
#ifndef USE_16C_PALETTE
|
#ifndef USE_16C_PALETTE
|
||||||
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX8_EXT, image1.width, image1.height, 0, GL_COLOR_INDEX, GL_UNSIGNED_BYTE, image1.data);
|
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX8_EXT, image1.width, image1.height, 0, GL_COLOR_INDEX, GL_UNSIGNED_BYTE, image1.data);
|
||||||
#else
|
#else
|
||||||
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX4_EXT, image1.width, image1.height, 0, GL_COLOR_INDEX, GL_UNSIGNED_BYTE, image1.data);
|
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX4_EXT, image1.width, image1.height, 0, GL_COLOR_INDEX4_EXT, GL_UNSIGNED_BYTE, image1.data);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
glBindTexture(GL_TEXTURE_2D, textures[2]);
|
glBindTexture(GL_TEXTURE_2D, textures[2]);
|
||||||
|
@ -463,7 +468,7 @@ void LoadGLTextures() {
|
||||||
#ifndef USE_16C_PALETTE
|
#ifndef USE_16C_PALETTE
|
||||||
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX8_EXT, image2.width, image2.height, 0, GL_COLOR_INDEX, GL_UNSIGNED_BYTE, image2.data);
|
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX8_EXT, image2.width, image2.height, 0, GL_COLOR_INDEX, GL_UNSIGNED_BYTE, image2.data);
|
||||||
#else
|
#else
|
||||||
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX4_EXT, image2.width, image2.height, 0, GL_COLOR_INDEX, GL_UNSIGNED_BYTE, image2.data);
|
glTexImage2D(GL_TEXTURE_2D, 0, GL_COLOR_INDEX4_EXT, image2.width, image2.height, 0, GL_COLOR_INDEX4_EXT, GL_UNSIGNED_BYTE, image2.data);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Binary file not shown. (Image resized: 32 KiB before, 16 KiB after.)
samples/prof_texture_upload/image.h: new file, 8207 lines (diff suppressed because it is too large).
samples/prof_texture_upload/main.c: new file, 64 lines.
|
@ -0,0 +1,64 @@
|
||||||
|
#include <stddef.h>
|
||||||
|
#include <time.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
|
||||||
|
#ifdef __DREAMCAST__
|
||||||
|
#include <kos.h>
|
||||||
|
#include "../profiler.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <GL/gl.h>
|
||||||
|
#include <GL/glkos.h>
|
||||||
|
|
||||||
|
#include "image.h"
|
||||||
|
|
||||||
|
#define PROFILE 0
|
||||||
|
|
||||||
|
int main(int argc, char* argv[]) {
|
||||||
|
(void) argc;
|
||||||
|
(void) argv;
|
||||||
|
|
||||||
|
fprintf(stdout, "Initializing\n");
|
||||||
|
glKosInit();
|
||||||
|
glClearColor(0.5f, 0.0f, 0.5f, 1.0f);
|
||||||
|
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
|
||||||
|
glKosSwapBuffers();
|
||||||
|
|
||||||
|
GLuint texture_id = 0;
|
||||||
|
glGenTextures(1, &texture_id);
|
||||||
|
glBindTexture(GL_TEXTURE_2D, texture_id);
|
||||||
|
|
||||||
|
time_t start = time(NULL);
|
||||||
|
time_t end = start;
|
||||||
|
|
||||||
|
int counter = 0;
|
||||||
|
|
||||||
|
fprintf(stderr, "Starting test run...\n");
|
||||||
|
|
||||||
|
#ifdef __DREAMCAST__
|
||||||
|
#if PROFILE
|
||||||
|
profiler_init("/pc/gmon.out");
|
||||||
|
profiler_start();
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
while((end - start) < 5) {
|
||||||
|
glTexImage2D(
|
||||||
|
GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, header_data
|
||||||
|
);
|
||||||
|
|
||||||
|
++counter;
|
||||||
|
end = time(NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef __DREAMCAST__
|
||||||
|
#if PROFILE
|
||||||
|
profiler_stop();
|
||||||
|
profiler_clean_up();
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
fprintf(stderr, "Called glTexImage2D %d times (%.4f per call)\n", counter, (float)(end - start) / (float)(counter));
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
tests/CMakeLists.txt: new file, 26 lines.
|
@ -0,0 +1,26 @@
|
||||||
|
|
||||||
|
|
||||||
|
FILE(GLOB GL_TESTS ${CMAKE_CURRENT_SOURCE_DIR}/test_*.h)
|
||||||
|
|
||||||
|
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR})
|
||||||
|
|
||||||
|
SET(TEST_GENERATOR_BIN ${CMAKE_SOURCE_DIR}/tools/test_generator.py)
|
||||||
|
SET(TEST_MAIN_FILENAME ${CMAKE_CURRENT_BINARY_DIR}/main.cpp)
|
||||||
|
|
||||||
|
ADD_CUSTOM_COMMAND(
|
||||||
|
OUTPUT ${TEST_MAIN_FILENAME}
|
||||||
|
COMMAND ${TEST_GENERATOR_BIN} --output ${TEST_MAIN_FILENAME} ${TEST_FILES} ${GL_TESTS}
|
||||||
|
DEPENDS ${TEST_FILES} ${GL_TESTS} ${TEST_GENERATOR_BIN}
|
||||||
|
)
|
||||||
|
|
||||||
|
add_executable(gldc_tests ${TEST_FILES} ${TEST_SOURCES} ${TEST_MAIN_FILENAME})
|
||||||
|
target_link_libraries(gldc_tests GLdc)
|
||||||
|
|
||||||
|
if(NOT PLATFORM_DREAMCAST)
|
||||||
|
set_target_properties(
|
||||||
|
gldc_tests
|
||||||
|
PROPERTIES
|
||||||
|
COMPILE_OPTIONS "-m32"
|
||||||
|
LINK_OPTIONS "-m32"
|
||||||
|
)
|
||||||
|
endif()
|
tests/test_allocator.h: new file, 189 lines.
|
@ -0,0 +1,189 @@
|
||||||
|
#include "tools/test.h"
|
||||||
|
|
||||||
|
#include <cstdint>
|
||||||
|
#include <cassert>
|
||||||
|
#include <malloc.h>
|
||||||
|
#include <utility>
|
||||||
|
|
||||||
|
#include <GL/gl.h>
|
||||||
|
#include <GL/glkos.h>
|
||||||
|
|
||||||
|
#include "GL/alloc/alloc.h"
|
||||||
|
|
||||||
|
static inline int round_up(int n, int multiple)
|
||||||
|
{
|
||||||
|
assert(multiple);
|
||||||
|
return ((n + multiple - 1) / multiple) * multiple;
|
||||||
|
}
|
||||||
|
|
||||||
|
#define POOL_SIZE (16 * 2048)
|
||||||
|
|
||||||
|
class AllocatorTests : public test::TestCase {
|
||||||
|
public:
|
||||||
|
uint8_t* pool = NULL;
|
||||||
|
|
||||||
|
std::vector<std::pair<void*, void*>> defrag_moves;
|
||||||
|
|
||||||
|
void set_up() {
|
||||||
|
pool = (uint8_t*) memalign(2048, POOL_SIZE);
|
||||||
|
assert(((intptr_t) pool) % 2048 == 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
void tear_down() {
|
||||||
|
alloc_shutdown(pool);
|
||||||
|
free(pool);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void on_defrag(void* src, void* dst, void* user_data) {
|
||||||
|
AllocatorTests* self = (AllocatorTests*) user_data;
|
||||||
|
self->defrag_moves.push_back(std::make_pair(src, dst));
|
||||||
|
}
|
||||||
|
|
||||||
|
void test_defrag() {
|
||||||
|
alloc_init(pool, POOL_SIZE);
|
||||||
|
|
||||||
|
alloc_malloc(pool, 256);
|
||||||
|
void* a2 = alloc_malloc(pool, 256);
|
||||||
|
void* a3 = alloc_malloc(pool, 256);
|
||||||
|
|
||||||
|
alloc_free(pool, a2);
|
||||||
|
|
||||||
|
alloc_run_defrag(pool, &AllocatorTests::on_defrag, 5, this);
|
||||||
|
|
||||||
|
assert_equal(defrag_moves.size(), 1u); // Moved a3 -> a2
|
||||||
|
|
||||||
|
assert_equal(defrag_moves[0].first, a3);
|
||||||
|
assert_equal(defrag_moves[0].second, a2);
|
||||||
|
|
||||||
|
assert_equal(alloc_malloc(pool, 256), a3);
|
||||||
|
}
|
||||||
|
|
||||||
|
void test_poor_alloc_aligned() {
|
||||||
|
/* If we try to allocate and there are no suitable aligned
|
||||||
|
* slots available, we fallback to any available unaligned slots */
|
||||||
|
alloc_init(pool, POOL_SIZE);
|
||||||
|
|
||||||
|
// Leave only space for an unaligned block
|
||||||
|
alloc_malloc(pool, (15 * 2048) - 256);
|
||||||
|
|
||||||
|
// Should work, we have space (just) but it's not aligned
|
||||||
|
void* a1 = alloc_malloc(pool, 2048 + 256);
|
||||||
|
assert_is_not_null(a1);
|
||||||
|
assert_equal(a1, pool + ((15 * 2048) - 256));
|
||||||
|
}
|
||||||
|
|
||||||
|
void test_poor_alloc_straddling() {
|
||||||
|
/*
|
||||||
|
* If we try to allocate a small block, it should not
|
||||||
|
* cross a 2048 boundary unless there is no other option */
|
||||||
|
alloc_init(pool, POOL_SIZE);
|
||||||
|
alloc_malloc(pool, (15 * 2048) - 256);
|
||||||
|
void* a1 = alloc_malloc(pool, 512);
|
||||||
|
assert_true((uintptr_t(a1) % 2048) == 0); // Should've aligned to the last 2048 block
|
||||||
|
|
||||||
|
/* Allocate the rest of the last block, this leaves a 256 block in the
|
||||||
|
* penultimate block */
|
||||||
|
alloc_malloc(pool, 1536);
|
||||||
|
alloc_free(pool, a1);
|
||||||
|
|
||||||
|
/* No choice but to straddle the boundary */
|
||||||
|
a1 = alloc_malloc(pool, 768);
|
||||||
|
}
|
||||||
|
|
||||||
|
void test_alloc_init() {
|
||||||
|
alloc_init(pool, POOL_SIZE);
|
||||||
|
|
||||||
|
void* expected_base_address = (void*) round_up((uintptr_t) pool, 2048);
|
||||||
|
assert_equal(alloc_next_available(pool, 16), expected_base_address);
|
||||||
|
assert_equal(alloc_base_address(pool), expected_base_address);
|
||||||
|
|
||||||
|
size_t expected_blocks = (
|
||||||
|
uintptr_t(pool + POOL_SIZE) -
|
||||||
|
uintptr_t(expected_base_address)
|
||||||
|
) / 2048;
|
||||||
|
|
||||||
|
assert_equal(alloc_block_count(pool), expected_blocks);
|
||||||
|
}
|
||||||
|
|
||||||
|
void test_complex_case() {
|
||||||
|
uint8_t* large_pool = (uint8_t*) malloc(8 * 1024 * 1024);
|
||||||
|
|
||||||
|
alloc_init(large_pool, 8 * 1024 * 1024);
|
||||||
|
alloc_malloc(large_pool, 262144);
|
||||||
|
alloc_malloc(large_pool, 262144);
|
||||||
|
void* a1 = alloc_malloc(large_pool, 524288);
|
||||||
|
alloc_free(large_pool, a1);
|
||||||
|
alloc_malloc(large_pool, 699056);
|
||||||
|
alloc_malloc(large_pool, 128);
|
||||||
|
alloc_shutdown(large_pool);
|
||||||
|
|
||||||
|
free(large_pool);
|
||||||
|
}
|
||||||
|
|
||||||
|
void test_complex_case2() {
|
||||||
|
uint8_t* large_pool = (uint8_t*) malloc(8 * 1024 * 1024);
|
||||||
|
alloc_init(large_pool, 8 * 1024 * 1024);
|
||||||
|
|
||||||
|
void* a1 = alloc_malloc(large_pool, 131072);
|
||||||
|
alloc_free(large_pool, a1);
|
||||||
|
|
||||||
|
alloc_malloc(large_pool, 174768);
|
||||||
|
void* a2 = alloc_malloc(large_pool, 131072);
|
||||||
|
alloc_free(large_pool, a2);
|
||||||
|
|
||||||
|
alloc_malloc(large_pool, 174768);
|
||||||
|
void* a3 = alloc_malloc(large_pool, 128);
|
||||||
|
|
||||||
|
alloc_free(large_pool, a3);
|
||||||
|
|
||||||
|
alloc_shutdown(large_pool);
|
||||||
|
free(large_pool);
|
||||||
|
}
|
||||||
|
|
||||||
|
void test_alloc_malloc() {
|
||||||
|
alloc_init(pool, POOL_SIZE);
|
||||||
|
|
||||||
|
uint8_t* base_address = (uint8_t*) alloc_base_address(pool);
|
||||||
|
void* a1 = alloc_malloc(pool, 1024);
|
||||||
|
|
||||||
|
/* First alloc should always be the base address */
|
||||||
|
assert_equal(a1, base_address);
|
||||||
|
|
||||||
|
/* An allocation of <= 2048 (well 1024) will not necessarily be at
|
||||||
|
* a 2k boundary */
|
||||||
|
void* expected_next_available = base_address + uintptr_t(1024);
|
||||||
|
assert_equal(alloc_next_available(pool, 1024), expected_next_available);
|
||||||
|
|
||||||
|
/* Requesting 2k though will force to a 2k boundary */
|
||||||
|
expected_next_available = base_address + uintptr_t(2048);
|
||||||
|
assert_equal(alloc_next_available(pool, 2048), expected_next_available);
|
||||||
|
|
||||||
|
/* Now alloc 2048 bytes, this should be on the 2k boundary */
|
||||||
|
void* a2 = alloc_malloc(pool, 2048);
|
||||||
|
assert_equal(a2, expected_next_available);
|
||||||
|
|
||||||
|
/* If we try to allocate 1k, this should go in the second half of the
|
||||||
|
* first block */
|
||||||
|
expected_next_available = base_address + uintptr_t(1024);
|
||||||
|
void* a3 = alloc_malloc(pool, 1024);
|
||||||
|
assert_equal(a3, expected_next_available);
|
||||||
|
|
||||||
|
alloc_free(pool, a1);
|
||||||
|
|
||||||
|
/* Next allocation would go in the just freed block */
|
||||||
|
expected_next_available = base_address;
|
||||||
|
assert_equal(alloc_next_available(pool, 64), expected_next_available);
|
||||||
|
|
||||||
|
/* Now allocate 14 more 2048 size blocks, the following one should
|
||||||
|
* return NULL */
|
||||||
|
for(int i = 0; i < 14; ++i) {
|
||||||
|
alloc_malloc(pool, 2048);
|
||||||
|
}
|
||||||
|
|
||||||
|
assert_is_null(alloc_malloc(pool, 2048));
|
||||||
|
|
||||||
|
/* But we should still have room in the second block for this */
|
||||||
|
assert_is_not_null(alloc_malloc(pool, 64));
|
||||||
|
}
|
||||||
|
|
||||||
|
};
|
tests/test_glteximage2d.h: new file, 77 lines.
|
@ -0,0 +1,77 @@
|
||||||
|
#include "tools/test.h"
|
||||||
|
|
||||||
|
#include <stdint.h>
|
||||||
|
#include <GL/gl.h>
|
||||||
|
#include <GL/glkos.h>
|
||||||
|
|
||||||
|
|
||||||
|
class TexImage2DTests : public test::TestCase {
|
||||||
|
public:
|
||||||
|
uint8_t image_data[8 * 8 * 4] = {0};
|
||||||
|
|
||||||
|
void set_up() {
|
||||||
|
GLdcConfig config;
|
||||||
|
glKosInitConfig(&config);
|
||||||
|
config.texture_twiddle = false;
|
||||||
|
glKosInitEx(&config);
|
||||||
|
|
||||||
|
/* Init image data so each texel RGBA value matches the
|
||||||
|
* position in the array */
|
||||||
|
for(int i = 0; i < 8 * 8 * 4; i += 4) {
|
||||||
|
image_data[i + 0] = i;
|
||||||
|
image_data[i + 1] = i;
|
||||||
|
image_data[i + 2] = i;
|
||||||
|
image_data[i + 3] = i;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void tear_down() {
|
||||||
|
glKosShutdown();
|
||||||
|
}
|
||||||
|
|
||||||
|
void test_rgb_to_rgb565() {
|
||||||
|
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 8, 8, 0, GL_RGB, GL_UNSIGNED_BYTE, image_data);
|
||||||
|
assert_equal(glGetError(), GL_NO_ERROR);
|
||||||
|
|
||||||
|
GLint internalFormat;
|
||||||
|
glGetIntegerv(GL_TEXTURE_INTERNAL_FORMAT_KOS, &internalFormat);
|
||||||
|
|
||||||
|
assert_equal(internalFormat, GL_RGB565_KOS);
|
||||||
|
}
|
||||||
|
|
||||||
|
void test_rgb_to_rgb565_twiddle() {
|
||||||
|
glEnable(GL_TEXTURE_TWIDDLE_KOS);
|
||||||
|
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 8, 8, 0, GL_RGB, GL_UNSIGNED_BYTE, image_data);
|
||||||
|
glDisable(GL_TEXTURE_TWIDDLE_KOS);
|
||||||
|
|
||||||
|
assert_equal(glGetError(), GL_NO_ERROR);
|
||||||
|
|
||||||
|
GLint internalFormat;
|
||||||
|
glGetIntegerv(GL_TEXTURE_INTERNAL_FORMAT_KOS, &internalFormat);
|
||||||
|
|
||||||
|
assert_equal(internalFormat, GL_RGB565_TWID_KOS);
|
||||||
|
}
|
||||||
|
|
||||||
|
void test_rgba_to_argb4444() {
|
||||||
|
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 8, 8, 0, GL_RGBA, GL_UNSIGNED_BYTE, image_data);
|
||||||
|
assert_equal(glGetError(), GL_NO_ERROR);
|
||||||
|
|
||||||
|
GLint internalFormat;
|
||||||
|
glGetIntegerv(GL_TEXTURE_INTERNAL_FORMAT_KOS, &internalFormat);
|
||||||
|
|
||||||
|
assert_equal(internalFormat, GL_ARGB4444_KOS);
|
||||||
|
}
|
||||||
|
|
||||||
|
void test_rgba_to_argb4444_twiddle() {
|
||||||
|
glEnable(GL_TEXTURE_TWIDDLE_KOS);
|
||||||
|
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 8, 8, 0, GL_RGBA, GL_UNSIGNED_BYTE, image_data);
|
||||||
|
glDisable(GL_TEXTURE_TWIDDLE_KOS);
|
||||||
|
|
||||||
|
assert_equal(glGetError(), GL_NO_ERROR);
|
||||||
|
|
||||||
|
GLint internalFormat;
|
||||||
|
glGetIntegerv(GL_TEXTURE_INTERNAL_FORMAT_KOS, &internalFormat);
|
||||||
|
|
||||||
|
assert_equal(internalFormat, GL_ARGB4444_TWID_KOS);
|
||||||
|
}
|
||||||
|
};
|
tools/test.h: new file, 451 lines.
|
@ -0,0 +1,451 @@
|
||||||
|
/*
 * Copyright (c) 2011-2017 Luke Benstead https://simulant-engine.appspot.com
|
||||||
|
*
|
||||||
|
* This file is part of Simulant.
|
||||||
|
*
|
||||||
|
* Simulant is free software: you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
* the Free Software Foundation, either version 3 of the License, or
|
||||||
|
* (at your option) any later version.
|
||||||
|
*
|
||||||
|
* Simulant is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU Lesser General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Lesser General Public License
|
||||||
|
* along with Simulant. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <vector>
|
||||||
|
#include <functional>
|
||||||
|
#include <stdexcept>
|
||||||
|
#include <iostream>
|
||||||
|
#include <sstream>
|
||||||
|
#include <algorithm>
|
||||||
|
#include <fstream>
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
#define assert_equal(expected, actual) _assert_equal((expected), (actual), __FILE__, __LINE__)
|
||||||
|
#define assert_not_equal(expected, actual) _assert_not_equal((expected), (actual), __FILE__, __LINE__)
|
||||||
|
#define assert_false(actual) _assert_false((actual), __FILE__, __LINE__)
|
||||||
|
#define assert_true(actual) _assert_true((actual), __FILE__, __LINE__)
|
||||||
|
#define assert_close(expected, actual, difference) _assert_close((expected), (actual), (difference), __FILE__, __LINE__)
|
||||||
|
#define assert_is_null(actual) _assert_is_null((actual), __FILE__, __LINE__)
|
||||||
|
#define assert_is_not_null(actual) _assert_is_not_null((actual), __FILE__, __LINE__)
|
||||||
|
#define assert_raises(exception, func) _assert_raises<exception>((func), __FILE__, __LINE__)
|
||||||
|
#define assert_items_equal(expected, actual) _assert_items_equal((actual), (expected), __FILE__, __LINE__)
|
||||||
|
#define not_implemented() _not_implemented(__FILE__, __LINE__)
|
||||||
|
|
||||||
|
|
||||||
|
namespace test {

class StringFormatter {
public:
    StringFormatter(const std::string& templ):
        templ_(templ) { }

    struct Counter {
        Counter(uint32_t c): c(c) {}
        uint32_t c;
    };

    template<typename T>
    std::string format(T value) {
        std::stringstream ss;
        ss << value;
        return _do_format(0, ss.str());
    }

    template<typename T>
    std::string format(Counter count, T value) {
        std::stringstream ss;
        ss << value;
        return _do_format(count.c, ss.str());
    }

    template<typename T, typename... Args>
    std::string format(T value, const Args&... args) {
        std::stringstream ss;
        ss << value;
        return StringFormatter(_do_format(0, ss.str())).format(Counter(1), args...);
    }

    template<typename T, typename... Args>
    std::string format(Counter count, T value, const Args&... args) {
        std::stringstream ss;
        ss << value;
        return StringFormatter(_do_format(count.c, ss.str())).format(Counter(count.c + 1), args...);
    }

    std::string _do_format(uint32_t counter, const std::string& value) {
        std::stringstream ss; // Can't use to_string on all platforms
        ss << counter;

        const std::string to_replace = "{" + ss.str() + "}";
        std::string output = templ_;

        auto replace = [](std::string& str, const std::string& from, const std::string& to) -> bool {
            size_t start_pos = str.find(from);
            if(start_pos == std::string::npos)
                return false;
            str.replace(start_pos, from.length(), to);
            return true;
        };

        replace(output, to_replace, value);
        return output;
    }

private:
    std::string templ_;
};

class StringSplitter {
public:
    StringSplitter(const std::string& str):
        str_(str) {

    }

    std::vector<std::string> split() {
        std::vector<std::string> result;
        std::string buffer;

        for(auto c: str_) {
            if(c == '\n') {
                if(!buffer.empty()) {
                    result.push_back(buffer);
                    buffer.clear();
                }
            } else {
                buffer.push_back(c);
            }
        }

        if(!buffer.empty()) {
            result.push_back(buffer);
        }

        return result;
    }

private:
    std::string str_;
};

typedef StringFormatter _Format;

class AssertionError : public std::logic_error {
public:
    AssertionError(const std::string& what):
        std::logic_error(what),
        file(""),
        line(-1) {
    }

    AssertionError(const std::pair<std::string, int> file_and_line, const std::string& what):
        std::logic_error(what),
        file(file_and_line.first),
        line(file_and_line.second) {

    }

    ~AssertionError() noexcept (true) {

    }

    std::string file;
    int line;
};


class NotImplementedError: public std::logic_error {
public:
    NotImplementedError(const std::string& file, int line):
        std::logic_error(_Format("Not implemented at {0}:{1}").format(file, line)) {}
};


class SkippedTestError: public std::logic_error {
public:
    SkippedTestError(const std::string& reason):
        std::logic_error(reason) {

    }
};

class TestCase {
public:
    virtual ~TestCase() {}

    virtual void set_up() {}
    virtual void tear_down() {}

    void skip_if(const bool& flag, const std::string& reason) {
        if(flag) { throw test::SkippedTestError(reason); }
    }

    template<typename T, typename U>
    void _assert_equal(T expected, U actual, std::string file, int line) {
        if(expected != actual) {
            auto file_and_line = std::make_pair(file, line);
            throw test::AssertionError(file_and_line, test::_Format("{0} does not match {1}").format(actual, expected));
        }
    }

    template<typename T, typename U>
    void _assert_not_equal(T lhs, U rhs, std::string file, int line) {
        if(lhs == (T) rhs) {
            auto file_and_line = std::make_pair(file, line);
            throw test::AssertionError(file_and_line, test::_Format("{0} should not match {1}").format(lhs, rhs));
        }
    }

    template<typename T>
    void _assert_true(T actual, std::string file, int line) {
        if(!bool(actual)) {
            auto file_and_line = std::make_pair(file, line);
            throw test::AssertionError(file_and_line, test::_Format("{0} is not true").format(bool(actual) ? "true" : "false"));
        }
    }

    template<typename T>
    void _assert_false(T actual, std::string file, int line) {
        if(bool(actual)) {
            auto file_and_line = std::make_pair(file, line);
            throw test::AssertionError(file_and_line, test::_Format("{0} is not false").format(bool(actual) ? "true" : "false"));
        }
    }

    template<typename T, typename U, typename V>
    void _assert_close(T expected, U actual, V difference, std::string file, int line) {
        if(actual < expected - difference ||
           actual > expected + difference) {
            auto file_and_line = std::make_pair(file, line);
            throw test::AssertionError(file_and_line, test::_Format("{0} is not close enough to {1}").format(actual, expected));
        }
    }

    template<typename T>
    void _assert_is_null(T* thing, std::string file, int line) {
        if(thing != nullptr) {
            auto file_and_line = std::make_pair(file, line);
            throw test::AssertionError(file_and_line, "Pointer was not NULL");
        }
    }

    template<typename T>
    void _assert_is_not_null(T* thing, std::string file, int line) {
        if(thing == nullptr) {
            auto file_and_line = std::make_pair(file, line);
            throw test::AssertionError(file_and_line, "Pointer was unexpectedly NULL");
        }
    }

    template<typename T, typename Func>
    void _assert_raises(Func func, std::string file, int line) {
        try {
            func();
            auto file_and_line = std::make_pair(file, line);
            throw test::AssertionError(file_and_line, test::_Format("Expected exception ({0}) was not thrown").format(typeid(T).name()));
        } catch(T& e) {}
    }

    template<typename T, typename U>
    void _assert_items_equal(const T& lhs, const U& rhs, std::string file, int line) {
        auto file_and_line = std::make_pair(file, line);

        if(lhs.size() != rhs.size()) {
            throw test::AssertionError(file_and_line, "Containers are not the same length");
        }

        for(auto item: lhs) {
            if(std::find(rhs.begin(), rhs.end(), item) == rhs.end()) {
                throw test::AssertionError(file_and_line, test::_Format("Container does not contain {0}").format(item));
            }
        }
    }

    void _not_implemented(std::string file, int line) {
        throw test::NotImplementedError(file, line);
    }
};

class TestRunner {
public:
    template<typename T, typename U>
    void register_case(std::vector<U> methods, std::vector<std::string> names) {
        std::shared_ptr<TestCase> instance = std::make_shared<T>();

        instances_.push_back(instance); // Hold on to it

        for(std::string name: names) {
            names_.push_back(name);
        }

        for(U& method: methods) {
            std::function<void()> func = std::bind(method, dynamic_cast<T*>(instance.get()));
            tests_.push_back([=]() {
                instance->set_up();
                try {
                    func();
                } catch(...) {
                    instance->tear_down();
                    throw;
                }

                instance->tear_down();
            });
        }
    }

    int32_t run(const std::string& test_case, const std::string& junit_output="") {
        int failed = 0;
        int skipped = 0;
        int ran = 0;
        int crashed = 0;

        auto new_tests = tests_;
        auto new_names = names_;

        if(!test_case.empty()) {
            new_tests.clear();
            new_names.clear();

            for(uint32_t i = 0; i < names_.size(); ++i) {
                if(names_[i].find(test_case) == 0) {
                    new_tests.push_back(tests_[i]);
                    new_names.push_back(names_[i]);
                }
            }
        }

        std::cout << std::endl << "Running " << new_tests.size() << " tests" << std::endl << std::endl;

        std::vector<std::string> junit_lines;
        junit_lines.push_back("<testsuites>\n");

        std::string klass = "";

        for(std::function<void ()> test: new_tests) {
            std::string name = new_names[ran];
            std::string this_klass(name.begin(), name.begin() + name.find_first_of(":"));
            bool close_klass = ran == (int) new_tests.size() - 1;

            if(this_klass != klass) {
                if(!klass.empty()) {
                    junit_lines.push_back(" </testsuite>\n");
                }
                klass = this_klass;
                junit_lines.push_back(" <testsuite name=\"" + this_klass + "\">\n");
            }

            try {
                junit_lines.push_back(" <testcase name=\"" + new_names[ran] + "\">\n");
                std::string output = " " + new_names[ran];

                for(int i = output.length(); i < 76; ++i) {
                    output += " ";
                }

                std::cout << output;
                test();
                std::cout << "\033[32m" << " OK " << "\033[0m" << std::endl;
                junit_lines.push_back(" </testcase>\n");
            } catch(test::NotImplementedError& e) {
                std::cout << "\033[34m" << " SKIPPED" << "\033[0m" << std::endl;
                ++skipped;
                junit_lines.push_back(" </testcase>\n");
            } catch(test::SkippedTestError& e) {
                std::cout << "\033[34m" << " SKIPPED" << "\033[0m" << std::endl;
                ++skipped;
                junit_lines.push_back(" </testcase>\n");
            } catch(test::AssertionError& e) {
                std::cout << "\033[33m" << " FAILED " << "\033[0m" << std::endl;
                std::cout << " " << e.what() << std::endl;
                if(!e.file.empty()) {
                    std::cout << " " << e.file << ":" << e.line << std::endl;

                    std::ifstream ifs(e.file);
                    if(ifs.good()) {
                        std::string buffer;
                        std::vector<std::string> lines;
                        while(std::getline(ifs, buffer)) {
                            lines.push_back(buffer);
                        }

                        int line_count = lines.size();
                        if(line_count && e.line <= line_count) {
                            std::cout << lines.at(e.line - 1) << std::endl << std::endl;
                        }
                    }
                }
                ++failed;

                junit_lines.push_back(" <failure message=\"" + std::string(e.what()) + "\"/>\n");
                junit_lines.push_back(" </testcase>\n");
            } catch(std::exception& e) {
                std::cout << "\033[31m" << " EXCEPT " << std::endl;
                std::cout << " " << e.what() << "\033[0m" << std::endl;
                ++crashed;

                junit_lines.push_back(" <failure message=\"" + std::string(e.what()) + "\"/>\n");
                junit_lines.push_back(" </testcase>\n");
            }
            std::cout << "\033[0m";
            ++ran;

            if(close_klass) {
                junit_lines.push_back(" </testsuite>\n");
            }
        }

        junit_lines.push_back("</testsuites>\n");

        if(!junit_output.empty()) {
            FILE* f = fopen(junit_output.c_str(), "wt");
            if(f) {
                for(auto& line: junit_lines) {
                    fwrite(line.c_str(), sizeof(char), line.length(), f);
                }

                fclose(f); // only close the file if it was actually opened
            }
        }

        std::cout << "-----------------------" << std::endl;
        if(!failed && !crashed && !skipped) {
            std::cout << "All tests passed" << std::endl << std::endl;
        } else {
            if(skipped) {
                std::cout << skipped << " tests skipped";
            }

            if(failed) {
                if(skipped) {
                    std::cout << ", ";
                }
                std::cout << failed << " tests failed";
            }

            if(crashed) {
                if(failed) {
                    std::cout << ", ";
                }
                std::cout << crashed << " tests crashed";
            }
            std::cout << std::endl << std::endl;
        }

        return failed + crashed;
    }

private:
    std::vector<std::shared_ptr<TestCase>> instances_;
    std::vector<std::function<void()> > tests_;
    std::vector<std::string> names_;
};

} // test
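
For orientation, the sketch below (class and method names are hypothetical, not part of this commit) shows the kind of test case this header and the generator below expect: a class deriving from test::TestCase whose test_-prefixed methods use the assertion macros defined above.

// Hypothetical example test case; names are illustrative only.
#include "tools/test.h"

class MathTests : public test::TestCase {
public:
    void test_addition() {
        assert_equal(4, 2 + 2);            // throws test::AssertionError on mismatch
        assert_close(1.0f, 1.001f, 0.01f); // tolerance-based comparison
    }

    void test_future_feature() {
        not_implemented();                 // reported as SKIPPED by the runner
    }
};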

212	tools/test_generator.py	Executable file
@@ -0,0 +1,212 @@
#!/usr/bin/env python3

import argparse
import re
import sys

parser = argparse.ArgumentParser(description="Generate C++ unit tests")
parser.add_argument("--output", type=str, nargs=1, help="The output source file for the generated test main()", required=True)
parser.add_argument("test_files", type=str, nargs="+", help="The list of C++ files containing your tests")
parser.add_argument("--verbose", help="Verbose logging", action="store_true", default=False)


CLASS_REGEX = r"\s*class\s+(\w+)\s*([\:|,]\s*(?:public|private|protected)\s+[\w|::]+\s*)*"
TEST_FUNC_REGEX = r"void\s+(?P<func_name>test_\S[^\(]+)\(\s*(void)?\s*\)"


INCLUDE_TEMPLATE = "#include \"%(file_path)s\""

REGISTER_TEMPLATE = """
    runner->register_case<%(class_name)s>(
        std::vector<void (%(class_name)s::*)()>({%(members)s}),
        {%(names)s}
    );"""

MAIN_TEMPLATE = """

#include <functional>
#include <memory>
#include <map>

#include "tools/test.h"

%(includes)s


std::map<std::string, std::string> parse_args(int argc, char* argv[]) {
    std::map<std::string, std::string> ret;

    for(int i = 1; i < argc; ++i) {
        std::string arg = argv[i];

        auto eq = arg.find('=');
        if(eq != std::string::npos && arg[0] == '-' && arg[1] == '-') {
            auto key = std::string(arg.begin(), arg.begin() + eq);
            auto value = std::string(arg.begin() + eq + 1, arg.end());
            ret[key] = value;
        } else if(arg[0] == '-' && arg[1] == '-') {
            auto key = arg;
            if(i < (argc - 1)) {
                auto value = argv[++i];
                ret[key] = value;
            } else {
                ret[key] = "";
            }
        } else {
            ret[arg] = ""; // Positional, not key=value
        }
    }

    return ret;
}

int main(int argc, char* argv[]) {
    auto runner = std::make_shared<test::TestRunner>();

    auto args = parse_args(argc, argv);

    std::string junit_xml;
    auto junit_xml_it = args.find("--junit-xml");
    if(junit_xml_it != args.end()) {
        junit_xml = junit_xml_it->second;
        std::cout << " Outputting junit XML to: " << junit_xml << std::endl;
        args.erase(junit_xml_it);
    }

    std::string test_case;
    if(args.size()) {
        test_case = args.begin()->first;
    }

    %(registrations)s

    return runner->run(test_case, junit_xml);
}


"""

VERBOSE = False

def log_verbose(message):
    if VERBOSE:
        print(message)


def find_tests(files):

    subclasses = []

    # First pass, find all class definitions
    for path in files:
        with open(path, "rt") as f:
            source_file_data = f.read().replace("\r\n", "").replace("\n", "")

        while True:
            match = re.search(CLASS_REGEX, source_file_data)
            if not match:
                break

            class_name = match.group().split(":")[0].replace("class", "").strip()

            parents = []  # default when the class declares no base classes
            try:
                parents = match.group().split(":", 1)[1]
            except IndexError:
                pass
            else:
                parents = [ x.strip() for x in parents.split(",") ]
                parents = [
                    x.replace("public", "").replace("private", "").replace("protected", "").strip()
                    for x in parents
                ]

            subclasses.append((path, class_name, parents, []))
            log_verbose("Found: %s" % str(subclasses[-1]))

            start = match.end()

            # Find the next opening brace
            while source_file_data[start] in (' ', '\t'):
                start += 1

            start -= 1
            end = start
            if source_file_data[start+1] == '{':

                class_data = []
                brace_counter = 1
                for i in range(start+2, len(source_file_data)):
                    class_data.append(source_file_data[i])
                    if class_data[-1] == '{': brace_counter += 1
                    if class_data[-1] == '}': brace_counter -= 1
                    if not brace_counter:
                        end = i
                        break

                class_data = "".join(class_data)

                while True:
                    match = re.search(TEST_FUNC_REGEX, class_data)
                    if not match:
                        break

                    subclasses[-1][-1].append(match.group('func_name'))
                    class_data = class_data[match.end():]

            source_file_data = source_file_data[end:]


    # Now, simplify the list by finding all potential superclasses, and then keeping any classes
    # that subclass them.
    test_case_subclasses = []
    i = 0
    while i < len(subclasses):
        subclass_names = [x.rsplit("::")[-1] for x in subclasses[i][2]]

        # If this subclasses TestCase, or it subclasses any of the already found testcase subclasses
        # then add it to the list
        if "TestCase" in subclass_names or "SimulantTestCase" in subclass_names or any(x[1] in subclasses[i][2] for x in test_case_subclasses):
            if subclasses[i] not in test_case_subclasses:
                test_case_subclasses.append(subclasses[i])

                i = 0 # Go back to the start, as we may have just found another parent class
                continue
        i += 1

    log_verbose("\n".join([str(x) for x in test_case_subclasses]))
    return test_case_subclasses


def main():
    global VERBOSE

    args = parser.parse_args()

    VERBOSE = args.verbose

    testcases = find_tests(args.test_files)

    includes = "\n".join([ INCLUDE_TEMPLATE % { 'file_path' : x } for x in set([y[0] for y in testcases]) ])
    registrations = []

    for path, class_name, superclasses, funcs in testcases:
        BIND_TEMPLATE = "&%(class_name)s::%(func)s"

        members = ", ".join([ BIND_TEMPLATE % { 'class_name' : class_name, 'func' : x } for x in funcs ])
        names = ", ".join([ '"%s::%s"' % (class_name, x) for x in funcs ])

        registrations.append(REGISTER_TEMPLATE % { 'class_name' : class_name, 'members' : members, 'names' : names })

    registrations = "\n".join(registrations)

    final = MAIN_TEMPLATE % {
        'registrations' : registrations,
        'includes' : includes
    }

    open(args.output[0], "w").write(final)

    return 0


if __name__ == '__main__':
    sys.exit(main())
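
Assuming the hypothetical MathTests case sketched after test.h above, the generator would discover the class via CLASS_REGEX, collect its test_* methods via TEST_FUNC_REGEX, and expand REGISTER_TEMPLATE into the generated main() roughly as follows (illustrative output, not part of this commit):

// Illustrative expansion of REGISTER_TEMPLATE for the hypothetical MathTests case.
runner->register_case<MathTests>(
    std::vector<void (MathTests::*)()>({&MathTests::test_addition, &MathTests::test_future_feature}),
    {"MathTests::test_addition", "MathTests::test_future_feature"}
);

Invoked as, say, tools/test_generator.py --output main.cpp tests/test_math.cpp (paths illustrative), the script writes a complete main() that registers every discovered case and forwards an optional --junit-xml argument to TestRunner::run().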