/* GLdc/containers/aligned_vector.h */


#pragma once

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

#ifdef __cplusplus
extern "C" {
#endif

#if defined(__APPLE__) || defined(__WIN32__)
/* Linux and KOS provide memalign(); macOS and Windows do not, so just fall back to malloc there */
static inline void* memalign(size_t alignment, size_t size) {
    (void) alignment;
    return malloc(size);
}
#else
#include <malloc.h>
#endif

#ifdef __cplusplus
#define AV_FORCE_INLINE static inline
#else
#define AV_NO_INSTRUMENT inline __attribute__((no_instrument_function))
#define AV_INLINE_DEBUG AV_NO_INSTRUMENT __attribute__((always_inline))
#define AV_FORCE_INLINE static AV_INLINE_DEBUG
#endif
#ifdef __DREAMCAST__
#include <kos/string.h>

AV_FORCE_INLINE void *AV_MEMCPY4(void *dest, const void *src, size_t len)
{
    if(!len)
    {
        return dest;
    }

    const uint8_t *s = (uint8_t *)src;
    uint8_t *d = (uint8_t *)dest;
    uint32_t diff = (uint32_t)d - (uint32_t)(s + 1); // extra offset because input gets incremented before output is calculated
    // Underflow would be like adding a negative offset

    // Can use 'd' as a scratch reg now
    asm volatile (
        "clrs\n" // Align for parallelism (CO) - SH4a use "stc SR, Rn" instead with a dummy Rn
        ".align 2\n"
        "0:\n\t"
        "dt %[size]\n\t" // (--len) ? 0 -> T : 1 -> T (EX 1)
        "mov.b @%[in]+, %[scratch]\n\t" // scratch = *(s++) (LS 1/2)
        "bf.s 0b\n\t" // while(s != nexts) aka while(!T) (BR 1/2)
        " mov.b %[scratch], @(%[offset], %[in])\n" // *(datatype_of_s*) ((char*)s + diff) = scratch, where src + diff = dest (LS 1)
        : [in] "+&r" ((uint32_t)s), [scratch] "=&r" ((uint32_t)d), [size] "+&r" (len) // outputs
        : [offset] "z" (diff) // inputs
        : "t", "memory" // clobbers
    );

    return dest;
}
#else
#define AV_MEMCPY4 memcpy
#endif
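
/* For reference: on non-Dreamcast builds AV_MEMCPY4 is just memcpy, while the
 * SH4 assembly above behaves roughly like the plain C loop below (an
 * illustrative sketch only, not compiled as part of this header):
 *
 *     const uint8_t* s = (const uint8_t*) src;
 *     uint32_t diff = (uint32_t) dest - ((uint32_t) s + 1);
 *     do {
 *         uint8_t byte = *s++;                      // source is incremented first...
 *         *(uint8_t*)((uint32_t) s + diff) = byte;  // ...so s + diff lands on the matching dest byte
 *     } while(--len);
 */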

typedef struct {
    uint32_t size;
    uint32_t capacity;
    uint32_t element_size;
} __attribute__((aligned(32))) AlignedVectorHeader;

typedef struct {
    AlignedVectorHeader hdr;
    uint8_t* data;
} AlignedVector;

#define ALIGNED_VECTOR_CHUNK_SIZE 256u

#define ROUND_TO_CHUNK_SIZE(v) \
    ((((v) + ALIGNED_VECTOR_CHUNK_SIZE - 1) / ALIGNED_VECTOR_CHUNK_SIZE) * ALIGNED_VECTOR_CHUNK_SIZE)
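
/* Worked example of the rounding above (with the 256-element chunk size):
 *     ROUND_TO_CHUNK_SIZE(1)   == 256
 *     ROUND_TO_CHUNK_SIZE(256) == 256
 *     ROUND_TO_CHUNK_SIZE(257) == 512
 * i.e. requested element counts are rounded up to the next multiple of
 * ALIGNED_VECTOR_CHUNK_SIZE. */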

void aligned_vector_init(AlignedVector* vector, uint32_t element_size);

AV_FORCE_INLINE void* aligned_vector_at(const AlignedVector* vector, const uint32_t index) {
    const AlignedVectorHeader* hdr = &vector->hdr;
    assert(index < hdr->size);
    return vector->data + (index * hdr->element_size);
}

AV_FORCE_INLINE void* aligned_vector_reserve(AlignedVector* vector, uint32_t element_count) {
    AlignedVectorHeader* hdr = &vector->hdr;

    if(element_count < hdr->capacity) {
        return aligned_vector_at(vector, element_count);
    }

    uint32_t original_byte_size = (hdr->size * hdr->element_size);

    /* We overallocate so that we don't make small allocations during push backs */
    element_count = ROUND_TO_CHUNK_SIZE(element_count);

    uint32_t new_byte_size = (element_count * hdr->element_size);
    uint8_t* original_data = vector->data;

    vector->data = (uint8_t*) memalign(0x20, new_byte_size);
    assert(vector->data);

    AV_MEMCPY4(vector->data, original_data, original_byte_size);
    free(original_data);

    hdr->capacity = element_count;
    return vector->data + original_byte_size;
}
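
/* Usage sketch for aligned_vector_reserve (hypothetical caller code, not part
 * of this header): reserving up front avoids repeated reallocations during a
 * batch of push backs.
 *
 *     AlignedVector av;
 *     aligned_vector_init(&av, sizeof(float));
 *     aligned_vector_reserve(&av, 1024);         // capacity grows; size stays 0
 *
 *     for(int i = 0; i < 1000; ++i) {
 *         float f = (float) i;
 *         aligned_vector_push_back(&av, &f, 1);  // stays within the reserved capacity
 *     }
 */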

AV_FORCE_INLINE AlignedVectorHeader* aligned_vector_header(const AlignedVector* vector) {
    return (AlignedVectorHeader*) &vector->hdr;
}

AV_FORCE_INLINE uint32_t aligned_vector_size(const AlignedVector* vector) {
    const AlignedVectorHeader* hdr = &vector->hdr;
    return hdr->size;
}

AV_FORCE_INLINE uint32_t aligned_vector_capacity(const AlignedVector* vector) {
    const AlignedVectorHeader* hdr = &vector->hdr;
    return hdr->capacity;
}

AV_FORCE_INLINE void* aligned_vector_front(const AlignedVector* vector) {
    return vector->data;
}

/* Resizes the array and returns a pointer to the first new element (if upsizing) or NULL (if downsizing) */
AV_FORCE_INLINE void* aligned_vector_resize(AlignedVector* vector, const uint32_t element_count) {
    void* ret = NULL;

    AlignedVectorHeader* hdr = &vector->hdr;
    uint32_t previous_count = hdr->size;

    if(hdr->capacity <= element_count) {
        /* If we didn't have capacity, increase capacity (slow) */
        aligned_vector_reserve(vector, element_count);
        hdr->size = element_count;

        ret = aligned_vector_at(vector, previous_count);

        assert(hdr->size == element_count);
        assert(hdr->size <= hdr->capacity);
    } else if(previous_count < element_count) {
        /* We grew, but already had the capacity, so just return a pointer
         * to the first new element */
        hdr->size = element_count;
        assert(hdr->size < hdr->capacity);
        ret = aligned_vector_at(vector, previous_count);
    } else if(hdr->size != element_count) {
        hdr->size = element_count;
        assert(hdr->size < hdr->capacity);
    }

    return ret;
}
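
/* Illustration of the return-value contract above (assuming an initialised
 * vector): growing returns a pointer to the first newly added slot, shrinking
 * returns NULL.
 *
 *     void* new_slots = aligned_vector_resize(&av, 10); // grow: &element[previous_size]
 *     void* nothing   = aligned_vector_resize(&av, 4);  // shrink: NULL, size is now 4
 */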

AV_FORCE_INLINE void* aligned_vector_push_back(AlignedVector* vector, const void* objs, uint32_t count) {
    /* Resize to make enough room for the new elements */
    AlignedVectorHeader* hdr = &vector->hdr;

    assert(count);
    assert(hdr->element_size);

#ifndef NDEBUG
    uint32_t element_size = hdr->element_size;
    uint32_t initial_size = hdr->size;
#endif

    uint8_t* dest = (uint8_t*) aligned_vector_resize(vector, hdr->size + count);
    assert(dest);

    /* Copy the objects in */
    AV_MEMCPY4(dest, objs, hdr->element_size * count);

    assert(hdr->element_size == element_size);
    assert(hdr->size == initial_size + count);

    return dest;
}

AV_FORCE_INLINE void* aligned_vector_extend(AlignedVector* vector, const uint32_t additional_count) {
    AlignedVectorHeader* hdr = &vector->hdr;
    void* ret = aligned_vector_resize(vector, hdr->size + additional_count);
    assert(ret); // Should always return something
    return ret;
}

AV_FORCE_INLINE void aligned_vector_clear(AlignedVector* vector) {
    AlignedVectorHeader* hdr = &vector->hdr;
    hdr->size = 0;
}

void aligned_vector_shrink_to_fit(AlignedVector* vector);
void aligned_vector_cleanup(AlignedVector* vector);

AV_FORCE_INLINE void* aligned_vector_back(AlignedVector* vector) {
    AlignedVectorHeader* hdr = &vector->hdr;
    return aligned_vector_at(vector, hdr->size - 1);
}
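
/* End-to-end usage sketch (hypothetical caller code, not part of this header;
 * assumes aligned_vector_cleanup() releases the backing allocation, as its
 * name suggests):
 *
 *     typedef struct { float x, y, z; } Vertex;
 *
 *     AlignedVector av;
 *     aligned_vector_init(&av, sizeof(Vertex));
 *
 *     Vertex v = {1.0f, 2.0f, 3.0f};
 *     aligned_vector_push_back(&av, &v, 1);
 *
 *     Vertex* first = (Vertex*) aligned_vector_front(&av);
 *     Vertex* last  = (Vertex*) aligned_vector_back(&av);   // first == last while size == 1
 *
 *     aligned_vector_clear(&av);    // size -> 0, capacity is retained
 *     aligned_vector_cleanup(&av);
 */
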
#ifdef __cplusplus
}
#endif