WIP: start making memory management better

This commit is contained in:
MysterD 2023-05-12 13:19:10 -07:00
parent 3478db9655
commit e391faf574
7 changed files with 97 additions and 99 deletions

View File

@ -139,6 +139,8 @@ static void _RelocateGraphNodePointers(struct GraphNode *aHead, u64 aOffset) {
static Array<Pair<void *, void *>> sLoadedGraphNodes = {};
// DO NOT COMMIT:
// need to separate pools into one that will be freed and one that won't be, and decide when each applies
void *DynOS_Geo_GetGraphNode(const void *aGeoLayout, bool aKeepInMemory) {
if (aKeepInMemory) {
s32 _LoadedGraphNodeIndex = sLoadedGraphNodes.FindIf([&aGeoLayout](const Pair<void *, void *> &aLoadedGraphNode) { return aLoadedGraphNode.first == aGeoLayout; });
@ -148,31 +150,28 @@ void *DynOS_Geo_GetGraphNode(const void *aGeoLayout, bool aKeepInMemory) {
}
// Process the geo layout on a large pool of memory (16 MB)
struct AllocOnlyPool *_Pool = (struct AllocOnlyPool *) calloc(1, 0x1000000);
_Pool->totalSpace = 0x1000000 - sizeof(struct AllocOnlyPool);
_Pool->usedSpace = 0;
_Pool->startPtr = (u8 *) _Pool + sizeof(struct AllocOnlyPool);
_Pool->freePtr = (u8 *) _Pool + sizeof(struct AllocOnlyPool);
struct AllocOnlyPool *_Pool = alloc_only_pool_init();
void *_Processed = process_geo_layout(_Pool, (void *) aGeoLayout);
// Copy the graph node data to the minimum amount of memory needed
if (_Processed && _Pool->usedSpace != 0) {
struct GraphNode *_Node = (struct GraphNode *) calloc(1, _Pool->usedSpace);
/*struct GraphNode *_Node = (struct GraphNode *) calloc(1, _Pool->usedSpace);
memcpy(_Node, _Pool->startPtr, _Pool->usedSpace);
// Relocate all graph pointers
u64 _Offset = (u64) _Node - (u64) _Pool->startPtr;
_RelocateGraphNodePointers(_Node, _Offset);
_RelocateGraphNodePointers(_Node, _Offset);*/
// Add it to loaded graph nodes
if (aKeepInMemory) {
sLoadedGraphNodes.Add({ (void *) aGeoLayout, (void *) _Node });
if (aKeepInMemory || true) { // DO NOT COMMIT
//sLoadedGraphNodes.Add({ (void *) aGeoLayout, (void *) _Node });
sLoadedGraphNodes.Add({ (void *) aGeoLayout, (void *) _Processed });
}
free(_Pool);
return _Node;
return _Processed;
} else {
alloc_only_pool_free(_Pool);
}
free(_Pool);
return NULL;
}

View File

@ -107,8 +107,7 @@ static s32 eval_script_op(s8 op, s32 arg) {
struct ObjectWarpNode *area_create_warp_node(u8 id, u8 destLevel, u8 destArea, u8 destNode, u8 checkpoint, struct Object *o) {
if (sCurrAreaIndex != -1) {
struct ObjectWarpNode *warpNode =
alloc_only_pool_alloc(sLevelPool, sizeof(struct ObjectWarpNode));
struct ObjectWarpNode *warpNode = alloc_only_pool_alloc(sLevelPool, sizeof(struct ObjectWarpNode));
warpNode->node.id = id;
warpNode->node.destLevel = destLevel + checkpoint;
@ -395,9 +394,15 @@ static void level_cmd_alloc_level_pool(void) {
}
}
// free previous level pool
if (sLevelPool != NULL) {
alloc_only_pool_free(sLevelPool);
sLevelPool = NULL;
}
// allocate new level pool
if (sLevelPool == NULL) {
sLevelPool = alloc_only_pool_init(main_pool_available() - sizeof(struct AllocOnlyPool),
MEMORY_POOL_LEFT);
sLevelPool = alloc_only_pool_init();
}
sCurrentCmd = CMD_NEXT;
@ -406,8 +411,11 @@ static void level_cmd_alloc_level_pool(void) {
static void level_cmd_free_level_pool(void) {
s32 i;
alloc_only_pool_resize(sLevelPool, sLevelPool->usedSpace);
if (!sFinishedLoadingPerm) {
sFinishedLoadingPerm = true;
// make sure we don't free the pool with the permanent models
sLevelPool = NULL;
}
for (i = 0; i < 8; i++) {
if (gAreaData[i].terrainData != NULL) {
@ -472,9 +480,6 @@ static void level_cmd_load_model_from_geo(void) {
gLoadedGraphNodes[arg0] = process_geo_layout(sLevelPool, arg1);
if (sFinishedLoadingPerm) { sLevelOwnedGraphNodes[arg0] = true; }
smlua_model_util_remember(arg0, LAYER_OPAQUE, arg1, 0);
if (arg0 == MODEL_ERROR_MODEL) {
sFinishedLoadingPerm = true;
}
}
sCurrentCmd = CMD_NEXT;

View File

@ -218,6 +218,7 @@ u32 main_pool_push_state(void) {
* amount of free space left in the pool.
*/
u32 main_pool_pop_state(void) {
if (!gMainPoolState) { return sPoolFreeSpace; }
sPoolFreeSpace = gMainPoolState->freeSpace;
sPoolListHeadL = gMainPoolState->listHeadL;
sPoolListHeadR = gMainPoolState->listHeadR;
@ -248,71 +249,6 @@ static void *dynamic_dma_read(u8 *srcStart, u8 *srcEnd, u32 side) {
return dest;
}
/**
* Allocate an allocation-only pool from the main pool. This pool doesn't
* support freeing allocated memory.
* Return NULL if there is not enough space in the main pool.
*/
struct AllocOnlyPool *alloc_only_pool_init(u32 size, u32 side) {
void *addr;
struct AllocOnlyPool *subPool = NULL;
size = ALIGN4(size);
addr = main_pool_alloc(size + sizeof(struct AllocOnlyPool), side);
if (addr != NULL) {
subPool = (struct AllocOnlyPool *) addr;
subPool->totalSpace = size;
subPool->usedSpace = 0;
subPool->startPtr = (u8 *) addr + sizeof(struct AllocOnlyPool);
subPool->freePtr = (u8 *) addr + sizeof(struct AllocOnlyPool);
}
if (addr == NULL) {
LOG_ERROR("Allocate only pool failed to initalize memory of size 0x%X on side %d.", size, side);
}
return subPool;
}
/**
* Allocate from an allocation-only pool.
* Return NULL if there is not enough space.
*/
void *alloc_only_pool_alloc(struct AllocOnlyPool *pool, s32 size) {
void *addr = NULL;
size = ALIGN4(size);
if (size > 0 && pool->usedSpace + size <= pool->totalSpace) {
addr = pool->freePtr;
pool->freePtr += size;
pool->usedSpace += size;
}
if (addr == NULL) {
LOG_ERROR("Allocate only pool failed to allocate memory of size 0x%X on at pool %p.", size, pool);
} else {
memset(addr, 0, size);
}
return addr;
}
/**
* Resize an allocation-only pool.
* If the pool is increasing in size, the pool must be the last thing allocated
* from the left end of the main pool.
* The pool does not move.
*/
struct AllocOnlyPool *alloc_only_pool_resize(struct AllocOnlyPool *pool, u32 size) {
struct AllocOnlyPool *newPool;
size = ALIGN4(size);
newPool = main_pool_realloc(pool, size + sizeof(struct AllocOnlyPool));
if (newPool != NULL) {
pool->totalSpace = size;
}
if (newPool == NULL) {
LOG_ERROR("Allocate only pool failed to reallocate memory of size 0x%X on at pool %p.", size, pool);
}
return newPool;
}
/**
* Allocate a memory pool from the main pool. This pool supports arbitrary
* order for allocation/freeing.

View File

@ -13,10 +13,14 @@
struct AllocOnlyPool
{
s32 totalSpace;
s32 usedSpace;
u8 *startPtr;
u8 *freePtr;
u32 usedSpace;
struct AllocOnlyNode* tail;
};
struct AllocOnlyNode
{
void* ptr;
struct AllocOnlyNode* prev;
};
struct MemoryPool;
@ -50,9 +54,10 @@ u32 main_pool_pop_state(void);
#define load_segment_decompress_heap(...)
#define load_engine_code_segment(...)
struct AllocOnlyPool *alloc_only_pool_init(u32 size, u32 side);
void *alloc_only_pool_alloc(struct AllocOnlyPool *pool, s32 size);
struct AllocOnlyPool* alloc_only_pool_init(void);
void* alloc_only_pool_alloc(struct AllocOnlyPool *pool, u32 size);
struct AllocOnlyPool* alloc_only_pool_resize(struct AllocOnlyPool *pool, u32 size);
void alloc_only_pool_free(struct AllocOnlyPool *pool);
struct MemoryPool *mem_pool_init(u32 size, u32 side);
void *mem_pool_alloc(struct MemoryPool *pool, u32 size);

48
src/game/memory_dynamic.c Normal file
View File

@ -0,0 +1,48 @@
#include <PR/ultratypes.h>
#include <string.h>
#include "sm64.h"
#define INCLUDED_FROM_MEMORY_C
#include "buffers/buffers.h"
#include "decompress.h"
#include "game_init.h"
#include "main.h"
#include "memory.h"
#include "segment_symbols.h"
#include "segments.h"
#include "pc/debuglog.h"
/**
 * Create an allocation-only pool on the C heap.
 * Each alloc_only_pool_alloc() gets its own heap block, tracked in a singly
 * linked list (newest at `tail`) so the whole pool can be released at once
 * by alloc_only_pool_free().
 * Returns NULL if the pool header itself cannot be allocated.
 */
struct AllocOnlyPool* alloc_only_pool_init(void) {
    struct AllocOnlyPool* pool = calloc(1, sizeof(struct AllocOnlyPool));
    if (pool == NULL) { return NULL; } // OOM: don't dereference a failed allocation
    // calloc() already zeroes the struct; these stores just make intent explicit.
    pool->usedSpace = 0;
    pool->tail = NULL;
    return pool;
}
/**
 * Allocate `size` zero-initialized bytes from an allocation-only pool.
 * The block is owned by the pool and is only released via
 * alloc_only_pool_free() — there is no per-allocation free.
 * Returns NULL if `pool` is NULL or either heap allocation fails.
 */
void* alloc_only_pool_alloc(struct AllocOnlyPool *pool, u32 size) {
    if (pool == NULL) { return NULL; }
    struct AllocOnlyNode* node = calloc(1, sizeof(struct AllocOnlyNode));
    if (node == NULL) { return NULL; } // OOM on tracking node
    node->ptr = calloc(1, size);
    if (node->ptr == NULL) {
        // OOM on the payload: don't leak the tracking node or link a dead entry.
        free(node);
        return NULL;
    }
    // Push onto the pool's list so alloc_only_pool_free() can walk it (LIFO).
    node->prev = pool->tail;
    pool->tail = node;
    pool->usedSpace += size; // bookkeeping only; usedSpace is not a capacity limit here
    return node->ptr;
}
// Intentional no-op: in the per-allocation (linked-list) pool there is no
// contiguous region to shrink or grow, so resizing has nothing to do.
// NOTE(review): always returns NULL, unlike the old contiguous-pool resize,
// which returned the (possibly moved) pool on success — confirm that no
// caller uses the return value before relying on this.
struct AllocOnlyPool* alloc_only_pool_resize(UNUSED struct AllocOnlyPool* pool, UNUSED u32 size) {
return NULL;
}
/**
 * Release every allocation made from `pool`, then the pool header itself.
 * Walks the tracking list from the newest node (`tail`) backwards, freeing
 * each payload and its tracking node. Safe to call with NULL (mirrors
 * free(NULL) semantics); the previous code would dereference a NULL pool.
 */
void alloc_only_pool_free(struct AllocOnlyPool *pool) {
    if (pool == NULL) { return; }
    struct AllocOnlyNode* node = pool->tail;
    while (node != NULL) {
        struct AllocOnlyNode* prev = node->prev; // save link before freeing the node
        free(node->ptr);
        free(node);
        node = prev;
    }
    free(pool);
}

View File

@ -1623,7 +1623,7 @@ void geo_process_root(struct GraphNodeRoot *node, Vp *b, Vp *c, s32 clearColor)
Vp *viewport = alloc_display_list(sizeof(*viewport));
if (viewport == NULL) { return; }
gDisplayListHeap = alloc_only_pool_init(main_pool_available() - sizeof(struct AllocOnlyPool), MEMORY_POOL_LEFT);
gDisplayListHeap = alloc_only_pool_init();
Mtx *initialMatrix = alloc_display_list(sizeof(*initialMatrix));
if (initialMatrix == NULL) { return; }
@ -1665,9 +1665,9 @@ void geo_process_root(struct GraphNodeRoot *node, Vp *b, Vp *c, s32 clearColor)
geo_process_node_and_siblings(node->node.children);
}
gCurGraphNodeRoot = NULL;
if (gShowDebugText) {
print_text_fmt_int(180, 36, "MEM %d", gDisplayListHeap->totalSpace - gDisplayListHeap->usedSpace);
}
main_pool_free(gDisplayListHeap);
//if (gShowDebugText) {
// print_text_fmt_int(180, 36, "MEM %d", gDisplayListHeap->totalSpace - gDisplayListHeap->usedSpace);
//}
alloc_only_pool_free(gDisplayListHeap);
}
}

View File

@ -548,6 +548,11 @@ void smlua_model_util_clear(void) {
//LOG_INFO("Cleared runtime model cache.");
}
// DO NOT COMMIT
// smlua_model_util_load_with_pool_and_cache_id() needs to be reworked to use dynamic pools correctly
// DO NOT COMMIT
u16 smlua_model_util_load_with_pool_and_cache_id(enum ModelExtendedId extId, struct AllocOnlyPool* pool, u16 loadedId) {
if (extId == E_MODEL_NONE) { return MODEL_NONE; }
if (extId >= (u16)(E_MODEL_MAX + sCustomModelsCount)) {
@ -593,7 +598,7 @@ u16 smlua_model_util_load_with_pool_and_cache_id(enum ModelExtendedId extId, str
// load
bool resizePool = false;
if (pool == NULL) {
pool = alloc_only_pool_init(main_pool_available() - sizeof(struct AllocOnlyPool), MEMORY_POOL_LEFT);
pool = alloc_only_pool_init();
resizePool = true;
}
info->shouldFreeAsset = false;