Files
moo/lib/xma.c

932 lines
25 KiB
C
Raw Normal View History

2020-10-31 04:39:48 +00:00
/*
Copyright (c) 2014-2019 Chung, Hyung-Hwan. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <moo-xma.h>
#include "moo-prv.h"
2025-07-16 23:18:46 +09:00
#include <assert.h> /* TODO: replace assert() with MOO_ASSERT() or something */
2020-10-31 04:39:48 +00:00
2025-07-16 23:18:46 +09:00
/*
 * in the following run, movaps tries to write to the address 0x7fffea722f78.
 * since the instruction deals with 16-byte aligned data only, it triggered
 * the general protection error.
 *
$ gdb ~/xxx/bin/xxx
Program received signal SIGSEGV, Segmentation fault.
0x000000000042156a in set_global (rtx=rtx@entry=0x7fffea718ff8, idx=idx@entry=2,
    var=var@entry=0x0, val=val@entry=0x1, assign=assign@entry=0) at ../../../lib/run.c:358
358			rtx->gbl.fnr = lv;
(gdb) print &rtx->gbl.fnr
$1 = (xxx_int_t *) 0x7fffea722f78
(gdb) disp /i 0x42156a
1: x/i 0x42156a
=> 0x42156a <set_global+874>:	movaps %xmm2,0x9f80(%rbp)
*/

/* set ALIGN to twice the pointer size to prevent unaligned memory access by
 * instructions dealing with data larger than the system word size. e.g. movaps on x86_64 */
#define ALIGN (MOO_SIZEOF_VOID_P * 2) /* this must be a power of 2 */

/* size of the per-block header stamped in front of every block */
#define MBLKHDRSIZE (MOO_SIZEOF(moo_xma_mblk_t))

/* smallest payload a block may carry - the payload of a free block must be able
 * to hold the two free-list link pointers of moo_xma_fblk_t */
#define MINALLOCSIZE (MOO_SIZEOF(moo_xma_fblk_t) - MOO_SIZEOF(moo_xma_mblk_t))

#define FBLKMINSIZE (MBLKHDRSIZE + MINALLOCSIZE) /* or MOO_SIZEOF(moo_xma_fblk_t) - need space for the free links when the block is freed */

/* NOTE: you must ensure that FBLKMINSIZE is equal to ALIGN or multiples of ALIGN */

/* convert between the block header address and the user pointer handed out to callers */
#define SYS_TO_USR(b) ((moo_uint8_t*)(b) + MBLKHDRSIZE)
#define USR_TO_SYS(b) ((moo_uint8_t*)(b) - MBLKHDRSIZE)

/*
 * the xfree array is divided into three region
 * 0 ....................... FIXED ......................... XFIMAX-1 ... XFIMAX
 * | small fixed-size chains | large chains | huge chain |
 */
#define FIXED MOO_XMA_FIXED
#define XFIMAX(xma) (MOO_COUNTOF(xma->xfree)-1)

/* accessors for the header fields. both header types share the same leading layout */
#define fblk_free(b) (((moo_xma_fblk_t*)(b))->free)
#define fblk_size(b) (((moo_xma_fblk_t*)(b))->size)
#define fblk_prev_size(b) (((moo_xma_fblk_t*)(b))->prev_size)
#if 0
#define mblk_free(b) (((moo_xma_mblk_t*)(b))->free)
#define mblk_size(b) (((moo_xma_mblk_t*)(b))->size)
#define mblk_prev_size(b) (((moo_xma_mblk_t*)(b))->prev_size)
#else
/* Let mblk_free(), mblk_size(), mblk_prev_size() be an alias to
 * fblk_free(), fblk_size(), fblk_prev_size() to follow strict aliasing rule.
 * if gcc/clang is used, specifying __attribute__((__may_alias__)) to moo_xma_mblk_t
 * and moo_xma_fblk_t would also work. */
#define mblk_free(b) fblk_free(b)
#define mblk_size(b) fblk_size(b)
#define mblk_prev_size(b) fblk_prev_size(b)
#endif

/* step to the physically adjacent blocks using the sizes recorded in the headers */
#define next_mblk(b) ((moo_xma_mblk_t*)((moo_uint8_t*)b + MBLKHDRSIZE + mblk_size(b)))
#define prev_mblk(b) ((moo_xma_mblk_t*)((moo_uint8_t*)b - (MBLKHDRSIZE + mblk_prev_size(b))))
/* header placed in front of every block, allocated or free. the usable
 * payload of an allocated block begins immediately after this header. */
struct moo_xma_mblk_t
{
	moo_oow_t prev_size; /* payload size of the physically previous block; 0 for the first block in the zone */

	/* the block size is shifted by 1 bit and the maximum value is
	 * offset by 1 bit because of the 'free' bit-field.
	 * i could keep 'size' without shifting with bit manipulation
	 * because the actual size is aligned and the last bit will
	 * never be 1. i don't think there is a practical use case where
	 * you need to allocate a huge chunk covering the entire
	 * address space of your machine. */
	moo_oow_t free: 1; /* 1 while the block sits on a free list, 0 when allocated */
	moo_oow_t size: MOO_XMA_SIZE_BITS; /**< block size */
};
2020-10-31 04:39:48 +00:00
2025-07-16 23:18:46 +09:00
/* view of a free block. the three leading fields mirror moo_xma_mblk_t so a
 * block can be accessed through either type depending on its free state. */
struct moo_xma_fblk_t
{
	moo_oow_t prev_size;
	moo_oow_t free: 1;
	moo_oow_t size: MOO_XMA_SIZE_BITS;/**< block size */

	/* the fields above must be identical to the fields of moo_xma_mblk_t */
	/* the two fields below are used only if the block is free - they live in
	 * what is the user payload area of an allocated block */
	moo_xma_fblk_t* free_prev; /**< link to the previous free block */
	moo_xma_fblk_t* free_next; /**< link to the next free block */
};
2020-11-03 07:18:38 +00:00
/*#define VERIFY*/

#if defined(VERIFY)
/* walk every block in the zone and assert the allocator invariants:
 * - the first block has prev_size 0
 * - each block's prev_size matches the size of its physical predecessor
 * - (with stats) the walked free/used totals agree with the counters.
 * 'desc' names the call site; currently unused in the checks themselves. */
static void DBG_VERIFY (moo_xma_t* xma, const char* desc)
{
	moo_xma_mblk_t* tmp, * next;
	moo_oow_t cnt;
	moo_oow_t fsum, asum;
#if defined(MOO_XMA_ENABLE_STAT)
	moo_oow_t isum;
#endif

	for (tmp = (moo_xma_mblk_t*)xma->start, cnt = 0, fsum = 0, asum = 0; (moo_uint8_t*)tmp < xma->end; tmp = next, cnt++)
	{
		next = next_mblk(tmp);

		if ((moo_uint8_t*)tmp == xma->start)
		{
			assert (tmp->prev_size == 0);
		}
		if ((moo_uint8_t*)next < xma->end)
		{
			assert (next->prev_size == tmp->size);
		}

		/* accumulate free vs allocated payload bytes */
		if (tmp->free) fsum += tmp->size;
		else asum += tmp->size;
	}

#if defined(MOO_XMA_ENABLE_STAT)
	/* header overhead: one header per block, free or used */
	isum = (xma->stat.nfree + xma->stat.nused) * MBLKHDRSIZE;
	assert (asum == xma->stat.alloc);
	assert (fsum == xma->stat.avail);
	assert (isum == xma->stat.total - (xma->stat.alloc + xma->stat.avail));
	assert (asum + fsum + isum == xma->stat.total);
#endif
}
#else
#define DBG_VERIFY(xma, desc)
#endif
2025-07-16 23:18:46 +09:00
/* compute floor(log2(n)) by binary search over the bit width of moo_oow_t.
 * 'n' must be non-zero. each step tests whether the top half of the remaining
 * width is clear and, if so, shifts it away while reducing the result. */
static MOO_INLINE moo_oow_t szlog2 (moo_oow_t n)
{
	/*
	 * 2**x = n;
	 * x = log2(n);
	 * -------------------------------------------
	 * unsigned int x = 0;
	 * while((n >> x) > 1) ++x;
	 * return x;
	 */
#define BITS (MOO_SIZEOF_OOW_T * 8)
	int x = BITS - 1;

#if MOO_SIZEOF_OOW_T >= 128
#	error moo_oow_t too large. unsupported platform
#endif

#if MOO_SIZEOF_OOW_T >= 64
	/* BUGFIX: the mask must cover the top 256 bits (BITS-256) to match the
	 * 256-bit step; it previously read (BITS-128), inconsistent with every
	 * other branch where the mask width equals the shift amount. */
	if ((n & (~(moo_oow_t)0 << (BITS-256))) == 0) { x -= 256; n <<= 256; }
#endif
#if MOO_SIZEOF_OOW_T >= 32
	if ((n & (~(moo_oow_t)0 << (BITS-128))) == 0) { x -= 128; n <<= 128; }
#endif
#if MOO_SIZEOF_OOW_T >= 16
	if ((n & (~(moo_oow_t)0 << (BITS-64))) == 0) { x -= 64; n <<= 64; }
#endif
#if MOO_SIZEOF_OOW_T >= 8
	if ((n & (~(moo_oow_t)0 << (BITS-32))) == 0) { x -= 32; n <<= 32; }
#endif
#if MOO_SIZEOF_OOW_T >= 4
	if ((n & (~(moo_oow_t)0 << (BITS-16))) == 0) { x -= 16; n <<= 16; }
#endif
#if MOO_SIZEOF_OOW_T >= 2
	if ((n & (~(moo_oow_t)0 << (BITS-8))) == 0) { x -= 8; n <<= 8; }
#endif
#if MOO_SIZEOF_OOW_T >= 1
	if ((n & (~(moo_oow_t)0 << (BITS-4))) == 0) { x -= 4; n <<= 4; }
#endif
	if ((n & (~(moo_oow_t)0 << (BITS-2))) == 0) { x -= 2; n <<= 2; }
	if ((n & (~(moo_oow_t)0 << (BITS-1))) == 0) { x -= 1; }

	return x;
#undef BITS
}
2025-07-16 23:18:46 +09:00
/* map an ALIGN-rounded block size to its free-list slot: one exact-size slot
 * per ALIGN step for small sizes, logarithmic slots for large sizes, and the
 * last slot (XFIMAX) for everything beyond. */
static MOO_INLINE moo_oow_t getxfi (moo_xma_t* xma, moo_oow_t size)
{
	moo_oow_t slot = (size / ALIGN) - 1;
	if (slot >= FIXED) slot = szlog2(size) - xma->bdec + FIXED;
	return (slot > XFIMAX(xma))? XFIMAX(xma): slot;
}
/* create an allocator object with 'xtnsize' extension bytes trailing it.
 * the zone is taken over from 'zoneptr' or allocated internally when it is
 * MOO_NULL. returns MOO_NULL on failure. */
moo_xma_t* moo_xma_open (moo_mmgr_t* mmgr, moo_oow_t xtnsize, void* zoneptr, moo_oow_t zonesize)
{
	moo_xma_t* xma;

	xma = (moo_xma_t*)MOO_MMGR_ALLOC(mmgr, MOO_SIZEOF(*xma) + xtnsize);
	if (MOO_UNLIKELY(!xma)) goto oops;
	if (moo_xma_init(xma, mmgr, zoneptr, zonesize) <= -1) goto oops_free;

	/* clear the extension area that follows the main structure */
	MOO_MEMSET (xma + 1, 0, xtnsize);
	return xma;

oops_free:
	MOO_MMGR_FREE (mmgr, xma);
oops:
	return MOO_NULL;
}
/* finalize the allocator and release the object created by moo_xma_open() */
void moo_xma_close (moo_xma_t* xma)
{
	moo_mmgr_t* mmgr = xma->_mmgr;
	moo_xma_fini (xma);
	MOO_MMGR_FREE (mmgr, xma);
}
/* initialize the allocator over a memory zone. if 'zoneptr' is MOO_NULL the
 * zone of 'zonesize' bytes is allocated via 'mmgr' and owned by the allocator;
 * otherwise the caller-provided zone is used as-is. the whole zone becomes one
 * big free block. returns 0 on success, -1 on failure. */
int moo_xma_init (moo_xma_t* xma, moo_mmgr_t* mmgr, void* zoneptr, moo_oow_t zonesize)
{
	moo_xma_fblk_t* first;
	moo_oow_t xfi;
	int internal = 0;

	if (!zoneptr)
	{
		/* round 'zonesize' to be the multiples of ALIGN */
		zonesize = MOO_ALIGN_POW2(zonesize, ALIGN);

		/* adjust 'zonesize' to be large enough to hold a single smallest block */
		if (zonesize < FBLKMINSIZE) zonesize = FBLKMINSIZE;

		zoneptr = MOO_MMGR_ALLOC(mmgr, zonesize);
		if (MOO_UNLIKELY(!zoneptr)) return -1;

		internal = 1; /* internally created. must be freed upon moo_xma_fini() */
	}
	else if (zonesize < FBLKMINSIZE)
	{
		/* the zone size is too small for an externally allocated zone. */
		/* TODO: different error code from memory allocation failure.. this is not really memory shortage */
		return -1;
	}

	first = (moo_xma_fblk_t*)zoneptr;

	/* initialize the header part of the free chunk. the entire zone is a single free block */
	first->prev_size = 0;
	first->free = 1;
	first->size = zonesize - MBLKHDRSIZE; /* size excluding the block header */
	first->free_prev = MOO_NULL;
	first->free_next = MOO_NULL;

	MOO_MEMSET(xma, 0, MOO_SIZEOF(*xma));
	xma->_mmgr = mmgr;
	xma->bdec = szlog2(FIXED * ALIGN); /* precalculate the decrement value used by getxfi() */

	/* at this point, the 'free' chunk is the only block available */

	/* get the free block index */
	xfi = getxfi(xma, first->size);

	/* locate it into an appropriate slot */
	xma->xfree[xfi] = first;

	/* let it be the head, which is natural with only a block */
	xma->start = (moo_uint8_t*)first;
	xma->end = xma->start + zonesize;
	xma->internal = internal;

	/* initialize some statistical variables
	 * (the MEMSET above already zeroed them; these assignments document intent) */
#if defined(MOO_XMA_ENABLE_STAT)
	xma->stat.total = zonesize;
	xma->stat.alloc = 0;
	xma->stat.avail = zonesize - MBLKHDRSIZE; /* payload bytes; one header is overhead */
	xma->stat.nfree = 1;
	xma->stat.nused = 0;
	xma->stat.alloc_hwmark = 0;
	xma->stat.nallocops = 0;
	xma->stat.nallocgoodops = 0;
	xma->stat.nallocbadops = 0;
	xma->stat.nreallocops = 0;
	xma->stat.nreallocgoodops = 0;
	xma->stat.nreallocbadops = 0;
	xma->stat.nfreeops = 0;
#endif

	return 0;
}
/* finalize the allocator. the zone is released only when it was allocated
 * internally by moo_xma_init(). */
void moo_xma_fini (moo_xma_t* xma)
{
	moo_uint8_t* zone = xma->start;

	xma->start = MOO_NULL;
	xma->end = MOO_NULL;

	if (xma->internal) MOO_MMGR_FREE (xma->_mmgr, zone);
}
/* push the free block 'b' onto the head of the doubly-linked free list
 * for its size class */
static MOO_INLINE void attach_to_freelist (moo_xma_t* xma, moo_xma_fblk_t* b)
{
	moo_oow_t slot = getxfi(xma, b->size);
	moo_xma_fblk_t* head = xma->xfree[slot];

	b->free_prev = MOO_NULL;
	b->free_next = head;
	if (head) head->free_prev = b;

	xma->xfree[slot] = b;
}
/* unlink the block 'b' from whichever doubly-linked free list it is on */
static MOO_INLINE void detach_from_freelist (moo_xma_t* xma, moo_xma_fblk_t* b)
{
	moo_xma_fblk_t* prev = b->free_prev;
	moo_xma_fblk_t* next = b->free_next;

	if (next) next->free_prev = prev;

	if (prev)
	{
		prev->free_next = next;
	}
	else
	{
		/* no previous item - 'b' is the list head. advance the head of the
		 * slot for its size class. */
		moo_oow_t slot = getxfi(xma, b->size);
		assert (b == xma->xfree[slot]);
		xma->xfree[slot] = next;
	}
}
/* first-fit scan of the free chain at index 'xfi' for a block that can hold
 * 'size' payload bytes ('size' must already be ALIGN-rounded). the chosen
 * block is detached, split when the remainder can stand as a block of its
 * own, and marked allocated. returns MOO_NULL when nothing fits. */
static moo_xma_fblk_t* alloc_from_freelist (moo_xma_t* xma, moo_oow_t xfi, moo_oow_t size)
{
	moo_xma_fblk_t* cand;

	for (cand = xma->xfree[xfi]; cand; cand = cand->free_next)
	{
		if (cand->size >= size)
		{
			moo_oow_t rem;

			detach_from_freelist(xma, cand);

			rem = cand->size - size;
			if (rem >= FBLKMINSIZE)
			{
				moo_xma_mblk_t* y, * z;

				/* the remaining part is large enough to hold
				 * another block. let's split it
				 */

				/* shrink the size of the 'cand' block */
				cand->size = size;

				/* let 'y' point to the remaining part */
				y = next_mblk(cand);

				/* initialize some fields */
				y->free = 1;
				y->size = rem - MBLKHDRSIZE; /* one header is consumed by the split */
				y->prev_size = cand->size;

				/* add the remaining part to the free list */
				attach_to_freelist(xma, (moo_xma_fblk_t*)y);

				/* keep the backward link of the block after 'y' consistent */
				z = next_mblk(y);
				if ((moo_uint8_t*)z < xma->end) z->prev_size = y->size;

#if defined(MOO_XMA_ENABLE_STAT)
				/* the split spent one header out of the available pool.
				 * nfree is unchanged: one free block consumed, one created. */
				xma->stat.avail -= MBLKHDRSIZE;
#endif
			}
#if defined(MOO_XMA_ENABLE_STAT)
			else
			{
				/* decrement the number of free blocks as the current
				 * block is allocated as a whole without being split */
				xma->stat.nfree--;
			}
#endif

			cand->free = 0;
			/*
			cand->free_next = MOO_NULL;
			cand->free_prev = MOO_NULL;
			*/

#if defined(MOO_XMA_ENABLE_STAT)
			xma->stat.nused++;
			xma->stat.alloc += cand->size;
			xma->stat.avail -= cand->size;
			if (xma->stat.alloc > xma->stat.alloc_hwmark) xma->stat.alloc_hwmark = xma->stat.alloc;
#endif

			return cand;
		}
	}

	return MOO_NULL;
}
2020-11-02 08:23:16 +00:00
2020-10-31 04:39:48 +00:00
/* allocate 'size' bytes from the zone. returns a pointer to the user payload
 * area or MOO_NULL on failure. lookup order: the exact fixed-size chain, then
 * the block's own chain, then the huge chain, then progressively larger
 * chains. */
void* moo_xma_alloc (moo_xma_t* xma, moo_oow_t size)
{
	moo_xma_fblk_t* cand;
	moo_oow_t xfi, native_xfi;

	DBG_VERIFY(xma, "alloc start");

#if defined(MOO_XMA_ENABLE_STAT)
	xma->stat.nallocops++;
#endif

	/* round up 'size' to the multiples of ALIGN */
	if (size < MINALLOCSIZE) size = MINALLOCSIZE;
	size = MOO_ALIGN_POW2(size, ALIGN);

	assert (size >= ALIGN);
	xfi = getxfi(xma, size);
	native_xfi = xfi; /* remember the natural index for the fixed-chain retry loop */

	/*if (xfi < XFIMAX(xma) && xma->xfree[xfi])*/
	if (xfi < FIXED && xma->xfree[xfi])
	{
		/* try the best fit. a fixed-size chain holds blocks of one exact
		 * size, so the head block matches without scanning or splitting. */
		cand = xma->xfree[xfi];

		assert (cand->free != 0);
		assert (cand->size == size);

		detach_from_freelist(xma, cand);
		cand->free = 0;

#if defined(MOO_XMA_ENABLE_STAT)
		xma->stat.nfree--;
		xma->stat.nused++;
		xma->stat.alloc += cand->size;
		xma->stat.avail -= cand->size;
		if (xma->stat.alloc > xma->stat.alloc_hwmark) xma->stat.alloc_hwmark = xma->stat.alloc;
#endif
	}
	else if (xfi == XFIMAX(xma))
	{
		/* huge block */
		cand = alloc_from_freelist(xma, XFIMAX(xma), size);
		if (!cand)
		{
#if defined(MOO_XMA_ENABLE_STAT)
			xma->stat.nallocbadops++;
#endif
			return MOO_NULL;
		}
	}
	else
	{
		if (xfi >= FIXED)
		{
			/* get the block from its own large chain */
			cand = alloc_from_freelist(xma, xfi, size);
			if (!cand)
			{
				/* borrow a large block from the huge block chain */
				cand = alloc_from_freelist(xma, XFIMAX(xma), size);
			}
		}
		else
		{
			/* borrow a small block from the huge block chain */
			cand = alloc_from_freelist(xma, XFIMAX(xma), size);
			if (!cand) xfi = FIXED - 1; /* start the retry loop below at the first large chain */
		}

		if (!cand)
		{
			/* try each large block chain left */
			/* NOTE(review): the bound stops the scan before index
			 * XFIMAX(xma)-1, so the last large chain is never probed here -
			 * confirm whether that chain is intentionally skipped */
			for (++xfi; xfi < XFIMAX(xma) - 1; xfi++)
			{
				cand = alloc_from_freelist(xma, xfi, size);
				if (cand) break;
			}

			if (!cand)
			{
				/* try fixed-sized free chains larger than the natural one */
				for (xfi = native_xfi + 1; xfi < FIXED; xfi++)
				{
					cand = alloc_from_freelist(xma, xfi, size);
					if (cand) break;
				}

				if (!cand)
				{
#if defined(MOO_XMA_ENABLE_STAT)
					xma->stat.nallocbadops++;
#endif
					return MOO_NULL;
				}
			}
		}
	}

#if defined(MOO_XMA_ENABLE_STAT)
	xma->stat.nallocgoodops++;
#endif

	DBG_VERIFY(xma, "alloc end");
	return SYS_TO_USR(cand);
}
/* resize the block behind user pointer 'b' in place: grow by absorbing the
 * next adjacent free block, or shrink by splitting off the tail. returns 'b'
 * on success or MOO_NULL when in-place growth is impossible (the caller then
 * falls back to allocate-copy-free). */
static void* _realloc_merge (moo_xma_t* xma, void* b, moo_oow_t size)
{
	moo_uint8_t* blk = (moo_uint8_t*)USR_TO_SYS(b);

	DBG_VERIFY(xma, "realloc merge start");

	/* rounds up 'size' to be multiples of ALIGN */
	if (size < MINALLOCSIZE) size = MINALLOCSIZE;
	size = MOO_ALIGN_POW2(size, ALIGN);

	if (size > mblk_size(blk))
	{
		/* grow the current block */
		moo_oow_t req;
		moo_uint8_t* n;
		moo_oow_t rem;

		req = size - mblk_size(blk); /* required size additionally */

		n = (moo_uint8_t*)next_mblk(blk);

		/* check if the next adjacent block is available */
		if (n >= xma->end || !mblk_free(n) || req > mblk_size(n)) return MOO_NULL; /* no! */

		/* TODO: check more blocks if the next block is free but small in size.
		 *       check the previous adjacent blocks also */

		assert(mblk_size(blk) == mblk_prev_size(n));

		/* let's merge the current block with the next block */
		detach_from_freelist(xma, (moo_xma_fblk_t*)n);

		rem = (MBLKHDRSIZE + mblk_size(n)) - req;
		if (rem >= FBLKMINSIZE)
		{
			/*
			 * the remaining part of the next block is large enough
			 * to hold a block. break the next block.
			 */
			moo_uint8_t* y, * z;

			mblk_size(blk) += req;
			y = (moo_uint8_t*)next_mblk(blk);
			mblk_free(y) = 1;
			mblk_size(y) = rem - MBLKHDRSIZE;
			mblk_prev_size(y) = mblk_size(blk);
			attach_to_freelist(xma, (moo_xma_fblk_t*)y);

			z = (moo_uint8_t*)next_mblk(y);
			if (z < xma->end) mblk_prev_size(z) = mblk_size(y);

#if defined(MOO_XMA_ENABLE_STAT)
			xma->stat.alloc += req;
			xma->stat.avail -= req; /* req + MBLKHDRSIZE(tmp) - MBLKHDRSIZE(n) */
			if (xma->stat.alloc > xma->stat.alloc_hwmark) xma->stat.alloc_hwmark = xma->stat.alloc;
#endif
		}
		else
		{
			moo_uint8_t* z;

			/* the remaining part of the next block is too small to form an independent block.
			 * utilize the whole block by merging to the resizing block */
			mblk_size(blk) += MBLKHDRSIZE + mblk_size(n);

			z = (moo_uint8_t*)next_mblk(blk);
			if (z < xma->end) mblk_prev_size(z) = mblk_size(blk);

#if defined(MOO_XMA_ENABLE_STAT)
			xma->stat.nfree--;
			xma->stat.alloc += MBLKHDRSIZE + mblk_size(n);
			xma->stat.avail -= mblk_size(n);
			if (xma->stat.alloc > xma->stat.alloc_hwmark) xma->stat.alloc_hwmark = xma->stat.alloc;
#endif
		}
	}
	else if (size < mblk_size(blk))
	{
		/* shrink the block */
		moo_oow_t rem = mblk_size(blk) - size;
		if (rem >= FBLKMINSIZE)
		{
			moo_uint8_t* n;

			n = (moo_uint8_t*)next_mblk(blk);

			/* the leftover is large enough to hold a block of minimum size. split the current block */
			if (n < xma->end && mblk_free(n))
			{
				moo_uint8_t* y, * z;

				/* make the leftover block merge with the next block */
				detach_from_freelist(xma, (moo_xma_fblk_t*)n);

				mblk_size(blk) = size;

				y = (moo_uint8_t*)next_mblk(blk); /* update y to the leftover block with the new block size set above */
				mblk_free(y) = 1;
				mblk_size(y) = rem + mblk_size(n); /* add up the adjacent block - (rem + MBLKHDRSIZE(n) + n->size) - MBLKHDRSIZE(y) */
				mblk_prev_size(y) = mblk_size(blk);

				/* add 'y' to the free list */
				attach_to_freelist(xma, (moo_xma_fblk_t*)y);

				z = (moo_uint8_t*)next_mblk(y); /* get adjacent block to the merged block */
				if (z < xma->end) mblk_prev_size(z) = mblk_size(y);

#if defined(MOO_XMA_ENABLE_STAT)
				xma->stat.alloc -= rem;
				xma->stat.avail += rem; /* rem - MBLKHDRSIZE(y) + MBLKHDRSIZE(n) */
#endif
			}
			else
			{
				moo_uint8_t* y;

				/* link the leftover block to the free list */
				mblk_size(blk) = size;

				y = (moo_uint8_t*)next_mblk(blk); /* update y to the leftover block with the new block size set above */
				mblk_free(y) = 1;
				mblk_size(y) = rem - MBLKHDRSIZE;
				mblk_prev_size(y) = mblk_size(blk);

				attach_to_freelist(xma, (moo_xma_fblk_t*)y);

				/* 'n' still points to the block that follows 'y' (the split
				 * did not change where the old next block begins).
				 * BUGFIX: guard the write - when 'blk' was the last block in
				 * the zone, 'n' equals xma->end and storing prev_size there
				 * would corrupt memory past the zone. the guard used to be
				 * commented out. */
				if (n < xma->end) mblk_prev_size(n) = mblk_size(y);

#if defined(MOO_XMA_ENABLE_STAT)
				xma->stat.nfree++;
				xma->stat.alloc -= rem;
				xma->stat.avail += mblk_size(y);
#endif
			}
		}
	}

	DBG_VERIFY(xma, "realloc merge end");
	return b;
}
/* same as moo_xma_alloc() except the returned memory is zero-filled */
void* moo_xma_calloc (moo_xma_t* xma, moo_oow_t size)
{
	void* ptr;

	ptr = moo_xma_alloc(xma, size);
	if (!ptr) return MOO_NULL;

	MOO_MEMSET (ptr, 0, size);
	return ptr;
}
/* resize an allocation. behaves like moo_xma_alloc() when 'b' is MOO_NULL.
 * first attempts an in-place resize via _realloc_merge(); on failure, falls
 * back to allocate-copy-free. returns the (possibly moved) pointer or
 * MOO_NULL on failure, in which case 'b' remains valid. */
void* moo_xma_realloc (moo_xma_t* xma, void* b, moo_oow_t size)
{
	void* n;

	if (!b)
	{
		/* 'realloc' with NULL is the same as 'alloc' */
		n = moo_xma_alloc(xma, size);
	}
	else
	{
		/* try reallocation by merging the adjacent continuous blocks */
	#if defined(MOO_XMA_ENABLE_STAT) /* BUGFIX: was HAWK_XMA_ENABLE_STAT - wrong project prefix, so nreallocops was never counted */
		xma->stat.nreallocops++;
	#endif
		n = _realloc_merge(xma, b, size);
		if (!n)
		{
	#if defined(MOO_XMA_ENABLE_STAT)
			xma->stat.nreallocbadops++;
	#endif
			/* reallocation by merging failed. fall back to the slow
			 * allocation-copy-free scheme */
			n = moo_xma_alloc(xma, size);
			if (n)
			{
				/* BUGFIX: copy only the smaller of the old and new sizes.
				 * merging fails only when growing, so copying 'size' bytes
				 * would read past the end of the original block. */
				moo_oow_t cpsize = mblk_size(USR_TO_SYS(b));
				if (cpsize > size) cpsize = size;
				MOO_MEMCPY(n, b, cpsize);
				moo_xma_free(xma, b);
			}
		}
		else
		{
	#if defined(MOO_XMA_ENABLE_STAT)
			xma->stat.nreallocgoodops++;
	#endif
		}
	}

	return n;
}
/* return the block behind user pointer 'b' to the zone, coalescing it with
 * whichever physical neighbors (previous and/or next) are free. */
void moo_xma_free (moo_xma_t* xma, void* b)
{
	moo_uint8_t* blk = (moo_uint8_t*)USR_TO_SYS(b);
	moo_uint8_t* x, * y;
	moo_oow_t org_blk_size;

	DBG_VERIFY(xma, "free start");

	org_blk_size = mblk_size(blk);

#if defined(MOO_XMA_ENABLE_STAT)
	/* update statistical variables */
	xma->stat.nused--;
	xma->stat.alloc -= org_blk_size;
	xma->stat.nfreeops++;
#endif

	/* physical neighbors. 'x' is valid only when blk is not the first block;
	 * 'y' only when blk is not the last. both conditions are checked below
	 * before the pointers are dereferenced. */
	x = (moo_uint8_t*)prev_mblk(blk);
	y = (moo_uint8_t*)next_mblk(blk);
	if ((x >= xma->start && mblk_free(x)) && (y < xma->end && mblk_free(y)))
	{
		/*
		 * Merge the block with surrounding blocks
		 *
		 *                blk
		 *                 |
		 *                 v
		 * +------------+------------+------------+------------+
		 * |     X      |            |     Y      |     Z      |
		 * +------------+------------+------------+------------+
		 *
		 * +--------------------------------------+------------+
		 * |     X                                |     Z      |
		 * +--------------------------------------+------------+
		 */
		moo_uint8_t* z;
		/* blk's header size + blk->size + y's header size - both headers
		 * become payload of the merged block */
		moo_oow_t ns = MBLKHDRSIZE + org_blk_size + MBLKHDRSIZE;
		moo_oow_t bs = ns + mblk_size(y);

		/* 'x' grows and may change size class, so re-chain it */
		detach_from_freelist(xma, (moo_xma_fblk_t*)x);
		detach_from_freelist(xma, (moo_xma_fblk_t*)y);

		mblk_size(x) += bs;
		attach_to_freelist(xma, (moo_xma_fblk_t*)x);

		z = (moo_uint8_t*)next_mblk(x);
		if ((moo_uint8_t*)z < xma->end) mblk_prev_size(z) = mblk_size(x);

#if defined(MOO_XMA_ENABLE_STAT)
		/* two free blocks (x, y) collapsed into one */
		xma->stat.nfree--;
		xma->stat.avail += ns;
#endif
	}
	else if (y < xma->end && mblk_free(y))
	{
		/*
		 * Merge the block with the next block
		 *
		 *   blk
		 *    |
		 *    v
		 * +------------+------------+------------+
		 * |            |     Y      |     Z      |
		 * +------------+------------+------------+
		 *
		 *   blk
		 *    |
		 *    v
		 * +-------------------------+------------+
		 * |                         |     Z      |
		 * +-------------------------+------------+
		 */
		moo_uint8_t* z = (moo_uint8_t*)next_mblk(y);

		/* detach y from the free list */
		detach_from_freelist(xma, (moo_xma_fblk_t*)y);

		/* update the block availability */
		mblk_free(blk) = 1;
		/* update the block size. MBLKHDRSIZE for the header space in x */
		mblk_size(blk) += MBLKHDRSIZE + mblk_size(y);

		/* update the backward link of Z */
		if ((moo_uint8_t*)z < xma->end) mblk_prev_size(z) = mblk_size(blk);

		/* attach blk to the free list */
		attach_to_freelist(xma, (moo_xma_fblk_t*)blk);

#if defined(MOO_XMA_ENABLE_STAT)
		/* nfree is unchanged: y disappears but blk becomes free */
		xma->stat.avail += org_blk_size + MBLKHDRSIZE;
#endif
	}
	else if (x >= xma->start && mblk_free(x))
	{
		/*
		 * Merge the block with the previous block
		 *
		 *                blk
		 *                 |
		 *                 v
		 * +------------+------------+------------+
		 * |     X      |            |     Y      |
		 * +------------+------------+------------+
		 *
		 * +-------------------------+------------+
		 * |     X                   |     Y      |
		 * +-------------------------+------------+
		 */
		detach_from_freelist(xma, (moo_xma_fblk_t*)x);

		mblk_size(x) += MBLKHDRSIZE + org_blk_size;

		assert(y == next_mblk(x));
		if ((moo_uint8_t*)y < xma->end) mblk_prev_size(y) = mblk_size(x);

		attach_to_freelist(xma, (moo_xma_fblk_t*)x);

#if defined(MOO_XMA_ENABLE_STAT)
		/* nfree is unchanged: blk is absorbed into the already-free x */
		xma->stat.avail += MBLKHDRSIZE + org_blk_size;
#endif
	}
	else
	{
		/* no free neighbor - just mark the block free and chain it */
		mblk_free(blk) = 1;
		attach_to_freelist(xma, (moo_xma_fblk_t*)blk);

#if defined(MOO_XMA_ENABLE_STAT)
		xma->stat.nfree++;
		xma->stat.avail += org_blk_size;
#endif
	}

	DBG_VERIFY(xma, "free end");
}
/* dump the allocator state - statistics, every block in address order, and
 * the free-list chains - through the printf-like 'dumper' callback. with
 * stats enabled, also asserts that the walked totals match the counters. */
void moo_xma_dump (moo_xma_t* xma, moo_xma_dumper_t dumper, void* ctx)
{
	moo_xma_mblk_t* tmp;
	moo_oow_t fsum, asum, xfi;
#if defined(MOO_XMA_ENABLE_STAT)
	moo_oow_t isum;
#endif

	dumper(ctx, "[XMA DUMP]\n");

#if defined(MOO_XMA_ENABLE_STAT)
	dumper(ctx, "== statistics ==\n");
	dumper(ctx, "Total = %zu\n", xma->stat.total);
	dumper(ctx, "Alloc = %zu\n", xma->stat.alloc);
	dumper(ctx, "Avail = %zu\n", xma->stat.avail);
	dumper(ctx, "Alloc High Watermark = %zu\n", xma->stat.alloc_hwmark);
#endif

	dumper(ctx, "== blocks ==\n");
	dumper(ctx, " size avail address\n");
	for (tmp = (moo_xma_mblk_t*)xma->start, fsum = 0, asum = 0; (moo_uint8_t*)tmp < xma->end; tmp = next_mblk(tmp))
	{
		/* BUGFIX: cast the 'size' bit-field to its full-width type. a raw
		 * bit-field passed through '...' promotes to an implementation-
		 * defined type that need not match %zu. */
		dumper(ctx, " %-18zu %-5u %p\n", (moo_oow_t)tmp->size, (unsigned int)tmp->free, tmp);
		if (tmp->free) fsum += tmp->size;
		else asum += tmp->size;
	}

	dumper(ctx, "== free list ==\n");
	for (xfi = 0; xfi <= XFIMAX(xma); xfi++)
	{
		if (xma->xfree[xfi])
		{
			moo_xma_fblk_t* f;
			for (f = xma->xfree[xfi]; f; f = f->free_next)
			{
				/* BUGFIX: 'xfi' is a moo_oow_t - %d expected an int. use %zu,
				 * the conversion this file pairs with moo_oow_t elsewhere. */
				dumper(ctx, " xfi %zu fblk %p size %lu\n", xfi, f, (unsigned long)f->size);
			}
		}
	}

#if defined(MOO_XMA_ENABLE_STAT)
	/* header overhead: one header per block, free or used */
	isum = (xma->stat.nfree + xma->stat.nused) * MBLKHDRSIZE;
#endif

	dumper(ctx, "---------------------------------------\n");
	dumper(ctx, "Allocated blocks : %18zu bytes\n", asum);
	dumper(ctx, "Available blocks : %18zu bytes\n", fsum);

#if defined(MOO_XMA_ENABLE_STAT)
	dumper(ctx, "Internal use : %18zu bytes\n", isum);
	dumper(ctx, "Total : %18zu bytes\n", (asum + fsum + isum));
	dumper(ctx, "Alloc operations : %18zu\n", xma->stat.nallocops);
	dumper(ctx, "Good alloc operations : %18zu\n", xma->stat.nallocgoodops);
	dumper(ctx, "Bad alloc operations : %18zu\n", xma->stat.nallocbadops);
	dumper(ctx, "Realloc operations : %18zu\n", xma->stat.nreallocops);
	dumper(ctx, "Good realloc operations: %18zu\n", xma->stat.nreallocgoodops);
	dumper(ctx, "Bad realloc operations : %18zu\n", xma->stat.nreallocbadops);
	dumper(ctx, "Free operations : %18zu\n", xma->stat.nfreeops);
#endif

#if defined(MOO_XMA_ENABLE_STAT)
	assert(asum == xma->stat.alloc);
	assert(fsum == xma->stat.avail);
	assert(isum == xma->stat.total - (xma->stat.alloc + xma->stat.avail));
	assert(asum + fsum + isum == xma->stat.total);
#endif
}