diff --git a/moo/lib/moo-xma.h b/moo/lib/moo-xma.h new file mode 100644 index 0000000..b36137e --- /dev/null +++ b/moo/lib/moo-xma.h @@ -0,0 +1,231 @@ +/* + * $Id$ + * + Copyright (c) 2014-2019 Chung, Hyung-Hwan. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MOO_XMA_H_ +#define _MOO_XMA_H_ + +/** @file + * This file defines an extravagant memory allocator. Why? It may be so. + * The memory allocator allows you to maintain memory blocks from a + * larger memory chunk allocated with an outer memory allocator. + * Typically, an outer memory allocator is a standard memory allocator + * like malloc(). You can isolate memory blocks into a particular chunk. + * + * See the example below. Note it omits error handling. + * + * @code + * #include + * #include + * int main () + * { + * moo_xma_t* xma; + * void* ptr1, * ptr2; + * + * // create a new memory allocator obtaining a 100K byte zone + * // with the default memory allocator + * xma = moo_xma_open (MOO_NULL, 0, 100000L); + * + * ptr1 = moo_xma_alloc (xma, 5000); // allocate a 5K block from the zone + * ptr2 = moo_xma_alloc (xma, 1000); // allocate a 1K block from the zone + * ptr1 = moo_xma_realloc (xma, ptr1, 6000); // resize the 5K block to 6K. + * + * moo_xma_dump (xma, moo_fprintf, MOO_STDOUT); // dump memory blocks + * + * // the following two lines are not actually needed as the allocator + * // is closed after them. + * moo_xma_free (xma, ptr2); // dispose of the 1K block + * moo_xma_free (xma, ptr1); // dispose of the 6K block + * + * moo_xma_close (xma); // destroy the memory allocator + * return 0; + * } + * @endcode + */ +#include + +#ifdef MOO_BUILD_DEBUG +# define MOO_XMA_ENABLE_STAT +#endif + +/** @struct moo_xma_t + * The moo_xma_t type defines a simple memory allocator over a memory zone. + * It can obtain a relatively large zone of memory and manage it. + */ +typedef struct moo_xma_t moo_xma_t; + +/** + * The moo_xma_blk_t type defines a memory block allocated. 
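+ * Each block records its size, a flag telling whether it is free, and links to
+ * its adjacent blocks and, when free, to its neighbours on a free list.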
+ */ +typedef struct moo_xma_blk_t moo_xma_blk_t; + +#define MOO_XMA_FIXED 32 +#define MOO_XMA_SIZE_BITS ((MOO_SIZEOF_OOW_T*8)-1) + +struct moo_xma_t +{ + moo_mmgr_t* _mmgr; + + /** pointer to the first memory block */ + moo_xma_blk_t* head; + + /** pointer array to free memory blocks */ + moo_xma_blk_t* xfree[MOO_XMA_FIXED + MOO_XMA_SIZE_BITS + 1]; + + /** pre-computed value for fast xfree index calculation */ + moo_oow_t bdec; + +#ifdef MOO_XMA_ENABLE_STAT + struct + { + moo_oow_t total; + moo_oow_t alloc; + moo_oow_t avail; + moo_oow_t nused; + moo_oow_t nfree; + } stat; +#endif +}; + +/** + * The moo_xma_dumper_t type defines a printf-like output function + * for moo_xma_dump(). + */ +typedef int (*moo_xma_dumper_t) ( + void* ctx, + const moo_bch_t* fmt, + ... +); + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * The moo_xma_open() function creates a memory allocator. It obtains a memory + * zone of the @a zonesize bytes with the memory manager @a mmgr. It also makes + * available the extension area of the @a xtnsize bytes that you can get the + * pointer to with moo_xma_getxtn(). + * + * @return pointer to a memory allocator on success, #MOO_NULL on failure + */ +MOO_EXPORT moo_xma_t* moo_xma_open ( + moo_mmgr_t* mmgr, /**< memory manager */ + moo_oow_t xtnsize, /**< extension size in bytes */ + moo_oow_t zonesize /**< zone size in bytes */ +); + +/** + * The moo_xma_close() function destroys a memory allocator. It also frees + * the memory zone obtained, which invalidates the memory blocks within + * the zone. Call this function to destroy a memory allocator created with + * moo_xma_open(). + */ +MOO_EXPORT void moo_xma_close ( + moo_xma_t* xma /**< memory allocator */ +); + +#if defined(MOO_HAVE_INLINE) +static MOO_INLINE moo_mmgr_t* moo_xma_getmmgr (moo_xma_t* xma) { return xma->_mmgr; } +#else +# define moo_xma_getmmgr(xma) (((moo_xma_t*)(xma))->_mmgr) +#endif + +#if defined(MOO_HAVE_INLINE) +static MOO_INLINE void* moo_xma_getxtn (moo_xma_t* xma) { return (void*)(xma + 1); } +#else +#define moo_xma_getxtn(xma) ((void*)((moo_xma_t*)(xma) + 1)) +#endif + +/** + * The moo_xma_init() initializes a memory allocator. If you have the moo_xma_t + * structure statically declared or already allocated, you may pass the pointer + * to this function instead of calling moo_xma_open(). It obtains a memory zone + * of @a zonesize bytes with the memory manager @a mmgr. Unlike moo_xma_open(), + * it does not accept the extension size, thus not creating an extention area. + * @return 0 on success, -1 on failure + */ +MOO_EXPORT int moo_xma_init ( + moo_xma_t* xma, /**< memory allocator */ + moo_mmgr_t* mmgr, /**< memory manager */ + moo_oow_t zonesize /**< zone size in bytes */ +); + +/** + * The moo_xma_fini() function finalizes a memory allocator. Call this + * function to finalize a memory allocator initialized with moo_xma_init(). + */ +MOO_EXPORT void moo_xma_fini ( + moo_xma_t* xma /**< memory allocator */ +); + +/** + * The moo_xma_alloc() function allocates @a size bytes. + * @return pointer to a memory block on success, #MOO_NULL on failure + */ +MOO_EXPORT void* moo_xma_alloc ( + moo_xma_t* xma, /**< memory allocator */ + moo_oow_t size /**< size in bytes */ +); + +MOO_EXPORT void* moo_xma_calloc ( + moo_xma_t* xma, + moo_oow_t size +); + +/** + * The moo_xma_alloc() function resizes the memory block @a b to @a size bytes. 
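+ * It first tries to resize the block in place, growing into or shrinking away from
+ * the adjacent block that follows it; if in-place resizing is not possible, it falls
+ * back to allocating a new block, copying the contents and freeing the old block.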
+ * @return pointer to a resized memory block on success, #MOO_NULL on failure + */ +MOO_EXPORT void* moo_xma_realloc ( + moo_xma_t* xma, /**< memory allocator */ + void* b, /**< memory block */ + moo_oow_t size /**< new size in bytes */ +); + +/** + * The moo_xma_alloc() function frees the memory block @a b. + */ +MOO_EXPORT void moo_xma_free ( + moo_xma_t* xma, /**< memory allocator */ + void* b /**< memory block */ +); + +/** + * The moo_xma_dump() function dumps the contents of the memory zone + * with the output function @a dumper provided. The debug build shows + * more statistical counters. + */ +MOO_EXPORT void moo_xma_dump ( + moo_xma_t* xma, /**< memory allocator */ + moo_xma_dumper_t dumper, /**< output function */ + void* ctx /**< first parameter to output function */ +); + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/moo/lib/xma.c b/moo/lib/xma.c new file mode 100644 index 0000000..e8247b7 --- /dev/null +++ b/moo/lib/xma.c @@ -0,0 +1,764 @@ +/* + * $Id$ + * + Copyright (c) 2014-2019 Chung, Hyung-Hwan. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include "moo-prv.h" +#include /* TODO: replace assert() with builtin substition */ + + +#define ALIGN MOO_SIZEOF(moo_oow_t) /* this must be a power of 2 */ +#define HDRSIZE MOO_SIZEOF(moo_xma_blk_t) +#define MINBLKLEN (HDRSIZE + ALIGN) + +#define SYS_TO_USR(_) (((moo_xma_blk_t*)_) + 1) +#define USR_TO_SYS(_) (((moo_xma_blk_t*)_) - 1) + +/* + * the xfree array is divided into three region + * 0 ....................... FIXED ......................... XFIMAX-1 ... 
XFIMAX + * | small fixed-size chains | large chains | huge chain | + */ +#define FIXED MOO_XMA_FIXED +#define XFIMAX(xma) (MOO_COUNTOF(xma->xfree)-1) + +struct moo_xma_blk_t +{ + moo_oow_t avail: 1; + moo_oow_t size: MOO_XMA_SIZE_BITS;/**< block size */ + + struct + { + moo_xma_blk_t* prev; /**< link to the previous free block */ + moo_xma_blk_t* next; /**< link to the next free block */ + } f; + + struct + { + moo_xma_blk_t* prev; /**< link to the previous adjacent block */ + moo_xma_blk_t* next; /**< link to the next adjacent block */ + } b; +}; + +static MOO_INLINE moo_oow_t szlog2 (moo_oow_t n) +{ + /* + * 2**x = n; + * x = log2(n); + * ------------------------------------------- + * unsigned int x = 0; + * while((n >> x) > 1) ++x; + * return x; + */ + +#define BITS (MOO_SIZEOF_OOW_T * 8) + int x = BITS - 1; + +#if MOO_SIZEOF_OOW_T >= 128 +# error moo_oow_t too large. unsupported platform +#endif + +#if MOO_SIZEOF_OOW_T >= 64 + if ((n & (~(moo_oow_t)0 << (BITS-128))) == 0) { x -= 256; n <<= 256; } +#endif +#if MOO_SIZEOF_OOW_T >= 32 + if ((n & (~(moo_oow_t)0 << (BITS-128))) == 0) { x -= 128; n <<= 128; } +#endif +#if MOO_SIZEOF_OOW_T >= 16 + if ((n & (~(moo_oow_t)0 << (BITS-64))) == 0) { x -= 64; n <<= 64; } +#endif +#if MOO_SIZEOF_OOW_T >= 8 + if ((n & (~(moo_oow_t)0 << (BITS-32))) == 0) { x -= 32; n <<= 32; } +#endif +#if MOO_SIZEOF_OOW_T >= 4 + if ((n & (~(moo_oow_t)0 << (BITS-16))) == 0) { x -= 16; n <<= 16; } +#endif +#if MOO_SIZEOF_OOW_T >= 2 + if ((n & (~(moo_oow_t)0 << (BITS-8))) == 0) { x -= 8; n <<= 8; } +#endif +#if MOO_SIZEOF_OOW_T >= 1 + if ((n & (~(moo_oow_t)0 << (BITS-4))) == 0) { x -= 4; n <<= 4; } +#endif + if ((n & (~(moo_oow_t)0 << (BITS-2))) == 0) { x -= 2; n <<= 2; } + if ((n & (~(moo_oow_t)0 << (BITS-1))) == 0) { x -= 1; } + + return x; +#undef BITS +} + +static MOO_INLINE moo_oow_t getxfi (moo_xma_t* xma, moo_oow_t size) +{ + moo_oow_t xfi = ((size) / ALIGN) - 1; + if (xfi >= FIXED) xfi = szlog2(size) - (xma)->bdec + FIXED; + if (xfi > XFIMAX(xma)) xfi = XFIMAX(xma); + return xfi; +} + +moo_xma_t* moo_xma_open (moo_mmgr_t* mmgr, moo_oow_t xtnsize, moo_oow_t zonesize) +{ + moo_xma_t* xma; + + xma = (moo_xma_t*) MOO_MMGR_ALLOC (mmgr, MOO_SIZEOF(*xma) + xtnsize); + if (xma == MOO_NULL) return MOO_NULL; + + if (moo_xma_init (xma, mmgr, zonesize) <= -1) + { + MOO_MMGR_FREE (mmgr, xma); + return MOO_NULL; + } + + MOO_MEMSET (xma + 1, 0, xtnsize); + return xma; +} + +void moo_xma_close (moo_xma_t* xma) +{ + moo_xma_fini (xma); + MOO_MMGR_FREE (xma->_mmgr, xma); +} + +int moo_xma_init (moo_xma_t* xma, moo_mmgr_t* mmgr, moo_oow_t zonesize) +{ + moo_xma_blk_t* free; + moo_oow_t xfi; + + /* round 'zonesize' to be the multiples of ALIGN */ + zonesize = MOO_ALIGN_POW2(zonesize, ALIGN); + + /* adjust 'zonesize' to be large enough to hold a single smallest block */ + if (zonesize < MINBLKLEN) zonesize = MINBLKLEN; + + /* allocate a memory chunk to use for actual memory allocation */ + free = MOO_MMGR_ALLOC(mmgr, zonesize); + if (free == MOO_NULL) return -1; + + /* initialize the header part of the free chunk */ + free->avail = 1; + free->size = zonesize - HDRSIZE; /* size excluding the block header */ + free->f.prev = MOO_NULL; + free->f.next = MOO_NULL; + free->b.next = MOO_NULL; + free->b.prev = MOO_NULL; + + MOO_MEMSET (xma, 0, MOO_SIZEOF(*xma)); + xma->_mmgr = mmgr; + xma->bdec = szlog2(FIXED * ALIGN); /* precalculate the decrement value */ + + /* at this point, the 'free' chunk is a only block available */ + + /* get the free block index */ + xfi = getxfi(xma, free->size); 
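+ /* illustration with assumed values (an 8-byte moo_oow_t): ALIGN is 8, FIXED is 32,
+  * and bdec = szlog2(32 * 8) = 8; getxfi() then maps a 24-byte block to slot
+  * 24/8 - 1 = 2 in the small fixed-size chains, a 4096-byte block to slot
+  * szlog2(4096) - 8 + 32 = 36 in the large chains, and clamps any index past the
+  * end of the table to XFIMAX(xma), the huge chain. */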
+ /* locate it into an apporopriate slot */ + xma->xfree[xfi] = free; + /* let it be the head, which is natural with only a block */ + xma->head = free; + + /* initialize some statistical variables */ +#if defined(MOO_XMA_ENABLE_STAT) + xma->stat.total = zonesize; + xma->stat.alloc = 0; + xma->stat.avail = zonesize - HDRSIZE; + xma->stat.nfree = 1; + xma->stat.nused = 0; +#endif + + return 0; +} + +void moo_xma_fini (moo_xma_t* xma) +{ + /* the head must point to the free chunk allocated in init(). + * let's deallocate it */ + MOO_MMGR_FREE (xma->_mmgr, xma->head); +} + +static MOO_INLINE void attach_to_freelist (moo_xma_t* xma, moo_xma_blk_t* b) +{ + /* + * attach a block to a free list + */ + + /* get the free list index for the block size */ + moo_oow_t xfi = getxfi(xma, b->size); + + /* let it be the head of the free list doubly-linked */ + b->f.prev = MOO_NULL; + b->f.next = xma->xfree[xfi]; + if (xma->xfree[xfi]) xma->xfree[xfi]->f.prev = b; + xma->xfree[xfi] = b; +} + +static MOO_INLINE void detach_from_freelist (moo_xma_t* xma, moo_xma_blk_t* b) +{ + /* + * detach a block from a free list + */ + moo_xma_blk_t* p, * n; + + /* alias the previous and the next with short variable names */ + p = b->f.prev; + n = b->f.next; + + if (p) + { + /* the previous item exists. let its 'next' pointer point to + * the block's next item. */ + p->f.next = n; + } + else + { + /* the previous item does not exist. the block is the first + * item in the free list. */ + + moo_oow_t xfi = getxfi(xma, b->size); + assert (b == xma->xfree[xfi]); + /* let's update the free list head */ + xma->xfree[xfi] = n; + } + + /* let the 'prev' pointer of the block's next item point to the + * block's previous item */ + if (n) n->f.prev = p; +} + +static moo_xma_blk_t* alloc_from_freelist ( + moo_xma_t* xma, moo_oow_t xfi, moo_oow_t size) +{ + moo_xma_blk_t* free; + + for (free = xma->xfree[xfi]; free; free = free->f.next) + { + if (free->size >= size) + { + moo_oow_t rem; + + detach_from_freelist (xma, free); + + rem = free->size - size; + if (rem >= MINBLKLEN) + { + moo_xma_blk_t* tmp; + + /* the remaining part is large enough to hold + * another block. 
let's split it + */ + + /* shrink the size of the 'free' block */ + free->size = size; + + /* let 'tmp' point to the remaining part */ + tmp = (moo_xma_blk_t*)(((moo_uint8_t*)(free + 1)) + size); + + /* initialize some fields */ + tmp->avail = 1; + tmp->size = rem - HDRSIZE; + + /* link 'tmp' to the block list */ + tmp->b.next = free->b.next; + tmp->b.prev = free; + if (free->b.next) free->b.next->b.prev = tmp; + free->b.next = tmp; + + /* add the remaining part to the free list */ + attach_to_freelist (xma, tmp); + +#if defined(MOO_XMA_ENABLE_STAT) + xma->stat.avail -= HDRSIZE; +#endif + } +#if defined(MOO_XMA_ENABLE_STAT) + else + { + /* decrement the number of free blocks as the current + * block is allocated as a whole without being split */ + xma->stat.nfree--; + } +#endif + + free->avail = 0; + /* + free->f.next = MOO_NULL; + free->f.prev = MOO_NULL; + */ + +#if defined(MOO_XMA_ENABLE_STAT) + xma->stat.nused++; + xma->stat.alloc += free->size; + xma->stat.avail -= free->size; +#endif + return free; + } + } + + return MOO_NULL; +} + +void* moo_xma_alloc (moo_xma_t* xma, moo_oow_t size) +{ + moo_xma_blk_t* free; + moo_oow_t xfi; + + if (size <= 0) size = 1; + + /* round up 'size' to the multiples of ALIGN */ + size = MOO_ALIGN_POW2(size, ALIGN); + + assert (size >= ALIGN); + xfi = getxfi(xma, size); + + /*if (xfi < XFIMAX(xma) && xma->xfree[xfi])*/ + if (xfi < FIXED && xma->xfree[xfi]) + { + /* try the best fit */ + free = xma->xfree[xfi]; + + assert (free->avail != 0); + assert (free->size == size); + + detach_from_freelist (xma, free); + free->avail = 0; + +#if defined(MOO_XMA_ENABLE_STAT) + xma->stat.nfree--; + xma->stat.nused++; + xma->stat.alloc += free->size; + xma->stat.avail -= free->size; +#endif + } + else if (xfi == XFIMAX(xma)) + { + /* huge block */ + free = alloc_from_freelist (xma, XFIMAX(xma), size); + if (free == MOO_NULL) return MOO_NULL; + } + else + { + if (xfi >= FIXED) + { + /* get the block from its own large chain */ + free = alloc_from_freelist (xma, xfi, size); + if (free == MOO_NULL) + { + /* borrow a large block from the huge block chain */ + free = alloc_from_freelist (xma, XFIMAX(xma), size); + } + } + else + { + /* borrow a small block from the huge block chain */ + free = alloc_from_freelist (xma, XFIMAX(xma), size); + if (free == MOO_NULL) xfi = FIXED - 1; + } + + if (free == MOO_NULL) + { + /* try each large block chain left */ + for (++xfi; xfi < XFIMAX(xma) - 1; xfi++) + { + free = alloc_from_freelist (xma, xfi, size); + if (free) break; + } + if (free == MOO_NULL) return MOO_NULL; + } + } + + return SYS_TO_USR(free); +} + +static void* _realloc_merge (moo_xma_t* xma, void* b, moo_oow_t size) +{ + moo_xma_blk_t* blk = USR_TO_SYS(b); + + /* rounds up 'size' to be multiples of ALIGN */ + size = MOO_ALIGN_POW2 (size, ALIGN); + + if (size > blk->size) + { + /* + * grow the current block + */ + moo_oow_t req; + moo_xma_blk_t* n; + moo_oow_t rem; + + req = size - blk->size; + + n = blk->b.next; + + /* check if the next adjacent block is available */ + if (!n || !n->avail || req > n->size) return MOO_NULL; /* no! */ + + /* let's merge the current block with the next block */ + detach_from_freelist (xma, n); + + rem = (HDRSIZE + n->size) - req; + if (rem >= MINBLKLEN) + { + /* + * the remaining part of the next block is large enough + * to hold a block. break the next block. 
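+ * (illustration with an assumed 40-byte header: growing blk by req = 32 when the
+ * next free block has n->size = 96 gives rem = (40 + 96) - 32 = 104, so a new
+ * header 'tmp' is written 32 bytes into 'n' and the split-off free block keeps
+ * 104 - 40 = 64 bytes of payload)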
+ */ + + moo_xma_blk_t* tmp; + + /* store n->b.next in case 'tmp' begins somewhere + * in the header part of n */ + moo_xma_blk_t* nn = n->b.next; + + tmp = (moo_xma_blk_t*)(((moo_uint8_t*)n) + req); + + tmp->avail = 1; + tmp->size = rem - HDRSIZE; + attach_to_freelist (xma, tmp); + + blk->size += req; + + tmp->b.next = nn; + if (nn) nn->b.prev = tmp; + + blk->b.next = tmp; + tmp->b.prev = blk; + +#if defined(MOO_XMA_ENABLE_STAT) + xma->stat.alloc += req; + xma->stat.avail -= req; /* req + HDRSIZE(tmp) - HDRSIZE(n) */ +#endif + } + else + { + /* the remaining part of the next block is negligible. + * utilize the whole block by merging to the resizing block */ + blk->size += HDRSIZE + n->size; + blk->b.next = n->b.next; + if (n->b.next) n->b.next->b.prev = blk; + +#if defined(MOO_XMA_ENABLE_STAT) + xma->stat.nfree--; + xma->stat.alloc += HDRSIZE + n->size; + xma->stat.avail -= n->size; +#endif + } + } + else if (size < blk->size) + { + /* + * shrink the block + */ + + moo_oow_t rem = blk->size - size; + if (rem >= MINBLKLEN) + { + moo_xma_blk_t* tmp; + moo_xma_blk_t* n = blk->b.next; + + /* the leftover is large enough to hold a block + * of minimum size. split the current block. + * let 'tmp' point to the leftover. */ + tmp = (moo_xma_blk_t*)(((moo_uint8_t*)(blk + 1)) + size); + tmp->avail = 1; + + if (n && n->avail) + { + /* merge with the next block */ + detach_from_freelist (xma, n); + + tmp->b.next = n->b.next; + tmp->b.prev = blk; + if (n->b.next) n->b.next->b.prev = tmp; + blk->b.next = tmp; + blk->size = size; + + tmp->size = rem - HDRSIZE + HDRSIZE + n->size; + +#if defined(MOO_XMA_ENABLE_STAT) + xma->stat.alloc -= rem; + /* rem - HDRSIZE(tmp) + HDRSIZE(n) */ + xma->stat.avail += rem; +#endif + } + else + { + /* link 'tmp' to the block list */ + tmp->b.next = n; + tmp->b.prev = blk; + if (n) n->b.prev = tmp; + blk->b.next = tmp; + blk->size = size; + + tmp->size = rem - HDRSIZE; + +#if defined(MOO_XMA_ENABLE_STAT) + xma->stat.nfree++; + xma->stat.alloc -= rem; + xma->stat.avail += tmp->size; +#endif + } + + /* add 'tmp' to the free list */ + attach_to_freelist (xma, tmp); + } + } + + return b; +} + +void* moo_xma_calloc (moo_xma_t* xma, moo_oow_t size) +{ + void* ptr = moo_xma_alloc (xma, size); + if (ptr) MOO_MEMSET (ptr, 0, size); + return ptr; +} + +void* moo_xma_realloc (moo_xma_t* xma, void* b, moo_oow_t size) +{ + void* n; + + if (b == MOO_NULL) + { + /* 'realloc' with NULL is the same as 'alloc' */ + n = moo_xma_alloc (xma, size); + } + else + { + /* try reallocation by merging the adjacent continuous blocks */ + n = _realloc_merge (xma, b, size); + if (n == MOO_NULL) + { + /* reallocation by merging failed. 
fall back to the slow + * allocation-copy-free scheme */ + n = moo_xma_alloc (xma, size); + if (n) + { + MOO_MEMCPY (n, b, size); + moo_xma_free (xma, b); + } + } + } + + return n; +} + +void moo_xma_free (moo_xma_t* xma, void* b) +{ + moo_xma_blk_t* blk = USR_TO_SYS(b); + + /*assert (blk->f.next == MOO_NULL);*/ + +#if defined(MOO_XMA_ENABLE_STAT) + /* update statistical variables */ + xma->stat.nused--; + xma->stat.alloc -= blk->size; +#endif + + if ((blk->b.prev && blk->b.prev->avail) && + (blk->b.next && blk->b.next->avail)) + { + /* + * Merge the block with surrounding blocks + * + * blk + * +-----+ | +-----+ +------+ + * | V v | v | V + * +------------+------------+------------+------------+ + * | X | | Y | Z | + * +------------+------------+------------+------------+ + * ^ | ^ | ^ | + * +-----+ +------+ +------+ + * + * + * +-----------------------------------+ + * | V + * +--------------------------------------+------------+ + * | X | Z | + * +--------------------------------------+------------+ + * ^ | + * +-----------------------------------+ + */ + moo_xma_blk_t* x = blk->b.prev; + moo_xma_blk_t* y = blk->b.next; + moo_xma_blk_t* z = y->b.next; + moo_oow_t ns = HDRSIZE + blk->size + HDRSIZE; + moo_oow_t bs = ns + y->size; + + detach_from_freelist (xma, x); + detach_from_freelist (xma, y); + + x->size += bs; + x->b.next = z; + if (z) z->b.prev = x; + + attach_to_freelist (xma, x); + +#if defined(MOO_XMA_ENABLE_STAT) + xma->stat.nfree--; + xma->stat.avail += ns; +#endif + } + else if (blk->b.next && blk->b.next->avail) + { + /* + * Merge the block with the next block + * + * blk + * | +-----+ +------+ + * v | v | V + * +------------+------------+------------+ + * | | X | Y | + * +------------+------------+------------+ + * ^ | ^ | + * +------+ +------+ + * + * blk + * | +------------------+ + * v | V + * +-------------------------+------------+ + * | | Y | + * +-------------------------+------------+ + * ^ | + * +-------------------+ + */ + moo_xma_blk_t* x = blk->b.next; + moo_xma_blk_t* y = x->b.next; + +#if defined(MOO_XMA_ENABLE_STAT) + xma->stat.avail += blk->size + HDRSIZE; +#endif + + /* detach x from the free list */ + detach_from_freelist (xma, x); + + /* update the block availability */ + blk->avail = 1; + /* update the block size. 
HDRSIZE for the header space in x */ + blk->size += HDRSIZE + x->size; + + /* update the backward link of Y */ + if (y) y->b.prev = blk; + /* update the forward link of the block being freed */ + blk->b.next = y; + + /* attach blk to the free list */ + attach_to_freelist (xma, blk); + + } + else if (blk->b.prev && blk->b.prev->avail) + { + /* + * Merge the block with the previous block + * + * blk + * +-----+ | +-----+ + * | V v | v + * +------------+------------+------------+ + * | X | | Y | + * +------------+------------+------------+ + * ^ | ^ | + * +------+ +------+ + * + * + * +---------------------+ + * | v + * +-------------------------+------------+ + * | X | Y | + * +-------------------------+------------+ + * ^ | + * +--------------------+ + * + */ + moo_xma_blk_t* x = blk->b.prev; + moo_xma_blk_t* y = blk->b.next; + +#if defined(MOO_XMA_ENABLE_STAT) + xma->stat.avail += HDRSIZE + blk->size; +#endif + + detach_from_freelist (xma, x); + + x->size += HDRSIZE + blk->size; + x->b.next = y; + if (y) y->b.prev = x; + + attach_to_freelist (xma, x); + } + else + { + blk->avail = 1; + attach_to_freelist (xma, blk); + +#if defined(MOO_XMA_ENABLE_STAT) + xma->stat.nfree++; + xma->stat.avail += blk->size; +#endif + } +} + +void moo_xma_dump (moo_xma_t* xma, moo_xma_dumper_t dumper, void* ctx) +{ + moo_xma_blk_t* tmp; + moo_oow_t fsum, asum; +#if defined(MOO_XMA_ENABLE_STAT) + moo_oow_t isum; +#endif + + dumper (ctx, "\n"); + +#if defined(MOO_XMA_ENABLE_STAT) + dumper (ctx, "== statistics ==\n"); + dumper (ctx, "total = %zu\n", xma->stat.total); + dumper (ctx, "alloc = %zu\n", xma->stat.alloc); + dumper (ctx, "avail = %zu\n", xma->stat.avail); +#endif + + dumper (ctx, "== blocks ==\n"); + dumper (ctx, " size avail address\n"); + for (tmp = xma->head, fsum = 0, asum = 0; tmp; tmp = tmp->b.next) + { + dumper (ctx, " %-18zu %-5u %p\n", tmp->size, (unsigned int)tmp->avail, tmp); + if (tmp->avail) fsum += tmp->size; + else asum += tmp->size; + } + +#if defined(MOO_XMA_ENABLE_STAT) + isum = (xma->stat.nfree + xma->stat.nused) * HDRSIZE; +#endif + + dumper (ctx, "---------------------------------------\n"); + dumper (ctx, "Allocated blocks: %18zu bytes\n", asum); + dumper (ctx, "Available blocks: %18zu bytes\n", fsum); + + +#if defined(MOO_XMA_ENABLE_STAT) + dumper (ctx, "Internal use : %18zu bytes\n", isum); + dumper (ctx, "Total : %18zu bytes\n", (asum + fsum + isum)); +#endif + +#if defined(MOO_XMA_ENABLE_STAT) + assert (asum == xma->stat.alloc); + assert (fsum == xma->stat.avail); + assert (isum == xma->stat.total - (xma->stat.alloc + xma->stat.avail)); + assert (asum + fsum + isum == xma->stat.total); +#endif +} +
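The moo_xma_dumper_t callback declared in moo-xma.h follows the printf() calling convention, so a thin stdio wrapper is enough to drive moo_xma_dump() without moo_fprintf(). The sketch below is illustrative only: the wrapper name is made up, and it assumes moo_bch_t is the plain byte character type so the dump routine's format strings can be handed straight to vfprintf().

#include <stdio.h>
#include <stdarg.h>
#include <moo-xma.h>

/* forward the formatted output of moo_xma_dump() to the FILE* given as the context */
static int dump_to_stream (void* ctx, const moo_bch_t* fmt, ...)
{
	va_list ap;
	int n;

	va_start (ap, fmt);
	n = vfprintf ((FILE*)ctx, fmt, ap);
	va_end (ap);
	return n;
}

/* usage: moo_xma_dump (xma, dump_to_stream, stderr); */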