qse/lib/cmn/mem.c

/*
 * $Id$
 *
    Copyright (c) 2006-2019 Chung, Hyung-Hwan. All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions
    are met:

    1. Redistributions of source code must retain the above copyright
       notice, this list of conditions and the following disclaimer.
    2. Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
    IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
    IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
    INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
    THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <qse/cmn/mem.h>

#if defined(_WIN32)
# include <windows.h>
/*
#elif defined(__OS2__)
# define INCL_DOSMEMMGR
# include <os2.h>
# include <bsememf.h>
*/
#else
# include <stdlib.h>
#endif

#if defined(__SPU__)
#include <spu_intrinsics.h>
#define SPU_VUC_SIZE QSE_SIZEOF(vector unsigned char)
#endif
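
/* Pointer alignment helpers: a pointer is considered aligned when its
 * address is a multiple of QSE_SIZEOF(qse_size_t), which makes it safe
 * for the generic code paths below to copy or compare one machine word
 * at a time. */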
#define IS_UNALIGNED(ptr) \
    QSE_IS_UNALIGNED_POW2((qse_size_t)ptr, QSE_SIZEOF(qse_size_t))
#define IS_ALIGNED(ptr) (!IS_UNALIGNED(ptr))

#define IS_EITHER_UNALIGNED(ptr1,ptr2) \
    (((qse_size_t)ptr1|(qse_size_t)ptr2)&(QSE_SIZEOF(qse_size_t)-1))
#define IS_BOTH_ALIGNED(ptr1,ptr2) (!IS_EITHER_UNALIGNED(ptr1,ptr2))
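
/* qse_memcpy() copies n bytes from src to dst for non-overlapping regions.
 * Depending on the build, it uses a plain byte loop (QSE_BUILD_FOR_SIZE),
 * 16-byte SPU vector stores, x86/x86-64 'rep movs' string instructions,
 * or a generic word-at-a-time loop when both pointers are suitably aligned. */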
void* qse_memcpy (void* dst, const void* src, qse_size_t n)
{
#if defined(QSE_BUILD_FOR_SIZE)

    qse_byte_t* d = (qse_byte_t*)dst;
    qse_byte_t* s = (qse_byte_t*)src;

    while (n-- > 0) *d++ = *s++;
    return dst;

#elif defined(__SPU__)

    qse_byte_t* d;
    qse_byte_t* s;

    if (n >= SPU_VUC_SIZE &&
        (((qse_size_t)dst) & (SPU_VUC_SIZE-1)) == 0 &&
        (((qse_size_t)src) & (SPU_VUC_SIZE-1)) == 0)
    {
        vector unsigned char* du = (vector unsigned char*)dst;
        vector unsigned char* su = (vector unsigned char*)src;

        do
        {
            *du++ = *su++;
            n -= SPU_VUC_SIZE;
        }
        while (n >= SPU_VUC_SIZE);

        d = (qse_byte_t*)du;
        s = (qse_byte_t*)su;
    }
    else
    {
        d = (qse_byte_t*)dst;
        s = (qse_byte_t*)src;
    }

    while (n-- > 0) *d++ = *s++;
    return dst;
#elif defined(__GNUC__) && (defined(__x86_64) || defined(__amd64))
    /* I don't really care about alignment for x86-64 at this moment. fix it later */
    __asm__ volatile (
        "cld\n\t"
        "rep movsq\n"
        : /* no output */
        :"D" (dst), "S" (src), "c" (n >> 3) /* input: %rdi = dst, %rsi = src, %rcx = n / 8 */
        :"memory"
    );
    __asm__ volatile (
        "rep movsb\n"
        : /* no output */
        :"c" (n & 7) /* %rcx = n % 8, use existing %rdi and %rsi */
        :"memory", "%rdi", "%rsi"
    );
    return dst;

#if 0
    qse_byte_t* d = dst;
    __asm__ volatile (
        "cld\n\t"
        "rep movsq\n"
        : "=D" (d), "=S" (src) /* output: d = %rdi, src = %rsi */
        :"0" (d), "1" (src), "c" (n >> 3) /* input: %rdi = d, %rsi = src, %rcx = n / 8 */
        :"memory"
    );
    __asm__ volatile (
        "rep movsb"
        : /* no output */
        :"D" (d), "S" (src), "c" (n & 7) /* input: %rdi = d, %rsi = src, %rcx = n % 8 */
        :"memory"
    );
    return dst;
#endif

#elif defined(__GNUC__) && (defined(__i386) || defined(i386))
    /* I don't really care about alignment for x86 at this moment. fix it later */
    __asm__ volatile (
        "cld\n\t"
        "rep\n\tmovsl\n"
        : /* no output */
        :"D" (dst), "S" (src), "c" (n >> 2) /* input: %edi = dst, %esi = src, %ecx = n / 4 */
        :"memory"
    );
    __asm__ volatile (
        "rep\n\tmovsb\n"
        : /* no output */
        :"c" (n & 3) /* %ecx = n % 4, use existing %edi and %esi */
        :"memory", "%edi", "%esi"
    );
    return dst;

#if 0
    qse_byte_t* d = dst;
    __asm__ volatile (
        "cld\n\t"
        "rep movsl\n"
        :"=D" (d), "=S" (src) /* output: d = %edi, src = %esi */
        :"0" (d), "1" (src), "c" (n >> 2) /* input: %edi = d, %esi = src, %ecx = n / 4 */
        :"memory"
    );
    __asm__ volatile (
        "rep movsb\n"
        :
        :"D" (d), "S" (src), "c" (n & 3) /* input: %edi = d, %esi = src, %ecx = n % 4 */
        :"memory"
    );
    return dst;
#endif
#else

    qse_byte_t* d;
    qse_byte_t* s;

    if (n < 8)
    {
        d = (qse_byte_t*)dst;
        s = (qse_byte_t*)src;

        /* each case falls through to the next on purpose
         * so that exactly n bytes get copied */
        switch (n)
        {
            case 7: *d++ = *s++;
            case 6: *d++ = *s++;
            case 5: *d++ = *s++;
            case 4: *d++ = *s++;
            case 3: *d++ = *s++;
            case 2: *d++ = *s++;
            case 1: *d++ = *s++;
        }

        return dst;
    }

    if (n >= QSE_SIZEOF(qse_size_t) && IS_BOTH_ALIGNED(dst,src))
    {
        qse_size_t* du = (qse_size_t*)dst;
        qse_size_t* su = (qse_size_t*)src;

        do
        {
            *du++ = *su++;
            n -= QSE_SIZEOF(qse_size_t);
        }
        while (n >= QSE_SIZEOF(qse_size_t));

        d = (qse_byte_t*)du;
        s = (qse_byte_t*)su;
    }
    else
    {
        d = (qse_byte_t*)dst;
        s = (qse_byte_t*)src;
    }

    while (n-- > 0) *d++ = *s++;
    return dst;

#endif
}
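
/* qse_memmove() handles overlapping regions: it copies forward when dst
 * does not start inside the source range and backward otherwise, so the
 * bytes still to be read are never overwritten before they are copied. */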
void* qse_memmove (void* dst, const void* src, qse_size_t n)
{
    const qse_byte_t* sre = (const qse_byte_t*)src + n;

    if (dst <= src || dst >= (const void*)sre)
    {
        qse_byte_t* d = (qse_byte_t*)dst;
        const qse_byte_t* s = (const qse_byte_t*)src;
        while (n-- > 0) *d++ = *s++;
    }
    else
    {
        qse_byte_t* dse = (qse_byte_t*)dst + n;
        while (n-- > 0) *--dse = *--sre;
    }

    return dst;
}
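
/* qse_memset() fills n bytes of dst with the byte value of val. The
 * optimized paths first fill any leading bytes needed to reach a word
 * (or 16-byte vector) boundary, then store whole words/vectors whose
 * bytes all equal the fill value, and finally fill the trailing bytes. */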
void* qse_memset (void* dst, int val, qse_size_t n)
{
#if defined(QSE_BUILD_FOR_SIZE)

    qse_byte_t* d = (qse_byte_t*)dst;
    while (n-- > 0) *d++ = (qse_byte_t)val;
    return dst;

#elif defined(__SPU__)

    qse_byte_t* d;
    qse_size_t rem;

    if (n <= 0) return dst;

    d = (qse_byte_t*)dst;

    /* spu SIMD instructions require 16-byte alignment */
    rem = ((qse_size_t)dst) & (SPU_VUC_SIZE-1);
    if (rem > 0)
    {
        /* handle the leading unaligned part,
         * but never set more than n bytes */
        while (n > 0 && rem < SPU_VUC_SIZE)
        {
            *d++ = (qse_byte_t)val;
            n--; rem++;
        }
    }

    /* do the vector fill */
    if (n >= SPU_VUC_SIZE)
    {
        /* a vector of 16 unsigned char cells */
        vector unsigned char v16;
        /* a pointer to such a vector */
        vector unsigned char* vd = (vector unsigned char*)d;

        /* fills all 16 unsigned char cells with the same value
         * no need to use shift and bitwise-or owing to splats */
        v16 = spu_splats((qse_byte_t)val);

        do
        {
            *vd++ = v16;
            n -= SPU_VUC_SIZE;
        }
        while (n >= SPU_VUC_SIZE);

        d = (qse_byte_t*)vd;
    }

    /* handle the trailing part */
    while (n-- > 0) *d++ = (qse_byte_t)val;
    return dst;

#elif defined(__GNUC__) && (defined(__x86_64) || defined(__amd64))
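    /* the code below replicates the fill byte into every byte of a machine
     * word so that 'rep stosq' can store 8 bytes per iteration; for example,
     * val = 0xAB yields qw = 0xABABABABABABABAB. the remaining n % 8 bytes
     * are then finished with 'rep stosb'. */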
    /* I don't really care about alignment for x86-64 at this moment. fix it later */
    qse_byte_t* d = dst;

    __asm__ volatile ("cld\n");
    if (n >= 8)
    {
        qse_size_t qw = (qse_byte_t)val;
        if (qw)
        {
            qw = (qw << 8) | (qse_byte_t)val;
            qw = (qw << 8) | (qse_byte_t)val;
            qw = (qw << 8) | (qse_byte_t)val;
            qw = (qw << 8) | (qse_byte_t)val;
            qw = (qw << 8) | (qse_byte_t)val;
            qw = (qw << 8) | (qse_byte_t)val;
            qw = (qw << 8) | (qse_byte_t)val;
        }

        __asm__ volatile (
            "rep stosq\n"
            :"=D" (d) /* output: d = %rdi */
            :"0" (d), "a" (qw), "c" (n >> 3) /* input: %rdi = d, %rax = qw, %rcx = n / 8 */
            :"memory"
        );
    }

    __asm__ volatile (
        "rep stosb\n"
        : /* no output */
        :"D" (d), "a" (val), "c" (n & 7) /* input: %rdi = d, %rax = val, %rcx = n % 8 */
        :"memory"
    );
    return dst;

#elif defined(__GNUC__) && (defined(__i386) || defined(i386))
    /* I don't really care about alignment for x86 at this moment. fix it later */
    qse_byte_t* d = dst;

    __asm__ volatile ("cld\n");
    if (n >= 4)
    {
        qse_size_t dw = (qse_byte_t)val;
        if (dw)
        {
            dw = (dw << 8) | (qse_byte_t)val;
            dw = (dw << 8) | (qse_byte_t)val;
            dw = (dw << 8) | (qse_byte_t)val;
        }

        __asm__ volatile (
            "rep\n\tstosl\n"
            :"=D" (d) /* output: d = %edi */
            :"0" (d), "a" (dw), "c" (n >> 2) /* input: %edi = d, %eax = dw, %ecx = n / 4 */
            :"memory"
        );
    }

    __asm__ volatile (
        "rep\n\tstosb\n"
        : /* no output */
        :"D" (d), "a" (val), "c" (n & 3) /* input: %edi = d, %eax = val, %ecx = n % 4 */
        :"memory"
    );
    return dst;
#else

    qse_byte_t* d;
    qse_size_t rem;

    if (n <= 0) return dst;

    d = (qse_byte_t*)dst;

    rem = IS_UNALIGNED(dst);
    if (rem > 0)
    {
        /* fill the leading unaligned bytes, but never more than n bytes */
        while (n > 0 && rem < QSE_SIZEOF(qse_size_t))
        {
            *d++ = (qse_byte_t)val;
            n--; rem++;
        }
    }

    if (n >= QSE_SIZEOF(qse_size_t))
    {
        qse_size_t* u = (qse_size_t*)d;
        qse_size_t uv = 0;
        int i;

        if (val != 0)
        {
            /* replicate the fill byte into every byte of a word */
            for (i = 0; i < QSE_SIZEOF(qse_size_t); i++)
                uv = (uv << 8) | (qse_byte_t)val;
        }

        QSE_ASSERT (IS_ALIGNED(u));
        do
        {
            *u++ = uv;
            n -= QSE_SIZEOF(qse_size_t);
        }
        while (n >= QSE_SIZEOF(qse_size_t));

        d = (qse_byte_t*)u;
    }

    while (n-- > 0) *d++ = (qse_byte_t)val;
    return dst;

#endif
}
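
/* qse_memcmp() compares the first n bytes of s1 and s2 and returns a
 * negative, zero, or positive value like the standard memcmp(). when both
 * pointers are word-aligned, it compares a machine word at a time and
 * drops back to byte comparison only to locate the first differing byte. */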
int qse_memcmp (const void* s1, const void* s2, qse_size_t n)
{
#if defined(QSE_BUILD_FOR_SIZE)

    const qse_byte_t* b1 = (const qse_byte_t*)s1;
    const qse_byte_t* b2 = (const qse_byte_t*)s2;

    while (n-- > 0)
    {
        if (*b1 != *b2) return *b1 - *b2;
        b1++; b2++;
    }

    return 0;
#elif defined(__SPU__)

    const qse_byte_t* b1;
    const qse_byte_t* b2;

    if (n >= SPU_VUC_SIZE &&
        (((qse_size_t)s1) & (SPU_VUC_SIZE-1)) == 0 &&
        (((qse_size_t)s2) & (SPU_VUC_SIZE-1)) == 0)
    {
        vector unsigned char* v1 = (vector unsigned char*)s1;
        vector unsigned char* v2 = (vector unsigned char*)s2;
        vector unsigned int tmp;

        do
        {
            unsigned int cnt;
            unsigned int pat;

            /* compare 16 chars at one time */
            tmp = spu_gather(spu_cmpeq(*v1,*v2));
            /* extract the bit pattern */
            pat = spu_extract(tmp, 0);
            /* invert the bit pattern */
            pat = 0xFFFF & ~pat;
            /* put it back to the vector */
            tmp = spu_insert (pat, tmp, 0);
            /* count the leading zeros */
            cnt = spu_extract(spu_cntlz(tmp),0);
            /* 32 leading zeros mean that
             * all characters are the same */
            if (cnt != 32)
            {
                /* otherwise, calculate the
                 * unmatching pointer address */
                b1 = (const qse_byte_t*)v1 + (cnt - 16);
                b2 = (const qse_byte_t*)v2 + (cnt - 16);
                break;
            }

            v1++; v2++;
            n -= SPU_VUC_SIZE;
            if (n < SPU_VUC_SIZE)
            {
                b1 = (const qse_byte_t*)v1;
                b2 = (const qse_byte_t*)v2;
                break;
            }
        }
        while (1);
    }
    else
    {
        b1 = (const qse_byte_t*)s1;
        b2 = (const qse_byte_t*)s2;
    }

    while (n-- > 0)
    {
        if (*b1 != *b2) return *b1 - *b2;
        b1++; b2++;
    }

    return 0;
#else

    const qse_byte_t* b1;
    const qse_byte_t* b2;

    if (n >= QSE_SIZEOF(qse_size_t) && IS_BOTH_ALIGNED(s1,s2))
    {
        const qse_size_t* u1 = (const qse_size_t*)s1;
        const qse_size_t* u2 = (const qse_size_t*)s2;

        do
        {
            if (*u1 != *u2) break;
            u1++; u2++;
            n -= QSE_SIZEOF(qse_size_t);
        }
        while (n >= QSE_SIZEOF(qse_size_t));

        b1 = (const qse_byte_t*)u1;
        b2 = (const qse_byte_t*)u2;
    }
    else
    {
        b1 = (const qse_byte_t*)s1;
        b2 = (const qse_byte_t*)s2;
    }

    while (n-- > 0)
    {
        if (*b1 != *b2) return *b1 - *b2;
        b1++; b2++;
    }

    return 0;

#endif
}
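
/* qse_membyte() and qse_memrbyte() scan a memory block for a single byte
 * value, forward and backward respectively, and return a pointer to the
 * first match or QSE_NULL when the byte does not occur within the first
 * n bytes. they correspond to memchr() and the GNU memrchr(). */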
void* qse_membyte (const void* s, int val, qse_size_t n)
{
    const qse_byte_t* x = (const qse_byte_t*)s;

    while (n-- > 0)
    {
        if (*x == (qse_byte_t)val) return (void*)x;
        x++;
    }

    return QSE_NULL;
}
void* qse_memrbyte (const void* s, int val, qse_size_t n)
{
    const qse_byte_t* x = (const qse_byte_t*)s + n - 1;

    while (n-- > 0)
    {
        if (*x == (qse_byte_t)val) return (void*)x;
        x--;
    }

    return QSE_NULL;
}
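
/* qse_memmem() finds the first occurrence of the needle nd (nl bytes) in
 * the haystack hs (hl bytes) by sliding a window one byte at a time and
 * comparing with qse_memcmp(); qse_memrmem() below does the same from the
 * end of the haystack. a rough usage sketch (illustrative only, not part
 * of this file):
 *
 *   const char hay[] = { 'h', 'e', 'l', 'l', 'o' };
 *   void* p = qse_memmem (hay, 5, "ll", 2);
 *   // p points to hay + 2, or QSE_NULL if there were no match
 */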
void* qse_memmem (const void* hs, qse_size_t hl, const void* nd, qse_size_t nl)
{
    if (nl <= hl)
    {
        qse_size_t i;
        const qse_byte_t* h = (const qse_byte_t*)hs;

        for (i = hl - nl + 1; i > 0; i--)
        {
            if (qse_memcmp(h, nd, nl) == 0) return (void*)h;
            h++;
        }
    }

    return QSE_NULL;
}
void* qse_memrmem (const void* hs, qse_size_t hl, const void* nd, qse_size_t nl)
{
    if (nl <= hl)
    {
        qse_size_t i;
        const qse_byte_t* h;

        /* things are slightly more complicated
         * when searching backward */
        if (nl == 0)
        {
            /* when the needle is empty, it returns
             * the pointer to the last byte of the haystack.
             * this is because qse_memmem returns the pointer
             * to the first byte of the haystack when the
             * needle is empty. but I'm not so sure if this
             * is really desirable behavior */
            h = (const qse_byte_t*)hs + hl - 1;
            return (void*)h;
        }

        h = (const qse_byte_t*)hs + hl - nl;
        for (i = hl - nl + 1; i > 0; i--)
        {
            if (qse_memcmp(h, nd, nl) == 0) return (void*)h;
            h--;
        }
    }

    return QSE_NULL;
}
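
/* mmgr_alloc(), mmgr_realloc() and mmgr_free() below implement the
 * built-in memory manager. they wrap HeapAlloc()/HeapReAlloc()/HeapFree()
 * on the process heap under _WIN32 and plain malloc()/realloc()/free()
 * elsewhere. a rough usage sketch through the manager returned by
 * qse_getdflmmgr() (illustrative only; the field names of qse_mmgr_t are
 * assumed to match the initializer order of builtin_mmgr below):
 *
 *   qse_mmgr_t* mmgr = qse_getdflmmgr ();
 *   void* p = mmgr->alloc (mmgr, 128);
 *   if (p) mmgr->free (mmgr, p);
 */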
static void* mmgr_alloc (qse_mmgr_t* mmgr, qse_size_t n)
{
#if defined(_WIN32)
    HANDLE heap;
    heap = GetProcessHeap ();
    if (!heap) return QSE_NULL;
    return HeapAlloc (heap, 0, n);
#else
    /* TODO: need to rewrite this for __OS2__ using DosAllocMem()? */
    return malloc (n);
#endif
}

static void* mmgr_realloc (qse_mmgr_t* mmgr, void* ptr, qse_size_t n)
{
#if defined(_WIN32)
    HANDLE heap;
    heap = GetProcessHeap ();
    if (!heap) return QSE_NULL;
    return ptr? HeapReAlloc (heap, 0, ptr, n):
                HeapAlloc (heap, 0, n);
#else
    /* realloc() on some old systems doesn't work like
     * modern realloc() when ptr is NULL. let's divert
     * it to malloc() explicitly in such a case */
    /*return realloc (ptr, n);*/
    return ptr? realloc (ptr, n): malloc (n);
#endif
}

static void mmgr_free (qse_mmgr_t* mmgr, void* ptr)
{
#if defined(_WIN32)
    HANDLE heap;
    heap = GetProcessHeap ();
    if (heap) HeapFree (heap, 0, ptr);
#else
    free (ptr);
#endif
}
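
/* builtin_mmgr is the statically initialized default memory manager.
 * qse_getdflmmgr() returns the manager currently installed as the default
 * and qse_setdflmmgr() replaces it; passing QSE_NULL restores the
 * built-in one. */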
static qse_mmgr_t builtin_mmgr =
{
    mmgr_alloc,
    mmgr_realloc,
    mmgr_free,
    QSE_NULL
};

static qse_mmgr_t* dfl_mmgr = &builtin_mmgr;

qse_mmgr_t* qse_getdflmmgr (void)
{
    return dfl_mmgr;
}

void qse_setdflmmgr (qse_mmgr_t* mmgr)
{
    dfl_mmgr = (mmgr? mmgr: &builtin_mmgr);
}