added hcl_abort()

moved vm_startup and vm_cleanup out of hcl_vmprim_t into hcl_cb_t
hyung-hwan 2018-03-10 17:53:44 +00:00
parent 986e02ae69
commit 536f7fd9f2
4 changed files with 279 additions and 164 deletions
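
In practice, this means a host program no longer fills vm_startup/vm_cleanup in hcl_vmprim_t; it registers them (plus an optional vm_checkpoint hook) as hcl_cb_t fields through hcl_regcb(), and can ask a running VM to stop with hcl_abort(). A minimal host-side sketch, assuming the public header is hcl.h; my_vm_startup, my_vm_cleanup, setup_vm_hooks and request_stop are illustrative names:

#include <hcl.h>
#include <string.h>

static int my_vm_startup (hcl_t* hcl)
{
	/* acquire whatever the host needs while the VM runs; return <= -1 to fail startup */
	return 0;
}

static void my_vm_cleanup (hcl_t* hcl)
{
	/* release what my_vm_startup acquired */
}

static void setup_vm_hooks (hcl_t* hcl)
{
	hcl_cb_t hclcb;

	memset (&hclcb, 0, HCL_SIZEOF(hclcb));
	hclcb.vm_startup = my_vm_startup; /* previously vmprim.vm_startup */
	hclcb.vm_cleanup = my_vm_cleanup; /* previously vmprim.vm_cleanup */
	hcl_regcb (hcl, &hclcb); /* hcl_regcb() keeps its own copy of the template */
}

static void request_stop (hcl_t* hcl)
{
	hcl_abort (hcl); /* only sets hcl->abort_req; the VM stops at its next bytecode fetch */
}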

View File

@ -117,9 +117,21 @@ static HCL_INLINE const char* proc_state_to_string (int state)
static int vm_startup (hcl_t* hcl)
{
hcl_cb_t* cb;
HCL_DEBUG1 (hcl, "VM started up at IP %zd\n", hcl->ip);
if (hcl->vmprim.vm_startup(hcl) <= -1) return -1;
for (cb = hcl->cblist; cb; cb = cb->next)
{
if (cb->vm_startup && cb->vm_startup(hcl) <= -1)
{
for (cb = cb->prev; cb; cb = cb->prev)
{
if (cb->vm_cleanup) cb->vm_cleanup (hcl);
}
return -1;
}
}
hcl->vmprim.vm_gettime (hcl, &hcl->exec_start_time); /* raw time. no adjustment */
return 0;
@ -127,11 +139,23 @@ static int vm_startup (hcl_t* hcl)
static void vm_cleanup (hcl_t* hcl)
{
hcl_cb_t* cb;
hcl->vmprim.vm_gettime (hcl, &hcl->exec_end_time); /* raw time. no adjustment */
hcl->vmprim.vm_cleanup (hcl);
for (cb = hcl->cblist; cb; cb = cb->next)
{
if (cb->vm_cleanup) cb->vm_cleanup(hcl);
}
HCL_DEBUG1 (hcl, "VM cleaned up at IP %zd\n", hcl->ip);
}
static void vm_checkpoint (hcl_t* hcl)
{
hcl_cb_t* cb;
for (cb = hcl->cblist; cb; cb = cb->next)
{
if (cb->vm_checkpoint) cb->vm_checkpoint(hcl);
}
}
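
The vm_checkpoint hook is driven from the dispatch loop (once per fetched bytecode in this diff) and is gated by hcl->vm_checkpoint_cb_count, which hcl_regcb()/hcl_deregcb() maintain below, so hosts that register no checkpoint hook pay nothing per instruction. A hypothetical hook, just to show the shape and the cost constraint:

static hcl_oow_t executed_insts = 0; /* illustrative host-side counter */

static void my_vm_checkpoint (hcl_t* hcl)
{
	/* invoked for every dispatched instruction; keep it cheap */
	executed_insts++;
}

/* registered alongside the other callbacks:
 *   hclcb.vm_checkpoint = my_vm_checkpoint;
 *   hcl_regcb (hcl, &hclcb);
 */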
/* ------------------------------------------------------------------------- */
static HCL_INLINE hcl_oop_t make_context (hcl_t* hcl, hcl_ooi_t ntmprs)
@ -1073,11 +1097,15 @@ static int execute (hcl_t* hcl)
HCL_ASSERT (hcl, hcl->active_context != HCL_NULL);
if (vm_startup (hcl) <= -1) return -1;
hcl->abort_req = 0;
if (vm_startup(hcl) <= -1) return -1;
hcl->proc_switched = 0;
while (1)
{
#if defined(ENABLE_MULTI_PROCS)
/* i don't think i will ever implement this in HCL.
* but let's keep the code here for a while */
if (hcl->sem_heap_count > 0)
{
hcl_ntime_t ft, now;
@ -1135,6 +1163,7 @@ static int execute (hcl_t* hcl)
}
while (hcl->sem_heap_count > 0);
}
#endif
if (hcl->processor->active == hcl->nil_process)
{
@ -1152,44 +1181,46 @@ static int execute (hcl_t* hcl)
break;
}
#if defined(ENABLE_MULTI_PROCS)
while (hcl->sem_list_count > 0)
{
/* handle async signals */
--hcl->sem_list_count;
signal_semaphore (hcl, hcl->sem_list[hcl->sem_list_count]);
}
/*
if (semaphore heap has pending request)
{
signal them...
}*/
/* TODO: implement different process switching scheme - time-slice or clock based??? */
#if defined(HCL_EXTERNAL_PROCESS_SWITCH)
#if defined(HCL_EXTERNAL_PROCESS_SWITCH)
if (!hcl->proc_switched && hcl->switch_proc) { switch_to_next_runnable_process (hcl); }
hcl->switch_proc = 0;
#else
#else
if (!hcl->proc_switched) { switch_to_next_runnable_process (hcl); }
#endif
#endif
hcl->proc_switched = 0;
#endif
if (hcl->ip >= hcl->code.bc.len)
if (HCL_UNLIKELY(hcl->ip >= hcl->code.bc.len) || HCL_UNLIKELY(hcl->abort_req))
{
HCL_DEBUG1 (hcl, "IP reached the end of bytecode(%zu). Stopping execution\n", hcl->code.bc.len);
if (hcl->abort_req)
HCL_DEBUG0 (hcl, "Stopping execution on abort request\n");
else
HCL_DEBUG1 (hcl, "Stopping executeion as IP reached the end of bytecode(%zu)\n", hcl->code.bc.len);
return_value = hcl->_nil;
goto handle_return;
}
#if defined(HCL_DEBUG_VM_EXEC)
#if defined(HCL_DEBUG_VM_EXEC)
fetched_instruction_pointer = hcl->ip;
#endif
#endif
FETCH_BYTE_CODE_TO (hcl, bcode);
/*while (bcode == HCL_CODE_NOOP) FETCH_BYTE_CODE_TO (hcl, bcode);*/
#if defined(HCL_PROFILE_VM)
if (hcl->vm_checkpoint_cb_count) vm_checkpoint (hcl);
#if defined(HCL_PROFILE_VM)
inst_counter++;
#endif
#endif
switch (bcode)
{
@ -2235,3 +2266,8 @@ hcl_oop_t hcl_execute (hcl_t* hcl)
{
return hcl_executefromip (hcl, 0);
}
void hcl_abort (hcl_t* hcl)
{
hcl->abort_req = 1;
}
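
hcl_abort() only raises abort_req; the dispatch loop above notices the flag before fetching the next bytecode and winds down through the normal return path, so it is safe to call from asynchronous contexts. Distilled from the main.c changes later in this commit, simplified here to a plain signal handler:

static hcl_t* g_hcl; /* set once the interpreter has been created */

static void on_sigint (int sig)
{
	if (g_hcl) hcl_abort (g_hcl); /* cooperative: takes effect at the next loop iteration */
}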

View File

@ -416,6 +416,8 @@ hcl_cb_t* hcl_regcb (hcl_t* hcl, hcl_cb_t* tmpl)
actual->prev = HCL_NULL;
hcl->cblist = actual;
if (actual->vm_checkpoint) hcl->vm_checkpoint_cb_count++;
return actual;
}
@ -432,6 +434,11 @@ void hcl_deregcb (hcl_t* hcl, hcl_cb_t* cb)
if (cb->prev) cb->prev->next = cb->next;
}
if (cb->vm_checkpoint)
{
HCL_ASSERT (hcl, hcl->vm_checkpoint_cb_count > 0);
hcl->vm_checkpoint_cb_count--;
}
hcl_freemem (hcl, cb);
}

View File

@ -706,10 +706,7 @@ typedef void* (*hcl_vmprim_dlopen_t) (hcl_t* hcl, const hcl_ooch_t* name, int fl
typedef void (*hcl_vmprim_dlclose_t) (hcl_t* hcl, void* handle);
typedef void* (*hcl_vmprim_dlgetsym_t) (hcl_t* hcl, void* handle, const hcl_ooch_t* name);
typedef int (*hcl_vmprim_startup_t) (hcl_t* hcl);
typedef void (*hcl_vmprim_cleanup_t) (hcl_t* hcl);
typedef void (*hcl_vmprim_gettime_t) (hcl_t* hcl, hcl_ntime_t* now);
typedef void (*hcl_vmprim_sleep_t) (hcl_t* hcl, const hcl_ntime_t* duration);
struct hcl_vmprim_t
@ -718,27 +715,24 @@ struct hcl_vmprim_t
* before hcl is fully initialized. so few features are available
* in this callback function. If it's not provided, the default
* implementation is used. */
hcl_alloc_heap_t alloc_heap;
hcl_alloc_heap_t alloc_heap; /* optional */
/* If you customize the heap allocator by providing the alloc_heap
* callback, you should implement the heap freer. otherwise the default
* implementation doesn't know how to free the heap allocated by
* the allocator callback. */
hcl_free_heap_t free_heap;
hcl_free_heap_t free_heap; /* optional */
hcl_log_write_t log_write;
hcl_syserrstrb_t syserrstrb;
hcl_log_write_t log_write; /* required */
hcl_syserrstrb_t syserrstrb; /* one of syserrstrb or syserrstru required */
hcl_syserrstru_t syserrstru;
hcl_vmprim_dlopen_t dl_open;
hcl_vmprim_dlclose_t dl_close;
hcl_vmprim_dlgetsym_t dl_getsym;
hcl_vmprim_dlopen_t dl_open; /* required */
hcl_vmprim_dlclose_t dl_close; /* required */
hcl_vmprim_dlgetsym_t dl_getsym; /* required */
hcl_vmprim_startup_t vm_startup;
hcl_vmprim_cleanup_t vm_cleanup;
hcl_vmprim_gettime_t vm_gettime;
hcl_vmprim_sleep_t vm_sleep;
hcl_vmprim_gettime_t vm_gettime; /* required */
hcl_vmprim_sleep_t vm_sleep; /* required */
};
typedef struct hcl_vmprim_t hcl_vmprim_t;
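
With the two hooks gone, a host now populates hcl_vmprim_t roughly as below before handing it to hcl_open(); the function names are placeholders and the required/optional split follows the annotations above:

hcl_vmprim_t vmprim;

memset (&vmprim, 0, HCL_SIZEOF(vmprim));
/* alloc_heap/free_heap omitted: optional, the default implementation is used */
vmprim.log_write  = my_log_write;   /* required */
vmprim.syserrstrb = my_syserrstrb;  /* or provide syserrstru instead */
vmprim.dl_open    = my_dl_open;     /* required */
vmprim.dl_close   = my_dl_close;    /* required */
vmprim.dl_getsym  = my_dl_getsym;   /* required */
vmprim.vm_gettime = my_vm_gettime;  /* required */
vmprim.vm_sleep   = my_vm_sleep;    /* required */
/* vm_startup/vm_cleanup no longer live here; register them via hcl_cb_t */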
@ -863,13 +857,23 @@ typedef int (*hcl_ioimpl_t) (
/* =========================================================================
* CALLBACK MANIPULATION
* ========================================================================= */
typedef void (*hcl_cbimpl_t) (hcl_t* hcl);
typedef void (*hcl_cb_fini_t) (hcl_t* hcl);
typedef void (*hcl_cb_gc_t) (hcl_t* hcl);
typedef int (*hcl_cb_vm_startup_t) (hcl_t* hcl);
typedef void (*hcl_cb_vm_cleanup_t) (hcl_t* hcl);
typedef void (*hcl_cb_vm_checkpoint_t) (hcl_t* hcl);
typedef struct hcl_cb_t hcl_cb_t;
struct hcl_cb_t
{
hcl_cbimpl_t gc;
hcl_cbimpl_t fini;
hcl_cb_gc_t gc;
hcl_cb_fini_t fini;
hcl_cb_vm_startup_t vm_startup;
hcl_cb_vm_cleanup_t vm_cleanup;
hcl_cb_vm_checkpoint_t vm_checkpoint;
/* private below */
hcl_cb_t* prev;
@ -1023,6 +1027,7 @@ struct hcl_t
hcl_vmprim_t vmprim;
hcl_oow_t vm_checkpoint_cb_count;
hcl_cb_t* cblist;
hcl_rbt_t modtab; /* primitive module table */
@ -1094,6 +1099,7 @@ struct hcl_t
hcl_ooi_t ip;
int proc_switched; /* TODO: this is temporary. implement something else to skip immediate context switching */
int switch_proc;
int abort_req;
hcl_oop_t last_retv;
hcl_ntime_t exec_start_time;
@ -1557,6 +1563,10 @@ HCL_EXPORT hcl_oop_t hcl_executefromip (
hcl_ooi_t initial_ip
);
HCL_EXPORT void hcl_abort (
hcl_t* hcl
);
HCL_EXPORT int hcl_attachio (
hcl_t* hcl,
hcl_ioimpl_t reader,

View File

@ -912,6 +912,130 @@ static void* dl_getsym (hcl_t* hcl, void* handle, const hcl_ooch_t* name)
/* ========================================================================= */
static void vm_gettime (hcl_t* hcl, hcl_ntime_t* now)
{
#if defined(_WIN32)
/* TODO: */
#elif defined(__OS2__)
ULONG out;
/* TODO: handle overflow?? */
/* TODO: use DosTmrQueryTime() and DosTmrQueryFreq()? */
DosQuerySysInfo (QSV_MS_COUNT, QSV_MS_COUNT, &out, HCL_SIZEOF(out)); /* milliseconds */
/* it must return NO_ERROR */
HCL_INITNTIME (now, HCL_MSEC_TO_SEC(out), HCL_MSEC_TO_NSEC(out));
#elif defined(__DOS__) && (defined(_INTELC32_) || defined(__WATCOMC__))
clock_t c;
/* TODO: handle overflow?? */
c = clock ();
now->sec = c / CLOCKS_PER_SEC;
#if (CLOCKS_PER_SEC == 100)
now->nsec = HCL_MSEC_TO_NSEC((c % CLOCKS_PER_SEC) * 10);
#elif (CLOCKS_PER_SEC == 1000)
now->nsec = HCL_MSEC_TO_NSEC(c % CLOCKS_PER_SEC);
#elif (CLOCKS_PER_SEC == 1000000L)
now->nsec = HCL_USEC_TO_NSEC(c % CLOCKS_PER_SEC);
#elif (CLOCKS_PER_SEC == 1000000000L)
now->nsec = (c % CLOCKS_PER_SEC);
#else
# error UNSUPPORTED CLOCKS_PER_SEC
#endif
#elif defined(macintosh)
UnsignedWide tick;
hcl_uint64_t tick64;
Microseconds (&tick);
tick64 = *(hcl_uint64_t*)&tick;
HCL_INITNTIME (now, HCL_USEC_TO_SEC(tick64), HCL_USEC_TO_NSEC(tick64));
#elif defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
struct timespec ts;
clock_gettime (CLOCK_MONOTONIC, &ts);
HCL_INITNTIME(now, ts.tv_sec, ts.tv_nsec);
#elif defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_REALTIME)
struct timespec ts;
clock_gettime (CLOCK_REALTIME, &ts);
HCL_INITNTIME(now, ts.tv_sec, ts.tv_nsec);
#else
struct timeval tv;
gettimeofday (&tv, HCL_NULL);
HCL_INITNTIME(now, tv.tv_sec, HCL_USEC_TO_NSEC(tv.tv_usec));
#endif
}
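
vm_gettime prefers a monotonic clock where one is available, and exec.c (earlier in this commit) calls it through hcl->vmprim.vm_gettime to stamp exec_start_time/exec_end_time. A host can time things the same way; the millisecond arithmetic below is only an illustration using the sec/nsec fields:

hcl_ntime_t t0, t1;
hcl_ooi_t elapsed_ms;

hcl->vmprim.vm_gettime (hcl, &t0);
/* ... run hcl_execute() or whatever is being measured ... */
hcl->vmprim.vm_gettime (hcl, &t1);

/* raw difference; no wall-clock adjustment is applied */
elapsed_ms = (t1.sec - t0.sec) * 1000 + (t1.nsec - t0.nsec) / 1000000;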
static void vm_sleep (hcl_t* hcl, const hcl_ntime_t* dur)
{
#if defined(_WIN32)
xtn_t* xtn = (xtn_t*)hcl_getxtn(hcl);
if (xtn->waitable_timer)
{
LARGE_INTEGER li;
li.QuadPart = -HCL_SECNSEC_TO_NSEC(dur->sec, dur->nsec);
if(SetWaitableTimer(xtn->waitable_timer, &li, 0, HCL_NULL, HCL_NULL, FALSE) == FALSE) goto normal_sleep;
WaitForSingleObject(xtn->waitable_timer, INFINITE);
}
else
{
normal_sleep:
/* fallback to normal Sleep() */
Sleep (HCL_SECNSEC_TO_MSEC(dur->sec,dur->nsec));
}
#elif defined(__OS2__)
/* TODO: in gui mode, this is not a desirable method???
* this must be made event-driven coupled with the main event loop */
DosSleep (HCL_SECNSEC_TO_MSEC(dur->sec,dur->nsec));
#elif defined(macintosh)
/* TODO: ... */
#elif defined(__DOS__) && (defined(_INTELC32_) || defined(__WATCOMC__))
clock_t c;
c = clock ();
c += dur->sec * CLOCKS_PER_SEC;
#if (CLOCKS_PER_SEC == 100)
c += HCL_NSEC_TO_MSEC(dur->nsec) / 10;
#elif (CLOCKS_PER_SEC == 1000)
c += HCL_NSEC_TO_MSEC(dur->nsec);
#elif (CLOCKS_PER_SEC == 1000000L)
c += HCL_NSEC_TO_USEC(dur->nsec);
#elif (CLOCKS_PER_SEC == 1000000000L)
c += dur->nsec;
#else
# error UNSUPPORTED CLOCKS_PER_SEC
#endif
/* TODO: handle clock overflow */
/* TODO: check if there is abortion request or interrupt */
while (c > clock())
{
_halt_cpu();
}
#else
#if defined(USE_THREAD)
/* the sleep callback is called only if there is no IO semaphore
* waiting. so i can safely call vm_muxwait() without a muxwait callback
* when USE_THREAD is true */
vm_muxwait (hcl, dur, HCL_NULL);
#elif defined(HAVE_NANOSLEEP)
struct timespec ts;
ts.tv_sec = dur->sec;
ts.tv_nsec = dur->nsec;
nanosleep (&ts, HCL_NULL);
#elif defined(HAVE_USLEEP)
usleep (HCL_SECNSEC_TO_USEC(dur->sec, dur->nsec));
#else
# error UNSUPPORTED SLEEP
#endif
#endif
}
/* ========================================================================= */
static int vm_startup (hcl_t* hcl)
{
#if defined(_WIN32)
@ -1096,131 +1220,6 @@ static void vm_cleanup (hcl_t* hcl)
#endif
}
static void vm_gettime (hcl_t* hcl, hcl_ntime_t* now)
{
#if defined(_WIN32)
/* TODO: */
#elif defined(__OS2__)
ULONG out;
/* TODO: handle overflow?? */
/* TODO: use DosTmrQueryTime() and DosTmrQueryFreq()? */
DosQuerySysInfo (QSV_MS_COUNT, QSV_MS_COUNT, &out, HCL_SIZEOF(out)); /* milliseconds */
/* it must return NO_ERROR */
HCL_INITNTIME (now, HCL_MSEC_TO_SEC(out), HCL_MSEC_TO_NSEC(out));
#elif defined(__DOS__) && (defined(_INTELC32_) || defined(__WATCOMC__))
clock_t c;
/* TODO: handle overflow?? */
c = clock ();
now->sec = c / CLOCKS_PER_SEC;
#if (CLOCKS_PER_SEC == 100)
now->nsec = HCL_MSEC_TO_NSEC((c % CLOCKS_PER_SEC) * 10);
#elif (CLOCKS_PER_SEC == 1000)
now->nsec = HCL_MSEC_TO_NSEC(c % CLOCKS_PER_SEC);
#elif (CLOCKS_PER_SEC == 1000000L)
now->nsec = HCL_USEC_TO_NSEC(c % CLOCKS_PER_SEC);
#elif (CLOCKS_PER_SEC == 1000000000L)
now->nsec = (c % CLOCKS_PER_SEC);
#else
# error UNSUPPORTED CLOCKS_PER_SEC
#endif
#elif defined(macintosh)
UnsignedWide tick;
hcl_uint64_t tick64;
Microseconds (&tick);
tick64 = *(hcl_uint64_t*)&tick;
HCL_INITNTIME (now, HCL_USEC_TO_SEC(tick64), HCL_USEC_TO_NSEC(tick64));
#elif defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
struct timespec ts;
clock_gettime (CLOCK_MONOTONIC, &ts);
HCL_INITNTIME(now, ts.tv_sec, ts.tv_nsec);
#elif defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_REALTIME)
struct timespec ts;
clock_gettime (CLOCK_REALTIME, &ts);
HCL_INITNTIME(now, ts.tv_sec, ts.tv_nsec);
#else
struct timeval tv;
gettimeofday (&tv, HCL_NULL);
HCL_INITNTIME(now, tv.tv_sec, HCL_USEC_TO_NSEC(tv.tv_usec));
#endif
}
static void vm_sleep (hcl_t* hcl, const hcl_ntime_t* dur)
{
#if defined(_WIN32)
xtn_t* xtn = (xtn_t*)hcl_getxtn(hcl);
if (xtn->waitable_timer)
{
LARGE_INTEGER li;
li.QuadPart = -HCL_SECNSEC_TO_NSEC(dur->sec, dur->nsec);
if(SetWaitableTimer(timer, &li, 0, HCL_NULL, HCL_NULL, FALSE) == FALSE) goto normal_sleep;
WaitForSingleObject(timer, INFINITE);
}
else
{
normal_sleep:
/* fallback to normal Sleep() */
Sleep (HCL_SECNSEC_TO_MSEC(dur->sec,dur->nsec));
}
#elif defined(__OS2__)
/* TODO: in gui mode, this is not a desirable method???
* this must be made event-driven coupled with the main event loop */
DosSleep (HCL_SECNSEC_TO_MSEC(dur->sec,dur->nsec));
#elif defined(macintosh)
/* TODO: ... */
#elif defined(__DOS__) && (defined(_INTELC32_) || defined(__WATCOMC__))
clock_t c;
c = clock ();
c += dur->sec * CLOCKS_PER_SEC;
#if (CLOCKS_PER_SEC == 100)
c += HCL_NSEC_TO_MSEC(dur->nsec) / 10;
#elif (CLOCKS_PER_SEC == 1000)
c += HCL_NSEC_TO_MSEC(dur->nsec);
#elif (CLOCKS_PER_SEC == 1000000L)
c += HCL_NSEC_TO_USEC(dur->nsec);
#elif (CLOCKS_PER_SEC == 1000000000L)
c += dur->nsec;
#else
# error UNSUPPORTED CLOCKS_PER_SEC
#endif
/* TODO: handle clock overvlow */
/* TODO: check if there is abortion request or interrupt */
while (c > clock())
{
_halt_cpu();
}
#else
#if defined(USE_THREAD)
/* the sleep callback is called only if there is no IO semaphore
* waiting. so i can safely call vm_muxwait() without a muxwait callback
* when USE_THREAD is true */
vm_muxwait (hcl, dur, HCL_NULL);
#elif defined(HAVE_NANOSLEEP)
struct timespec ts;
ts.tv_sec = dur->sec;
ts.tv_nsec = dur->nsec;
nanosleep (&ts, HCL_NULL);
#elif defined(HAVE_USLEEP)
usleep (HCL_SECNSEC_TO_USEC(dur->sec, dur->nsec));
#else
# error UNSUPPORT SLEEP
#endif
#endif
}
/* ========================================================================= */
static void gc_hcl (hcl_t* hcl)
{
xtn_t* xtn = (xtn_t*)hcl_getxtn(hcl);
@ -1238,6 +1237,7 @@ static void fini_hcl (hcl_t* hcl)
}
}
/* ========================================================================= */
static int handle_logopt (hcl_t* hcl, const hcl_bch_t* str)
{
@ -1466,6 +1466,63 @@ static void cancel_tick (void)
/* ========================================================================= */
#if defined(__MSDOS__) && defined(_INTELC32_)
typedef void(*signal_handler_t)(int);
#elif defined(macintosh)
typedef void(*signal_handler_t)(int);
#else
typedef void(*signal_handler_t)(int, siginfo_t*, void*);
#endif
#if defined(__MSDOS__) && defined(_INTELC32_)
/* TODO: implement this */
#elif defined(macintosh)
/* TODO: implement this */
#else
static void handle_sigint (int sig, siginfo_t* siginfo, void* ctx)
{
if (g_hcl) hcl_abort (g_hcl);
}
#endif
static void set_signal (int sig, signal_handler_t handler)
{
#if defined(__MSDOS__) && defined(_INTELC32_)
/* TODO: implement this */
#elif defined(macintosh)
/* TODO: implement this */
#else
struct sigaction sa;
memset (&sa, 0, sizeof(sa));
/*sa.sa_handler = handler;*/
sa.sa_flags = SA_SIGINFO;
sa.sa_sigaction = handler;
sigemptyset (&sa.sa_mask);
sigaction (sig, &sa, NULL);
#endif
}
static void set_signal_to_default (int sig)
{
#if defined(__MSDOS__) && defined(_INTELC32_)
/* TODO: implement this */
#elif defined(macintosh)
/* TODO: implement this */
#else
struct sigaction sa;
memset (&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
sa.sa_flags = 0;
sigemptyset (&sa.sa_mask);
sigaction (sig, &sa, NULL);
#endif
}
/* ========================================================================= */
@ -1600,8 +1657,6 @@ int main (int argc, char* argv[])
vmprim.dl_open = dl_open;
vmprim.dl_close = dl_close;
vmprim.dl_getsym = dl_getsym;
vmprim.vm_startup = vm_startup;
vmprim.vm_cleanup = vm_cleanup;
vmprim.vm_gettime = vm_gettime;
vmprim.vm_sleep = vm_sleep;
@ -1641,6 +1696,8 @@ int main (int argc, char* argv[])
memset (&hclcb, 0, HCL_SIZEOF(hclcb));
hclcb.fini = fini_hcl;
hclcb.gc = gc_hcl;
hclcb.vm_startup = vm_startup;
hclcb.vm_cleanup = vm_cleanup;
hcl_regcb (hcl, &hclcb);
@ -1705,6 +1762,10 @@ int main (int argc, char* argv[])
HCL_OBJ_SET_FLAGS_KERNEL (xtn->sym_errstr, 1);
}
/* -- from this point onward, any failure leads to jumping to the oops label
* -- instead of returning -1 immediately. --*/
set_signal (SIGINT, handle_sigint);
while (1)
{
hcl_oop_t obj;
@ -1713,7 +1774,6 @@ static int count = 0;
if (count %5 == 0) hcl_reset (hcl);
count++;
*/
obj = hcl_read(hcl);
if (!obj)
{
@ -1820,10 +1880,12 @@ count++;
/*hcl_dumpsymtab (hcl);*/
}
set_signal_to_default (SIGINT);
hcl_close (hcl);
return 0;
oops:
set_signal_to_default (SIGINT);
hcl_close (hcl);
return -1;
}