made list management more consistent

hyung-hwan 2020-04-30 14:48:39 +00:00
parent d3a74e1075
commit 26b720099c
5 changed files with 121 additions and 92 deletions
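The diffs below replace the per-mio head/tail pointer pairs and the MIO_SVC_REGISTER/MIO_SVC_UNREGISTER macros with sentinel list heads and a uniform MIO_DEVL_*/MIO_SVCL_* macro set. As a hedged, standalone illustration of that pattern (hypothetical node_t/LIST_* names, not code from this repository), the sketch below shows why a self-referential sentinel head gives O(1) append and unlink with no NULL checks and no separate tail pointer:

#include <stdio.h>

/* hypothetical node type; in mio the links live in the mio_dev_t/mio_svc_t headers */
typedef struct node_t node_t;
struct node_t
{
	node_t* next;
	node_t* prev;
	int     id;
};

/* the list head is itself a node acting as a sentinel (compare MIO_DEVL_INIT) */
#define LIST_INIT(lh)     ((lh)->next = (lh)->prev = (lh))
#define LIST_FIRST(lh)    ((lh)->next)
#define LIST_IS_EMPTY(lh) (LIST_FIRST(lh) == (lh))
#define LIST_IS_NIL(lh,x) ((x) == (lh))

/* append before the sentinel - no NULL checks, no separate tail pointer */
#define LIST_APPEND(lh,x) do { \
	(x)->next = (lh); \
	(x)->prev = (lh)->prev; \
	(x)->prev->next = (x); \
	(lh)->prev = (x); \
} while(0)

/* unlink needs only the node itself (compare MIO_DEVL_UNLINK_DEV) */
#define LIST_UNLINK(x) do { \
	(x)->prev->next = (x)->next; \
	(x)->next->prev = (x)->prev; \
} while(0)

int main (void)
{
	node_t head, a = { 0, 0, 1 }, b = { 0, 0, 2 }, c = { 0, 0, 3 };
	node_t* p;

	LIST_INIT (&head);
	LIST_APPEND (&head, &a);
	LIST_APPEND (&head, &b);
	LIST_APPEND (&head, &c);

	LIST_UNLINK (&b); /* removal does not need the list head at all */

	for (p = LIST_FIRST(&head); !LIST_IS_NIL(&head, p); p = p->next)
		printf ("%d\n", p->id); /* prints 1 then 3 */

	return LIST_IS_EMPTY(&head); /* 0 - the list still holds two nodes */
}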

View File

@@ -762,7 +762,7 @@ mio_svc_dnc_t* mio_svc_dnc_start (mio_t* mio, const mio_skad_t* serv_addr, const
 		if (mio_dev_sck_bind(dnc->udp_sck, &bi) <= -1) goto oops;
 	}
-	MIO_SVC_REGISTER (mio, (mio_svc_t*)dnc);
+	MIO_SVCL_APPEND_SVC (&mio->actsvc, (mio_svc_t*)dnc);
 	return dnc;
 oops:
@@ -781,7 +781,7 @@ void mio_svc_dnc_stop (mio_svc_dnc_t* dnc)
 	if (dnc->udp_sck) mio_dev_sck_kill (dnc->udp_sck);
 	if (dnc->tcp_sck) mio_dev_sck_kill (dnc->tcp_sck);
 	while (dnc->pending_req) release_dns_msg (dnc, dnc->pending_req);
-	MIO_SVC_UNREGISTER (mio, dnc);
+	MIO_SVCL_UNLINK_SVC (dnc);
 	mio_freemem (mio, dnc);
 }

View File

@@ -322,7 +322,7 @@ static int parse_answer_rr (mio_t* mio, mio_dns_rr_part_t rr_part, mio_oow_t pos
 			if (pi->_rrdptr)
 			{
-				mx = (mio_dns_brrd_soa_t*)pi->_rrdptr;
+				mx = (mio_dns_brrd_mx_t*)pi->_rrdptr;
 				pi->_rrdptr += MIO_SIZEOF(*mx);
 				MIO_MEMCPY (&mx->preference, pi->_ptr, 2); pi->_ptr += 2;

View File

@@ -387,9 +387,15 @@ typedef void (*mio_svc_dnc_on_resolve_t) (
 	mio_oow_t len
 );
-#define mio_svc_dns_getmio(svc) mio_svc_getmio(svc)
-#define mio_svc_dnc_getmio(svc) mio_svc_getmio(svc)
-#define mio_svc_dnr_getmio(svc) mio_svc_getmio(svc)
+#if defined(MIO_HAVE_INLINE)
+static MIO_INLINE mio_t* mio_svc_dns_getmio(mio_svc_dns_t* svc) { return mio_svc_getmio((mio_svc_t*)svc); }
+static MIO_INLINE mio_t* mio_svc_dnc_getmio(mio_svc_dnc_t* svc) { return mio_svc_getmio((mio_svc_t*)svc); }
+static MIO_INLINE mio_t* mio_svc_dnr_getmio(mio_svc_dnr_t* svc) { return mio_svc_getmio((mio_svc_t*)svc); }
+#else
+# define mio_svc_dns_getmio(svc) mio_svc_getmio(svc)
+# define mio_svc_dnc_getmio(svc) mio_svc_getmio(svc)
+# define mio_svc_dnr_getmio(svc) mio_svc_getmio(svc)
+#endif
 enum mio_svc_dnc_send_flag_t
 {

View File

@@ -129,20 +129,25 @@ int mio_init (mio_t* mio, mio_mmgr_t* mmgr, mio_cmgr_t* cmgr, mio_oow_t tmrcapa)
 	 * reallocation fails */
 	/* +1 required for consistency with put_oocs and put_ooch in fmtout.c */
 	mio->log.ptr = mio_allocmem(mio, (mio->log.capa + 1) * MIO_SIZEOF(*mio->log.ptr));
-	if (!mio->log.ptr) goto oops;
+	if (MIO_UNLIKELY(!mio->log.ptr)) goto oops;
 	/* inititalize the system-side logging */
-	if (mio_sys_init(mio) <= -1) goto oops;
+	if (MIO_UNLIKELY(mio_sys_init(mio) <= -1)) goto oops;
 	sys_inited = 1;
 	/* initialize the timer object */
 	if (tmrcapa <= 0) tmrcapa = 1;
 	mio->tmr.jobs = mio_allocmem(mio, tmrcapa * MIO_SIZEOF(mio_tmrjob_t));
-	if (!mio->tmr.jobs) goto oops;
+	if (MIO_UNLIKELY(!mio->tmr.jobs)) goto oops;
 	mio->tmr.capa = tmrcapa;
+	MIO_DEVL_INIT (&mio->actdev);
+	MIO_DEVL_INIT (&mio->hltdev);
+	MIO_DEVL_INIT (&mio->zmbdev);
 	MIO_CWQ_INIT (&mio->cwq);
+	MIO_SVCL_INIT (&mio->actsvc);
 	mio_sys_gettime (mio, &mio->init_time);
 	return 0;
@@ -159,11 +164,7 @@ oops:
 void mio_fini (mio_t* mio)
 {
 	mio_dev_t* dev, * next_dev;
-	struct
-	{
-		mio_dev_t* head;
-		mio_dev_t* tail;
-	} diehard;
+	mio_dev_t diehard;
 	mio_oow_t i;
 	/* clean up free cwq list */
@@ -177,62 +178,65 @@ void mio_fini (mio_t* mio)
 		}
 	}
-	while (mio->actsvc.head)
+	while (!MIO_SVCL_IS_EMPTY(&mio->actsvc))
 	{
-		if (mio->actsvc.head->stop)
+		mio_svc_t* svc;
+		svc = MIO_SVCL_FIRST_SVC(&mio->actsvc);
+		if (svc->stop)
 		{
 			/* the stop callback must unregister itself */
-			mio->actsvc.head->stop (mio->actsvc.head);
+			svc->stop (svc);
 		}
 		else
 		{
 			/* unregistration only if no stop callback is designated */
-			MIO_SVC_UNREGISTER (mio, mio->actsvc.head);
+			MIO_SVCL_UNLINK_SVC (svc);
 		}
 	}
 	/* kill all registered devices */
-	while (mio->actdev.head)
+	while (!MIO_DEVL_IS_EMPTY(&mio->actdev))
 	{
-		mio_dev_kill (mio->actdev.head);
+		mio_dev_kill (MIO_DEVL_FIRST_DEV(&mio->actdev));
 	}
 	/* kill all halted devices */
-	while (mio->hltdev.head)
+	while (!MIO_DEVL_IS_EMPTY(&mio->hltdev))
 	{
-		mio_dev_kill (mio->hltdev.head);
+		mio_dev_kill (MIO_DEVL_FIRST_DEV(&mio->hltdev));
 	}
 	/* clean up all zombie devices */
-	MIO_MEMSET (&diehard, 0, MIO_SIZEOF(diehard));
-	for (dev = mio->zmbdev.head; dev; )
+	MIO_DEVL_INIT (&diehard);
+	for (dev = MIO_DEVL_FIRST_DEV(&mio->zmbdev); !MIO_DEVL_IS_NIL_DEV(&mio->zmbdev, dev); )
 	{
 		kill_and_free_device (dev, 1);
-		if (mio->zmbdev.head == dev)
+		if (MIO_DEVL_FIRST_DEV(&mio->zmbdev) == dev)
 		{
 			/* the deive has not been freed. go on to the next one */
 			next_dev = dev->dev_next;
 			/* remove the device from the zombie device list */
-			UNLINK_DEVICE_FROM_LIST (&mio->zmbdev, dev);
+			MIO_DEVL_UNLINK_DEV (dev);
 			dev->dev_cap &= ~MIO_DEV_CAP_ZOMBIE;
 			/* put it to a private list for aborting */
-			APPEND_DEVICE_TO_LIST (&diehard, dev);
+			MIO_DEVL_APPEND_DEV (&diehard, dev);
 			dev = next_dev;
 		}
-		else dev = mio->zmbdev.head;
+		else dev = MIO_DEVL_FIRST_DEV(&mio->zmbdev);
 	}
-	while (diehard.head)
+	while (!MIO_DEVL_IS_EMPTY(&diehard))
 	{
 		/* if the kill method returns failure, it can leak some resource
 		 * because the device is freed regardless of the failure when 2
 		 * is given to kill_and_free_device(). */
-		dev = diehard.head;
+		dev = MIO_DEVL_FIRST_DEV(&diehard);
 		MIO_ASSERT (mio, !(dev->dev_cap & (MIO_DEV_CAP_ACTIVE | MIO_DEV_CAP_HALTED | MIO_DEV_CAP_ZOMBIE)));
-		UNLINK_DEVICE_FROM_LIST (&diehard, dev);
+		MIO_DEVL_UNLINK_DEV (dev);
 		kill_and_free_device (dev, 2);
 	}
@@ -346,7 +350,7 @@ static MIO_INLINE void handle_event (mio_t* mio, mio_dev_t* dev, int events, int
 	if (dev && (events & MIO_DEV_EVENT_OUT))
 	{
 		/* write pending requests */
-		while (!MIO_WQ_ISEMPTY(&dev->wq))
+		while (!MIO_WQ_IS_EMPTY(&dev->wq))
 		{
 			mio_wq_t* q;
 			const mio_uint8_t* uptr;
@@ -408,7 +412,7 @@ static MIO_INLINE void handle_event (mio_t* mio, mio_dev_t* dev, int events, int
 			{
 				/* drain all pending requests.
 				 * callbacks are skipped for drained requests */
-				while (!MIO_WQ_ISEMPTY(&dev->wq))
+				while (!MIO_WQ_IS_EMPTY(&dev->wq))
 				{
 					q = MIO_WQ_HEAD(&dev->wq);
 					unlink_wq (mio, q);
@@ -421,7 +425,7 @@ static MIO_INLINE void handle_event (mio_t* mio, mio_dev_t* dev, int events, int
 		}
 	}
-	if (dev && MIO_WQ_ISEMPTY(&dev->wq))
+	if (dev && MIO_WQ_IS_EMPTY(&dev->wq))
 	{
 		/* no pending request to write */
 		if ((dev->dev_cap & MIO_DEV_CAP_IN_CLOSED) &&
@@ -574,7 +578,7 @@ int mio_exec (mio_t* mio)
 	int ret = 0;
 	/* execute callbacks for completed write operations */
-	while (!MIO_CWQ_ISEMPTY(&mio->cwq))
+	while (!MIO_CWQ_IS_EMPTY(&mio->cwq))
 	{
 		mio_cwq_t* cwq;
 		mio_oow_t cwqfl_index;
@@ -602,7 +606,7 @@ int mio_exec (mio_t* mio)
 	 * multiplexer. the scheduled jobs can safely destroy the devices */
 	mio_firetmrjobs (mio, MIO_NULL, MIO_NULL);
-	if (mio->actdev.head)
+	if (!MIO_DEVL_IS_EMPTY(&mio->actdev))
 	{
 		/* wait on the multiplexer only if there is at least 1 active device */
 		mio_ntime_t tmout;
@@ -622,13 +626,13 @@ int mio_exec (mio_t* mio)
 	}
 	/* kill all halted devices */
-	while (mio->hltdev.head)
+	while (!MIO_DEVL_IS_EMPTY(&mio->hltdev))
 	{
-		MIO_DEBUG1 (mio, "Killing HALTED device %p\n", mio->hltdev.head);
-		mio_dev_kill (mio->hltdev.head);
+		mio_dev_t* dev = MIO_DEVL_FIRST_DEV(&mio->hltdev);
+		MIO_DEBUG1 (mio, "Killing HALTED device %p\n", dev);
+		mio_dev_kill (dev);
 	}
-	MIO_ASSERT (mio, mio->hltdev.tail == MIO_NULL);
 	return ret;
 }
@@ -639,13 +643,13 @@ void mio_stop (mio_t* mio, mio_stopreq_t stopreq)
 int mio_loop (mio_t* mio)
 {
-	if (!mio->actdev.head) return 0;
+	if (MIO_DEVL_IS_EMPTY(&mio->actdev)) return 0;
 	mio->stopreq = MIO_STOPREQ_NONE;
 	if (mio_prologue(mio) <= -1) return -1;
-	while (mio->stopreq == MIO_STOPREQ_NONE && mio->actdev.head)
+	while (mio->stopreq == MIO_STOPREQ_NONE && !MIO_DEVL_IS_EMPTY(&mio->actdev))
 	{
 		if (mio_exec(mio) <= -1) break;
 		/* you can do other things here */
@@ -702,7 +706,7 @@ mio_dev_t* mio_dev_make (mio_t* mio, mio_oow_t dev_size, mio_dev_mth_t* dev_mth,
 	if (mio_dev_watch(dev, MIO_DEV_WATCH_START, 0) <= -1) goto oops_after_make;
 	/* and place the new device object at the back of the active device list */
-	APPEND_DEVICE_TO_LIST (&mio->actdev, dev);
+	MIO_DEVL_APPEND_DEV (&mio->actdev, dev);
 	dev->dev_cap |= MIO_DEV_CAP_ACTIVE;
 	return dev;
@@ -753,7 +757,7 @@ static int kill_and_free_device (mio_dev_t* dev, int force)
 		if (!(dev->dev_cap & MIO_DEV_CAP_ZOMBIE))
 		{
-			APPEND_DEVICE_TO_LIST (&mio->zmbdev, dev);
+			MIO_DEVL_APPEND_DEV (&mio->zmbdev, dev);
 			dev->dev_cap |= MIO_DEV_CAP_ZOMBIE;
 		}
@@ -764,7 +768,7 @@ free_device:
 	if (dev->dev_cap & MIO_DEV_CAP_ZOMBIE)
 	{
 		/* detach it from the zombie device list */
-		UNLINK_DEVICE_FROM_LIST (&mio->zmbdev, dev);
+		MIO_DEVL_UNLINK_DEV (dev);
 		dev->dev_cap &= ~MIO_DEV_CAP_ZOMBIE;
 	}
@@ -822,7 +826,7 @@ void mio_dev_kill (mio_dev_t* dev)
 	if (dev->dev_cap & MIO_DEV_CAP_ZOMBIE)
 	{
-		MIO_ASSERT (mio, MIO_WQ_ISEMPTY(&dev->wq));
+		MIO_ASSERT (mio, MIO_WQ_IS_EMPTY(&dev->wq));
 		MIO_ASSERT (mio, dev->cw_count == 0);
 		MIO_ASSERT (mio, dev->rtmridx == MIO_TMRIDX_INVALID);
 		goto kill_device;
@@ -852,7 +856,7 @@ void mio_dev_kill (mio_dev_t* dev)
 	}
 	/* clear pending send requests */
-	while (!MIO_WQ_ISEMPTY(&dev->wq))
+	while (!MIO_WQ_IS_EMPTY(&dev->wq))
 	{
 		mio_wq_t* q;
 		q = MIO_WQ_HEAD(&dev->wq);
@@ -864,13 +868,13 @@ void mio_dev_kill (mio_dev_t* dev)
 	{
 		/* this device is in the halted state.
 		 * unlink it from the halted device list */
-		UNLINK_DEVICE_FROM_LIST (&mio->hltdev, dev);
+		MIO_DEVL_UNLINK_DEV (dev);
 		dev->dev_cap &= ~MIO_DEV_CAP_HALTED;
 	}
 	else
 	{
 		MIO_ASSERT (mio, dev->dev_cap & MIO_DEV_CAP_ACTIVE);
-		UNLINK_DEVICE_FROM_LIST (&mio->actdev, dev);
+		MIO_DEVL_UNLINK_DEV (dev);
 		dev->dev_cap &= ~MIO_DEV_CAP_ACTIVE;
 	}
@@ -905,11 +909,11 @@ void mio_dev_halt (mio_dev_t* dev)
 	if (dev->dev_cap & MIO_DEV_CAP_ACTIVE)
 	{
 		/* delink the device object from the active device list */
-		UNLINK_DEVICE_FROM_LIST (&mio->actdev, dev);
+		MIO_DEVL_UNLINK_DEV (dev);
 		dev->dev_cap &= ~MIO_DEV_CAP_ACTIVE;
 		/* place it at the back of the halted device list */
-		APPEND_DEVICE_TO_LIST (&mio->hltdev, dev);
+		MIO_DEVL_APPEND_DEV (&mio->hltdev, dev);
 		dev->dev_cap |= MIO_DEV_CAP_HALTED;
 	}
 }
@@ -956,7 +960,7 @@ int mio_dev_watch (mio_dev_t* dev, mio_dev_watch_cmd_t cmd, int events)
 			 * output watching is requested only if there're enqueued
 			 * data for writing. */
 			events = MIO_DEV_EVENT_IN;
-			if (!MIO_WQ_ISEMPTY(&dev->wq)) events |= MIO_DEV_EVENT_OUT;
+			if (!MIO_WQ_IS_EMPTY(&dev->wq)) events |= MIO_DEV_EVENT_OUT;
 			/* fall through */
 		case MIO_DEV_WATCH_UPDATE:
 			/* honor event watching requests as given by the caller */
@@ -1128,7 +1132,7 @@ static int __dev_write (mio_dev_t* dev, const void* data, mio_iolen_t len, const
 	uptr = data;
 	urem = len;
-	if (!MIO_WQ_ISEMPTY(&dev->wq))
+	if (!MIO_WQ_IS_EMPTY(&dev->wq))
 	{
 		/* the writing queue is not empty.
 		 * enqueue this request immediately */
@@ -1315,7 +1319,7 @@ static int __dev_writev (mio_dev_t* dev, mio_iovec_t* iov, mio_iolen_t iovcnt, c
 	len = 0;
 	for (i = 0; i < iovcnt; i++) len += iov[i].iov_len;
-	if (!MIO_WQ_ISEMPTY(&dev->wq))
+	if (!MIO_WQ_IS_EMPTY(&dev->wq))
 	{
 		/* the writing queue is not empty.
 		 * enqueue this request immediately */

View File

@@ -229,7 +229,7 @@ struct mio_q_t
 #define MIO_Q_INIT(q) ((q)->next = (q)->prev = (q))
 #define MIO_Q_TAIL(q) ((q)->prev)
 #define MIO_Q_HEAD(q) ((q)->next)
-#define MIO_Q_ISEMPTY(q) (MIO_Q_HEAD(q) == (q))
+#define MIO_Q_IS_EMPTY(q) (MIO_Q_HEAD(q) == (q))
 #define MIO_Q_ISNODE(q,x) ((q) != (x))
 #define MIO_Q_ISHEAD(q,x) (MIO_Q_HEAD(q) == (x))
 #define MIO_Q_ISTAIL(q,x) (MIO_Q_TAIL(q) == (x))
@@ -280,7 +280,7 @@ struct mio_cwq_t
 #define MIO_CWQ_INIT(cwq) ((cwq)->next = (cwq)->prev = (cwq))
 #define MIO_CWQ_TAIL(cwq) ((cwq)->prev)
 #define MIO_CWQ_HEAD(cwq) ((cwq)->next)
-#define MIO_CWQ_ISEMPTY(cwq) (MIO_CWQ_HEAD(cwq) == (cwq))
+#define MIO_CWQ_IS_EMPTY(cwq) (MIO_CWQ_HEAD(cwq) == (cwq))
 #define MIO_CWQ_ISNODE(cwq,x) ((cwq) != (x))
 #define MIO_CWQ_ISHEAD(cwq,x) (MIO_CWQ_HEAD(cwq) == (x))
 #define MIO_CWQ_ISTAIL(cwq,x) (MIO_CWQ_TAIL(cwq) == (x))
@@ -311,7 +311,7 @@ struct mio_wq_t
 #define MIO_WQ_INIT(wq) ((wq)->next = (wq)->prev = (wq))
 #define MIO_WQ_TAIL(wq) ((wq)->prev)
 #define MIO_WQ_HEAD(wq) ((wq)->next)
-#define MIO_WQ_ISEMPTY(wq) (MIO_WQ_HEAD(wq) == (wq))
+#define MIO_WQ_IS_EMPTY(wq) (MIO_WQ_HEAD(wq) == (wq))
 #define MIO_WQ_ISNODE(wq,x) ((wq) != (x))
 #define MIO_WQ_ISHEAD(wq,x) (MIO_WQ_HEAD(wq) == (x))
 #define MIO_WQ_ISTAIL(wq,x) (MIO_WQ_TAIL(wq) == (x))
@@ -341,6 +341,32 @@ struct mio_dev_t
 	MIO_DEV_HEADERS;
 };
+
+#define MIO_DEVL_PREPEND_DEV(lh,dev) do { \
+	(dev)->dev_prev = (lh); \
+	(dev)->dev_next = (lh)->dev_next; \
+	(dev)->dev_next->dev_prev = (dev); \
+	(lh)->dev_next = (dev); \
+} while(0)
+
+#define MIO_DEVL_APPEND_DEV(lh,dev) do { \
+	(dev)->dev_next = (lh); \
+	(dev)->dev_prev = (lh)->dev_prev; \
+	(dev)->dev_prev->dev_next = (dev); \
+	(lh)->dev_prev = (dev); \
+} while(0)
+
+#define MIO_DEVL_UNLINK_DEV(dev) do { \
+	(dev)->dev_prev->dev_next = (dev)->dev_next; \
+	(dev)->dev_next->dev_prev = (dev)->dev_prev; \
+} while (0)
+
+#define MIO_DEVL_INIT(lh) ((lh)->dev_next = (lh)->dev_prev = lh)
+#define MIO_DEVL_FIRST_DEV(lh) ((lh)->dev_next)
+#define MIO_DEVL_LAST_DEV(lh) ((lh)->dev_prev)
+#define MIO_DEVL_IS_EMPTY(lh) (MIO_DEVL_FIRST_DEV(lh) == (lh))
+#define MIO_DEVL_IS_NIL_DEV(lh,dev) ((dev) == (lh))
+
 enum mio_dev_cap_t
 {
 	MIO_DEV_CAP_VIRTUAL = (1 << 0),
@@ -405,30 +431,41 @@ typedef void (*mio_svc_stop_t) (mio_svc_t* svc);
 	mio_svc_t* svc_next
 
 /* the stop callback is called if it's not NULL and the service is still
- * alive when mio_close() is reached. it still calls MIO_SVC_UNREGISTER()
+ * alive when mio_close() is reached. it still calls MIO_SVCL_UNLINK_SVC()
  * if the stop callback is NULL. The stop callback, if specified, must
- * call MIO_SVC_UNREGISTER(). */
+ * call MIO_SVCL_UNLINK_SVC(). */
 struct mio_svc_t
 {
 	MIO_SVC_HEADERS;
 };
 
-#define MIO_SVC_REGISTER(mio,svc) do { \
-	if ((mio)->actsvc.tail) (mio)->actsvc.tail->svc_next = (svc); \
-	else (mio)->actsvc.head = (svc); \
-	(svc)->svc_prev = (mio)->actsvc.tail; \
-	(svc)->svc_next = MIO_NULL; \
-	(mio)->actsvc.tail = (svc); \
+#define MIO_SVCL_PREPEND_SVC(lh,svc) do { \
+	(svc)->svc_prev = (lh); \
+	(svc)->svc_next = (lh)->svc_next; \
+	(svc)->svc_next->svc_prev = (svc); \
+	(lh)->svc_next = (svc); \
 } while(0)
 
-#define MIO_SVC_UNREGISTER(mio,svc) do { \
-	if ((svc)->svc_prev) (svc)->svc_prev->svc_next = (svc)->svc_next; \
-	else (mio)->actsvc.head = (svc)->svc_next; \
-	if ((svc)->svc_next) (svc)->svc_next->svc_prev = (svc)->svc_prev; \
-	else (mio)->actsvc.tail = (svc)->svc_prev; \
+#define MIO_SVCL_APPEND_SVC(lh,svc) do { \
+	(svc)->svc_next = (lh); \
+	(svc)->svc_prev = (lh)->svc_prev; \
+	(svc)->svc_prev->svc_next = (svc); \
+	(lh)->svc_prev = (svc); \
+} while(0)
+
+#define MIO_SVCL_UNLINK_SVC(svc) do { \
+	(svc)->svc_prev->svc_next = (svc)->svc_next; \
+	(svc)->svc_next->svc_prev = (svc)->svc_prev; \
 } while (0)
+
+#define MIO_SVCL_INIT(lh) ((lh)->svc_next = (lh)->svc_prev = lh)
+#define MIO_SVCL_FIRST_SVC(lh) ((lh)->svc_next)
+#define MIO_SVCL_LAST_SVC(lh) ((lh)->svc_prev)
+#define MIO_SVCL_IS_EMPTY(lh) (MIO_SVCL_FIRST_SVC(lh) == (lh))
+#define MIO_SVCL_IS_NIL_SVC(lh,svc) ((svc) == (lh))
+
 /* =========================================================================
  * MIO LOGGING
  * ========================================================================= */
@@ -562,23 +599,9 @@ struct mio_t
 	mio_stopreq_t stopreq; /* stop request to abort mio_loop() */
-	struct
-	{
-		mio_dev_t* head;
-		mio_dev_t* tail;
-	} actdev; /* active devices */
-	struct
-	{
-		mio_dev_t* head;
-		mio_dev_t* tail;
-	} hltdev; /* halted devices */
-	struct
-	{
-		mio_dev_t* head;
-		mio_dev_t* tail;
-	} zmbdev; /* zombie devices */
+	mio_dev_t actdev; /* list head of active devices */
+	mio_dev_t hltdev; /* list head of halted devices */
+	mio_dev_t zmbdev; /* list head of zombie devices */
 	mio_uint8_t bigbuf[65535]; /* TODO: make this dynamic depending on devices added. device may indicate a buffer size required??? */
@@ -593,11 +616,7 @@ struct mio_t
 	mio_cwq_t cwq;
 	mio_cwq_t* cwqfl[MIO_CWQFL_SIZE]; /* list of free cwq objects */
-	struct
-	{
-		mio_svc_t* head;
-		mio_svc_t* tail;
-	} actsvc; /* active services */
+	mio_svc_t actsvc; /* list head of active services */
 	/* platform specific fields below */
 	mio_sys_t* sysdep;
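
A side note on the mio_fini() zombie sweep shown above: unlinking nodes from a sentinel list while traversing it requires grabbing the successor before the current node may be unlinked or freed, which is what the new loop does with next_dev. The following self-contained sketch (hypothetical item_t/L_* names, not mio's API) illustrates that removal-during-traversal idiom:

#include <stdio.h>

/* hypothetical element; the real code keeps the links inside mio_dev_t */
typedef struct item_t item_t;
struct item_t { item_t* next; item_t* prev; int keep; };

#define L_INIT(lh)      ((lh)->next = (lh)->prev = (lh))
#define L_FIRST(lh)     ((lh)->next)
#define L_IS_NIL(lh,x)  ((x) == (lh))
#define L_APPEND(lh,x)  do { (x)->next = (lh); (x)->prev = (lh)->prev; (x)->prev->next = (x); (lh)->prev = (x); } while(0)
#define L_UNLINK(x)     do { (x)->prev->next = (x)->next; (x)->next->prev = (x)->prev; } while(0)

int main (void)
{
	item_t head, n[4];
	item_t* p, * next;
	int i;

	L_INIT (&head);
	for (i = 0; i < 4; i++) { n[i].keep = i & 1; L_APPEND (&head, &n[i]); }

	/* remove elements while walking: save the successor before the unlink,
	 * mirroring how mio_fini() saves dev->dev_next before MIO_DEVL_UNLINK_DEV */
	for (p = L_FIRST(&head); !L_IS_NIL(&head, p); p = next)
	{
		next = p->next;
		if (!p->keep) L_UNLINK (p);
	}

	for (p = L_FIRST(&head); !L_IS_NIL(&head, p); p = p->next)
		printf ("kept %d\n", p->keep); /* prints "kept 1" twice */

	return 0;
}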