enhanced kqueue manipulation with fd tracking

This commit is contained in:
hyunghwan.chung 2019-09-25 10:05:39 +00:00
parent da2a8a464a
commit 452caae336
2 changed files with 165 additions and 37 deletions

View File

@ -622,6 +622,7 @@ struct moo_ntime_t
#define MOO_SIZEOF(x) (sizeof(x)) #define MOO_SIZEOF(x) (sizeof(x))
#define MOO_COUNTOF(x) (sizeof(x) / sizeof((x)[0])) #define MOO_COUNTOF(x) (sizeof(x) / sizeof((x)[0]))
#define MOO_BITSOF(x) (sizeof(x) * MOO_BITS_PER_BYTE)
/** /**
* The MOO_OFFSETOF() macro returns the offset of a field from the beginning * The MOO_OFFSETOF() macro returns the offset of a field from the beginning
@ -648,11 +649,11 @@ struct moo_ntime_t
/* make a bit mask that can mask off low n bits */ /* make a bit mask that can mask off low n bits */
#define MOO_LBMASK(type,n) (~(~((type)0) << (n))) #define MOO_LBMASK(type,n) (~(~((type)0) << (n)))
#define MOO_LBMASK_SAFE(type,n) (((n) < MOO_SIZEOF(type) * MOO_BITS_PER_BYTE)? MOO_LBMASK(type,n): ~(type)0) #define MOO_LBMASK_SAFE(type,n) (((n) < MOO_BITSOF(type))? MOO_LBMASK(type,n): ~(type)0)
/* make a bit mask that can mask off high n bits */ /* make a bit mask that can mask off high n bits */
#define MOO_HBMASK(type,n) (~(~((type)0) >> (n))) #define MOO_HBMASK(type,n) (~(~((type)0) >> (n)))
#define MOO_HBMASK_SAFE(type,n) (((n) < MOO_SIZEOF(type) * MOO_BITS_PER_BYTE)? MOO_HBMASK(type,n): ~(type)0) #define MOO_HBMASK_SAFE(type,n) (((n) < MOO_BITSOF(type))? MOO_HBMASK(type,n): ~(type)0)
/* get 'length' bits starting from the bit at the 'offset' */ /* get 'length' bits starting from the bit at the 'offset' */
#define MOO_GETBITS(type,value,offset,length) \ #define MOO_GETBITS(type,value,offset,length) \
@ -679,7 +680,7 @@ struct moo_ntime_t
* \endcode * \endcode
*/ */
/*#define MOO_BITS_MAX(type,nbits) ((((type)1) << (nbits)) - 1)*/ /*#define MOO_BITS_MAX(type,nbits) ((((type)1) << (nbits)) - 1)*/
#define MOO_BITS_MAX(type,nbits) ((~(type)0) >> (MOO_SIZEOF(type) * MOO_BITS_PER_BYTE - (nbits))) #define MOO_BITS_MAX(type,nbits) ((~(type)0) >> (MOO_BITSOF(type) - (nbits)))
/* ========================================================================= /* =========================================================================
* MMGR * MMGR
@ -840,11 +841,11 @@ typedef struct moo_t moo_t;
#define MOO_TYPE_IS_UNSIGNED(type) (((type)0) < ((type)-1)) #define MOO_TYPE_IS_UNSIGNED(type) (((type)0) < ((type)-1))
#define MOO_TYPE_SIGNED_MAX(type) \ #define MOO_TYPE_SIGNED_MAX(type) \
((type)~((type)1 << ((type)MOO_SIZEOF(type) * MOO_BITS_PER_BYTE - 1))) ((type)~((type)1 << ((type)MOO_BITSOF(type) - 1)))
#define MOO_TYPE_UNSIGNED_MAX(type) ((type)(~(type)0)) #define MOO_TYPE_UNSIGNED_MAX(type) ((type)(~(type)0))
#define MOO_TYPE_SIGNED_MIN(type) \ #define MOO_TYPE_SIGNED_MIN(type) \
((type)((type)1 << ((type)MOO_SIZEOF(type) * MOO_BITS_PER_BYTE - 1))) ((type)((type)1 << ((type)MOO_BITSOF(type) - 1)))
#define MOO_TYPE_UNSIGNED_MIN(type) ((type)0) #define MOO_TYPE_UNSIGNED_MIN(type) ((type)0)
#define MOO_TYPE_MAX(type) \ #define MOO_TYPE_MAX(type) \

View File

@ -178,6 +178,7 @@
# define XPOLLERR POLLERR # define XPOLLERR POLLERR
# define XPOLLHUP POLLHUP # define XPOLLHUP POLLHUP
# elif defined(HAVE_SYS_EVENT_H) && defined(HAVE_KQUEUE) # elif defined(HAVE_SYS_EVENT_H) && defined(HAVE_KQUEUE)
/* netbsd, openbsd, etc */
# include <sys/event.h> # include <sys/event.h>
# define USE_KQUEUE # define USE_KQUEUE
/* fake XPOLLXXX values */ /* fake XPOLLXXX values */
@ -355,6 +356,11 @@ struct xtn_t
* file descriptors added */ * file descriptors added */
struct pollfd buf[64]; /* buffer for reading events */ struct pollfd buf[64]; /* buffer for reading events */
#elif defined(USE_KQUEUE) #elif defined(USE_KQUEUE)
struct
{
moo_oow_t* ptr;
moo_oow_t capa;
} reg;
struct kevent buf[64]; struct kevent buf[64];
#elif defined(USE_EPOLL) #elif defined(USE_EPOLL)
/*TODO: make it dynamically changeable depending on the number of /*TODO: make it dynamically changeable depending on the number of
@ -1749,32 +1755,69 @@ static int _add_poll_fd (moo_t* moo, int fd, int event_mask)
#elif defined(USE_KQUEUE) #elif defined(USE_KQUEUE)
xtn_t* xtn = GET_XTN(moo); xtn_t* xtn = GET_XTN(moo);
struct kevent ev; struct kevent ev;
moo_oow_t rindex, roffset;
moo_oow_t rv = 0;
rindex = fd / (MOO_BITSOF(moo_oow_t) >> 1);
roffset = (fd << 1) % MOO_BITSOF(moo_oow_t);
if (rindex >= xtn->ev.reg.capa)
{
moo_oow_t* tmp;
moo_oow_t newcapa;
MOO_STATIC_ASSERT (MOO_SIZEOF(*tmp) == MOO_SIZEOF(*xtn->ev.reg.ptr));
newcapa = rindex + 1;
newcapa = MOO_ALIGN_POW2(newcapa, 16);
tmp = (moo_oow_t*)moo_reallocmem(moo, xtn->ev.reg.ptr, newcapa * MOO_SIZEOF(*tmp));
if (!tmp)
{
const moo_ooch_t* oldmsg = moo_backuperrmsg(moo);
moo_seterrbfmt (moo, MOO_ESYSERR, "unable to add file descriptor %d to kqueue - %js", fd, oldmsg);
MOO_DEBUG1 (moo, "%js", moo_geterrmsg(moo));
return -1;
}
MOO_MEMSET (&tmp[xtn->ev.reg.capa], 0, newcapa - xtn->ev.reg.capa);
xtn->ev.reg.ptr = tmp;
xtn->ev.reg.capa = newcapa;
}
if (event_mask & XPOLLIN) if (event_mask & XPOLLIN)
{ {
/*EV_SET (&ev, fd, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, 0);*/ /*EV_SET (&ev, fd, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, 0);*/
MOO_MEMSET (&ev, 0, MOO_SIZEOF(ev)); MOO_MEMSET (&ev, 0, MOO_SIZEOF(ev));
ev.ident = fd; ev.ident = fd;
ev.flags = EV_ADD | EV_CLEAR; /* EV_CLEAR for edge trigger? */ ev.flags = EV_ADD;
#if defined(USE_THREAD)
ev.flags |= EV_CLEAR; /* EV_CLEAR for edge trigger? */
#endif
ev.filter = EVFILT_READ; ev.filter = EVFILT_READ;
if (kevent(xtn->ep, &ev, 1, MOO_NULL, 0, MOO_NULL) == -1) if (kevent(xtn->ep, &ev, 1, MOO_NULL, 0, MOO_NULL) == -1)
{ {
moo_seterrwithsyserr (moo, 0, errno); moo_seterrwithsyserr (moo, 0, errno);
MOO_DEBUG2 (moo, "Cannot add file descriptor %d to kqueue - %hs\n", fd, strerror(errno)); MOO_DEBUG2 (moo, "Cannot add file descriptor %d to kqueue for read - %hs\n", fd, strerror(errno));
return -1; return -1;
} }
rv |= 1;
} }
if (event_mask & XPOLLOUT) if (event_mask & XPOLLOUT)
{ {
/*EV_SET (&ev, fd, EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, 0);*/ /*EV_SET (&ev, fd, EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, 0);*/
MOO_MEMSET (&ev, 0, MOO_SIZEOF(ev)); MOO_MEMSET (&ev, 0, MOO_SIZEOF(ev));
ev.ident = fd; ev.ident = fd;
ev.flags = EV_ADD | EV_CLEAR; ev.flags = EV_ADD;
#if defined(USE_THREAD)
ev.flags |= EV_CLEAR; /* EV_CLEAR for edge trigger? */
#endif
ev.filter = EVFILT_WRITE; ev.filter = EVFILT_WRITE;
if (kevent(xtn->ep, &ev, 1, MOO_NULL, 0, MOO_NULL) == -1) if (kevent(xtn->ep, &ev, 1, MOO_NULL, 0, MOO_NULL) == -1)
{ {
moo_seterrwithsyserr (moo, 0, errno); moo_seterrwithsyserr (moo, 0, errno);
MOO_DEBUG2 (moo, "Cannot add file descriptor %d to kqueue - %hs\n", fd, strerror(errno)); MOO_DEBUG2 (moo, "Cannot add file descriptor %d to kqueue for write - %hs\n", fd, strerror(errno));
if (event_mask & XPOLLIN) if (event_mask & XPOLLIN)
{ {
@ -1786,9 +1829,13 @@ static int _add_poll_fd (moo_t* moo, int fd, int event_mask)
} }
return -1; return -1;
} }
rv |= 2;
} }
MOO_SETBITS (moo_oow_t, xtn->ev.reg.ptr[rindex], roffset, 2, rv);
return 0; return 0;
#elif defined(USE_EPOLL) #elif defined(USE_EPOLL)
xtn_t* xtn = GET_XTN(moo); xtn_t* xtn = GET_XTN(moo);
struct epoll_event ev; struct epoll_event ev;
@ -1895,29 +1942,45 @@ static int _del_poll_fd (moo_t* moo, int fd)
#elif defined(USE_KQUEUE) #elif defined(USE_KQUEUE)
xtn_t* xtn = GET_XTN(moo); xtn_t* xtn = GET_XTN(moo);
moo_oow_t rindex, roffset;
int rv;
struct kevent ev; struct kevent ev;
/* i should keep track of filters(EVFILT_READ/EVFILT_WRITE) associated rindex = fd / (MOO_BITSOF(moo_oow_t) >> 1);
* with the file descriptor to know what filter to delete. since no roffset = (fd << 1) % MOO_BITSOF(moo_oow_t);
* tracking has been implemented, i should keep quiet when kevent()
* returns failure. */
/*EV_SET (&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);*/ if (rindex >= xtn->ev.reg.capa)
MOO_MEMSET (&ev, 0, MOO_SIZEOF(ev)); {
ev.ident = fd; moo_seterrbfmt (moo, MOO_EINVAL, "unknown file descriptor %d", fd);
ev.flags = EV_DELETE; MOO_DEBUG2 (moo, "Cannot remove file descriptor %d from kqueue - %js\n", fd, moo_geterrmsg(moo));
ev.filter = EVFILT_READ; return -1;
kevent(xtn->ep, &ev, 1, MOO_NULL, 0, MOO_NULL); };
/* no error check for now */
/*EV_SET (&ev, fd, EVFILT_WRITE, EV_DELETE, 0, 0, 0);*/ rv = MOO_GETBITS (moo_oow_t, xtn->ev.reg.ptr[rindex], roffset, 2);
MOO_MEMSET (&ev, 0, MOO_SIZEOF(ev));
ev.ident = fd;
ev.flags = EV_DELETE;
ev.filter = EVFILT_WRITE;
kevent(xtn->ep, &ev, 1, MOO_NULL, 0, MOO_NULL);
/* no error check for now */
if (rv & 1)
{
/*EV_SET (&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);*/
MOO_MEMSET (&ev, 0, MOO_SIZEOF(ev));
ev.ident = fd;
ev.flags = EV_DELETE;
ev.filter = EVFILT_READ;
kevent(xtn->ep, &ev, 1, MOO_NULL, 0, MOO_NULL);
/* no error check for now */
}
if (rv & 2)
{
/*EV_SET (&ev, fd, EVFILT_WRITE, EV_DELETE, 0, 0, 0);*/
MOO_MEMSET (&ev, 0, MOO_SIZEOF(ev));
ev.ident = fd;
ev.flags = EV_DELETE;
ev.filter = EVFILT_WRITE;
kevent(xtn->ep, &ev, 1, MOO_NULL, 0, MOO_NULL);
/* no error check for now */
}
MOO_SETBITS (moo_oow_t, xtn->ev.reg.ptr[rindex], roffset, 2, rv);
return 0; return 0;
#elif defined(USE_EPOLL) #elif defined(USE_EPOLL)
@ -2000,20 +2063,84 @@ static int _mod_poll_fd (moo_t* moo, int fd, int event_mask)
return 0; return 0;
#elif defined(USE_KQUEUE) #elif defined(USE_KQUEUE)
xtn_t* xtn = GET_XTN(moo);
moo_oow_t rindex, roffset;
int rv, newrv = 0;
struct kevent ev;
/* TODO: filter registration tracking. rindex = fd / (MOO_BITSOF(moo_oow_t) >> 1);
* the current implementation for kqueue invokes kevent() too frequently... roffset = (fd << 1) % MOO_BITSOF(moo_oow_t);
* this modification function doesn't need to delete all and add a new one
* if tracking is implemented
*/
if (_del_poll_fd (moo, fd) <= -1) return -1;
if (_add_poll_fd (moo, fd, event_mask) <= -1) if (rindex >= xtn->ev.reg.capa)
{ {
/* TODO: any good way to rollback successful deletion? */ moo_seterrbfmt (moo, MOO_EINVAL, "unknown file descriptor %d", fd);
MOO_DEBUG2 (moo, "Cannot remove file descriptor %d from kqueue - %js\n", fd, moo_geterrmsg(moo));
return -1; return -1;
};
rv = MOO_GETBITS(moo_oow_t, xtn->ev.reg.ptr[rindex], roffset, 2);
if (rv & 1)
{
if (!(event_mask & XPOLLIN))
{
MOO_MEMSET (&ev, 0, MOO_SIZEOF(ev));
ev.ident = fd;
ev.flags = EV_DELETE;
ev.filter = EVFILT_READ;
kevent(xtn->ep, &ev, 1, MOO_NULL, 0, MOO_NULL);
newrv &= ~1;
}
}
else
{
if (event_mask & XPOLLIN)
{
MOO_MEMSET (&ev, 0, MOO_SIZEOF(ev));
ev.ident = fd;
ev.flags = EV_ADD;
#if defined(USE_THREAD)
ev.flags |= EV_CLEAR; /* EV_CLEAR for edge trigger? */
#endif
ev.filter = EVFILT_READ;
kevent(xtn->ep, &ev, 1, MOO_NULL, 0, MOO_NULL);
newrv |= 1;
}
} }
if (rv & 2)
{
if (!(event_mask & XPOLLOUT))
{
MOO_MEMSET (&ev, 0, MOO_SIZEOF(ev));
ev.ident = fd;
ev.flags = EV_DELETE;
ev.filter = EVFILT_WRITE;
kevent(xtn->ep, &ev, 1, MOO_NULL, 0, MOO_NULL);
newrv &= ~2;
}
}
else
{
if (event_mask & XPOLLOUT)
{
MOO_MEMSET (&ev, 0, MOO_SIZEOF(ev));
ev.ident = fd;
ev.flags = EV_ADD;
#if defined(USE_THREAD)
ev.flags |= EV_CLEAR; /* EV_CLEAR for edge trigger? */
#endif
ev.filter = EVFILT_WRITE;
kevent(xtn->ep, &ev, 1, MOO_NULL, 0, MOO_NULL);
newrv |= 2;
}
}
MOO_SETBITS (moo_oow_t, xtn->ev.reg.ptr[rindex], roffset, 2, newrv);
return 0; return 0;
#elif defined(USE_EPOLL) #elif defined(USE_EPOLL)