/*
 * $Id$
 *
    Copyright (c) 2006-2019 Chung, Hyung-Hwan. All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions
    are met:

    1. Redistributions of source code must retain the above copyright
       notice, this list of conditions and the following disclaimer.
    2. Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
    IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
    IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
    INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
    THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <qse/si/mux.h>
#include "../cmn/mem-prv.h"

/* Select the multiplexing backend for the target platform.
 * Exactly one of USE_SELECT / USE_KQUEUE / USE_EPOLL ends up defined
 * (or none on an unsupported platform, in which case the functions
 * below report QSE_MUX_ENOIMPL). */
#if defined(_WIN32)
	/* raise the per-fd_set capacity before including winsock2.h */
#	define FD_SETSIZE 4096 /* what is the best value??? */
#	include <winsock2.h>
#	include <windows.h>
#	define USE_SELECT

#elif defined(__OS2__)
#	if defined(TCPV40HDRS)
#		define BSD_SELECT
#	endif
#	include <types.h>
#	include <sys/socket.h>
#	include <netinet/in.h>
#	include <sys/ioctl.h>
#	include <nerrno.h>
#	if defined(TCPV40HDRS)
#		define USE_SELECT
#		include <sys/select.h>
#	else
#		include <unistd.h>
#	endif
#	define INCL_DOSERRORS
#	include <os2.h>

#elif defined(__DOS__)
#	include <errno.h>
#	include <tcp.h> /* watt-32 */
	/* watt-32 provides select() under the name select_s() */
#	define select select_s
#	define USE_SELECT

#else
#	include <unistd.h>
#	include <fcntl.h>
#	include <errno.h>
#	if defined(HAVE_SYS_TIME_H)
#		include <sys/time.h>
#	endif

#	if defined(QSE_MUX_USE_SELECT)
		/* you can set QSE_MUX_USE_SELECT to force using select() */
#		define USE_SELECT
#	elif defined(HAVE_SYS_EVENT_H) && defined(HAVE_KQUEUE) && defined(HAVE_KEVENT)
#		include <sys/event.h>
#		define USE_KQUEUE
#	elif defined(HAVE_SYS_EPOLL_H)
#		include <sys/epoll.h>
#		if defined(HAVE_EPOLL_CREATE)
#			define USE_EPOLL
			/* older kernels/headers may lack EPOLLRDHUP; supply the
			 * well-known bit value so the code still compiles */
#			if !defined(EPOLLRDHUP)
#				define EPOLLRDHUP (0x2000)
#			endif
#		endif
/*
#	elif defined(HAVE_POLL_H)
TODO: IMPLEMENT THIS
#		define USE_POLL
*/
#	else
#		define USE_SELECT
#	endif
#endif

/* sentinel value marking an unopened/closed pipe channel descriptor */
#define INVALID_CHAN (-1)
/* Event multiplexer object. The members past 'chan' depend on the
 * backend chosen above; only one variant is compiled in. */
struct qse_mux_t
{
	qse_mmgr_t* mmgr;           /* memory manager used for all allocations */
	qse_mux_errnum_t errnum;    /* last error code for this mux */
	qse_mux_evtcb_t evtcb;      /* callback invoked per triggered event */
	int chan[2]; /* pipe channels for simple interaction */

#if defined(USE_SELECT)
	fd_set rset;      /* registered read handles */
	fd_set wset;      /* registered write handles */
	fd_set tmprset;   /* scratch copy passed to select() */
	fd_set tmpwset;   /* scratch copy passed to select() */
	int size;         /* number of registered events */
	int maxhnd;       /* highest registered handle, -1 if none */
	struct
	{
		qse_mux_evt_t** ptr; /* per-handle event records, indexed by handle */
		int ubound;          /* number of slots in 'ptr' */
	} me;

#elif defined(USE_KQUEUE)
	int kq; /* kqueue descriptor */

	/* kevent() places the events into the event list up to the limit specified.
	 * this implementation passes the 'evlist' array to kevent() upon polling.
	 * what is the best array size?
	 * TODO: find the optimal size or make it auto-scalable. */
	struct kevent evlist[512];
	int size; /* number of registered events */
	struct
	{
		qse_mux_evt_t** ptr; /* per-handle event records, indexed by handle */
		int ubound;          /* number of slots in 'ptr' */
	} me;

#elif defined(USE_EPOLL)
	int fd; /* epoll descriptor */
	struct
	{
		struct epoll_event* ptr; /* event buffer handed to epoll_wait() */
		qse_size_t len;          /* number of registered events */
		qse_size_t capa;         /* capacity of 'ptr' */
	} ee;
	struct
	{
		qse_mux_evt_t** ptr; /* per-handle event records, indexed by handle */
		int ubound;          /* number of slots in 'ptr' */
	} me;

#elif defined(__OS2__)
	int* fdarr; /* scratch socket array for os2 select() */
	int size;   /* number of registered events */
	struct
	{
		qse_mux_evt_t** ptr; /* per-handle event records, indexed by handle */
		int ubound;          /* number of slots in 'ptr' */
	} me;
#endif
};
/* Initialize 'mux' in place. Returns 0 on success, -1 on failure
 * (error code retrievable via qse_mux_geterrnum()). */
int qse_mux_init (
	qse_mux_t* mux,
	qse_mmgr_t* mmgr,
	qse_mux_evtcb_t evtcb,
	qse_size_t capahint
);

/* Release all resources held by 'mux' without freeing 'mux' itself. */
void qse_mux_fini (qse_mux_t* mux);
#if defined(_WIN32)
|
2012-12-28 08:39:41 +00:00
|
|
|
static qse_mux_errnum_t skerr_to_errnum (DWORD e)
|
2012-11-09 17:31:33 +00:00
|
|
|
{
|
|
|
|
switch (e)
|
|
|
|
{
|
2012-12-28 08:39:41 +00:00
|
|
|
case WSA_NOT_ENOUGH_MEMORY:
|
2012-11-09 17:31:33 +00:00
|
|
|
return QSE_MUX_ENOMEM;
|
|
|
|
|
2012-12-28 08:39:41 +00:00
|
|
|
case WSA_INVALID_PARAMETER:
|
|
|
|
case WSA_INVALID_HANDLE:
|
2012-11-09 17:31:33 +00:00
|
|
|
return QSE_MUX_EINVAL;
|
|
|
|
|
2012-12-28 08:39:41 +00:00
|
|
|
case WSAEACCES:
|
2012-11-09 17:31:33 +00:00
|
|
|
return QSE_MUX_EACCES;
|
|
|
|
|
2012-12-28 08:39:41 +00:00
|
|
|
case WSAEINTR:
|
|
|
|
return QSE_MUX_EINTR;
|
2012-12-27 14:40:58 +00:00
|
|
|
|
2012-11-09 17:31:33 +00:00
|
|
|
default:
|
|
|
|
return QSE_MUX_ESYSERR;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#elif defined(__OS2__)
|
2012-12-28 08:39:41 +00:00
|
|
|
static qse_mux_errnum_t skerr_to_errnum (int e)
|
2012-11-09 17:31:33 +00:00
|
|
|
{
|
|
|
|
switch (e)
|
|
|
|
{
|
2012-12-18 08:12:15 +00:00
|
|
|
#if defined(SOCENOMEM)
|
2012-12-06 13:02:46 +00:00
|
|
|
case SOCENOMEM:
|
2012-11-09 17:31:33 +00:00
|
|
|
return QSE_MUX_ENOMEM;
|
2012-12-18 08:12:15 +00:00
|
|
|
#endif
|
2012-11-09 17:31:33 +00:00
|
|
|
|
2012-12-06 13:02:46 +00:00
|
|
|
case SOCEINVAL:
|
2012-11-09 17:31:33 +00:00
|
|
|
return QSE_MUX_EINVAL;
|
|
|
|
|
2012-12-06 13:02:46 +00:00
|
|
|
case SOCEACCES:
|
2012-11-09 17:31:33 +00:00
|
|
|
return QSE_MUX_EACCES;
|
|
|
|
|
2012-12-18 08:12:15 +00:00
|
|
|
#if defined(SOCENOENT)
|
2012-12-06 13:02:46 +00:00
|
|
|
case SOCENOENT:
|
2012-11-09 17:31:33 +00:00
|
|
|
return QSE_MUX_ENOENT;
|
2012-12-18 08:12:15 +00:00
|
|
|
#endif
|
2012-11-09 17:31:33 +00:00
|
|
|
|
2012-12-28 08:39:41 +00:00
|
|
|
#if defined(SOCEXIST)
|
2012-12-06 13:02:46 +00:00
|
|
|
case SOCEEXIST:
|
2012-11-09 17:31:33 +00:00
|
|
|
return QSE_MUX_EEXIST;
|
2012-12-18 08:12:15 +00:00
|
|
|
#endif
|
2012-12-06 13:02:46 +00:00
|
|
|
|
|
|
|
case SOCEINTR:
|
|
|
|
return QSE_MUX_EINTR;
|
2012-11-09 17:31:33 +00:00
|
|
|
|
2012-12-28 08:39:41 +00:00
|
|
|
case SOCEPIPE:
|
|
|
|
return QSE_MUX_EPIPE;
|
|
|
|
|
2012-11-09 17:31:33 +00:00
|
|
|
default:
|
|
|
|
return QSE_MUX_ESYSERR;
|
|
|
|
}
|
|
|
|
}
|
2012-12-28 08:39:41 +00:00
|
|
|
|
2012-11-09 17:31:33 +00:00
|
|
|
#elif defined(__DOS__)
|
2012-12-28 08:39:41 +00:00
|
|
|
static qse_mux_errnum_t skerr_to_errnum (int e)
|
2012-11-09 17:31:33 +00:00
|
|
|
{
|
2012-12-28 08:39:41 +00:00
|
|
|
/* TODO: */
|
|
|
|
return QSE_MUX_ESYSERR;
|
2012-11-09 17:31:33 +00:00
|
|
|
}
|
|
|
|
#else
|
2012-12-28 08:39:41 +00:00
|
|
|
static qse_mux_errnum_t skerr_to_errnum (int e)
|
2012-11-09 17:31:33 +00:00
|
|
|
{
|
|
|
|
switch (e)
|
|
|
|
{
|
|
|
|
case ENOMEM:
|
|
|
|
return QSE_MUX_ENOMEM;
|
|
|
|
|
|
|
|
case EINVAL:
|
|
|
|
return QSE_MUX_EINVAL;
|
|
|
|
|
|
|
|
case EACCES:
|
|
|
|
return QSE_MUX_EACCES;
|
|
|
|
|
2012-12-28 08:39:41 +00:00
|
|
|
case ENOENT:
|
|
|
|
return QSE_MUX_ENOENT;
|
|
|
|
|
2012-11-09 17:31:33 +00:00
|
|
|
case EEXIST:
|
|
|
|
return QSE_MUX_EEXIST;
|
2020-09-08 16:26:05 +00:00
|
|
|
|
2012-11-09 17:31:33 +00:00
|
|
|
case EINTR:
|
|
|
|
return QSE_MUX_EINTR;
|
|
|
|
|
2012-12-27 14:40:58 +00:00
|
|
|
case EPIPE:
|
|
|
|
return QSE_MUX_EPIPE;
|
|
|
|
|
2014-10-23 16:09:05 +00:00
|
|
|
#if defined(EAGAIN) || defined(EWOULDBLOCK)
|
|
|
|
#if defined(EAGAIN) && defined(EWOULDBLOCK)
|
2012-12-27 14:40:58 +00:00
|
|
|
case EAGAIN:
|
2014-10-23 16:09:05 +00:00
|
|
|
#if (EWOULDBLOCK != EAGAIN)
|
|
|
|
case EWOULDBLOCK:
|
|
|
|
#endif
|
|
|
|
#elif defined(EAGAIN)
|
|
|
|
case EAGAIN:
|
|
|
|
#else
|
|
|
|
case EWOULDBLOCK;
|
|
|
|
#endif
|
2012-12-27 14:40:58 +00:00
|
|
|
return QSE_MUX_EAGAIN;
|
2014-10-23 16:09:05 +00:00
|
|
|
#endif
|
2012-12-27 14:40:58 +00:00
|
|
|
|
2012-11-09 17:31:33 +00:00
|
|
|
default:
|
|
|
|
return QSE_MUX_ESYSERR;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2022-02-18 14:16:51 +00:00
|
|
|
/* Allocate and initialize a multiplexer with 'xtnsize' extra bytes of
 * user extension space after the object. On failure QSE_NULL is
 * returned and, when 'errnum' is non-null, the cause is stored there. */
qse_mux_t* qse_mux_open (qse_mmgr_t* mmgr, qse_size_t xtnsize, qse_mux_evtcb_t evtcb, qse_size_t capahint, qse_mux_errnum_t* errnum)
{
	qse_mux_t* mux;

	mux = QSE_MMGR_ALLOC(mmgr, QSE_SIZEOF(*mux) + xtnsize);
	if (!mux)
	{
		if (errnum) *errnum = QSE_MUX_ENOMEM;
		return QSE_NULL;
	}

	if (qse_mux_init(mux, mmgr, evtcb, capahint) <= -1)
	{
		/* initialization failed - propagate its error and roll back */
		if (errnum) *errnum = qse_mux_geterrnum (mux);
		QSE_MMGR_FREE (mmgr, mux);
		return QSE_NULL;
	}

	/* clear the extension area for the caller */
	QSE_MEMSET (QSE_XTN(mux), 0, xtnsize);
	return mux;
}
|
|
|
|
void qse_mux_close (qse_mux_t* mux)
|
|
|
|
{
|
|
|
|
qse_mux_fini (mux);
|
|
|
|
QSE_MMGR_FREE (mux->mmgr, mux);
|
|
|
|
}
|
|
|
|
|
2022-02-18 14:16:51 +00:00
|
|
|
/* Initialize 'mux' in place: zero the object, record the memory manager
 * and callback, and create the backend-specific kernel object
 * (kqueue/epoll descriptor). Returns 0 on success, -1 on failure with
 * mux->errnum set. */
int qse_mux_init (qse_mux_t* mux, qse_mmgr_t* mmgr, qse_mux_evtcb_t evtcb, qse_size_t capahint)
{
	QSE_MEMSET (mux, 0, QSE_SIZEOF(*mux));
	mux->mmgr = mmgr;
	mux->evtcb = evtcb;
	mux->chan[0] = INVALID_CHAN;
	mux->chan[1] = INVALID_CHAN;

	/* epoll_create returns an error and set errno to EINVAL
	 * if size is 0. Having a positive size greater than 0
	 * also makes easier other parts like maintaining internal
	 * event buffers */
	if (capahint <= 0) capahint = 1;

#if defined(USE_SELECT)
	FD_ZERO (&mux->rset);
	FD_ZERO (&mux->wset);
	mux->maxhnd = -1; /* no handle registered yet */

#elif defined(USE_KQUEUE)
	/* prefer kqueue1() when available - sets close-on-exec atomically */
	#if defined(HAVE_KQUEUE1) && defined(O_CLOEXEC)
	mux->kq = kqueue1(O_CLOEXEC);
	#else
	mux->kq = kqueue();
	#endif
	if (mux->kq <= -1)
	{
		mux->errnum = skerr_to_errnum(errno);
		return -1;
	}

	#if defined(HAVE_KQUEUE1) && defined(O_CLOEXEC)
	/* nothing to do */
	#elif defined(FD_CLOEXEC)
	{
		/* best-effort close-on-exec; failure is deliberately ignored */
		int flag = fcntl(mux->kq, F_GETFD);
		if (flag >= 0) fcntl (mux->kq, F_SETFD, flag | FD_CLOEXEC);
	}
	#endif

#elif defined(USE_EPOLL)
	/* prefer epoll_create1() when available - sets close-on-exec atomically */
	#if defined(HAVE_EPOLL_CREATE1) && defined(EPOLL_CLOEXEC)
	mux->fd = epoll_create1(EPOLL_CLOEXEC);
	#else
	mux->fd = epoll_create(capahint);
	#endif
	if (mux->fd <= -1)
	{
		mux->errnum = skerr_to_errnum(errno);
		return -1;
	}

	#if defined(HAVE_EPOLL_CREATE1) && defined(EPOLL_CLOEXEC)
	/* nothing to do */
	#elif defined(FD_CLOEXEC)
	{
		/* best-effort close-on-exec; failure is deliberately ignored */
		int flag = fcntl(mux->fd, F_GETFD);
		if (flag >= 0) fcntl (mux->fd, F_SETFD, flag | FD_CLOEXEC);
	}
	#endif

#elif defined(__OS2__)
	/* nothing special to do */

#else
	/* TODO: */
	mux->errnum = QSE_MUX_ENOIMPL;
	return -1;
#endif

	return 0;
}
/* Release every resource held by 'mux': the optional interaction pipe,
 * the backend kernel object, and all per-handle event records. The
 * object itself is not freed. */
void qse_mux_fini (qse_mux_t* mux)
{
	/* unregister and close the read end of the interaction pipe first,
	 * so the backend no longer references the descriptor */
	if (mux->chan[0] != INVALID_CHAN)
	{
		qse_mux_evt_t evt;
		QSE_MEMSET (&evt, 0, QSE_SIZEOF(evt));
		evt.hnd = mux->chan[0];
		evt.mask = QSE_MUX_IN;
		/* evt.data = ... */
		qse_mux_delete (mux, &evt);
		close (mux->chan[0]);
		mux->chan[0] = INVALID_CHAN;
	}
	/* the write end was never registered - just close it */
	if (mux->chan[1] != INVALID_CHAN)
	{
		close (mux->chan[1]);
		mux->chan[1] = INVALID_CHAN;
	}

#if defined(USE_SELECT)
	FD_ZERO (&mux->rset);
	FD_ZERO (&mux->wset);

	if (mux->me.ptr)
	{
		int i;

		/* free each per-handle event record, then the table itself */
		for (i = 0; i < mux->me.ubound; i++)
		{
			if (mux->me.ptr[i])
				QSE_MMGR_FREE (mux->mmgr, mux->me.ptr[i]);
		}

		QSE_MMGR_FREE (mux->mmgr, mux->me.ptr);
		mux->me.ubound = 0;
		mux->me.ptr = QSE_NULL;
		mux->maxhnd = -1;
	}

#elif defined(USE_KQUEUE)
	close (mux->kq);

	if (mux->me.ptr)
	{
		int i;

		/* free each per-handle event record, then the table itself */
		for (i = 0; i < mux->me.ubound; i++)
		{
			if (mux->me.ptr[i])
				QSE_MMGR_FREE (mux->mmgr, mux->me.ptr[i]);
		}

		QSE_MMGR_FREE (mux->mmgr, mux->me.ptr);
		mux->me.ubound = 0;
		mux->me.ptr = QSE_NULL;
	}

#elif defined(USE_EPOLL)
	close (mux->fd);

	/* release the epoll_wait() output buffer */
	if (mux->ee.ptr)
	{
		QSE_MMGR_FREE (mux->mmgr, mux->ee.ptr);
		mux->ee.len = 0;
		mux->ee.capa = 0;
		mux->ee.ptr = QSE_NULL;
	}

	if (mux->me.ptr)
	{
		int i;

		/* free each per-handle event record, then the table itself */
		for (i = 0; i < mux->me.ubound; i++)
		{
			if (mux->me.ptr[i]) QSE_MMGR_FREE (mux->mmgr, mux->me.ptr[i]);
		}

		QSE_MMGR_FREE (mux->mmgr, mux->me.ptr);
		mux->me.ubound = 0;
		mux->me.ptr = QSE_NULL;
	}

#elif defined(__OS2__)
	if (mux->me.ptr)
	{
		int i;

		/* free each per-handle event record, then the table itself */
		for (i = 0; i < mux->me.ubound; i++)
		{
			if (mux->me.ptr[i])
				QSE_MMGR_FREE (mux->mmgr, mux->me.ptr[i]);
		}

		QSE_MMGR_FREE (mux->mmgr, mux->me.ptr);
		mux->me.ubound = 0;
		mux->me.ptr = QSE_NULL;
	}

	if (mux->fdarr)
	{
		QSE_MMGR_FREE (mux->mmgr, mux->fdarr);
		mux->fdarr = QSE_NULL;
	}
#endif
}
/* Return the memory manager the multiplexer was created with. */
qse_mmgr_t* qse_mux_getmmgr (qse_mux_t* mux)
{
	return mux->mmgr;
}
/* Return a pointer to the caller's extension area allocated after the
 * mux object (see the 'xtnsize' argument to qse_mux_open()). */
void* qse_mux_getxtn (qse_mux_t* mux)
{
	return QSE_XTN (mux);
}
/* Return the error code of the last failed operation on 'mux'. */
qse_mux_errnum_t qse_mux_geterrnum (qse_mux_t* mux)
{
	return mux->errnum;
}
/* Register the event described by 'evt' (handle + interest mask) with
 * the multiplexer. A copy of 'evt' is stored in a per-handle record so
 * later poll callbacks can hand back the caller's data. Returns 0 on
 * success, -1 on failure with mux->errnum set. */
int qse_mux_insert (qse_mux_t* mux, const qse_mux_evt_t* evt)
{
#if defined(USE_SELECT)
	/* nothing */
#elif defined(USE_KQUEUE)
	struct kevent chlist[2]; /* at most one READ and one WRITE change */
	int count = 0;
#elif defined(USE_EPOLL)
	struct epoll_event ev;
#elif defined(__OS2__)
	/* nothing */
#else
	/* nothing */
#endif

	/* sanity check */
	if (!(evt->mask & (QSE_MUX_IN | QSE_MUX_OUT)) || evt->hnd < 0)
	{
		mux->errnum = QSE_MUX_EINVAL;
		return -1;
	}

#if defined(USE_SELECT)

	/* TODO: windows seems to return a pretty high file descriptors
	 * using an array to store information may not be so effcient.
	 * devise a different way to maintain information */
	/* grow the per-handle table so evt->hnd can index into it */
	if (evt->hnd >= mux->me.ubound)
	{
		qse_mux_evt_t** tmp;
		int ubound;

		ubound = evt->hnd + 1;
		ubound = QSE_ALIGNTO_POW2(ubound, 128);

		tmp = QSE_MMGR_REALLOC(mux->mmgr, mux->me.ptr, QSE_SIZEOF(*mux->me.ptr) * ubound);
		if (tmp == QSE_NULL)
		{
			mux->errnum = QSE_MUX_ENOMEM;
			return -1;
		}

		/* zero the freshly exposed tail of the table */
		QSE_MEMSET (&tmp[mux->me.ubound], 0, QSE_SIZEOF(*mux->me.ptr) * (ubound - mux->me.ubound));
		mux->me.ptr = tmp;
		mux->me.ubound = ubound;
	}

	/* lazily allocate the record for this handle; it is kept for reuse
	 * even after deletion */
	if (!mux->me.ptr[evt->hnd])
	{
		mux->me.ptr[evt->hnd] = QSE_MMGR_ALLOC(mux->mmgr, QSE_SIZEOF(*evt));
		if (!mux->me.ptr[evt->hnd])
		{
			mux->errnum = QSE_MUX_ENOMEM;
			return -1;
		}
	}

	if (evt->mask & QSE_MUX_IN) FD_SET (evt->hnd, &mux->rset);
	if (evt->mask & QSE_MUX_OUT) FD_SET (evt->hnd, &mux->wset);

	*mux->me.ptr[evt->hnd] = *evt;
	if (evt->hnd > mux->maxhnd) mux->maxhnd = evt->hnd;
	mux->size++;
	return 0;

#elif defined(USE_KQUEUE)
	/* TODO: study if it is better to put 'evt' to the udata
	 * field of chlist? */

	if (evt->mask & QSE_MUX_IN)
	{
		EV_SET (&chlist[count], evt->hnd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);
		count++;
	}

	if (evt->mask & QSE_MUX_OUT)
	{
		EV_SET (&chlist[count], evt->hnd, EVFILT_WRITE, EV_ADD | EV_ENABLE, 0, 0, 0);
		count++;
	}

	/* the sanity check above guarantees at least one change */
	QSE_ASSERT (count > 0);

	/* grow the per-handle table so evt->hnd can index into it */
	if (evt->hnd >= mux->me.ubound)
	{
		qse_mux_evt_t** tmp;
		int ubound;

		ubound = evt->hnd + 1;
		ubound = QSE_ALIGNTO_POW2(ubound, 128);

		tmp = QSE_MMGR_REALLOC(mux->mmgr, mux->me.ptr, QSE_SIZEOF(*mux->me.ptr) * ubound);
		if (tmp == QSE_NULL)
		{
			mux->errnum = QSE_MUX_ENOMEM;
			return -1;
		}

		/* zero the freshly exposed tail of the table */
		QSE_MEMSET (&tmp[mux->me.ubound], 0, QSE_SIZEOF(*mux->me.ptr) * (ubound - mux->me.ubound));
		mux->me.ptr = tmp;
		mux->me.ubound = ubound;
	}

	/* lazily allocate the record for this handle */
	if (!mux->me.ptr[evt->hnd])
	{
		mux->me.ptr[evt->hnd] = QSE_MMGR_ALLOC(mux->mmgr, QSE_SIZEOF(*evt));
		if (!mux->me.ptr[evt->hnd])
		{
			mux->errnum = QSE_MUX_ENOMEM;
			return -1;
		}
	}

	/* add the event */
	if (kevent (mux->kq, chlist, count, QSE_NULL, 0, QSE_NULL) <= -1)
	{
		mux->errnum = skerr_to_errnum (errno);
		return -1;
	}

	*mux->me.ptr[evt->hnd] = *evt;
	mux->size++;
	return 0;

#elif defined(USE_EPOLL)

	QSE_MEMSET (&ev, 0, QSE_SIZEOF(ev));
	if (evt->mask & QSE_MUX_IN) ev.events |= EPOLLIN;
	if (evt->mask & QSE_MUX_OUT) ev.events |= EPOLLOUT;

	/* the sanity check above guarantees a non-empty interest set */
	QSE_ASSERT (ev.events != 0);

	/* grow the per-handle table so evt->hnd can index into it */
	if (evt->hnd >= mux->me.ubound)
	{
		qse_mux_evt_t** tmp;
		int ubound;

		ubound = evt->hnd + 1;
		ubound = QSE_ALIGNTO_POW2(ubound, 128);

		tmp = QSE_MMGR_REALLOC(mux->mmgr, mux->me.ptr, QSE_SIZEOF(*mux->me.ptr) * ubound);
		if (tmp == QSE_NULL)
		{
			mux->errnum = QSE_MUX_ENOMEM;
			return -1;
		}

		/* zero the freshly exposed tail of the table */
		QSE_MEMSET (&tmp[mux->me.ubound], 0, QSE_SIZEOF(*mux->me.ptr) * (ubound - mux->me.ubound));
		mux->me.ptr = tmp;
		mux->me.ubound = ubound;
	}

	/* lazily allocate the record for this handle */
	if (!mux->me.ptr[evt->hnd])
	{
		mux->me.ptr[evt->hnd] = QSE_MMGR_ALLOC(mux->mmgr, QSE_SIZEOF(*evt));
		if (!mux->me.ptr[evt->hnd])
		{
			mux->errnum = QSE_MUX_ENOMEM;
			return -1;
		}
	}

	/* the kernel hands this pointer back on each triggered event */
	/*ev.data.fd = evt->hnd;*/
	ev.data.ptr = mux->me.ptr[evt->hnd];

	/* keep the epoll_wait() output buffer at least as large as the
	 * number of registered events */
	if (mux->ee.len >= mux->ee.capa)
	{
		struct epoll_event* tmp;
		qse_size_t newcapa;

		newcapa = (mux->ee.capa + 1) * 2;
		newcapa = QSE_ALIGNTO_POW2(newcapa, 256);

		tmp = QSE_MMGR_REALLOC(mux->mmgr, mux->ee.ptr, QSE_SIZEOF(*mux->ee.ptr) * newcapa);
		if (tmp == QSE_NULL)
		{
			mux->errnum = QSE_MUX_ENOMEM;
			return -1;
		}

		mux->ee.ptr = tmp;
		mux->ee.capa = newcapa;
	}

	if (epoll_ctl(mux->fd, EPOLL_CTL_ADD, evt->hnd, &ev) == -1)
	{
		mux->errnum = skerr_to_errnum (errno);
		return -1;
	}

	*mux->me.ptr[evt->hnd] = *evt;
	mux->ee.len++;
	return 0;

#elif defined(__OS2__)

	/* grow the per-handle table so evt->hnd can index into it */
	if (evt->hnd >= mux->me.ubound)
	{
		qse_mux_evt_t** tmp;
		int* fatmp;
		int ubound;

		ubound = evt->hnd + 1;
		ubound = QSE_ALIGNTO_POW2(ubound, 128);

		tmp = QSE_MMGR_REALLOC(mux->mmgr, mux->me.ptr, QSE_SIZEOF(*mux->me.ptr) * ubound);
		if (tmp == QSE_NULL)
		{
			mux->errnum = QSE_MUX_ENOMEM;
			return -1;
		}

		/* maintain this array double the size of the highest handle + 1 */
		fatmp = QSE_MMGR_REALLOC(mux->mmgr, mux->fdarr, QSE_SIZEOF(*mux->fdarr) * (ubound * 2));
		if (fatmp == QSE_NULL)
		{
			QSE_MMGR_FREE (mux->mmgr, tmp);
			mux->errnum = QSE_MUX_ENOMEM;
			return -1;
		}

		/* zero the freshly exposed tail of the table */
		QSE_MEMSET (&tmp[mux->me.ubound], 0, QSE_SIZEOF(*mux->me.ptr) * (ubound - mux->me.ubound));
		mux->me.ptr = tmp;
		mux->me.ubound = ubound;
		mux->fdarr = fatmp;
	}

	/* lazily allocate the record for this handle */
	if (!mux->me.ptr[evt->hnd])
	{
		mux->me.ptr[evt->hnd] = QSE_MMGR_ALLOC(mux->mmgr, QSE_SIZEOF(*evt));
		if (!mux->me.ptr[evt->hnd])
		{
			mux->errnum = QSE_MUX_ENOMEM;
			return -1;
		}
	}

	*mux->me.ptr[evt->hnd] = *evt;
	mux->size++;
	return 0;

#else
	/* TODO: */
	mux->errnum = QSE_MUX_ENOIMPL;
	return -1;
#endif
}
/* Unregister the event for evt->hnd from the multiplexer. Only the
 * handle (and, for validation, the stored mask) of 'evt' is consulted.
 * Returns 0 on success, -1 on failure with mux->errnum set. */
int qse_mux_delete (qse_mux_t* mux, const qse_mux_evt_t* evt)
{
#if defined(USE_SELECT)
	qse_mux_evt_t* mevt;

	if (mux->size <= 0 || evt->hnd < 0 || evt->hnd >= mux->me.ubound)
	{
		mux->errnum = QSE_MUX_EINVAL;
		return -1;
	}

	mevt = mux->me.ptr[evt->hnd];
	if (!mevt || mevt->hnd != evt->hnd)
	{
		/* already deleted??? */
		mux->errnum = QSE_MUX_EINVAL;
		return -1;
	}

	if (mevt->mask & QSE_MUX_IN) FD_CLR (evt->hnd, &mux->rset);
	if (mevt->mask & QSE_MUX_OUT) FD_CLR (evt->hnd, &mux->wset);

	/* when the highest registered handle goes away, scan downwards for
	 * the next live record to keep select()'s nfds argument tight */
	if (mevt->hnd == mux->maxhnd)
	{
		qse_mux_hnd_t i;

		for (i = mevt->hnd; i > 0; )
		{
			i--;
			if (mux->me.ptr[i] && mux->me.ptr[i]->hnd >= 0)
			{
				QSE_ASSERT (i == mux->me.ptr[i]->hnd);
				mux->maxhnd = mux->me.ptr[i]->hnd;
				goto done;
			}
		}

		/* no live record remains below */
		mux->maxhnd = -1;
		QSE_ASSERT (mux->size == 1);
	}

done:
	/* mark the record dead; the allocation is kept for reuse */
	mevt->hnd = -1;
	mevt->mask = 0;
	mux->size--;
	return 0;

#elif defined(USE_KQUEUE)

	qse_mux_evt_t* mevt;
	struct kevent chlist[2];
	int count = 0;

	if (mux->size <= 0 || evt->hnd < 0 || evt->hnd >= mux->me.ubound)
	{
		mux->errnum = QSE_MUX_EINVAL;
		return -1;
	}

	mevt = mux->me.ptr[evt->hnd];
	if (!mevt || mevt->hnd != evt->hnd)
	{
		/* already deleted??? */
		mux->errnum = QSE_MUX_EINVAL;
		return -1;
	}

	/* compose the change list */
	if (mevt->mask & QSE_MUX_IN)
	{
		EV_SET (&chlist[count], evt->hnd,
			EVFILT_READ, EV_DELETE | EV_DISABLE, 0, 0, 0);
		count++;
	}
	if (mevt->mask & QSE_MUX_OUT)
	{
		EV_SET (&chlist[count], evt->hnd,
			EVFILT_WRITE, EV_DELETE | EV_DISABLE, 0, 0, 0);
		count++;
	}

	/* delte the event by applying the change list */
	if (kevent (mux->kq, chlist, count, QSE_NULL, 0, QSE_NULL) <= -1)
	{
		mux->errnum = skerr_to_errnum (errno);
		return -1;
	}

	/* NOTE(review): unlike the select path, the record is not marked
	 * dead (mevt->hnd = -1) here - confirm whether this is intended */
	mux->size--;
	return 0;

#elif defined(USE_EPOLL)
	if (mux->ee.len <= 0)
	{
		mux->errnum = QSE_MUX_EINVAL;
		return -1;
	}

	if (epoll_ctl(mux->fd, EPOLL_CTL_DEL, evt->hnd, QSE_NULL) <= -1)
	{
		mux->errnum = skerr_to_errnum(errno);
		return -1;
	}

	mux->ee.len--;
	return 0;

#elif defined(__OS2__)

	qse_mux_evt_t* mevt;

	if (mux->size <= 0 || evt->hnd < 0 || evt->hnd >= mux->me.ubound)
	{
		mux->errnum = QSE_MUX_EINVAL;
		return -1;
	}

	mevt = mux->me.ptr[evt->hnd];
	if (!mevt || mevt->hnd != evt->hnd)
	{
		/* already deleted??? */
		mux->errnum = QSE_MUX_EINVAL;
		return -1;
	}

	/* mark the record dead; the allocation is kept for reuse */
	mevt->hnd = -1;
	mevt->mask = 0;
	mux->size--;
	return 0;

#else
	/* TODO */
	mux->errnum = QSE_MUX_ENOIMPL;
	return -1;
#endif
}
/* Wait for events on all registered handles and invoke mux->evtcb once per
 * handle that became ready. 'tmout' is the maximum time to wait; QSE_NULL
 * means wait indefinitely (each backend translates it to its native form).
 * Returns the backend's ready count (>= 0) on success, or -1 with
 * mux->errnum set on failure. The implementation is selected at compile
 * time: select(), kqueue, epoll, or the OS/2 os2_select() wrapper. */
int qse_mux_poll (qse_mux_t* mux, const qse_ntime_t* tmout)
{
#if defined(USE_SELECT)
	struct timeval tv;
	int n;

	/* convert the qse time (sec + nsec) to select()'s timeval (sec + usec) */
	if (tmout)
	{
		tv.tv_sec = tmout->sec;
		tv.tv_usec = QSE_NSEC_TO_USEC (tmout->nsec);
	}

	/* select() destroys its fd_set arguments; operate on scratch copies so
	 * the registered sets (rset/wset) survive across calls */
	mux->tmprset = mux->rset;
	mux->tmpwset = mux->wset;

	n = select(mux->maxhnd + 1, &mux->tmprset, &mux->tmpwset, QSE_NULL, (tmout? &tv: QSE_NULL));
	if (n <= -1)
	{
	/* the error code lives in a different place per platform */
	#if defined(_WIN32)
		mux->errnum = skerr_to_errnum(WSAGetLastError());
	#elif defined(__OS2__)
		mux->errnum = skerr_to_errnum(sock_errno());
	#else
		mux->errnum = skerr_to_errnum(errno);
	#endif
		return -1;
	}

	if (n > 0)
	{
		qse_mux_hnd_t i;
		qse_mux_evt_t* evt, xevt;

		/* scan every possible handle up to the highest registered one */
		for (i = 0; i <= mux->maxhnd; i++)
		{
			evt = mux->me.ptr[i];
			/* skip empty or stale slots (hnd mismatch means the slot
			 * no longer describes this handle) */
			if (!evt || evt->hnd != i) continue;

			/* report on a copy so the callback sees only the masks
			 * that actually fired, without clobbering the registration */
			xevt = *evt;
			xevt.mask = 0;
			if ((evt->mask & QSE_MUX_IN) &&
			    FD_ISSET(evt->hnd, &mux->tmprset)) xevt.mask |= QSE_MUX_IN;
			if ((evt->mask & QSE_MUX_OUT) &&
			    FD_ISSET(evt->hnd, &mux->tmpwset)) xevt.mask |= QSE_MUX_OUT;

			if (xevt.mask > 0) mux->evtcb (mux, &xevt);
		}
	}

	return n;

#elif defined(USE_KQUEUE)
	int nevs;
	struct timespec ts;

	/* kevent() takes a timespec directly; same sec/nsec units as qse_ntime_t */
	if (tmout)
	{
		ts.tv_sec = tmout->sec;
		ts.tv_nsec = tmout->nsec;
	}

	/* wait for events */
	nevs = kevent(mux->kq, QSE_NULL, 0, mux->evlist, QSE_COUNTOF(mux->evlist), (tmout? &ts: QSE_NULL));
	if (nevs <= -1)
	{
		mux->errnum = skerr_to_errnum(errno);
		return -1;
	}

	if (nevs > 0)
	{
		int i;
		qse_mux_hnd_t fd;
		qse_mux_evt_t* evt, xevt;

		for (i = 0; i < nevs; i++)
		{
			/* entries flagged EV_ERROR carry an errno, not an event */
			if (mux->evlist[i].flags & EV_ERROR) continue;

			fd = mux->evlist[i].ident;
			evt = mux->me.ptr[fd];
			/* skip empty or stale slots */
			if (!evt || evt->hnd != fd) continue;

			xevt = *evt;
			xevt.mask = 0;

			/* each kevent entry reports exactly one filter */
			if ((evt->mask & QSE_MUX_IN) &&
			    mux->evlist[i].filter == EVFILT_READ) xevt.mask |= QSE_MUX_IN;
			if ((evt->mask & QSE_MUX_OUT) &&
			    mux->evlist[i].filter == EVFILT_WRITE) xevt.mask |= QSE_MUX_OUT;

			if (xevt.mask > 0) mux->evtcb (mux, &xevt);
		}
	}

	return nevs;

#elif defined(USE_EPOLL)
	int nfds, i;
	qse_mux_evt_t* evt, xevt;
	int epoll_tmout;

	/* epoll_wait() takes milliseconds; -1 means block indefinitely */
	epoll_tmout = tmout? QSE_SECNSEC_TO_MSEC(tmout->sec,tmout->nsec): -1;

	if (mux->ee.len <= 0)
	{
		/* epoll_wait() requires the third parameter to be greater than 0.
		 * so let me use a dummy variable to prevent an EINVAL error by epoll_wait() */

		struct epoll_event dummy;
		nfds = epoll_wait(mux->fd, &dummy, 1, epoll_tmout);
		if (nfds <= -1)
		{
			mux->errnum = skerr_to_errnum(errno);
			return -1;
		}

		/* nfds is supposed to be 0 as no file descriptors are watched. */
		nfds = 0; /* but reset this to 0 just in case. */
	}
	else
	{
		nfds = epoll_wait(mux->fd, mux->ee.ptr, mux->ee.len, epoll_tmout);
		if (nfds <= -1)
		{
			mux->errnum = skerr_to_errnum(errno);
			return -1;
		}
	}

	for (i = 0; i < nfds; i++)
	{
		/*int hnd = mux->ee.ptr[i].data.fd;
		evt = mux->me.ptr[hnd];
		QSE_ASSERT (evt->hnd == hnd);*/

		/* the event record pointer was stored in data.ptr at registration */
		evt = mux->ee.ptr[i].data.ptr;
		if (evt->hnd == mux->chan[0])
		{
			/* this is the internal interrupt channel (see qse_mux_setupchan):
			 * drain all pending wake-up bytes and don't surface it to the
			 * user callback */
			qse_uint8_t tmp[128];
			while (read(evt->hnd, tmp, QSE_SIZEOF(tmp)) > 0) /* nothing */;
			continue;
		}

		xevt = *evt;
		xevt.mask = 0;
		if (mux->ee.ptr[i].events & EPOLLIN) xevt.mask |= QSE_MUX_IN;
		if (mux->ee.ptr[i].events & EPOLLOUT) xevt.mask |= QSE_MUX_OUT;
		if (mux->ee.ptr[i].events & (EPOLLRDHUP | EPOLLHUP | EPOLLERR))
		{
			if (mux->ee.ptr[i].events & EPOLLRDHUP) xevt.mask |= QSE_MUX_RDHUP;
			if (mux->ee.ptr[i].events & EPOLLHUP) xevt.mask |= QSE_MUX_HUP;
			if (mux->ee.ptr[i].events & EPOLLERR) xevt.mask |= QSE_MUX_ERR;

			/* on hangup/error, also report the directions the caller
			 * subscribed to so it gets a chance to read/write and detect
			 * the condition itself */
			if (evt->mask & QSE_MUX_IN) xevt.mask |= QSE_MUX_IN;
			if (evt->mask & QSE_MUX_OUT) xevt.mask |= QSE_MUX_OUT;
		}

		if (xevt.mask > 0) mux->evtcb (mux, &xevt);
	}

	return nfds;

#elif defined(__OS2__)

	qse_mux_evt_t* evt;
	long tv;
	int n, i, count, rcount, wcount;

	/* os2_select() takes a single millisecond timeout; -1 blocks forever */
	tv = tmout? (QSE_SEC_TO_MSEC(tmout->sec) + QSE_NSEC_TO_MSEC (tmout->nsec)): -1;

	/*
	 * be aware that reconstructing this array every time is pretty
	 * inefficient.
	 */
	/* fdarr layout: [0, rcount) read handles, [rcount, count) write handles */
	count = 0;
	for (i = 0; i < mux->me.ubound; i++)
	{
		evt = mux->me.ptr[i];
		if (evt && (evt->mask & QSE_MUX_IN)) mux->fdarr[count++] = evt->hnd;
	}
	rcount = count;
	for (i = 0; i < mux->me.ubound; i++)
	{
		evt = mux->me.ptr[i];
		if (evt && (evt->mask & QSE_MUX_OUT)) mux->fdarr[count++] = evt->hnd;
	}
	wcount = count - rcount;

	n = os2_select(mux->fdarr, rcount, wcount, 0, tv);
	if (n <= -1)
	{
		mux->errnum = skerr_to_errnum(sock_errno());
		return -1;
	}

	if (n >= 1)
	{
		qse_mux_evt_t xevt;

		for (i = 0; i < count; i++)
		{
			/* not-ready entries are marked -1 in fdarr by os2_select */
			if (mux->fdarr[i] == -1) continue;

			evt = mux->me.ptr[mux->fdarr[i]];
			if (!evt || evt->hnd != mux->fdarr[i]) continue;

			xevt = *evt;

			/* due to the way i check 'fdarr' , it can't have
			 * both IN and OUT at the same time. they are
			 * triggered separately */
			xevt.mask = (i < rcount)? QSE_MUX_IN: QSE_MUX_OUT;
			mux->evtcb (mux, &xevt);
		}
	}

	return n;

#else
	/* TODO */
	mux->errnum = QSE_MUX_ENOIMPL;
	return -1;
#endif
}
|
|
|
|
|
2020-09-08 16:26:05 +00:00
|
|
|
/* Set up the internal interrupt channel - a pipe whose read end is
 * registered with the multiplexer so that qse_mux_interrupt() can wake up
 * a thread blocked in qse_mux_poll() by writing to the write end.
 * May be called only once per mux; a second call fails with QSE_MUX_EPERM.
 * Returns 0 on success, -1 on failure with mux->errnum set. */
int qse_mux_setupchan (qse_mux_t* mux)
{
	qse_mux_evt_t evt;

	if (mux->chan[0] != INVALID_CHAN)
	{
		mux->errnum = QSE_MUX_EPERM; /* not allowed to call again */
		return -1;
	}

#if defined(HAVE_PIPE2) && defined(O_CLOEXEC)
	/* pipe2() sets CLOEXEC/NONBLOCK atomically with creation */
	if (pipe2(mux->chan, O_CLOEXEC | O_NONBLOCK) <= -1)
#else
	if (pipe(mux->chan) <= -1)
#endif
	{
		mux->errnum = skerr_to_errnum(errno);
		return -1;
	}
	else
	{
#if defined(HAVE_PIPE2) && defined(O_CLOEXEC)
		/* do nothing - pipe2() already applied the flags */
#else
		/* emulate pipe2(O_CLOEXEC | O_NONBLOCK) with fcntl() on both ends;
		 * failures here are ignored as best-effort */
		int flags;
		flags = fcntl(mux->chan[0], F_GETFD);
		if (flags >= 0) fcntl(mux->chan[0], F_SETFD, flags | FD_CLOEXEC);
		flags = fcntl(mux->chan[1], F_GETFD);
		if (flags >= 0) fcntl(mux->chan[1], F_SETFD, flags | FD_CLOEXEC);
		flags = fcntl(mux->chan[0], F_GETFL);
		if (flags >= 0) fcntl(mux->chan[0], F_SETFL, flags | O_NONBLOCK);
		flags = fcntl(mux->chan[1], F_GETFL);
		if (flags >= 0) fcntl(mux->chan[1], F_SETFL, flags | O_NONBLOCK);
#endif
	}

	/* watch the read end for input so poll() wakes up on interrupt writes */
	QSE_MEMSET (&evt, 0, QSE_SIZEOF(evt));
	evt.hnd = mux->chan[0];
	evt.mask = QSE_MUX_IN;
	/*evt.data = ... */
	if (qse_mux_insert(mux, &evt) <= -1)
	{
		close (mux->chan[0]);
		close (mux->chan[1]);

		mux->chan[0] = INVALID_CHAN;
		mux->chan[1] = INVALID_CHAN;

		/* BUG FIX: this path previously fell through to 'return 0',
		 * reporting success while leaving qse_mux_interrupt() a silent
		 * no-op. propagate the failure; qse_mux_insert() has already set
		 * mux->errnum. */
		return -1;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/* Wake up a thread blocked in qse_mux_poll() by writing one byte to the
 * write end of the internal interrupt channel. Does nothing if the channel
 * has not been set up via qse_mux_setupchan(). */
void qse_mux_interrupt (qse_mux_t* mux)
{
	if (mux->chan[1] == INVALID_CHAN) return; /* no channel - nothing to poke */
	write(mux->chan[1], "Q", 1);
}
|