2016-01-26 16:07:52 +00:00
/*
* $ Id $
*
2020-02-20 15:35:16 +00:00
Copyright ( c ) 2016 - 2020 Chung , Hyung - Hwan . All rights reserved .
2016-01-26 16:07:52 +00:00
Redistribution and use in source and binary forms , with or without
modification , are permitted provided that the following conditions
are met :
1. Redistributions of source code must retain the above copyright
notice , this list of conditions and the following disclaimer .
2. Redistributions in binary form must reproduce the above copyright
notice , this list of conditions and the following disclaimer in the
documentation and / or other materials provided with the distribution .
THIS SOFTWARE IS PROVIDED BY THE AUTHOR " AS IS " AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED .
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT , INDIRECT ,
INCIDENTAL , SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT
NOT LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE ,
DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT
( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
*/
2018-12-12 13:15:54 +00:00
# include "mio-prv.h"
2020-05-01 14:00:27 +00:00
# include "mio-fmt.h"
2020-04-27 04:15:11 +00:00
# include <stdlib.h>
2016-04-24 17:30:43 +00:00
2019-01-31 09:16:44 +00:00
# define DEV_CAP_ALL_WATCHED (MIO_DEV_CAP_IN_WATCHED | MIO_DEV_CAP_OUT_WATCHED | MIO_DEV_CAP_PRI_WATCHED)
2016-02-04 15:06:20 +00:00
2018-12-12 13:15:54 +00:00
static int schedule_kill_zombie_job ( mio_dev_t * dev ) ;
static int kill_and_free_device ( mio_dev_t * dev , int force ) ;
2016-04-16 16:05:57 +00:00
2019-01-17 08:51:10 +00:00
static void on_read_timeout ( mio_t * mio , const mio_ntime_t * now , mio_tmrjob_t * job ) ;
static void on_write_timeout ( mio_t * mio , const mio_ntime_t * now , mio_tmrjob_t * job ) ;
2016-04-24 17:30:43 +00:00
/* ========================================================================= */
2020-04-27 04:15:11 +00:00
/* default allocator hook - forwards to malloc(). the mmgr context is unused
 * because the standard C allocator needs no per-manager state. */
static void* mmgr_alloc (mio_mmgr_t* mmgr, mio_oow_t size)
{
	return malloc(size);
}
/* default reallocator hook - forwards to realloc(). the mmgr context is unused. */
static void* mmgr_realloc (mio_mmgr_t* mmgr, void* ptr, mio_oow_t size)
{
	return realloc(ptr, size);
}
/* default deallocator hook - forwards to free(). the mmgr context is unused.
 * NOTE: the original wrote 'return free(ptr);'. a return statement with an
 * expression is a constraint violation in a void function in ISO C
 * (C11 6.8.6.4); call free() as a plain statement instead. */
static void mmgr_free (mio_mmgr_t* mmgr, void* ptr)
{
	free (ptr);
}
/* built-in memory manager used when the caller passes no manager to mio_open() */
static mio_mmgr_t default_mmgr =
{
	mmgr_alloc,   /* allocate */
	mmgr_realloc, /* resize */
	mmgr_free,    /* release */
	MIO_NULL      /* no context required by the standard allocator */
};
/* ========================================================================= */
2019-01-27 02:09:22 +00:00
/* Allocate and initialize a mio instance plus a trailing extension area of
 * xtnsize bytes. On failure, returns MIO_NULL and, if errinfo is given,
 * fills it with the failure details. */
mio_t* mio_open (mio_mmgr_t* mmgr, mio_oow_t xtnsize, mio_cmgr_t* cmgr, mio_oow_t tmrcapa, mio_errinf_t* errinfo)
{
	mio_t* mio;

	/* fall back to built-in defaults when no managers are given */
	if (!mmgr) mmgr = &default_mmgr;
	if (!cmgr) cmgr = mio_get_utf8_cmgr();

	mio = (mio_t*)MIO_MMGR_ALLOC(mmgr, MIO_SIZEOF(mio_t) + xtnsize);
	if (!mio)
	{
		/* allocation failed - report out-of-memory through errinfo if possible */
		if (errinfo)
		{
			errinfo->num = MIO_ESYSMEM;
			mio_copy_oocstr (errinfo->msg, MIO_COUNTOF(errinfo->msg), mio_errnum_to_errstr(MIO_ESYSMEM));
		}
		return MIO_NULL;
	}

	if (mio_init(mio, mmgr, cmgr, tmrcapa) <= -1)
	{
		/* initialization failed - capture the error before freeing the object */
		if (errinfo) mio_geterrinf (mio, errinfo);
		MIO_MMGR_FREE (mmgr, mio);
		return MIO_NULL;
	}

	/* clear the extension area that trails the core object */
	MIO_MEMSET (mio + 1, 0, xtnsize);
	return mio;
}
2018-12-12 13:15:54 +00:00
/* Finalize a mio instance created with mio_open() and release its memory
 * through the memory manager it was created with. */
void mio_close (mio_t* mio)
{
	mio_fini (mio);
	MIO_MMGR_FREE (mio->_mmgr, mio);
}
2019-01-27 02:09:22 +00:00
/* Initialize a caller-provided mio object in place.
 * Returns 0 on success, -1 on failure (partial state is rolled back). */
int mio_init (mio_t* mio, mio_mmgr_t* mmgr, mio_cmgr_t* cmgr, mio_oow_t tmrcapa)
{
	int sys_inited = 0; /* tracks whether mio_sys_init() succeeded, for rollback */

	MIO_MEMSET (mio, 0, MIO_SIZEOF(*mio));
	mio->_instsize = MIO_SIZEOF(*mio);
	mio->_mmgr = mmgr;
	mio->_cmgr = cmgr;

	/* initialize data for logging support */
	mio->option.log_mask = MIO_LOG_ALL_LEVELS | MIO_LOG_ALL_TYPES;
	mio->log.capa = MIO_ALIGN_POW2(1, MIO_LOG_CAPA_ALIGN); /* TODO: is this a good initial size? */
	/* allocate the log buffer in advance though it may get reallocated
	 * in put_oocs and put_ooch in fmtout.c. this is to let the logging
	 * routine still function despite some side-effects when
	 * reallocation fails.
	 * +1 required for consistency with put_oocs and put_ooch in fmtout.c */
	mio->log.ptr = mio_allocmem(mio, (mio->log.capa + 1) * MIO_SIZEOF(*mio->log.ptr));
	if (MIO_UNLIKELY(!mio->log.ptr)) goto oops;

	/* initialize the system-side logging */
	if (MIO_UNLIKELY(mio_sys_init(mio) <= -1)) goto oops;
	sys_inited = 1;

	/* initialize the timer object - always reserve at least one job slot */
	if (tmrcapa <= 0) tmrcapa = 1;
	mio->tmr.jobs = mio_allocmem(mio, tmrcapa * MIO_SIZEOF(mio_tmrjob_t));
	if (MIO_UNLIKELY(!mio->tmr.jobs)) goto oops;
	mio->tmr.capa = tmrcapa;

	/* set up the device lists (active/halted/zombie), the completed-write
	 * queue and the active service list */
	MIO_DEVL_INIT (&mio->actdev);
	MIO_DEVL_INIT (&mio->hltdev);
	MIO_DEVL_INIT (&mio->zmbdev);
	MIO_CWQ_INIT (&mio->cwq);
	MIO_SVCL_INIT (&mio->actsvc);

	mio_sys_gettime (mio, &mio->init_time);
	return 0;

oops:
	/* roll back whatever was set up; safe because the object was zeroed first */
	if (mio->tmr.jobs) mio_freemem (mio, mio->tmr.jobs);
	if (sys_inited) mio_sys_fini (mio);
	if (mio->log.ptr) mio_freemem (mio, mio->log.ptr);
	mio->log.capa = 0;
	return -1;
}
2018-12-12 13:15:54 +00:00
/* Tear down a mio instance: drain cached cwq objects, stop services,
 * kill all devices (active, halted, zombie), then release the timer and
 * log buffers. Counterpart of mio_init(). */
void mio_fini (mio_t* mio)
{
	mio_dev_t* dev, * next_dev;
	mio_dev_t diehard;
	mio_oow_t i;

	/* clean up the free cwq list - these are cached, already-unlinked objects */
	for (i = 0; i < MIO_COUNTOF(mio->cwqfl); i++)
	{
		mio_cwq_t* cwq;
		while ((cwq = mio->cwqfl[i]))
		{
			mio->cwqfl[i] = cwq->q_next;
			mio_freemem (mio, cwq);
		}
	}

	/* kill services before killing devices */
	while (!MIO_SVCL_IS_EMPTY(&mio->actsvc))
	{
		mio_svc_t* svc = MIO_SVCL_FIRST_SVC(&mio->actsvc);
		if (svc->svc_stop)
		{
			/* the stop callback must unregister itself */
			svc->svc_stop (svc);
		}
		else
		{
			/* unregistration only if no stop callback is designated */
			MIO_SVCL_UNLINK_SVC (svc);
		}
	}

	/* kill all registered devices */
	while (!MIO_DEVL_IS_EMPTY(&mio->actdev))
	{
		mio_dev_kill (MIO_DEVL_FIRST_DEV(&mio->actdev));
	}

	/* kill all halted devices */
	while (!MIO_DEVL_IS_EMPTY(&mio->hltdev))
	{
		mio_dev_kill (MIO_DEVL_FIRST_DEV(&mio->hltdev));
	}

	/* clean up all zombie devices. a device that still refuses to die is
	 * moved to the private 'diehard' list and freed forcibly below. */
	MIO_DEVL_INIT (&diehard);
	for (dev = MIO_DEVL_FIRST_DEV(&mio->zmbdev); !MIO_DEVL_IS_NIL_DEV(&mio->zmbdev, dev); )
	{
		kill_and_free_device (dev, 1);
		if (MIO_DEVL_FIRST_DEV(&mio->zmbdev) == dev)
		{
			/* the device has not been freed. go on to the next one */
			next_dev = dev->dev_next;

			/* remove the device from the zombie device list */
			MIO_DEVL_UNLINK_DEV (dev);
			dev->dev_cap &= ~MIO_DEV_CAP_ZOMBIE;

			/* put it to a private list for aborting */
			MIO_DEVL_APPEND_DEV (&diehard, dev);

			dev = next_dev;
		}
		else dev = MIO_DEVL_FIRST_DEV(&mio->zmbdev);
	}

	while (!MIO_DEVL_IS_EMPTY(&diehard))
	{
		/* if the kill method returns failure, it can leak some resource
		 * because the device is freed regardless of the failure when 2
		 * is given to kill_and_free_device(). */
		dev = MIO_DEVL_FIRST_DEV(&diehard);
		MIO_ASSERT (mio, !(dev->dev_cap & (MIO_DEV_CAP_ACTIVE | MIO_DEV_CAP_HALTED | MIO_DEV_CAP_ZOMBIE)));
		MIO_DEVL_UNLINK_DEV (dev);
		kill_and_free_device (dev, 2);
	}

	/* purge scheduled timer jobs and kill the timer */
	mio_cleartmrjobs (mio);
	mio_freemem (mio, mio->tmr.jobs);

	mio_sys_fini (mio); /* finalize the system-dependent data */

	mio_freemem (mio, mio->log.ptr);
}
2019-01-27 02:09:22 +00:00
/* Set a runtime option identified by id from the value pointed to by 'value'.
 * Returns 0 on success; -1 with MIO_EINVAL for an unknown option id. */
int mio_setoption (mio_t* mio, mio_option_t id, const void* value)
{
	switch (id)
	{
		case MIO_TRAIT:
			mio->option.trait = *(mio_bitmask_t*)value;
			return 0;

		case MIO_LOG_MASK:
			mio->option.log_mask = *(mio_bitmask_t*)value;
			return 0;

		case MIO_LOG_MAXCAPA:
			mio->option.log_maxcapa = *(mio_oow_t*)value;
			return 0;
	}

	mio_seterrnum (mio, MIO_EINVAL);
	return -1;
}
/* Retrieve a runtime option identified by id into the location 'value'.
 * Returns 0 on success; -1 with MIO_EINVAL for an unknown option id. */
int mio_getoption (mio_t* mio, mio_option_t id, void* value)
{
	switch (id)
	{
		case MIO_TRAIT:
			*(mio_bitmask_t*)value = mio->option.trait;
			return 0;

		case MIO_LOG_MASK:
			*(mio_bitmask_t*)value = mio->option.log_mask;
			return 0;

		case MIO_LOG_MAXCAPA:
			*(mio_oow_t*)value = mio->option.log_maxcapa;
			return 0;
	}

	mio_seterrnum (mio, MIO_EINVAL);
	return -1;
}
2016-02-04 15:06:20 +00:00
2018-12-12 13:15:54 +00:00
/* Hook run once before the mio_loop() iteration starts. Currently a no-op
 * placeholder that always reports success. */
int mio_prologue (mio_t* mio)
{
	/* TODO: */
	return 0;
}
2018-12-12 13:15:54 +00:00
/* Hook run once after the mio_loop() iteration ends. Currently a no-op
 * placeholder. */
void mio_epilogue (mio_t* mio)
{
	/* TODO: */
}
2018-12-12 13:15:54 +00:00
/* Detach a write-queue entry from its queue, cancelling the associated
 * write-timeout timer job first if one is scheduled. */
static MIO_INLINE void unlink_wq (mio_t* mio, mio_wq_t* q)
{
	if (q->tmridx != MIO_TMRIDX_INVALID)
	{
		/* a timeout job is pending for this entry - cancel it. the timer
		 * is expected to reset tmridx via the registered idxptr. */
		mio_deltmrjob (mio, q->tmridx);
		MIO_ASSERT (mio, q->tmridx == MIO_TMRIDX_INVALID);
	}
	MIO_WQ_UNLINK (q);
}
2020-05-15 06:18:49 +00:00
/* Invoke on_write() callbacks for all completed write operations queued in
 * the global cwq. Small cwq objects are recycled onto a size-bucketed free
 * list; halting of a failing device is deferred until its entry is unlinked. */
static void fire_cwq_handlers (mio_t* mio)
{
	while (!MIO_CWQ_IS_EMPTY(&mio->cwq))
	{
		mio_cwq_t* cwq;
		mio_oow_t cwqfl_index;
		mio_dev_t* dev_to_halt;

		cwq = MIO_CWQ_HEAD(&mio->cwq);
		if (cwq->dev->dev_evcb->on_write(cwq->dev, cwq->olen, cwq->ctx, &cwq->dstaddr) <= -1)
		{
			MIO_DEBUG1 (mio, "MIO - Error returned by on_write() of device %p in cwq\n", cwq->dev);
			dev_to_halt = cwq->dev;
		}
		else
		{
			dev_to_halt = MIO_NULL;
		}

		cwq->dev->cw_count--;
		MIO_CWQ_UNLINK (cwq);

		cwqfl_index = MIO_ALIGN_POW2(cwq->dstaddr.len, MIO_CWQFL_ALIGN) / MIO_CWQFL_SIZE;
		if (cwqfl_index < MIO_COUNTOF(mio->cwqfl))
		{
			/* reuse the cwq object if its dstaddr fits a free-list bucket -
			 * chain it to the free list */
			cwq->q_next = mio->cwqfl[cwqfl_index];
			mio->cwqfl[cwqfl_index] = cwq;
		}
		else
		{
			/* TODO: more reuse of objects of different size? */
			mio_freemem (mio, cwq);
		}

		if (dev_to_halt) mio_dev_halt (dev_to_halt);
	}
}
2020-05-19 12:09:13 +00:00
/* Like fire_cwq_handlers() but only fires entries belonging to the given
 * device. When for_kill is non-zero, a failing on_write() does not halt the
 * device (the caller is already tearing it down). The caller must ensure
 * dev->cw_count > 0 before calling. */
static void fire_cwq_handlers_for_dev (mio_t* mio, mio_dev_t* dev, int for_kill)
{
	mio_cwq_t* cwq, * next;

	MIO_ASSERT (mio, dev->cw_count > 0); /* ensure to check dev->cw_count before calling this function */

	cwq = MIO_CWQ_HEAD(&mio->cwq);
	while (cwq != &mio->cwq)
	{
		next = MIO_CWQ_NEXT(cwq); /* capture before cwq may be unlinked/freed */
		if (cwq->dev == dev) /* TODO: THIS LOOP TOO INEFFICIENT??? MAINTAIN PER-DEVICE LIST OF CWQ? */
		{
			mio_dev_t* dev_to_halt;
			mio_oow_t cwqfl_index;

			if (cwq->dev->dev_evcb->on_write(cwq->dev, cwq->olen, cwq->ctx, &cwq->dstaddr) <= -1)
			{
				MIO_DEBUG1 (mio, "MIO - Error returned by on_write() of device %p in cwq\n", cwq->dev);
				dev_to_halt = cwq->dev;
			}
			else
			{
				dev_to_halt = MIO_NULL;
			}

			cwq->dev->cw_count--;
			MIO_CWQ_UNLINK (cwq);

			cwqfl_index = MIO_ALIGN_POW2(cwq->dstaddr.len, MIO_CWQFL_ALIGN) / MIO_CWQFL_SIZE;
			if (cwqfl_index < MIO_COUNTOF(mio->cwqfl))
			{
				/* recycle the cwq object - chain it to the matching free-list bucket */
				cwq->q_next = mio->cwqfl[cwqfl_index];
				mio->cwqfl[cwqfl_index] = cwq;
			}
			else
			{
				/* TODO: more reuse of objects of different size? */
				mio_freemem (mio, cwq);
			}

			if (!for_kill && dev_to_halt) mio_dev_halt (dev_to_halt);
		}
		cwq = next;
	}
}
2019-01-27 16:22:55 +00:00
/* Central per-device event dispatcher invoked by the multiplexer wait.
 * Processes, in order: the ready() pre-check, urgent data (PRI), pending
 * writes (OUT), reads (IN), then error/hangup handling. 'dev' is set to
 * MIO_NULL as soon as the device is halted so later stages skip it. */
static MIO_INLINE void handle_event (mio_t* mio, mio_dev_t* dev, int events, int rdhup)
{
	MIO_ASSERT (mio, mio == dev->mio);

	dev->dev_cap &= ~MIO_DEV_CAP_RENEW_REQUIRED;

	MIO_ASSERT (mio, mio == dev->mio);

	if (dev->dev_evcb->ready)
	{
		int x, xevents;

		xevents = events;
		if (rdhup) xevents |= MIO_DEV_EVENT_HUP;

		/* return value of ready()
		 *   <= -1 - failure. kill the device.
		 *   ==  0 - ok. but don't invoke recv() or send().
		 *   >=  1 - everything is ok. */
		x = dev->dev_evcb->ready(dev, xevents);
		if (x <= -1)
		{
			mio_dev_halt (dev);
			return;
		}
		else if (x == 0) goto skip_evcb;
	}

	if (dev && (events & MIO_DEV_EVENT_PRI))
	{
		/* urgent data */
		/* TODO: implement urgent data handling */
		/*x = dev->dev_mth->urgread(dev, mio->bugbuf, &len);*/
	}

	if (dev && (events & MIO_DEV_EVENT_OUT))
	{
		/* write pending requests */
		while (!MIO_WQ_IS_EMPTY(&dev->wq))
		{
			mio_wq_t* q;
			const mio_uint8_t* uptr;
			mio_iolen_t urem, ulen;
			int x;

			q = MIO_WQ_HEAD(&dev->wq);

			uptr = q->ptr;
			urem = q->len;

		send_leftover:
			ulen = urem;
			x = dev->dev_mth->write(dev, uptr, &ulen, &q->dstaddr);
			if (x <= -1)
			{
				mio_dev_halt (dev);
				dev = MIO_NULL;
				break;
			}
			else if (x == 0)
			{
				/* keep the left-over */
				MIO_MEMMOVE (q->ptr, uptr, urem);
				q->len = urem;
				break;
			}
			else
			{
				uptr += ulen;
				urem -= ulen;

				if (urem <= 0)
				{
					/* finished writing a single write request */
					int y, out_closed = 0;

					if (q->len <= 0 && (dev->dev_cap & MIO_DEV_CAP_STREAM))
					{
						/* it was a zero-length write request.
						 * for a stream, it is to close the output. */
						dev->dev_cap |= MIO_DEV_CAP_OUT_CLOSED;
						dev->dev_cap |= MIO_DEV_CAP_RENEW_REQUIRED;
						out_closed = 1;
					}

					unlink_wq (mio, q);
					y = dev->dev_evcb->on_write(dev, q->olen, q->ctx, &q->dstaddr);
					mio_freemem (mio, q);

					if (y <= -1)
					{
						MIO_DEBUG1 (mio, "MIO - Error returned by on_write() of device %p\n", dev);
						mio_dev_halt (dev);
						dev = MIO_NULL;
						break;
					}

					if (out_closed)
					{
						/* drain all pending requests.
						 * callbacks are skipped for drained requests */
						while (!MIO_WQ_IS_EMPTY(&dev->wq))
						{
							q = MIO_WQ_HEAD(&dev->wq);
							unlink_wq (mio, q);
							mio_freemem (mio, q);
						}
						break;
					}
				}
				else goto send_leftover;
			}
		}

		if (dev && MIO_WQ_IS_EMPTY(&dev->wq))
		{
			/* no pending request to write */
			if ((dev->dev_cap & MIO_DEV_CAP_IN_CLOSED) &&
			    (dev->dev_cap & MIO_DEV_CAP_OUT_CLOSED))
			{
				mio_dev_halt (dev);
				dev = MIO_NULL;
			}
			else
			{
				dev->dev_cap |= MIO_DEV_CAP_RENEW_REQUIRED;
			}
		}
	}

	if (dev && (events & MIO_DEV_EVENT_IN))
	{
		mio_devaddr_t srcaddr;
		mio_iolen_t len;
		int x;

		/* the devices are all non-blocking. read as much as possible
		 * if on_read callback returns 1 or greater. read only once
		 * if the on_read callback returns 0. */
		while (1)
		{
			len = MIO_COUNTOF(mio->bigbuf);
			x = dev->dev_mth->read(dev, mio->bigbuf, &len, &srcaddr);
			if (x <= -1)
			{
				mio_dev_halt (dev);
				dev = MIO_NULL;
				break;
			}

			if (dev->rtmridx != MIO_TMRIDX_INVALID)
			{
				/* delete the read timeout job on the device as the
				 * read operation will be reported below. */
				mio_tmrjob_t tmrjob;

				MIO_MEMSET (&tmrjob, 0, MIO_SIZEOF(tmrjob));
				tmrjob.ctx = dev;
				mio_gettime (mio, &tmrjob.when);
				MIO_ADD_NTIME (&tmrjob.when, &tmrjob.when, &dev->rtmout);
				tmrjob.handler = on_read_timeout;
				tmrjob.idxptr = &dev->rtmridx;

				mio_updtmrjob (mio, dev->rtmridx, &tmrjob);
				/*mio_deltmrjob (mio, dev->rtmridx);
				dev->rtmridx = MIO_TMRIDX_INVALID;*/
			}

			if (x == 0)
			{
				/* no data is available - EWOULDBLOCK or something similar */
				break;
			}
			else /*if (x >= 1) */
			{
				/* call on_write() callbacks enqueued for the device before calling on_read().
				 * if on_write() callback is delayed, there can be out-of-order execution
				 * between on_read() and on_write() callbacks. for instance, if a write request
				 * is started from within on_read() callback, and the input data is available
				 * in the next iteration of this loop, the on_read() callback is triggered
				 * before the on_write() callbacks scheduled before that on_read() callback. */
			#if 0
				if (dev->cw_count > 0)
				{
					fire_cwq_handlers_for_dev (mio, dev);
					/* it will still invoke the on_read() callback below even if
					 * the device gets halted inside fire_cwq_handlers_for_dev() */
				}
			#else
				/* currently fire_cwq_handlers_for_dev() scans the entire cwq list.
				 * i might as well trigger handlers for all devices */
				fire_cwq_handlers (mio);
			#endif

				if (len <= 0 && (dev->dev_cap & MIO_DEV_CAP_STREAM))
				{
					/* EOF received. for a stream device, a zero-length
					 * read is interpreted as EOF. */
					dev->dev_cap |= MIO_DEV_CAP_IN_CLOSED;
					dev->dev_cap |= MIO_DEV_CAP_RENEW_REQUIRED;

					/* call the on_read callback to report EOF */
					if (dev->dev_evcb->on_read(dev, mio->bigbuf, len, &srcaddr) <= -1 ||
					    (dev->dev_cap & MIO_DEV_CAP_OUT_CLOSED))
					{
						/* 1. input ended and its reporting failed or
						 * 2. input ended and no writing is possible */
						mio_dev_halt (dev);
						dev = MIO_NULL;
					}

					/* since EOF is received, reading can't be greedy */
					break;
				}
				else
				{
					int y;
					/* TODO: for a stream device, merge received data if bigbuf isn't full and fire the on_read callback
					 *       when x == 0 or <= -1. */

					/* data available */
					y = dev->dev_evcb->on_read(dev, mio->bigbuf, len, &srcaddr);
					if (y <= -1)
					{
						mio_dev_halt (dev);
						dev = MIO_NULL;
						break;
					}
					else if (y == 0)
					{
						/* don't be greedy. read only once
						 * for this loop iteration */
						break;
					}
				}
			}
		}
	}

	if (dev)
	{
		if (events & (MIO_DEV_EVENT_ERR | MIO_DEV_EVENT_HUP))
		{
			/* if error or hangup has been reported on the device,
			 * halt the device. this check is performed after
			 * EPOLLIN or EPOLLOUT check because EPOLLERR or EPOLLHUP
			 * can be set together with EPOLLIN or EPOLLOUT. */
			if (!(dev->dev_cap & MIO_DEV_CAP_IN_CLOSED))
			{
				/* this is simulated EOF. the INPUT side has not been closed on the device
				 * but there is the hangup/error event. */
				dev->dev_evcb->on_read (dev, MIO_NULL, -!!(events & MIO_DEV_EVENT_ERR), MIO_NULL);
				/* i don't care about the return value since the device will be halted below
				 * if both MIO_DEV_CAP_IN_CLOSE and MIO_DEV_CAP_OUT_CLOSED are set */
			}

			dev->dev_cap |= MIO_DEV_CAP_IN_CLOSED | MIO_DEV_CAP_OUT_CLOSED;
			dev->dev_cap |= MIO_DEV_CAP_RENEW_REQUIRED;
		}
		else if (dev && rdhup)
		{
			if (events & (MIO_DEV_EVENT_IN | MIO_DEV_EVENT_OUT | MIO_DEV_EVENT_PRI))
			{
				/* it may be a half-open state. don't do anything here
				 * to let the next read detect EOF */
			}
			else
			{
				dev->dev_cap |= MIO_DEV_CAP_IN_CLOSED | MIO_DEV_CAP_OUT_CLOSED;
				dev->dev_cap |= MIO_DEV_CAP_RENEW_REQUIRED;
			}
		}

		if ((dev->dev_cap & MIO_DEV_CAP_IN_CLOSED) &&
		    (dev->dev_cap & MIO_DEV_CAP_OUT_CLOSED))
		{
			mio_dev_halt (dev);
			dev = MIO_NULL;
		}
	}

skip_evcb:
	if (dev && (dev->dev_cap & MIO_DEV_CAP_RENEW_REQUIRED) && mio_dev_watch(dev, MIO_DEV_WATCH_RENEW, MIO_DEV_EVENT_IN) <= -1)
	{
		mio_dev_halt (dev);
		dev = MIO_NULL;
	}
}
2019-01-29 08:38:12 +00:00
/* Run one iteration of the event loop: fire completed-write callbacks,
 * run due timer jobs, wait on the multiplexer (if any device is active),
 * then reap halted devices. Returns 0 on success, -1 on multiplexer failure. */
int mio_exec (mio_t* mio)
{
	int ret = 0;

	/* execute callbacks for completed write operations */
	fire_cwq_handlers (mio);

	/* execute the scheduled jobs before checking devices with the
	 * multiplexer. the scheduled jobs can safely destroy the devices */
	mio_firetmrjobs (mio, MIO_NULL, MIO_NULL);

	/*fire_cwq_handlers (mio); <-- this may not be needed as it's called inside handle_event(). keep this line commented for now until i have new findings */

	if (!MIO_DEVL_IS_EMPTY(&mio->actdev))
	{
		/* wait on the multiplexer only if there is at least 1 active device */
		mio_ntime_t tmout;

		if (mio_gettmrtmout(mio, MIO_NULL, &tmout) <= -1)
		{
			/* defaults to 0 or 1 second if timeout can't be acquired.
			 * this timeout affects how fast the halted device will get killed.
			 * if there is a halted device, set timeout to 0. otherwise set it to 1 */
			tmout.sec = !!MIO_DEVL_IS_EMPTY(&mio->hltdev); /* TODO: don't use 1. make this longer value configurable */
			tmout.nsec = 0;
		}

		if (mio_sys_waitmux(mio, &tmout, handle_event) <= -1)
		{
			MIO_DEBUG0 (mio, "MIO - WARNING - Failed to wait on mutiplexer\n");
			ret = -1;
		}
	}

	/* kill all halted devices */
	while (!MIO_DEVL_IS_EMPTY(&mio->hltdev))
	{
		mio_dev_t* dev = MIO_DEVL_FIRST_DEV(&mio->hltdev);
		MIO_DEBUG1 (mio, "MIO - Killing HALTED device %p\n", dev);
		mio_dev_kill (dev);
	}

	return ret;
}
2018-12-12 13:15:54 +00:00
/* Request the event loop to stop with the given reason. mio_loop() checks
 * this flag between iterations. */
void mio_stop (mio_t* mio, mio_stopreq_t stopreq)
{
	mio->stopreq = stopreq;
}
2018-12-12 13:15:54 +00:00
/* Drive mio_exec() repeatedly until a stop is requested, all devices are
 * gone, or an iteration fails. Returns 0, or -1 if the prologue fails. */
int mio_loop (mio_t* mio)
{
	/* nothing to do if no device is active */
	if (MIO_DEVL_IS_EMPTY(&mio->actdev)) return 0;

	mio->stopreq = MIO_STOPREQ_NONE;

	if (mio_prologue(mio) <= -1) return -1;

	while (mio->stopreq == MIO_STOPREQ_NONE && !MIO_DEVL_IS_EMPTY(&mio->actdev))
	{
		if (mio_exec(mio) <= -1) break;
		/* you can do other things here */
	}

	mio_epilogue (mio);
	return 0;
}
2019-02-22 08:44:51 +00:00
/* Create a device object of dev_size bytes, run its make() callback, start
 * watching it, and link it onto the active device list. Returns the new
 * device or MIO_NULL on failure (with the error number set on mio). */
mio_dev_t* mio_dev_make (mio_t* mio, mio_oow_t dev_size, mio_dev_mth_t* dev_mth, mio_dev_evcb_t* dev_evcb, void* make_ctx)
{
	mio_dev_t* dev;

	if (dev_size < MIO_SIZEOF(mio_dev_t))
	{
		/* the requested size can't even hold the common device header */
		mio_seterrnum (mio, MIO_EINVAL);
		return MIO_NULL;
	}

	dev = (mio_dev_t*)mio_callocmem(mio, dev_size);
	if (MIO_UNLIKELY(!dev)) return MIO_NULL;

	dev->mio = mio;
	dev->dev_size = dev_size;
	/* default capability. dev->dev_mth->make() can change this.
	 * mio_dev_watch() is affected by the capability change. */
	dev->dev_cap = MIO_DEV_CAP_IN | MIO_DEV_CAP_OUT;
	dev->dev_mth = dev_mth;
	dev->dev_evcb = dev_evcb;
	MIO_INIT_NTIME (&dev->rtmout, 0, 0);
	dev->rtmridx = MIO_TMRIDX_INVALID;
	MIO_WQ_INIT (&dev->wq);
	dev->cw_count = 0;

	/* call the callback function first */
	mio_seterrnum (mio, MIO_ENOERR);
	if (dev->dev_mth->make(dev, make_ctx) <= -1)
	{
		/* provide a generic error when make() failed without setting one */
		if (mio->errnum == MIO_ENOERR) mio_seterrnum (mio, MIO_EDEVMAKE);
		goto oops;
	}

	/* the make callback must not change these fields */
	MIO_ASSERT (mio, dev->dev_mth == dev_mth);
	MIO_ASSERT (mio, dev->dev_evcb == dev_evcb);
	MIO_ASSERT (mio, dev->dev_prev == MIO_NULL);
	MIO_ASSERT (mio, dev->dev_next == MIO_NULL);

	/* set some internal capability bits according to the capabilities
	 * removed by the device making callback for convenience sake. */
	dev->dev_cap &= MIO_DEV_CAP_ALL_MASK; /* keep valid capability bits only. drop all internal-use bits */
	if (!(dev->dev_cap & MIO_DEV_CAP_IN)) dev->dev_cap |= MIO_DEV_CAP_IN_CLOSED;
	if (!(dev->dev_cap & MIO_DEV_CAP_OUT)) dev->dev_cap |= MIO_DEV_CAP_OUT_CLOSED;

	if (mio_dev_watch(dev, MIO_DEV_WATCH_START, 0) <= -1) goto oops_after_make;

	/* and place the new device object at the back of the active device list */
	MIO_DEVL_APPEND_DEV (&mio->actdev, dev);
	dev->dev_cap |= MIO_DEV_CAP_ACTIVE;

	return dev;

oops_after_make:
	if (kill_and_free_device(dev, 0) <= -1)
	{
		/* schedule a timer job that reattempts to destroy the device */
		if (schedule_kill_zombie_job(dev) <= -1)
		{
			/* job scheduling failed. i have no choice but to
			 * destroy the device now.
			 *
			 * NOTE: this while loop can block the process
			 *       if the kill method keep returning failure */
			while (kill_and_free_device(dev, 1) <= -1)
			{
				if (mio->stopreq != MIO_STOPREQ_NONE)
				{
					/* i can't wait until destruction attempt gets
					 * fully successful. there is a chance that some
					 * resources can leak inside the device */
					kill_and_free_device (dev, 2);
					break;
				}
			}
		}

		return MIO_NULL;
	}

oops:
	mio_freemem (mio, dev);
	return MIO_NULL;
}
2018-12-12 13:15:54 +00:00
/* Attempt to destroy a device via its kill method and release its memory.
 *
 * force: 0  - normal attempt. if the kill method fails, the device is parked
 *             on the zombie list (mio->zmbdev) and -1 is returned so the
 *             caller can retry later.
 *        1  - forced attempt. still returns -1 if the kill method fails.
 *        >=2 - free the memory even if the kill method fails. resources held
 *              inside the device may leak in this case.
 *
 * returns 0 when the device has been freed, -1 when the kill method failed
 * and force was less than 2.
 *
 * precondition: the device must have been unlinked from the active and the
 * halted lists already (asserted below). */
static int kill_and_free_device (mio_dev_t* dev, int force)
{
    mio_t* mio = dev->mio;

    MIO_ASSERT (mio, !(dev->dev_cap & MIO_DEV_CAP_ACTIVE));
    MIO_ASSERT (mio, !(dev->dev_cap & MIO_DEV_CAP_HALTED));

    if (dev->dev_mth->kill(dev, force) <= -1)
    {
        if (force >= 2) goto free_device;

        if (!(dev->dev_cap & MIO_DEV_CAP_ZOMBIE))
        {
            /* park the device on the zombie list for a later retry */
            MIO_DEVL_APPEND_DEV (&mio->zmbdev, dev);
            dev->dev_cap |= MIO_DEV_CAP_ZOMBIE;
        }

        return -1;
    }

free_device:
    if (dev->dev_cap & MIO_DEV_CAP_ZOMBIE)
    {
        /* detach it from the zombie device list */
        MIO_DEVL_UNLINK_DEV (dev);
        dev->dev_cap &= ~MIO_DEV_CAP_ZOMBIE;
    }

    mio_freemem (mio, dev);
    return 0;
}
2018-12-12 13:15:54 +00:00
/* Timer job handler that retries destruction of a zombie device.
 * scheduled by schedule_kill_zombie_job(). if the retry fails, it attempts
 * to reschedule itself; if even rescheduling fails, it spins on a forced
 * kill, escalating to force level 2 (free despite kill failure, possible
 * resource leak) when a stop has been requested on the mio instance. */
static void kill_zombie_job_handler (mio_t* mio, const mio_ntime_t* now, mio_tmrjob_t* job)
{
    mio_dev_t* dev = (mio_dev_t*)job->ctx;

    MIO_ASSERT (mio, dev->dev_cap & MIO_DEV_CAP_ZOMBIE);

    if (kill_and_free_device(dev, 0) <= -1)
    {
        if (schedule_kill_zombie_job(dev) <= -1)
        {
            /* i have no choice but to free up the device by force */
            while (kill_and_free_device(dev, 1) <= -1)
            {
                if (mio->stopreq != MIO_STOPREQ_NONE)
                {
                    /* i can't wait until the destruction attempt gets
                     * fully successful. there is a chance that some
                     * resources can leak inside the device */
                    kill_and_free_device (dev, 2);
                    break;
                }
            }
        }
    }
}
2018-12-12 13:15:54 +00:00
/* Install a one-shot timer job that retries destruction of a zombie device
 * a few seconds from now. returns 0 on success, -1 if the timer job could
 * not be installed. */
static int schedule_kill_zombie_job (mio_dev_t* dev)
{
    mio_t* mio = dev->mio;
    mio_tmrjob_t job;
    mio_ntime_t delay;

    /* retry roughly 3 seconds later. TODO: take it from configuration */
    MIO_INIT_NTIME (&delay, 3, 0);

    MIO_MEMSET (&job, 0, MIO_SIZEOF(job));
    job.ctx = dev;
    mio_gettime (mio, &job.when);
    MIO_ADD_NTIME (&job.when, &job.when, &delay);
    job.handler = kill_zombie_job_handler;
    /*job.idxptr = &rdev->tmridx_kill_zombie;*/

    return (mio_instmrjob(mio, &job) == MIO_TMRIDX_INVALID)? -1: 0;
}
2019-02-13 10:41:33 +00:00
/* Destroy a device immediately.
 * cancels the pending read timeout, flushes completed-write events, drops
 * all pending write requests without firing their on_write callbacks,
 * unlinks the device from whichever list it is on (active or halted),
 * stops event watching, and finally hands the device to
 * kill_and_free_device(). if destruction fails, the device becomes a
 * zombie and a retry job is scheduled; if even that fails, destruction is
 * forced (with possible resource leaks once a stop has been requested). */
void mio_dev_kill (mio_dev_t* dev)
{
    mio_t* mio = dev->mio;

    if (dev->dev_cap & MIO_DEV_CAP_ZOMBIE)
    {
        /* a zombie has already gone through the cleanup below -
         * jump straight to the destruction attempt */
        MIO_ASSERT (mio, MIO_WQ_IS_EMPTY(&dev->wq));
        MIO_ASSERT (mio, dev->cw_count == 0);
        MIO_ASSERT (mio, dev->rtmridx == MIO_TMRIDX_INVALID);
        goto kill_device;
    }

    if (dev->rtmridx != MIO_TMRIDX_INVALID)
    {
        /* cancel the pending read-timeout timer job */
        mio_deltmrjob (mio, dev->rtmridx);
        dev->rtmridx = MIO_TMRIDX_INVALID;
    }

    /* clear completed write event queues */
    if (dev->cw_count > 0) fire_cwq_handlers_for_dev (mio, dev, 1);

    /* clear pending write requests - won't fire on_write for pending write requests */
    while (!MIO_WQ_IS_EMPTY(&dev->wq))
    {
        mio_wq_t* q;
        q = MIO_WQ_HEAD(&dev->wq);
        unlink_wq (mio, q);
        mio_freemem (mio, q);
    }

    if (dev->dev_cap & MIO_DEV_CAP_HALTED)
    {
        /* this device is in the halted state.
         * unlink it from the halted device list */
        MIO_DEVL_UNLINK_DEV (dev);
        dev->dev_cap &= ~MIO_DEV_CAP_HALTED;
    }
    else
    {
        /* a non-zombie, non-halted device must be on the active list */
        MIO_ASSERT (mio, dev->dev_cap & MIO_DEV_CAP_ACTIVE);
        MIO_DEVL_UNLINK_DEV (dev);
        dev->dev_cap &= ~MIO_DEV_CAP_ACTIVE;
    }

    mio_dev_watch (dev, MIO_DEV_WATCH_STOP, 0);

kill_device:
    if (kill_and_free_device(dev, 0) <= -1)
    {
        /* kill_and_free_device() parks the device on the zombie list on failure */
        MIO_ASSERT (mio, dev->dev_cap & MIO_DEV_CAP_ZOMBIE);
        if (schedule_kill_zombie_job(dev) <= -1)
        {
            /* i have no choice but to free up the device by force */
            while (kill_and_free_device(dev, 1) <= -1)
            {
                if (mio->stopreq != MIO_STOPREQ_NONE)
                {
                    /* i can't wait until the destruction attempt gets
                     * fully successful. there is a chance that some
                     * resources can leak inside the device */
                    kill_and_free_device (dev, 2);
                    break;
                }
            }
        }
    }
}
2018-12-12 13:15:54 +00:00
/* Move an active device to the halted device list.
 * a device that is not on the active list is left untouched. actual
 * destruction happens later when the halted list is processed. */
void mio_dev_halt (mio_dev_t* dev)
{
    mio_t* mio = dev->mio;

    /* only an active device can transition to the halted state */
    if (!(dev->dev_cap & MIO_DEV_CAP_ACTIVE)) return;

    MIO_DEBUG1 (mio, "MIO - HALTING DEVICE %p\n", dev);

    /* delink the device object from the active device list */
    MIO_DEVL_UNLINK_DEV (dev);
    dev->dev_cap &= ~MIO_DEV_CAP_ACTIVE;

    /* place it at the back of the halted device list */
    MIO_DEVL_APPEND_DEV (&mio->hltdev, dev);
    dev->dev_cap |= MIO_DEV_CAP_HALTED;
}
2018-12-12 13:15:54 +00:00
/* Dispatch a device-specific control command to the device's ioctl method.
 * returns the method's result, or -1 with MIO_ENOIMPL set when the device
 * does not implement ioctl. */
int mio_dev_ioctl (mio_dev_t* dev, int cmd, void* arg)
{
    mio_t* mio = dev->mio;

    if (dev->dev_mth->ioctl) return dev->dev_mth->ioctl(dev, cmd, arg);

    mio_seterrnum (mio, MIO_ENOIMPL); /* TODO: different error code? */
    return -1;
}
2018-12-12 13:15:54 +00:00
/* Control event watching on a device in the system multiplexer.
 *
 * cmd:
 *   MIO_DEV_WATCH_START  - register the device; input watching is requested
 *                          by default (subject to the capability filters below).
 *   MIO_DEV_WATCH_RENEW  - update registration; input watching comes from
 *                          'events', output watching is derived from whether
 *                          the write queue is non-empty.
 *   MIO_DEV_WATCH_UPDATE - update registration exactly as 'events' requests.
 *   MIO_DEV_WATCH_STOP   - deregister the device.
 *
 * only MIO_DEV_EVENT_IN and MIO_DEV_EVENT_OUT are honored in 'events'.
 * on success, dev->dev_cap has its *_WATCHED bits refreshed to reflect the
 * new watching state. returns 0 on success, -1 on failure. */
int mio_dev_watch (mio_dev_t* dev, mio_dev_watch_cmd_t cmd, int events)
{
    mio_t* mio = dev->mio;
    int mux_cmd;
    int dev_cap;

    /* the virtual device doesn't perform actual I/O.
     * it's different from not having MIO_DEV_CAP_IN and MIO_DEV_CAP_OUT.
     * a non-virtual device without the capabilities still gets attention
     * of the system multiplexer for hangup and error. */
    if (dev->dev_cap & MIO_DEV_CAP_VIRTUAL) return 0;

    /*ev.data.ptr = dev;*/
    switch (cmd)
    {
        case MIO_DEV_WATCH_START:
            /* request input watching when a device is started.
             * if the device is set with MIO_DEV_CAP_IN_DISABLED and/or
             * is not set with MIO_DEV_CAP_IN, input watching is excluded
             * after this 'switch' block */
            events = MIO_DEV_EVENT_IN;
            mux_cmd = MIO_SYS_MUX_CMD_INSERT;
            break;

        case MIO_DEV_WATCH_RENEW:
            /* auto-renewal mode. input watching is taken from the events mask passed in.
             * output watching is requested only if there're enqueued data for writing.
             * if you want to enable input watching while renewing, call this function like this.
             *  mio_dev_watch (dev, MIO_DEV_WATCH_RENEW, MIO_DEV_EVENT_IN);
             * if you want input watching disabled while renewing, call this function like this.
             *  mio_dev_watch (dev, MIO_DEV_WATCH_RENEW, 0); */
            if (MIO_WQ_IS_EMPTY(&dev->wq)) events &= ~MIO_DEV_EVENT_OUT;
            else events |= MIO_DEV_EVENT_OUT;

            /* fall through */
        case MIO_DEV_WATCH_UPDATE:
            /* honor event watching requests as given by the caller */
            mux_cmd = MIO_SYS_MUX_CMD_UPDATE;
            break;

        case MIO_DEV_WATCH_STOP:
            if (!(dev->dev_cap & DEV_CAP_ALL_WATCHED)) return 0; /* the device is not being watched */
            events = 0; /* override events */
            mux_cmd = MIO_SYS_MUX_CMD_DELETE;
            dev_cap = dev->dev_cap & ~(DEV_CAP_ALL_WATCHED);
            /* deletion always goes to the multiplexer - skip the
             * watched-bit recomputation and the no-change shortcut */
            goto ctrl_mux;

        default:
            mio_seterrnum (dev->mio, MIO_EINVAL);
            return -1;
    }

    /* compute the new capability bits starting from the current ones with
     * all watched bits cleared */
    dev_cap = dev->dev_cap & ~(DEV_CAP_ALL_WATCHED);

    /* this function honors MIO_DEV_EVENT_IN and MIO_DEV_EVENT_OUT only
     * as valid input event bits. it intends to provide simple abstraction
     * by reducing the variety of event bits that the caller has to handle. */
    if ((events & MIO_DEV_EVENT_IN) && !(dev->dev_cap & (MIO_DEV_CAP_IN_CLOSED | MIO_DEV_CAP_IN_DISABLED)))
    {
        if (dev->dev_cap & MIO_DEV_CAP_IN)
        {
            if (dev->dev_cap & MIO_DEV_CAP_PRI) dev_cap |= MIO_DEV_CAP_PRI_WATCHED;
            dev_cap |= MIO_DEV_CAP_IN_WATCHED;
        }
    }

    if ((events & MIO_DEV_EVENT_OUT) && !(dev->dev_cap & MIO_DEV_CAP_OUT_CLOSED))
    {
        if (dev->dev_cap & MIO_DEV_CAP_OUT) dev_cap |= MIO_DEV_CAP_OUT_WATCHED;
    }

    if (mux_cmd == MIO_SYS_MUX_CMD_UPDATE && (dev_cap & DEV_CAP_ALL_WATCHED) == (dev->dev_cap & DEV_CAP_ALL_WATCHED))
    {
        /* no change in the watched capability bits. skip calling the
         * underlying multiplexer control (e.g. epoll_ctl) */
    }
    else
    {
    ctrl_mux:
        if (mio_sys_ctrlmux(mio, mux_cmd, dev, dev_cap) <= -1) return -1;
    }

    dev->dev_cap = dev_cap;
    return 0;
}
2016-01-28 16:44:47 +00:00
2019-01-17 08:09:19 +00:00
/* Timer job handler fired when a timed read has not completed in time.
 * reports MIO_ETMOUT to the device's on_read callback with a length of -1
 * and halts the device if the callback signals failure. */
static void on_read_timeout (mio_t* mio, const mio_ntime_t* now, mio_tmrjob_t* job)
{
    mio_dev_t* dev = (mio_dev_t*)job->ctx;
    int rc;

    mio_seterrnum (mio, MIO_ETMOUT);
    rc = dev->dev_evcb->on_read(dev, MIO_NULL, -1, MIO_NULL);

    /* the timer index must have been reset before this handler runs */
    MIO_ASSERT (mio, dev->rtmridx == MIO_TMRIDX_INVALID);

    if (rc <= -1) mio_dev_halt (dev);
}
/* Common implementation behind mio_dev_read() and mio_dev_timedread().
 * enables or disables input watching on the device and (re)arms an optional
 * read-timeout timer.
 *
 * enabled - non-zero to enable reading, 0 to disable.
 * tmout   - optional read timeout; a positive time installs a timer that
 *           fires on_read_timeout(). MIO_NULL or a non-positive time means
 *           no timeout.
 * rdctx   - unused at present.
 *
 * returns 0 on success, -1 on failure. */
static int __dev_read (mio_dev_t* dev, int enabled, const mio_ntime_t* tmout, void* rdctx)
{
    mio_t* mio = dev->mio;

    if (dev->dev_cap & MIO_DEV_CAP_IN_CLOSED)
    {
        mio_seterrbfmt (mio, MIO_ENOCAPA, "unable to read closed device");
        return -1;
    }

    if (enabled)
    {
        dev->dev_cap &= ~MIO_DEV_CAP_IN_DISABLED;
        /* only renew immediately if watching state actually changes */
        if (!(dev->dev_cap & MIO_DEV_CAP_IN_WATCHED)) goto renew_watch_now;
    }
    else
    {
        dev->dev_cap |= MIO_DEV_CAP_IN_DISABLED;
        if ((dev->dev_cap & MIO_DEV_CAP_IN_WATCHED)) goto renew_watch_now;
    }

    /* no immediate renewal needed - defer it to the event loop */
    dev->dev_cap |= MIO_DEV_CAP_RENEW_REQUIRED;
    goto update_timer;

renew_watch_now:
    if (mio_dev_watch(dev, MIO_DEV_WATCH_RENEW, MIO_DEV_EVENT_IN) <= -1) return -1;
    goto update_timer;

update_timer:
    if (dev->rtmridx != MIO_TMRIDX_INVALID)
    {
        /* read timeout already on the socket. remove it first */
        mio_deltmrjob (mio, dev->rtmridx);
        dev->rtmridx = MIO_TMRIDX_INVALID;
    }

    if (tmout && MIO_IS_POS_NTIME(tmout))
    {
        mio_tmrjob_t tmrjob;

        MIO_MEMSET (&tmrjob, 0, MIO_SIZEOF(tmrjob));
        tmrjob.ctx = dev;
        mio_gettime (mio, &tmrjob.when);
        MIO_ADD_NTIME (&tmrjob.when, &tmrjob.when, tmout);
        tmrjob.handler = on_read_timeout;
        tmrjob.idxptr = &dev->rtmridx;

        dev->rtmridx = mio_instmrjob (mio, &tmrjob);
        if (dev->rtmridx == MIO_TMRIDX_INVALID)
        {
            /* if timer registration fails, timeout will never be triggered */
            return -1;
        }
        /* remember the timeout so the timer can be re-armed later */
        dev->rtmout = *tmout;
    }

    return 0;
}
2018-12-12 13:15:54 +00:00
/* Enable or disable reading on the device without a read timeout.
 * returns 0 on success, -1 on failure. */
int mio_dev_read (mio_dev_t* dev, int enabled)
{
    return __dev_read(dev, enabled, MIO_NULL, MIO_NULL);
}
/* Enable or disable reading on the device with an optional read timeout.
 * returns 0 on success, -1 on failure. */
int mio_dev_timedread (mio_dev_t* dev, int enabled, const mio_ntime_t* tmout)
{
    return __dev_read(dev, enabled, tmout, MIO_NULL);
}
2018-12-12 13:15:54 +00:00
/* Timer job handler fired when an enqueued write request has not completed
 * in time. reports MIO_ETMOUT to the device's on_write callback with a
 * length of -1, removes and frees the write request, and halts the device
 * if the callback signals failure. */
static void on_write_timeout (mio_t* mio, const mio_ntime_t* now, mio_tmrjob_t* job)
{
    mio_wq_t* wq = (mio_wq_t*)job->ctx;
    mio_dev_t* dev = wq->dev;
    int rc;

    mio_seterrnum (mio, MIO_ETMOUT);
    rc = dev->dev_evcb->on_write(dev, -1, wq->ctx, &wq->dstaddr);

    /* the timer index must have been reset before this handler runs */
    MIO_ASSERT (mio, wq->tmridx == MIO_TMRIDX_INVALID);

    MIO_WQ_UNLINK (wq);
    mio_freemem (mio, wq);

    if (rc <= -1) mio_dev_halt (dev);
}
2018-12-12 13:15:54 +00:00
/* Common implementation behind mio_dev_write() and mio_dev_timedwrite().
 * attempts an immediate write via the device's write method; whatever cannot
 * be written immediately is enqueued on the device's write queue (with an
 * optional timeout). fully-written data results in a completed-write event
 * being queued so the on_write callback fires later from the event loop
 * rather than recursively from here.
 *
 * len == 0 on a stream device closes the writing end (MIO_DEV_CAP_OUT_CLOSED).
 *
 * returns:
 *   0  - request enqueued (pending write or completed-write event)
 *  -1  - error
 * (the 'return 1' below is unreachable as both branches above it jump away;
 *  kept as-is to preserve behavior.) */
static int __dev_write (mio_dev_t* dev, const void* data, mio_iolen_t len, const mio_ntime_t* tmout, void* wrctx, const mio_devaddr_t* dstaddr)
{
    mio_t* mio = dev->mio;
    const mio_uint8_t* uptr;
    mio_iolen_t urem, ulen;
    mio_wq_t* q;
    mio_cwq_t* cwq;
    mio_oow_t cwq_extra_aligned, cwqfl_index;
    int x;

    if (dev->dev_cap & MIO_DEV_CAP_OUT_CLOSED)
    {
        mio_seterrbfmt (mio, MIO_ENOCAPA, "unable to write to closed device");
        return -1;
    }

    uptr = data;
    urem = len;

    if (!MIO_WQ_IS_EMPTY(&dev->wq))
    {
        /* the writing queue is not empty.
         * enqueue this request immediately */
        goto enqueue_data;
    }

    if (dev->dev_cap & MIO_DEV_CAP_STREAM)
    {
        /* use the do..while() loop to be able to send a zero-length data */
        do
        {
            ulen = urem;
            x = dev->dev_mth->write(dev, data, &ulen, dstaddr);
            if (x <= -1) return -1;
            else if (x == 0)
            {
                /* [NOTE]
                 * the write queue is empty at this moment. a zero-length
                 * request for a stream device can still get enqueued if the
                 * write callback returns 0 though i can't figure out if there
                 * is a compelling reason to do so
                 */
                goto enqueue_data; /* enqueue remaining data */
            }
            else
            {
                /* the write callback should return at most the number of requested
                 * bytes. but returning more is harmless as urem is of a signed type.
                 * for a zero-length request, it's necessary to return at least 1
                 * to indicate successful acknowledgement. otherwise, it gets enqueued
                 * as shown in the 'if' block right above. */
                urem -= ulen;
                uptr += ulen;
            }
        }
        while (urem > 0);

        if (len <= 0) /* original length */
        {
            /* a zero-length writing request is to close the writing end. this causes further write request to fail */
            dev->dev_cap |= MIO_DEV_CAP_OUT_CLOSED;
        }

        /* if i trigger the write completion callback here, the performance
         * may increase, but there can be annoying recursion issues if the
         * callback requests another writing operation. it's imperative to
         * delay the callback until this write function is finished.
         * ---> if (dev->dev_evcb->on_write(dev, len, wrctx, dstaddr) <= -1) return -1; */
        goto enqueue_completed_write;
    }
    else
    {
        ulen = urem;

        x = dev->dev_mth->write(dev, data, &ulen, dstaddr);
        if (x <= -1) return -1;
        else if (x == 0) goto enqueue_data;

        /* partial writing is still considered ok for a non-stream device. */

        /* read the comment in the 'if' block above for why i enqueue the write completion event
         * instead of calling the event callback here...
         * ---> if (dev->dev_evcb->on_write(dev, ulen, wrctx, dstaddr) <= -1) return -1; */
        goto enqueue_completed_write;
    }

    return 1; /* written immediately and called on_write callback */

enqueue_data:
    if (dev->dev_cap & MIO_DEV_CAP_OUT_UNQUEUEABLE)
    {
        /* writing queuing is not requested. so return failure */
        mio_seterrbfmt (mio, MIO_ENOCAPA, "device incapable of queuing");
        return -1;
    }

    /* queue the remaining data. the queue node, the destination address and
     * the payload live in one allocation: [mio_wq_t][dstaddr bytes][data] */
    q = (mio_wq_t*)mio_allocmem(mio, MIO_SIZEOF(*q) + (dstaddr? dstaddr->len: 0) + urem);
    if (!q) return -1;

    q->tmridx = MIO_TMRIDX_INVALID;
    q->dev = dev;
    q->ctx = wrctx;

    if (dstaddr)
    {
        q->dstaddr.ptr = (mio_uint8_t*)(q + 1);
        q->dstaddr.len = dstaddr->len;
        MIO_MEMCPY (q->dstaddr.ptr, dstaddr->ptr, dstaddr->len);
    }
    else
    {
        q->dstaddr.len = 0;
    }

    q->ptr = (mio_uint8_t*)(q + 1) + q->dstaddr.len;
    q->len = urem;
    q->olen = len; /* original request length, for the eventual on_write report */
    MIO_MEMCPY (q->ptr, uptr, urem);

    if (tmout && MIO_IS_POS_NTIME(tmout))
    {
        mio_tmrjob_t tmrjob;

        MIO_MEMSET (&tmrjob, 0, MIO_SIZEOF(tmrjob));
        tmrjob.ctx = q;
        mio_gettime (mio, &tmrjob.when);
        MIO_ADD_NTIME (&tmrjob.when, &tmrjob.when, tmout);
        tmrjob.handler = on_write_timeout;
        tmrjob.idxptr = &q->tmridx;

        q->tmridx = mio_instmrjob (mio, &tmrjob);
        if (q->tmridx == MIO_TMRIDX_INVALID)
        {
            mio_freemem (mio, q);
            return -1;
        }
    }

    MIO_WQ_ENQ (&dev->wq, q);
    if (!(dev->dev_cap & MIO_DEV_CAP_OUT_WATCHED))
    {
        /* if output is not being watched, arrange to do so */
        if (mio_dev_watch(dev, MIO_DEV_WATCH_RENEW, MIO_DEV_EVENT_IN) <= -1)
        {
            unlink_wq (mio, q);
            mio_freemem (mio, q);
            return -1;
        }
    }

    return 0; /* request pushed to a write queue. */

enqueue_completed_write:
    /* queue a completed-write event so on_write fires from the event loop.
     * cwq objects are recycled through per-size free lists (mio->cwqfl)
     * indexed by the aligned size of the trailing destination address. */
    cwq_extra_aligned = (dstaddr? dstaddr->len: 0);
    cwq_extra_aligned = MIO_ALIGN_POW2(cwq_extra_aligned, MIO_CWQFL_ALIGN);
    cwqfl_index = cwq_extra_aligned / MIO_CWQFL_SIZE;

    if (cwqfl_index < MIO_COUNTOF(mio->cwqfl) && mio->cwqfl[cwqfl_index])
    {
        /* take an available cwq object from the free cwq list */
        cwq = dev->mio->cwqfl[cwqfl_index];
        dev->mio->cwqfl[cwqfl_index] = cwq->q_next;
    }
    else
    {
        cwq = (mio_cwq_t*)mio_allocmem(mio, MIO_SIZEOF(*cwq) + cwq_extra_aligned);
        if (MIO_UNLIKELY(!cwq)) return -1;
    }

    MIO_MEMSET (cwq, 0, MIO_SIZEOF(*cwq));
    cwq->dev = dev;
    cwq->ctx = wrctx;
    if (dstaddr)
    {
        cwq->dstaddr.ptr = (mio_uint8_t*)(cwq + 1);
        cwq->dstaddr.len = dstaddr->len;
        MIO_MEMCPY (cwq->dstaddr.ptr, dstaddr->ptr, dstaddr->len);
    }
    else
    {
        cwq->dstaddr.len = 0;
    }

    cwq->olen = len;

    MIO_CWQ_ENQ (&dev->mio->cwq, cwq);
    dev->cw_count++; /* increment the number of complete write operations */
    return 0;
}
2020-02-22 18:24:49 +00:00
/* Common implementation behind mio_dev_writev() and mio_dev_timedwritev().
 * vectored counterpart of __dev_write(): attempts an immediate write via the
 * device's writev method; whatever cannot be written immediately is flattened
 * into the device's write queue (with an optional timeout). fully-written
 * data results in a completed-write event being queued so the on_write
 * callback fires later from the event loop rather than recursively.
 *
 * iov may be temporarily adjusted for partial writes; the modified entry is
 * restored from a backup before the function proceeds to the queueing paths.
 * iovcnt == 0 on a stream device closes the writing end.
 *
 * returns 0 on success (pending or completed event enqueued), -1 on error.
 *
 * BUGFIX: 'urem' was previously initialized only in the queue-not-empty
 * branch, so 'urem -= dcnt' in both immediate-write paths read an
 * uninitialized variable (undefined behavior, corrupting the enqueued
 * remainder length). it is now primed from 'len' up front. */
static int __dev_writev (mio_dev_t* dev, mio_iovec_t* iov, mio_iolen_t iovcnt, const mio_ntime_t* tmout, void* wrctx, const mio_devaddr_t* dstaddr)
{
    mio_t* mio = dev->mio;
    mio_iolen_t urem, len;
    mio_iolen_t index = 0, i, j;
    mio_wq_t* q;
    mio_cwq_t* cwq;
    mio_oow_t cwq_extra_aligned, cwqfl_index;
    int x;

    if (dev->dev_cap & MIO_DEV_CAP_OUT_CLOSED)
    {
        mio_seterrbfmt (mio, MIO_ENOCAPA, "unable to write to closed device");
        return -1;
    }

    len = 0;
    for (i = 0; i < iovcnt; i++) len += iov[i].iov_len;
    urem = len; /* remaining byte count - must be primed before the immediate-write paths subtract from it */

    if (!MIO_WQ_IS_EMPTY(&dev->wq))
    {
        /* the writing queue is not empty.
         * enqueue this request immediately */
        goto enqueue_data;
    }

    if (dev->dev_cap & MIO_DEV_CAP_STREAM)
    {
        /* use the do..while() loop to be able to send a zero-length data */
        mio_iolen_t backup_index = -1, dcnt;
        mio_iovec_t backup;

        do
        {
            dcnt = iovcnt - index;
            x = dev->dev_mth->writev(dev, &iov[index], &dcnt, dstaddr);
            if (x <= -1) return -1;
            else if (x == 0)
            {
                /* [NOTE]
                 * the write queue is empty at this moment. a zero-length
                 * request for a stream device can still get enqueued if the
                 * write callback returns 0 though i can't figure out if there
                 * is a compelling reason to do so
                 */
                goto enqueue_data; /* enqueue remaining data */
            }

            /* dcnt now holds the number of bytes written. consume fully
             * written vector entries, then shift the partially written one */
            urem -= dcnt;
            while (index < iovcnt && (mio_oow_t)dcnt >= iov[index].iov_len)
                dcnt -= iov[index++].iov_len;

            if (index == iovcnt) break;

            if (backup_index != index)
            {
                /* about to modify a new entry - restore the previous one
                 * and back up this one so iov can be restored later */
                if (backup_index >= 0) iov[backup_index] = backup;
                backup = iov[index];
                backup_index = index;
            }

            iov[index].iov_ptr = (void*)((mio_uint8_t*)iov[index].iov_ptr + dcnt);
            iov[index].iov_len -= dcnt;
        }
        while (1);

        if (backup_index >= 0) iov[backup_index] = backup;

        if (iovcnt <= 0) /* original vector count */
        {
            /* a zero-length writing request is to close the writing end. this causes further write request to fail */
            dev->dev_cap |= MIO_DEV_CAP_OUT_CLOSED;
        }

        /* if i trigger the write completion callback here, the performance
         * may increase, but there can be annoying recursion issues if the
         * callback requests another writing operation. it's imperative to
         * delay the callback until this write function is finished.
         * ---> if (dev->dev_evcb->on_write(dev, len, wrctx, dstaddr) <= -1) return -1; */
        goto enqueue_completed_write;
    }
    else
    {
        mio_iolen_t dcnt;

        dcnt = iovcnt;
        x = dev->dev_mth->writev(dev, iov, &dcnt, dstaddr);
        if (x <= -1) return -1;
        else if (x == 0) goto enqueue_data;

        urem -= dcnt;
        /* partial writing is still considered ok for a non-stream device. */

        /* read the comment in the 'if' block above for why i enqueue the write completion event
         * instead of calling the event callback here...
         * ---> if (dev->dev_evcb->on_write(dev, ulen, wrctx, dstaddr) <= -1) return -1; */
        goto enqueue_completed_write;
    }

    return 1; /* written immediately and called on_write callback */

enqueue_data:
    if (dev->dev_cap & MIO_DEV_CAP_OUT_UNQUEUEABLE)
    {
        /* writing queuing is not requested. so return failure */
        mio_seterrbfmt (mio, MIO_ENOCAPA, "device incapable of queuing");
        return -1;
    }

    /* queue the remaining data. the queue node, the destination address and
     * the flattened payload live in one allocation */
    q = (mio_wq_t*)mio_allocmem(mio, MIO_SIZEOF(*q) + (dstaddr? dstaddr->len: 0) + urem);
    if (!q) return -1;

    q->tmridx = MIO_TMRIDX_INVALID;
    q->dev = dev;
    q->ctx = wrctx;

    if (dstaddr)
    {
        q->dstaddr.ptr = (mio_uint8_t*)(q + 1);
        q->dstaddr.len = dstaddr->len;
        MIO_MEMCPY (q->dstaddr.ptr, dstaddr->ptr, dstaddr->len);
    }
    else
    {
        q->dstaddr.len = 0;
    }

    q->ptr = (mio_uint8_t*)(q + 1) + q->dstaddr.len;
    q->len = urem;
    q->olen = len; /* original total length, for the eventual on_write report */

    /* flatten the unwritten vector entries into the queue node */
    for (i = index, j = 0; i < iovcnt; i++)
    {
        MIO_MEMCPY (&q->ptr[j], iov[i].iov_ptr, iov[i].iov_len);
        j += iov[i].iov_len;
    }

    if (tmout && MIO_IS_POS_NTIME(tmout))
    {
        mio_tmrjob_t tmrjob;

        MIO_MEMSET (&tmrjob, 0, MIO_SIZEOF(tmrjob));
        tmrjob.ctx = q;
        mio_gettime (mio, &tmrjob.when);
        MIO_ADD_NTIME (&tmrjob.when, &tmrjob.when, tmout);
        tmrjob.handler = on_write_timeout;
        tmrjob.idxptr = &q->tmridx;

        q->tmridx = mio_instmrjob (mio, &tmrjob);
        if (q->tmridx == MIO_TMRIDX_INVALID)
        {
            mio_freemem (mio, q);
            return -1;
        }
    }

    MIO_WQ_ENQ (&dev->wq, q);
    if (!(dev->dev_cap & MIO_DEV_CAP_OUT_WATCHED))
    {
        /* if output is not being watched, arrange to do so */
        if (mio_dev_watch(dev, MIO_DEV_WATCH_RENEW, MIO_DEV_EVENT_IN) <= -1)
        {
            unlink_wq (mio, q);
            mio_freemem (mio, q);
            return -1;
        }
    }

    return 0; /* request pushed to a write queue. */

enqueue_completed_write:
    /* queue a completed-write event so on_write fires from the event loop.
     * cwq objects are recycled through per-size free lists (mio->cwqfl)
     * indexed by the aligned size of the trailing destination address. */
    cwq_extra_aligned = (dstaddr? dstaddr->len: 0);
    cwq_extra_aligned = MIO_ALIGN_POW2(cwq_extra_aligned, MIO_CWQFL_ALIGN);
    cwqfl_index = cwq_extra_aligned / MIO_CWQFL_SIZE;

    if (cwqfl_index < MIO_COUNTOF(mio->cwqfl) && mio->cwqfl[cwqfl_index])
    {
        /* take an available cwq object from the free cwq list */
        cwq = dev->mio->cwqfl[cwqfl_index];
        dev->mio->cwqfl[cwqfl_index] = cwq->q_next;
    }
    else
    {
        cwq = (mio_cwq_t*)mio_allocmem(mio, MIO_SIZEOF(*cwq) + cwq_extra_aligned);
        if (!cwq) return -1;
    }

    MIO_MEMSET (cwq, 0, MIO_SIZEOF(*cwq));
    cwq->dev = dev;
    cwq->ctx = wrctx;
    if (dstaddr)
    {
        cwq->dstaddr.ptr = (mio_uint8_t*)(cwq + 1);
        cwq->dstaddr.len = dstaddr->len;
        MIO_MEMCPY (cwq->dstaddr.ptr, dstaddr->ptr, dstaddr->len);
    }
    else
    {
        cwq->dstaddr.len = 0;
    }

    cwq->olen = len;

    MIO_CWQ_ENQ (&dev->mio->cwq, cwq);
    dev->cw_count++; /* increment the number of complete write operations */
    return 0;
}
2018-12-12 13:15:54 +00:00
/* Write a single buffer to the device without a timeout.
 * see __dev_write() for the queueing semantics and return values. */
int mio_dev_write (mio_dev_t* dev, const void* data, mio_iolen_t len, void* wrctx, const mio_devaddr_t* dstaddr)
{
    return __dev_write(dev, data, len, MIO_NULL, wrctx, dstaddr);
}
2020-02-22 18:24:49 +00:00
/* Write a vector of buffers to the device without a timeout.
 * see __dev_writev() for the queueing semantics and return values. */
int mio_dev_writev (mio_dev_t* dev, mio_iovec_t* iov, mio_iolen_t iovcnt, void* wrctx, const mio_devaddr_t* dstaddr)
{
    return __dev_writev(dev, iov, iovcnt, MIO_NULL, wrctx, dstaddr);
}
2018-12-12 13:15:54 +00:00
/* Write a single buffer to the device with an optional timeout for the
 * enqueued remainder. see __dev_write() for details. */
int mio_dev_timedwrite (mio_dev_t* dev, const void* data, mio_iolen_t len, const mio_ntime_t* tmout, void* wrctx, const mio_devaddr_t* dstaddr)
{
    return __dev_write(dev, data, len, tmout, wrctx, dstaddr);
}
2016-01-31 02:05:39 +00:00
2020-02-22 18:24:49 +00:00
/* Write a vector of buffers to the device with an optional timeout for the
 * enqueued remainder. see __dev_writev() for details. */
int mio_dev_timedwritev (mio_dev_t* dev, mio_iovec_t* iov, mio_iolen_t iovcnt, const mio_ntime_t* tmout, void* wrctx, const mio_devaddr_t* dstaddr)
{
    return __dev_writev(dev, iov, iovcnt, tmout, wrctx, dstaddr);
}
2019-01-24 09:53:10 +00:00
/* -------------------------------------------------------------------------- */
2019-01-30 10:18:58 +00:00
/* Get the current time relative to mio initialization.
 * mio_init() records the start-up time in mio->init_time; the value
 * produced here is offset by that, keeping the number small so it can
 * be held in a modest integer with virtually no overflow risk. */
void mio_gettime (mio_t* mio, mio_ntime_t* now)
{
	mio_sys_gettime(mio, now);
	/* now = now - init_time */
	MIO_SUB_NTIME(now, now, &mio->init_time);
}
/* -------------------------------------------------------------------------- */
2019-01-24 09:53:10 +00:00
/* Allocate 'size' bytes via the configured memory manager.
 * Returns MIO_NULL on failure with the error number set to MIO_ESYSMEM. */
void* mio_allocmem (mio_t* mio, mio_oow_t size)
{
	void* mem = MIO_MMGR_ALLOC(mio->_mmgr, size);
	if (!mem) mio_seterrnum(mio, MIO_ESYSMEM);
	return mem;
}
/* Allocate 'size' bytes via the configured memory manager and
 * zero-fill them. Returns MIO_NULL on failure with the error number
 * set to MIO_ESYSMEM. */
void* mio_callocmem (mio_t* mio, mio_oow_t size)
{
	void* mem = MIO_MMGR_ALLOC(mio->_mmgr, size);
	if (mem) MIO_MEMSET(mem, 0, size);
	else mio_seterrnum(mio, MIO_ESYSMEM);
	return mem;
}
/* Resize a block previously obtained from mio_allocmem()/mio_callocmem().
 * Returns the (possibly moved) block, or MIO_NULL on failure with the
 * error number set to MIO_ESYSMEM. Only the local parameter copy is
 * overwritten on failure; the caller's original pointer stays valid. */
void* mio_reallocmem (mio_t* mio, void* ptr, mio_oow_t size)
{
	void* newptr = MIO_MMGR_REALLOC(mio->_mmgr, ptr, size);
	if (!newptr) mio_seterrnum(mio, MIO_ESYSMEM);
	return newptr;
}
/* Release a block obtained from the mio_*mem() allocation functions. */
void mio_freemem (mio_t* mio, void* ptr)
{
	MIO_MMGR_FREE(mio->_mmgr, ptr);
}
2020-05-01 14:00:27 +00:00
/* ------------------------------------------------------------------------ */
/* accumulation context used by mio_vfmttoucstr() when formatting into
 * a caller-supplied wide-character buffer */
struct fmt_uch_buf_t
{
	mio_t* mio;      /* may be MIO_NULL - the put callbacks check before use */
	mio_uch_t* ptr;  /* destination buffer */
	mio_oow_t len;   /* number of characters written so far */
	mio_oow_t capa;  /* capacity excluding the reserved null-terminator slot */
};
typedef struct fmt_uch_buf_t fmt_uch_buf_t;
/* Formatting callback: convert 'len' multi-byte characters to wide
 * characters and append them to the accumulation buffer.
 * Returns 1 to continue, 0 to stop (destination full), -1 on a hard
 * conversion error (error number set to MIO_EECERR when a mio context
 * is available). */
static int fmt_put_bchars_to_uch_buf (mio_fmtout_t* fmtout, const mio_bch_t* ptr, mio_oow_t len)
{
	fmt_uch_buf_t* b = (fmt_uch_buf_t*)fmtout->ctx;
	mio_oow_t bcslen, ucslen;
	int n;

	bcslen = len;
	ucslen = b->capa - b->len;
	/* fall back to the UTF-8 cmgr when no mio context is attached */
	n = mio_conv_bchars_to_uchars_with_cmgr(ptr, &bcslen, &b->ptr[b->len], &ucslen, (b->mio? b->mio->_cmgr: mio_get_utf8_cmgr()), 1);
	b->len += ucslen; /* count whatever got converted even on partial failure */
	if (n <= -1)
	{
		if (n == -2)
		{
			return 0; /* buffer full. stop */
		}
		else
		{
			/* BUG FIX: b->mio may be MIO_NULL (see the cmgr fallback above
			 * and the guard in fmt_put_uchars_to_uch_buf) - guard before
			 * setting the error number to avoid a null dereference. */
			if (b->mio) mio_seterrnum(b->mio, MIO_EECERR);
			return -1;
		}
	}

	return 1; /* success. carry on */
}
/* Formatting callback: append 'len' wide characters to the
 * accumulation buffer. Returns 1 to continue, 0 to stop when the
 * destination cannot hold everything (error number set to MIO_EBUFFULL
 * when a mio context is available). */
static int fmt_put_uchars_to_uch_buf (mio_fmtout_t* fmtout, const mio_uch_t* ptr, mio_oow_t len)
{
	fmt_uch_buf_t* b = (fmt_uch_buf_t*)fmtout->ctx;
	mio_oow_t ncopied;

	/* mio_copy_uchars_to_ucstr() null-terminates the destination,
	 * so hand it the restored size including the terminator slot */
	ncopied = mio_copy_uchars_to_ucstr(&b->ptr[b->len], b->capa - b->len + 1, ptr, len);
	b->len += ncopied;

	if (ncopied < len)
	{
		if (b->mio) mio_seterrnum(b->mio, MIO_EBUFFULL);
		return 0; /* stop. insufficient buffer */
	}

	return 1; /* success */
}
/* Format a wide-character string into 'buf' (capacity 'bufsz'
 * including the null terminator) using a va_list.
 * Returns the number of characters written excluding the terminator,
 * 0 when bufsz is 0, or (mio_oow_t)-1 on a formatting failure. */
mio_oow_t mio_vfmttoucstr (mio_t* mio, mio_uch_t* buf, mio_oow_t bufsz, const mio_uch_t* fmt, va_list ap)
{
	mio_fmtout_t fmtout;
	fmt_uch_buf_t outbuf;

	if (bufsz <= 0) return 0;

	MIO_MEMSET(&outbuf, 0, MIO_SIZEOF(outbuf));
	outbuf.mio = mio;
	outbuf.ptr = buf;
	outbuf.capa = bufsz - 1; /* reserve one slot for the terminator */

	MIO_MEMSET(&fmtout, 0, MIO_SIZEOF(fmtout));
	fmtout.putbchars = fmt_put_bchars_to_uch_buf;
	fmtout.putuchars = fmt_put_uchars_to_uch_buf;
	fmtout.ctx = &outbuf;

	if (mio_ufmt_outv(&fmtout, fmt, ap) <= -1) return -1;

	buf[outbuf.len] = '\0';
	return outbuf.len;
}
/* Variadic front-end to mio_vfmttoucstr(). */
mio_oow_t mio_fmttoucstr (mio_t* mio, mio_uch_t* buf, mio_oow_t bufsz, const mio_uch_t* fmt, ...)
{
	va_list ap;
	mio_oow_t n;

	va_start(ap, fmt);
	n = mio_vfmttoucstr(mio, buf, bufsz, fmt, ap);
	va_end(ap);

	return n;
}
/* ------------------------------------------------------------------------ */
/* accumulation context used by mio_vfmttobcstr() when formatting into
 * a caller-supplied multi-byte character buffer */
struct fmt_bch_buf_t
{
	mio_t* mio;      /* may be MIO_NULL - the put callbacks check before use */
	mio_bch_t* ptr;  /* destination buffer */
	mio_oow_t len;   /* number of characters written so far */
	mio_oow_t capa;  /* capacity excluding the reserved null-terminator slot */
};
typedef struct fmt_bch_buf_t fmt_bch_buf_t;
/* Formatting callback: append 'len' multi-byte characters to the
 * accumulation buffer. Returns 1 to continue, 0 to stop when the
 * destination cannot hold everything (error number set to MIO_EBUFFULL
 * when a mio context is available). */
static int fmt_put_bchars_to_bch_buf (mio_fmtout_t* fmtout, const mio_bch_t* ptr, mio_oow_t len)
{
	fmt_bch_buf_t* b = (fmt_bch_buf_t*)fmtout->ctx;
	mio_oow_t ncopied;

	/* mio_copy_bchars_to_bcstr() null-terminates the destination,
	 * so hand it the restored size including the terminator slot */
	ncopied = mio_copy_bchars_to_bcstr(&b->ptr[b->len], b->capa - b->len + 1, ptr, len);
	b->len += ncopied;

	if (ncopied < len)
	{
		if (b->mio) mio_seterrnum(b->mio, MIO_EBUFFULL);
		return 0; /* stop. insufficient buffer */
	}

	return 1; /* success */
}
/* Formatting callback: convert 'len' wide characters to multi-byte
 * characters and append them to the accumulation buffer.
 * Returns 1 to continue, 0 to stop (destination full), -1 on a hard
 * conversion error (error number set to MIO_EECERR when a mio context
 * is available). */
static int fmt_put_uchars_to_bch_buf (mio_fmtout_t* fmtout, const mio_uch_t* ptr, mio_oow_t len)
{
	fmt_bch_buf_t* b = (fmt_bch_buf_t*)fmtout->ctx;
	mio_oow_t bcslen, ucslen;
	int n;

	bcslen = b->capa - b->len;
	ucslen = len;
	/* fall back to the UTF-8 cmgr when no mio context is attached */
	n = mio_conv_uchars_to_bchars_with_cmgr(ptr, &ucslen, &b->ptr[b->len], &bcslen, (b->mio? b->mio->_cmgr: mio_get_utf8_cmgr()));
	b->len += bcslen; /* count whatever got converted even on partial failure */
	if (n <= -1)
	{
		if (n == -2)
		{
			return 0; /* buffer full. stop */
		}
		else
		{
			/* BUG FIX: b->mio may be MIO_NULL (see the cmgr fallback above
			 * and the guard in fmt_put_bchars_to_bch_buf) - guard before
			 * setting the error number to avoid a null dereference. */
			if (b->mio) mio_seterrnum(b->mio, MIO_EECERR);
			return -1;
		}
	}

	return 1; /* success. carry on */
}
/* Format a multi-byte character string into 'buf' (capacity 'bufsz'
 * including the null terminator) using a va_list.
 * Returns the number of characters written excluding the terminator,
 * 0 when bufsz is 0, or (mio_oow_t)-1 on a formatting failure. */
mio_oow_t mio_vfmttobcstr (mio_t* mio, mio_bch_t* buf, mio_oow_t bufsz, const mio_bch_t* fmt, va_list ap)
{
	mio_fmtout_t fmtout;
	fmt_bch_buf_t outbuf;

	if (bufsz <= 0) return 0;

	MIO_MEMSET(&outbuf, 0, MIO_SIZEOF(outbuf));
	outbuf.mio = mio;
	outbuf.ptr = buf;
	outbuf.capa = bufsz - 1; /* reserve one slot for the terminator */

	MIO_MEMSET(&fmtout, 0, MIO_SIZEOF(fmtout));
	fmtout.putbchars = fmt_put_bchars_to_bch_buf;
	fmtout.putuchars = fmt_put_uchars_to_bch_buf;
	fmtout.ctx = &outbuf;

	if (mio_bfmt_outv(&fmtout, fmt, ap) <= -1) return -1;

	buf[outbuf.len] = '\0';
	return outbuf.len;
}
/* Variadic front-end to mio_vfmttobcstr(). */
mio_oow_t mio_fmttobcstr (mio_t* mio, mio_bch_t* buf, mio_oow_t bufsz, const mio_bch_t* fmt, ...)
{
	va_list ap;
	mio_oow_t n;

	va_start(ap, fmt);
	n = mio_vfmttobcstr(mio, buf, bufsz, fmt, ap);
	va_end(ap);

	return n;
}
/* ------------------------------------------------------------------------ */