2020-05-25 08:04:30 +00:00
/*
 * Copyright (c) 2016-2020 Chung, Hyung-Hwan. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
2023-01-11 14:59:41 +00:00
2020-05-25 08:04:30 +00:00
# include "http-prv.h"
2021-07-22 07:30:20 +00:00
# include <hio-thr.h>
# include <hio-fmt.h>
# include <hio-chr.h>
2020-05-25 08:04:30 +00:00
# include <pthread.h>
2020-05-26 01:13:34 +00:00
/* when defined, the request content length is not capped for thr tasks */
#define THR_ALLOW_UNLIMITED_REQ_CONTENT_LENGTH

/* how the response body toward the client is framed */
enum THR_TASK_RES_mode_t
{
	THR_TASK_RES_MODE_CHUNKED, /* Transfer-Encoding: chunked */
	THR_TASK_RES_MODE_CLOSE,   /* no length known; EOF marks the end */
	THR_TASK_RES_MODE_LENGTH   /* Content-Length given by the peer */
};
typedef enum THR_TASK_RES_mode_t THR_TASK_RES_mode_t;

/* number of outstanding writes above which the producing side gets throttled */
#define THR_TASK_PENDING_IO_THRESHOLD 5

/* bits recorded in thr_task_t.over marking each finished i/o direction */
#define THR_TASK_OVER_READ_FROM_CLIENT (1 << 0)
#define THR_TASK_OVER_READ_FROM_PEER (1 << 1)
#define THR_TASK_OVER_WRITE_TO_CLIENT (1 << 2)
#define THR_TASK_OVER_WRITE_TO_PEER (1 << 3)
#define THR_TASK_OVER_ALL (THR_TASK_OVER_READ_FROM_CLIENT | THR_TASK_OVER_READ_FROM_PEER | THR_TASK_OVER_WRITE_TO_CLIENT | THR_TASK_OVER_WRITE_TO_PEER)
2020-05-26 01:13:34 +00:00
/* bundle of arguments handed to the worker thread when it starts */
struct thr_func_start_t
{
	hio_t* hio; /* for faster and safer access in case htts has been already destroyed */
	hio_svc_htts_t* htts;
	hio_svc_htts_thr_func_t thr_func; /* user-supplied function executed in the thread */
	void* thr_ctx;                    /* opaque context passed through to thr_func */
	hio_svc_htts_thr_func_info_t tfi; /* per-request info for the thread function */
};
typedef struct thr_func_start_t thr_func_start_t;
2023-01-11 14:59:41 +00:00
/* state of one thr task: a client request serviced by a peer thread device */
struct thr_task_t
{
	HIO_SVC_HTTS_TASK_HEADER; /* common htts task fields (htts, task list links, ...) */

	int options;

	hio_oow_t num_pending_writes_to_client; /* writes issued to csck not yet confirmed complete */
	hio_oow_t num_pending_writes_to_peer;   /* writes issued to peer not yet confirmed complete */
	hio_dev_thr_t* peer;    /* peer thread device executing the service function */
	hio_htrd_t* peer_htrd;  /* http reader parsing the peer's output */

	hio_dev_sck_t* csck;        /* client socket device */
	hio_svc_htts_cli_t* client; /* client context attached to csck */
	hio_http_version_t req_version; /* client request */
	hio_http_method_t req_method;

	unsigned int over: 4; /* must be large enough to accomodate THR_TASK_OVER_ALL */
	unsigned int keep_alive: 1;
	unsigned int req_content_length_unlimited: 1;
	unsigned int ever_attempted_to_write_to_client: 1;
	unsigned int client_eof_detected: 1;
	unsigned int client_disconnected: 1;
	unsigned int client_htrd_recbs_changed: 1; /* set when client htrd callbacks were swapped */
	hio_oow_t req_content_length; /* client request content length */
	THR_TASK_RES_mode_t res_mode_to_cli; /* framing of the response toward the client */

	/* original client callbacks, saved so thr_task_on_kill() can restore them */
	hio_dev_sck_on_read_t client_org_on_read;
	hio_dev_sck_on_write_t client_org_on_write;
	hio_dev_sck_on_disconnect_t client_org_on_disconnect;
	hio_htrd_recbs_t client_htrd_org_recbs;
};

typedef struct thr_task_t thr_task_t;
2020-05-25 08:04:30 +00:00
2020-05-26 01:13:34 +00:00
/* extension attached to the peer thread device and its htrd to reach
 * back to the owning task; reset to HIO_NULL when the task detaches */
struct thr_peer_xtn_t
{
	thr_task_t* task;
};
typedef struct thr_peer_xtn_t thr_peer_xtn_t;
2023-01-11 14:59:41 +00:00
static void thr_task_halt_participating_devices (thr_task_t* thr_task)
{
	/* Halt both devices involved in this task - the client socket and the
	 * peer thread device. Halting only schedules destruction; the actual
	 * cleanup happens later in the device callbacks. */
	hio_t* hio = thr_task->htts->hio;

	HIO_ASSERT (hio, thr_task->client != HIO_NULL);
	HIO_ASSERT (hio, thr_task->csck != HIO_NULL);

	HIO_DEBUG4 (hio, "HTTS(%p) - Halting participating devices in thr task %p(csck=%p,peer=%p)\n", thr_task->htts, thr_task, thr_task->csck, thr_task->peer);

	if (thr_task->csck) hio_dev_sck_halt (thr_task->csck);

	/* check for peer as it may not have been started */
	if (thr_task->peer) hio_dev_thr_halt (thr_task->peer);
}
2023-01-11 14:59:41 +00:00
static int thr_task_write_to_client (thr_task_t* thr_task, const void* data, hio_iolen_t dlen)
{
	/* Queue a raw write toward the client socket. Passing HIO_NULL/0
	 * indicates EOF. Returns 0 on success, -1 on failure.
	 * Does nothing if the client socket is already gone. */
	if (!thr_task->csck) return 0;

	thr_task->ever_attempted_to_write_to_client = 1;

	thr_task->num_pending_writes_to_client++;
	if (hio_dev_sck_write(thr_task->csck, data, dlen, HIO_NULL, HIO_NULL) <= -1)
	{
		thr_task->num_pending_writes_to_client--;
		return -1;
	}

	/* the client-bound backlog is too large. pause reading from the
	 * peer until enough pending writes complete. */
	if (thr_task->num_pending_writes_to_client > THR_TASK_PENDING_IO_THRESHOLD)
	{
		if (hio_dev_thr_read(thr_task->peer, 0) <= -1) return -1;
	}

	return 0;
}
2023-01-11 14:59:41 +00:00
static int thr_task_writev_to_client (thr_task_t* thr_task, hio_iovec_t* iov, hio_iolen_t iovcnt)
{
	/* Queue a vectored write toward the client socket. Returns 0 on
	 * success, -1 on failure. Does nothing if the client socket is gone. */
	if (!thr_task->csck) return 0;

	thr_task->ever_attempted_to_write_to_client = 1;

	thr_task->num_pending_writes_to_client++;
	if (hio_dev_sck_writev(thr_task->csck, iov, iovcnt, HIO_NULL, HIO_NULL) <= -1)
	{
		thr_task->num_pending_writes_to_client--;
		return -1;
	}

	/* the client-bound backlog is too large. pause reading from the
	 * peer until enough pending writes complete. */
	if (thr_task->num_pending_writes_to_client > THR_TASK_PENDING_IO_THRESHOLD)
	{
		if (hio_dev_thr_read(thr_task->peer, 0) <= -1) return -1;
	}

	return 0;
}
2023-01-11 14:59:41 +00:00
/* Compose and send a complete, self-contained response (status line,
 * standard headers, plain-text body carrying the status message) to the
 * client. When force_close is set, or keep-alive is off, the connection
 * is scheduled to close after the write by sending an EOF marker. */
static int thr_task_send_final_status_to_client (thr_task_t* thr_task, int status_code, int force_close)
{
	hio_svc_htts_cli_t* cli = thr_task->client;
	hio_bch_t dtbuf[64]; /* formatted Date header value */
	const hio_bch_t* status_msg;

	hio_svc_htts_fmtgmtime (cli->htts, HIO_NULL, dtbuf, HIO_COUNTOF(dtbuf));
	status_msg = hio_http_status_to_bcstr(status_code);

	if (!force_close) force_close = !thr_task->keep_alive;
	if (hio_becs_fmt(cli->sbuf, "HTTP/%d.%d %d %hs\r\nServer: %hs\r\nDate: %s\r\nConnection: %hs\r\n",
		thr_task->req_version.major, thr_task->req_version.minor,
		status_code, status_msg,
		cli->htts->server_name, dtbuf,
		(force_close? "close": "keep-alive")) == (hio_oow_t)-1) return -1;

	/* suppress the body text for HEAD with a non-OK status
	 * NOTE(review): looks like an intentional exception for HEAD - confirm the condition */
	if (thr_task->req_method == HIO_HTTP_HEAD && status_code != HIO_HTTP_STATUS_OK) status_msg = "";
	if (hio_becs_fcat(cli->sbuf, "Content-Type: text/plain\r\nContent-Length: %zu\r\n\r\n%hs", hio_count_bcstr(status_msg), status_msg) == (hio_oow_t)-1) return -1;

	/* send the full buffer; on force_close also queue an EOF write */
	return (thr_task_write_to_client(thr_task, HIO_BECS_PTR(cli->sbuf), HIO_BECS_LEN(cli->sbuf)) <= -1 ||
	        (force_close && thr_task_write_to_client(thr_task, HIO_NULL, 0) <= -1))? -1: 0;
}
2023-01-11 14:59:41 +00:00
static int thr_task_write_last_chunk_to_client (thr_task_t* thr_task)
{
	/* Finish the response toward the client. If nothing has been written
	 * yet, send a full 500 response instead; in chunked mode, terminate
	 * the body with the last-chunk marker. Without keep-alive, also queue
	 * an EOF write to close the connection. */
	if (!thr_task->ever_attempted_to_write_to_client)
	{
		if (thr_task_send_final_status_to_client(thr_task, HIO_HTTP_STATUS_INTERNAL_SERVER_ERROR, 0) <= -1) return -1;
	}
	else if (thr_task->res_mode_to_cli == THR_TASK_RES_MODE_CHUNKED)
	{
		if (thr_task_write_to_client(thr_task, "0\r\n\r\n", 5) <= -1) return -1;
	}

	if (!thr_task->keep_alive && thr_task_write_to_client(thr_task, HIO_NULL, 0) <= -1) return -1;
	return 0;
}
2023-01-11 14:59:41 +00:00
static int thr_task_write_to_peer (thr_task_t* thr_task, const void* data, hio_iolen_t dlen)
{
	/* Queue a write toward the peer thread device; HIO_NULL/0 indicates
	 * EOF. Returns 0 on success, -1 on failure. */
	thr_task->num_pending_writes_to_peer++;
	if (hio_dev_thr_write(thr_task->peer, data, dlen, HIO_NULL) <= -1)
	{
		thr_task->num_pending_writes_to_peer--;
		return -1;
	}

	/* TODO: check if it's already finished or something.. */

	/* the peer-bound backlog grew too large; stop reading from the
	 * client until pending writes drain */
	if (thr_task->num_pending_writes_to_peer > THR_TASK_PENDING_IO_THRESHOLD)
	{
		if (hio_dev_sck_read(thr_task->csck, 0) <= -1) return -1;
	}

	return 0;
}
2023-01-11 14:59:41 +00:00
/* Record that one or more i/o directions of this task are finished.
 * When a read direction completes, input watching on the corresponding
 * device is disabled. Once all four directions are over, the task is
 * wound down: the peer is halted and the client is either returned to
 * keep-alive service (by dropping the task reference) or halted. */
static HIO_INLINE void thr_task_mark_over (thr_task_t* thr_task, int over_bits)
{
	hio_svc_htts_t* htts = thr_task->htts;
	hio_t* hio = htts->hio;
	unsigned int old_over;

	old_over = thr_task->over;
	thr_task->over |= over_bits;

	HIO_DEBUG8 (hio, "HTTS(%p) - thr(t=%p,c=%p[%d],p=%p) - old_over=%x | new-bits=%x => over=%x\n", thr_task->htts, thr_task, thr_task->client, (thr_task->csck? thr_task->csck->hnd: -1), thr_task->peer, (int)old_over, (int)over_bits, (int)thr_task->over);

	/* reading from the client just became over - stop watching client input */
	if (!(old_over & THR_TASK_OVER_READ_FROM_CLIENT) && (thr_task->over & THR_TASK_OVER_READ_FROM_CLIENT))
	{
		if (thr_task->csck && hio_dev_sck_read(thr_task->csck, 0) <= -1)
		{
			HIO_DEBUG5 (hio, "HTTS(%p) - thr(t=%p,c=%p[%d],p=%p) - halting client for failure to disable input watching\n", thr_task->htts, thr_task, thr_task->client, (thr_task->csck? thr_task->csck->hnd: -1), thr_task->peer);
			hio_dev_sck_halt (thr_task->csck);
		}
	}

	/* reading from the peer just became over - stop watching peer output */
	if (!(old_over & THR_TASK_OVER_READ_FROM_PEER) && (thr_task->over & THR_TASK_OVER_READ_FROM_PEER))
	{
		if (thr_task->peer && hio_dev_thr_read(thr_task->peer, 0) <= -1)
		{
			HIO_DEBUG5 (hio, "HTTS(%p) - thr(t=%p,c=%p[%d],p=%p) - halting peer for failure to disable input watching\n", thr_task->htts, thr_task, thr_task->client, (thr_task->csck? thr_task->csck->hnd: -1), thr_task->peer);
			hio_dev_thr_halt (thr_task->peer);
		}
	}

	/* all four directions just became over - wind the task down */
	if (old_over != THR_TASK_OVER_ALL && thr_task->over == THR_TASK_OVER_ALL)
	{
		/* ready to stop */
		if (thr_task->peer)
		{
			HIO_DEBUG5 (hio, "HTTS(%p) - thr(t=%p,c=%p[%d],p=%p) - halting peer as it is unneeded\n", thr_task->htts, thr_task, thr_task->client, (thr_task->csck? thr_task->csck->hnd: -1), thr_task->peer);
			hio_dev_thr_halt (thr_task->peer);
		}

		if (thr_task->csck)
		{
			HIO_ASSERT (hio, thr_task->client != HIO_NULL);

			if (thr_task->keep_alive && !thr_task->client_eof_detected)
			{
				/* how to arrange to delete this thr_task object and put the socket back to the normal waiting state??? */
				HIO_ASSERT (thr_task->htts->hio, thr_task->client->task == (hio_svc_htts_task_t*)thr_task);
				HIO_SVC_HTTS_TASK_UNREF (thr_task->client->task);
				/* IMPORTANT: thr_task must not be accessed from here down as it could have been destroyed */
			}
			else
			{
				HIO_DEBUG5 (hio, "HTTS(%p) - thr(t=%p,c=%p[%d],p=%p) - halting client for no keep-alive\n", thr_task->htts, thr_task, thr_task->client, (thr_task->csck? thr_task->csck->hnd: -1), thr_task->peer);
				hio_dev_sck_shutdown (thr_task->csck, HIO_DEV_SCK_SHUTDOWN_WRITE);
				hio_dev_sck_halt (thr_task->csck);
			}
		}
	}
}
2023-01-11 15:41:01 +00:00
/* Final cleanup callback for the task: detach from the peer device and
 * its reader, restore the client's original socket/htrd callbacks, and
 * unlink the task from the htts service. */
static void thr_task_on_kill (hio_svc_htts_task_t* task)
{
	thr_task_t* thr_task = (thr_task_t*)task;
	hio_t* hio = thr_task->htts->hio;

	HIO_DEBUG5 (hio, "HTTS(%p) - thr(t=%p,c=%p[%d],p=%p) - killing the task\n", thr_task->htts, thr_task, thr_task->client, (thr_task->csck? thr_task->csck->hnd: -1), thr_task->peer);

	if (thr_task->peer)
	{
		thr_peer_xtn_t* thr_peer = hio_dev_thr_getxtn(thr_task->peer);
		if (thr_peer->task)
		{
			/* thr_peer->task may not be NULL if the resource is killed regardless of the reference count.
			 * anyway, don't use HIO_SVC_HTTS_TASK_UNREF(thr_peer->task) because the resource itself
			 * is already being killed. */
			thr_peer->task = HIO_NULL;
		}

		hio_dev_thr_kill (thr_task->peer);
		thr_task->peer = HIO_NULL;
	}

	if (thr_task->peer_htrd)
	{
		thr_peer_xtn_t* thr_peer = hio_htrd_getxtn(thr_task->peer_htrd);
		if (thr_peer->task) thr_peer->task = HIO_NULL; // no HIO_SVC_HTTS_TASK_UNREF() for the same reason above

		hio_htrd_close (thr_task->peer_htrd);
		thr_task->peer_htrd = HIO_NULL;
	}

	if (thr_task->csck)
	{
		HIO_ASSERT (hio, thr_task->client != HIO_NULL);

		/* restore callbacks */
		if (thr_task->client_org_on_read) thr_task->csck->on_read = thr_task->client_org_on_read;
		if (thr_task->client_org_on_write) thr_task->csck->on_write = thr_task->client_org_on_write;
		if (thr_task->client_org_on_disconnect) thr_task->csck->on_disconnect = thr_task->client_org_on_disconnect;
		if (thr_task->client_htrd_recbs_changed) hio_htrd_setrecbs (thr_task->client->htrd, &thr_task->client_htrd_org_recbs);

		/* resume watching client input for the next request; halt on failure
		 * or when the connection is not to be kept alive */
		if (!thr_task->keep_alive || hio_dev_sck_read(thr_task->csck, 1) <= -1)
		{
			HIO_DEBUG2 (hio, "HTTS(%p) - halting client(%p) for failure to enable input watching\n", thr_task->htts, thr_task->csck);
			hio_dev_sck_halt (thr_task->csck);
		}
	}

	thr_task->client_org_on_read = HIO_NULL;
	thr_task->client_org_on_write = HIO_NULL;
	thr_task->client_org_on_disconnect = HIO_NULL;
	thr_task->client_htrd_recbs_changed = 0;

	if (thr_task->task_next) HIO_SVC_HTTS_TASKL_UNLINK_TASK (thr_task); /* detach from the htts service only if it's attached */

	HIO_DEBUG5 (hio, "HTTS(%p) - thr(t=%p,c=%p[%d],p=%p) - killed the task\n", thr_task->htts, thr_task, thr_task->client, (thr_task->csck? thr_task->csck->hnd: -1), thr_task->peer);
}
2021-07-22 07:30:20 +00:00
/* Close callback for the peer thread device. Invoked once per side
 * (master / in / out) as the device shuts down. */
static void thr_peer_on_close (hio_dev_thr_t* thr, hio_dev_thr_sid_t sid)
{
	hio_t* hio = thr->hio;
	thr_peer_xtn_t* thr_peer = (thr_peer_xtn_t*)hio_dev_thr_getxtn(thr);
	thr_task_t* thr_task = thr_peer->task;

	if (!thr_task) return; /* thr task already gone */

	switch (sid)
	{
		case HIO_DEV_THR_MASTER:
			HIO_DEBUG2 (hio, "HTTS(%p) - peer %p closing master\n", thr_task->htts, thr);
			thr_task->peer = HIO_NULL; /* clear this peer from the state */

			/* drop the reference held by the peer device's extension */
			HIO_ASSERT (hio, thr_peer->task != HIO_NULL);
			HIO_SVC_HTTS_TASK_UNREF (thr_peer->task);

			if (thr_task->peer_htrd)
			{
				/* once this peer device is closed, peer's htrd is also never used.
				 * it's safe to detach the extra information attached on the htrd object. */
				thr_peer = hio_htrd_getxtn(thr_task->peer_htrd);
				HIO_ASSERT (hio, thr_peer->task != HIO_NULL);
				HIO_SVC_HTTS_TASK_UNREF (thr_peer->task);
			}
			break;

		case HIO_DEV_THR_OUT:
			HIO_ASSERT (hio, thr_task->peer == thr);
			HIO_DEBUG3 (hio, "HTTS(%p) - peer %p closing slave[%d]\n", thr_task->htts, thr, sid);

			/* the peer's output side closed before the response was fully
			 * relayed - finish the response toward the client now */
			if (!(thr_task->over & THR_TASK_OVER_READ_FROM_PEER))
			{
				if (thr_task_write_last_chunk_to_client(thr_task) <= -1)
					thr_task_halt_participating_devices (thr_task);
				else
					thr_task_mark_over (thr_task, THR_TASK_OVER_READ_FROM_PEER);
			}
			break;

		case HIO_DEV_THR_IN:
			/* nothing more can be written to the peer */
			thr_task_mark_over (thr_task, THR_TASK_OVER_WRITE_TO_PEER);
			break;

		default:
			HIO_DEBUG3 (hio, "HTTS(%p) - peer %p closing slave[%d]\n", thr_task->htts, thr, sid);
			/* do nothing */
			break;
	}
}
2021-07-22 07:30:20 +00:00
/* Read callback for the peer thread device. Data produced by the peer is
 * fed to the peer htrd, which parses it as an http response and relays it
 * to the client via the htrd callbacks. Always returns 0; failures halt
 * the participating devices instead of propagating an error. */
static int thr_peer_on_read (hio_dev_thr_t* thr, const void* data, hio_iolen_t dlen)
{
	hio_t* hio = thr->hio;
	thr_peer_xtn_t* thr_peer = (thr_peer_xtn_t*)hio_dev_thr_getxtn(thr);
	thr_task_t* thr_task = thr_peer->task;

	HIO_ASSERT (hio, thr_task != HIO_NULL);

	if (dlen <= -1)
	{
		HIO_DEBUG2 (hio, "HTTPS(%p) - read error from peer %p\n", thr_task->htts, thr);
		goto oops;
	}

	if (dlen == 0)
	{
		HIO_DEBUG2 (hio, "HTTPS(%p) - EOF from peer %p\n", thr_task->htts, thr);

		if (!(thr_task->over & THR_TASK_OVER_READ_FROM_PEER))
		{
			/* the thr script could be misbehaviing.
			 * it still has to read more but EOF is read.
			 * otherwise client_peer_htrd_poke() should have been called */
			if (thr_task_write_last_chunk_to_client(thr_task) <= -1) goto oops;
			thr_task_mark_over (thr_task, THR_TASK_OVER_READ_FROM_PEER);
		}
	}
	else
	{
		hio_oow_t rem; /* bytes left unconsumed by the htrd */

		HIO_ASSERT (hio, !(thr_task->over & THR_TASK_OVER_READ_FROM_PEER));

		if (hio_htrd_feed(thr_task->peer_htrd, data, dlen, &rem) <= -1)
		{
			HIO_DEBUG2 (hio, "HTTPS(%p) - unable to feed peer htrd - peer %p\n", thr_task->htts, thr);

			/* report a 500 if no response data has gone out yet */
			if (!thr_task->ever_attempted_to_write_to_client &&
			    !(thr_task->over & THR_TASK_OVER_WRITE_TO_CLIENT))
			{
				thr_task_send_final_status_to_client (thr_task, HIO_HTTP_STATUS_INTERNAL_SERVER_ERROR, 1); /* don't care about error because it jumps to oops below anyway */
			}

			goto oops;
		}

		if (rem > 0)
		{
			/* If the script specifies Content-Length and produces longer data, it will come here */
			/*printf ("AAAAAAAAAAAAAAAAAa EEEEEXcessive DATA..................\n");*/
			/* TODO: or drop this request?? */
		}
	}

	return 0;

oops:
	thr_task_halt_participating_devices (thr_task);
	return 0;
}
2021-07-22 07:30:20 +00:00
static int thr_peer_capture_response_header (hio_htre_t* req, const hio_bch_t* key, const hio_htre_hdrval_t* val, void* ctx)
{
	/* Header walker callback: copy a peer response header (and all its
	 * values) into the client's send buffer. Returns 0 on success or
	 * skip, -1 on buffer failure. */
	hio_svc_htts_cli_t* cli = (hio_svc_htts_cli_t*)ctx;

	/* capture a header except Status, Connection, Transfer-Encoding, and Server */
	if (hio_comp_bcstr(key, "Status", 1) == 0 ||
	    hio_comp_bcstr(key, "Connection", 1) == 0 ||
	    hio_comp_bcstr(key, "Transfer-Encoding", 1) == 0 ||
	    hio_comp_bcstr(key, "Server", 1) == 0 ||
	    hio_comp_bcstr(key, "Date", 1) == 0) return 0;

	while (val)
	{
		if (hio_becs_cat(cli->sbuf, key) == (hio_oow_t)-1 ||
		    hio_becs_cat(cli->sbuf, ": ") == (hio_oow_t)-1 ||
		    hio_becs_cat(cli->sbuf, val->ptr) == (hio_oow_t)-1 ||
		    hio_becs_cat(cli->sbuf, "\r\n") == (hio_oow_t)-1)
		{
			return -1;
		}
		val = val->next;
	}

	return 0;
}
2021-07-22 07:30:20 +00:00
/* Invoked when the peer's response header has been fully parsed. Builds
 * the status line and headers for the client according to the framing
 * mode and sends them out. Returns 0 on success, -1 on failure. */
static int thr_peer_htrd_peek (hio_htrd_t* htrd, hio_htre_t* req)
{
	thr_peer_xtn_t* thr_peer = hio_htrd_getxtn(htrd);
	thr_task_t* thr_task = thr_peer->task;
	hio_svc_htts_cli_t* cli = thr_task->client;
	hio_bch_t dtbuf[64]; /* formatted Date header value */
	int status_code = HIO_HTTP_STATUS_OK;

	if (req->attr.content_length)
	{
		// TOOD: remove content_length if content_length is negative or not numeric.
		thr_task->res_mode_to_cli = THR_TASK_RES_MODE_LENGTH;
	}

	if (req->attr.status)
	{
		/* adopt the status code emitted by the peer if it parses as a sane positive int */
		int is_sober;
		const hio_bch_t* endptr;
		hio_intmax_t v;

		v = hio_bchars_to_intmax(req->attr.status, hio_count_bcstr(req->attr.status), HIO_BCHARS_TO_INTMAX_MAKE_OPTION(0,0,0,10), &endptr, &is_sober);
		if (*endptr == '\0' && is_sober && v > 0 && v <= HIO_TYPE_MAX(int)) status_code = v;
	}

	hio_svc_htts_fmtgmtime (cli->htts, HIO_NULL, dtbuf, HIO_COUNTOF(dtbuf));

	if (hio_becs_fmt(cli->sbuf, "HTTP/%d.%d %d %hs\r\nServer: %hs\r\nDate: %hs\r\n",
		thr_task->req_version.major, thr_task->req_version.minor,
		status_code, hio_http_status_to_bcstr(status_code),
		cli->htts->server_name, dtbuf) == (hio_oow_t)-1) return -1;

	/* copy the peer's headers (minus hop-by-hop ones) into the buffer */
	if (hio_htre_walkheaders(req, thr_peer_capture_response_header, cli) <= -1) return -1;

	switch (thr_task->res_mode_to_cli)
	{
		case THR_TASK_RES_MODE_CHUNKED:
			if (hio_becs_cat(cli->sbuf, "Transfer-Encoding: chunked\r\n") == (hio_oow_t)-1) return -1;
			/*if (hio_becs_cat(cli->sbuf, "Connection: keep-alive\r\n") == (hio_oow_t)-1) return -1;*/
			break;

		case THR_TASK_RES_MODE_CLOSE:
			if (hio_becs_cat(cli->sbuf, "Connection: close\r\n") == (hio_oow_t)-1) return -1;
			break;

		case THR_TASK_RES_MODE_LENGTH:
			if (hio_becs_cat(cli->sbuf, (thr_task->keep_alive? "Connection: keep-alive\r\n": "Connection: close\r\n")) == (hio_oow_t)-1) return -1;
	}

	if (hio_becs_cat(cli->sbuf, "\r\n") == (hio_oow_t)-1) return -1;

	return thr_task_write_to_client(thr_task, HIO_BECS_PTR(cli->sbuf), HIO_BECS_LEN(cli->sbuf));
}
2021-07-22 07:30:20 +00:00
static int thr_peer_htrd_poke (hio_htrd_t* htrd, hio_htre_t* req)
{
	/* the peer's output has been fully parsed - finish the response
	 * toward the client and mark the peer-read direction over */
	thr_peer_xtn_t* xtn = hio_htrd_getxtn(htrd);
	thr_task_t* task = xtn->task;

	if (thr_task_write_last_chunk_to_client(task) <= -1) return -1;

	thr_task_mark_over (task, THR_TASK_OVER_READ_FROM_PEER);
	return 0;
}
2021-07-22 07:30:20 +00:00
/* Body data parsed from the peer's output. Relay it to the client,
 * wrapping it in a chunk frame when the response is in chunked mode.
 * Returns 0 on success, -1 on failure. */
static int thr_peer_htrd_push_content (hio_htrd_t* htrd, hio_htre_t* req, const hio_bch_t* data, hio_oow_t dlen)
{
	thr_peer_xtn_t* thr_peer = hio_htrd_getxtn(htrd);
	thr_task_t* thr_task = thr_peer->task;

	HIO_ASSERT (thr_task->htts->hio, htrd == thr_task->peer_htrd);

	switch (thr_task->res_mode_to_cli)
	{
		case THR_TASK_RES_MODE_CHUNKED:
		{
			/* frame: "<hex-size>\r\n" + data + "\r\n" in a single vectored write */
			hio_iovec_t iov[3];
			hio_bch_t lbuf[16];
			hio_oow_t llen;

			/* hio_fmt_uintmax_to_bcstr() null-terminates the output. only HIO_COUNTOF(lbuf) - 1
			 * is enough to hold '\r' and '\n' at the back without '\0'. */
			llen = hio_fmt_uintmax_to_bcstr(lbuf, HIO_COUNTOF(lbuf) - 1, dlen, 16 | HIO_FMT_UINTMAX_UPPERCASE, 0, '\0', HIO_NULL);
			lbuf[llen++] = '\r';
			lbuf[llen++] = '\n';

			iov[0].iov_ptr = lbuf;
			iov[0].iov_len = llen;
			iov[1].iov_ptr = (void*)data;
			iov[1].iov_len = dlen;
			iov[2].iov_ptr = "\r\n";
			iov[2].iov_len = 2;

			if (thr_task_writev_to_client(thr_task, iov, HIO_COUNTOF(iov)) <= -1)
			{
				goto oops;
			}

			break;
		}

		case THR_TASK_RES_MODE_CLOSE:
		case THR_TASK_RES_MODE_LENGTH:
			/* no extra framing - pass the data through as-is */
			if (thr_task_write_to_client(thr_task, data, dlen) <= -1)
			{
				goto oops;
			}
			break;
	}

	/* too many writes pending toward the client - pause reading from the peer */
	if (thr_task->num_pending_writes_to_client > THR_TASK_PENDING_IO_THRESHOLD)
	{
		if (hio_dev_thr_read(thr_task->peer, 0) <= -1)
		{
			goto oops;
		}
	}

	return 0;

oops:
	return -1;
}
2021-07-22 07:30:20 +00:00
/* callbacks for the htrd that parses the peer thread's output */
static hio_htrd_recbs_t thr_peer_htrd_recbs =
{
	thr_peer_htrd_peek,
	thr_peer_htrd_poke,
	thr_peer_htrd_push_content
};
2021-07-22 07:30:20 +00:00
static int thr_client_htrd_poke (hio_htrd_t* htrd, hio_htre_t* req)
{
	/* client request got completed - propagate EOF to the peer and mark
	 * the client-read direction over */
	hio_svc_htts_cli_htrd_xtn_t* xtn = (hio_svc_htts_cli_htrd_xtn_t*)hio_htrd_getxtn(htrd);
	hio_svc_htts_cli_t* cli = hio_dev_sck_getxtn(xtn->sck);
	thr_task_t* task = (thr_task_t*)cli->task;

	/* indicate EOF to the client peer */
	if (thr_task_write_to_peer(task, HIO_NULL, 0) <= -1) return -1;

	thr_task_mark_over (task, THR_TASK_OVER_READ_FROM_CLIENT);
	return 0;
}
2021-07-22 07:30:20 +00:00
static int thr_client_htrd_push_content (hio_htrd_t* htrd, hio_htre_t* req, const hio_bch_t* data, hio_oow_t dlen)
{
	/* relay request body data from the client to the peer thread device */
	hio_svc_htts_cli_htrd_xtn_t* xtn = (hio_svc_htts_cli_htrd_xtn_t*)hio_htrd_getxtn(htrd);
	hio_dev_sck_t* sck = xtn->sck;
	hio_svc_htts_cli_t* cli = hio_dev_sck_getxtn(sck);
	thr_task_t* task = (thr_task_t*)cli->task;

	HIO_ASSERT (sck->hio, cli->sck == sck);
	return thr_task_write_to_peer(task, data, dlen);
}
2021-07-22 07:30:20 +00:00
/* callbacks installed on the client-side htrd while this task is in
 * control; the peek slot is unused (HIO_NULL) */
static hio_htrd_recbs_t thr_client_htrd_recbs =
{
	HIO_NULL,
	thr_client_htrd_poke,
	thr_client_htrd_push_content
};
2021-07-22 07:30:20 +00:00
/* write-completion callback on the peer thread device.
 * wrlen <= -1: write error; wrlen == 0: the EOF indication was flushed;
 * wrlen > 0: a normal data write completed.
 * Always returns 0; failures are handled by halting the participating
 * devices rather than by propagating an error code. */
static int thr_peer_on_write (hio_dev_thr_t* thr, hio_iolen_t wrlen, void* wrctx)
{
	hio_t* hio = thr->hio;
	thr_peer_xtn_t* thr_peer = (thr_peer_xtn_t*)hio_dev_thr_getxtn(thr);
	thr_task_t* thr_task = thr_peer->task;

	if (!thr_task) return 0; /* there is nothing i can do. the thr_task is being cleared or has been cleared already. */

	HIO_ASSERT (hio, thr_task->peer == thr);

	if (wrlen <= -1)
	{
		HIO_DEBUG2 (hio, "HTTS(%p) - unable to write to peer %p\n", thr_task->htts, thr);
		goto oops;
	}
	else if (wrlen == 0)
	{
		/* indicated EOF */
		/* do nothing here as i didn't incremented num_pending_writes_to_peer when making the write request */

		thr_task->num_pending_writes_to_peer--;
		/* the EOF write must have been the last outstanding write */
		HIO_ASSERT (hio, thr_task->num_pending_writes_to_peer == 0);
		HIO_DEBUG2 (hio, "HTTS(%p) - indicated EOF to peer %p\n", thr_task->htts, thr);

		/* indicated EOF to the peer side. i need no more data from the client side.
		 * i don't need to enable input watching in the client side either */
		thr_task_mark_over (thr_task, THR_TASK_OVER_WRITE_TO_PEER);
	}
	else
	{
		HIO_ASSERT (hio, thr_task->num_pending_writes_to_peer > 0);

		thr_task->num_pending_writes_to_peer--;
		if (thr_task->num_pending_writes_to_peer == THR_TASK_PENDING_IO_THRESHOLD)
		{
			/* backlog drained below the threshold - resume reading from the
			 * client unless the client side is already done */
			if (!(thr_task->over & THR_TASK_OVER_READ_FROM_CLIENT) &&
			    hio_dev_sck_read(thr_task->csck, 1) <= -1) goto oops;
		}

		/* if the client side is finished and nothing is pending any more,
		 * the write-to-peer direction is complete as well */
		if ((thr_task->over & THR_TASK_OVER_READ_FROM_CLIENT) && thr_task->num_pending_writes_to_peer <= 0)
		{
			thr_task_mark_over (thr_task, THR_TASK_OVER_WRITE_TO_PEER);
		}
	}
	return 0;

oops:
	thr_task_halt_participating_devices (thr_task);
	return 0;
}
2021-07-22 07:30:20 +00:00
/* disconnect callback installed on the client socket.
 * detaches the client socket from the task and chains to the socket's
 * original disconnect handler, which releases the associated resource. */
static void thr_client_on_disconnect (hio_dev_sck_t* sck)
{
	hio_svc_htts_cli_t* cli = hio_dev_sck_getxtn(sck);
	thr_task_t* thr_task = (thr_task_t*)cli->task;
	hio_svc_htts_t* htts = thr_task->htts;
	hio_t* hio = sck->hio;

	/* BUGFIX: this assertion used '=' (assignment) instead of '==', so it
	 * could never fail and silently reassigned sck. */
	HIO_ASSERT (hio, sck == thr_task->csck);

	HIO_DEBUG4 (hio, "HTTS(%p) - thr(t=%p,c=%p,csck=%p) - client socket disconnect notified\n", htts, thr_task, sck, cli);

	thr_task->client_disconnected = 1;
	thr_task->csck = HIO_NULL;
	thr_task->client = HIO_NULL;
	if (thr_task->client_org_on_disconnect)
	{
		thr_task->client_org_on_disconnect (sck);
		/* this original callback destroys the associated resource.
		 * thr_task must not be accessed (dereferenced) from here down;
		 * the debug line below prints the pointer value only. */
	}

	HIO_DEBUG4 (hio, "HTTS(%p) - thr(t=%p,c=%p,csck=%p) - client socket disconnect handled\n", htts, thr_task, sck, cli);
	/* Note: after this callback, the actual device pointed to by 'sck' will be freed in the main loop. */
}
2021-07-22 07:30:20 +00:00
/* read callback installed on the client socket while a thr task is active.
 * len <= -1: read error; len == 0: EOF from the client; len > 0: request
 * bytes to feed into the client-side htrd parser.
 * Always returns 0; errors halt the participating devices instead. */
static int thr_client_on_read (hio_dev_sck_t* sck, const void* buf, hio_iolen_t len, const hio_skad_t* srcaddr)
{
	hio_t* hio = sck->hio;
	hio_svc_htts_cli_t* cli = hio_dev_sck_getxtn(sck);
	thr_task_t* thr_task = (thr_task_t*)cli->task;

	HIO_ASSERT (hio, sck == cli->sck);

	if (len <= -1)
	{
		/* read error */
		/* BUGFIX: the format string carries 3 conversion specifiers but only
		 * 2 arguments were passed via HIO_DEBUG2, reading a garbage argument.
		 * supply the missing htts argument and use HIO_DEBUG3. */
		HIO_DEBUG3 (cli->htts->hio, "HTTPS(%p) - read error on client %p(%d)\n", cli->htts, sck, (int)sck->hnd);
		goto oops;
	}

	if (!thr_task->peer)
	{
		/* the peer is gone */
		goto oops; /* do what? just return 0? */
	}

	if (len == 0)
	{
		/* EOF on the client side. arrange to close */
		HIO_DEBUG3 (hio, "HTTPS(%p) - EOF from client %p(hnd=%d)\n", thr_task->htts, sck, (int)sck->hnd);
		thr_task->client_eof_detected = 1;

		if (!(thr_task->over & THR_TASK_OVER_READ_FROM_CLIENT)) /* if this is true, EOF is received without thr_client_htrd_poke() */
		{
			int n;
			/* pass the EOF on to the peer and mark the client-read direction
			 * finished even if the relay fails, so the over-state stays
			 * consistent before the error path runs */
			n = thr_task_write_to_peer(thr_task, HIO_NULL, 0);
			thr_task_mark_over (thr_task, THR_TASK_OVER_READ_FROM_CLIENT);
			if (n <= -1) goto oops;
		}
	}
	else
	{
		hio_oow_t rem;

		HIO_ASSERT (hio, !(thr_task->over & THR_TASK_OVER_READ_FROM_CLIENT));

		if (hio_htrd_feed(cli->htrd, buf, len, &rem) <= -1) goto oops;

		if (rem > 0)
		{
			/* TODO: store this to client buffer. once the current resource is completed, arrange to call on_read() with it */
			HIO_DEBUG3 (hio, "HTTPS(%p) - excessive data after contents by thr client %p(%d)\n", sck->hio, sck, (int)sck->hnd);
		}
	}

	return 0;

oops:
	thr_task_halt_participating_devices (thr_task);
	return 0;
}
2021-07-22 07:30:20 +00:00
/* write-completion callback installed on the client socket.
 * wrlen <= -1: write error; wrlen == 0: the EOF indication was flushed to
 * the client; wrlen > 0: a normal data write completed.
 * mirrors thr_peer_on_write() for the opposite direction.
 * Always returns 0; failures halt the participating devices. */
static int thr_client_on_write (hio_dev_sck_t* sck, hio_iolen_t wrlen, void* wrctx, const hio_skad_t* dstaddr)
{
	hio_t* hio = sck->hio;
	hio_svc_htts_cli_t* cli = hio_dev_sck_getxtn(sck);
	thr_task_t* thr_task = (thr_task_t*)cli->task;

	if (wrlen <= -1)
	{
		HIO_DEBUG3 (hio, "HTTPS(%p) - unable to write to client %p(%d)\n", sck->hio, sck, (int)sck->hnd);
		goto oops;
	}

	if (wrlen == 0)
	{
		/* if the connect is keep-alive, this part may not be called */
		thr_task->num_pending_writes_to_client--;
		/* the EOF write must have been the last outstanding write */
		HIO_ASSERT (hio, thr_task->num_pending_writes_to_client == 0);
		HIO_DEBUG3 (hio, "HTTS(%p) - indicated EOF to client %p(%d)\n", thr_task->htts, sck, (int)sck->hnd);
		/* since EOF has been indicated to the client, it must not write to the client any further.
		 * this also means that i don't need any data from the peer side either.
		 * i don't need to enable input watching on the peer side */
		thr_task_mark_over (thr_task, THR_TASK_OVER_WRITE_TO_CLIENT);
	}
	else
	{
		HIO_ASSERT (hio, thr_task->num_pending_writes_to_client > 0);

		thr_task->num_pending_writes_to_client--;
		if (thr_task->peer && thr_task->num_pending_writes_to_client == THR_TASK_PENDING_IO_THRESHOLD)
		{
			/* backlog drained below the threshold - resume reading from the
			 * peer thread unless the peer side is already done */
			if (!(thr_task->over & THR_TASK_OVER_READ_FROM_PEER) &&
			    hio_dev_thr_read(thr_task->peer, 1) <= -1) goto oops;
		}

		/* if the peer side is finished and nothing is pending any more,
		 * the write-to-client direction is complete as well */
		if ((thr_task->over & THR_TASK_OVER_READ_FROM_PEER) && thr_task->num_pending_writes_to_client <= 0)
		{
			thr_task_mark_over (thr_task, THR_TASK_OVER_WRITE_TO_CLIENT);
		}
	}
	return 0;

oops:
	thr_task_halt_participating_devices (thr_task);
	return 0;
}
/* releases the thr_func_start_t bundle passed to the worker thread.
 * registered with pthread_cleanup_push() in thr_func() and also called
 * directly from the failure path of hio_svc_htts_dothr(). */
static void free_thr_start_info (void* ctx)
{
	/* this function is a thread cleanup handler.
	 * it can get invoked after htts is destroyed by hio_svc_htts_stop()
	 * because hio_dev_thr_kill() pushes back the job using hio_addcfmb()
	 * and the actual cfmb clean-up is performed after the service stop.
	 * it is not reliable to use tfs->htts or tfs->htts->hio. use tfs->hio only here.
	==3845396== Invalid read of size 8
	==3845396==    at 0x40A7D5: free_thr_start_info (http-thr.c:804)
	==3845396==    by 0x40A7D5: thr_func (http-thr.c:815)
	==3845396==    by 0x41AE46: run_thr_func (thr.c:127)
	==3845396==    by 0x4A132A4: start_thread (in /usr/lib64/libpthread-2.33.so)
	==3845396==    by 0x4B2B322: clone (in /usr/lib64/libc-2.33.so)
	==3845396==  Address 0x4c38b00 is 0 bytes inside a block of size 464 free'd
	==3845396==    at 0x48430E4: free (vg_replace_malloc.c:872)
	==3845396==    by 0x4091EE: hio_svc_htts_stop (http-svr.c:555)
	==3845396==    by 0x40F5BE: hio_fini (hio.c:185)
	==3845396==    by 0x40F848: hio_close (hio.c:101)
	==3845396==    by 0x402CB4: main (webs.c:511)
	==3845396==  Block was alloc'd at
	==3845396==    at 0x484086F: malloc (vg_replace_malloc.c:381)
	==3845396==    by 0x412873: hio_callocmem (hio.c:2019)
	==3845396==    by 0x40978E: hio_svc_htts_start (http-svr.c:350)
	==3845396==    by 0x403900: webs_start (webs.c:385)
	==3845396==    by 0x402C6C: main (webs.c:498)
	*/
	thr_func_start_t* tfs = (thr_func_start_t*)ctx;
	hio_t* hio = tfs->hio; /* see the note above - the only safe hio handle here */

	if (tfs->tfi.req_path) hio_freemem (hio, tfs->tfi.req_path);
	if (tfs->tfi.req_param) hio_freemem (hio, tfs->tfi.req_param);
	hio_freemem (hio, tfs);
}
2021-07-22 07:30:20 +00:00
/* entry point executed in the worker thread created by hio_dev_thr_make().
 * invokes the user-supplied service function and guarantees that the start
 * info bundle is freed even if the thread is cancelled mid-call, via the
 * pthread cleanup handler (pthread_cleanup_pop(1) runs it on normal exit). */
static void thr_func (hio_t* hio, hio_dev_thr_iopair_t* iop, void* ctx)
{
	thr_func_start_t* tfs = (thr_func_start_t*)ctx;
	pthread_cleanup_push (free_thr_start_info, tfs);
	tfs->thr_func (tfs->htts, iop, &tfs->tfi, tfs->thr_ctx);
	pthread_cleanup_pop (1);
}
2021-07-22 07:30:20 +00:00
/* hio_htre_walkheaders() callback - inspects each request header and
 * records the ones relevant to the thread function invocation into the
 * start-info bundle. returns 0 to continue walking. */
static int thr_capture_request_header (hio_htre_t* req, const hio_bch_t* key, const hio_htre_hdrval_t* val, void* ctx)
{
	thr_func_start_t* tfs = (thr_func_start_t*)ctx;

	/* case-insensitive match (last argument 1 = ignore case) */
	if (hio_comp_bcstr(key, "X-HTTP-Method-Override", 1) == 0)
	{
		tfs->tfi.req_x_http_method_override = hio_bchars_to_http_method(val->ptr, val->len); /* don't care about multiple values */
	}

#if 0
	if (hio_comp_bcstr(key, "Connection", 1) != 0 &&
	    hio_comp_bcstr(key, "Transfer-Encoding", 1) != 0 &&
	    hio_comp_bcstr(key, "Content-Length", 1) != 0 &&
	    hio_comp_bcstr(key, "Expect", 1) != 0)
	{
		do
		{
			/* TODO: ... */
			val = val->next;
		}
		while (val);
	}
#endif
	return 0;
}
2022-10-09 16:41:07 +00:00
/* sets up a thread-backed task to service the request 'req' arriving over
 * the client socket 'csck': spawns a thread device running 'func', rewires
 * the client socket callbacks to relay the request body to the thread, and
 * arranges for the thread's output to be parsed and relayed back.
 * returns 0 on success, -1 on failure (all partially acquired resources are
 * released through the oops path). 'ctx' is passed through to 'func';
 * 'options' takes HIO_SVC_HTTS_THR_* flags. */
int hio_svc_htts_dothr (hio_svc_htts_t* htts, hio_dev_sck_t* csck, hio_htre_t* req, hio_svc_htts_thr_func_t func, void* ctx, int options)
{
	hio_t* hio = htts->hio;
	hio_svc_htts_cli_t* cli = hio_dev_sck_getxtn(csck);
	thr_task_t* thr_task = HIO_NULL;
	thr_peer_xtn_t* thr_peer;
	hio_dev_thr_make_t mi;
	thr_func_start_t* tfs;
	int have_content;

	/* ensure that you call this function before any contents is received */
	HIO_ASSERT (hio, hio_htre_getcontentlen(req) == 0);

	/* build the start-info bundle handed over to the worker thread.
	 * it owns duplicated strings and must be freed with free_thr_start_info() */
	tfs = hio_callocmem(hio, HIO_SIZEOF(*tfs));
	if (!tfs) goto oops;

	tfs->hio = hio; /* kept separately - tfs->htts may be unusable in the cleanup handler */
	tfs->htts = htts;
	tfs->thr_func = func;
	tfs->thr_ctx = ctx;

	tfs->tfi.req_method = hio_htre_getqmethodtype(req);
	tfs->tfi.req_version = *hio_htre_getversion(req);
	tfs->tfi.req_path = hio_dupbcstr(hio, hio_htre_getqpath(req), HIO_NULL);
	if (!tfs->tfi.req_path) goto oops;
	if (hio_htre_getqparam(req))
	{
		tfs->tfi.req_param = hio_dupbcstr(hio, hio_htre_getqparam(req), HIO_NULL);
		if (!tfs->tfi.req_param) goto oops;
	}

	tfs->tfi.req_x_http_method_override = -1; /* -1 = no X-HTTP-Method-Override header seen */
	if (hio_htre_walkheaders(req, thr_capture_request_header, tfs) <= -1) goto oops;

	tfs->tfi.server_addr = cli->sck->localaddr;
	tfs->tfi.client_addr = cli->sck->remoteaddr;

	HIO_MEMSET (&mi, 0, HIO_SIZEOF(mi));
	mi.thr_func = thr_func;
	mi.thr_ctx = tfs;
	mi.on_read = thr_peer_on_read;
	mi.on_write = thr_peer_on_write;
	mi.on_close = thr_peer_on_close;

	thr_task = (thr_task_t*)hio_svc_htts_task_make(htts, HIO_SIZEOF(*thr_task), thr_task_on_kill);
	if (HIO_UNLIKELY(!thr_task)) goto oops;

	thr_task->options = options;
	thr_task->csck = csck;
	thr_task->client = cli; /* for faster access without going through csck. */

	/*thr_task->num_pending_writes_to_client = 0;
	thr_task->num_pending_writes_to_peer = 0;*/
	thr_task->req_method = hio_htre_getqmethodtype(req);
	thr_task->req_version = *hio_htre_getversion(req);
	thr_task->req_content_length_unlimited = hio_htre_getreqcontentlen(req, &thr_task->req_content_length);

	/* save the socket's original callbacks before rewiring them so they can
	 * be chained to / restored later */
	thr_task->client_org_on_read = csck->on_read;
	thr_task->client_org_on_write = csck->on_write;
	thr_task->client_org_on_disconnect = csck->on_disconnect;
	csck->on_read = thr_client_on_read;
	csck->on_write = thr_client_on_write;
	csck->on_disconnect = thr_client_on_disconnect;

	/* attach the thr task to the client socket via the task field in the extended space of the socket */
	HIO_ASSERT (hio, cli->task == HIO_NULL);
	HIO_SVC_HTTS_TASK_REF ((hio_svc_htts_task_t*)thr_task, cli->task);

	thr_task->peer = hio_dev_thr_make(hio, HIO_SIZEOF(*thr_peer), &mi);
	if (HIO_UNLIKELY(!thr_task->peer))
	{
		/* no need to detach the attached task here because that is handled
		 * in the kill/disconnect callbacks of relevant devices */
		HIO_DEBUG3 (hio, "HTTS(%p) - failed to create thread for %p(%d)\n", htts, csck, (int)csck->hnd);
		goto oops;
	}

	tfs = HIO_NULL; /* mark that tfs is delegated to the thread - it must not be freed in the oops path from here on */

	/* attach the thr task to the peer thread device */
	thr_peer = hio_dev_thr_getxtn(thr_task->peer);
	HIO_SVC_HTTS_TASK_REF ((hio_svc_htts_task_t*)thr_task, thr_peer->task);

	/* the peer thread's output is parsed as an HTTP response without a
	 * status line (SKIP_INITIAL_LINE) */
	thr_task->peer_htrd = hio_htrd_open(hio, HIO_SIZEOF(*thr_peer));
	if (HIO_UNLIKELY(!thr_task->peer_htrd)) goto oops;
	hio_htrd_setoption (thr_task->peer_htrd, HIO_HTRD_SKIP_INITIAL_LINE | HIO_HTRD_RESPONSE);
	hio_htrd_setrecbs (thr_task->peer_htrd, &thr_peer_htrd_recbs);

	/* attach the thr task to the htrd parser set on the peer thread device */
	thr_peer = hio_htrd_getxtn(thr_task->peer_htrd);
	HIO_SVC_HTTS_TASK_REF ((hio_svc_htts_task_t*)thr_task, thr_peer->task);

#if !defined(THR_ALLOW_UNLIMITED_REQ_CONTENT_LENGTH)
	if (thr_task->req_content_length_unlimited)
	{
		/* Transfer-Encoding is chunked. no content-length is known in advance. */

		/* option 1. buffer contents. if it gets too large, send 413 Request Entity Too Large.
		 * option 2. send 411 Length Required immediately
		 * option 3. set Content-Length to -1 and use EOF to indicate the end of content [Non-Standard] */
		if (thr_task_send_final_status_to_client(thr_task, HIO_HTTP_STATUS_LENGTH_REQUIRED, 1) <= -1) goto oops;
	}
#endif

	if (req->flags & HIO_HTRE_ATTR_EXPECT100)
	{
		/* TODO: Expect: 100-continue? who should handle this? thr? or the http server? */
		/* CAN I LET the thr SCRIPT handle this? */
		if (!(options & HIO_SVC_HTTS_THR_NO_100_CONTINUE) &&
		    hio_comp_http_version_numbers(&req->version, 1, 1) >= 0 &&
		    (thr_task->req_content_length_unlimited || thr_task->req_content_length > 0))
		{
			/*
			 * Don't send 100 Continue if http verions is lower than 1.1
			 * [RFC7231]
			 *  A server that receives a 100-continue expectation in an HTTP/1.0
			 *  request MUST ignore that expectation.
			 *
			 * Don't send 100 Continue if expected content lenth is 0.
			 * [RFC7231]
			 *  A server MAY omit sending a 100 (Continue) response if it has
			 *  already received some or all of the message body for the
			 *  corresponding request, or if the framing indicates that there is
			 *  no message body.
			 */
			hio_bch_t msgbuf[64];
			hio_oow_t msglen;

			msglen = hio_fmttobcstr(hio, msgbuf, HIO_COUNTOF(msgbuf), "HTTP/%d.%d %d %hs\r\n\r\n", thr_task->req_version.major, thr_task->req_version.minor, HIO_HTTP_STATUS_CONTINUE, hio_http_status_to_bcstr(HIO_HTTP_STATUS_CONTINUE));
			if (thr_task_write_to_client(thr_task, msgbuf, msglen) <= -1) goto oops;
			thr_task->ever_attempted_to_write_to_client = 0; /* reset this as it's polluted for 100 continue */
		}
	}
	else if (req->flags & HIO_HTRE_ATTR_EXPECT)
	{
		/* 417 Expectation Failed */
		thr_task_send_final_status_to_client(thr_task, HIO_HTTP_STATUS_EXPECTATION_FAILED, 1);
		goto oops;
	}

#if defined(THR_ALLOW_UNLIMITED_REQ_CONTENT_LENGTH)
	have_content = thr_task->req_content_length > 0 || thr_task->req_content_length_unlimited;
#else
	have_content = thr_task->req_content_length > 0;
#endif
	if (have_content)
	{
		/* change the callbacks to subscribe to contents to be uploaded */
		thr_task->client_htrd_org_recbs = *hio_htrd_getrecbs(thr_task->client->htrd);
		/* patch the shared table's peek slot with the original callback
		 * (see thr_client_htrd_recbs above) */
		thr_client_htrd_recbs.peek = thr_task->client_htrd_org_recbs.peek;
		hio_htrd_setrecbs (thr_task->client->htrd, &thr_client_htrd_recbs);
		thr_task->client_htrd_recbs_changed = 1;
	}
	else
	{
		/* no content to be uploaded from the client */
		/* indicate EOF to the peer and disable input wathching from the client */
		if (thr_task_write_to_peer(thr_task, HIO_NULL, 0) <= -1) goto oops;
		thr_task_mark_over (thr_task, THR_TASK_OVER_READ_FROM_CLIENT | THR_TASK_OVER_WRITE_TO_PEER);
	}

	/* this may change later if Content-Length is included in the thr output */
	if (req->flags & HIO_HTRE_ATTR_KEEPALIVE)
	{
		thr_task->keep_alive = 1;
		thr_task->res_mode_to_cli = THR_TASK_RES_MODE_CHUNKED;
		/* the mode still can get switched to THR_TASK_RES_MODE_LENGTH if the thr script emits Content-Length */
	}
	else
	{
		thr_task->keep_alive = 0;
		thr_task->res_mode_to_cli = THR_TASK_RES_MODE_CLOSE;
	}

	/* TODO: store current input watching state and use it when destroying the thr_task data */
	if (hio_dev_sck_read(csck, !(thr_task->over & THR_TASK_OVER_READ_FROM_CLIENT)) <= -1) goto oops;

	HIO_SVC_HTTS_TASKL_APPEND_TASK (&htts->task, thr_task);
	return 0;

oops:
	HIO_DEBUG2 (hio, "HTTS(%p) - FAILURE in dothr - socket(%p)\n", htts, csck);
	if (tfs) free_thr_start_info (tfs);
	if (thr_task) thr_task_halt_participating_devices (thr_task);
	return -1;
}