added --enable-quadmath in configure.ac

This commit is contained in:
hyung-hwan 2025-04-04 19:53:38 +09:00
parent a34831a2e9
commit d18fbfc412
19 changed files with 157 additions and 128 deletions

View File

@ -162,7 +162,7 @@ am__DIST_COMMON = $(srcdir)/Dockerfile.in $(srcdir)/Makefile.in \
$(top_srcdir)/ac/config.guess $(top_srcdir)/ac/config.sub \
$(top_srcdir)/ac/install-sh $(top_srcdir)/ac/ltmain.sh \
$(top_srcdir)/ac/missing $(top_srcdir)/ac/tap-driver.sh \
$(top_srcdir)/pkgs/hio.spec.in ac/ar-lib ac/compile \
$(top_srcdir)/pkgs/hio.spec.in README.md ac/ar-lib ac/compile \
ac/config.guess ac/config.sub ac/depcomp ac/install-sh \
ac/ltmain.sh ac/missing
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)

25
configure vendored
View File

@ -624,7 +624,7 @@ PACKAGE_TARNAME='hio'
PACKAGE_VERSION='0.1.0'
PACKAGE_STRING='hio 0.1.0'
PACKAGE_BUGREPORT='Chung, Hyung-Hwan (hyunghwan.chung@gmail.com)'
PACKAGE_URL='http://code.miflux.com/@hio'
PACKAGE_URL='http://code.miflux.com/hyung-hwan/hio'
# Factoring default headers for most tests.
ac_includes_default="\
@ -834,6 +834,7 @@ enable_libtool_lock
enable_largefile
enable_all_static
with_all_static_libs
enable_quadmath
enable_ssl
enable_mariadb
with_mariadb
@ -1492,6 +1493,8 @@ Optional Features:
--disable-libtool-lock avoid locking (might break parallel builds)
--disable-largefile omit support for large files
--enable-all-static build the full static binaries(default. no)
--enable-quadmath attempt to support 128-bit floating point number if
available(default. no)
--enable-ssl build the library in the ssl mode (default. yes)
--enable-mariadb enable mariadb support (default. no)
--enable-debug build the library in the debug mode (default. no)
@ -1534,7 +1537,7 @@ Use these variables to override the choices made by `configure' or to help
it to find libraries and programs with nonstandard names/locations.
Report bugs to <Chung, Hyung-Hwan (hyunghwan.chung@gmail.com)>.
hio home page: <http://code.miflux.com/@hio>.
hio home page: <http://code.miflux.com/hyung-hwan/hio>.
_ACEOF
ac_status=$?
fi
@ -17084,6 +17087,16 @@ fi
ALL_STATIC_LIBS="$with_all_static_libs"
# Check whether --enable-quadmath was given.
if test ${enable_quadmath+y}
then :
enableval=$enable_quadmath; enable_quadmath_is=$enableval
else $as_nop
enable_quadmath_is=no
fi
# Check whether --enable-ssl was given.
if test ${enable_ssl+y}
then :
@ -20163,6 +20176,11 @@ fi
rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
fi
if test "x${enable_quadmath_is}" == "xno"
then
ac_cv_sizeof___float128=0
fi
if test ${ac_cv_sizeof___float128} -gt 0
then
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking __float128 with linking" >&5
@ -20192,7 +20210,6 @@ else $as_nop
printf "%s\n" "no" >&6; }
ac_cv_sizeof___float128=0
fi
rm -f core conftest.err conftest.$ac_objext conftest.beam \
conftest$ac_exeext conftest.$ac_ext
@ -21415,7 +21432,7 @@ Configuration commands:
$config_commands
Report bugs to <Chung, Hyung-Hwan (hyunghwan.chung@gmail.com)>.
hio home page: <http://code.miflux.com/@hio>."
hio home page: <http://code.miflux.com/hyung-hwan/hio>."
_ACEOF
ac_cs_config=`printf "%s\n" "$ac_configure_args" | sed "$ac_safe_unquote"`

View File

@ -1,6 +1,6 @@
dnl AC_PREREQ([2.71])
AC_INIT([hio],[0.1.0],[Chung, Hyung-Hwan (hyunghwan.chung@gmail.com)],[],[http://code.miflux.com/@hio])
AC_INIT([hio],[0.1.0],[Chung, Hyung-Hwan (hyunghwan.chung@gmail.com)],[],[http://code.miflux.com/hyung-hwan/hio])
AC_CONFIG_HEADERS([lib/hio-cfg.h])
AC_CONFIG_AUX_DIR([ac])
@ -265,6 +265,13 @@ AC_ARG_WITH([all-static-libs],
ALL_STATIC_LIBS="$with_all_static_libs"
AC_SUBST(ALL_STATIC_LIBS)
dnl ===== enable-quadmath =====
AC_ARG_ENABLE([quadmath],
[AS_HELP_STRING([--enable-quadmath],[attempt to support 128-bit floating point number if available(default. no)])],
enable_quadmath_is=$enableval,
enable_quadmath_is=no
)
dnl ===== enable-ssl =====
AC_ARG_ENABLE([ssl],
[AS_HELP_STRING([--enable-ssl],[build the library in the ssl mode (default. yes)])],
@ -672,6 +679,12 @@ then
)
fi
dnl if quadmath is not enabled, zero the sizeof float128
if test "x${enable_quadmath_is}" == "xno"
then
ac_cv_sizeof___float128=0
fi
dnl Some compilers don't seem to have full support for __float128
dnl even if the type is available.
if test ${ac_cv_sizeof___float128} -gt 0
@ -684,7 +697,6 @@ then
]])],[AC_MSG_RESULT(yes)],[
AC_MSG_RESULT(no)
ac_cv_sizeof___float128=0
])
fi

View File

@ -255,7 +255,7 @@ static int sck_on_read (hio_dev_sck_t* sck, const void* data, hio_iolen_t dlen,
/* the complete body is in conn->r.buf */
if (conn->r.type == HIO_FCGI_END_REQUEST)
{
hio_fcgi_end_request_body_t* erb = conn->r.buf;
hio_fcgi_end_request_body_t* erb = (hio_fcgi_end_request_body_t*)conn->r.buf;
if (erb->proto_status != HIO_FCGI_REQUEST_COMPLETE)
{

View File

@ -286,7 +286,7 @@ static void cgi_peer_on_close (hio_dev_pro_t* pro, hio_dev_pro_sid_t sid)
if (!(cgi->over & CGI_OVER_READ_FROM_PEER))
{
if (hio_svc_htts_task_endbody(cgi) <= -1)
if (hio_svc_htts_task_endbody((hio_svc_htts_task_t*)cgi) <= -1)
cgi_halt_participating_devices (cgi);
else
cgi_mark_over (cgi, CGI_OVER_READ_FROM_PEER);
@ -331,7 +331,7 @@ static int cgi_peer_on_read (hio_dev_pro_t* pro, hio_dev_pro_sid_t sid, const vo
/* the cgi script could be misbehaving.
* it still has to read more but EOF is read.
* otherwise peer_htrd_poke() should have been called */
n = hio_svc_htts_task_endbody(cgi);
n = hio_svc_htts_task_endbody((hio_svc_htts_task_t*)cgi);
cgi_mark_over (cgi, CGI_OVER_READ_FROM_PEER);
if (n <= -1) goto oops;
}
@ -348,7 +348,7 @@ static int cgi_peer_on_read (hio_dev_pro_t* pro, hio_dev_pro_sid_t sid, const vo
if (!cgi->task_res_started && !(cgi->over & CGI_OVER_WRITE_TO_CLIENT))
{
hio_svc_htts_task_sendfinalres (cgi, HIO_HTTP_STATUS_BAD_GATEWAY, HIO_NULL, HIO_NULL, 1); /* don't care about error because it jumps to oops below anyway */
hio_svc_htts_task_sendfinalres ((hio_svc_htts_task_t*)cgi, HIO_HTTP_STATUS_BAD_GATEWAY, HIO_NULL, HIO_NULL, 1); /* don't care about error because it jumps to oops below anyway */
}
goto oops;
@ -421,7 +421,7 @@ oops:
static int peer_capture_response_header (hio_htre_t* req, const hio_bch_t* key, const hio_htre_hdrval_t* val, void* ctx)
{
return hio_svc_htts_task_addreshdrs((cgi_t*)ctx, key, val);
return hio_svc_htts_task_addreshdrs((hio_svc_htts_task_t*)(cgi_t*)ctx, key, val);
}
static int peer_htrd_peek (hio_htrd_t* htrd, hio_htre_t* req)
@ -440,9 +440,9 @@ static int peer_htrd_peek (hio_htrd_t* htrd, hio_htre_t* req)
chunked = cgi->task_keep_client_alive && !req->attr.content_length;
if (hio_svc_htts_task_startreshdr(cgi, status_code, status_desc, chunked) <= -1 ||
if (hio_svc_htts_task_startreshdr((hio_svc_htts_task_t*)cgi, status_code, status_desc, chunked) <= -1 ||
hio_htre_walkheaders(req, peer_capture_response_header, cgi) <= -1 ||
hio_svc_htts_task_endreshdr(cgi) <= -1) return -1;
hio_svc_htts_task_endreshdr((hio_svc_htts_task_t*)cgi) <= -1) return -1;
}
return 0;
@ -455,7 +455,7 @@ static int peer_htrd_poke (hio_htrd_t* htrd, hio_htre_t* req)
cgi_t* cgi = peer->cgi;
int n;
n = hio_svc_htts_task_endbody(cgi);
n = hio_svc_htts_task_endbody((hio_svc_htts_task_t*)cgi);
cgi_mark_over (cgi, CGI_OVER_READ_FROM_PEER);
return n;
}
@ -468,7 +468,7 @@ static int peer_htrd_push_content (hio_htrd_t* htrd, hio_htre_t* req, const hio_
HIO_ASSERT (cgi->htts->hio, htrd == cgi->peer_htrd);
n = hio_svc_htts_task_addresbody(cgi, data, dlen);
n = hio_svc_htts_task_addresbody((hio_svc_htts_task_t*)cgi, data, dlen);
if (cgi->task_res_pending_writes > CGI_PENDING_IO_THRESHOLD)
{
if (hio_dev_pro_read(cgi->peer, HIO_DEV_PRO_OUT, 0) <= -1) n = -1;
@ -1036,7 +1036,7 @@ int hio_svc_htts_docgi (hio_svc_htts_t* htts, hio_dev_sck_t* csck, hio_htre_t* r
}
bound_to_peer = 1;
if (hio_svc_htts_task_handleexpect100(cgi, 0) <= -1) goto oops;
if (hio_svc_htts_task_handleexpect100((hio_svc_htts_task_t*)cgi, 0) <= -1) goto oops;
if (setup_for_content_length(cgi, req) <= -1) goto oops;
/* TODO: store current input watching state and use it when destroying the cgi data */
@ -1055,7 +1055,7 @@ oops:
HIO_DEBUG3 (hio, "HTTS(%p) - FAILURE in docgi - socket(%p) - %js\n", htts, csck, hio_geterrmsg(hio));
if (cgi)
{
hio_svc_htts_task_sendfinalres(cgi, status_code, HIO_NULL, HIO_NULL, 1);
hio_svc_htts_task_sendfinalres((hio_svc_htts_task_t*)cgi, status_code, HIO_NULL, HIO_NULL, 1);
if (bound_to_peer) unbind_task_from_peer (cgi, 1);
if (bound_to_client) unbind_task_from_client (cgi, 1);
cgi_halt_participating_devices (cgi);

View File

@ -177,7 +177,7 @@ static void fcgi_peer_on_untie (hio_svc_fcgic_sess_t* peer, void* ctx)
HIO_DEBUG5 (hio, "HTTS(%p) - fcgi(t=%p,c=%p[%d],p=%p) - untieing peer\n", fcgi->htts, fcgi, fcgi->task_client, (fcgi->task_csck? fcgi->task_csck->hnd: -1), fcgi->peer);
fcgi->peer = HIO_NULL; /* to avoid infinite loop as explained above */
hio_svc_htts_task_endbody (fcgi);
hio_svc_htts_task_endbody ((hio_svc_htts_task_t*)fcgi);
unbind_task_from_peer (fcgi, 1);
HIO_DEBUG5 (hio, "HTTS(%p) - fcgi(t=%p,c=%p[%d],p=%p) - untied peer\n", fcgi->htts, fcgi, fcgi->task_client, (fcgi->task_csck? fcgi->task_csck->hnd: -1), fcgi->peer);
@ -205,7 +205,7 @@ static int fcgi_peer_on_read (hio_svc_fcgic_sess_t* peer, const void* data, hio_
/* the fcgi script could be misbehaving.
* it still has to read more but EOF is read.
* otherwise peer_htrd_poke() should have been called */
n = hio_svc_htts_task_endbody(fcgi);
n = hio_svc_htts_task_endbody((hio_svc_htts_task_t*)fcgi);
fcgi_mark_over (fcgi, FCGI_OVER_READ_FROM_PEER);
if (n <= -1) goto oops;
}
@ -222,7 +222,7 @@ static int fcgi_peer_on_read (hio_svc_fcgic_sess_t* peer, const void* data, hio_
if (!fcgi->task_res_started && !(fcgi->over & FCGI_OVER_WRITE_TO_CLIENT))
{
hio_svc_htts_task_sendfinalres (fcgi, HIO_HTTP_STATUS_BAD_GATEWAY, HIO_NULL, HIO_NULL, 1); /* don't care about error because it jumps to oops below anyway */
hio_svc_htts_task_sendfinalres ((hio_svc_htts_task_t*)fcgi, HIO_HTTP_STATUS_BAD_GATEWAY, HIO_NULL, HIO_NULL, 1); /* don't care about error because it jumps to oops below anyway */
}
goto oops;
@ -284,9 +284,9 @@ static int peer_htrd_peek (hio_htrd_t* htrd, hio_htre_t* req)
chunked = fcgi->task_keep_client_alive && !req->attr.content_length;
if (hio_svc_htts_task_startreshdr(fcgi, status_code, status_desc, chunked) <= -1 ||
if (hio_svc_htts_task_startreshdr((hio_svc_htts_task_t*)fcgi, status_code, status_desc, chunked) <= -1 ||
hio_htre_walkheaders(req, peer_capture_response_header, fcgi) <= -1 ||
hio_svc_htts_task_endreshdr(fcgi) <= -1) return -1;
hio_svc_htts_task_endreshdr((hio_svc_htts_task_t*)fcgi) <= -1) return -1;
}
return 0;
@ -299,7 +299,7 @@ static int peer_htrd_poke (hio_htrd_t* htrd, hio_htre_t* req)
fcgi_t* fcgi = peer->fcgi;
int n;
n = hio_svc_htts_task_endbody(fcgi);
n = hio_svc_htts_task_endbody((hio_svc_htts_task_t*)fcgi);
fcgi_mark_over (fcgi, FCGI_OVER_READ_FROM_PEER);
return n;
}
@ -309,7 +309,7 @@ static int peer_htrd_push_content (hio_htrd_t* htrd, hio_htre_t* req, const hio_
fcgi_peer_xtn_t* peer = hio_htrd_getxtn(htrd);
fcgi_t* fcgi = peer->fcgi;
HIO_ASSERT (fcgi->htts->hio, htrd == fcgi->peer_htrd);
return hio_svc_htts_task_addresbody(fcgi, data, dlen);
return hio_svc_htts_task_addresbody((hio_svc_htts_task_t*)fcgi, data, dlen);
}
static hio_htrd_recbs_t peer_htrd_recbs =
@ -786,7 +786,7 @@ int hio_svc_htts_dofcgi (hio_svc_htts_t* htts, hio_dev_sck_t* csck, hio_htre_t*
if (bind_task_to_peer(fcgi, fcgis_addr) <= -1) goto oops;
bound_to_peer = 1;
if (hio_svc_htts_task_handleexpect100(fcgi, 0) <= -1) goto oops;
if (hio_svc_htts_task_handleexpect100((hio_svc_htts_task_t*)fcgi, 0) <= -1) goto oops;
if (setup_for_content_length(fcgi, req) <= -1) goto oops;
/* TODO: store current input watching state and use it when destroying the fcgi data */
@ -810,7 +810,7 @@ oops:
HIO_DEBUG2 (hio, "HTTS(%p) - FAILURE in dofcgi - socket(%p)\n", htts, csck);
if (fcgi)
{
hio_svc_htts_task_sendfinalres(fcgi, status_code, HIO_NULL, HIO_NULL, 1);
hio_svc_htts_task_sendfinalres((hio_svc_htts_task_t*)fcgi, status_code, HIO_NULL, HIO_NULL, 1);
if (bound_to_peer) unbind_task_from_peer (fcgi, 1);
if (bound_to_client) unbind_task_from_client (fcgi, 1);
fcgi_halt_participating_devices (fcgi);

View File

@ -945,7 +945,7 @@ oops:
HIO_DEBUG2 (hio, "HTTS(%p) - file(c=%d) failure\n", htts, csck->hnd);
if (file)
{
hio_svc_htts_task_sendfinalres(file, status_code, HIO_NULL, HIO_NULL, 1);
hio_svc_htts_task_sendfinalres((hio_svc_htts_task_t*)file, status_code, HIO_NULL, HIO_NULL, 1);
if (bound_to_peer) unbind_task_from_peer (file, 0);
if (bound_to_client) unbind_task_from_client (file, 0);
file_halt_participating_devices (file);

View File

@ -23,7 +23,7 @@
*/
#include "http-prv.h"
#include <hio-pro.h>
#include <hio-sck.h>
#include <hio-fmt.h>
#include <hio-chr.h>
#include <hio-dns.h>
@ -56,7 +56,7 @@ struct prxy_t
int options;
hio_oow_t peer_pending_writes;
hio_dev_pro_t* peer;
hio_dev_sck_t* peer;
hio_htrd_t* peer_htrd;
unsigned int over: 4; /* must be large enough to accommodate PRXY_OVER_ALL */
@ -85,7 +85,7 @@ static void prxy_halt_participating_devices (prxy_t* prxy)
if (prxy->task_csck) hio_dev_sck_halt (prxy->task_csck);
/* check for peer as it may not have been started */
if (prxy->peer) hio_dev_pro_halt (prxy->peer);
if (prxy->peer) hio_dev_sck_halt (prxy->peer);
}
static int prxy_write_to_peer (prxy_t* prxy, const void* data, hio_iolen_t dlen)
@ -93,7 +93,7 @@ static int prxy_write_to_peer (prxy_t* prxy, const void* data, hio_iolen_t dlen)
if (prxy->peer)
{
prxy->peer_pending_writes++;
if (hio_dev_pro_write(prxy->peer, data, dlen, HIO_NULL) <= -1)
if (hio_dev_sck_write(prxy->peer, data, dlen, HIO_NULL, HIO_NULL) <= -1)
{
prxy->peer_pending_writes--;
return -1;
@ -130,10 +130,10 @@ static HIO_INLINE void prxy_mark_over (prxy_t* prxy, int over_bits)
if (!(old_over & PRXY_OVER_READ_FROM_PEER) && (prxy->over & PRXY_OVER_READ_FROM_PEER))
{
if (prxy->peer && hio_dev_pro_read(prxy->peer, HIO_DEV_PRO_OUT, 0) <= -1)
if (prxy->peer && hio_dev_sck_read(prxy->peer, 0) <= -1)
{
HIO_DEBUG5 (hio, "HTTS(%p) - prxy(t=%p,c=%p[%d],p=%p) - halting peer for failure to disable input watching\n", prxy->htts, prxy, prxy->task_client, (prxy->task_csck? prxy->task_csck->hnd: -1), prxy->peer);
hio_dev_pro_halt (prxy->peer);
hio_dev_sck_halt (prxy->peer);
}
}
@ -143,7 +143,7 @@ static HIO_INLINE void prxy_mark_over (prxy_t* prxy, int over_bits)
if (prxy->peer)
{
HIO_DEBUG5 (hio, "HTTS(%p) - prxy(t=%p,c=%p[%d],p=%p) - halting unneeded peer\n", prxy->htts, prxy, prxy->task_client, (prxy->task_csck? prxy->task_csck->hnd: -1), prxy->peer);
hio_dev_pro_halt (prxy->peer);
hio_dev_sck_halt (prxy->peer);
}
if (prxy->task_csck)
@ -249,7 +249,7 @@ static int prxy_peer_on_read (hio_dev_sck_t* sck, const void* data, hio_iolen_t
/* the prxy script could be misbehaving.
* it still has to read more but EOF is read.
* otherwise peer_htrd_poke() should have been called */
n = hio_svc_htts_task_endbody(prxy);
n = hio_svc_htts_task_endbody((hio_svc_htts_task_t*)prxy);
prxy_mark_over (prxy, PRXY_OVER_READ_FROM_PEER);
if (n <= -1) goto oops;
}
@ -266,7 +266,7 @@ static int prxy_peer_on_read (hio_dev_sck_t* sck, const void* data, hio_iolen_t
if (!prxy->task_res_started && !(prxy->over & PRXY_OVER_WRITE_TO_CLIENT))
{
hio_svc_htts_task_sendfinalres (prxy, HIO_HTTP_STATUS_BAD_GATEWAY, HIO_NULL, HIO_NULL, 1); /* don't care about error because it jumps to oops below anyway */
hio_svc_htts_task_sendfinalres ((hio_svc_htts_task_t*)prxy, HIO_HTTP_STATUS_BAD_GATEWAY, HIO_NULL, HIO_NULL, 1); /* don't care about error because it jumps to oops below anyway */
}
goto oops;
@ -288,7 +288,7 @@ oops:
static int prxy_peer_on_write (hio_dev_sck_t* sck, hio_iolen_t wrlen, void* wrctx, const hio_skad_t* dstaddr)
{
hio_t* hio = sck->hio;
prxy_peer_xtn_t* peer = hio_dev_pro_getxtn(sck);
prxy_peer_xtn_t* peer = hio_dev_sck_getxtn(sck);
prxy_t* prxy = peer->prxy;
if (!prxy) return 0; /* there is nothing i can do. the prxy is being cleared or has been cleared already. */
@ -339,7 +339,7 @@ oops:
static int peer_capture_response_header (hio_htre_t* req, const hio_bch_t* key, const hio_htre_hdrval_t* val, void* ctx)
{
return hio_svc_htts_task_addreshdrs((prxy_t*)ctx, key, val);
return hio_svc_htts_task_addreshdrs((hio_svc_htts_task_t*)(prxy_t*)ctx, key, val);
}
static int peer_htrd_peek (hio_htrd_t* htrd, hio_htre_t* req)
@ -358,9 +358,9 @@ static int peer_htrd_peek (hio_htrd_t* htrd, hio_htre_t* req)
chunked = prxy->task_keep_client_alive && !req->attr.content_length;
if (hio_svc_htts_task_startreshdr(prxy, status_code, status_desc, chunked) <= -1 ||
if (hio_svc_htts_task_startreshdr((hio_svc_htts_task_t*)prxy, status_code, status_desc, chunked) <= -1 ||
hio_htre_walkheaders(req, peer_capture_response_header, prxy) <= -1 ||
hio_svc_htts_task_endreshdr(prxy) <= -1) return -1;
hio_svc_htts_task_endreshdr((hio_svc_htts_task_t*)prxy) <= -1) return -1;
}
return 0;
@ -373,7 +373,7 @@ static int peer_htrd_poke (hio_htrd_t* htrd, hio_htre_t* req)
prxy_t* prxy = peer->prxy;
int n;
n = hio_svc_htts_task_endbody(prxy);
n = hio_svc_htts_task_endbody((hio_svc_htts_task_t*)prxy);
prxy_mark_over (prxy, PRXY_OVER_READ_FROM_PEER);
return n;
}
@ -386,10 +386,10 @@ static int peer_htrd_push_content (hio_htrd_t* htrd, hio_htre_t* req, const hio_
HIO_ASSERT (prxy->htts->hio, htrd == prxy->peer_htrd);
n = hio_svc_htts_task_addresbody(prxy, data, dlen);
n = hio_svc_htts_task_addresbody((hio_svc_htts_task_t*)prxy, data, dlen);
if (prxy->task_res_pending_writes > PRXY_PENDING_IO_THRESHOLD)
{
if (hio_dev_pro_read(prxy->peer, HIO_DEV_PRO_OUT, 0) <= -1) n = -1;
if (hio_dev_sck_read(prxy->peer, 0) <= -1) n = -1;
}
return n;
@ -447,7 +447,7 @@ static void prxy_client_on_disconnect (hio_dev_sck_t* sck)
if (prxy)
{
HIO_SVC_HTTS_TASK_RCUP (prxy);
HIO_SVC_HTTS_TASK_RCUP ((hio_svc_htts_task_t*)prxy);
/* detach the task from the client and the client socket */
unbind_task_from_client (prxy, 1);
@ -456,7 +456,7 @@ static void prxy_client_on_disconnect (hio_dev_sck_t* sck)
/*if (fprxy->client_org_on_disconnect) fprxy->client_org_on_disconnect (sck);*/
if (sck->on_disconnect) sck->on_disconnect (sck); /* restored to the original parent handler in unbind_task_from_client() */
HIO_SVC_HTTS_TASK_RCDOWN (prxy);
HIO_SVC_HTTS_TASK_RCDOWN ((hio_svc_htts_task_t*)prxy);
}
HIO_DEBUG4 (hio, "HTTS(%p) - prxy(t=%p,c=%p,csck=%p) - client socket disconnect handled\n", htts, prxy, cli, sck);
@ -527,7 +527,7 @@ static int prxy_client_on_write (hio_dev_sck_t* sck, hio_iolen_t wrlen, void* wr
{
/* enable input watching */
if (!(prxy->over & PRXY_OVER_READ_FROM_PEER) &&
hio_dev_pro_read(prxy->peer, HIO_DEV_PRO_OUT, 1) <= -1) n = -1;
hio_dev_sck_read(prxy->peer, 1) <= -1) n = -1;
}
if ((prxy->over & PRXY_OVER_READ_FROM_PEER) && prxy->task_res_pending_writes <= 0)
@ -591,7 +591,7 @@ static int peer_capture_request_header (hio_htre_t* req, const hio_bch_t* key, c
return 0;
}
static int prxy_peer_on_fork (hio_dev_pro_t* pro, void* fork_ctx)
static int prxy_peer_on_fork (hio_dev_sck_t* pro, void* fork_ctx)
{
hio_t* hio = pro->hio; /* in this callback, the pro device is not fully up. however, the hio field is guaranteed to be available */
peer_fork_ctx_t* fc = (peer_fork_ctx_t*)fork_ctx;
@ -837,7 +837,7 @@ static int bind_task_to_peer (prxy_t* prxy, hio_dev_sck_t* csck, hio_htre_t* req
prxy->peer = sck;
prxy->peer_htrd = htrd;
pxtn = hio_dev_pro_getxtn(prxy->peer);
pxtn = hio_dev_sck_getxtn(prxy->peer);
pxtn->prxy = prxy;
HIO_SVC_HTTS_TASK_RCUP (prxy);
@ -867,10 +867,10 @@ static void unbind_task_from_peer (prxy_t* prxy, int rcdown)
if (prxy->peer)
{
prxy_peer_xtn_t* peer_xtn;
peer_xtn = hio_dev_pro_getxtn(prxy->peer);
peer_xtn = hio_dev_sck_getxtn(prxy->peer);
peer_xtn->prxy = HIO_NULL;
hio_dev_pro_kill (prxy->peer);
hio_dev_sck_kill (prxy->peer);
prxy->peer = HIO_NULL;
n++;
}
@ -947,12 +947,12 @@ int hio_svc_htts_doprxy (hio_svc_htts_t* htts, hio_dev_sck_t* csck, hio_htre_t*
if ((n = bind_task_to_peer(prxy, csck, req, tgt_addr)) <= -1)
{
hio_svc_htts_task_sendfinalres(prxy, (n == 2? HIO_HTTP_STATUS_FORBIDDEN: HIO_HTTP_STATUS_INTERNAL_SERVER_ERROR), HIO_NULL, HIO_NULL, 1);
hio_svc_htts_task_sendfinalres((hio_svc_htts_task_t*)prxy, (n == 2? HIO_HTTP_STATUS_FORBIDDEN: HIO_HTTP_STATUS_INTERNAL_SERVER_ERROR), HIO_NULL, HIO_NULL, 1);
goto oops; /* TODO: must not go to oops. just destroy the prxy and finalize the request .. */
}
bound_to_peer = 1;
if (hio_svc_htts_task_handleexpect100(prxy, 0) <= -1) goto oops;
if (hio_svc_htts_task_handleexpect100((hio_svc_htts_task_t*)prxy, 0) <= -1) goto oops;
if (setup_for_content_length(prxy, req) <= -1) goto oops;
/* TODO: store current input watching state and use it when destroying the prxy data */
@ -970,7 +970,7 @@ oops:
HIO_DEBUG2 (hio, "HTTS(%p) - FAILURE in doprxy - socket(%p)\n", htts, csck);
if (prxy)
{
hio_svc_htts_task_sendfinalres(prxy, status_code, HIO_NULL, HIO_NULL, 1);
hio_svc_htts_task_sendfinalres((hio_svc_htts_task_t*)prxy, status_code, HIO_NULL, HIO_NULL, 1);
if (bound_to_peer) unbind_task_from_peer (prxy, 1);
if (bound_to_client) unbind_task_from_client (prxy, 1);
prxy_halt_participating_devices (prxy);

View File

@ -219,7 +219,7 @@ static void thr_peer_on_close (hio_dev_thr_t* peer, hio_dev_thr_sid_t sid)
if (!(thr->over & THR_OVER_READ_FROM_PEER))
{
if (hio_svc_htts_task_endbody(thr) <= -1)
if (hio_svc_htts_task_endbody((hio_svc_htts_task_t*)thr) <= -1)
thr_halt_participating_devices (thr);
else
thr_mark_over (thr, THR_OVER_READ_FROM_PEER);
@ -261,7 +261,7 @@ static int thr_peer_on_read (hio_dev_thr_t* peer, const void* data, hio_iolen_t
/* the thr script could be misbehaving.
* it still has to read more but EOF is read.
* otherwise client_peer_htrd_poke() should have been called */
n = hio_svc_htts_task_endbody(thr);
n = hio_svc_htts_task_endbody((hio_svc_htts_task_t*)thr);
thr_mark_over (thr, THR_OVER_READ_FROM_PEER);
if (n <= -1) goto oops;
}
@ -278,7 +278,7 @@ static int thr_peer_on_read (hio_dev_thr_t* peer, const void* data, hio_iolen_t
if (!thr->task_res_started && !(thr->over & THR_OVER_WRITE_TO_CLIENT))
{
hio_svc_htts_task_sendfinalres (thr, HIO_HTTP_STATUS_BAD_GATEWAY, HIO_NULL, HIO_NULL, 1); /* don't care about error because it jumps to oops below anyway */
hio_svc_htts_task_sendfinalres ((hio_svc_htts_task_t*)thr, HIO_HTTP_STATUS_BAD_GATEWAY, HIO_NULL, HIO_NULL, 1); /* don't care about error because it jumps to oops below anyway */
}
goto oops;
@ -302,7 +302,7 @@ oops:
static int peer_capture_response_header (hio_htre_t* req, const hio_bch_t* key, const hio_htre_hdrval_t* val, void* ctx)
{
thr_t* thr = (thr_t*)ctx;
return hio_svc_htts_task_addreshdrs(thr, key, val);
return hio_svc_htts_task_addreshdrs((hio_svc_htts_task_t*)thr, key, val);
}
static int thr_peer_htrd_peek (hio_htrd_t* htrd, hio_htre_t* req)
@ -321,9 +321,9 @@ static int thr_peer_htrd_peek (hio_htrd_t* htrd, hio_htre_t* req)
chunked = thr->task_keep_client_alive && !req->attr.content_length;
if (hio_svc_htts_task_startreshdr(thr, status_code, status_desc, chunked) <= -1 ||
if (hio_svc_htts_task_startreshdr((hio_svc_htts_task_t*)thr, status_code, status_desc, chunked) <= -1 ||
hio_htre_walkheaders(req, peer_capture_response_header, thr) <= -1 ||
hio_svc_htts_task_endreshdr(thr) <= -1) return -1;
hio_svc_htts_task_endreshdr((hio_svc_htts_task_t*)thr) <= -1) return -1;
}
return 0;
@ -336,7 +336,7 @@ static int thr_peer_htrd_poke (hio_htrd_t* htrd, hio_htre_t* req)
thr_t* thr = pxtn->task;
int n;
n = hio_svc_htts_task_endbody(thr);
n = hio_svc_htts_task_endbody((hio_svc_htts_task_t*)thr);
thr_mark_over (thr, THR_OVER_READ_FROM_PEER);
return n;
}
@ -349,7 +349,7 @@ static int thr_peer_htrd_push_content (hio_htrd_t* htrd, hio_htre_t* req, const
HIO_ASSERT (thr->htts->hio, htrd == thr->peer_htrd);
n = hio_svc_htts_task_addresbody(thr, data, dlen);
n = hio_svc_htts_task_addresbody((hio_svc_htts_task_t*)thr, data, dlen);
if (thr->task_res_pending_writes > THR_PENDING_IO_THRESHOLD)
{
if (hio_dev_thr_read(thr->peer, 0) <= -1) n = -1;
@ -461,7 +461,7 @@ static void thr_client_on_disconnect (hio_dev_sck_t* sck)
if (thr)
{
HIO_SVC_HTTS_TASK_RCUP (thr);
HIO_SVC_HTTS_TASK_RCUP ((hio_svc_htts_task_t*)thr);
unbind_task_from_client (thr, 1);
@ -469,7 +469,7 @@ static void thr_client_on_disconnect (hio_dev_sck_t* sck)
/*if (thr->client_org_on_disconnect) thr->client_org_on_disconnect (sck);*/
if (sck->on_disconnect) sck->on_disconnect (sck); /* restored to the original parent handler in unbind_task_from_client() */
HIO_SVC_HTTS_TASK_RCDOWN (thr);
HIO_SVC_HTTS_TASK_RCDOWN ((hio_svc_htts_task_t*)thr);
}
HIO_DEBUG4 (hio, "HTTS(%p) - thr(t=%p,c=%p,csck=%p) - client socket disconnect handled\n", htts, thr, cli, sck);
@ -869,7 +869,7 @@ int hio_svc_htts_dothr (hio_svc_htts_t* htts, hio_dev_sck_t* csck, hio_htre_t* r
if (bind_task_to_peer(thr, csck, req, func, ctx) <= -1) goto oops;
bound_to_peer = 1;
if (hio_svc_htts_task_handleexpect100(thr, 0) <= -1) goto oops;
if (hio_svc_htts_task_handleexpect100((hio_svc_htts_task_t*)thr, 0) <= -1) goto oops;
if (setup_for_content_length(thr, req) <= -1) goto oops;
/* TODO: store current input watching state and use it when destroying the thr data */
@ -887,7 +887,7 @@ oops:
HIO_DEBUG2 (hio, "HTTS(%p) - FAILURE in dothr - socket(%p)\n", htts, csck);
if (thr)
{
hio_svc_htts_task_sendfinalres(thr, status_code, HIO_NULL, HIO_NULL, 1);
hio_svc_htts_task_sendfinalres((hio_svc_htts_task_t*)thr, status_code, HIO_NULL, HIO_NULL, 1);
if (bound_to_peer) unbind_task_from_peer (thr, 1);
if (bound_to_client) unbind_task_from_client (thr, 1);
thr_halt_participating_devices (thr);

View File

@ -358,13 +358,13 @@ int hio_svc_htts_dotxt (hio_svc_htts_t* htts, hio_dev_sck_t* csck, hio_htre_t* r
bind_task_to_client (txt, csck);
bound_to_client = 1;
if (hio_svc_htts_task_handleexpect100(txt, 1) <= -1) goto oops;
if (hio_svc_htts_task_handleexpect100((hio_svc_htts_task_t*)txt, 1) <= -1) goto oops;
if (setup_for_content_length(txt, req) <= -1) goto oops;
/* TODO: store current input watching state and use it when destroying the txt data */
if (hio_dev_sck_read(csck, !(txt->over & TXT_OVER_READ_FROM_CLIENT)) <= -1) goto oops;
if (hio_svc_htts_task_sendfinalres(txt, res_status_code, content_type, content_text, 0) <= -1) goto oops;
if (hio_svc_htts_task_sendfinalres((hio_svc_htts_task_t*)txt, res_status_code, content_type, content_text, 0) <= -1) goto oops;
HIO_SVC_HTTS_TASKL_APPEND_TASK (&htts->task, (hio_svc_htts_task_t*)txt);
HIO_SVC_HTTS_TASK_RCDOWN ((hio_svc_htts_task_t*)txt);
@ -378,7 +378,7 @@ oops:
HIO_DEBUG2 (hio, "HTTS(%p) - FAILURE in dotxt - socket(%p)\n", htts, csck);
if (txt)
{
hio_svc_htts_task_sendfinalres(txt, status_code, HIO_NULL, HIO_NULL, 1);
hio_svc_htts_task_sendfinalres((hio_svc_htts_task_t*)txt, status_code, HIO_NULL, HIO_NULL, 1);
if (bound_to_client) unbind_task_from_client (txt, 1);
txt_halt_participating_devices (txt);
HIO_SVC_HTTS_TASK_RCDOWN ((hio_svc_htts_task_t*)txt);

View File

@ -311,7 +311,7 @@ static int dev_pipe_writev_slave (hio_dev_t* dev, const hio_iovec_t* iov, hio_io
return 1; /* indicate that the operation got successful. the core will execute on_write() with 0. */
}
x = writev(pipe->pfd, iov, *iovcnt);
x = writev(pipe->pfd, (struct iovec*)iov, *iovcnt);
if (x <= -1)
{
if (errno == EINPROGRESS || errno == EWOULDBLOCK || errno == EAGAIN) return 0; /* no data can be written */

View File

@ -668,7 +668,7 @@ static int dev_pro_writev_slave (hio_dev_t* dev, const hio_iovec_t* iov, hio_iol
return 1; /* indicate that the operation got successful. the core will execute on_write() with 0. */
}
x = writev(pro->pfd, iov, *iovcnt);
x = writev(pro->pfd, (struct iovec*)iov, *iovcnt);
if (x <= -1)
{
if (errno == EINPROGRESS || errno == EWOULDBLOCK || errno == EAGAIN) return 0; /* no data can be written */

View File

@ -439,7 +439,7 @@ static int dev_pty_writev (hio_dev_t* dev, const hio_iovec_t* iov, hio_iolen_t*
return 1; /* indicate that the operation got successful. the core will execute on_write() with 0. */
}
x = writev(pty->hnd, iov, *iovcnt);
x = writev(pty->hnd, (struct iovec*)iov, *iovcnt);
if (x <= -1)
{
if (errno == EINPROGRESS || errno == EWOULDBLOCK || errno == EAGAIN) return 0; /* no data can be written */

View File

@ -164,7 +164,7 @@ static int dev_shw_writev (hio_dev_t* dev, const hio_iovec_t* iov, hio_iolen_t*
return 1; /* indicate that the operation got successful. the core will execute on_write() with 0. */
}
x = writev(shw->hnd, iov, *iovcnt);
x = writev(shw->hnd, (struct iovec*)iov, *iovcnt);
if (x <= -1)
{
if (errno == EINPROGRESS || errno == EWOULDBLOCK || errno == EAGAIN) return 0; /* no data can be written */

View File

@ -484,7 +484,7 @@ static int dev_thr_writev_slave (hio_dev_t* dev, const hio_iovec_t* iov, hio_iol
return 1; /* indicate that the operation got successful. the core will execute on_write() with 0. */
}
x = writev(thr->pfd, iov, *iovcnt);
x = writev(thr->pfd, (struct iovec*)iov, *iovcnt);
if (x <= -1)
{
if (errno == EINPROGRESS || errno == EWOULDBLOCK || errno == EAGAIN) return 0; /* no data can be written */