HEAD support in http-fil.c

hyung-hwan 2020-07-17 10:14:15 +00:00
parent f2cffc0e6e
commit 7adf19f01d
4 changed files with 160 additions and 230 deletions

View File

@@ -370,15 +370,8 @@ static int file_client_on_write (mio_dev_sck_t* sck, mio_iolen_t wrlen, void* wr
     MIO_ASSERT (mio, file_state->num_pending_writes_to_client > 0);
     file_state->num_pending_writes_to_client--;
-#if 0
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-    if (file_state->peer && file_state->num_pending_writes_to_client == FILE_STATE_PENDING_IO_THRESHOLD)
-    {
-        if (!(file_state->over & FILE_STATE_OVER_READ_FROM_PEER) &&
-            mio_dev_pro_read(file_state->peer, MIO_DEV_PRO_OUT, 1) <= -1) goto oops;
-    }
-#endif
-    file_state_send_contents_to_client (file_state);
+    if (file_state->req_method == MIO_HTTP_GET)
+        file_state_send_contents_to_client (file_state);

     if ((file_state->over & FILE_STATE_OVER_READ_FROM_PEER) && file_state->num_pending_writes_to_client <= 0)
     {
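
Note: with this change the client-side write-completion callback only pumps further file contents for GET; for HEAD the header is the whole response, so there is nothing to refill and the transaction is finished by the OVER flags set in mio_svc_htts_dofile() further down. A minimal standalone model of this completion-driven pump (names here are illustrative, not mio API):

    #include <stdio.h>

    typedef struct {
        int is_get;      /* 1 for GET, 0 for HEAD */
        int chunks_left; /* body chunks still to send */
    } pump_state_t;

    /* invoked when one buffered write to the client completes */
    static void on_write_done(pump_state_t* st)
    {
        if (st->is_get && st->chunks_left > 0)
            st->chunks_left--; /* a GET keeps feeding the next chunk */
        /* a HEAD falls through: nothing follows the header */
    }

    int main(void)
    {
        pump_state_t get = { 1, 2 }, head = { 0, 0 };
        on_write_done(&get); on_write_done(&get); on_write_done(&head);
        printf("get: %d chunks left, head: %d\n", get.chunks_left, head.chunks_left);
        return 0;
    }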
@@ -598,6 +591,7 @@ static int open_peer (file_state_t* file_state, const mio_bch_t* actual_file)
     switch (file_state->req_method)
     {
         case MIO_HTTP_GET:
+        case MIO_HTTP_HEAD:
             if (access(actual_file, R_OK) == -1)
             {
                 file_state_send_final_status_to_client (file_state, ERRNO_TO_STATUS_CODE(errno), 1); /* 404 not found 403 Forbidden */
@@ -613,6 +607,7 @@ static int open_peer (file_state_t* file_state, const mio_bch_t* actual_file)
             return 0;

+#if 0
         case MIO_HTTP_PUT:
         case MIO_HTTP_POST:
             /* TODO: this is destructive. jump to default if not allowed by flags... */
@@ -633,6 +628,7 @@ static int open_peer (file_state_t* file_state, const mio_bch_t* actual_file)
                 return -1;
             }
             return 0;
+#endif

 #if 0
         case MIO_HTTP_DELETE:
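
Note: GET and HEAD now share the same access(actual_file, R_OK) check in open_peer(), reporting failures through ERRNO_TO_STATUS_CODE per the "404 not found 403 Forbidden" comment, while the destructive PUT/POST path is disabled with #if 0. A hedged sketch of that errno-to-status translation (errno_to_http_status is a hypothetical stand-in, not the mio macro):

    #include <errno.h>

    static int errno_to_http_status(int e)
    {
        switch (e) {
        case ENOENT: return 404; /* no such file */
        case EACCES: return 403; /* present but not readable */
        default:     return 500; /* any other failure is the server's problem */
        }
    }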
@@ -654,6 +650,8 @@ static MIO_INLINE void fadvise_on_peer (file_state_t* file_state)
 int mio_svc_htts_dofile (mio_svc_htts_t* htts, mio_dev_sck_t* csck, mio_htre_t* req, const mio_bch_t* docroot, const mio_bch_t* file)
 {
+    /* TODO: ETag, Last-Modified... */
+
     mio_t* mio = htts->mio;
     mio_svc_htts_cli_t* cli = mio_dev_sck_getxtn(csck);
     file_state_t* file_state = MIO_NULL;
@@ -708,23 +706,10 @@ int mio_svc_htts_dofile (mio_svc_htts_t* htts, mio_dev_sck_t* csck, mio_htre_t*
     if (req->flags & MIO_HTRE_ATTR_EXPECT100)
     {
-        /* TODO: check method. if GET, file contents can be transmitted without 100 continue ... */
         if (mio_comp_http_version_numbers(&req->version, 1, 1) >= 0 &&
-            (file_state->req_content_length_unlimited || file_state->req_content_length > 0))
+            (file_state->req_content_length_unlimited || file_state->req_content_length > 0) &&
+            (file_state->req_method != MIO_HTTP_GET && file_state->req_method != MIO_HTTP_HEAD))
         {
-            /*
-             * Don't send 100 Continue if the http version is lower than 1.1.
-             * [RFC7231]
-             * A server that receives a 100-continue expectation in an HTTP/1.0
-             * request MUST ignore that expectation.
-             *
-             * Don't send 100 Continue if the expected content length is 0.
-             * [RFC7231]
-             * A server MAY omit sending a 100 (Continue) response if it has
-             * already received some or all of the message body for the
-             * corresponding request, or if the framing indicates that there is
-             * no message body.
-             */
             mio_bch_t msgbuf[64];
             mio_oow_t msglen;
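
Note: the method check that the removed TODO asked for is now folded into the condition: a 100 (Continue) interim response is sent only for HTTP/1.1+ requests that actually announce a body, and never for GET or HEAD, whose responses do not depend on reading a request body. Restated as a standalone predicate (a sketch; the function and parameter names are hypothetical):

    static int should_send_100_continue(int ver_ge_1_1,
                                        int content_length_unlimited,
                                        unsigned long content_length,
                                        int is_get_or_head)
    {
        if (!ver_ge_1_1) return 0;     /* RFC 7231: MUST ignore 100-continue in HTTP/1.0 */
        if (!content_length_unlimited && content_length == 0) return 0; /* no body framed */
        if (is_get_or_head) return 0;  /* can respond without waiting for a body */
        return 1;
    }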
@@ -788,6 +773,11 @@ int mio_svc_htts_dofile (mio_svc_htts_t* htts, mio_dev_sck_t* csck, mio_htre_t*
         if (file_state_send_header_to_client(file_state, 200, 0) <= -1 ||
             file_state_send_contents_to_client(file_state) <= -1) goto oops;
     }
+    else if (file_state->req_method == MIO_HTTP_HEAD)
+    {
+        if (file_state_send_header_to_client(file_state, 200, 0) <= -1) goto oops;
+        file_state_mark_over (file_state, FILE_STATE_OVER_READ_FROM_PEER | FILE_STATE_OVER_WRITE_TO_PEER);
+    }

     /* TODO: store current input watching state and use it when destroying the file_state data */
     if (mio_dev_sck_read(csck, !(file_state->over & FILE_STATE_OVER_READ_FROM_CLIENT)) <= -1) goto oops;
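
Note: this is the core of the HEAD support. The same 200 header a GET would receive is written, then both peer directions are immediately marked over so no file contents are ever read or sent. Per RFC 7231, a HEAD response carries the headers of the corresponding GET, including Content-Length, but no body. A minimal illustration of what goes on the wire (format_head_response is a hypothetical helper, not mio API):

    #include <stdio.h>

    static int format_head_response(char* buf, size_t bufsz, unsigned long file_size)
    {
        /* status line and Content-Length only; no body follows the blank line */
        return snprintf(buf, bufsz,
                        "HTTP/1.1 200 OK\r\nContent-Length: %lu\r\n\r\n", file_size);
    }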

View File

@@ -153,7 +153,6 @@ static int listener_on_read (mio_dev_sck_t* sck, const void* buf, mio_iolen_t le
     if ((x = mio_htrd_feed(cli->htrd, buf, len, &rem)) <= -1)
     {
-printf ("** HTTS - client htrd feed failure socket(%p) - %d\n", sck, x);
         goto oops;
     }
@@ -172,7 +171,6 @@
     return 0;

 oops:
-printf ("HALTING CLIENT SOCKEXXT %p\n", sck);
     mio_dev_sck_halt (sck);
     return 0;
 }

View File

@@ -879,7 +879,7 @@ int mio_svc_htts_dothr (mio_svc_htts_t* htts, mio_dev_sck_t* csck, mio_htre_t* r
     MIO_SVC_HTTS_RSRC_ATTACH (thr_state, cli->rsrc);

     thr_state->peer = mio_dev_thr_make(mio, MIO_SIZEOF(*thr_peer), &mi);
-    if (MIO_UNLIKELY(!thr_state->peer)) goto oops;
+    if (MIO_UNLIKELY(!thr_state->peer)) { printf ("dev thr make failure. \n"); goto oops; }

     tfs = MIO_NULL; /* mark that tfs is delegated to the thread */

View File

@@ -1231,14 +1231,130 @@ static void on_write_timeout (mio_t* mio, const mio_ntime_t* now, mio_tmrjob_t*
     if (x <= -1) mio_dev_halt (dev);
 }

+static MIO_INLINE int __enqueue_completed_write (mio_dev_t* dev, mio_iolen_t len, void* wrctx, const mio_devaddr_t* dstaddr)
+{
+    mio_t* mio = dev->mio;
+    mio_cwq_t* cwq;
+    mio_oow_t cwq_extra_aligned, cwqfl_index;
+
+    cwq_extra_aligned = (dstaddr? dstaddr->len: 0);
+    cwq_extra_aligned = MIO_ALIGN_POW2(cwq_extra_aligned, MIO_CWQFL_ALIGN);
+    cwqfl_index = cwq_extra_aligned / MIO_CWQFL_SIZE;
+
+    if (cwqfl_index < MIO_COUNTOF(mio->cwqfl) && mio->cwqfl[cwqfl_index])
+    {
+        /* take an available cwq object from the free cwq list */
+        cwq = dev->mio->cwqfl[cwqfl_index];
+        dev->mio->cwqfl[cwqfl_index] = cwq->q_next;
+    }
+    else
+    {
+        cwq = (mio_cwq_t*)mio_allocmem(mio, MIO_SIZEOF(*cwq) + cwq_extra_aligned);
+        if (MIO_UNLIKELY(!cwq)) return -1;
+    }
+
+    MIO_MEMSET (cwq, 0, MIO_SIZEOF(*cwq));
+    cwq->dev = dev;
+    cwq->ctx = wrctx;
+    if (dstaddr)
+    {
+        cwq->dstaddr.ptr = (mio_uint8_t*)(cwq + 1);
+        cwq->dstaddr.len = dstaddr->len;
+        MIO_MEMCPY (cwq->dstaddr.ptr, dstaddr->ptr, dstaddr->len);
+    }
+    else
+    {
+        cwq->dstaddr.len = 0;
+    }
+
+    cwq->olen = len;
+
+    MIO_CWQ_ENQ (&dev->mio->cwq, cwq);
+    dev->cw_count++; /* increment the number of complete write operations */
+    return 0;
+}
+
+static MIO_INLINE int __enqueue_pending_write (mio_dev_t* dev, mio_iolen_t olen, mio_iolen_t urem, mio_iovec_t* iov, mio_iolen_t iov_cnt, mio_iolen_t iov_index, const mio_ntime_t* tmout, void* wrctx, const mio_devaddr_t* dstaddr)
+{
+    mio_t* mio = dev->mio;
+    mio_wq_t* q;
+    mio_iolen_t i, j;
+
+    if (dev->dev_cap & MIO_DEV_CAP_OUT_UNQUEUEABLE)
+    {
+        /* write queuing is not requested. so return failure */
+        mio_seterrbfmt (mio, MIO_ENOCAPA, "device incapable of queuing");
+        return -1;
+    }
+
+    /* queue the remaining data */
+    q = (mio_wq_t*)mio_allocmem(mio, MIO_SIZEOF(*q) + (dstaddr? dstaddr->len: 0) + urem);
+    if (MIO_UNLIKELY(!q)) return -1;
+
+    q->tmridx = MIO_TMRIDX_INVALID;
+    q->dev = dev;
+    q->ctx = wrctx;
+
+    if (dstaddr)
+    {
+        q->dstaddr.ptr = (mio_uint8_t*)(q + 1);
+        q->dstaddr.len = dstaddr->len;
+        MIO_MEMCPY (q->dstaddr.ptr, dstaddr->ptr, dstaddr->len);
+    }
+    else
+    {
+        q->dstaddr.len = 0;
+    }
+
+    q->ptr = (mio_uint8_t*)(q + 1) + q->dstaddr.len;
+    q->len = urem;
+    q->olen = olen; /* original length to use when invoking on_write() */
+    for (i = iov_index, j = 0; i < iov_cnt; i++)
+    {
+        MIO_MEMCPY (&q->ptr[j], iov[i].iov_ptr, iov[i].iov_len);
+        j += iov[i].iov_len;
+    }
+
+    if (tmout && MIO_IS_POS_NTIME(tmout))
+    {
+        mio_tmrjob_t tmrjob;
+
+        MIO_MEMSET (&tmrjob, 0, MIO_SIZEOF(tmrjob));
+        tmrjob.ctx = q;
+        mio_gettime (mio, &tmrjob.when);
+        MIO_ADD_NTIME (&tmrjob.when, &tmrjob.when, tmout);
+        tmrjob.handler = on_write_timeout;
+        tmrjob.idxptr = &q->tmridx;
+
+        q->tmridx = mio_instmrjob(mio, &tmrjob);
+        if (q->tmridx == MIO_TMRIDX_INVALID)
+        {
+            mio_freemem (mio, q);
+            return -1;
+        }
+    }
+
+    MIO_WQ_ENQ (&dev->wq, q);
+    if (!(dev->dev_cap & MIO_DEV_CAP_OUT_WATCHED))
+    {
+        /* if output is not being watched, arrange to do so */
+        if (mio_dev_watch(dev, MIO_DEV_WATCH_RENEW, MIO_DEV_EVENT_IN) <= -1)
+        {
+            unlink_wq (mio, q);
+            mio_freemem (mio, q);
+            return -1;
+        }
+    }
+
+    return 0; /* request pushed to a write queue. */
+}
+
 static int __dev_write (mio_dev_t* dev, const void* data, mio_iolen_t len, const mio_ntime_t* tmout, void* wrctx, const mio_devaddr_t* dstaddr)
 {
     mio_t* mio = dev->mio;
     const mio_uint8_t* uptr;
     mio_iolen_t urem, ulen;
-    mio_wq_t* q;
-    mio_cwq_t* cwq;
-    mio_oow_t cwq_extra_aligned, cwqfl_index;
+    mio_iovec_t iov;
     int x;

     if (dev->dev_cap & MIO_DEV_CAP_OUT_CLOSED)
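
Note: __enqueue_completed_write() recycles cwq records through size-bucketed free lists: the per-entry extra space (the destination-address bytes) is rounded up to MIO_CWQFL_ALIGN, and the rounded size selects a free-list slot. A self-contained illustration of the bucket arithmetic; the constants here are stand-ins for the mio macros:

    #include <stdio.h>

    #define CWQFL_ALIGN 16  /* must be a power of 2 */
    #define CWQFL_SIZE  16

    /* round x up to the next multiple of the power-of-2 'align' */
    static unsigned long align_pow2(unsigned long x, unsigned long align)
    {
        return (x + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        unsigned long dstaddr_len[] = { 0, 4, 16, 28 };
        for (int i = 0; i < 4; i++) {
            unsigned long extra = align_pow2(dstaddr_len[i], CWQFL_ALIGN);
            printf("dstaddr len %2lu -> extra %2lu -> free-list index %lu\n",
                   dstaddr_len[i], extra, extra / CWQFL_SIZE);
        }
        return 0;
    }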
@@ -1317,120 +1433,22 @@ static int __dev_write (mio_dev_t* dev, const void* data, mio_iolen_t len, const
             goto enqueue_completed_write;
         }

-        return 1; /* written immediately and called on_write callback */
+        return 1; /* written immediately and called on_write callback. but this line will never be reached */

 enqueue_data:
-    if (dev->dev_cap & MIO_DEV_CAP_OUT_UNQUEUEABLE)
-    {
-        /* write queuing is not requested. so return failure */
-        mio_seterrbfmt (mio, MIO_ENOCAPA, "device incapable of queuing");
-        return -1;
-    }
-
-    /* queue the remaining data */
-    q = (mio_wq_t*)mio_allocmem(mio, MIO_SIZEOF(*q) + (dstaddr? dstaddr->len: 0) + urem);
-    if (!q) return -1;
-
-    q->tmridx = MIO_TMRIDX_INVALID;
-    q->dev = dev;
-    q->ctx = wrctx;
-    if (dstaddr)
-    {
-        q->dstaddr.ptr = (mio_uint8_t*)(q + 1);
-        q->dstaddr.len = dstaddr->len;
-        MIO_MEMCPY (q->dstaddr.ptr, dstaddr->ptr, dstaddr->len);
-    }
-    else
-    {
-        q->dstaddr.len = 0;
-    }
-
-    q->ptr = (mio_uint8_t*)(q + 1) + q->dstaddr.len;
-    q->len = urem;
-    q->olen = len;
-    MIO_MEMCPY (q->ptr, uptr, urem);
-
-    if (tmout && MIO_IS_POS_NTIME(tmout))
-    {
-        mio_tmrjob_t tmrjob;
-
-        MIO_MEMSET (&tmrjob, 0, MIO_SIZEOF(tmrjob));
-        tmrjob.ctx = q;
-        mio_gettime (mio, &tmrjob.when);
-        MIO_ADD_NTIME (&tmrjob.when, &tmrjob.when, tmout);
-        tmrjob.handler = on_write_timeout;
-        tmrjob.idxptr = &q->tmridx;
-
-        q->tmridx = mio_instmrjob(mio, &tmrjob);
-        if (q->tmridx == MIO_TMRIDX_INVALID)
-        {
-            mio_freemem (mio, q);
-            return -1;
-        }
-    }
-
-    MIO_WQ_ENQ (&dev->wq, q);
-    if (!(dev->dev_cap & MIO_DEV_CAP_OUT_WATCHED))
-    {
-        /* if output is not being watched, arrange to do so */
-        if (mio_dev_watch(dev, MIO_DEV_WATCH_RENEW, MIO_DEV_EVENT_IN) <= -1)
-        {
-            unlink_wq (mio, q);
-            mio_freemem (mio, q);
-            return -1;
-        }
-    }
-    return 0; /* request pushed to a write queue. */
+    iov.iov_ptr = (void*)uptr;
+    iov.iov_len = urem;
+    return __enqueue_pending_write(dev, len, urem, &iov, 1, 0, tmout, wrctx, dstaddr);

 enqueue_completed_write:
-    /* queue the remaining data */
-    cwq_extra_aligned = (dstaddr? dstaddr->len: 0);
-    cwq_extra_aligned = MIO_ALIGN_POW2(cwq_extra_aligned, MIO_CWQFL_ALIGN);
-    cwqfl_index = cwq_extra_aligned / MIO_CWQFL_SIZE;
-
-    if (cwqfl_index < MIO_COUNTOF(mio->cwqfl) && mio->cwqfl[cwqfl_index])
-    {
-        /* take an available cwq object from the free cwq list */
-        cwq = dev->mio->cwqfl[cwqfl_index];
-        dev->mio->cwqfl[cwqfl_index] = cwq->q_next;
-    }
-    else
-    {
-        cwq = (mio_cwq_t*)mio_allocmem(mio, MIO_SIZEOF(*cwq) + cwq_extra_aligned);
-        if (MIO_UNLIKELY(!cwq)) return -1;
-    }
-
-    MIO_MEMSET (cwq, 0, MIO_SIZEOF(*cwq));
-    cwq->dev = dev;
-    cwq->ctx = wrctx;
-    if (dstaddr)
-    {
-        cwq->dstaddr.ptr = (mio_uint8_t*)(cwq + 1);
-        cwq->dstaddr.len = dstaddr->len;
-        MIO_MEMCPY (cwq->dstaddr.ptr, dstaddr->ptr, dstaddr->len);
-    }
-    else
-    {
-        cwq->dstaddr.len = 0;
-    }
-
-    cwq->olen = len;
-
-    MIO_CWQ_ENQ (&dev->mio->cwq, cwq);
-    dev->cw_count++; /* increment the number of complete write operations */
-    return 0;
+    return __enqueue_completed_write(dev, len, wrctx, dstaddr);
 }

 static int __dev_writev (mio_dev_t* dev, mio_iovec_t* iov, mio_iolen_t iovcnt, const mio_ntime_t* tmout, void* wrctx, const mio_devaddr_t* dstaddr)
 {
     mio_t* mio = dev->mio;
     mio_iolen_t urem, len;
-    mio_iolen_t index = 0, i, j;
-    mio_wq_t* q;
-    mio_cwq_t* cwq;
-    mio_oow_t cwq_extra_aligned, cwqfl_index;
+    mio_iolen_t index = 0, i;
     int x;

     if (dev->dev_cap & MIO_DEV_CAP_OUT_CLOSED)
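
Note: __dev_writev no longer needs the per-call q/cwq locals; the partially-consumed data is handed to __enqueue_pending_write(), which copies iovec entries [iov_index, iov_cnt) back-to-back into one contiguous pending buffer (the caller first trims iov[index] to its unwritten tail). A standalone sketch of that flattening step, with simplified placeholder types:

    #include <string.h>

    typedef struct { void* iov_ptr; size_t iov_len; } iovec_t;

    /* returns the number of bytes copied into 'dst' (assumed large enough) */
    static size_t flatten_remaining(unsigned char* dst, const iovec_t* iov,
                                    size_t cnt, size_t index)
    {
        size_t j = 0;
        for (size_t i = index; i < cnt; i++) {
            memcpy(&dst[j], iov[i].iov_ptr, iov[i].iov_len);
            j += iov[i].iov_len;
        }
        return j;
    }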
@@ -1522,117 +1540,41 @@ static int __dev_writev (mio_dev_t* dev, mio_iovec_t* iov, mio_iolen_t iovcnt, c
             goto enqueue_completed_write;
         }

-        return 1; /* written immediately and called on_write callback */
+        return 1; /* written immediately and called on_write callback. but this line will never be reached */

 enqueue_data:
-    if (dev->dev_cap & MIO_DEV_CAP_OUT_UNQUEUEABLE)
-    {
-        /* write queuing is not requested. so return failure */
-        mio_seterrbfmt (mio, MIO_ENOCAPA, "device incapable of queuing");
-        return -1;
-    }
-
-    /* queue the remaining data */
-    q = (mio_wq_t*)mio_allocmem(mio, MIO_SIZEOF(*q) + (dstaddr? dstaddr->len: 0) + urem);
-    if (MIO_UNLIKELY(!q)) return -1;
-
-    q->tmridx = MIO_TMRIDX_INVALID;
-    q->dev = dev;
-    q->ctx = wrctx;
-    if (dstaddr)
-    {
-        q->dstaddr.ptr = (mio_uint8_t*)(q + 1);
-        q->dstaddr.len = dstaddr->len;
-        MIO_MEMCPY (q->dstaddr.ptr, dstaddr->ptr, dstaddr->len);
-    }
-    else
-    {
-        q->dstaddr.len = 0;
-    }
-
-    q->ptr = (mio_uint8_t*)(q + 1) + q->dstaddr.len;
-    q->len = urem;
-    q->olen = len; /* original length to use when invoking on_write() */
-    for (i = index, j = 0; i < iovcnt; i++)
-    {
-        MIO_MEMCPY (&q->ptr[j], iov[i].iov_ptr, iov[i].iov_len);
-        j += iov[i].iov_len;
-    }
-
-    if (tmout && MIO_IS_POS_NTIME(tmout))
-    {
-        mio_tmrjob_t tmrjob;
-
-        MIO_MEMSET (&tmrjob, 0, MIO_SIZEOF(tmrjob));
-        tmrjob.ctx = q;
-        mio_gettime (mio, &tmrjob.when);
-        MIO_ADD_NTIME (&tmrjob.when, &tmrjob.when, tmout);
-        tmrjob.handler = on_write_timeout;
-        tmrjob.idxptr = &q->tmridx;
-
-        q->tmridx = mio_instmrjob(mio, &tmrjob);
-        if (q->tmridx == MIO_TMRIDX_INVALID)
-        {
-            mio_freemem (mio, q);
-            return -1;
-        }
-    }
-
-    MIO_WQ_ENQ (&dev->wq, q);
-    if (!(dev->dev_cap & MIO_DEV_CAP_OUT_WATCHED))
-    {
-        /* if output is not being watched, arrange to do so */
-        if (mio_dev_watch(dev, MIO_DEV_WATCH_RENEW, MIO_DEV_EVENT_IN) <= -1)
-        {
-            unlink_wq (mio, q);
-            mio_freemem (mio, q);
-            return -1;
-        }
-    }
-    return 0; /* request pushed to a write queue. */
+    return __enqueue_pending_write(dev, len, urem, iov, iovcnt, index, tmout, wrctx, dstaddr);

 enqueue_completed_write:
-    /* queue the remaining data */
-    cwq_extra_aligned = (dstaddr? dstaddr->len: 0);
-    cwq_extra_aligned = MIO_ALIGN_POW2(cwq_extra_aligned, MIO_CWQFL_ALIGN);
-    cwqfl_index = cwq_extra_aligned / MIO_CWQFL_SIZE;
-
-    if (cwqfl_index < MIO_COUNTOF(mio->cwqfl) && mio->cwqfl[cwqfl_index])
-    {
-        /* take an available cwq object from the free cwq list */
-        cwq = dev->mio->cwqfl[cwqfl_index];
-        dev->mio->cwqfl[cwqfl_index] = cwq->q_next;
-    }
-    else
-    {
-        cwq = (mio_cwq_t*)mio_allocmem(mio, MIO_SIZEOF(*cwq) + cwq_extra_aligned);
-        if (!cwq) return -1;
-    }
-
-    MIO_MEMSET (cwq, 0, MIO_SIZEOF(*cwq));
-    cwq->dev = dev;
-    cwq->ctx = wrctx;
-    if (dstaddr)
-    {
-        cwq->dstaddr.ptr = (mio_uint8_t*)(cwq + 1);
-        cwq->dstaddr.len = dstaddr->len;
-        MIO_MEMCPY (cwq->dstaddr.ptr, dstaddr->ptr, dstaddr->len);
-    }
-    else
-    {
-        cwq->dstaddr.len = 0;
-    }
-
-    cwq->olen = len;
-
-    MIO_CWQ_ENQ (&dev->mio->cwq, cwq);
-    dev->cw_count++; /* increment the number of complete write operations */
-    return 0;
-}
+    return __enqueue_completed_write(dev, len, wrctx, dstaddr);
+}
+
+static int __dev_sendfile (mio_dev_t* dev, mio_syshnd_t in_fd, mio_uintmax_t offset, mio_iolen_t len, const mio_ntime_t* tmout, void* wrctx, const mio_devaddr_t* dstaddr)
+{
+    mio_t* mio = dev->mio;
+
+    if (dev->dev_cap & MIO_DEV_CAP_OUT_CLOSED)
+    {
+        mio_seterrbfmt (mio, MIO_ENOCAPA, "unable to write to closed device");
+        return -1;
+    }
+
+    if (!MIO_WQ_IS_EMPTY(&dev->wq))
+    {
+        /* if the writing queue is not empty, enqueue this request immediately */
+        goto enqueue_data;
+    }
+
+    return 1;
+
+enqueue_data:
+    /*return __enqueue_pending_write(dev, len, urem, iov, iovcnt, index, tmout, wrctx, dstaddr);*/
+
+enqueue_completed_write:
+    /*return __enqueue_completed_write(dev, len, wrctx, dstaddr);*/
+    return 0;
+}

 int mio_dev_write (mio_dev_t* dev, const void* data, mio_iolen_t len, void* wrctx, const mio_devaddr_t* dstaddr)
 {
     return __dev_write(dev, data, len, MIO_NULL, wrctx, dstaddr);
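
Note: the refactor does not change the caller-facing contract of mio_dev_write(): a negative return is an error, 0 means the request was queued (or recorded as a completed write) and on_write() fires later, and 1 means the data was written synchronously (per the updated comments, the immediate path now also goes through the completed-write queue, so 1 may in practice never be returned). A hedged usage sketch; send_reply is hypothetical and 'dev' is assumed to be a device set up elsewhere with the mio API:

    #include <mio.h>  /* include path depends on how mio is installed */

    static int send_reply(mio_dev_t* dev, const void* data, mio_iolen_t len)
    {
        int n = mio_dev_write(dev, data, len, MIO_NULL, MIO_NULL);
        if (n <= -1) return -1; /* error; the caller may halt the device */
        /* n == 0: queued, on_write() will be invoked when it drains;
         * n == 1: fully written synchronously */
        return 0;
    }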