/* hip/ctx.c */
#include "hip-prv.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <signal.h>   /* sigaction, timer_create, SIGRTMIN - may already be pulled in by hip-prv.h */
#include <time.h>     /* clock_gettime, struct timespec, timer_t - may already be pulled in by hip-prv.h */
#include <ucontext.h> /* getcontext, makecontext, swapcontext - may already be pulled in by hip-prv.h */
#if defined(__NetBSD__)
#include <sys/event.h>
#include <fcntl.h>
#else
#include <sys/epoll.h>
#include <fcntl.h>
#include <valgrind/valgrind.h>
#endif
/* ---------------------------------------------------- */
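/* entry trampoline for every user context created with makecontext().
 * makecontext() only passes int arguments, so on platforms where a
 * pointer does not fit into a single unsigned int (MKCTX_NARGS > 1)
 * the hip_uctx_t pointer is split into two 32-bit halves and
 * reassembled here before the user function is invoked. */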
#if (MKCTX_NARGS <= 1)
static void invoke_uf(unsigned int a)
#else
static void invoke_uf(unsigned int a, unsigned int b)
#endif
{
hip_t* hip;
hip_uctx_t* uctx;
#if (MKCTX_NARGS <= 1)
uctx = (hip_uctx_t*)(hip_oow_t)a;
#else
uctx = (hip_uctx_t*)(((hip_oow_t)a << 32) | (hip_oow_t)b);
#endif
//printf ("invoke_uf START ...%p\n", uctx);
uctx->uf(uctx, uctx->ctx);
//printf ("invoke_uf DONE ...%p\n", uctx);
//TODO: This part must not be interrupted by the timer handler???
hip = uctx->hip;
if (uctx == hip->uctx_sched) /* the scheduler itself */
{
/* back to where the scheduler was activated for the first time */
setcontext(&hip->uc_main);
}
else if (uctx == hip->uctx_mux)
{
/* TODO: is this correct? */
setcontext(&hip->uctx_sched->uc);
}
else
{
/* switch to the scheduler */
assert (&uctx->uctx == hip->running);
/* place the routine in the terminated routines list and switch to the scheduler */
hip_switch(hip, &hip->terminated);
/* control must never reach here for a terminated routine
 * as the scheduler never reschedules this function */
}
//TODO: This part must not be interrupted?
// TODO: schedule the runnable process?
}
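/* allocate a user context and its stack in one block; the stack area
 * begins right after the hip_uctx_t header. when a user function is
 * given, the stack is registered with valgrind and the context is
 * prepared with makecontext() so that invoke_uf() runs on it. */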
hip_uctx_t* hip_uctx_open(hip_t* hip, hip_oow_t stack_size, int flags, hip_ufun_t uf, void* ctx)
{
hip_uctx_t* uctx;
void* sp;
uctx = (hip_uctx_t*)malloc(HIP_SIZEOF(*uctx) + stack_size);
if (HIP_UNLIKELY(!uctx)) return HIP_NULL;
//printf ("MALLOC UCTX %p\n", uctx);
sp = (void*)(uctx + 1);
memset(uctx, 0, HIP_SIZEOF(*uctx) + stack_size);
uctx->hip = hip;
uctx->flags = flags;
uctx->uf = uf;
uctx->ctx = ctx;
uctx->waiting_fd = HIP_INVALID_FD;
getcontext(&uctx->uc);
if (uf)
{
sigemptyset (&uctx->uc.uc_sigmask);
// sigaddset (&uctx->uc.uc_sigmask, SIGRTMIN); /* block SIGRTMIN. TODO: take this value from the outer scheduler object? */
uctx->stid = VALGRIND_STACK_REGISTER(sp, (hip_uint8_t*)sp + stack_size);
uctx->uc.uc_stack.ss_sp = sp;
uctx->uc.uc_stack.ss_size = stack_size;
uctx->uc.uc_stack.ss_flags = 0;
uctx->uc.uc_link = HIP_NULL;
#if (MKCTX_NARGS <= 1)
makecontext(&uctx->uc, (void(*)(void))invoke_uf, 1, uctx);
#else
makecontext(&uctx->uc, (void(*)(void))invoke_uf, 2, (unsigned int)((hip_oow_t)uctx >> 32), (unsigned int)((hip_oow_t)uctx & 0xFFFFFFFFu));
#endif
}
//printf("NEW UCTX %p\n", uctx);
return uctx;
}
void hip_uctx_close(hip_uctx_t* uctx)
{
//printf ("HIP_UCTX_CLOSE %p\n", uctx);
if (uctx->uf) VALGRIND_STACK_DEREGISTER(uctx->stid);
//printf("FREEING UCTX %p\n", uctx);
free(uctx);
//printf("FREEED UCTX %p\n", uctx);
}
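/* swap from 'uc' to 'new_uc', updating the cso/csi counters
 * (presumably counting switches out of and into each context). */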
void hip_uctx_swap(hip_uctx_t* uc, hip_uctx_t* new_uc)
{
uc->cso++;
new_uc->csi++;
swapcontext(&uc->uc, &new_uc->uc);
}
/* ---------------------------------------------------- */
static hip_nsecdur_t monotime(void)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (hip_nsecdur_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}
/* ---------------------------------------------------- */
static hip_uint8_t signal_stack[8012];
static hip_t* g_hip;
static void on_timer_signal(int sig, siginfo_t* si, void* ctx)
{
/* inform all running threads that time slice expired */
//pthread_kill(....);
#if 0
hip_uctx_t* uctx;
uctx = UCTX_FROM_LINK(g_hip->uctx_cur);
swapcontext(&uctx->uc, &g_hip->uctx_sched->uc);
#endif
}
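/* the scheduler context. it repeatedly reaps terminated routines,
 * hands control to the multiplexer when no routine is runnable, and
 * otherwise resumes the routine at the head of the runnables list.
 * the loop ends when the multiplexer returns without having made any
 * routine runnable. */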
static void schedule(hip_uctx_t* uctx, void *ctx)
{
hip_t* hip;
hip = uctx->hip;
while(1)
{
hip_uctx_link_t* h;
hip_uctx_t* u;
while (!HIP_LIST_IS_EMPTY(&hip->terminated))
{
// clean up terminated routines
h = HIP_LIST_HEAD(&hip->terminated);
HIP_LIST_UNCHAIN(h);
u = UCTX_FROM_LINK(h);
hip_uctx_close(u);
}
if (HIP_LIST_IS_EMPTY(&hip->runnables))
{
/* TODO: check if there are sleeping ... */
// printf ("<scheduler> NO TASK\n");
swapcontext(&hip->uctx_sched->uc, &hip->uctx_mux->uc);
if (HIP_LIST_IS_EMPTY(&hip->runnables)) break; /* no task? */
}
h = HIP_LIST_HEAD(&hip->runnables);
HIP_LIST_UNCHAIN(h);
hip->running = h;
u = UCTX_FROM_LINK(h);
/* TODO: extract these lines to a macro or something*/
hip->uctx_sched->cso++;
u->csi++;
swapcontext(&hip->uctx_sched->uc, &u->uc);
hip->running = HIP_NULL;
}
}
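/* the built-in multiplexer context. it computes how long the earliest
 * sleeper still has to sleep, waits on the epoll descriptor for at
 * most that long, moves routines whose I/O is ready or whose wakeup
 * time has passed back to the runnables list, and yields to the
 * scheduler. with HIP_FLAG_LAZY, descriptors of woken routines are
 * removed from the multiplexer lazily, just before the next wait. */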
static void multiplex(hip_uctx_t* uctx, void *ctx)
{
/* this is a builtin task to handle multiplexing and sleep */
hip_t* hip;
hip_uctx_link_t* l;
hip = uctx->hip;
while (1)
{
//printf ("sleeping empty[%d] io_waiting empty[%d]\n", HIP_LIST_IS_EMPTY(&hip->sleeping), HIP_LIST_IS_EMPTY(&hip->io_waiting));
if (HIP_LIST_IS_EMPTY(&hip->sleeping) && HIP_LIST_IS_EMPTY(&hip->io_waiting))
{
// STILL NEED TO HANDLE IO FILE DESCRIPTORS...
/* go back to the scheduler */
//printf ("NO SLEEPING. BACK TO SCEDULER\n");
swapcontext(&hip->uctx_mux->uc, &hip->uctx_sched->uc);
}
else
{
hip_nsecdur_t now;
hip_nsecdur_t wait;
struct timespec tmout;
struct epoll_event ee[100]; /* TODO: hold it in hip_t struct... sizing must be handled in the main struct.. */
int n;
hip_uctx_t* u;
l = HIP_LIST_HEAD(&hip->sleeping);
u = UCTX_FROM_LINK(l);
now = monotime();
wait = now >= u->wakeup_time? 0: u->wakeup_time - now;
/* TODO: the io_waiting process that is time-bound must be catered for */
/* TODO:
* lazy removal of unneeded context here???
* epoll_ctl(DELETE here
*/
// lazy deletion of file descriptors
while (!HIP_LIST_IS_EMPTY(&hip->io_done))
{
l = HIP_LIST_HEAD(&hip->io_done);
u = UCTX_FROM_LINK(l);
HIP_LIST_UNCHAIN(l);
epoll_ctl(hip->mux_id, EPOLL_CTL_DEL, u->waiting_fd, HIP_NULL);
}
//printf ("WAITING %llu\n", (unsigned long long)wait);
/* TODO: support different io multiplexers...*/
#if defined(HAVE_EPOLL_PWAIT2)
tmout.tv_sec = wait / 1000000000;
tmout.tv_nsec = wait % 1000000000;
n = epoll_pwait2(hip->mux_id, ee, 100, &tmout, HIP_NULL);
#else
/* epoll_pwait()'s timeout has millisecond resolution. round the
 * nanosecond wait up to the next whole millisecond so the timeout
 * never fires early:
 *   ceil(wait / 1000000.0)
 *   == (wait / 1000000) + !!(wait % 1000000)
 *   == (wait + 1000000 - 1) / 1000000
 */
n = epoll_pwait(hip->mux_id, ee, 100, (wait + 1000000 - 1) / 1000000, HIP_NULL);
#endif
//printf ("Epoll returned [%d]\n", (int)n);
if (n <= -1)
{
/* TODO: some error handling action is required */
/* scheduler panic? */
}
if (n > 0)
{
do
{
l = ee[--n].data.ptr;
u = UCTX_FROM_LINK(l);
HIP_LIST_UNCHAIN(l); /* unchain it from io_waiting */
HIP_LIST_ADD_BACK(l, &hip->runnables);
if (hip->flags & HIP_FLAG_LAZY)
{
/* arrange to remove the file descriptor from
* the multiplexer just before actual waiting.
* add this to the io_done list for clean-up
* before waiting and keep waiting_fd untouched
* for adjustment later. */
HIP_LIST_ADD_BACK(&u->io_done, &hip->io_done);
}
else
{
//printf ("PWAIT link MULTIPLEX DEL %p fd[%d] io_waiting empty[%d]\n", l, u->waiting_fd, HIP_LIST_IS_EMPTY(&hip->io_waiting));
epoll_ctl(hip->mux_id, EPOLL_CTL_DEL, u->waiting_fd, HIP_NULL);
u->waiting_fd = HIP_INVALID_FD;
}
}
while(n > 0);
}
now = monotime();
while (!HIP_LIST_IS_EMPTY(&hip->sleeping))
{
l = HIP_LIST_HEAD(&hip->sleeping);
u = UCTX_FROM_LINK(l);
if (now < u->wakeup_time) break;
HIP_LIST_UNCHAIN(l);
HIP_LIST_ADD_BACK(l, &hip->runnables);
}
/* go back to the scheduler */
if (!HIP_LIST_IS_EMPTY(&hip->runnables))
{
//printf ("BACK TO SCHEDULER \n");
swapcontext(&hip->uctx_mux->uc, &hip->uctx_sched->uc);
}
}
}
}
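/* create a hip instance: initialize the routine lists, install a
 * SIGRTMIN handler and an alternate signal stack for the preemption
 * tick, create the tick timer, and open the I/O multiplexer
 * (epoll on linux, kqueue on netbsd). */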
hip_t* hip_open(int flags)
{
hip_t* hip;
struct sigaction sa;
struct sigevent se;
stack_t ss;
sigset_t sm, oldsm;
timer_t tid;
hip = (hip_t*)malloc(HIP_SIZEOF(*hip));
if (!hip) return HIP_NULL;
//printf ("MALLOC HIP %p\n", hip);
memset(hip, 0, HIP_SIZEOF(*hip));
hip->flags = flags;
/* initialize each list to an empty list by making its pointers point to itself. */
HIP_LIST_INIT(&hip->runnables);
HIP_LIST_INIT(&hip->terminated);
HIP_LIST_INIT(&hip->sleeping);
HIP_LIST_INIT(&hip->io_waiting);
HIP_LIST_INIT(&hip->io_done);
HIP_LIST_INIT(&hip->suspended);
sigemptyset(&sm);
sigaddset(&sm, SIGRTMIN);
sigprocmask(SIG_BLOCK, &sm, &oldsm);
memset(&sa, 0, HIP_SIZEOF(sa));
sa.sa_flags = SA_SIGINFO /*| SA_RESTART*/;
sa.sa_sigaction = on_timer_signal;
sigemptyset(&sa.sa_mask);
sigaction(SIGRTMIN, &sa, &hip->oldsa);
ss.ss_sp = signal_stack; /* TODO: allocate this dynamically */
ss.ss_size = HIP_SIZEOF(signal_stack);
ss.ss_flags = 0;
sigaltstack(&ss, HIP_NULL);
memset(&se, 0, HIP_SIZEOF(se));
se.sigev_notify = SIGEV_SIGNAL;
se.sigev_signo = SIGRTMIN;
/* se.sigev_value.sival_ptr = hip; TODO: possible to use sigev_notify_function and use this field?
 * what about sigwaitinfo() or sigtimedwait() in a dedicated thread? */
timer_create(CLOCK_MONOTONIC, &se, &tid);
hip->tmr_id = tid;
sigprocmask(SIG_SETMASK, &oldsm, HIP_NULL);
#if defined(__NetBSD__)
hip->mux_id = kqueue1(O_CLOEXEC);
#else
hip->mux_id = epoll_create1(EPOLL_CLOEXEC);
#endif
if (hip->mux_id <= -1)
{
timer_delete(tid);
//printf("FREEING HIP %p\n", hip);
free(hip);
return HIP_NULL;
}
g_hip = hip; /* TODO: avoid using a global hip */
return hip;
}
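/* destroy a hip instance: restore the previous SIGRTMIN handler,
 * close the multiplexer, delete the tick timer, and release every
 * context still linked on any of the routine lists. */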
void hip_close(hip_t* hip)
{
hip_uctx_link_t* l;
hip_uctx_t* uctx;
sigaction(SIGRTMIN, &hip->oldsa, NULL);
close(hip->mux_id);
timer_delete(hip->tmr_id);
while (!HIP_LIST_IS_EMPTY(&hip->terminated))
{
l = HIP_LIST_HEAD(&hip->terminated);
HIP_LIST_UNCHAIN(l);
uctx = UCTX_FROM_LINK(l);
hip_uctx_close(uctx);
}
while (!HIP_LIST_IS_EMPTY(&hip->runnables))
{
l = HIP_LIST_HEAD(&hip->runnables);
HIP_LIST_UNCHAIN(l);
uctx = UCTX_FROM_LINK(l);
hip_uctx_close(uctx);
}
/* TODO: minimize repeated code in the same pattern */
/* TODO: or is it better to maintain a list of all contexts? */
while (!HIP_LIST_IS_EMPTY(&hip->sleeping))
{
l = HIP_LIST_HEAD(&hip->sleeping);
HIP_LIST_UNCHAIN(l);
uctx = UCTX_FROM_LINK(l);
hip_uctx_close(uctx);
}
while (!HIP_LIST_IS_EMPTY(&hip->io_waiting))
{
l = HIP_LIST_HEAD(&hip->io_waiting);
HIP_LIST_UNCHAIN(l);
uctx = UCTX_FROM_LINK(l);
hip_uctx_close(uctx);
}
while (!HIP_LIST_IS_EMPTY(&hip->io_done))
{
l = HIP_LIST_HEAD(&hip->io_done);
HIP_LIST_UNCHAIN(l);
uctx = UCTX_FROM_LINK(l);
hip_uctx_close(uctx);
}
while (!HIP_LIST_IS_EMPTY(&hip->suspended))
{
l = HIP_LIST_HEAD(&hip->suspended);
HIP_LIST_UNCHAIN(l);
uctx = UCTX_FROM_LINK(l);
hip_uctx_close(uctx);
}
if (hip->uctx_mux) hip_uctx_close(hip->uctx_mux);
if (hip->uctx_sched) hip_uctx_close(hip->uctx_sched);
//printf("FREEING HIP2 %p\n", hip);
free(hip);
}
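/* create a new routine with a default stack size and append it to the
 * runnables list so the scheduler can pick it up. */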
hip_uctx_t* hip_newrtn(hip_t* hip, int flags, hip_ufun_t uf, void* ctx)
{
hip_uctx_t* uctx;
uctx = hip_uctx_open(hip, 65535, flags, uf, ctx); /* TODO: stack size */
if (!uctx) return HIP_NULL;
/* append to the list */
HIP_LIST_ADD_BACK(&uctx->uctx, &hip->runnables);
return uctx;
}
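/* create the scheduler and multiplexer contexts, optionally start the
 * periodic SIGRTMIN tick for preemptive scheduling, and jump into the
 * scheduler. this call returns only after the scheduler runs out of
 * routines and switches back to uc_main. */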
int hip_schedule(hip_t* hip, int preempt)
{
hip->uctx_sched = hip_uctx_open(hip, 40960, 0, schedule, hip);
if (!hip->uctx_sched) return -1;
hip->uctx_mux = hip_uctx_open(hip, 40960, 0, multiplex, hip);
if (!hip->uctx_mux)
{
hip_uctx_close(hip->uctx_sched);
hip->uctx_sched = HIP_NULL;
return -1;
}
/* fire the scheduler tick for preemptive scheduling */
if (preempt)
{
/* start the timer tick */
struct itimerspec its;
memset(&its, 0, HIP_SIZEOF(its));
its.it_interval.tv_sec = 0;
//its.it_interval.tv_nsec = 10000000; /* 10 milliseconds */
its.it_interval.tv_nsec = 100000000; /* 100 milliseconds */
its.it_value = its.it_interval;
timer_settime(hip->tmr_id, 0, &its, NULL);
}
/* jump to the scheduler */
swapcontext(&hip->uc_main, &hip->uctx_sched->uc);
if (preempt)
{
/* stop the timer tick */
struct itimerspec its;
memset(&its, 0, HIP_SIZEOF(its));
its.it_interval.tv_sec = 0;
its.it_interval.tv_nsec = 0;
its.it_value = its.it_interval;
timer_settime(hip->tmr_id, 0, &its, NULL);
}
return 0;
}
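/* move the running routine onto 'list_to_deposit' and switch to the
 * scheduler context. */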
void hip_switch(hip_t* hip, hip_uctx_link_t* list_to_deposit)
{
hip_uctx_t* caller;
assert (hip->running != HIP_NULL);
/* switch from the running context to the scheduler context */
HIP_LIST_ADD_BACK(hip->running, list_to_deposit);
caller = UCTX_FROM_LINK(hip->running);
/* TODO: extract these lines to a macro or something */
caller->cso++;
hip->uctx_sched->csi++;
swapcontext(&caller->uc, &hip->uctx_sched->uc);
}
void hip_yield(hip_t* hip)
{
hip_switch(hip, &hip->runnables);
}
void hip_suspend(hip_t* hip)
{
/* TODO: this isn't needed as of now. delete it eventually if it turns out to be unnecessary */
hip_switch(hip, &hip->suspended);
}
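/* put the running routine to sleep for 'nsecs' nanoseconds. the
 * sleeping list is kept sorted by wakeup time so the multiplexer only
 * needs to look at its head. */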
void hip_sleep(hip_t* hip, hip_nsecdur_t nsecs)
{
hip_uctx_t* caller;
assert (hip->running != HIP_NULL);
caller = UCTX_FROM_LINK(hip->running);
caller->wakeup_time = monotime() + nsecs;
/* TODO: switch to a heap. for now, a simple linear search keeps this list sorted. */
if (!HIP_LIST_IS_EMPTY(&hip->sleeping))
{
hip_uctx_link_t* l;
hip_uctx_t* r;
for (l = HIP_LIST_HEAD(&hip->sleeping); l != &hip->sleeping; l = l->next)
{
r = UCTX_FROM_LINK(l);
if (caller->wakeup_time <= r->wakeup_time)
{
HIP_LIST_CHAIN(hip->running, l->prev, l);
goto do_sched;
}
}
}
/* place the running routine at the end of the sleeping routines list */
HIP_LIST_ADD_BACK(hip->running, &hip->sleeping);
do_sched:
hip->running = HIP_NULL;
swapcontext(&caller->uc, &hip->uctx_sched->uc); /* switch to the scheduler */
}
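/* block the running routine until 'fd' becomes ready for the requested
 * I/O. the routine's link is stored in the epoll event so the
 * multiplexer can find it again; if the routine was already watching a
 * descriptor, the registration is updated or replaced accordingly. */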
void hip_awaitio(hip_t* hip, int fd, int flags)
{
struct epoll_event ev;
hip_uctx_t* rr;
/* if waiting on multiple items were supported, the following code would require a whole rewrite */
rr = UCTX_FROM_LINK(hip->running);
if (rr->waiting_fd != HIP_INVALID_FD)
{
HIP_LIST_UNCHAIN(&rr->io_done);
if (rr->waiting_fd == fd)
{
/* update */
memset(&ev, 0, HIP_SIZEOF(ev));
if (flags & HIP_IO_READ) ev.events |= EPOLLIN;
if (flags & HIP_IO_WRITE) ev.events |= EPOLLOUT;
/* - the event mask could have changed.
 * - the waiting context could have changed. */
/* TODO: more optimization based on above */
ev.data.ptr = hip->running; /* store running context link */
//printf ("PWAIT link UPDATE %p fd[%d]\n", ev.data.ptr, fd);
epoll_ctl(hip->mux_id, EPOLL_CTL_MOD, fd, &ev);
}
else
{
/* delete and add */
//printf ("PWAIT link DELETE fd[%d]\n", fd);
epoll_ctl(hip->mux_id, EPOLL_CTL_DEL, fd, HIP_NULL);
goto add;
}
}
else
{
add:
memset(&ev, 0, HIP_SIZEOF(ev));
if (flags & HIP_IO_READ) ev.events |= EPOLLIN;
if (flags & HIP_IO_WRITE) ev.events |= EPOLLOUT;
ev.data.ptr = hip->running; /* store running context link */
//printf ("PWAIT link ADD %p fd[%d]\n", ev.data.ptr, fd);
epoll_ctl(hip->mux_id, EPOLL_CTL_ADD, fd, &ev);
/* TODO: error handling - probably panic? */
}
HIP_LIST_ADD_BACK(hip->running, &hip->io_waiting);
hip->running = HIP_NULL;
rr->waiting_fd = fd; /* remember the file descriptor being waited on */
swapcontext(&rr->uc, &hip->uctx_sched->uc); /* switch to the scheduler */
}
/* ---------------------------------------------------- */
static int fd_set_nonblock(int fd)
{
int flags;
flags = fcntl(fd, F_GETFL);
if (flags <= -1) return -1;
return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}
static int fd_set_cloexec(int fd)
{
int flags;
flags = fcntl(fd, F_GETFD);
if (flags <= -1) return -1;
return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
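/* establish a stream connection without blocking the thread. the
 * socket is created with close-on-exec and non-blocking modes; if
 * connect() returns EINPROGRESS, the routine waits for writability via
 * hip_awaitio() and then checks SO_ERROR for the final outcome. */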
static int socket_connect(hip_t* hip, int proto, const struct sockaddr* addr, socklen_t addrlen) /* TODO: timeout? */
{
int fd;
int x;
x = SOCK_STREAM;
#if defined(SOCK_CLOEXEC)
x |= SOCK_CLOEXEC;
#endif
#if defined(SOCK_NONBLOCK)
x |= SOCK_NONBLOCK;
#endif
fd = socket(addr->sa_family, x, 0); /* TODO make SOCK_STREAM selectable */
if (fd <= -1) return -1;
#if !defined(SOCK_CLOEXEC)
if (fd_set_cloexec(fd) <= -1)
{
close(fd);
return -1;
}
#endif
#if !defined(SOCK_NONBLOCK)
if (fd_set_nonblock(fd) <= -1)
{
close(fd);
return -1;
}
#endif
x = connect(fd, addr, addrlen);
if (x <= -1)
{
socklen_t sl;
int ss;
if (errno != EINPROGRESS) /* TODO: cater for EINTR? */
{
close(fd);
return -1;
}
hip_awaitio(hip, fd, HIP_IO_WRITE);
sl = HIP_SIZEOF(ss);
if (getsockopt(fd, SOL_SOCKET, SO_ERROR, (void*)&ss, &sl) <= -1)
{
close(fd);
return -1;
}
if (ss != 0)
{
close(fd);
errno = ss;
return -1;
}
}
return fd;
}
static void socket_close(hip_t* hip, int fd)
{
/* TODO: DEL only if fd is still in the multiplexer? */
epoll_ctl(hip->mux_id, EPOLL_CTL_DEL, fd, HIP_NULL); /* NOTE: this may not be necessary */
close(fd);
}
static hip_ooi_t socket_read(hip_t* hip, int fd, void* buf, hip_oow_t len)
{
hip_awaitio(hip, fd, HIP_IO_READ);
return read(fd, buf, len);
}
static hip_ooi_t socket_write(hip_t* hip, int fd, const void* buf, hip_oow_t len)
{
hip_awaitio(hip, fd, HIP_IO_WRITE);
return write(fd, buf, len);
}
/* ---------------------------------------------------- */
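/* demo routines: uf1 and uf3 exchange messages over a channel while
 * sleeping, uf2 just sleeps, and uf3 additionally performs a plain
 * HTTP GET against a hard-coded address using the socket helpers
 * above. */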
static void uf1(hip_uctx_t* uc, void* ctx)
{
hip_chan_t* chan;
int i;
chan = (hip_chan_t*)ctx;
for (i = 0; i < 5; i++)
{
printf (">> UF1 TOP [%d]\n", i);
//hip_yield(uc->hip);
hip_sleep(uc->hip, 500000000);
printf (">> UF1 SEC [%d]\n", i);
//hip_yield(uc->hip);
hip_sleep(uc->hip, 1000000000);
}
for (i = 0; i < 10; i++)
{
int n;
char buf[32];
printf (">> UF1 ALMOST DONE. WAITING FOR UF3\n");
n = hip_chan_recv(chan, buf, sizeof(buf));
printf (">> UF1 RECEIVED [%.*s]\n", (int)n, buf);
n = hip_chan_send(chan, "holy cow", 8);
printf (">> UF1 SENT [%d]\n", (int)n);
}
}
static void uf2(hip_uctx_t* uc, void* ctx)
{
int i;
for (i = 0; i < 10; i++)
{
printf (">> UF2 TOP [%d]\n", i);
//hip_yield(uc->hip);
hip_sleep(uc->hip, 1000000000);
printf (">> UF2 SEC [%d]\n", i);
//hip_yield(uc->hip);
hip_sleep(uc->hip, 500000000);
}
}
static void uf3(hip_uctx_t* uc, void* ctx)
{
hip_t* hip;
hip_chan_t* chan;
struct sockaddr_in sa;
int i, fd;
hip = uc->hip;
chan = (hip_chan_t*)ctx;
#if 1
memset(&sa, 0, HIP_SIZEOF(sa));
sa.sin_family = AF_INET;
sa.sin_addr.s_addr = inet_addr("142.250.206.196");
sa.sin_port = htons(80);
fd = socket_connect(hip, SOCK_STREAM, (struct sockaddr*)&sa, HIP_SIZEOF(sa));
printf ("*********CONNECTED>>>>>>>>>>>>>>>\n");
{
int n;
char buf[256];
// if the server side doesn't close the connection,
// the following loop will never end as this client side doesn't actively
// close the connection after having read the response chunk. it's not http aware...
// use 'Connection: close' for now
//const char x[] = "GET / HTTP/1.1\r\nHost: www.google.com\r\n\r\n";
const char x[] = "GET / HTTP/1.1\r\nHost: www.google.com\r\nConnection: close\r\n\r\n";
socket_write(hip, fd, x, HIP_SIZEOF(x) - 1);
do
{
n = socket_read(hip, fd, buf, HIP_SIZEOF(buf));
if (n <= 0) break;
printf("GOT [%.*s]\n", n, buf);
}
while(1);
}
socket_close(hip, fd);
printf ("*********DISCONNECTED>>>>>>>>>>>>>>>\n");
#endif
for (i = 0; i < 15; i++)
{
printf (">> UF3 TOP [%d]\n", i);
//hip_yield(uc->hip);
hip_sleep(uc->hip, 1000000000);
printf (">> UF3 SEC [%d]\n", i);
//hip_yield(uc->hip);
hip_sleep(uc->hip, 1000000000);
if (i == 8) hip_newrtn(uc->hip, 0, uf2, HIP_NULL);
}
printf (">> UF3 DONE DONE\n");
for (i = 0; i < 10; i++)
{
int n;
char buf[64];
n = hip_chan_send(chan, "hello", 5);
printf (">> UF3 SENT %d\n", (int)n);
n = hip_chan_recv(chan, buf, sizeof(buf));
printf (">> UF3 RECV [%.*s]\n", (int)n, buf);
}
}
int main()
{
hip_t* hip;
hip_chan_t* chan;
setbuf(stdout, NULL);
hip = hip_open(HIP_FLAG_LAZY);
chan = hip_chan_open(hip, 100, 1);
hip_newrtn(hip, 0, uf1, chan);
hip_newrtn(hip, 0, uf2, HIP_NULL);
hip_newrtn(hip, 0, uf3, chan);
/* jump to the scheduler */
hip_schedule(hip, 0);
printf("END OF SCHEDULE\n");
hip_chan_close(chan);
/* TODO: close all uctxes ? */
hip_close(hip);
printf("END OF APP\n");
return 0;
}