Commit 688ccfce authored by Poul-Henning Kamp's avatar Poul-Henning Kamp

Rename struct vtp to pfd, it will become non-TCP specific.

parent a125b10d
......@@ -73,11 +73,11 @@ static struct lock backends_mtx;
* Get a connection to the backend
*/
static struct vtp *
static struct pfd *
vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo,
unsigned force_fresh)
{
struct vtp *vtp;
struct pfd *pfd;
double tmod;
char abuf1[VTCP_ADDRBUFSIZE], abuf2[VTCP_ADDRBUFSIZE];
char pbuf1[VTCP_PORTBUFSIZE], pbuf2[VTCP_PORTBUFSIZE];
......@@ -113,8 +113,8 @@ vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo,
bo->htc->doclose = SC_NULL;
FIND_TMO(connect_timeout, tmod, bo, bp);
vtp = VTP_Get(bp->tcp_pool, tmod, wrk, force_fresh);
if (vtp == NULL) {
pfd = VTP_Get(bp->tcp_pool, tmod, wrk, force_fresh);
if (pfd == NULL) {
VSLb(bo->vsl, SLT_FetchError,
"backend %s: fail", bp->director->display_name);
// XXX: Per backend stats ?
......@@ -123,8 +123,8 @@ vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo,
return (NULL);
}
assert(vtp->fd >= 0);
AN(vtp->addr);
assert(pfd->fd >= 0);
AN(pfd->priv);
Lck_Lock(&bp->mtx);
bp->n_conn++;
......@@ -133,21 +133,21 @@ vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo,
Lck_Unlock(&bp->mtx);
if (bp->proxy_header != 0)
VPX_Send_Proxy(vtp->fd, bp->proxy_header, bo->sp);
VPX_Send_Proxy(pfd->fd, bp->proxy_header, bo->sp);
VTCP_myname(vtp->fd, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
VTCP_hisname(vtp->fd, abuf2, sizeof abuf2, pbuf2, sizeof pbuf2);
VTCP_myname(pfd->fd, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
VTCP_hisname(pfd->fd, abuf2, sizeof abuf2, pbuf2, sizeof pbuf2);
VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s %s %s",
vtp->fd, bp->director->display_name, abuf2, pbuf2, abuf1, pbuf1);
pfd->fd, bp->director->display_name, abuf2, pbuf2, abuf1, pbuf1);
INIT_OBJ(bo->htc, HTTP_CONN_MAGIC);
bo->htc->priv = vtp;
bo->htc->rfd = &vtp->fd;
bo->htc->priv = pfd;
bo->htc->rfd = &pfd->fd;
FIND_TMO(first_byte_timeout,
bo->htc->first_byte_timeout, bo, bp);
FIND_TMO(between_bytes_timeout,
bo->htc->between_bytes_timeout, bo, bp);
return (vtp);
return (pfd);
}
static unsigned v_matchproto_(vdi_healthy_f)
......@@ -167,7 +167,7 @@ vbe_dir_finish(const struct director *d, struct worker *wrk,
struct busyobj *bo)
{
struct backend *bp;
struct vtp *vtp;
struct pfd *pfd;
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
......@@ -175,24 +175,24 @@ vbe_dir_finish(const struct director *d, struct worker *wrk,
CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);
CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
CAST_OBJ_NOTNULL(vtp, bo->htc->priv, VTP_MAGIC);
CAST_OBJ_NOTNULL(pfd, bo->htc->priv, PFD_MAGIC);
bo->htc->priv = NULL;
if (vtp->state != VTP_STATE_USED)
if (pfd->state != PFD_STATE_USED)
assert(bo->htc->doclose == SC_TX_PIPE ||
bo->htc->doclose == SC_RX_TIMEOUT);
if (bo->htc->doclose != SC_NULL || bp->proxy_header != 0) {
VSLb(bo->vsl, SLT_BackendClose, "%d %s", vtp->fd,
VSLb(bo->vsl, SLT_BackendClose, "%d %s", pfd->fd,
bp->director->display_name);
VTP_Close(&vtp);
AZ(vtp);
VTP_Close(&pfd);
AZ(pfd);
Lck_Lock(&bp->mtx);
} else {
assert (vtp->state == VTP_STATE_USED);
VSLb(bo->vsl, SLT_BackendReuse, "%d %s", vtp->fd,
assert (pfd->state == PFD_STATE_USED);
VSLb(bo->vsl, SLT_BackendReuse, "%d %s", pfd->fd,
bp->director->display_name);
Lck_Lock(&bp->mtx);
VSC_C_main->backend_recycle++;
VTP_Recycle(wrk, &vtp);
VTP_Recycle(wrk, &pfd);
}
assert(bp->n_conn > 0);
bp->n_conn--;
......@@ -210,7 +210,7 @@ vbe_dir_gethdrs(const struct director *d, struct worker *wrk,
{
int i, extrachance = 1;
struct backend *bp;
struct vtp *vtp;
struct pfd *pfd;
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
......@@ -226,18 +226,18 @@ vbe_dir_gethdrs(const struct director *d, struct worker *wrk,
http_PrintfHeader(bo->bereq, "Host: %s", bp->hosthdr);
do {
vtp = vbe_dir_getfd(wrk, bp, bo, extrachance == 0);
if (vtp == NULL)
pfd = vbe_dir_getfd(wrk, bp, bo, extrachance == 0);
if (pfd == NULL)
return (-1);
AN(bo->htc);
if (vtp->state != VTP_STATE_STOLEN)
if (pfd->state != PFD_STATE_STOLEN)
extrachance = 0;
i = V1F_SendReq(wrk, bo, &bo->acct.bereq_hdrbytes,
&bo->acct.bereq_bodybytes, 0);
if (vtp->state != VTP_STATE_USED) {
if (VTP_Wait(wrk, vtp, VTIM_real() +
if (pfd->state != PFD_STATE_USED) {
if (VTP_Wait(wrk, pfd, VTIM_real() +
bo->htc->first_byte_timeout) != 0) {
bo->htc->doclose = SC_RX_TIMEOUT;
VSLb(bo->vsl, SLT_FetchError,
......@@ -247,7 +247,7 @@ vbe_dir_gethdrs(const struct director *d, struct worker *wrk,
}
if (bo->htc->doclose == SC_NULL) {
assert(vtp->state == VTP_STATE_USED);
assert(pfd->state == PFD_STATE_USED);
if (i == 0)
i = V1F_FetchRespHdr(bo);
if (i == 0) {
......@@ -278,15 +278,15 @@ static const struct suckaddr * v_matchproto_(vdi_getip_f)
vbe_dir_getip(const struct director *d, struct worker *wrk,
struct busyobj *bo)
{
struct vtp *vtp;
struct pfd *pfd;
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
CAST_OBJ_NOTNULL(vtp, bo->htc->priv, VTP_MAGIC);
CAST_OBJ_NOTNULL(pfd, bo->htc->priv, PFD_MAGIC);
return (vtp->addr);
return (pfd->priv);
}
/*--------------------------------------------------------------------*/
......@@ -298,7 +298,7 @@ vbe_dir_http1pipe(const struct director *d, struct req *req, struct busyobj *bo)
enum sess_close retval;
struct backend *bp;
struct v1p_acct v1a;
struct vtp *vtp;
struct pfd *pfd;
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
......@@ -313,16 +313,16 @@ vbe_dir_http1pipe(const struct director *d, struct req *req, struct busyobj *bo)
req->res_mode = RES_PIPE;
vtp = vbe_dir_getfd(req->wrk, bp, bo, 0);
pfd = vbe_dir_getfd(req->wrk, bp, bo, 0);
if (vtp == NULL) {
if (pfd == NULL) {
retval = SC_TX_ERROR;
} else {
CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
i = V1F_SendReq(req->wrk, bo, &v1a.bereq, &v1a.out, 1);
VSLb_ts_req(req, "Pipe", W_TIM_real(req->wrk));
if (i == 0)
V1P_Process(req, vtp->fd, &v1a);
V1P_Process(req, pfd->fd, &v1a);
VSLb_ts_req(req, "PipeSess", W_TIM_real(req->wrk));
bo->htc->doclose = SC_TX_PIPE;
vbe_dir_finish(d, req->wrk, bo);
......
......@@ -277,7 +277,7 @@ vbp_poke(struct vbp_target *vt)
t_start = t_now = VTIM_real();
t_end = t_start + vt->timeout;
s = VTP_Open(vt->tcp_pool, t_end - t_now, &sa);
s = VTP_Open(vt->tcp_pool, t_end - t_now, (const void **)&sa);
if (s < 0) {
/* Got no connection: failed */
return;
......
......@@ -56,10 +56,10 @@ struct tcp_pool {
int refcnt;
struct lock mtx;
VTAILQ_HEAD(, vtp) connlist;
VTAILQ_HEAD(, pfd) connlist;
int n_conn;
VTAILQ_HEAD(, vtp) killlist;
VTAILQ_HEAD(, pfd) killlist;
int n_kill;
int n_used;
......@@ -75,39 +75,39 @@ static VTAILQ_HEAD(, tcp_pool) tcp_pools = VTAILQ_HEAD_INITIALIZER(tcp_pools);
static void v_matchproto_(waiter_handle_f)
tcp_handle(struct waited *w, enum wait_event ev, double now)
{
struct vtp *vtp;
struct pfd *pfd;
struct tcp_pool *tp;
CAST_OBJ_NOTNULL(vtp, w->priv1, VTP_MAGIC);
CAST_OBJ_NOTNULL(pfd, w->priv1, PFD_MAGIC);
(void)ev;
(void)now;
CHECK_OBJ_NOTNULL(vtp->tcp_pool, TCP_POOL_MAGIC);
tp = vtp->tcp_pool;
CHECK_OBJ_NOTNULL(pfd->tcp_pool, TCP_POOL_MAGIC);
tp = pfd->tcp_pool;
Lck_Lock(&tp->mtx);
switch (vtp->state) {
case VTP_STATE_STOLEN:
vtp->state = VTP_STATE_USED;
VTAILQ_REMOVE(&tp->connlist, vtp, list);
AN(vtp->cond);
AZ(pthread_cond_signal(vtp->cond));
switch (pfd->state) {
case PFD_STATE_STOLEN:
pfd->state = PFD_STATE_USED;
VTAILQ_REMOVE(&tp->connlist, pfd, list);
AN(pfd->cond);
AZ(pthread_cond_signal(pfd->cond));
break;
case VTP_STATE_AVAIL:
VTCP_close(&vtp->fd);
VTAILQ_REMOVE(&tp->connlist, vtp, list);
case PFD_STATE_AVAIL:
VTCP_close(&pfd->fd);
VTAILQ_REMOVE(&tp->connlist, pfd, list);
tp->n_conn--;
FREE_OBJ(vtp);
FREE_OBJ(pfd);
break;
case VTP_STATE_CLEANUP:
VTCP_close(&vtp->fd);
case PFD_STATE_CLEANUP:
VTCP_close(&pfd->fd);
tp->n_kill--;
VTAILQ_REMOVE(&tp->killlist, vtp, list);
memset(vtp, 0x11, sizeof *vtp);
free(vtp);
VTAILQ_REMOVE(&tp->killlist, pfd, list);
memset(pfd, 0x11, sizeof *pfd);
free(pfd);
break;
default:
WRONG("Wrong vtp state");
WRONG("Wrong pfd state");
}
Lck_Unlock(&tp->mtx);
}
......@@ -194,7 +194,7 @@ void
VTP_Rel(struct tcp_pool **tpp)
{
struct tcp_pool *tp;
struct vtp *vtp, *vtp2;
struct pfd *pfd, *pfd2;
TAKE_OBJ_NOTNULL(tp, tpp, TCP_POOL_MAGIC);
......@@ -211,13 +211,13 @@ VTP_Rel(struct tcp_pool **tpp)
free(tp->ip4);
free(tp->ip6);
Lck_Lock(&tp->mtx);
VTAILQ_FOREACH_SAFE(vtp, &tp->connlist, list, vtp2) {
VTAILQ_REMOVE(&tp->connlist, vtp, list);
VTAILQ_FOREACH_SAFE(pfd, &tp->connlist, list, pfd2) {
VTAILQ_REMOVE(&tp->connlist, pfd, list);
tp->n_conn--;
assert(vtp->state == VTP_STATE_AVAIL);
vtp->state = VTP_STATE_CLEANUP;
(void)shutdown(vtp->fd, SHUT_WR);
VTAILQ_INSERT_TAIL(&tp->killlist, vtp, list);
assert(pfd->state == PFD_STATE_AVAIL);
pfd->state = PFD_STATE_CLEANUP;
(void)shutdown(pfd->fd, SHUT_WR);
VTAILQ_INSERT_TAIL(&tp->killlist, pfd, list);
tp->n_kill++;
}
while (tp->n_kill) {
......@@ -239,7 +239,7 @@ VTP_Rel(struct tcp_pool **tpp)
*/
int
VTP_Open(const struct tcp_pool *tp, double tmo, const struct suckaddr **sa)
VTP_Open(const struct tcp_pool *tp, double tmo, const void **privp)
{
int s;
int msec;
......@@ -248,17 +248,17 @@ VTP_Open(const struct tcp_pool *tp, double tmo, const struct suckaddr **sa)
msec = (int)floor(tmo * 1000.0);
if (cache_param->prefer_ipv6) {
*sa = tp->ip6;
*privp = tp->ip6;
s = VTCP_connect(tp->ip6, msec);
if (s >= 0)
return (s);
}
*sa = tp->ip4;
*privp = tp->ip4;
s = VTCP_connect(tp->ip4, msec);
if (s >= 0)
return (s);
if (!cache_param->prefer_ipv6) {
*sa = tp->ip6;
*privp = tp->ip6;
s = VTCP_connect(tp->ip6, msec);
}
return (s);
......@@ -269,43 +269,43 @@ VTP_Open(const struct tcp_pool *tp, double tmo, const struct suckaddr **sa)
*/
void
VTP_Recycle(const struct worker *wrk, struct vtp **vtpp)
VTP_Recycle(const struct worker *wrk, struct pfd **pfdp)
{
struct vtp *vtp;
struct pfd *pfd;
struct tcp_pool *tp;
int i = 0;
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
vtp = *vtpp;
*vtpp = NULL;
CHECK_OBJ_NOTNULL(vtp, VTP_MAGIC);
tp = vtp->tcp_pool;
pfd = *pfdp;
*pfdp = NULL;
CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
tp = pfd->tcp_pool;
CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
assert(vtp->state == VTP_STATE_USED);
assert(vtp->fd > 0);
assert(pfd->state == PFD_STATE_USED);
assert(pfd->fd > 0);
Lck_Lock(&tp->mtx);
tp->n_used--;
vtp->waited->priv1 = vtp;
vtp->waited->fd = vtp->fd;
vtp->waited->idle = VTIM_real();
vtp->state = VTP_STATE_AVAIL;
vtp->waited->func = tcp_handle;
vtp->waited->tmo = &cache_param->backend_idle_timeout;
if (Wait_Enter(wrk->pool->waiter, vtp->waited)) {
VTCP_close(&vtp->fd);
memset(vtp, 0x33, sizeof *vtp);
free(vtp);
pfd->waited->priv1 = pfd;
pfd->waited->fd = pfd->fd;
pfd->waited->idle = VTIM_real();
pfd->state = PFD_STATE_AVAIL;
pfd->waited->func = tcp_handle;
pfd->waited->tmo = &cache_param->backend_idle_timeout;
if (Wait_Enter(wrk->pool->waiter, pfd->waited)) {
VTCP_close(&pfd->fd);
memset(pfd, 0x33, sizeof *pfd);
free(pfd);
// XXX: stats
vtp = NULL;
pfd = NULL;
} else {
VTAILQ_INSERT_HEAD(&tp->connlist, vtp, list);
VTAILQ_INSERT_HEAD(&tp->connlist, pfd, list);
i++;
}
if (vtp != NULL)
if (pfd != NULL)
tp->n_conn++;
Lck_Unlock(&tp->mtx);
......@@ -314,7 +314,7 @@ VTP_Recycle(const struct worker *wrk, struct vtp **vtpp)
* In varnishtest we do not have the luxury of using
* multiple backend connections, so whenever we end up
* in the "pending" case, take a short nap to let the
* waiter catch up and put the vtp back into circulations.
* waiter catch up and put the pfd back into circulation.
*
* In particular ESI:include related tests suffer random
* failures without this.
......@@ -332,33 +332,33 @@ VTP_Recycle(const struct worker *wrk, struct vtp **vtpp)
*/
void
VTP_Close(struct vtp **vtpp)
VTP_Close(struct pfd **pfdp)
{
struct vtp *vtp;
struct pfd *pfd;
struct tcp_pool *tp;
vtp = *vtpp;
*vtpp = NULL;
CHECK_OBJ_NOTNULL(vtp, VTP_MAGIC);
tp = vtp->tcp_pool;
pfd = *pfdp;
*pfdp = NULL;
CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
tp = pfd->tcp_pool;
CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
assert(vtp->fd > 0);
assert(pfd->fd > 0);
Lck_Lock(&tp->mtx);
assert(vtp->state == VTP_STATE_USED || vtp->state == VTP_STATE_STOLEN);
assert(pfd->state == PFD_STATE_USED || pfd->state == PFD_STATE_STOLEN);
tp->n_used--;
if (vtp->state == VTP_STATE_STOLEN) {
(void)shutdown(vtp->fd, SHUT_RDWR);
VTAILQ_REMOVE(&tp->connlist, vtp, list);
vtp->state = VTP_STATE_CLEANUP;
VTAILQ_INSERT_HEAD(&tp->killlist, vtp, list);
if (pfd->state == PFD_STATE_STOLEN) {
(void)shutdown(pfd->fd, SHUT_RDWR);
VTAILQ_REMOVE(&tp->connlist, pfd, list);
pfd->state = PFD_STATE_CLEANUP;
VTAILQ_INSERT_HEAD(&tp->killlist, pfd, list);
tp->n_kill++;
} else {
assert(vtp->state == VTP_STATE_USED);
VTCP_close(&vtp->fd);
memset(vtp, 0x44, sizeof *vtp);
free(vtp);
assert(pfd->state == PFD_STATE_USED);
VTCP_close(&pfd->fd);
memset(pfd, 0x44, sizeof *pfd);
free(pfd);
}
Lck_Unlock(&tp->mtx);
}
......@@ -367,69 +367,69 @@ VTP_Close(struct vtp **vtpp)
* Get a connection
*/
struct vtp *
struct pfd *
VTP_Get(struct tcp_pool *tp, double tmo, struct worker *wrk,
unsigned force_fresh)
{
struct vtp *vtp;
struct pfd *pfd;
CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
Lck_Lock(&tp->mtx);
vtp = VTAILQ_FIRST(&tp->connlist);
CHECK_OBJ_ORNULL(vtp, VTP_MAGIC);
if (force_fresh || vtp == NULL || vtp->state == VTP_STATE_STOLEN)
vtp = NULL;
pfd = VTAILQ_FIRST(&tp->connlist);
CHECK_OBJ_ORNULL(pfd, PFD_MAGIC);
if (force_fresh || pfd == NULL || pfd->state == PFD_STATE_STOLEN)
pfd = NULL;
else {
assert(vtp->tcp_pool == tp);
assert(vtp->state == VTP_STATE_AVAIL);
VTAILQ_REMOVE(&tp->connlist, vtp, list);
VTAILQ_INSERT_TAIL(&tp->connlist, vtp, list);
assert(pfd->tcp_pool == tp);
assert(pfd->state == PFD_STATE_AVAIL);
VTAILQ_REMOVE(&tp->connlist, pfd, list);
VTAILQ_INSERT_TAIL(&tp->connlist, pfd, list);
tp->n_conn--;
VSC_C_main->backend_reuse++;
vtp->state = VTP_STATE_STOLEN;
vtp->cond = &wrk->cond;
pfd->state = PFD_STATE_STOLEN;
pfd->cond = &wrk->cond;
}
tp->n_used++; // Opening mostly works
Lck_Unlock(&tp->mtx);
if (vtp != NULL)
return (vtp);
ALLOC_OBJ(vtp, VTP_MAGIC);
AN(vtp);
INIT_OBJ(vtp->waited, WAITED_MAGIC);
vtp->state = VTP_STATE_USED;
vtp->tcp_pool = tp;
vtp->fd = VTP_Open(tp, tmo, &vtp->addr);
if (vtp->fd < 0) {
FREE_OBJ(vtp);
if (pfd != NULL)
return (pfd);
ALLOC_OBJ(pfd, PFD_MAGIC);
AN(pfd);
INIT_OBJ(pfd->waited, WAITED_MAGIC);
pfd->state = PFD_STATE_USED;
pfd->tcp_pool = tp;
pfd->fd = VTP_Open(tp, tmo, &pfd->priv);
if (pfd->fd < 0) {
FREE_OBJ(pfd);
Lck_Lock(&tp->mtx);
tp->n_used--; // Nope, didn't work after all.
Lck_Unlock(&tp->mtx);
} else
VSC_C_main->backend_conn++;
return (vtp);
return (pfd);
}
/*--------------------------------------------------------------------
*/
int
VTP_Wait(struct worker *wrk, struct vtp *vtp, double tmo)
VTP_Wait(struct worker *wrk, struct pfd *pfd, double tmo)
{
struct tcp_pool *tp;
int r;
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
CHECK_OBJ_NOTNULL(vtp, VTP_MAGIC);
tp = vtp->tcp_pool;
CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
tp = pfd->tcp_pool;
CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
assert(vtp->cond == &wrk->cond);
assert(pfd->cond == &wrk->cond);
Lck_Lock(&tp->mtx);
while (vtp->state == VTP_STATE_STOLEN) {
while (pfd->state == PFD_STATE_STOLEN) {
r = Lck_CondWait(&wrk->cond, &tp->mtx, tmo);
if (r != 0) {
if (r == EINTR)
......@@ -439,8 +439,8 @@ VTP_Wait(struct worker *wrk, struct vtp *vtp, double tmo)
return (1);
}
}
assert(vtp->state == VTP_STATE_USED);
vtp->cond = NULL;
assert(pfd->state == PFD_STATE_USED);
pfd->cond = NULL;
Lck_Unlock(&tp->mtx);
return (0);
......
......@@ -32,17 +32,17 @@
struct tcp_pool;
struct vtp {
struct pfd {
unsigned magic;
#define VTP_MAGIC 0x0c5e6592
#define PFD_MAGIC 0x0c5e6593
int fd;
VTAILQ_ENTRY(vtp) list;
const struct suckaddr *addr;
VTAILQ_ENTRY(pfd) list;
const void *priv;
uint8_t state;
#define VTP_STATE_AVAIL (1<<0)
#define VTP_STATE_USED (1<<1)
#define VTP_STATE_STOLEN (1<<2)
#define VTP_STATE_CLEANUP (1<<3)
#define PFD_STATE_AVAIL (1<<0)
#define PFD_STATE_USED (1<<1)
#define PFD_STATE_STOLEN (1<<2)
#define PFD_STATE_CLEANUP (1<<3)
struct waited waited[1];
struct tcp_pool *tcp_pool;
......@@ -72,28 +72,28 @@ void VTP_Rel(struct tcp_pool **);
* the pool is destroyed and all cached connections closed.
*/
int VTP_Open(const struct tcp_pool *, double tmo, const struct suckaddr **);
int VTP_Open(const struct tcp_pool *, double tmo, const void **);
/*
* Open a new connection and return the address used.
*/
void VTP_Close(struct vtp **);
void VTP_Close(struct pfd **);
/*
* Close a connection.
*/
void VTP_Recycle(const struct worker *, struct vtp **);
void VTP_Recycle(const struct worker *, struct pfd **);
/*
* Recycle an open connection.
*/
struct vtp *VTP_Get(struct tcp_pool *, double tmo, struct worker *,
struct pfd *VTP_Get(struct tcp_pool *, double tmo, struct worker *,
unsigned force_fresh);
/*
* Get a (possibly) recycled connection.
*/
int VTP_Wait(struct worker *, struct vtp *, double tmo);
int VTP_Wait(struct worker *, struct pfd *, double tmo);
/*
* If the connection was recycled (state != PFD_STATE_USED) call this
* function before attempting to receive on the connection.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment