Commit a0ad9025 authored by Poul-Henning Kamp

Fix #2923 properly by tracking which requests have been counted

towards our max-session limit, and by adjusting the counts
at frame rx/tx time.

Fixes: #2923

Thanks to: xcir
parent 361070ea
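In short, the new accounting gives each stream a counted flag and the session an open_streams counter: a stream is counted when its HEADERS frame is accepted in h2_rx_headers, and uncounted exactly once, either in h2_kill_req or when H2_Send transmits the frame that closes the stream (HEADERS or DATA carrying END_STREAM, or RST_STREAM). The sketch below is a minimal stand-alone model of that bookkeeping, not the Varnish code itself; the struct layout and function names are illustrative only.

/* Stand-alone sketch of the counted/open_streams bookkeeping; illustrative only. */
#include <assert.h>
#include <stdio.h>

struct sketch_sess {
	int open_streams;
	int max_concurrent_streams;
};

struct sketch_req {
	int counted;
};

/* New stream admitted on HEADERS rx: refuse above the limit, else count it once. */
static int
sketch_admit(struct sketch_sess *h2, struct sketch_req *r2)
{
	if (h2->open_streams >= h2->max_concurrent_streams)
		return (-1);		/* caller refuses the stream */
	r2->counted = 1;
	h2->open_streams++;
	return (0);
}

/* Called when a stream is killed, and when the closing frame (END_STREAM
 * or RST_STREAM) is transmitted: the decrement happens at most once. */
static void
sketch_uncount(struct sketch_sess *h2, struct sketch_req *r2)
{
	if (!r2->counted)
		return;
	assert(h2->open_streams > 0);
	h2->open_streams--;
	r2->counted = 0;
}

int
main(void)
{
	struct sketch_sess h2 = { 0, 3 };
	struct sketch_req r1 = { 0 }, r3 = { 0 };

	if (sketch_admit(&h2, &r1) != 0 || sketch_admit(&h2, &r3) != 0)
		return (1);
	sketch_uncount(&h2, &r3);	/* e.g. client sent RST_STREAM */
	sketch_uncount(&h2, &r3);	/* later kill path: no double decrement */
	assert(h2.open_streams == 1);
	printf("open_streams = %d\n", h2.open_streams);
	return (0);
}

The point of the flag is idempotence: the kill path and the transmit path can both run for the same stream without the counter drifting, because only the first of them finds counted set.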
@@ -119,6 +119,7 @@ struct h2_req {
uint32_t stream;
int scheduled;
enum h2_stream_e state;
+ int counted;
struct h2_sess *h2sess;
struct req *req;
double t_send;
@@ -148,7 +149,7 @@ struct h2_sess {
struct sess *sess;
int refcnt;
- int pending_kills;
+ int open_streams;
uint32_t highest_stream;
int bogosity;
int do_sweep;
......
@@ -197,8 +197,6 @@ h2_del_req(struct worker *wrk, const struct h2_req *r2)
Lck_Lock(&sp->mtx);
assert(h2->refcnt > 0);
--h2->refcnt;
- if (h2->pending_kills > 0)
- h2->pending_kills--;
/* XXX: PRIORITY reshuffle */
VTAILQ_REMOVE(&h2->streams, r2, list);
Lck_Unlock(&sp->mtx);
@@ -217,7 +215,11 @@ h2_kill_req(struct worker *wrk, struct h2_sess *h2,
Lck_Lock(&h2->sess->mtx);
VSLb(h2->vsl, SLT_Debug, "KILL st=%u state=%d sched=%d",
r2->stream, r2->state, r2->scheduled);
- h2->pending_kills++;
+ if (r2->counted) {
+ assert(h2->open_streams > 0);
+ h2->open_streams--;
+ r2->counted = 0;
+ }
if (r2->error == NULL)
r2->error = h2e;
if (r2->scheduled) {
@@ -638,7 +640,7 @@ h2_rx_headers(struct worker *wrk, struct h2_sess *h2, struct h2_req *r2)
if (r2 == NULL) {
if (h2->rxf_stream <= h2->highest_stream)
return (H2CE_PROTOCOL_ERROR); // rfc7540,l,1153,1158
- if (h2->refcnt - h2->pending_kills >
+ if (h2->open_streams >=
h2->local_settings.max_concurrent_streams) {
VSLb(h2->vsl, SLT_Debug,
"H2: stream %u: Hit maximum number of "
@@ -647,6 +649,8 @@ h2_rx_headers(struct worker *wrk, struct h2_sess *h2, struct h2_req *r2)
}
h2->highest_stream = h2->rxf_stream;
r2 = h2_new_req(wrk, h2, h2->rxf_stream, NULL);
+ r2->counted = 1;
+ h2->open_streams++;
}
CHECK_OBJ_NOTNULL(r2, H2_REQ_MAGIC);
......
@@ -285,6 +285,15 @@ H2_Send(struct worker *wrk, struct h2_req *r2,
Lck_Lock(&h2->sess->mtx);
mfs = h2->remote_settings.max_frame_size;
+ if (r2->counted && (
+ (ftyp == H2_F_HEADERS && (flags & H2FF_HEADERS_END_STREAM)) ||
+ (ftyp == H2_F_DATA && (flags & H2FF_DATA_END_STREAM)) ||
+ ftyp == H2_F_RST_STREAM
+ )) {
+ assert(h2->open_streams > 0);
+ h2->open_streams--;
+ r2->counted = 0;
+ }
Lck_Unlock(&h2->sess->mtx);
if (ftyp->respect_window) {
......
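On the transmit side, the condition added to H2_Send above treats a stream as closed from our end when we send HEADERS or DATA with the END_STREAM flag set, or an RST_STREAM frame. Restated as a small predicate, with illustrative stand-ins instead of the H2_F_*/H2FF_* definitions from the frame tables:

/* Sketch only: stand-ins for the real frame-type and flag definitions. */
enum sketch_ftyp { SK_F_HEADERS, SK_F_DATA, SK_F_RST_STREAM, SK_F_OTHER };
#define SK_END_STREAM 0x01	/* END_STREAM flag bit on HEADERS and DATA */

/* Does sending this frame end the stream from the sender's side? */
int
sketch_closes_stream(enum sketch_ftyp ftyp, unsigned flags)
{
	if (ftyp == SK_F_RST_STREAM)
		return (1);
	if (ftyp == SK_F_HEADERS || ftyp == SK_F_DATA)
		return ((flags & SK_END_STREAM) != 0);
	return (0);
}

When that condition holds and the stream is still counted, open_streams is decremented under the session lock, mirroring the decrement in h2_kill_req, so whichever of the two paths runs first releases the slot.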
varnishtest "race cond in max_concurrent_streams when request after receiving RST_STREAM"
- barrier b1 sock 6
- barrier b2 sock 6
- barrier b3 sock 6
- barrier b4 sock 6
+ barrier b1 sock 2
+ barrier b2 sock 2
+ barrier b3 sock 2
+ barrier b4 sock 2
+ barrier b5 sock 3
server s1 {
- rxreq
- txresp
+ rxreq
+ expect req.url == /nosync
+ txresp
+ rxreq
+ expect req.url == /sync
+ txresp
} -start
varnish v1 -cliok "param.set feature +http2"
@@ -15,68 +20,81 @@ varnish v1 -cliok "param.set debug +syncvsl"
varnish v1 -cliok "param.set h2_max_concurrent_streams 3"
varnish v1 -vcl+backend {
- import vtc;
+ import vtc;
- sub vcl_recv {
- }
+ sub vcl_recv {
+ }
- sub vcl_backend_fetch {
- vtc.barrier_sync("${b1_sock}");
- vtc.barrier_sync("${b2_sock}");
- vtc.barrier_sync("${b3_sock}");
- vtc.barrier_sync("${b4_sock}");
- }
+ sub vcl_backend_fetch {
+ if(bereq.url ~ "/sync"){
+ vtc.barrier_sync("${b1_sock}");
+ vtc.barrier_sync("${b5_sock}");
+ }
+ }
} -start
client c1 {
- txpri
- stream 0 rxsettings -run
- stream 1 {
- txreq
- barrier b1 sync
- barrier b2 sync
- barrier b3 sync
- barrier b4 sync
- rxresp
- expect resp.status == 200
- } -start
- stream 3 {
- barrier b1 sync
- txreq
- txrst -err 0x8
- barrier b2 sync
- barrier b3 sync
- barrier b4 sync
- } -start
- stream 5 {
- barrier b1 sync
- barrier b2 sync
- txreq
- txrst -err 0x8
- barrier b3 sync
- barrier b4 sync
- } -start
- stream 7 {
- barrier b1 sync
- barrier b2 sync
- barrier b3 sync
- txreq
- barrier b4 sync
- rxresp
- expect resp.status == 200
- } -start
- stream 9 {
- barrier b1 sync
- barrier b2 sync
- barrier b3 sync
- barrier b4 sync
- txreq
- rxresp
- expect resp.status == 200
- } -start
+ txpri
+ stream 0 rxsettings -run
+ stream 1 {
+ txreq -url /sync
+ rxresp
+ expect resp.status == 200
+ } -start
+ stream 3 {
+ barrier b1 sync
+ delay .5
+ # Goes on waiting list
+ txreq -url /sync
+ delay .5
+ txrst -err 0x8
+ delay .5
+ barrier b2 sync
+ } -start
+ stream 5 {
+ barrier b2 sync
+ txreq -url /nosync
+ delay .5
+ rxresp
+ delay .5
+ expect resp.status == 200
+ barrier b3 sync
+ } -start
+ stream 7 {
+ barrier b3 sync
+ # Goes on waiting list
+ txreq -url /sync
+ delay .5
+ txrst -err 0x8
+ delay .5
+ barrier b4 sync
+ } -start
+ stream 9 {
+ barrier b4 sync
+ # Goes on waiting list
+ txreq -url /sync
+ delay .5
+ barrier b5 sync
+ delay .5
+ rxresp
+ delay .5
+ expect resp.status == 200
+ } -start
+ stream 11 {
+ barrier b5 sync
+ txreq -url /sync
+ delay .5
+ rxresp
+ delay .5
+ expect resp.status == 200
+ } -start
} -run