Commit 05a68698 authored by Dridi Boukelmoune

http2_session: Implement transport polling

The error check is not performed in a critical section to avoid
contention, at the risk of not seeing the error until the next
transport poll.
parent 6ad7be6f
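
A rough standalone sketch of the trade-off described in the commit message: the writer records the error under a lock while the poller reads the flag without one, accepting that the error may only be noticed on a later poll. All names below are made up for illustration; this is not Varnish source code.

/* Illustrative only: hypothetical names, not Varnish internals. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct fake_session {
	pthread_mutex_t		mtx;
	int			error;	/* set under mtx by the session thread */
};

static void *
session_thread(void *priv)
{
	struct fake_session *sp = priv;

	usleep(10000);			/* pretend to process frames for a while */
	pthread_mutex_lock(&sp->mtx);
	sp->error = 1;			/* a stream or connection error occurred */
	pthread_mutex_unlock(&sp->mtx);
	return (NULL);
}

static int
transport_poll(const struct fake_session *sp)
{
	/* No lock taken: cheap, but the error may only be seen on a later poll. */
	return (sp->error ? -1 : 1);
}

int
main(void)
{
	struct fake_session sp = { PTHREAD_MUTEX_INITIALIZER, 0 };
	pthread_t thr;

	pthread_create(&thr, NULL, session_thread, &sp);
	while (transport_poll(&sp) > 0)
		usleep(1000);		/* stand-in for delivering more response body */
	pthread_join(thr, NULL);
	printf("poll reported failure\n");
	return (0);
}
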
@@ -441,6 +441,16 @@ h2_new_session(struct worker *wrk, void *arg)
	wrk->vsl = NULL;
}

static int v_matchproto_(vtr_poll_f)
h2_poll(struct req *req)
{
	struct h2_req *r2;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CAST_OBJ_NOTNULL(r2, req->transport_priv, H2_REQ_MAGIC);
	return (r2->error ? -1 : 1);
}

struct transport HTTP2_transport = {
	.name =			"HTTP/2",
	.magic =		TRANSPORT_MAGIC,
@@ -450,4 +460,5 @@ struct transport HTTP2_transport = {
	.req_body =		h2_req_body,
	.req_fail =		h2_req_fail,
	.sess_panic =		h2_sess_panic,
	.poll =			h2_poll,
};
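
For context, a self-contained sketch of the optional-callback pattern that the .poll registration above relies on: transports that do not set .poll get a NULL pointer from the designated initializer, so a caller would presumably check it before dispatching. All names are hypothetical; this is not Varnish source code.

#include <stddef.h>
#include <stdio.h>

struct fake_req;

/* Stand-in for the vtr_poll_f callback type used above. */
typedef int fake_poll_f(struct fake_req *);

struct fake_transport {
	const char	*name;
	fake_poll_f	*poll;		/* optional, may be NULL */
};

struct fake_req {
	const struct fake_transport	*transport;
	int				error;
};

static int
fake_h2_poll(struct fake_req *req)
{
	return (req->error ? -1 : 1);	/* -1: give up, 1: keep going */
}

static const struct fake_transport fake_h1 = { "HTTP/1", NULL };
static const struct fake_transport fake_h2 = { "HTTP/2", fake_h2_poll };

static int
fake_transport_poll(struct fake_req *req)
{
	/* Transports without a poll callback never report asynchronous errors. */
	if (req->transport->poll == NULL)
		return (1);
	return (req->transport->poll(req));
}

int
main(void)
{
	struct fake_req r1 = { &fake_h1, 1 };
	struct fake_req r2 = { &fake_h2, 1 };

	printf("h1 poll: %d, h2 poll: %d\n",
	    fake_transport_poll(&r1), fake_transport_poll(&r2));
	return (0);
}
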
@@ -5,6 +5,9 @@ barrier b2 sock 3
barrier b3 sock 3
barrier b4 sock 3

barrier b2_err cond 2
barrier b3_err cond 2

server s1 {
	rxreq
	txresp -bodylen 66300
@@ -64,6 +67,13 @@ client c1 {
	stream 0 -wait
} -run

varnish v1 -vsl_catchup

logexpect l2 -v v1 -g raw {
	expect * * ReqMethod GET
	expect * = VCL_call DELIVER
} -start

client c2 {
	stream 0 {
		barrier b2 sync
@@ -71,6 +81,7 @@ client c2 {
	stream 1 {
		txreq -hdr barrier ${b2_sock}
		barrier b2_err sync
		txdata -data "fail"
		rxrst
		expect rst.err == STREAM_CLOSED
@@ -78,7 +89,17 @@ client c2 {
	} -run
	stream 0 -wait
} -run
} -start

logexpect l2 -wait
barrier b2_err sync
client c2 -wait

logexpect l3 -v v1 -g raw {
	expect * * ReqMethod POST
	expect * = VCL_call DELIVER
} -start

client c3 {
	stream 0 {
@@ -96,6 +117,7 @@ client c3 {
	stream 1 {
		txreq -req "POST" -hdr barrier ${b3_sock} -nostrend
		txdata -data "ok"
		barrier b3_err sync
		txdata -data "fail"
		rxrst
		expect rst.err == STREAM_CLOSED
@@ -117,4 +139,9 @@ client c3 {
	} -run
	stream 0 -wait
} -run
} -start

logexpect l3 -wait
barrier b3_err sync
client c3 -wait
varnishtest "h2 reset interrupt"

barrier b1 sock 2
barrier b2 sock 2

varnish v1 -cliok "param.set feature +http2"
varnish v1 -cliok "param.set debug +syncvsl"

varnish v1 -vcl {
	import vtc;

	backend be none;

	sub vcl_recv {
		vtc.barrier_sync("${b1_sock}");
		vtc.barrier_sync("${b2_sock}");
	}

	sub vcl_miss {
		vtc.panic("unreachable");
	}
} -start

logexpect l1 -v v1 -g raw -i Debug {
	expect * * Debug "^H2RXF RST_STREAM"
} -start

client c1 {
	stream 1 {
		txreq
		barrier b1 sync
		txrst
	} -run
} -start

logexpect l1 -wait
barrier b2 sync

varnish v1 -vsl_catchup
varnish v1 -expect req_reset == 1

# NB: The varnishncsa command below shows a minimal pattern to collect
# "rapid reset" suspects per session, with the IP address. Here rapid
# is interpreted as before a second elapsed. Session VXIDs showing up
# numerous times become increasingly more suspicious. The format can of
# course be extended to add anything else useful for data mining.

shell -expect "1000 ${localhost}" {
	varnishncsa -n ${v1_name} -d \
		-q 'Timestamp:Reset[2] < 1.0' -F '%{VSL:Begin[2]}x %h'
}
@@ -77,7 +77,8 @@ Restart
	Client request is being restarted.

Reset
-	The client closed its connection or reset its stream. Request
+	The client closed its connection, reset its stream or caused
+	a stream error that forced Varnish to reset the stream. Request
	processing is interrupted and considered failed.

Pipe handling timestamps
@@ -350,7 +350,9 @@
	:oneliner:	Requests reset

	Number of times a client left before the VCL processing of its
-	requests completed.
+	requests completed. For HTTP/2 sessions, either the stream was
+	reset by an RST_STREAM frame from the client, or a stream or
+	connection error occurred.

.. varnish_vsc:: n_object
	:type: gauge