Commit 507a1019 authored by Martin Blix Grydeland

Add VSL Query/Dispatch structured log messages facility

Move vtree.h from varnishtop to the include directory so it's available to everyone.

Simple proof-of-concept query implementation matching a single regex
against any log line.
parent 445ba2bb
......@@ -8,8 +8,7 @@ dist_man_MANS = varnishtop.1
varnishtop_SOURCES = varnishtop.c \
$(top_builddir)/lib/libvarnish/vas.c \
$(top_builddir)/lib/libvarnish/version.c \
vtree.h
$(top_builddir)/lib/libvarnish/version.c
varnishtop_LDADD = \
$(top_builddir)/lib/libvarnishcompat/libvarnishcompat.la \
......
......@@ -57,7 +57,8 @@ nobase_noinst_HEADERS = \
vsub.h \
vss.h \
vtcp.h \
vtim.h
vtim.h \
vtree.h
# Headers for use with vmods
pkgdataincludedir = $(pkgdatadir)/include
......
/*-
* Copyright (c) 2006 Verdens Gang AS
* Copyright (c) 2006-2011 Varnish Software AS
* Copyright (c) 2006-2013 Varnish Software AS
* All rights reserved.
*
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
* Author: Martin Blix Grydeland <martin@varnish-software.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -48,9 +49,27 @@
VSL_x_USAGE
struct VSL_data;
struct VSLQ;
struct VSL_cursor {
const uint32_t *ptr; /* Record pointer */
/* If not -1, the vxid of all records in this set */
int32_t vxid;
/* For set cursors, the depth level of these records */
unsigned level;
/* Nonzero if pointer values from this cursor are still valid
after the next call to VSL_Next */
unsigned shmptr_ok;
};
enum VSL_grouping_e {
VSL_g_raw,
VSL_g_vxid,
VSL_g_request,
VSL_g_session,
};
extern const char *VSL_tags[256];
......@@ -155,13 +174,138 @@ int VSL_Match(struct VSL_data *vsl, const struct VSL_cursor *c);
* 0: No match
*/
int VSL_Print(struct VSL_data *vsl, const struct VSL_cursor *c, void *file);
int VSL_PrintVXID(struct VSL_data *vsl, const struct VSL_cursor *c, void *fo);
/*
* Print the log record pointed to by cursor to stream.
*
* Format: <vxid> <tag> <type> <data>
*
* Arguments:
* vsl: The VSL_data context
* c: A VSL_cursor
* fo: A FILE* pointer
*
* Return values:
* 0: OK
* -5: I/O write error - see errno
*/
int VSL_PrintLevel(struct VSL_data *vsl, const struct VSL_cursor *c, void *fo);
/*
* Print the log record pointed to by cursor to stream.
*
* Format: <level> <tag> <type> <data>
*
* Arguments:
* vsl: The VSL_data context
* c: A VSL_cursor
* fo: A FILE* pointer
*
* Return values:
* 0: OK
* -5: I/O write error - see errno
*/
int VSL_PrintAll(struct VSL_data *vsl, struct VSL_cursor *c, void *fo);
/*
* Calls VSL_Next on c until c is exhausted, printing all records
* where VSL_Match returns true.
*
* If c->vxid == -1, calls VSL_PrintVXID on each record. Else
* prints a VXID header and calls VSL_PrintLevel on each record.
*
* Arguments:
* vsl: The VSL_data context
* c: A VSL_cursor
* fo: A FILE* pointer, stdout if NULL
*
* Return values:
* 0: OK
* !=0: Return value from either VSL_Next, VSL_PrintVXID or VSL_PrintLevel
*/
int VSL_PrintSet(struct VSL_data *vsl, struct VSL_cursor *cp[], void *fo);
/*
* Calls VSL_PrintAll on each cursor in cp[]. If any cursor in cp
* has vxid != -1 it will end with a double line break as a set
delimiter.
*
* Arguments:
* vsl: The VSL_data context
* cp: A NULL-terminated array of VSL_cursor pointers
* fo: A FILE* pointer, stdout if NULL
*
* Return values:
* 0: OK
* !=0: Return value from either VSL_Next or VSL_PrintAll
*/
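/*
 * Example (illustrative only, not part of these declarations): given a
 * VSL_data handle 'vsl' and a cursor 'c' obtained from e.g.
 * VSL_CursorVSM(), the currently visible log can be dumped
 * varnishlog-style with:
 *
 *	(void)VSL_PrintAll(vsl, c, stdout);
 */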
struct VSLQ *VSLQ_New(struct VSL_data *vsl, struct VSL_cursor **cp,
enum VSL_grouping_e grouping, const char *query);
/*
* Create a new query context using cp. On success *cp is set to NULL,
* and the cursor will be deleted when the query is deleted.
*
* Arguments:
* vsl: The VSL_data context
* cp: The cursor to use
* grouping: VXID grouping to report on
* query: Query match expression
*
* Return values:
* non-NULL: OK
* NULL: Error - see VSL_Error
*/
void VSLQ_Delete(struct VSLQ **pvslq);
/*
* Delete the query pointed to by pvslq, freeing up the resources
*/
typedef int VSLQ_dispatch_f(struct VSL_data *vsl, struct VSL_cursor *cp[],
void *priv);
/*
* The callback function type for use with VSLQ_Dispatch.
*
* Arguments:
* vsl: The VSL_data context
* cp[]: A NULL-terminated array of pointers to cursors. Each cursor
* will iterate over the log records of a single VXID
* priv: The priv argument from VSLQ_Dispatch
*
* Return value:
* 0: OK - continue
* !=0: Makes VSLQ_Dispatch return with this return value immediately
*/
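/*
 * Example (illustrative sketch, not part of this commit): a minimal
 * VSLQ_dispatch_f implementation that walks the NULL-terminated cursor
 * array and prints the level and vxid of each transaction in the set.
 * The name 'show_vxids' is hypothetical.
 *
 *	static int
 *	show_vxids(struct VSL_data *vsl, struct VSL_cursor *cp[], void *priv)
 *	{
 *		int i;
 *
 *		(void)vsl;
 *		(void)priv;
 *		for (i = 0; cp[i] != NULL; i++)
 *			printf("level %u vxid %d\n",
 *			    cp[i]->level, (int)cp[i]->vxid);
 *		return (0);
 *	}
 */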
int VSLQ_Dispatch(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv);
/*
* Process the log and call func for each record set matching the
* specified query
*
* Arguments:
* vslq: The VSLQ query
* func: The callback function to call. Can be NULL to ignore records.
* priv: An argument passed to func
*
* Return values:
* 0: No more log records available
* !=0: The return value from either VSL_Next() or func
*/
int VSLQ_Flush(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv);
/*
* Flush any pending record sets from the query.
*
* Arguments:
* vslq: The VSLQ query
* func: The callback function to call. Pass NULL to discard the
* pending messages
* priv: An argument passed to func
*
* Return values:
* 0: OK
* !=0: The return value from func
*/
#endif /* VAPI_VSL_H_INCLUDED */
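Below is a minimal usage sketch of the VSLQ facility declared above. It is illustrative only and not part of this commit: it assumes a VSL_data handle and an attached VSM_data handle set up elsewhere, and the names print_set and tail_query, the query pattern and the sleep interval are placeholders. Error handling is kept to a bare minimum.

#include <stdio.h>
#include <unistd.h>

#include "vapi/vsm.h"
#include "vapi/vsl.h"

/* Print every matching record set to the FILE * passed as priv */
static int
print_set(struct VSL_data *vsl, struct VSL_cursor *cp[], void *priv)
{
	return (VSL_PrintSet(vsl, cp, priv));
}

static void
tail_query(struct VSL_data *vsl, struct VSM_data *vsm)
{
	struct VSL_cursor *c;
	struct VSLQ *vslq;
	int i;

	/* Cursor at the shm log tail; on success VSLQ_New takes it over */
	c = VSL_CursorVSM(vsl, vsm, 1);
	if (c == NULL)
		return;
	vslq = VSLQ_New(vsl, &c, VSL_g_vxid, "example\\.com");
	if (vslq == NULL) {
		VSL_DeleteCursor(c);
		return;
	}
	while (1) {
		i = VSLQ_Dispatch(vslq, print_set, stdout);
		if (i < 0)
			break;			/* cursor error */
		if (i == 0)
			(void)usleep(10000);	/* no new records yet */
	}
	(void)VSLQ_Flush(vslq, print_set, stdout);
	VSLQ_Delete(&vslq);
}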
......@@ -27,6 +27,8 @@ libvarnishapi_la_SOURCES = \
vsm.c \
vsl_arg.c \
vsl_cursor.c \
vsl_dispatch.c \
vsl_query.c \
vsl.c \
vsc.c \
libvarnishapi.map
......
......@@ -104,6 +104,13 @@ LIBVARNISHAPI_1.3 {
VSL_DeleteCursor;
VSL_Next;
VSL_Match;
VSL_Print;
VSL_PrintVXID;
VSL_PrintLevel;
VSL_PrintAll;
VSL_PrintSet;
VSLQ_New;
VSLQ_Delete;
VSLQ_Dispatch;
VSLQ_Flush;
# Variables:
} LIBVARNISHAPI_1.0;
......@@ -155,7 +155,7 @@ VSL_Match(struct VSL_data *vsl, const struct VSL_cursor *c)
}
int
VSL_Print(struct VSL_data *vsl, const struct VSL_cursor *c, void *fo)
VSL_PrintVXID(struct VSL_data *vsl, const struct VSL_cursor *c, void *fo)
{
enum VSL_tag_e tag;
uint32_t vxid;
......@@ -200,3 +200,101 @@ VSL_Print(struct VSL_data *vsl, const struct VSL_cursor *c, void *fo)
return (-5);
return (0);
}
int
VSL_PrintLevel(struct VSL_data *vsl, const struct VSL_cursor *c, void *fo)
{
enum VSL_tag_e tag;
unsigned len, lvl;
const char *data;
int type;
int i;
CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
if (c == NULL || c->ptr == NULL)
return (0);
if (fo == NULL)
fo = stdout;
tag = VSL_TAG(c->ptr);
len = VSL_LEN(c->ptr);
type = VSL_CLIENT(c->ptr) ? 'c' : VSL_BACKEND(c->ptr) ? 'b' : '-';
data = VSL_CDATA(c->ptr);
lvl = c->level;
if (tag == SLT_Debug) {
i = fprintf(fo, "%2u %-15s %c \"", lvl, VSL_tags[tag],
type);
if (i < 0)
return (-5);
while (len-- > 0) {
if (*data >= ' ' && *data <= '~')
i = fprintf(fo, "%c", *data);
else
i = fprintf(fo, "%%%02x",
(unsigned char)*data);
if (i < 0)
return (-5);
data++;
}
i = fprintf(fo, "\"\n");
if (i < 0)
return (-5);
return (0);
}
i = fprintf(fo, "%2u %-15s %c %.*s\n",
lvl, VSL_tags[tag], type, (int)len, data);
if (i < 0)
return (-5);
return (0);
}
int
VSL_PrintAll(struct VSL_data *vsl, struct VSL_cursor *c, void *fo)
{
int i;
	if (c == NULL)
		return (0);
	if (fo == NULL)
		fo = stdout;
if (c->vxid >= 0) {
i = fprintf(fo, "vv VXID: %11u vv\n", c->vxid);
if (i < 0)
return (-5);
}
while (1) {
i = VSL_Next(c);
if (i <= 0)
return (i);
if (!VSL_Match(vsl, c))
continue;
if (c->vxid < 0)
i = VSL_PrintVXID(vsl, c, fo);
else
i = VSL_PrintLevel(vsl, c, fo);
if (i != 0)
return (i);
}
}
int
VSL_PrintSet(struct VSL_data *vsl, struct VSL_cursor *cp[], void *fo)
{
int i;
int delim = 0;
struct VSL_cursor *c;
	if (fo == NULL)
		fo = stdout;
	c = cp[0];
while (c) {
if (c->vxid >= 0)
delim = 1;
i = VSL_PrintAll(vsl, c, fo);
if (i)
return (i);
c = *++cp;
}
if (delim) {
i = fprintf(fo, "\n");
if (i < 0)
return (-5);
}
return (0);
}
......@@ -37,10 +37,12 @@
int vsl_diag(struct VSL_data *vsl, const char *fmt, ...)
__printflike(2, 3);
int vsl_skip(struct VSL_cursor *c, ssize_t words);
typedef void vslc_delete_f(void *);
typedef int vslc_next_f(void *);
typedef int vslc_reset_f(void *);
typedef int vslc_skip_f(void *, ssize_t words);
struct vslc {
struct VSL_cursor c;
......@@ -50,6 +52,7 @@ struct vslc {
vslc_delete_f *delete;
vslc_next_f *next;
vslc_reset_f *reset;
vslc_skip_f *skip;
};
struct VSL_data {
......@@ -66,3 +69,10 @@ struct VSL_data {
struct vbitmap *vbm_select;
struct vbitmap *vbm_supress;
};
/* vsl_query.c */
struct vslq_query;
struct vslq_query *vslq_newquery(struct VSL_data *vsl,
enum VSL_grouping_e grouping, const char *query);
void vslq_deletequery(struct vslq_query **pquery);
int vslq_runquery(struct vslq_query *query, struct VSL_cursor *cp[]);
......@@ -169,6 +169,23 @@ vslc_vsm_reset(void *cursor)
return (0);
}
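/*
 * Skip 'words' 32-bit words forward in the shm segment without
 * delivering any records. Used by the dispatcher to step over the
 * payload of an SLT__Batch record once its header has been read.
 */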
static int
vslc_vsm_skip(void *cursor, ssize_t words)
{
struct vslc_vsm *c;
CAST_OBJ_NOTNULL(c, cursor, VSLC_VSM_MAGIC);
if (words < 0)
return (-1);
c->next += words;
assert(c->next >= c->head->log);
assert(c->next < c->end);
c->c.c.ptr = NULL;
return (0);
}
struct VSL_cursor *
VSL_CursorVSM(struct VSL_data *vsl, struct VSM_data *vsm, int tail)
{
......@@ -199,10 +216,13 @@ VSL_CursorVSM(struct VSL_data *vsl, struct VSM_data *vsm, int tail)
vsl_diag(vsl, "Out of memory\n");
return (NULL);
}
c->c.c.vxid = -1; /* N/A to this cursor type */
c->c.c.shmptr_ok = 1;
c->c.magic = VSLC_MAGIC;
c->c.delete = vslc_vsm_delete;
c->c.next = vslc_vsm_next;
c->c.reset = vslc_vsm_reset;
c->c.skip = vslc_vsm_skip;
c->vsm = vsm;
c->vf = vf;
......@@ -347,6 +367,7 @@ VSL_CursorFile(struct VSL_data *vsl, const char *name)
vsl_diag(vsl, "Out of memory\n");
return (NULL);
}
c->c.c.vxid = -1; /* N/A to this cursor type */
c->c.magic = VSLC_MAGIC;
c->c.delete = vslc_file_delete;
c->c.next = vslc_file_next;
......@@ -391,3 +412,14 @@ VSL_Next(struct VSL_cursor *cursor)
AN(c->next);
return ((c->next)(c));
}
int
vsl_skip(struct VSL_cursor *cursor, ssize_t words)
{
struct vslc *c;
CAST_OBJ_NOTNULL(c, (void *)cursor, VSLC_MAGIC);
if (c->skip == NULL)
return (-1);
return ((c->skip)(c, words));
}
/*-
* Copyright (c) 2006 Verdens Gang AS
* Copyright (c) 2006-2013 Varnish Software AS
* All rights reserved.
*
* Author: Martin Blix Grydeland <martin@varnish-software.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <stdint.h>
#include "vas.h"
#include "miniobj.h"
#include "vqueue.h"
#include "vtree.h"
#include "vtim.h"
#include "vapi/vsl.h"
#include "vsl_api.h"
#define VTX_CACHE 10
#define VTX_BUFSIZE_MIN 64
enum vtx_type_e {
vtx_t_unknown,
vtx_t_sess,
vtx_t_req,
vtx_t_esireq,
vtx_t_bereq,
};
enum vtx_link_e {
vtx_l_sess,
vtx_l_req,
vtx_l_esireq,
vtx_l_bereq,
};
struct vtx_key {
unsigned vxid;
VRB_ENTRY(vtx_key) entry;
};
VRB_HEAD(vtx_tree, vtx_key);
struct vtx {
struct vtx_key key;
unsigned magic;
#define VTX_MAGIC 0xACC21D09
VTAILQ_ENTRY(vtx) list_child;
VTAILQ_ENTRY(vtx) list_incomplete;
double t_start;
unsigned flags;
#define VTX_F_SHM 0x1
#define VTX_F_COMPLETE 0x2
#define VTX_F_READY 0x4
enum vtx_type_e type;
struct vtx *parent;
VTAILQ_HEAD(,vtx) child;
unsigned n_child;
unsigned n_childready;
unsigned n_descend;
const uint32_t *start;
ssize_t len;
unsigned index;
uint32_t *buf;
ssize_t bufsize;
};
struct vslc_raw {
struct vslc c;
unsigned magic;
#define VSLC_RAW_MAGIC 0x247EBD44
const uint32_t *start;
ssize_t len;
const uint32_t *next;
};
struct vslc_vtx {
struct vslc c;
unsigned magic;
#define VSLC_VTX_MAGIC 0x74C6523F
struct vtx *vtx;
const uint32_t *next;
};
struct VSLQ {
unsigned magic;
#define VSLQ_MAGIC 0x23A8BE97
struct VSL_data *vsl;
struct VSL_cursor *c;
struct vslq_query *query;
enum VSL_grouping_e grouping;
struct vtx_tree tree;
VTAILQ_HEAD(,vtx) incomplete;
unsigned n_incomplete;
VTAILQ_HEAD(,vtx) cache;
unsigned n_cache;
};
static inline int
vtx_keycmp(const struct vtx_key *a, const struct vtx_key *b)
{
if (a->vxid < b->vxid)
return (-1);
if (a->vxid > b->vxid)
return (1);
return (0);
}
VRB_PROTOTYPE(vtx_tree, vtx_key, entry, vtx_keycmp);
VRB_GENERATE(vtx_tree, vtx_key, entry, vtx_keycmp);
static int
vtx_diag(struct vtx *vtx, const char *fmt, ...)
{
va_list ap;
/* XXX: Prepend diagnostic message on vtx as a synthetic log
record. For now print to stderr */
fprintf(stderr, "vtx_diag <%u>: ", vtx->key.vxid);
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
va_end(ap);
fprintf(stderr, "\n");
return (-1);
}
static int
vtx_diag_tag(struct vtx *vtx, const uint32_t *ptr, const char *reason)
{
return (vtx_diag(vtx, "%s (%s: %.*s)", reason, VSL_tags[VSL_TAG(ptr)],
(int)VSL_LEN(ptr), VSL_CDATA(ptr)));
}
static int
vslc_raw_next(void *cursor)
{
struct vslc_raw *c;
CAST_OBJ_NOTNULL(c, cursor, VSLC_RAW_MAGIC);
assert(c->next >= c->start);
assert(c->next <= c->start + c->len);
if (c->next < c->start + c->len) {
c->c.c.ptr = c->next;
c->next = VSL_NEXT(c->next);
return (1);
}
return (0);
}
static int
vslc_raw_reset(void *cursor)
{
struct vslc_raw *c;
CAST_OBJ_NOTNULL(c, cursor, VSLC_RAW_MAGIC);
assert(c->next >= c->start);
assert(c->next <= c->start + c->len);
c->next = c->start;
c->c.c.ptr = NULL;
return (0);
}
static int
vslc_vtx_next(void *cursor)
{
struct vslc_vtx *c;
CAST_OBJ_NOTNULL(c, cursor, VSLC_VTX_MAGIC);
assert(c->next >= c->vtx->start);
assert(c->next <= c->vtx->start + c->vtx->len);
if (c->next < c->vtx->start + c->vtx->len) {
c->c.c.ptr = c->next;
c->next = VSL_NEXT(c->next);
return (1);
}
return (0);
}
static int
vslc_vtx_reset(void *cursor)
{
struct vslc_vtx *c;
CAST_OBJ_NOTNULL(c, cursor, VSLC_VTX_MAGIC);
assert(c->next >= c->vtx->start);
assert(c->next <= c->vtx->start + c->vtx->len);
c->next = c->vtx->start;
c->c.c.ptr = NULL;
return (0);
}
static void
vslc_vtx_setup(struct vslc_vtx *c, struct vtx *vtx, unsigned level)
{
AN(c);
AN(vtx);
memset(c, 0, sizeof *c);
c->c.c.vxid = vtx->key.vxid;
c->c.c.level = level;
c->c.magic = VSLC_MAGIC;
c->c.next = vslc_vtx_next;
c->c.reset = vslc_vtx_reset;
c->magic = VSLC_VTX_MAGIC;
c->vtx = vtx;
c->next = c->vtx->start;
}
static struct vtx *
vtx_new(struct VSLQ *vslq)
{
struct vtx *vtx;
AN(vslq);
if (vslq->n_cache) {
AZ(VTAILQ_EMPTY(&vslq->cache));
vtx = VTAILQ_FIRST(&vslq->cache);
VTAILQ_REMOVE(&vslq->cache, vtx, list_child);
vslq->n_cache--;
} else {
ALLOC_OBJ(vtx, VTX_MAGIC);
AN(vtx);
}
vtx->key.vxid = 0;
vtx->t_start = VTIM_mono();
vtx->flags = 0;
vtx->type = vtx_t_unknown;
vtx->parent = NULL;
VTAILQ_INIT(&vtx->child);
vtx->n_child = 0;
vtx->n_childready = 0;
vtx->n_descend = 0;
vtx->start = vtx->buf;
vtx->len = 0;
vtx->index = 0;
VTAILQ_INSERT_TAIL(&vslq->incomplete, vtx, list_incomplete);
vslq->n_incomplete++;
return (vtx);
}
static void
vtx_free(struct vtx **pvtx)
{
struct vtx *vtx;
AN(pvtx);
vtx = *pvtx;
*pvtx = NULL;
free(vtx->buf);
FREE_OBJ(vtx);
}
static void
vtx_retire(struct VSLQ *vslq, struct vtx **pvtx)
{
struct vtx *vtx;
struct vtx *child;
AN(vslq);
AN(pvtx);
vtx = *pvtx;
*pvtx = NULL;
CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
AN(vtx->flags & VTX_F_COMPLETE);
AN(vtx->flags & VTX_F_READY);
AZ(vtx->parent);
while (!VTAILQ_EMPTY(&vtx->child)) {
child = VTAILQ_FIRST(&vtx->child);
assert(child->parent == vtx);
AN(vtx->n_child);
assert(vtx->n_descend >= child->n_descend + 1);
VTAILQ_REMOVE(&vtx->child, child, list_child);
child->parent = NULL;
vtx->n_child--;
vtx->n_descend -= child->n_descend + 1;
vtx_retire(vslq, &child);
AZ(child);
}
AZ(vtx->n_child);
AZ(vtx->n_descend);
AN(VRB_REMOVE(vtx_tree, &vslq->tree, &vtx->key));
if (vslq->n_cache < VTX_CACHE) {
VTAILQ_INSERT_HEAD(&vslq->cache, vtx, list_child);
vslq->n_cache++;
} else {
vtx_free(&vtx);
AZ(vtx);
}
}
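/* Lookup or insert ("lori") a vtx in the tree by its vxid */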
static struct vtx *
vtx_lori(struct VSLQ *vslq, unsigned vxid)
{
struct vtx *vtx;
struct vtx_key lkey, *key;
AN(vslq);
lkey.vxid = vxid;
key = VRB_FIND(vtx_tree, &vslq->tree, &lkey);
if (key != NULL) {
CAST_OBJ_NOTNULL(vtx, (void *)key, VTX_MAGIC);
return (vtx);
}
vtx = vtx_new(vslq);
AN(vtx);
vtx->key.vxid = vxid;
AZ(VRB_INSERT(vtx_tree, &vslq->tree, &vtx->key));
return (vtx);
}
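/*
 * Append len words of log records to a vtx. If the source pointer is
 * known to stay valid (shmptr_ok) and the vtx holds no data yet, keep
 * a direct shm reference instead of copying. Otherwise copy into the
 * vtx's private buffer, doubling its size as needed; a previously held
 * shm reference is first copied down. Sizes and lengths are in 32-bit
 * words.
 */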
static void
vtx_append(struct vtx *vtx, const uint32_t *ptr, ssize_t len, int shmptr_ok)
{
ssize_t bufsize;
const uint32_t *ptr2;
ssize_t len2;
AN(vtx);
if (vtx->flags & VTX_F_SHM) {
assert(vtx->start != vtx->buf);
ptr2 = vtx->start;
vtx->start = vtx->buf;
len2 = vtx->len;
vtx->len = 0;
vtx->flags &= ~VTX_F_SHM;
vtx_append(vtx, ptr2, len2, 0);
}
if (len == 0)
return;
AN(ptr);
if (shmptr_ok && vtx->len == 0) {
vtx->start = ptr;
vtx->len = len;
vtx->flags |= VTX_F_SHM;
return;
}
bufsize = vtx->bufsize;
if (bufsize == 0)
bufsize = VTX_BUFSIZE_MIN;
while (vtx->len + len > bufsize)
bufsize *= 2;
if (bufsize != vtx->bufsize) {
vtx->buf = realloc(vtx->buf, 4 * bufsize);
AN(vtx->buf);
vtx->bufsize = bufsize;
vtx->start = vtx->buf;
}
memcpy(&vtx->buf[vtx->len], ptr, 4 * len);
vtx->len += len;
}
static struct vtx *
vtx_check_ready(struct VSLQ *vslq, struct vtx *vtx)
{
struct vtx *ready;
AN(vslq);
AN(vtx->flags & VTX_F_COMPLETE);
AZ(vtx->flags & VTX_F_READY);
if (vtx->type == vtx_t_unknown)
vtx_diag(vtx, "vtx of unknown type marked complete");
ready = vtx;
while (1) {
if (ready->flags & VTX_F_COMPLETE &&
ready->n_child == ready->n_childready)
ready->flags |= VTX_F_READY;
else
break;
if (ready->parent == NULL)
break;
ready = ready->parent;
ready->n_childready++;
assert(ready->n_child >= ready->n_childready);
}
if (ready->flags & VTX_F_READY && ready->parent == NULL)
/* Top level vtx ready */
return (ready);
if (vtx->flags & VTX_F_SHM) {
/* Not ready, append zero to make sure it's not a shm
reference */
vtx_append(vtx, NULL, 0, 0);
AZ(vtx->flags & VTX_F_SHM);
}
return (NULL);
}
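/*
 * Parse the payload of an SLT_Begin or SLT_Link record (e.g. "req 1001"
 * or "sess 0") into a vtx type and a vxid. Returns the number of fields
 * parsed (1 or 2), or -1 on parse failure or unknown type.
 */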
static int
vtx_parsetag_bl(const char *str, unsigned strlen, enum vtx_type_e *ptype,
unsigned *pvxid)
{
char ibuf[strlen + 1];
char tbuf[7];
unsigned vxid;
int i;
enum vtx_type_e type = vtx_t_unknown;
AN(str);
memcpy(ibuf, str, strlen);
ibuf[strlen] = '\0';
i = sscanf(ibuf, "%6s %u", tbuf, &vxid);
if (i < 1)
return (-1);
if (!strcmp(tbuf, "sess"))
type = vtx_t_sess;
else if (!strcmp(tbuf, "req"))
type = vtx_t_req;
else if (!strcmp(tbuf, "esireq"))
type = vtx_t_esireq;
else if (!strcmp(tbuf, "bereq"))
type = vtx_t_bereq;
else
return (-1);
if (i == 1)
vxid = 0;
if (ptype)
*ptype = type;
if (pvxid)
*pvxid = vxid;
return (i);
}
static void
vtx_set_parent(struct vtx *parent, struct vtx *child)
{
AN(parent);
AN(child);
AZ(child->parent);
child->parent = parent;
VTAILQ_INSERT_TAIL(&parent->child, child, list_child);
parent->n_child++;
do
parent->n_descend += 1 + child->n_descend;
while ((parent = parent->parent));
}
static int
vtx_scan_begintag(struct VSLQ *vslq, struct vtx *vtx, const uint32_t *ptr)
{
int i;
enum vtx_type_e p_type;
unsigned p_vxid;
struct vtx *p_vtx;
assert(VSL_TAG(ptr) == SLT_Begin);
if (vtx->flags & VTX_F_READY)
return (vtx_diag_tag(vtx, ptr, "link too late"));
i = vtx_parsetag_bl(VSL_CDATA(ptr), VSL_LEN(ptr), &p_type, &p_vxid);
if (i < 1)
return (vtx_diag_tag(vtx, ptr, "parse error"));
/* Check/set vtx type */
assert(p_type != vtx_t_unknown);
if (vtx->type != vtx_t_unknown && vtx->type != p_type)
return (vtx_diag_tag(vtx, ptr, "type mismatch"));
vtx->type = p_type;
if (i == 1 || p_vxid == 0)
return (0);
/* Lookup and check parent vtx */
p_vtx = vtx_lori(vslq, p_vxid);
AN(p_vtx);
if (vtx->parent == p_vtx)
/* Link already exists */
return (0);
if (vtx->parent != NULL)
return (vtx_diag_tag(vtx, ptr, "duplicate link"));
if (p_vtx->flags & VTX_F_READY)
return (vtx_diag_tag(vtx, ptr, "link too late"));
vtx_set_parent(p_vtx, vtx);
return (0);
}
static int
vtx_scan_linktag(struct VSLQ *vslq, struct vtx *vtx, const uint32_t *ptr)
{
int i;
enum vtx_type_e c_type;
unsigned c_vxid;
struct vtx *c_vtx;
assert(VSL_TAG(ptr) == SLT_Link);
if (vtx->flags & VTX_F_READY)
return (vtx_diag_tag(vtx, ptr, "link too late"));
i = vtx_parsetag_bl(VSL_CDATA(ptr), VSL_LEN(ptr), &c_type, &c_vxid);
if (i < 2)
return (vtx_diag_tag(vtx, ptr, "parse error"));
assert(i == 2);
/* Lookup and check child vtx */
c_vtx = vtx_lori(vslq, c_vxid);
AN(c_vtx);
if (c_vtx->parent == vtx)
/* Link already exists */
return (0);
if (c_vtx->parent != NULL)
return (vtx_diag_tag(vtx, ptr, "duplicate link"));
if (c_vtx->flags & VTX_F_READY)
return (vtx_diag_tag(vtx, ptr, "link too late"));
assert(c_type != vtx_t_unknown);
if (c_vtx->type != vtx_t_unknown && c_vtx->type != c_type)
return (vtx_diag_tag(vtx, ptr, "type mismatch"));
c_vtx->type = c_type;
vtx_set_parent(vtx, c_vtx);
return (0);
}
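/*
 * Scan the records appended since the last scan (from vtx->index),
 * establishing parent/child links on SLT_Begin/SLT_Link and marking the
 * vtx complete on SLT_End. Returns the top-level vtx if the whole group
 * became ready as a result, or NULL.
 */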
static struct vtx *
vtx_scan(struct VSLQ *vslq, struct vtx *vtx)
{
const uint32_t *ptr;
enum VSL_tag_e tag;
int complete;
complete = (vtx->flags & VTX_F_COMPLETE ? 1 : 0);
ptr = vtx->start + vtx->index;
assert(ptr <= vtx->start + vtx->len);
for (; ptr < vtx->start + vtx->len; ptr = VSL_NEXT(ptr)) {
tag = VSL_TAG(ptr);
if (complete) {
vtx_diag(vtx, "late log rec");
continue;
}
if (vtx->type == vtx_t_unknown && tag != SLT_Begin)
vtx_diag_tag(vtx, ptr, "early log rec");
switch (tag) {
case SLT_Begin:
(void)vtx_scan_begintag(vslq, vtx, ptr);
break;
case SLT_Link:
(void)vtx_scan_linktag(vslq, vtx, ptr);
break;
case SLT_End:
complete = 1;
break;
default:
break;
}
}
vtx->index = ptr - vtx->start;
assert(vtx->index <= vtx->len);
if (!complete && vtx->flags & VTX_F_SHM) {
/* Append zero to make sure it's not a shm reference */
vtx_append(vtx, NULL, 0, 0);
AZ(vtx->flags & VTX_F_SHM);
}
if (complete) {
VTAILQ_REMOVE(&vslq->incomplete, vtx, list_incomplete);
vtx->flags |= VTX_F_COMPLETE;
AN(vslq->n_incomplete);
vslq->n_incomplete--;
return (vtx_check_ready(vslq, vtx));
}
return (NULL);
}
static struct vtx *
vtx_force(struct VSLQ *vslq, struct vtx *vtx, const char *reason)
{
AZ(vtx->flags & VTX_F_COMPLETE);
AZ(vtx->flags & VTX_F_READY);
vtx_diag(vtx, reason);
VTAILQ_REMOVE(&vslq->incomplete, vtx, list_incomplete);
vtx->flags |= VTX_F_COMPLETE;
AN(vslq->n_incomplete);
vslq->n_incomplete--;
return (vtx_check_ready(vslq, vtx));
}
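/*
 * Deliver a ready top-level vtx to the application callback. Builds one
 * cursor per vtx in the group (the vtx and all its descendants), runs
 * the query (if any) against the NULL-terminated cursor set, and only
 * calls func when the query matches or no query is set. Honours the
 * request/session grouping filters.
 */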
static int
vslq_callback(struct VSLQ *vslq, struct vtx *vtx, VSLQ_dispatch_f *func,
void *priv)
{
unsigned n = vtx->n_descend + 1;
unsigned i, j;
struct vslc_vtx c[n];
struct VSL_cursor *cp[n + 1];
AN(vslq);
CHECK_OBJ_NOTNULL(vtx, VTX_MAGIC);
if (func == NULL)
return (0);
if (vslq->grouping == VSL_g_session &&
vtx->type != vtx_t_sess)
return (0);
if (vslq->grouping == VSL_g_request &&
vtx->type != vtx_t_req)
return (0);
i = j = 0;
vslc_vtx_setup(&c[i], vtx, 0);
i++;
while (j < i) {
vtx = VTAILQ_FIRST(&c[j].vtx->child);
while (vtx) {
assert(i < n);
vslc_vtx_setup(&c[i], vtx, c[j].c.c.level + 1);
i++;
vtx = VTAILQ_NEXT(vtx, list_child);
}
j++;
}
assert(i == n);
/* Reverse order */
for (i = 0; i < n; i++)
cp[i] = &c[n - i - 1].c.c;
cp[i] = NULL;
/* Query test goes here */
if (vslq->query == NULL ? 1 : vslq_runquery(vslq->query, cp))
return ((func)(vslq->vsl, cp, priv));
else
return (0);
}
struct VSLQ *
VSLQ_New(struct VSL_data *vsl, struct VSL_cursor **cp,
enum VSL_grouping_e grouping, const char *querystring)
{
struct vslq_query *query;
struct VSLQ *vslq;
CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
AN(cp);
if (grouping > VSL_g_session) {
(void)vsl_diag(vsl, "Illegal query grouping");
return (NULL);
}
if (querystring != NULL) {
query = vslq_newquery(vsl, grouping, querystring);
if (query == NULL)
return (NULL);
} else
query = NULL;
ALLOC_OBJ(vslq, VSLQ_MAGIC);
AN(vslq);
vslq->vsl = vsl;
vslq->c = *cp;
*cp = NULL;
vslq->grouping = grouping;
vslq->query = query;
VRB_INIT(&vslq->tree);
VTAILQ_INIT(&vslq->incomplete);
VTAILQ_INIT(&vslq->cache);
return (vslq);
}
void
VSLQ_Delete(struct VSLQ **pvslq)
{
struct VSLQ *vslq;
struct vtx *vtx;
AN(pvslq);
vslq = *pvslq;
*pvslq = NULL;
CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);
(void)VSLQ_Flush(vslq, NULL, NULL);
AZ(vslq->n_incomplete);
VSL_DeleteCursor(vslq->c);
vslq->c = NULL;
if (vslq->query != NULL)
vslq_deletequery(&vslq->query);
AZ(vslq->query);
while (!VTAILQ_EMPTY(&vslq->cache)) {
AN(vslq->n_cache);
vtx = VTAILQ_FIRST(&vslq->cache);
VTAILQ_REMOVE(&vslq->cache, vtx, list_child);
vslq->n_cache--;
vtx_free(&vtx);
AZ(vtx);
}
FREE_OBJ(vslq);
}
static int
vslq_raw(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv)
{
struct vslc_raw rawc;
struct VSL_cursor *c;
struct VSL_cursor *pc[2];
int i;
assert(vslq->grouping == VSL_g_raw);
c = vslq->c;
memset(&rawc, 0, sizeof rawc);
rawc.c.c.vxid = -1;
rawc.c.magic = VSLC_MAGIC;
rawc.c.next = vslc_raw_next;
rawc.c.reset = vslc_raw_reset;
rawc.magic = VSLC_RAW_MAGIC;
pc[0] = &rawc.c.c;
pc[1] = NULL;
while (1) {
i = VSL_Next(c);
if (i <= 0)
break;
AN(c->ptr);
if (func == NULL)
continue;
rawc.start = c->ptr;
rawc.len = VSL_NEXT(c->ptr) - c->ptr;
rawc.next = rawc.start;
rawc.c.c.ptr = NULL;
/* Query check goes here */
i = 0;
if (vslq->query == NULL ? 1 : vslq_runquery(vslq->query, pc))
i = (func)(vslq->vsl, pc, priv);
if (i)
break;
}
return (i);
}
int
VSLQ_Dispatch(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv)
{
struct VSL_cursor *c;
int i;
enum VSL_tag_e tag;
const uint32_t *ptr;
ssize_t len;
unsigned vxid;
struct vtx *vtx;
double now;
CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);
if (vslq->grouping == VSL_g_raw)
return (vslq_raw(vslq, func, priv));
c = vslq->c;
while (1) {
i = VSL_Next(c);
if (i != 1)
break;
tag = VSL_TAG(c->ptr);
if (tag == SLT__Batch) {
ptr = VSL_NEXT(c->ptr);
len = VSL_WORDS(c->ptr[1]);
AZ(vsl_skip(c, len));
} else {
ptr = c->ptr;
len = VSL_NEXT(ptr) - ptr;
}
vxid = VSL_ID(ptr);
if (vxid == 0)
continue;
vtx = vtx_lori(vslq, vxid);
AN(vtx);
vtx_append(vtx, ptr, len, c->shmptr_ok);
vtx = vtx_scan(vslq, vtx);
if (vtx) {
AN(vtx->flags & VTX_F_READY);
i = vslq_callback(vslq, vtx, func, priv);
vtx_retire(vslq, &vtx);
AZ(vtx);
if (i)
break;
}
}
if (i)
return (i);
now = VTIM_mono();
while ((vtx = VTAILQ_FIRST(&vslq->incomplete)) &&
now - vtx->t_start > 120.) {
/* XXX: Make timeout configurable through options and
provide a sane default */
AZ(vtx->flags & VTX_F_COMPLETE);
vtx = vtx_force(vslq, vtx, "incomplete - timeout");
if (vtx) {
AN(vtx->flags & VTX_F_READY);
i = vslq_callback(vslq, vtx, func, priv);
vtx_retire(vslq, &vtx);
AZ(vtx);
if (i)
break;
}
}
if (i)
return (i);
while (vslq->n_incomplete > 1000) {
/* XXX: Make limit configurable through options and
provide a sane default */
vtx = VTAILQ_FIRST(&vslq->incomplete);
AN(vtx);
AZ(vtx->flags & VTX_F_COMPLETE);
vtx = vtx_force(vslq, vtx, "incomplete - store overflow");
if (vtx) {
AN(vtx->flags & VTX_F_READY);
i = vslq_callback(vslq, vtx, func, priv);
vtx_retire(vslq, &vtx);
AZ(vtx);
if (i)
break;
}
}
return (i);
}
int
VSLQ_Flush(struct VSLQ *vslq, VSLQ_dispatch_f *func, void *priv)
{
struct vtx *vtx;
int i = 0;
CHECK_OBJ_NOTNULL(vslq, VSLQ_MAGIC);
while (vslq->n_incomplete) {
vtx = VTAILQ_FIRST(&vslq->incomplete);
AN(vtx);
AZ(vtx->flags & VTX_F_COMPLETE);
vtx = vtx_force(vslq, vtx, "incomplete - flushing");
if (vtx) {
AN(vtx->flags & VTX_F_READY);
i = vslq_callback(vslq, vtx, func, priv);
vtx_retire(vslq, &vtx);
AZ(vtx);
if (i)
break;
}
}
return (i);
}
/*-
* Copyright (c) 2006 Verdens Gang AS
* Copyright (c) 2006-2013 Varnish Software AS
* All rights reserved.
*
* Author: Martin Blix Grydeland <martin@varnish-software.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdint.h>
#include "vas.h"
#include "miniobj.h"
#include "vre.h"
#include "vapi/vsl.h"
#include "vsl_api.h"
struct vslq_query {
unsigned magic;
#define VSLQ_QUERY_MAGIC 0x122322A5
vre_t *regex;
};
struct vslq_query *
vslq_newquery(struct VSL_data *vsl, enum VSL_grouping_e grouping,
const char *querystring)
{
struct vslq_query *query;
const char *error;
int pos;
vre_t *regex;
(void)grouping;
AN(querystring);
regex = VRE_compile(querystring, 0, &error, &pos);
if (regex == NULL) {
vsl_diag(vsl, "failed to compile regex at pos %d: %s",
pos, error);
return (NULL);
}
	ALLOC_OBJ(query, VSLQ_QUERY_MAGIC);
	AN(query);
query->regex = regex;
return (query);
}
void
vslq_deletequery(struct vslq_query **pquery)
{
struct vslq_query *query;
AN(pquery);
query = *pquery;
*pquery = NULL;
CHECK_OBJ_NOTNULL(query, VSLQ_QUERY_MAGIC);
AN(query->regex);
VRE_free(&query->regex);
AZ(query->regex);
FREE_OBJ(query);
}
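/*
 * Run the compiled regex against the data field of every record
 * reachable through the NULL-terminated cursor array. Returns 1 on the
 * first match (cursors are left rewound), 0 if nothing matched.
 */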
int
vslq_runquery(struct vslq_query *query, struct VSL_cursor *cp[])
{
struct VSL_cursor *c;
int i, len;
const char *data;
CHECK_OBJ_NOTNULL(query, VSLQ_QUERY_MAGIC);
AN(query->regex);
c = cp[0];
while (c) {
while (1) {
i = VSL_Next(c);
if (i == 0)
break;
assert(i == 1);
AN(c->ptr);
len = VSL_LEN(c->ptr);
data = VSL_CDATA(c->ptr);
i = VRE_exec(query->regex, data, len, 0, 0, NULL, 0,
NULL);
if (i != VRE_ERROR_NOMATCH) {
AZ(VSL_ResetCursor(c));
return (1);
}
}
AZ(VSL_ResetCursor(c));
c = *++cp;
}
return (0);
}