Commit a95a36f7 authored by Per Buer

Merge branch 'master' of ssh://git.varnish-cache.org/git/varnish-cache

Conflicts:
	doc/sphinx/reference/varnishncsa.rst
parents 40e21685 f214debc
@@ -640,8 +640,8 @@ void Fetch_Init(void);
struct vgz;
enum vgz_flag { VGZ_NORMAL, VGZ_ALIGN, VGZ_RESET, VGZ_FINISH };
struct vgz *VGZ_NewUngzip(const struct sess *sp, struct ws *tmp);
struct vgz *VGZ_NewGzip(const struct sess *sp, struct ws *tmp);
struct vgz *VGZ_NewUngzip(struct sess *sp);
struct vgz *VGZ_NewGzip(struct sess *sp);
void VGZ_Ibuf(struct vgz *, const void *, ssize_t len);
int VGZ_IbufEmpty(const struct vgz *vg);
void VGZ_Obuf(struct vgz *, const void *, ssize_t len);
......
@@ -274,7 +274,7 @@ ESI_Deliver(struct sess *sp)
}
if (isgzip && !sp->wrk->gzip_resp) {
vgz = VGZ_NewUngzip(sp, sp->wrk->ws);
vgz = VGZ_NewUngzip(sp);
obufl = 0;
}
......
@@ -303,24 +303,24 @@ vfp_esi_begin(struct sess *sp, size_t estimate)
/* XXX: snapshot WS's ? We'll need the space */
if (sp->wrk->is_gzip && sp->wrk->do_gunzip) {
sp->wrk->vgz_rx = VGZ_NewUngzip(sp, sp->ws);
sp->wrk->vgz_rx = VGZ_NewUngzip(sp);
VEP_Init(sp, NULL);
} else if (sp->wrk->is_gunzip && sp->wrk->do_gzip) {
vef = (void*)WS_Alloc(sp->ws, sizeof *vef);
AN(vef);
memset(vef, 0, sizeof *vef);
vef->magic = VEF_MAGIC;
vef->vgz = VGZ_NewGzip(sp, sp->ws);
vef->vgz = VGZ_NewGzip(sp);
AZ(sp->wrk->vef_priv);
sp->wrk->vef_priv = vef;
VEP_Init(sp, vfp_vep_callback);
} else if (sp->wrk->is_gzip) {
sp->wrk->vgz_rx = VGZ_NewUngzip(sp, sp->ws);
sp->wrk->vgz_rx = VGZ_NewUngzip(sp);
vef = (void*)WS_Alloc(sp->ws, sizeof *vef);
AN(vef);
memset(vef, 0, sizeof *vef);
vef->magic = VEF_MAGIC;
vef->vgz = VGZ_NewGzip(sp, sp->ws);
vef->vgz = VGZ_NewGzip(sp);
AZ(sp->wrk->vef_priv);
sp->wrk->vef_priv = vef;
VEP_Init(sp, vfp_vep_callback);
......
@@ -113,34 +113,48 @@ vgz_free(voidpf opaque, voidpf address)
*/
static struct vgz *
vgz_alloc_vgz(struct ws *ws)
vgz_alloc_vgz(struct sess *sp)
{
char *s;
struct vgz *vg;
struct ws *ws = sp->wrk->ws;
WS_Assert(ws);
s = WS_Snapshot(ws);
vg = (void*)WS_Alloc(ws, sizeof *vg);
AN(vg);
memset(vg, 0, sizeof *vg);
vg->magic = VGZ_MAGIC;
vg->tmp = ws;
vg->tmp_snapshot = s;
vg->vz.zalloc = vgz_alloc;
vg->vz.zfree = vgz_free;
vg->vz.opaque = vg;
switch (params->gzip_tmp_space) {
case 0:
/* malloc, the default */
break;
case 1:
vg->tmp = sp->ws;
vg->tmp_snapshot = WS_Snapshot(vg->tmp);
vg->vz.zalloc = vgz_alloc;
vg->vz.zfree = vgz_free;
vg->vz.opaque = vg;
break;
case 2:
vg->tmp = sp->wrk->ws;
vg->tmp_snapshot = WS_Snapshot(vg->tmp);
vg->vz.zalloc = vgz_alloc;
vg->vz.zfree = vgz_free;
vg->vz.opaque = vg;
break;
default:
assert(0 == __LINE__);
}
return (vg);
}
struct vgz *
VGZ_NewUngzip(const struct sess *sp, struct ws *tmp)
VGZ_NewUngzip(struct sess *sp)
{
struct vgz *vg;
CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
vg = vgz_alloc_vgz(tmp);
vg = vgz_alloc_vgz(sp);
/*
* Max memory usage according to zconf.h:
@@ -148,23 +162,18 @@ VGZ_NewUngzip(const struct sess *sp, struct ws *tmp)
* Since we don't control windowBits, we have to assume
* it is 15, so 34-35KB or so.
*/
#if 1
vg->vz.zalloc = NULL;
vg->vz.zfree = NULL;
vg->vz.opaque = NULL;
#endif
assert(Z_OK == inflateInit2(&vg->vz, 31));
return (vg);
}
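
As a back-of-envelope check of the "34-35KB or so" estimate above (which also lines up with the 32+KB gunzip figure quoted later in this commit for the new gzip_tmp_space parameter), a minimal standalone sketch, assuming zlib's "a few kb" of fixed inflate state is roughly 3 KB (zconf.h gives no exact number):

#include <stdio.h>

/* Rough gunzip scratch-space estimate per the zconf.h comment:
 * "a few kb" of fixed state plus (1 << windowBits) for the window. */
int
main(void)
{
	long window = 1L << 15;		/* inflateInit2(..., 31): gzip wrapper + windowBits 15 */
	long state = 3L * 1024;		/* assumed size of zlib's fixed inflate state */

	printf("gunzip scratch: about %ld KB\n", (window + state) / 1024);	/* ~35 KB */
	return (0);
}
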
struct vgz *
VGZ_NewGzip(const struct sess *sp, struct ws *tmp)
VGZ_NewGzip(struct sess *sp)
{
struct vgz *vg;
int i;
CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
vg = vgz_alloc_vgz(tmp);
vg = vgz_alloc_vgz(sp);
/*
* From zconf.h:
@@ -181,13 +190,8 @@ VGZ_NewGzip(const struct sess *sp, struct ws *tmp)
* XXX: It may be more efficent to malloc them, rather than have
* XXX: too many worker threads grow the stacks.
*/
#if 1
vg->vz.zalloc = NULL;
vg->vz.zfree = NULL;
vg->vz.opaque = NULL;
#endif
i = deflateInit2(&vg->vz,
0, /* Level */
params->gzip_level, /* Level */
Z_DEFLATED, /* Method */
16 + 8, /* Window bits (16=gzip + 15) */
1, /* memLevel */
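
On the deflate side, the 256+KB gzip figure in the new gzip_tmp_space help text matches zlib's documented requirement at its defaults; a minimal standalone sketch of the zconf.h formula (1 << (windowBits + 2)) + (1 << (memLevel + 9)), assuming windowBits 15, for both the default memLevel 8 and the memLevel 1 passed above:

#include <stdio.h>

/* Deflate memory use per zconf.h:
 * (1 << (windowBits + 2)) + (1 << (memLevel + 9)) bytes. */
static long
deflate_mem(int windowBits, int memLevel)
{
	return ((1L << (windowBits + 2)) + (1L << (memLevel + 9)));
}

int
main(void)
{
	printf("zlib defaults (15, 8): %ld KB\n", deflate_mem(15, 8) / 1024);	/* 256 KB */
	printf("memLevel 1 (15, 1):    %ld KB\n", deflate_mem(15, 1) / 1024);	/* ~129 KB */
	return (0);
}
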
@@ -345,7 +349,8 @@ VGZ_Destroy(struct vgz **vg)
{
CHECK_OBJ_NOTNULL(*vg, VGZ_MAGIC);
WS_Reset((*vg)->tmp, (*vg)->tmp_snapshot);
if ((*vg)->tmp != NULL)
WS_Reset((*vg)->tmp, (*vg)->tmp_snapshot);
*vg = NULL;
}
@@ -359,7 +364,7 @@ static void __match_proto__()
vfp_gunzip_begin(struct sess *sp, size_t estimate)
{
(void)estimate;
sp->wrk->vgz_rx = VGZ_NewUngzip(sp, sp->ws);
sp->wrk->vgz_rx = VGZ_NewUngzip(sp);
}
static int __match_proto__()
@@ -428,7 +433,7 @@ vfp_gzip_begin(struct sess *sp, size_t estimate)
{
(void)estimate;
sp->wrk->vgz_rx = VGZ_NewGzip(sp, sp->ws);
sp->wrk->vgz_rx = VGZ_NewGzip(sp);
}
static int __match_proto__()
@@ -504,7 +509,7 @@ static void __match_proto__()
vfp_testgzip_begin(struct sess *sp, size_t estimate)
{
(void)estimate;
sp->wrk->vgz_rx = VGZ_NewUngzip(sp, sp->ws);
sp->wrk->vgz_rx = VGZ_NewUngzip(sp);
}
static int __match_proto__()
......
@@ -253,7 +253,7 @@ res_WriteGunzipObj(struct sess *sp)
CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
vg = VGZ_NewUngzip(sp, sp->wrk->ws);
vg = VGZ_NewUngzip(sp);
VTAILQ_FOREACH(st, &sp->obj->store, list) {
CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
......
@@ -201,6 +201,8 @@ struct params {
unsigned http_gzip_support;
unsigned gzip_stack_buffer;
unsigned gzip_tmp_space;
unsigned gzip_level;
double critbit_cooloff;
};
......
@@ -817,9 +817,29 @@ static const struct parspec input_parspec[] = {
"Enable support for HTTP GZIP compression.\n",
EXPERIMENTAL,
"on", "bool" },
{ "gzip_tmp_space", tweak_uint, &master.gzip_tmp_space, 0, 2,
"Where temporary space for gzip/gunzip is allocated.\n"
" 0 - malloc\n"
" 1 - session workspace\n"
" 2 - thread workspace\n"
"If you have much gzip/gunzip activity, it may be an"
" advantage to use workspace for these allocations to reduce"
" malloc activity. Be aware that gzip needs 256+KB and gunzip"
" needs 32+KB of workspace (64+KB if ESI processing).",
EXPERIMENTAL,
"0", "" },
{ "gzip_level", tweak_uint, &master.gzip_level, 0, 9,
"Gzip compression level: 0=debug, 1=fast, 9=best",
0,
"6", ""},
{ "gzip_stack_buffer", tweak_uint, &master.gzip_stack_buffer,
2048, UINT_MAX,
"Size of stack buffer used for gzip processing.\n",
"Size of stack buffer used for gzip processing.\n"
"The stack buffers are used for in-transit data,"
" for instance gunzip'ed data being sent to a client."
"Making this space to small results in more overhead,"
" writes to sockets etc, making it too big is probably"
" just a waste of memory.",
EXPERIMENTAL,
"32768", "Bytes" },
{ "critbit_cooloff", tweak_timeout_double,
......
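
Both new parameters can also be set at runtime through the CLI, as the test case changes below do; a minimal usage sketch, assuming an instance named v1 as in those tests (the gzip_level value is purely illustrative):

varnish v1 -cliok "param.set gzip_tmp_space 1"
varnish v1 -cliok "param.set gzip_level 9"
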
@@ -24,6 +24,7 @@ varnish v1 -vcl+backend {
varnish v1 -cliok "param.set esi_syntax 4"
varnish v1 -cliok "param.set http_gzip_support true"
varnish v1 -cliok "param.set gzip_tmp_space 2"
client c1 {
txreq
......
@@ -27,6 +27,7 @@ varnish v1 -vcl+backend {
varnish v1 -cliok "param.set esi_syntax 0xc"
varnish v1 -cliok "param.set http_gzip_support true"
varnish v1 -cliok "param.set gzip_tmp_space 1"
client c1 {
txreq -hdr "Accept-Encoding: gzip"
......
@@ -42,6 +42,11 @@ SVNID("$Id$")
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <pwd.h>
#ifdef __linux__
#include <sys/prctl.h>
#endif
#include "libvarnish.h"
#include "vev.h"
@@ -364,6 +369,21 @@ main(int argc, char * const *argv)
vb = vev_new_base();
if (geteuid() == 0) {
struct passwd *pw;
pw = getpwnam("nobody");
assert(setgid(pw->pw_gid) == 0);
assert(setuid(pw->pw_uid) == 0);
/* On Linux >= 2.4, you need to set the dumpable flag
to get core dumps after you have done a setuid. */
#ifdef __linux__
if (prctl(PR_SET_DUMPABLE, 1) != 0) {
printf("Could not set dumpable bit. Core dumps turned "
"off\n");
}
#endif
}
i = 0;
while(!VTAILQ_EMPTY(&tst_head) || i) {
if (!VTAILQ_EMPTY(&tst_head) && njob < npar) {
......
@@ -19,6 +19,7 @@ varnishncsa [-a] [-b] [-C] [-c] [-D] [-d] [-f] [-F format] [-I regex]
[-i tag] [-n varnish_name] [-P file] [-r file] [-V] [-w file]
[-X regex] [-x tag]
DESCRIPTION
===========
......
@@ -71,8 +71,7 @@ Documentation files for %name
#Varnish Cache is a high-performance HTTP accelerator
%prep
#%setup -q
%setup -q -n varnish-cache
%setup -q
# The svn sources needs to generate a suitable configure script
# Release tarballs would not need this
@@ -166,8 +165,7 @@ mv doc/sphinx/\=build/html doc
%endif
%endif
LD_LIBRARY_PATH="lib/libvarnish/.libs:lib/libvarnishcompat/.libs:lib/libvarnishapi/.libs:lib/libvcl/.libs" bin/varnishd/varnishd -b 127.0.0.1:80 -C -n /tmp/foo
%{__make} check LD_LIBRARY_PATH="../../lib/libvarnish/.libs:../../lib/libvarnishcompat/.libs:../../lib/libvarnishapi/.libs:../../lib/libvcl/.libs"
%{__make} check LD_LIBRARY_PATH="../../lib/libvarnish/.libs:../../lib/libvarnishcompat/.libs:../../lib/libvarnishapi/.libs:../../lib/libvcl/.libs:../../lib/libvgz/.libs"
%install
rm -rf %{buildroot}
......