Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
V
varnish-cache
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Commits
Open sidebar
varnishcache
varnish-cache
Commits
af2fb997
Commit
af2fb997
authored
Feb 03, 2016
by
Poul-Henning Kamp
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Collect all the LRU stuff in its own source file.
parent
3e590da4
Changes
8
Hide whitespace changes
Inline
Side-by-side
Showing
8 changed files
with
204 additions
and
156 deletions
+204
-156
Makefile.am
bin/varnishd/Makefile.am
+1
-0
cache.h
bin/varnishd/cache/cache.h
+0
-9
stevedore.c
bin/varnishd/storage/stevedore.c
+1
-24
storage.h
bin/varnishd/storage/storage.h
+6
-2
storage_lru.c
bin/varnishd/storage/storage_lru.c
+184
-0
storage_persistent.c
bin/varnishd/storage/storage_persistent.c
+6
-11
storage_persistent_silo.c
bin/varnishd/storage/storage_persistent_silo.c
+1
-2
storage_simple.c
bin/varnishd/storage/storage_simple.c
+5
-108
No files found.
bin/varnishd/Makefile.am
View file @
af2fb997
...
...
@@ -86,6 +86,7 @@ varnishd_SOURCES = \
storage/mgt_stevedore.c
\
storage/stevedore_utils.c
\
storage/storage_file.c
\
storage/storage_lru.c
\
storage/storage_malloc.c
\
storage/storage_persistent.c
\
storage/mgt_storage_persistent.c
\
...
...
bin/varnishd/cache/cache.h
View file @
af2fb997
...
...
@@ -354,15 +354,6 @@ struct worker {
uintptr_t
stack_end
;
};
/* LRU ---------------------------------------------------------------*/
struct
lru
{
unsigned
magic
;
#define LRU_MAGIC 0x3fec7bb0
VTAILQ_HEAD
(,
objcore
)
lru_head
;
struct
lock
mtx
;
};
/* Stored object -----------------------------------------------------
* This is just to encapsulate the fields owned by the stevedore
*/
...
...
bin/varnishd/storage/stevedore.c
View file @
af2fb997
...
...
@@ -37,6 +37,7 @@
#include <stdlib.h>
#include "cache/cache.h"
#include "hash/hash_slinger.h"
#include "storage/storage.h"
#include "vrt.h"
...
...
@@ -44,29 +45,6 @@
static
const
struct
stevedore
*
volatile
stv_next
;
/*--------------------------------------------------------------------
*/
struct
lru
*
LRU_Alloc
(
void
)
{
struct
lru
*
l
;
ALLOC_OBJ
(
l
,
LRU_MAGIC
);
AN
(
l
);
VTAILQ_INIT
(
&
l
->
lru_head
);
Lck_New
(
&
l
->
mtx
,
lck_lru
);
return
(
l
);
}
void
LRU_Free
(
struct
lru
*
lru
)
{
CHECK_OBJ_NOTNULL
(
lru
,
LRU_MAGIC
);
Lck_Delete
(
&
lru
->
mtx
);
FREE_OBJ
(
lru
);
}
/*--------------------------------------------------------------------
* XXX: trust pointer writes to be atomic
*/
...
...
@@ -101,7 +79,6 @@ stv_pick_stevedore(struct vsl_log *vsl, const char **hint)
return
(
stv
);
}
/*-------------------------------------------------------------------
* Allocate storage for an object, based on the header information.
* XXX: If we know (a hint of) the length, we could allocate space
...
...
bin/varnishd/storage/storage.h
View file @
af2fb997
...
...
@@ -120,8 +120,12 @@ uintmax_t STV_FileSize(int fd, const char *size, unsigned *granularity,
const
char
*
ctx
);
struct
lru
*
LRU_Alloc
(
void
);
void
LRU_Free
(
struct
lru
*
lru
);
int
EXP_NukeOne
(
struct
worker
*
wrk
,
struct
lru
*
lru
);
void
LRU_Free
(
struct
lru
*
);
void
LRU_Add
(
struct
objcore
*
);
void
LRU_Remove
(
struct
objcore
*
);
int
LRU_NukeOne
(
struct
worker
*
,
struct
lru
*
);
void
LRU_Touch
(
struct
worker
*
,
struct
objcore
*
,
double
now
);
/*--------------------------------------------------------------------*/
extern
const
struct
stevedore
sma_stevedore
;
...
...
bin/varnishd/storage/storage_lru.c
0 → 100644
View file @
af2fb997
/*-
* Copyright (c) 2007-2015 Varnish Software AS
* All rights reserved.
*
 * Author: Dag-Erling Smørgrav <des@des.no>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* STEVEDORE: one who works at or is responsible for loading and
* unloading ships in port. Example: "on the wharves, stevedores were
* unloading cargo from the far corners of the world." Origin: Spanish
* estibador, from estibar to pack. First Known Use: 1788
*/
#include "config.h"
#include <stdio.h>
#include <stdlib.h>
#include "cache/cache.h"
#include "hash/hash_slinger.h"
#include "storage/storage.h"
#include "vtim.h"
/*
 * An LRU list: objcores kept in least-recently-used order (head is the
 * oldest, insertions go at the tail), protected by its own mutex.
 */
struct lru {
	unsigned		magic;
#define LRU_MAGIC		0x3fec7bb0
	VTAILQ_HEAD(,objcore)	lru_head;	/* head = least recently used */
	struct lock		mtx;		/* guards lru_head and oc->last_lru */
};
/*--------------------------------------------------------------------
 * Allocate and initialize a new, empty LRU list.
 * Aborts (via AN) if allocation fails; caller owns the result and
 * releases it with LRU_Free().
 */

struct lru *
LRU_Alloc(void)
{
	struct lru *lru;

	ALLOC_OBJ(lru, LRU_MAGIC);
	AN(lru);
	VTAILQ_INIT(&lru->lru_head);
	Lck_New(&lru->mtx, lck_lru);
	return (lru);
}
/*--------------------------------------------------------------------
 * Destroy an LRU list previously created with LRU_Alloc().
 * The list is assumed to be empty; the lock must be deleted before the
 * object is freed.
 */

void
LRU_Free(struct lru *lru)
{
	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
	Lck_Delete(&lru->mtx);
	FREE_OBJ(lru);
}
/*--------------------------------------------------------------------
 * Insert an objcore at the tail (most-recently-used end) of its LRU
 * list and stamp oc->last_lru with the current time.
 * last_lru doubles as the "on the list" marker: a non-NAN value means
 * the objcore is on the list (see LRU_Remove).
 */

void
LRU_Add(struct objcore *oc)
{
	struct lru *lru;

	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	lru = ObjGetLRU(oc);
	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
	Lck_Lock(&lru->mtx);
	VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
	oc->last_lru = VTIM_real();
	Lck_Unlock(&lru->mtx);
}
/*--------------------------------------------------------------------
 * Take an objcore off its LRU list, if it is on one.
 * A NAN last_lru means the objcore is not on the list, so removal is
 * skipped; on removal last_lru is reset to NAN to record that state.
 */

void
LRU_Remove(struct objcore *oc)
{
	struct lru *lru;

	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	lru = ObjGetLRU(oc);
	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
	Lck_Lock(&lru->mtx);
	if (!isnan(oc->last_lru)) {
		VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
		oc->last_lru = NAN;
	}
	Lck_Unlock(&lru->mtx);
}
/*--------------------------------------------------------------------
 * Move an objcore to the tail of its LRU list to mark it recently
 * used.  Best effort only: the move is skipped for incomplete objects,
 * for objects touched within the last lru_interval seconds, and when
 * the LRU lock is contended.
 */

void __match_proto__(objtouch_f)
LRU_Touch(struct worker *wrk, struct objcore *oc, double now)
{
	struct lru *lru;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	/*
	 * To avoid the exphdl->mtx becoming a hotspot, we only
	 * attempt to move objects if they have not been moved
	 * recently and if the lock is available.  This optimization
	 * obviously leaves the LRU list imperfectly sorted.
	 */

	if (oc->flags & OC_F_INCOMPLETE)
		return;

	if (now - oc->last_lru < cache_param->lru_interval)
		return;

	lru = ObjGetLRU(oc);
	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);

	/* Trylock: give up rather than stall on a contended LRU lock */
	if (Lck_Trylock(&lru->mtx))
		return;

	if (!isnan(oc->last_lru)) {
		/* Can only touch it while it's actually on the LRU list */
		VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
		VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
		VSC_C_main->n_lru_moved++;
		oc->last_lru = now;
	}
	Lck_Unlock(&lru->mtx);
}
/*--------------------------------------------------------------------
 * Attempt to make space by nuking the oldest object on the LRU list
 * which isn't in use.
 * Returns: 1: did, 0: didn't, -1: can't
 */

int
LRU_NukeOne(struct worker *wrk, struct lru *lru)
{
	struct objcore *oc, *oc2;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);

	/* Find the first currently unused object on the LRU. */
	Lck_Lock(&lru->mtx);
	VTAILQ_FOREACH_SAFE(oc, &lru->lru_head, lru_list, oc2) {
		CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

		VSLb(wrk->vsl, SLT_ExpKill, "LRU_Cand p=%p f=0x%x r=%d",
		    oc, oc->flags, oc->refcnt);

		/* On the list implies a valid (non-NAN) last_lru stamp */
		AZ(isnan(oc->last_lru));
		if (ObjSnipe(wrk, oc)) {
			VSC_C_main->n_lru_nuked++; // XXX per lru ?
			VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
			oc->last_lru = NAN;
			break;
		}
	}
	Lck_Unlock(&lru->mtx);

	/* oc is NULL only when the loop ran off the end without sniping */
	if (oc == NULL) {
		VSLb(wrk->vsl, SLT_ExpKill, "LRU_Fail");
		return (-1);
	}

	/* XXX: We could grab and return one storage segment to our caller */

	/* Release body storage and the reference; done outside the lock */
	ObjSlim(wrk, oc);
	EXP_Poke(oc);
	VSLb(wrk->vsl, SLT_ExpKill, "LRU x=%u", ObjGetXID(wrk, oc));
	(void)HSH_DerefObjCore(wrk, &oc);
	return (1);
}
bin/varnishd/storage/storage_persistent.c
View file @
af2fb997
...
...
@@ -235,7 +235,7 @@ smp_open_segs(struct smp_sc *sc, struct smp_signspace *spc)
ALLOC_OBJ
(
sg
,
SMP_SEG_MAGIC
);
AN
(
sg
);
sg
->
lru
=
LRU_Alloc
();
CHECK_OBJ_NOTNULL
(
sg
->
lru
,
LRU_MAGIC
);
AN
(
sg
->
lru
);
sg
->
p
=
*
ss
;
sg
->
flags
|=
SMP_SEG_MUSTLOAD
;
...
...
@@ -531,7 +531,7 @@ smp_allocobj(struct worker *wrk, const struct stevedore *stv,
while
(
1
)
{
if
(
really
>
0
)
{
if
(
EXP
_NukeOne
(
wrk
,
stv
->
lru
)
==
-
1
)
if
(
LRU
_NukeOne
(
wrk
,
stv
->
lru
)
==
-
1
)
return
(
0
);
really
--
;
}
...
...
@@ -608,10 +608,9 @@ const struct stevedore smp_stevedore = {
*/
static
void
debug_report_silo
(
struct
cli
*
cli
,
const
struct
smp_sc
*
sc
,
int
objs
)
debug_report_silo
(
struct
cli
*
cli
,
const
struct
smp_sc
*
sc
)
{
struct
smp_seg
*
sg
;
struct
objcore
*
oc
;
VCLI_Out
(
cli
,
"Silo: %s (%s)
\n
"
,
sc
->
stevedore
->
ident
,
sc
->
filename
);
...
...
@@ -626,10 +625,6 @@ debug_report_silo(struct cli *cli, const struct smp_sc *sc, int objs)
(
uintmax_t
)(
sc
->
next_top
-
sc
->
next_bot
));
VCLI_Out
(
cli
,
" %u nobj, %u alloc, %u lobjlist, %u fixed
\n
"
,
sg
->
nobj
,
sg
->
nalloc
,
sg
->
p
.
lobjlist
,
sg
->
nfixed
);
if
(
objs
)
{
VTAILQ_FOREACH
(
oc
,
&
sg
->
lru
->
lru_head
,
lru_list
)
VCLI_Out
(
cli
,
" OC %p
\n
"
,
oc
);
}
}
}
...
...
@@ -642,7 +637,7 @@ debug_persistent(struct cli *cli, const char * const * av, void *priv)
if
(
av
[
2
]
==
NULL
)
{
VTAILQ_FOREACH
(
sc
,
&
silos
,
list
)
debug_report_silo
(
cli
,
sc
,
0
);
debug_report_silo
(
cli
,
sc
);
return
;
}
VTAILQ_FOREACH
(
sc
,
&
silos
,
list
)
...
...
@@ -654,7 +649,7 @@ debug_persistent(struct cli *cli, const char * const * av, void *priv)
return
;
}
if
(
av
[
3
]
==
NULL
)
{
debug_report_silo
(
cli
,
sc
,
0
);
debug_report_silo
(
cli
,
sc
);
return
;
}
Lck_Lock
(
&
sc
->
mtx
);
...
...
@@ -663,7 +658,7 @@ debug_persistent(struct cli *cli, const char * const * av, void *priv)
smp_close_seg
(
sc
,
sc
->
cur_seg
);
smp_new_seg
(
sc
);
}
else
if
(
!
strcmp
(
av
[
3
],
"dump"
))
{
debug_report_silo
(
cli
,
sc
,
1
);
debug_report_silo
(
cli
,
sc
);
}
else
{
VCLI_Out
(
cli
,
"Unknown operation
\n
"
);
VCLI_SetResult
(
cli
,
CLIS_PARAM
);
...
...
bin/varnishd/storage/storage_persistent_silo.c
View file @
af2fb997
...
...
@@ -139,7 +139,6 @@ smp_load_seg(struct worker *wrk, const struct smp_sc *sc,
ASSERT_SILO_THREAD
(
sc
);
CHECK_OBJ_NOTNULL
(
wrk
,
WORKER_MAGIC
);
CHECK_OBJ_NOTNULL
(
sg
,
SMP_SEG_MAGIC
);
CHECK_OBJ_NOTNULL
(
sg
->
lru
,
LRU_MAGIC
);
assert
(
sg
->
flags
&
SMP_SEG_MUSTLOAD
);
sg
->
flags
&=
~
SMP_SEG_MUSTLOAD
;
AN
(
sg
->
p
.
offset
);
...
...
@@ -225,7 +224,7 @@ smp_new_seg(struct smp_sc *sc)
return
;
*
sg
=
tmpsg
;
sg
->
lru
=
LRU_Alloc
();
CHECK_OBJ_NOTNULL
(
sg
->
lru
,
LRU_MAGIC
);
AN
(
sg
->
lru
);
sg
->
p
.
offset
=
IRNUP
(
sc
,
sg
->
p
.
offset
);
sg
->
p
.
length
-=
sg
->
p
.
offset
-
tmpsg
.
p
.
offset
;
...
...
bin/varnishd/storage/storage_simple.c
View file @
af2fb997
...
...
@@ -39,56 +39,6 @@
#include "storage/storage.h"
#include "storage/storage_simple.h"
#include "vtim.h"
/*--------------------------------------------------------------------
* Attempt to make space by nuking the oldest object on the LRU list
* which isn't in use.
* Returns: 1: did, 0: didn't, -1: can't
*/
int
EXP_NukeOne
(
struct
worker
*
wrk
,
struct
lru
*
lru
)
{
struct
objcore
*
oc
,
*
oc2
;
CHECK_OBJ_NOTNULL
(
wrk
,
WORKER_MAGIC
);
CHECK_OBJ_NOTNULL
(
lru
,
LRU_MAGIC
);
/* Find the first currently unused object on the LRU. */
Lck_Lock
(
&
lru
->
mtx
);
VTAILQ_FOREACH_SAFE
(
oc
,
&
lru
->
lru_head
,
lru_list
,
oc2
)
{
CHECK_OBJ_NOTNULL
(
oc
,
OBJCORE_MAGIC
);
VSLb
(
wrk
->
vsl
,
SLT_ExpKill
,
"LRU_Cand p=%p f=0x%x r=%d"
,
oc
,
oc
->
flags
,
oc
->
refcnt
);
AZ
(
isnan
(
oc
->
last_lru
));
if
(
ObjSnipe
(
wrk
,
oc
))
{
VSC_C_main
->
n_lru_nuked
++
;
// XXX per lru ?
VTAILQ_REMOVE
(
&
lru
->
lru_head
,
oc
,
lru_list
);
oc
->
last_lru
=
NAN
;
break
;
}
}
Lck_Unlock
(
&
lru
->
mtx
);
if
(
oc
==
NULL
)
{
VSLb
(
wrk
->
vsl
,
SLT_ExpKill
,
"LRU_Fail"
);
return
(
-
1
);
}
/* XXX: We could grab and return one storage segment to our caller */
ObjSlim
(
wrk
,
oc
);
EXP_Poke
(
oc
);
VSLb
(
wrk
->
vsl
,
SLT_ExpKill
,
"LRU x=%u"
,
ObjGetXID
(
wrk
,
oc
));
(
void
)
HSH_DerefObjCore
(
wrk
,
&
oc
);
return
(
1
);
}
/*-------------------------------------------------------------------*/
static
struct
storage
*
...
...
@@ -178,7 +128,7 @@ SML_allocobj(struct worker *wrk, const struct stevedore *stv,
ltot
=
sizeof
(
struct
object
)
+
PRNDUP
(
wsl
);
while
(
1
)
{
if
(
really
>
0
)
{
if
(
EXP
_NukeOne
(
wrk
,
stv
->
lru
)
==
-
1
)
if
(
LRU
_NukeOne
(
wrk
,
stv
->
lru
)
==
-
1
)
return
(
0
);
really
--
;
}
...
...
@@ -249,7 +199,6 @@ static void __match_proto__(objfree_f)
sml_objfree
(
struct
worker
*
wrk
,
struct
objcore
*
oc
)
{
struct
object
*
o
;
struct
lru
*
lru
;
CHECK_OBJ_NOTNULL
(
wrk
,
WORKER_MAGIC
);
CHECK_OBJ_NOTNULL
(
oc
,
OBJCORE_MAGIC
);
...
...
@@ -257,14 +206,7 @@ sml_objfree(struct worker *wrk, struct objcore *oc)
CAST_OBJ_NOTNULL
(
o
,
oc
->
stobj
->
priv
,
OBJECT_MAGIC
);
o
->
magic
=
0
;
lru
=
ObjGetLRU
(
oc
);
CHECK_OBJ_NOTNULL
(
lru
,
LRU_MAGIC
);
Lck_Lock
(
&
lru
->
mtx
);
if
(
!
isnan
(
oc
->
last_lru
))
{
VTAILQ_REMOVE
(
&
lru
->
lru_head
,
oc
,
lru_list
);
oc
->
last_lru
=
NAN
;
}
Lck_Unlock
(
&
lru
->
mtx
);
LRU_Remove
(
oc
);
sml_stv_free
(
oc
->
stobj
->
stevedore
,
o
->
objstore
);
...
...
@@ -397,7 +339,7 @@ objallocwithnuke(struct worker *wrk, const struct stevedore *stv, size_t size)
/* no luck; try to free some space and keep trying */
if
(
fail
<
cache_param
->
nuke_limit
&&
EXP
_NukeOne
(
wrk
,
stv
->
lru
)
==
-
1
)
LRU
_NukeOne
(
wrk
,
stv
->
lru
)
==
-
1
)
break
;
}
CHECK_OBJ_ORNULL
(
st
,
STORAGE_MAGIC
);
...
...
@@ -548,7 +490,6 @@ sml_stable(struct worker *wrk, struct objcore *oc, struct boc *boc)
{
const
struct
stevedore
*
stv
;
struct
storage
*
st
;
struct
lru
*
lru
;
CHECK_OBJ_NOTNULL
(
wrk
,
WORKER_MAGIC
);
CHECK_OBJ_NOTNULL
(
oc
,
OBJCORE_MAGIC
);
...
...
@@ -564,12 +505,7 @@ sml_stable(struct worker *wrk, struct objcore *oc, struct boc *boc)
sml_stv_free
(
stv
,
st
);
}
lru
=
ObjGetLRU
(
oc
);
CHECK_OBJ_NOTNULL
(
lru
,
LRU_MAGIC
);
Lck_Lock
(
&
lru
->
mtx
);
VTAILQ_INSERT_TAIL
(
&
lru
->
lru_head
,
oc
,
lru_list
);
oc
->
last_lru
=
VTIM_real
();
Lck_Unlock
(
&
lru
->
mtx
);
LRU_Add
(
oc
);
}
static
void
*
__match_proto__
(
objgetattr_f
)
...
...
@@ -675,45 +611,6 @@ sml_setattr(struct worker *wrk, struct objcore *oc, enum obj_attr attr,
return
(
retval
);
}
static
void
__match_proto__
(
objtouch_f
)
sml_touch
(
struct
worker
*
wrk
,
struct
objcore
*
oc
,
double
now
)
{
struct
lru
*
lru
;
CHECK_OBJ_NOTNULL
(
wrk
,
WORKER_MAGIC
);
CHECK_OBJ_NOTNULL
(
oc
,
OBJCORE_MAGIC
);
/*
* To avoid the exphdl->mtx becoming a hotspot, we only
* attempt to move objects if they have not been moved
* recently and if the lock is available. This optimization
* obviously leaves the LRU list imperfectly sorted.
*/
if
(
oc
->
flags
&
OC_F_INCOMPLETE
)
return
;
if
(
now
-
oc
->
last_lru
<
cache_param
->
lru_interval
)
return
;
lru
=
ObjGetLRU
(
oc
);
CHECK_OBJ_NOTNULL
(
lru
,
LRU_MAGIC
);
if
(
Lck_Trylock
(
&
lru
->
mtx
))
return
;
AN
(
oc
->
exp_flags
&
OC_EF_EXP
);
if
(
!
isnan
(
oc
->
last_lru
))
{
/* Can only touch it while it's actually on the LRU list */
VTAILQ_REMOVE
(
&
lru
->
lru_head
,
oc
,
lru_list
);
VTAILQ_INSERT_TAIL
(
&
lru
->
lru_head
,
oc
,
lru_list
);
VSC_C_main
->
n_lru_moved
++
;
oc
->
last_lru
=
now
;
}
Lck_Unlock
(
&
lru
->
mtx
);
}
const
struct
obj_methods
SML_methods
=
{
.
objfree
=
sml_objfree
,
.
objgetlru
=
sml_objgetlru
,
...
...
@@ -726,5 +623,5 @@ const struct obj_methods SML_methods = {
.
objslim
=
sml_slim
,
.
objgetattr
=
sml_getattr
,
.
objsetattr
=
sml_setattr
,
.
objtouch
=
sml_t
ouch
,
.
objtouch
=
LRU_T
ouch
,
};
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment