Cache plugin: replace ARC cache with SIEVE

Frank Denis 2024-01-19 00:05:33 +01:00
parent 63f8d9b30d
commit f2484f5bd5
19 changed files with 434 additions and 845 deletions


@@ -6,8 +6,8 @@ import (
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/miekg/dns"
sieve "github.com/opencoff/go-sieve"
)
const StaleResponseTTL = 30 * time.Second
@@ -19,7 +19,7 @@ type CachedResponse struct {
type CachedResponses struct {
sync.RWMutex
-cache *lru.ARCCache
+cache *sieve.Sieve[[32]byte, CachedResponse]
}
var cachedResponses CachedResponses
@@ -75,12 +75,11 @@ func (plugin *PluginCache) Eval(pluginsState *PluginsState, msg *dns.Msg) error
cachedResponses.RUnlock()
return nil
}
-cachedAny, ok := cachedResponses.cache.Get(cacheKey)
+cached, ok := cachedResponses.cache.Get(cacheKey)
if !ok {
cachedResponses.RUnlock()
return nil
}
-cached := cachedAny.(CachedResponse)
expiration := cached.expiration
synth := cached.msg.Copy()
cachedResponses.RUnlock()
@@ -151,8 +150,8 @@ func (plugin *PluginCacheResponse) Eval(pluginsState *PluginsState, msg *dns.Msg
cachedResponses.Lock()
if cachedResponses.cache == nil {
var err error
-cachedResponses.cache, err = lru.NewARC(pluginsState.cacheSize)
-if err != nil {
+cachedResponses.cache = sieve.New[[32]byte, CachedResponse](pluginsState.cacheSize)
+if cachedResponses.cache == nil {
cachedResponses.Unlock()
return err
}

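In short, the plugin swaps an untyped ARC cache for a generics-based SIEVE cache: construction no longer returns an error, and lookups no longer need a type assertion. Below is a minimal sketch of the before/after usage, not the actual plugin code; `entry` is a hypothetical stand-in for CachedResponse, and the old calls appear only as comments.

package main

import (
	"fmt"

	sieve "github.com/opencoff/go-sieve"
)

// entry stands in for the plugin's CachedResponse value type.
type entry struct{ msg string }

func main() {
	var key [32]byte // the plugin keys the cache by a 32-byte hash of the query

	// Old pattern (hashicorp/golang-lru ARC): construction may fail, and Get
	// returns an interface{} that must be asserted back to the concrete type:
	//   cache, err := lru.NewARC(size)
	//   v, ok := cache.Get(key)
	//   cached := v.(CachedResponse)

	// New pattern (opencoff/go-sieve): the cache is generic over key and value,
	// New never returns an error, and Get yields an already-typed value.
	cache := sieve.New[[32]byte, entry](512)
	cache.Add(key, entry{msg: "cached response"})
	if cached, ok := cache.Get(key); ok {
		fmt.Println(cached.msg)
	}
}
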
go.mod

@@ -8,7 +8,6 @@ require (
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185
github.com/hashicorp/go-immutable-radix v1.3.1
-github.com/hashicorp/golang-lru v1.0.2
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb
github.com/jedisct1/dlog v0.0.0-20230811132706-443b333ff1b3
github.com/jedisct1/go-clocksmith v0.0.0-20230211133011-392c1afea73e
@@ -19,6 +18,7 @@ require (
github.com/k-sone/critbitgo v1.4.0
github.com/kardianos/service v1.2.2
github.com/miekg/dns v1.1.58
+github.com/opencoff/go-sieve v0.2.1
github.com/powerman/check v1.7.0
github.com/quic-go/quic-go v0.41.0
golang.org/x/crypto v0.18.0
@@ -33,6 +33,7 @@ require (
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
github.com/hashicorp/go-syslog v1.0.0 // indirect
+github.com/hashicorp/golang-lru v0.5.0 // indirect
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect

go.sum

@@ -32,9 +32,8 @@ github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwM
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
-github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb h1:PGufWXXDq9yaev6xX1YQauaO1MV90e6Mpoq1I7Lz/VM=
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -62,6 +61,8 @@ github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
+github.com/opencoff/go-sieve v0.2.1 h1:5Pv6rd3zRquNmXcYHFndjVoolTgcv0ua2XTdMQ+gw0M=
+github.com/opencoff/go-sieve v0.2.1/go.mod h1:CndxLpW4R8fDq04XfBSCOZ+qWwDCcxjfUJbr0GPqWHY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=


@@ -1,23 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test


@@ -1,30 +0,0 @@
linters:
enable:
- megacheck
- revive
- govet
- unconvert
- megacheck
- gas
- gocyclo
- dupl
- misspell
- unparam
- unused
- typecheck
- ineffassign
- stylecheck
- exportloopref
- gocritic
- nakedret
- gosimple
- prealloc
fast: false
disable-all: true
issues:
exclude-rules:
- path: _test\.go
linters:
- dupl
exclude-use-default: false


@@ -1,222 +0,0 @@
package lru
import (
"fmt"
"sync"
"github.com/hashicorp/golang-lru/simplelru"
)
const (
// Default2QRecentRatio is the ratio of the 2Q cache dedicated
// to recently added entries that have only been accessed once.
Default2QRecentRatio = 0.25
// Default2QGhostEntries is the default ratio of ghost
// entries kept to track entries recently evicted
Default2QGhostEntries = 0.50
)
// TwoQueueCache is a thread-safe fixed size 2Q cache.
// 2Q is an enhancement over the standard LRU cache
// in that it tracks both frequently and recently used
// entries separately. This avoids a burst in access to new
// entries from evicting frequently used entries. It adds some
// additional tracking overhead to the standard LRU cache, and is
// computationally about 2x the cost, and adds some metadata over
// head. The ARCCache is similar, but does not require setting any
// parameters.
type TwoQueueCache struct {
size int
recentSize int
recent simplelru.LRUCache
frequent simplelru.LRUCache
recentEvict simplelru.LRUCache
lock sync.RWMutex
}
// New2Q creates a new TwoQueueCache using the default
// values for the parameters.
func New2Q(size int) (*TwoQueueCache, error) {
return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
}
// New2QParams creates a new TwoQueueCache using the provided
// parameter values.
func New2QParams(size int, recentRatio, ghostRatio float64) (*TwoQueueCache, error) {
if size <= 0 {
return nil, fmt.Errorf("invalid size")
}
if recentRatio < 0.0 || recentRatio > 1.0 {
return nil, fmt.Errorf("invalid recent ratio")
}
if ghostRatio < 0.0 || ghostRatio > 1.0 {
return nil, fmt.Errorf("invalid ghost ratio")
}
// Determine the sub-sizes
recentSize := int(float64(size) * recentRatio)
evictSize := int(float64(size) * ghostRatio)
// Allocate the LRUs
recent, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
frequent, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
recentEvict, err := simplelru.NewLRU(evictSize, nil)
if err != nil {
return nil, err
}
// Initialize the cache
c := &TwoQueueCache{
size: size,
recentSize: recentSize,
recent: recent,
frequent: frequent,
recentEvict: recentEvict,
}
return c, nil
}
// Get looks up a key's value from the cache.
func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if this is a frequent value
if val, ok := c.frequent.Get(key); ok {
return val, ok
}
// If the value is contained in recent, then we
// promote it to frequent
if val, ok := c.recent.Peek(key); ok {
c.recent.Remove(key)
c.frequent.Add(key, val)
return val, ok
}
// No hit
return nil, false
}
// Add adds a value to the cache.
func (c *TwoQueueCache) Add(key, value interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if the value is frequently used already,
// and just update the value
if c.frequent.Contains(key) {
c.frequent.Add(key, value)
return
}
// Check if the value is recently used, and promote
// the value into the frequent list
if c.recent.Contains(key) {
c.recent.Remove(key)
c.frequent.Add(key, value)
return
}
// If the value was recently evicted, add it to the
// frequently used list
if c.recentEvict.Contains(key) {
c.ensureSpace(true)
c.recentEvict.Remove(key)
c.frequent.Add(key, value)
return
}
// Add to the recently seen list
c.ensureSpace(false)
c.recent.Add(key, value)
}
// ensureSpace is used to ensure we have space in the cache
func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
// If we have space, nothing to do
recentLen := c.recent.Len()
freqLen := c.frequent.Len()
if recentLen+freqLen < c.size {
return
}
// If the recent buffer is larger than
// the target, evict from there
if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
k, _, _ := c.recent.RemoveOldest()
c.recentEvict.Add(k, nil)
return
}
// Remove from the frequent list otherwise
c.frequent.RemoveOldest()
}
// Len returns the number of items in the cache.
func (c *TwoQueueCache) Len() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.recent.Len() + c.frequent.Len()
}
// Keys returns a slice of the keys in the cache.
// The frequently used keys are first in the returned slice.
func (c *TwoQueueCache) Keys() []interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
k1 := c.frequent.Keys()
k2 := c.recent.Keys()
return append(k1, k2...)
}
// Remove removes the provided key from the cache.
func (c *TwoQueueCache) Remove(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
if c.frequent.Remove(key) {
return
}
if c.recent.Remove(key) {
return
}
if c.recentEvict.Remove(key) {
return
}
}
// Purge is used to completely clear the cache.
func (c *TwoQueueCache) Purge() {
c.lock.Lock()
defer c.lock.Unlock()
c.recent.Purge()
c.frequent.Purge()
c.recentEvict.Purge()
}
// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *TwoQueueCache) Contains(key interface{}) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.frequent.Contains(key) || c.recent.Contains(key)
}
// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
if val, ok := c.frequent.Peek(key); ok {
return val, ok
}
return c.recent.Peek(key)
}


@@ -1,5 +1,3 @@
-Copyright (c) 2014 HashiCorp, Inc.
Mozilla Public License, version 2.0
1. Definitions


@@ -1,7 +0,0 @@
golang-lru
==========
Please upgrade to github.com/hashicorp/golang-lru/v2 for all new code as v1 will
not be updated anymore. The v2 version supports generics and is faster; old code
can specify a specific tag, e.g. github.com/hashicorp/golang-lru/v1.0.2 for
backwards compatibility.


@@ -1,256 +0,0 @@
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/simplelru"
)
// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
// ARC is an enhancement over the standard LRU cache in that tracks both
// frequency and recency of use. This avoids a burst in access to new
// entries from evicting the frequently used older entries. It adds some
// additional tracking overhead to a standard LRU cache, computationally
// it is roughly 2x the cost, and the extra memory overhead is linear
// with the size of the cache. ARC has been patented by IBM, but is
// similar to the TwoQueueCache (2Q) which requires setting parameters.
type ARCCache struct {
size int // Size is the total capacity of the cache
p int // P is the dynamic preference towards T1 or T2
t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
b1 simplelru.LRUCache // B1 is the LRU for evictions from t1
t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
b2 simplelru.LRUCache // B2 is the LRU for evictions from t2
lock sync.RWMutex
}
// NewARC creates an ARC of the given size
func NewARC(size int) (*ARCCache, error) {
// Create the sub LRUs
b1, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
b2, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
t1, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
t2, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
// Initialize the ARC
c := &ARCCache{
size: size,
p: 0,
t1: t1,
b1: b1,
t2: t2,
b2: b2,
}
return c, nil
}
// Get looks up a key's value from the cache.
func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
// If the value is contained in T1 (recent), then
// promote it to T2 (frequent)
if val, ok := c.t1.Peek(key); ok {
c.t1.Remove(key)
c.t2.Add(key, val)
return val, ok
}
// Check if the value is contained in T2 (frequent)
if val, ok := c.t2.Get(key); ok {
return val, ok
}
// No hit
return nil, false
}
// Add adds a value to the cache.
func (c *ARCCache) Add(key, value interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if the value is contained in T1 (recent), and potentially
// promote it to frequent T2
if c.t1.Contains(key) {
c.t1.Remove(key)
c.t2.Add(key, value)
return
}
// Check if the value is already in T2 (frequent) and update it
if c.t2.Contains(key) {
c.t2.Add(key, value)
return
}
// Check if this value was recently evicted as part of the
// recently used list
if c.b1.Contains(key) {
// T1 set is too small, increase P appropriately
delta := 1
b1Len := c.b1.Len()
b2Len := c.b2.Len()
if b2Len > b1Len {
delta = b2Len / b1Len
}
if c.p+delta >= c.size {
c.p = c.size
} else {
c.p += delta
}
// Potentially need to make room in the cache
if c.t1.Len()+c.t2.Len() >= c.size {
c.replace(false)
}
// Remove from B1
c.b1.Remove(key)
// Add the key to the frequently used list
c.t2.Add(key, value)
return
}
// Check if this value was recently evicted as part of the
// frequently used list
if c.b2.Contains(key) {
// T2 set is too small, decrease P appropriately
delta := 1
b1Len := c.b1.Len()
b2Len := c.b2.Len()
if b1Len > b2Len {
delta = b1Len / b2Len
}
if delta >= c.p {
c.p = 0
} else {
c.p -= delta
}
// Potentially need to make room in the cache
if c.t1.Len()+c.t2.Len() >= c.size {
c.replace(true)
}
// Remove from B2
c.b2.Remove(key)
// Add the key to the frequently used list
c.t2.Add(key, value)
return
}
// Potentially need to make room in the cache
if c.t1.Len()+c.t2.Len() >= c.size {
c.replace(false)
}
// Keep the size of the ghost buffers trim
if c.b1.Len() > c.size-c.p {
c.b1.RemoveOldest()
}
if c.b2.Len() > c.p {
c.b2.RemoveOldest()
}
// Add to the recently seen list
c.t1.Add(key, value)
}
// replace is used to adaptively evict from either T1 or T2
// based on the current learned value of P
func (c *ARCCache) replace(b2ContainsKey bool) {
t1Len := c.t1.Len()
if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
k, _, ok := c.t1.RemoveOldest()
if ok {
c.b1.Add(k, nil)
}
} else {
k, _, ok := c.t2.RemoveOldest()
if ok {
c.b2.Add(k, nil)
}
}
}
// Len returns the number of cached entries
func (c *ARCCache) Len() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.t1.Len() + c.t2.Len()
}
// Keys returns all the cached keys
func (c *ARCCache) Keys() []interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
k1 := c.t1.Keys()
k2 := c.t2.Keys()
return append(k1, k2...)
}
// Remove is used to purge a key from the cache
func (c *ARCCache) Remove(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
if c.t1.Remove(key) {
return
}
if c.t2.Remove(key) {
return
}
if c.b1.Remove(key) {
return
}
if c.b2.Remove(key) {
return
}
}
// Purge is used to clear the cache
func (c *ARCCache) Purge() {
c.lock.Lock()
defer c.lock.Unlock()
c.t1.Purge()
c.t2.Purge()
c.b1.Purge()
c.b2.Purge()
}
// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *ARCCache) Contains(key interface{}) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.t1.Contains(key) || c.t2.Contains(key)
}
// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
if val, ok := c.t1.Peek(key); ok {
return val, ok
}
return c.t2.Peek(key)
}


@@ -1,21 +0,0 @@
// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries,
// at the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as
// well as recent usage in both the frequent and recent caches. Its
// computational overhead is comparable to TwoQueueCache, but the memory
// overhead is linear with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
package lru


@@ -1,231 +0,0 @@
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/simplelru"
)
const (
// DefaultEvictedBufferSize defines the default buffer size to store evicted key/val
DefaultEvictedBufferSize = 16
)
// Cache is a thread-safe fixed size LRU cache.
type Cache struct {
lru *simplelru.LRU
evictedKeys, evictedVals []interface{}
onEvictedCB func(k, v interface{})
lock sync.RWMutex
}
// New creates an LRU of the given size.
func New(size int) (*Cache, error) {
return NewWithEvict(size, nil)
}
// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
func NewWithEvict(size int, onEvicted func(key, value interface{})) (c *Cache, err error) {
// create a cache with default settings
c = &Cache{
onEvictedCB: onEvicted,
}
if onEvicted != nil {
c.initEvictBuffers()
onEvicted = c.onEvicted
}
c.lru, err = simplelru.NewLRU(size, onEvicted)
return
}
func (c *Cache) initEvictBuffers() {
c.evictedKeys = make([]interface{}, 0, DefaultEvictedBufferSize)
c.evictedVals = make([]interface{}, 0, DefaultEvictedBufferSize)
}
// onEvicted save evicted key/val and sent in externally registered callback
// outside of critical section
func (c *Cache) onEvicted(k, v interface{}) {
c.evictedKeys = append(c.evictedKeys, k)
c.evictedVals = append(c.evictedVals, v)
}
// Purge is used to completely clear the cache.
func (c *Cache) Purge() {
var ks, vs []interface{}
c.lock.Lock()
c.lru.Purge()
if c.onEvictedCB != nil && len(c.evictedKeys) > 0 {
ks, vs = c.evictedKeys, c.evictedVals
c.initEvictBuffers()
}
c.lock.Unlock()
// invoke callback outside of critical section
if c.onEvictedCB != nil {
for i := 0; i < len(ks); i++ {
c.onEvictedCB(ks[i], vs[i])
}
}
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache) Add(key, value interface{}) (evicted bool) {
var k, v interface{}
c.lock.Lock()
evicted = c.lru.Add(key, value)
if c.onEvictedCB != nil && evicted {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted {
c.onEvictedCB(k, v)
}
return
}
// Get looks up a key's value from the cache.
func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
c.lock.Lock()
value, ok = c.lru.Get(key)
c.lock.Unlock()
return value, ok
}
// Contains checks if a key is in the cache, without updating the
// recent-ness or deleting it for being stale.
func (c *Cache) Contains(key interface{}) bool {
c.lock.RLock()
containKey := c.lru.Contains(key)
c.lock.RUnlock()
return containKey
}
// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
value, ok = c.lru.Peek(key)
c.lock.RUnlock()
return value, ok
}
// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
var k, v interface{}
c.lock.Lock()
if c.lru.Contains(key) {
c.lock.Unlock()
return true, false
}
evicted = c.lru.Add(key, value)
if c.onEvictedCB != nil && evicted {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted {
c.onEvictedCB(k, v)
}
return false, evicted
}
// PeekOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) {
var k, v interface{}
c.lock.Lock()
previous, ok = c.lru.Peek(key)
if ok {
c.lock.Unlock()
return previous, true, false
}
evicted = c.lru.Add(key, value)
if c.onEvictedCB != nil && evicted {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted {
c.onEvictedCB(k, v)
}
return nil, false, evicted
}
// Remove removes the provided key from the cache.
func (c *Cache) Remove(key interface{}) (present bool) {
var k, v interface{}
c.lock.Lock()
present = c.lru.Remove(key)
if c.onEvictedCB != nil && present {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && present {
c.onEvictedCB(k, v)
}
return
}
// Resize changes the cache size.
func (c *Cache) Resize(size int) (evicted int) {
var ks, vs []interface{}
c.lock.Lock()
evicted = c.lru.Resize(size)
if c.onEvictedCB != nil && evicted > 0 {
ks, vs = c.evictedKeys, c.evictedVals
c.initEvictBuffers()
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted > 0 {
for i := 0; i < len(ks); i++ {
c.onEvictedCB(ks[i], vs[i])
}
}
return evicted
}
// RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() (key, value interface{}, ok bool) {
var k, v interface{}
c.lock.Lock()
key, value, ok = c.lru.RemoveOldest()
if c.onEvictedCB != nil && ok {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && ok {
c.onEvictedCB(k, v)
}
return
}
// GetOldest returns the oldest entry
func (c *Cache) GetOldest() (key, value interface{}, ok bool) {
c.lock.RLock()
key, value, ok = c.lru.GetOldest()
c.lock.RUnlock()
return
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache) Keys() []interface{} {
c.lock.RLock()
keys := c.lru.Keys()
c.lock.RUnlock()
return keys
}
// Len returns the number of items in the cache.
func (c *Cache) Len() int {
c.lock.RLock()
length := c.lru.Len()
c.lock.RUnlock()
return length
}


@@ -25,7 +25,7 @@ type entry struct {
// NewLRU constructs an LRU of the given size
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
if size <= 0 {
return nil, errors.New("must provide a positive size")
return nil, errors.New("Must provide a positive size")
}
c := &LRU{
size: size,
@@ -73,9 +73,6 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) {
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
-if ent.Value.(*entry) == nil {
-return nil, false
-}
return ent.Value.(*entry).value, true
}
return
@@ -109,7 +106,7 @@ func (c *LRU) Remove(key interface{}) (present bool) {
}
// RemoveOldest removes the oldest item from the cache.
-func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) {
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
ent := c.evictList.Back()
if ent != nil {
c.removeElement(ent)
@@ -120,7 +117,7 @@ func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) {
}
// GetOldest returns the oldest entry
-func (c *LRU) GetOldest() (key, value interface{}, ok bool) {
+func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
ent := c.evictList.Back()
if ent != nil {
kv := ent.Value.(*entry)
@@ -145,19 +142,6 @@ func (c *LRU) Len() int {
return c.evictList.Len()
}
-// Resize changes the cache size.
-func (c *LRU) Resize(size int) (evicted int) {
-diff := c.Len() - size
-if diff < 0 {
-diff = 0
-}
-for i := 0; i < diff; i++ {
-c.removeOldest()
-}
-c.size = size
-return diff
-}
// removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() {
ent := c.evictList.Back()


@@ -1,4 +1,3 @@
-// Package simplelru provides simple LRU implementation based on build-in container/list.
package simplelru
// LRUCache is the interface for simple LRU cache.
@@ -11,7 +10,7 @@ type LRUCache interface {
// updates the "recently used"-ness of the key. #value, isFound
Get(key interface{}) (value interface{}, ok bool)
-// Checks if a key exists in cache without updating the recent-ness.
+// Check if a key exsists in cache without updating the recent-ness.
Contains(key interface{}) (ok bool)
// Returns key's value without updating the "recently used"-ness of the key.
@@ -32,9 +31,6 @@ type LRUCache interface {
// Returns the number of items in the cache.
Len() int
-// Clears all cache entries.
+// Clear all cache entries
Purge()
-// Resizes cache, returning number evicted
-Resize(int) int
}


@@ -1,16 +0,0 @@
package lru
import (
"crypto/rand"
"math"
"math/big"
"testing"
)
func getRand(tb testing.TB) int64 {
out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
tb.Fatal(err)
}
return out.Int64()
}

vendor/github.com/opencoff/go-sieve/.gitignore generated vendored Normal file

@@ -0,0 +1,47 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
bin/*
.*.sw?
.idea
logs/*
# gg ignores
vendor/src/*
vendor/pkg/*
servers.iml
*.DS_Store
# vagrant ignores
tools/vagrant/.vagrant
tools/vagrant/adsrv-conf/.frontend
tools/vagrant/adsrv-conf/.bidder
tools/vagrant/adsrv-conf/.transcoder
tools/vagrant/redis-cluster-conf/7777/nodes.conf
tools/vagrant/redis-cluster-conf/7778/nodes.conf
tools/vagrant/redis-cluster-conf/7779/nodes.conf
*.aof
*.rdb
*.deb

vendor/github.com/opencoff/go-sieve/LICENSE generated vendored Normal file

@@ -0,0 +1,24 @@
BSD 2-Clause License
Copyright (c) 2024, Sudhi Herle
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/opencoff/go-sieve/README.md generated vendored Normal file

@@ -0,0 +1,9 @@
# go-sieve - SIEVE is simpler than LRU
## What is it?
`go-sieve` is golang implementation of the [SIEVE](https://yazhuozhang.com/assets/pdf/nsdi24-sieve.pdf)
cache eviction algorithm.
This implementation closely follows the paper's pseudo-code - but
uses golang generics to provide an ergonomic interface.

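Since the README is terse, here is a small self-contained sketch of how eviction behaves with this package. It relies only on the New, Add and Get functions defined in the vendored sieve.go below; the capacity and keys are made up for illustration.

package main

import (
	"fmt"

	sieve "github.com/opencoff/go-sieve"
)

func main() {
	// A tiny cache with room for two entries.
	c := sieve.New[string, int](2)

	c.Add("a", 1)
	c.Add("b", 2)
	c.Get("a") // marks "a" as visited

	// The cache is full, so adding "c" triggers eviction: the hand sweeps
	// from the tail, skips the visited "a" (clearing its visited bit) and
	// evicts the unvisited "b".
	c.Add("c", 3)

	_, aOK := c.Get("a")
	_, bOK := c.Get("b")
	fmt.Println(aOK, bOK) // true false: the recently read entry survived
}

Unlike LRU, a hit never moves an entry in the queue; it only sets a flag, which is what keeps the algorithm and its locking simple.
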
vendor/github.com/opencoff/go-sieve/sieve.go generated vendored Normal file

@@ -0,0 +1,334 @@
// sieve.go - SIEVE - a simple and efficient cache
//
// (c) 2024 Sudhi Herle <sudhi@herle.net>
//
// Copyright 2024- Sudhi Herle <sw-at-herle-dot-net>
// License: BSD-2-Clause
//
// If you need a commercial license for this work, please contact
// the author.
//
// This software does not come with any express or implied
// warranty; it is provided "as is". No claim is made to its
// suitability for any purpose.
// This is golang implementation of the SIEVE cache eviction algorithm
// The original paper is:
// https://yazhuozhang.com/assets/pdf/nsdi24-sieve.pdf
//
// This implementation closely follows the paper - but uses golang generics
// for an ergonomic interface.
// Package sieve implements the SIEVE cache eviction algorithm.
// SIEVE stands in contrast to other eviction algorithms like LRU, 2Q, ARC
// with its simplicity. The original paper is in:
// https://yazhuozhang.com/assets/pdf/nsdi24-sieve.pdf
//
// SIEVE is built on a FIFO queue - with an extra pointer (called "hand") in
// the paper. This "hand" plays a crucial role in determining who to evict
// next.
package sieve
import (
"fmt"
"strings"
"sync"
"sync/atomic"
)
// node contains the <key, val> tuple as a node in a linked list.
type node[K comparable, V any] struct {
sync.Mutex
key K
val V
visited atomic.Bool
next *node[K, V]
prev *node[K, V]
}
// Sieve represents a cache mapping the key of type 'K' with
// a value of type 'V'. The type 'K' must implement the
// comparable trait. An instance of Sieve has a fixed max capacity;
// new additions to the cache beyond the capacity will cause cache
// eviction of other entries - as determined by the SIEVE algorithm.
type Sieve[K comparable, V any] struct {
mu sync.Mutex
cache *syncMap[K, *node[K, V]]
head *node[K, V]
tail *node[K, V]
hand *node[K, V]
size int
capacity int
pool *syncPool[node[K, V]]
}
// New creates a new cache of size 'capacity' mapping key 'K' to value 'V'
func New[K comparable, V any](capacity int) *Sieve[K, V] {
s := &Sieve[K, V]{
cache: newSyncMap[K, *node[K, V]](),
capacity: capacity,
pool: newSyncPool[node[K, V]](),
}
return s
}
// Get fetches the value for a given key in the cache.
// It returns true if the key is in the cache, false otherwise.
// The zero value for 'V' is returned when key is not in the cache.
func (s *Sieve[K, V]) Get(key K) (V, bool) {
if v, ok := s.cache.Get(key); ok {
v.visited.Store(true)
return v.val, true
}
var x V
return x, false
}
// Add adds a new element to the cache or overwrite one if it exists
// Return true if we replaced, false otherwise
func (s *Sieve[K, V]) Add(key K, val V) bool {
if v, ok := s.cache.Get(key); ok {
v.visited.Store(true)
v.Lock()
v.val = val
v.Unlock()
return true
}
s.mu.Lock()
s.add(key, val)
s.mu.Unlock()
return false
}
// Probe adds <key, val> if not present in the cache.
// Returns:
//
// <cached-val, true> when key is present in the cache
// <val, false> when key is not present in the cache
func (s *Sieve[K, V]) Probe(key K, val V) (V, bool) {
if v, ok := s.cache.Get(key); ok {
v.visited.Store(true)
return v.val, true
}
s.mu.Lock()
s.add(key, val)
s.mu.Unlock()
return val, false
}
// Delete deletes the named key from the cache
// It returns true if the item was in the cache and false otherwise
func (s *Sieve[K, V]) Delete(key K) bool {
if v, ok := s.cache.Del(key); ok {
s.mu.Lock()
s.remove(v)
s.mu.Unlock()
return true
}
return false
}
// Purge resets the cache
func (s *Sieve[K, V]) Purge() {
s.mu.Lock()
s.cache = newSyncMap[K, *node[K, V]]()
s.head = nil
s.tail = nil
s.mu.Unlock()
}
// Len returns the current cache utilization
func (s *Sieve[K, V]) Len() int {
return s.size
}
// Cap returns the max cache capacity
func (s *Sieve[K, V]) Cap() int {
return s.capacity
}
// String returns a string description of the sieve cache
func (s *Sieve[K, V]) String() string {
s.mu.Lock()
m := s.desc()
s.mu.Unlock()
return m
}
// Dump dumps all the cache contents as a newline delimited
// string.
func (s *Sieve[K, V]) Dump() string {
var b strings.Builder
s.mu.Lock()
b.WriteString(s.desc())
b.WriteRune('\n')
for n := s.head; n != nil; n = n.next {
h := " "
if n == s.hand {
h = ">>"
}
b.WriteString(fmt.Sprintf("%svisited=%v, key=%v, val=%v\n", h, n.visited.Load(), n.key, n.val))
}
s.mu.Unlock()
return b.String()
}
// -- internal methods --
// add a new tuple to the cache and evict as necessary
// caller must hold lock.
func (s *Sieve[K, V]) add(key K, val V) {
// cache miss; we evict and fnd a new node
if s.size == s.capacity {
s.evict()
}
n := s.newNode(key, val)
// Eviction is guaranteed to remove one node; so this should never happen.
if n == nil {
msg := fmt.Sprintf("%T: add <%v>: objpool empty after eviction", s, key)
panic(msg)
}
s.cache.Put(key, n)
// insert at the head of the list
n.next = s.head
n.prev = nil
if s.head != nil {
s.head.prev = n
}
s.head = n
if s.tail == nil {
s.tail = n
}
s.size += 1
}
// evict an item from the cache.
// NB: Caller must hold the lock
func (s *Sieve[K, V]) evict() {
hand := s.hand
if hand == nil {
hand = s.tail
}
for hand != nil {
if !hand.visited.Load() {
s.cache.Del(hand.key)
s.remove(hand)
s.hand = hand.prev
return
}
hand.visited.Store(false)
hand = hand.prev
// wrap around and start again
if hand == nil {
hand = s.tail
}
}
s.hand = hand
}
func (s *Sieve[K, V]) remove(n *node[K, V]) {
s.size -= 1
// remove node from list
if n.prev != nil {
n.prev.next = n.next
} else {
s.head = n.next
}
if n.next != nil {
n.next.prev = n.prev
} else {
s.tail = n.prev
}
s.pool.Put(n)
}
func (s *Sieve[K, V]) newNode(key K, val V) *node[K, V] {
n := s.pool.Get()
n.key, n.val = key, val
n.next, n.prev = nil, nil
n.visited.Store(false)
return n
}
// desc describes the properties of the sieve
func (s *Sieve[K, V]) desc() string {
m := fmt.Sprintf("cache<%T>: size %d, cap %d, head=%p, tail=%p, hand=%p",
s, s.size, s.capacity, s.head, s.tail, s.hand)
return m
}
// Generic sync.Pool
type syncPool[T any] struct {
pool sync.Pool
}
func newSyncPool[T any]() *syncPool[T] {
p := &syncPool[T]{
pool: sync.Pool{
New: func() any { return new(T) },
},
}
return p
}
func (s *syncPool[T]) Get() *T {
p := s.pool.Get()
return p.(*T)
}
func (s *syncPool[T]) Put(n *T) {
s.pool.Put(n)
}
// generic sync.Map
type syncMap[K comparable, V any] struct {
m sync.Map
}
func newSyncMap[K comparable, V any]() *syncMap[K, V] {
m := syncMap[K, V]{}
return &m
}
func (m *syncMap[K, V]) Get(key K) (V, bool) {
v, ok := m.m.Load(key)
if ok {
return v.(V), true
}
var z V
return z, false
}
func (m *syncMap[K, V]) Put(key K, val V) {
m.m.Store(key, val)
}
func (m *syncMap[K, V]) Del(key K) (V, bool) {
x, ok := m.m.LoadAndDelete(key)
if ok {
return x.(V), true
}
var z V
return z, false
}

vendor/modules.txt vendored

@@ -34,9 +34,8 @@ github.com/hashicorp/go-immutable-radix
# github.com/hashicorp/go-syslog v1.0.0
## explicit
github.com/hashicorp/go-syslog
-# github.com/hashicorp/golang-lru v1.0.2
-## explicit; go 1.12
-github.com/hashicorp/golang-lru
+# github.com/hashicorp/golang-lru v0.5.0
+## explicit
github.com/hashicorp/golang-lru/simplelru
# github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb
## explicit; go 1.12
@@ -87,6 +86,9 @@ github.com/onsi/ginkgo/v2/internal/interrupt_handler
github.com/onsi/ginkgo/v2/internal/parallel_support
github.com/onsi/ginkgo/v2/reporters
github.com/onsi/ginkgo/v2/types
+# github.com/opencoff/go-sieve v0.2.1
+## explicit; go 1.21.1
+github.com/opencoff/go-sieve
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors