[chore] consolidate caching libraries (#704)

* add miekg/dns dependency

* set/validate accountDomain

* move finger to dereferencer

* totally break GetRemoteAccount

* start reworking finger func a bit

* start reworking getRemoteAccount a bit

* move mention parts to namestring

* rework webfingerget

* use util function to extract webfinger parts

* use accountDomain

* rework finger again, final form

* just a real nasty commit, the worst

* remove refresh from account

* use new ASRepToAccount signature

* fix incorrect debug call

* fix for new getRemoteAccount

* rework GetRemoteAccount

* start updating tests to remove repetition

* break a lot of tests
Move shared test logic into the testrig,
rather than having it scattered all over
the place. This allows us to just mock
the transport controller once, and have
all tests use it (unless they need not to
for some other reason).
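
Roughly, the shared helper boils down to a single mocked HTTP transport that the testrig hands out to every test. The sketch below is illustrative only; NewMockHTTPClient and MockRoundTripper are placeholder names, not the actual testrig API:

    package testrig

    import "net/http"

    // MockRoundTripper lets each test decide what a "remote server" returns,
    // without ever touching the network.
    type MockRoundTripper struct {
        Handler func(*http.Request) (*http.Response, error)
    }

    func (m *MockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
        return m.Handler(req)
    }

    // NewMockHTTPClient returns an http.Client whose transport is fully mocked,
    // so all tests can share one transport controller setup.
    func NewMockHTTPClient(handler func(*http.Request) (*http.Response, error)) *http.Client {
        return &http.Client{Transport: &MockRoundTripper{Handler: handler}}
    }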

* fix up tests to use main mock httpclient

* webfinger only if necessary

* cheeky linting with the lads

* update mentionName regex
recognize instance accounts

* don't finger instance accounts

* test webfinger part extraction
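
For reference, pulling the username and domain out of a namestring or webfinger acct URI comes down to something like the sketch below. This is a simplified illustration, not the real util function; note that splitting on the last '@' also handles instance accounts whose username is itself a domain:

    package util // placeholder package

    import (
        "errors"
        "strings"
    )

    // ExtractNamestringParts splits "@user@example.org" or "acct:user@example.org"
    // into username and domain parts.
    func ExtractNamestringParts(s string) (username, domain string, err error) {
        s = strings.TrimPrefix(s, "acct:")
        s = strings.TrimPrefix(s, "@")
        i := strings.LastIndex(s, "@")
        if i == -1 {
            // No domain part given: treat as a local account.
            return s, "", nil
        }
        if i == 0 || i == len(s)-1 {
            return "", "", errors.New("malformed namestring")
        }
        return s[:i], s[i+1:], nil
    }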

* increase default worker count to 4 per cpu
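
In other words, something along these lines (a sketch; the actual config plumbing differs and the names here are placeholders):

    package workers // placeholder

    import "runtime"

    // defaultWorkerCount derives the worker pool size from the machine's CPU count.
    func defaultWorkerCount() int {
        const workersPerCPU = 4
        return runtime.NumCPU() * workersPerCPU
    }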

* don't repeat regex parsing

* final search for discovered accountDomain

* be more permissive in namestring lookup

* add more extraction tests

* simplify GetParseMentionFunc

* skip long search if local account

* fix broken test

* consolidate to all use same caching libraries

Signed-off-by: kim <grufwub@gmail.com>
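
As a rough illustration of the consolidated library's API (a sketch based on the go-cache interface touched in this diff; the concrete key/value types and TTL/sweep values used in GoToSocial differ):

    package example

    import (
        "time"

        cache "codeberg.org/gruf/go-cache/v2"
    )

    // newStringCache builds a TTL cache with a 5 minute item lifetime and a
    // background eviction sweep scheduled every 30 seconds.
    func newStringCache() cache.Cache[string, string] {
        c := cache.New[string, string]()
        c.SetTTL(5*time.Minute, false) // update=false: leave existing entries' expiries unchanged
        c.Start(30 * time.Second)      // schedule regular eviction sweeps
        return c
    }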

* perform more caching in the database layer

Signed-off-by: kim <grufwub@gmail.com>
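
The database-layer caching follows the usual cache-aside shape: consult the cache, fall back to the DB on a miss, then populate the cache. A minimal sketch (type names, fields, and the Account model here are placeholders, not GoToSocial's real ones):

    package db // placeholder

    import (
        "context"

        cache "codeberg.org/gruf/go-cache/v2"
        "github.com/uptrace/bun"
    )

    // Account stands in for the real account model.
    type Account struct {
        ID       string `bun:",pk"`
        Username string
    }

    type accountDB struct {
        conn  *bun.DB
        cache cache.Cache[string, *Account]
    }

    // GetAccountByID checks the cache first and only queries the database on a miss.
    func (a *accountDB) GetAccountByID(ctx context.Context, id string) (*Account, error) {
        if cached, ok := a.cache.Get(id); ok {
            return cached, nil // cache hit
        }
        account := new(Account)
        if err := a.conn.NewSelect().Model(account).Where("id = ?", id).Scan(ctx); err != nil {
            return nil, err
        }
        a.cache.Set(id, account) // populate for subsequent lookups
        return account, nil
    }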

* remove ASNote cache

Signed-off-by: kim <grufwub@gmail.com>

* update cache library, improve db tracing hooks

Signed-off-by: kim <grufwub@gmail.com>
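
For the tracing hooks, bun exposes a QueryHook interface with BeforeQuery/AfterQuery callbacks; a hook registered on the DB connection can log each query and its duration. A hedged sketch (the real hook's logging and level handling differ):

    package db // placeholder

    import (
        "context"
        "log"
        "time"

        "github.com/uptrace/bun"
    )

    // traceHook logs every query once it completes.
    type traceHook struct{}

    func (traceHook) BeforeQuery(ctx context.Context, _ *bun.QueryEvent) context.Context {
        return ctx
    }

    func (traceHook) AfterQuery(_ context.Context, event *bun.QueryEvent) {
        log.Printf("query=%q duration=%s err=%v",
            event.Query, time.Since(event.StartTime), event.Err)
    }

Registered with something like bunDB.AddQueryHook(traceHook{}) during connection setup.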

* return ErrNoEntries if no account status IDs found, small formatting changes

Signed-off-by: kim <grufwub@gmail.com>
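
That is, an empty result set is surfaced as a sentinel error rather than a nil error with an empty slice, so callers can tell "no statuses" apart from a real result. A tiny sketch with placeholder names (GoToSocial's db package does define ErrNoEntries, but the surrounding function here is illustrative):

    package account // placeholder

    import "github.com/superseriousbusiness/gotosocial/internal/db"

    // filterStatusIDs reports an empty ID list as db.ErrNoEntries.
    func filterStatusIDs(ids []string) ([]string, error) {
        if len(ids) == 0 {
            return nil, db.ErrNoEntries
        }
        return ids, nil
    }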

* fix tests, thanks tobi!

Signed-off-by: kim <grufwub@gmail.com>

Co-authored-by: tsmethurst <tobi.smethurst@protonmail.com>
Author: kim
Date: 2022-07-10 16:18:21 +01:00
Committed by: GitHub
Parent: 211266c072
Commit: 7cc40302a5
67 changed files with 3159 additions and 1244 deletions


@@ -61,7 +61,7 @@ type Cache[Key comparable, Value any] interface {
// New returns a new initialized Cache.
func New[K comparable, V any]() Cache[K, V] {
- c := TTLCache[K, V]{}
+ c := &TTLCache[K, V]{}
c.Init()
- return &c
+ return c
}


@@ -40,9 +40,9 @@ type LookupCache[OGKey, AltKey comparable, Value any] interface {
}
type lookupTTLCache[OK, AK comparable, V any] struct {
- TTLCache[OK, V]
config LookupCfg[OK, AK, V]
lookup LookupMap[OK, AK]
+ TTLCache[OK, V]
}
// NewLookup returns a new initialized LookupCache.
@@ -55,14 +55,13 @@ func NewLookup[OK, AK comparable, V any](cfg LookupCfg[OK, AK, V]) LookupCache[O
case cfg.DeleteLookups == nil:
panic("cache: nil delete lookups function")
}
- c := lookupTTLCache[OK, AK, V]{config: cfg}
+ c := &lookupTTLCache[OK, AK, V]{config: cfg}
c.TTLCache.Init()
c.lookup.lookup = make(map[string]map[AK]OK)
c.config.RegisterLookups(&c.lookup)
c.SetEvictionCallback(nil)
c.SetInvalidateCallback(nil)
- c.lookup.initd = true
- return &c
+ return c
}
func (c *lookupTTLCache[OK, AK, V]) SetEvictionCallback(hook Hook[OK, V]) {
@@ -158,16 +157,13 @@ func (c *lookupTTLCache[OK, AK, V]) InvalidateBy(lookup string, key AK) bool {
// keys to primary keys under supplied lookup identifiers.
// This is essentially a wrapper around map[string](map[K1]K2).
type LookupMap[OK comparable, AK comparable] struct {
- initd bool
lookup map[string](map[AK]OK)
}
// RegisterLookup registers a lookup identifier in the LookupMap,
// note this can only be doing during the cfg.RegisterLookups() hook.
func (l *LookupMap[OK, AK]) RegisterLookup(id string) {
- if l.initd {
- panic("cache: cannot register lookup after initialization")
- } else if _, ok := l.lookup[id]; ok {
+ if _, ok := l.lookup[id]; ok {
panic("cache: lookup mapping already exists for identifier")
}
l.lookup[id] = make(map[AK]OK, 100)

vendor/codeberg.org/gruf/go-cache/v2/scheduler.go (generated, vendored, new file)

@@ -0,0 +1,17 @@
+ package cache
+ import (
+ "time"
+ "codeberg.org/gruf/go-sched"
+ )
+ // scheduler is the global cache runtime scheduler
+ // for handling regular cache evictions.
+ var scheduler = sched.NewScheduler(5)
+ // schedule will add the given sweep routine to the global scheduler, and start the global scheduler.
+ func schedule(sweep func(time.Time), freq time.Duration) func() {
+ go scheduler.Start() // does nothing if already running
+ return scheduler.Schedule(sched.NewJob(sweep).Every(freq))
+ }


@@ -1,11 +1,8 @@
package cache
import (
"context"
"sync"
"time"
"codeberg.org/gruf/go-runners"
)
// TTLCache is the underlying Cache implementation, providing both the base
@@ -16,11 +13,11 @@ type TTLCache[Key comparable, Value any] struct {
evict Hook[Key, Value] // the evict hook is called when an item is evicted from the cache, includes manual delete
invalid Hook[Key, Value] // the invalidate hook is called when an item's data in the cache is invalidated
ttl time.Duration // ttl is the item TTL
- svc runners.Service // svc manages running of the cache eviction routine
+ stop func() // stop is the cancel function for the scheduled eviction routine
+ mu sync.Mutex // mu protects TTLCache for concurrent access
}
- // Init performs Cache initialization, this MUST be called.
+ // Init performs Cache initialization. MUST be called.
func (c *TTLCache[K, V]) Init() {
c.cache = make(map[K](*entry[V]), 100)
c.evict = emptyHook[K, V]
@@ -28,68 +25,48 @@ func (c *TTLCache[K, V]) Init() {
c.ttl = time.Minute * 5
}
- func (c *TTLCache[K, V]) Start(freq time.Duration) bool {
+ func (c *TTLCache[K, V]) Start(freq time.Duration) (ok bool) {
// Nothing to start
if freq <= 0 {
return false
}
- // Track state of starting
- done := make(chan struct{})
- started := false
+ // Safely start
+ c.mu.Lock()
- go func() {
- ran := c.svc.Run(func(ctx context.Context) {
- // Successfully started
- started = true
- close(done)
- // start routine
- c.run(ctx, freq)
- })
- // failed to start
- if !ran {
- close(done)
- }
- }()
- <-done
- return started
- }
- func (c *TTLCache[K, V]) Stop() bool {
- return c.svc.Stop()
- }
- func (c *TTLCache[K, V]) run(ctx context.Context, freq time.Duration) {
- t := time.NewTimer(freq)
- for {
- select {
- // we got stopped
- case <-ctx.Done():
- if !t.Stop() {
- <-t.C
- }
- return
- // next tick
- case <-t.C:
- c.sweep()
- t.Reset(freq)
- }
+ if ok = c.stop == nil; ok {
+ // Not yet running, schedule us
+ c.stop = schedule(c.sweep, freq)
+ }
+ // Done with lock
+ c.mu.Unlock()
+ return
+ }
+ func (c *TTLCache[K, V]) Stop() (ok bool) {
+ // Safely stop
+ c.mu.Lock()
+ if ok = c.stop != nil; ok {
+ // We're running, cancel evicts
+ c.stop()
+ c.stop = nil
+ }
+ // Done with lock
+ c.mu.Unlock()
+ return
+ }
// sweep attempts to evict expired items (with callback!) from cache.
- func (c *TTLCache[K, V]) sweep() {
+ func (c *TTLCache[K, V]) sweep(now time.Time) {
// Lock and defer unlock (in case of hook panic)
c.mu.Lock()
defer c.mu.Unlock()
- // Fetch current time for TTL check
- now := time.Now()
// Sweep the cache for old items!
for key, item := range c.cache {
if now.After(item.expiry) {
@@ -116,9 +93,9 @@ func (c *TTLCache[K, V]) SetEvictionCallback(hook Hook[K, V]) {
}
// Safely set evict hook
- c.Lock()
+ c.mu.Lock()
c.evict = hook
- c.Unlock()
+ c.mu.Unlock()
}
func (c *TTLCache[K, V]) SetInvalidateCallback(hook Hook[K, V]) {
@@ -128,14 +105,14 @@ func (c *TTLCache[K, V]) SetInvalidateCallback(hook Hook[K, V]) {
}
// Safely set invalidate hook
- c.Lock()
+ c.mu.Lock()
c.invalid = hook
- c.Unlock()
+ c.mu.Unlock()
}
func (c *TTLCache[K, V]) SetTTL(ttl time.Duration, update bool) {
// Safely update TTL
- c.Lock()
+ c.mu.Lock()
diff := ttl - c.ttl
c.ttl = ttl
@@ -147,13 +124,13 @@ func (c *TTLCache[K, V]) SetTTL(ttl time.Duration, update bool) {
}
// We're done
- c.Unlock()
+ c.mu.Unlock()
}
func (c *TTLCache[K, V]) Get(key K) (V, bool) {
- c.Lock()
+ c.mu.Lock()
value, ok := c.GetUnsafe(key)
- c.Unlock()
+ c.mu.Unlock()
return value, ok
}
@@ -169,9 +146,9 @@ func (c *TTLCache[K, V]) GetUnsafe(key K) (V, bool) {
}
func (c *TTLCache[K, V]) Put(key K, value V) bool {
- c.Lock()
+ c.mu.Lock()
success := c.PutUnsafe(key, value)
- c.Unlock()
+ c.mu.Unlock()
return success
}
@@ -192,8 +169,8 @@ func (c *TTLCache[K, V]) PutUnsafe(key K, value V) bool {
}
func (c *TTLCache[K, V]) Set(key K, value V) {
- c.Lock()
- defer c.Unlock() // defer in case of hook panic
+ c.mu.Lock()
+ defer c.mu.Unlock() // defer in case of hook panic
c.SetUnsafe(key, value)
}
@@ -215,9 +192,9 @@ func (c *TTLCache[K, V]) SetUnsafe(key K, value V) {
}
func (c *TTLCache[K, V]) CAS(key K, cmp V, swp V) bool {
- c.Lock()
+ c.mu.Lock()
ok := c.CASUnsafe(key, cmp, swp)
- c.Unlock()
+ c.mu.Unlock()
return ok
}
@@ -240,9 +217,9 @@ func (c *TTLCache[K, V]) CASUnsafe(key K, cmp V, swp V) bool {
}
func (c *TTLCache[K, V]) Swap(key K, swp V) V {
- c.Lock()
+ c.mu.Lock()
old := c.SwapUnsafe(key, swp)
- c.Unlock()
+ c.mu.Unlock()
return old
}
@@ -267,9 +244,9 @@ func (c *TTLCache[K, V]) SwapUnsafe(key K, swp V) V {
}
func (c *TTLCache[K, V]) Has(key K) bool {
- c.Lock()
+ c.mu.Lock()
ok := c.HasUnsafe(key)
- c.Unlock()
+ c.mu.Unlock()
return ok
}
@@ -280,8 +257,8 @@ func (c *TTLCache[K, V]) HasUnsafe(key K) bool {
}
func (c *TTLCache[K, V]) Invalidate(key K) bool {
- c.Lock()
- defer c.Unlock()
+ c.mu.Lock()
+ defer c.mu.Unlock()
return c.InvalidateUnsafe(key)
}
@@ -300,8 +277,8 @@ func (c *TTLCache[K, V]) InvalidateUnsafe(key K) bool {
}
func (c *TTLCache[K, V]) Clear() {
- c.Lock()
- defer c.Unlock()
+ c.mu.Lock()
+ defer c.mu.Unlock()
c.ClearUnsafe()
}
@@ -314,9 +291,9 @@ func (c *TTLCache[K, V]) ClearUnsafe() {
}
func (c *TTLCache[K, V]) Size() int {
- c.Lock()
+ c.mu.Lock()
sz := c.SizeUnsafe()
- c.Unlock()
+ c.mu.Unlock()
return sz
}