[chore] consolidate caching libraries (#704)

* add miekg/dns dependency

* set/validate accountDomain

* move finger to dereferencer

* totally break GetRemoteAccount

* start reworking finger func a bit

* start reworking getRemoteAccount a bit

* move mention parts to namestring

* rework webfingerget

* use util function to extract webfinger parts

* use accountDomain

* rework finger again, final form

* just a real nasty commit, the worst

* remove refresh from account

* use new ASRepToAccount signature

* fix incorrect debug call

* fix for new getRemoteAccount

* rework GetRemoteAccount

* start updating tests to remove repetition

* break a lot of tests
Move shared test logic into the testrig,
rather than having it scattered all over
the place. This allows us to just mock
the transport controller once, and have
all tests use it (unless they need not to
for some other reason).
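
To illustrate the idea (a minimal sketch using only the standard library; the helper name `NewMockHTTPClient` and the canned-response wiring are placeholders, not GoToSocial's actual testrig API):

```go
package testrig

import (
	"io"
	"net/http"
	"strings"
)

// mockRoundTripper serves canned responses keyed by request URL, so every
// test exercises the same fake remote endpoints through one shared client.
type mockRoundTripper struct {
	responses map[string]string
}

func (m mockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	body, ok := m.responses[req.URL.String()]
	status := http.StatusOK
	if !ok {
		status = http.StatusNotFound
	}
	return &http.Response{
		StatusCode: status,
		Header:     make(http.Header),
		Body:       io.NopCloser(strings.NewReader(body)),
	}, nil
}

// NewMockHTTPClient is the kind of single shared helper meant here: tests
// take this client rather than each wiring up their own transport.
func NewMockHTTPClient(responses map[string]string) *http.Client {
	return &http.Client{Transport: mockRoundTripper{responses: responses}}
}
```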

* fix up tests to use main mock httpclient

* webfinger only if necessary

* cheeky linting with the lads

* update mentionName regex
recognize instance accounts
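
As a rough illustration only (not the actual GoToSocial pattern), a mention regex that recognizes instance accounts has to allow dots in the username segment, since instance accounts use the bare host as their username:

```go
package util

import "regexp"

// mentionName here is illustrative: it permits dots and hyphens in the
// username segment so instance accounts such as @example.org@example.org
// match, alongside ordinary mentions like @someone@example.org and @local.
var mentionName = regexp.MustCompile(
	`^@([a-zA-Z0-9_]+(?:[.-][a-zA-Z0-9_]+)*)(?:@([a-zA-Z0-9.-]+))?$`,
)

// ParseMentionName returns the username and (possibly empty) domain parts.
func ParseMentionName(namestring string) (username, domain string, ok bool) {
	m := mentionName.FindStringSubmatch(namestring)
	if m == nil {
		return "", "", false
	}
	return m[1], m[2], true
}
```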

* don't finger instance accounts

* test webfinger part extraction
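
The helper under test could look something like this sketch (standard library only; the function name and exact error handling are assumptions, not the real util code):

```go
package util

import (
	"fmt"
	"strings"
)

// ExtractWebfingerParts splits a webfinger resource such as
// "acct:someone@example.org" (or "@someone@example.org") into its
// username and domain parts.
func ExtractWebfingerParts(resource string) (username, domain string, err error) {
	s := strings.TrimPrefix(resource, "acct:")
	s = strings.TrimPrefix(s, "@")
	parts := strings.Split(s, "@")
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return "", "", fmt.Errorf("%q is not a valid webfinger resource", resource)
	}
	return parts[0], parts[1], nil
}
```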

* increase default worker count to 4 per cpu
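
In other words (sketch; the constant and function names are illustrative, only the multiplier comes from the commit):

```go
package config

import "runtime"

// workersPerCPU is the new default multiplier: an 8-core machine now gets
// 32 workers for the processing queues.
const workersPerCPU = 4

func defaultWorkerCount() int {
	return runtime.NumCPU() * workersPerCPU
}
```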

* don't repeat regex parsing

* final search for discovered accountDomain

* be more permissive in namestring lookup

* add more extraction tests

* simplify GetParseMentionFunc

* skip long search if local account

* fix broken test

* consolidate to all use same caching libraries

Signed-off-by: kim <grufwub@gmail.com>

* perform more caching in the database layer

Signed-off-by: kim <grufwub@gmail.com>
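
The shape of that change, as a self-contained sketch (the model, SQL, and cache type are simplified stand-ins; only the placement of the cache inside the db layer reflects the PR):

```go
package db

import (
	"context"
	"database/sql"
	"sync"
)

// Account is a pared-down model just for this sketch.
type Account struct {
	ID       string
	Username string
}

// accountDB wraps the database handle with a simple in-memory cache. The
// concrete cache type in the PR is different; what matters is the lookup
// order: check cache, fall back to SQL, then populate the cache, so every
// caller benefits without knowing a cache exists.
type accountDB struct {
	conn  *sql.DB
	mu    sync.Mutex
	cache map[string]*Account
}

func (a *accountDB) GetAccountByID(ctx context.Context, id string) (*Account, error) {
	// 1. serve from cache when possible
	a.mu.Lock()
	if acct, ok := a.cache[id]; ok {
		a.mu.Unlock()
		return acct, nil
	}
	a.mu.Unlock()

	// 2. otherwise hit the database...
	acct := &Account{}
	row := a.conn.QueryRowContext(ctx, `SELECT id, username FROM accounts WHERE id = ?`, id)
	if err := row.Scan(&acct.ID, &acct.Username); err != nil {
		return nil, err
	}

	// 3. ...and remember the result for next time
	a.mu.Lock()
	a.cache[id] = acct
	a.mu.Unlock()
	return acct, nil
}
```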

* remove ASNote cache

Signed-off-by: kim <grufwub@gmail.com>

* update cache library, improve db tracing hooks

Signed-off-by: kim <grufwub@gmail.com>

* return ErrNoEntries if no account status IDs found, small formatting changes

Signed-off-by: kim <grufwub@gmail.com>
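
A sketch of that behaviour (the query and function signature are illustrative; GoToSocial defines its own ErrNoEntries sentinel in its db package):

```go
package db

import (
	"database/sql"
	"errors"
)

// ErrNoEntries mirrors the sentinel used in the PR: callers can distinguish
// "nothing found" from a real database failure.
var ErrNoEntries = errors.New("no entries")

// GetAccountStatusIDs returns the status IDs for an account, or ErrNoEntries
// if the account has no statuses at all.
func GetAccountStatusIDs(conn *sql.DB, accountID string) ([]string, error) {
	rows, err := conn.Query(`SELECT id FROM statuses WHERE account_id = ?`, accountID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var ids []string
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	if len(ids) == 0 {
		return nil, ErrNoEntries
	}
	return ids, nil
}
```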

* fix tests, thanks tobi!

Signed-off-by: kim <grufwub@gmail.com>

Co-authored-by: tsmethurst <tobi.smethurst@protonmail.com>
Commit 7cc40302a5 by kim, committed via GitHub
Date: 2022-07-10 16:18:21 +01:00
Parent: 211266c072
67 changed files with 3159 additions and 1244 deletions


@@ -1,18 +0,0 @@
language: go
go:
  - "1.14"
  - "1.13"
git:
  depth: 1
install:
  - go install -race std
  - go install golang.org/x/tools/cmd/cover
  - go install golang.org/x/lint/golint
  - export PATH=$HOME/gopath/bin:$PATH
script:
  - golint .
  - go test -cover -race -count=1 -timeout=30s -run .
  - cd bench; go test -run=Bench.* -bench=. -benchmem


@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2018 Rene Kroon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,71 +0,0 @@
## TTLCache - an in-memory cache with expiration
TTLCache is a simple key/value cache in Go with the following features:
1. Thread-safe
2. Individual or global expiration time, you can choose
3. Auto-extending expiration on `Get` -or- DNS-style TTL, see `SkipTtlExtensionOnHit(bool)`
4. Fast and memory efficient
5. Can trigger a callback on key expiration
6. Clean up resources by calling `Close()` at the end of the lifecycle.
Note (issue #25): by default, for historic reasons, the TTL is reset on each cache hit; you need to explicitly configure the cache to use a TTL that will not get extended.
[![Build Status](https://travis-ci.org/ReneKroon/ttlcache.svg?branch=master)](https://travis-ci.org/ReneKroon/ttlcache)
#### Usage
```go
package main

import (
	"fmt"
	"time"

	"github.com/ReneKroon/ttlcache"
)

func main() {
	newItemCallback := func(key string, value interface{}) {
		fmt.Printf("New key(%s) added\n", key)
	}
	checkExpirationCallback := func(key string, value interface{}) bool {
		if key == "key1" {
			// if the key equals "key1", the value
			// will not be allowed to expire
			return false
		}
		// all other values are allowed to expire
		return true
	}
	expirationCallback := func(key string, value interface{}) {
		fmt.Printf("This key(%s) has expired\n", key)
	}

	cache := ttlcache.NewCache()
	defer cache.Close()

	cache.SetTTL(10 * time.Second)
	cache.SetExpirationCallback(expirationCallback)
	cache.SetCheckExpirationCallback(checkExpirationCallback)
	cache.SetNewItemCallback(newItemCallback)

	cache.Set("key", "value")
	cache.SetWithTTL("keyWithTTL", "value", 10*time.Second)

	value, exists := cache.Get("key")
	count := cache.Count()
	result := cache.Remove("key")
	fmt.Println(value, exists, count, result)
}
```
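
Following on from the note above, this second snippet shows the DNS-style (non-extending) TTL configuration together with per-item expiry control; it uses only calls defined elsewhere in this package:

```go
package main

import (
	"time"

	"github.com/ReneKroon/ttlcache"
)

func main() {
	// keep a fixed, DNS-style TTL: hits via Get no longer extend expiry
	cache := ttlcache.NewCache()
	defer cache.Close()

	cache.SkipTtlExtensionOnHit(true)
	cache.SetTTL(30 * time.Second)

	// veto expiration for keys we still care about
	cache.SetCheckExpirationCallback(func(key string, value interface{}) bool {
		return key != "pinned" // returning false keeps the item alive
	})

	// pin one key via the callback above, give another its own short TTL
	cache.Set("pinned", "value")
	cache.SetWithTTL("shortlived", "value", 2*time.Second)
}
```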
#### TTLCache - Some design considerations
1. The complexity of the current cache is already quite high. Therefore I will not add 'convenience' features like an interface to supply a function to get missing keys.
2. The locking should be done only in the functions of the Cache struct. Otherwise data races can occur or recursive locks are needed, both of which are unwanted.
3. I prefer correct functionality over fast tests. It's ok for new tests to take seconds to prove something.
#### Original Project
TTLCache was forked from [wunderlist/ttlcache](https://github.com/wunderlist/ttlcache) to add extra functions not available in the original scope.
The main differences are:
1. An item can store any kind of object; previously, only strings could be saved
2. Optionally, you can add callbacks too: check if a value should expire, be notified if a value expires, and be notified when new values are added to the cache
3. The expiration can be either global or per item
4. Items can exist without an expiration time
5. Expirations and callbacks are realtime. There is no longer a polling interval for checks; it's now done with a heap.


@@ -1,307 +0,0 @@
package ttlcache
import (
"sync"
"time"
)
// CheckExpireCallback is used as a callback for an external check on item expiration
type checkExpireCallback func(key string, value interface{}) bool
// ExpireCallback is used as a callback on item expiration or when notifying of an item new to the cache
type expireCallback func(key string, value interface{})
// Cache is a synchronized map of items that can auto-expire once stale
type Cache struct {
mutex sync.Mutex
ttl time.Duration
items map[string]*item
expireCallback expireCallback
checkExpireCallback checkExpireCallback
newItemCallback expireCallback
priorityQueue *priorityQueue
expirationNotification chan bool
expirationTime time.Time
skipTTLExtension bool
shutdownSignal chan (chan struct{})
isShutDown bool
}
func (cache *Cache) getItem(key string) (*item, bool, bool) {
item, exists := cache.items[key]
if !exists || item.expired() {
return nil, false, false
}
if item.ttl >= 0 && (item.ttl > 0 || cache.ttl > 0) {
if cache.ttl > 0 && item.ttl == 0 {
item.ttl = cache.ttl
}
if !cache.skipTTLExtension {
item.touch()
}
cache.priorityQueue.update(item)
}
expirationNotification := false
if cache.expirationTime.After(time.Now().Add(item.ttl)) {
expirationNotification = true
}
return item, exists, expirationNotification
}
func (cache *Cache) startExpirationProcessing() {
timer := time.NewTimer(time.Hour)
for {
var sleepTime time.Duration
cache.mutex.Lock()
if cache.priorityQueue.Len() > 0 {
sleepTime = time.Until(cache.priorityQueue.items[0].expireAt)
if sleepTime < 0 && cache.priorityQueue.items[0].expireAt.IsZero() {
sleepTime = time.Hour
} else if sleepTime < 0 {
sleepTime = time.Microsecond
}
if cache.ttl > 0 {
sleepTime = min(sleepTime, cache.ttl)
}
} else if cache.ttl > 0 {
sleepTime = cache.ttl
} else {
sleepTime = time.Hour
}
cache.expirationTime = time.Now().Add(sleepTime)
cache.mutex.Unlock()
timer.Reset(sleepTime)
select {
case shutdownFeedback := <-cache.shutdownSignal:
timer.Stop()
cache.mutex.Lock()
if cache.priorityQueue.Len() > 0 {
cache.evictjob()
}
cache.mutex.Unlock()
shutdownFeedback <- struct{}{}
return
case <-timer.C:
timer.Stop()
cache.mutex.Lock()
if cache.priorityQueue.Len() == 0 {
cache.mutex.Unlock()
continue
}
cache.cleanjob()
cache.mutex.Unlock()
case <-cache.expirationNotification:
timer.Stop()
continue
}
}
}
func (cache *Cache) evictjob() {
// index will only be advanced if the current entry will not be evicted
i := 0
for item := cache.priorityQueue.items[i]; ; item = cache.priorityQueue.items[i] {
cache.priorityQueue.remove(item)
delete(cache.items, item.key)
if cache.expireCallback != nil {
go cache.expireCallback(item.key, item.data)
}
if cache.priorityQueue.Len() == 0 {
return
}
}
}
func (cache *Cache) cleanjob() {
// index will only be advanced if the current entry will not be evicted
i := 0
for item := cache.priorityQueue.items[i]; item.expired(); item = cache.priorityQueue.items[i] {
if cache.checkExpireCallback != nil {
if !cache.checkExpireCallback(item.key, item.data) {
item.touch()
cache.priorityQueue.update(item)
i++
if i == cache.priorityQueue.Len() {
break
}
continue
}
}
cache.priorityQueue.remove(item)
delete(cache.items, item.key)
if cache.expireCallback != nil {
go cache.expireCallback(item.key, item.data)
}
if cache.priorityQueue.Len() == 0 {
return
}
}
}
// Close calls Purge, and then stops the goroutine that does ttl checking, for a clean shutdown.
// The cache no longer cleans up after the first call to Close; repeated calls are safe though.
func (cache *Cache) Close() {
cache.mutex.Lock()
if !cache.isShutDown {
cache.isShutDown = true
cache.mutex.Unlock()
feedback := make(chan struct{})
cache.shutdownSignal <- feedback
<-feedback
close(cache.shutdownSignal)
} else {
cache.mutex.Unlock()
}
cache.Purge()
}
// Set is a thread-safe way to add new items to the map
func (cache *Cache) Set(key string, data interface{}) {
cache.SetWithTTL(key, data, ItemExpireWithGlobalTTL)
}
// SetWithTTL is a thread-safe way to add new items to the map with individual ttl
func (cache *Cache) SetWithTTL(key string, data interface{}, ttl time.Duration) {
cache.mutex.Lock()
item, exists, _ := cache.getItem(key)
if exists {
item.data = data
item.ttl = ttl
} else {
item = newItem(key, data, ttl)
cache.items[key] = item
}
if item.ttl >= 0 && (item.ttl > 0 || cache.ttl > 0) {
if cache.ttl > 0 && item.ttl == 0 {
item.ttl = cache.ttl
}
item.touch()
}
if exists {
cache.priorityQueue.update(item)
} else {
cache.priorityQueue.push(item)
}
cache.mutex.Unlock()
if !exists && cache.newItemCallback != nil {
cache.newItemCallback(key, data)
}
cache.expirationNotification <- true
}
// Get is a thread-safe way to lookup items
// Every lookup also touches the item, hence extending its life
func (cache *Cache) Get(key string) (interface{}, bool) {
cache.mutex.Lock()
item, exists, triggerExpirationNotification := cache.getItem(key)
var dataToReturn interface{}
if exists {
dataToReturn = item.data
}
cache.mutex.Unlock()
if triggerExpirationNotification {
cache.expirationNotification <- true
}
return dataToReturn, exists
}
func (cache *Cache) Remove(key string) bool {
cache.mutex.Lock()
object, exists := cache.items[key]
if !exists {
cache.mutex.Unlock()
return false
}
delete(cache.items, object.key)
cache.priorityQueue.remove(object)
cache.mutex.Unlock()
return true
}
// Count returns the number of items in the cache
func (cache *Cache) Count() int {
cache.mutex.Lock()
length := len(cache.items)
cache.mutex.Unlock()
return length
}
func (cache *Cache) SetTTL(ttl time.Duration) {
cache.mutex.Lock()
cache.ttl = ttl
cache.mutex.Unlock()
cache.expirationNotification <- true
}
// SetExpirationCallback sets a callback that will be called when an item expires
func (cache *Cache) SetExpirationCallback(callback expireCallback) {
cache.expireCallback = callback
}
// SetCheckExpirationCallback sets a callback that will be called when an item is about to expire
// in order to allow external code to decide whether the item expires or remains for another TTL cycle
func (cache *Cache) SetCheckExpirationCallback(callback checkExpireCallback) {
cache.checkExpireCallback = callback
}
// SetNewItemCallback sets a callback that will be called when a new item is added to the cache
func (cache *Cache) SetNewItemCallback(callback expireCallback) {
cache.newItemCallback = callback
}
// SkipTtlExtensionOnHit allows the user to change the cache behaviour. When this flag is set to true it will
// no longer extend TTL of items when they are retrieved using Get, or when their expiration condition is evaluated
// using SetCheckExpirationCallback.
func (cache *Cache) SkipTtlExtensionOnHit(value bool) {
cache.skipTTLExtension = value
}
// Purge will remove all entries
func (cache *Cache) Purge() {
cache.mutex.Lock()
cache.items = make(map[string]*item)
cache.priorityQueue = newPriorityQueue()
cache.mutex.Unlock()
}
// NewCache is a helper to create an instance of the Cache struct
func NewCache() *Cache {
shutdownChan := make(chan chan struct{})
cache := &Cache{
items: make(map[string]*item),
priorityQueue: newPriorityQueue(),
expirationNotification: make(chan bool),
expirationTime: time.Now(),
shutdownSignal: shutdownChan,
isShutDown: false,
}
go cache.startExpirationProcessing()
return cache
}
func min(duration time.Duration, second time.Duration) time.Duration {
if duration < second {
return duration
}
return second
}


@@ -1,46 +0,0 @@
package ttlcache
import (
"time"
)
const (
// ItemNotExpire will avoid the item being expired by TTL, but it can still be expired by callback etc.
ItemNotExpire time.Duration = -1
// ItemExpireWithGlobalTTL will use the global TTL when set.
ItemExpireWithGlobalTTL time.Duration = 0
)
func newItem(key string, data interface{}, ttl time.Duration) *item {
item := &item{
data: data,
ttl: ttl,
key: key,
}
// since nobody is aware yet of this item, it's safe to touch without lock here
item.touch()
return item
}
type item struct {
key string
data interface{}
ttl time.Duration
expireAt time.Time
queueIndex int
}
// Reset the item expiration time
func (item *item) touch() {
if item.ttl > 0 {
item.expireAt = time.Now().Add(item.ttl)
}
}
// Verify if the item is expired
func (item *item) expired() bool {
if item.ttl <= 0 {
return false
}
return item.expireAt.Before(time.Now())
}


@@ -1,71 +0,0 @@
package ttlcache
import (
"container/heap"
)
func newPriorityQueue() *priorityQueue {
queue := &priorityQueue{}
heap.Init(queue)
return queue
}
type priorityQueue struct {
items []*item
}
func (pq *priorityQueue) update(item *item) {
heap.Fix(pq, item.queueIndex)
}
func (pq *priorityQueue) push(item *item) {
heap.Push(pq, item)
}
func (pq *priorityQueue) pop() *item {
if pq.Len() == 0 {
return nil
}
return heap.Pop(pq).(*item)
}
func (pq *priorityQueue) remove(item *item) {
heap.Remove(pq, item.queueIndex)
}
func (pq priorityQueue) Len() int {
length := len(pq.items)
return length
}
// Less considers items with the time.Time zero value (epoch start) as greater than items with an expiration set.
func (pq priorityQueue) Less(i, j int) bool {
if pq.items[i].expireAt.IsZero() {
return false
}
if pq.items[j].expireAt.IsZero() {
return true
}
return pq.items[i].expireAt.Before(pq.items[j].expireAt)
}
func (pq priorityQueue) Swap(i, j int) {
pq.items[i], pq.items[j] = pq.items[j], pq.items[i]
pq.items[i].queueIndex = i
pq.items[j].queueIndex = j
}
func (pq *priorityQueue) Push(x interface{}) {
item := x.(*item)
item.queueIndex = len(pq.items)
pq.items = append(pq.items, item)
}
func (pq *priorityQueue) Pop() interface{} {
old := pq.items
n := len(old)
item := old[n-1]
item.queueIndex = -1
pq.items = old[0 : n-1]
return item
}