Mirror of https://github.com/superseriousbusiness/gotosocial, synced 2025-06-05 21:59:39 +02:00
[performance] update remaining worker pools to use queues (#2865)
* start replacing client + federator + media workers with new worker + queue types
* refactor federatingDB.Delete(), drop queued messages when deleting account / status
* move all queue purging to the processor workers
* undo toolchain updates
* code comments, ensure dereferencer worker pool gets started
* update gruf libraries in readme
* start the job scheduler separately from the worker pools
* reshuffle ordering of server.go + remove duplicate worker start / stop
* update go-list version
* fix vendoring
* move queue invalidation to before wiping / deletion, to ensure queued work is not dropped
* add logging to worker processing functions in testrig, don't start workers in unexpected places
* update go-structr to add (+ then rely on) QueueCtx{} type
* ensure more worker pools get started properly in tests
* fix remaining broken tests relying on worker queue logic
* fix account test suite queue popping logic, ensure noop workers do not pull from queue
* move back accidentally shuffled account deletion order
* ensure error (non nil!!) gets passed in refactored federatingDB{}.Delete()
* silently drop deletes from accounts not permitted to
* don't warn log on forwarded deletes
* make if else clauses easier to parse
* use getFederatorMsg()
* improved code comment
* improved code comment re: requesting account delete checks
* remove boolean result from worker start / stop since false = already running or already stopped
* remove optional passed-in http.client
* remove worker starting from the admin CLI commands (we don't need to handle side-effects)
* update prune cli to start scheduler but not all of the workers
* fix rebase issues
* remove redundant return statements
* i'm sorry sir linter
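Most of the work listed above replaces ad-hoc worker pools with workers that pull from explicit, inspectable queues. The following is a minimal, self-contained sketch of that general shape in Go: producers push messages onto a mutex-guarded FIFO, and a pool of workers pops them with a context-aware call until shut down. All names here (Queue, PopCtx, startWorkers) are illustrative only; they are not the gruf/go-structr or GoToSocial APIs referenced in the commit.

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// Queue is a minimal FIFO guarded by a mutex, with a buffered channel used
// to wake a blocked worker when new items arrive.
type Queue[T any] struct {
	mu    sync.Mutex
	items []T
	wake  chan struct{}
}

func NewQueue[T any]() *Queue[T] {
	return &Queue[T]{wake: make(chan struct{}, 1)}
}

// Push appends an item and nudges a waiting worker (non-blocking).
func (q *Queue[T]) Push(item T) {
	q.mu.Lock()
	q.items = append(q.items, item)
	q.mu.Unlock()
	select {
	case q.wake <- struct{}{}:
	default:
	}
}

// PopCtx removes and returns the oldest item, blocking until one is
// available. It returns false once ctx is done and the queue has drained.
func (q *Queue[T]) PopCtx(ctx context.Context) (T, bool) {
	for {
		q.mu.Lock()
		if len(q.items) > 0 {
			item := q.items[0]
			q.items = q.items[1:]
			q.mu.Unlock()
			return item, true
		}
		q.mu.Unlock()

		select {
		case <-ctx.Done():
			var zero T
			return zero, false
		case <-q.wake:
			// Woken by a push; loop and re-check the queue.
		}
	}
}

// startWorkers runs n workers that pop from the queue and call process until
// ctx is cancelled; the returned function blocks until all workers exit.
func startWorkers[T any](ctx context.Context, q *Queue[T], n int, process func(context.Context, T)) func() {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				msg, ok := q.PopCtx(ctx)
				if !ok {
					return
				}
				process(ctx, msg)
			}
		}()
	}
	return wg.Wait
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	q := NewQueue[string]()

	var handled sync.WaitGroup
	handled.Add(2)
	stop := startWorkers(ctx, q, 4, func(_ context.Context, msg string) {
		fmt.Println("processed:", msg)
		handled.Done()
	})

	q.Push("deliver status to remote instance")
	q.Push("dereference remote account")
	handled.Wait() // both messages processed

	cancel() // tell idle workers to stop
	stop()   // wait for all of them to exit
}
```

A structure-backed queue, as opposed to a bare channel, is what makes items like "drop queued messages when deleting account / status" possible: the processor can inspect and purge pending work under the queue's lock before the deletion proceeds.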
Changed file: vendor/codeberg.org/gruf/go-structr/cache.go (generated, vendored), 61 changed lines
@@ -150,10 +150,10 @@ func (c *Cache[T]) Get(index *Index, keys ...Key) []T {
 	// Acquire lock.
 	c.mutex.Lock()
+	defer c.mutex.Unlock()
 
 	// Check cache init.
 	if c.copy == nil {
-		c.mutex.Unlock()
 		panic("not initialized")
 	}
 
@@ -173,9 +173,6 @@ func (c *Cache[T]) Get(index *Index, keys ...Key) []T {
 		})
 	}
 
-	// Done with lock.
-	c.mutex.Unlock()
-
 	return values
 }
 
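The two Get() hunks above replace per-exit-path unlocking (one c.mutex.Unlock() before the panic, another before the return) with a single defer taken right after the lock is acquired. Below is a small standalone illustration of the before/after shape, with a simplified cache type rather than the go-structr source.

```go
package cache

import "sync"

type cache struct {
	mu    sync.Mutex
	items map[string]string
}

// Before: every exit path has to remember its own unlock, including the
// panic branch.
func (c *cache) getExplicit(key string) (string, bool) {
	c.mu.Lock()
	if c.items == nil {
		c.mu.Unlock() // easy to miss when a new exit path is added
		panic("not initialized")
	}
	v, ok := c.items[key]
	c.mu.Unlock()
	return v, ok
}

// After: a single deferred unlock covers the return path and the panic path.
func (c *cache) getDeferred(key string) (string, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.items == nil {
		panic("not initialized")
	}
	v, ok := c.items[key]
	return v, ok
}
```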
@@ -185,12 +182,12 @@ func (c *Cache[T]) Put(values ...T) {
 	// Acquire lock.
 	c.mutex.Lock()
 
-	// Get func ptrs.
-	invalid := c.invalid
+	// Wrap unlock to only do once.
+	unlock := once(c.mutex.Unlock)
+	defer unlock()
 
 	// Check cache init.
 	if c.copy == nil {
-		c.mutex.Unlock()
 		panic("not initialized")
 	}
 
@@ -203,8 +200,12 @@ func (c *Cache[T]) Put(values ...T) {
 		)
 	}
 
-	// Done with lock.
-	c.mutex.Unlock()
+	// Get func ptrs.
+	invalid := c.invalid
+
+	// Done with
+	// the lock.
+	unlock()
 
 	if invalid != nil {
 		// Pass all invalidated values
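Put() (and LoadOne/Load below) take a different route because they must call a user-supplied callback after the lock is released: the unlock is wrapped in a call-at-most-once function, deferred as a panic safety net, and also invoked explicitly on the hot path once the lock is no longer needed. The sketch below shows how such a wrapper can work, under the assumption that go-structr's own once() helper behaves similarly; names and types here are illustrative.

```go
package cache

import "sync"

// once wraps fn so that only the first call runs it; later calls are no-ops.
// It is not safe for concurrent callers, but here both calls (the explicit
// one and the deferred one) happen on the same goroutine.
func once(fn func()) func() {
	var done bool
	return func() {
		if !done {
			done = true
			fn()
		}
	}
}

type entry struct{ key, value string }

type cache struct {
	mu         sync.Mutex
	items      map[string]string
	invalidate func(entry) // optional user callback for overwritten entries
}

func (c *cache) Put(e entry) {
	// Acquire lock, wrapping the unlock so it runs at most once.
	c.mu.Lock()
	unlock := once(c.mu.Unlock)
	defer unlock() // safety net: still runs if anything below panics

	old, had := c.items[e.key]
	c.items[e.key] = e.value

	// Read the callback pointer while still holding the lock ...
	invalidate := c.invalidate

	// ... then release the lock before calling it, so a slow or re-entrant
	// callback cannot stall or deadlock other cache users.
	unlock()

	if had && invalidate != nil {
		invalidate(entry{key: e.key, value: old})
	}
}
```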
@@ -241,13 +242,13 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, error) {
 	// Acquire lock.
 	c.mutex.Lock()
 
-	// Get func ptrs.
-	ignore := c.ignore
+	// Wrap unlock to only do once.
+	unlock := once(c.mutex.Unlock)
+	defer unlock()
 
 	// Check init'd.
 	if c.copy == nil ||
-		ignore == nil {
-		c.mutex.Unlock()
+		c.ignore == nil {
 		panic("not initialized")
 	}
 
@@ -273,8 +274,12 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, error) {
 		}
 	}
 
-	// Done with lock.
-	c.mutex.Unlock()
+	// Get func ptrs.
+	ignore := c.ignore
+
+	// Done with
+	// the lock.
+	unlock()
 
 	if ok {
 		// item found!
@@ -325,9 +330,12 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error)) ([]T, error) {
 	// Acquire lock.
 	c.mutex.Lock()
 
+	// Wrap unlock to only do once.
+	unlock := once(c.mutex.Unlock)
+	defer unlock()
+
 	// Check init'd.
 	if c.copy == nil {
-		c.mutex.Unlock()
 		panic("not initialized")
 	}
 
@@ -365,8 +373,9 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error)) ([]T, error) {
 		i++
 	}
 
-	// Done with lock.
-	c.mutex.Unlock()
+	// Done with
+	// the lock.
+	unlock()
 
 	// Load uncached values.
 	uncached, err := load(keys)
@@ -374,8 +383,20 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error)) ([]T, error) {
 		return nil, err
 	}
 
-	// Insert uncached.
-	c.Put(uncached...)
+	// Acquire lock.
+	c.mutex.Lock()
+
+	// Store all uncached values.
+	for i := range uncached {
+		c.store_value(
+			nil,
+			Key{},
+			uncached[i],
+		)
+	}
+
+	// Done with lock.
+	c.mutex.Unlock()
 
 	// Append uncached to return values.
 	values = append(values, uncached...)