Mirror of https://github.com/superseriousbusiness/gotosocial, synced 2025-06-05 21:59:39 +02:00
[performance] update remaining worker pools to use queues (#2865)
* start replacing client + federator + media workers with new worker + queue types
* refactor federatingDB.Delete(), drop queued messages when deleting account / status
* move all queue purging to the processor workers
* undo toolchain updates
* code comments, ensure dereferencer worker pool gets started
* update gruf libraries in readme
* start the job scheduler separately to the worker pools
* reshuffle ordering of server.go + remove duplicate worker start / stop
* update go-list version
* fix vendoring
* move queue invalidation to before wiping / deletion, to ensure queued work is not dropped
* add logging to worker processing functions in testrig, don't start workers in unexpected places
* update go-structr to add (+ then rely on) QueueCtx{} type
* ensure more worker pools get started properly in tests
* fix account test suite queue popping logic, ensure noop workers do not pull from queue
* fix remaining broken tests relying on worker queue logic
* move back accidentally shuffled account deletion order
* ensure error (non-nil!!) gets passed in refactored federatingDB{}.Delete()
* silently drop deletes from accounts not permitted to
* don't warn-log on forwarded deletes
* make if/else clauses easier to parse
* use getFederatorMsg()
* improved code comment
* improved code comment re: requesting account delete checks
* remove boolean result from worker start / stop since false = already running or already stopped
* remove optional passed-in http.client
* remove worker starting from the admin CLI commands (we don't need to handle side-effects)
* update prune cli to start scheduler but not all of the workers
* fix rebase issues
* remove redundant return statements
* i'm sorry sir linter
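For orientation, here is a minimal, hypothetical sketch of the consumer pattern the commit moves the client / federator / media workers towards: worker goroutines block on a context-aware queue via PopFront(ctx) rather than having jobs pushed into a pool. The message type and the runWorkers / enqueue helpers are illustrative only, and queue construction / index configuration is elided; GoToSocial's real worker pool types live in its internal packages and differ in detail.

package workers

import (
	"context"
	"sync"

	"codeberg.org/gruf/go-structr"
)

// message stands in for the client API / federator / media
// message types that the real worker pools process.
type message struct {
	ID string
}

// runWorkers starts n goroutines that each block on the queue via
// PopFront(ctx) and exit cleanly once ctx is cancelled.
func runWorkers(ctx context.Context, n int, q *structr.QueueCtx[*message], process func(context.Context, *message)) *sync.WaitGroup {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				// Blocks until a message is queued,
				// or until ctx is done.
				msg, ok := q.PopFront(ctx)
				if !ok {
					return // ctx cancelled
				}
				process(ctx, msg)
			}
		}()
	}
	return &wg
}

// enqueue is the producer side: callers push and return immediately,
// leaving delivery to whichever idle worker wakes up first.
func enqueue(q *structr.QueueCtx[*message], msgs ...*message) {
	q.PushBack(msgs...)
}

Because pending work now sits in the queue rather than inside per-worker goroutines, the processor can also purge queued messages when an account or status is deleted, as the commit message notes.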
vendor/codeberg.org/gruf/go-structr/cache.go (generated, vendored): 61 changed lines
@@ -150,10 +150,10 @@ func (c *Cache[T]) Get(index *Index, keys ...Key) []T {
 	// Acquire lock.
 	c.mutex.Lock()
+	defer c.mutex.Unlock()
 
 	// Check cache init.
 	if c.copy == nil {
-		c.mutex.Unlock()
 		panic("not initialized")
 	}
 
@@ -173,9 +173,6 @@ func (c *Cache[T]) Get(index *Index, keys ...Key) []T {
 		})
 	}
 
-	// Done with lock.
-	c.mutex.Unlock()
-
 	return values
 }
 
@@ -185,12 +182,12 @@ func (c *Cache[T]) Put(values ...T) {
 	// Acquire lock.
 	c.mutex.Lock()
 
-	// Get func ptrs.
-	invalid := c.invalid
+	// Wrap unlock to only do once.
+	unlock := once(c.mutex.Unlock)
+	defer unlock()
 
 	// Check cache init.
 	if c.copy == nil {
-		c.mutex.Unlock()
 		panic("not initialized")
 	}
 
@@ -203,8 +200,12 @@ func (c *Cache[T]) Put(values ...T) {
 		)
 	}
 
-	// Done with lock.
-	c.mutex.Unlock()
+	// Get func ptrs.
+	invalid := c.invalid
+
+	// Done with
+	// the lock.
+	unlock()
 
 	if invalid != nil {
 		// Pass all invalidated values
@@ -241,13 +242,13 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
 	// Acquire lock.
 	c.mutex.Lock()
 
-	// Get func ptrs.
-	ignore := c.ignore
+	// Wrap unlock to only do once.
+	unlock := once(c.mutex.Unlock)
+	defer unlock()
 
 	// Check init'd.
 	if c.copy == nil ||
-		ignore == nil {
-		c.mutex.Unlock()
+		c.ignore == nil {
 		panic("not initialized")
 	}
 
@@ -273,8 +274,12 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
 		}
 	}
 
-	// Done with lock.
-	c.mutex.Unlock()
+	// Get func ptrs.
+	ignore := c.ignore
+
+	// Done with
+	// the lock.
+	unlock()
 
 	if ok {
 		// item found!
@@ -325,9 +330,12 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
 	// Acquire lock.
 	c.mutex.Lock()
 
+	// Wrap unlock to only do once.
+	unlock := once(c.mutex.Unlock)
+	defer unlock()
+
 	// Check init'd.
 	if c.copy == nil {
-		c.mutex.Unlock()
 		panic("not initialized")
 	}
 
@@ -365,8 +373,9 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
 		i++
 	}
 
-	// Done with lock.
-	c.mutex.Unlock()
+	// Done with
+	// the lock.
+	unlock()
 
 	// Load uncached values.
 	uncached, err := load(keys)
@@ -374,8 +383,20 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
 		return nil, err
 	}
 
-	// Insert uncached.
-	c.Put(uncached...)
+	// Acquire lock.
+	c.mutex.Lock()
+
+	// Store all uncached values.
+	for i := range uncached {
+		c.store_value(
+			nil,
+			Key{},
+			uncached[i],
+		)
+	}
+
+	// Done with lock.
+	c.mutex.Unlock()
 
 	// Append uncached to return values.
 	values = append(values, uncached...)
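The common thread in these cache.go hunks is a once-wrapped unlock: the mutex release is both deferred (so a panic or early return can never leave the cache locked) and callable explicitly as soon as the critical section ends (so user hooks and load callbacks run outside the lock). A self-contained sketch of that pattern, with illustrative names rather than the actual Cache internals:

package example

import "sync"

// once wraps fn so that repeated calls only run it once. This mirrors
// the helper added in util.go at the end of this diff; like that helper,
// it is only meant to be called from a single goroutine.
func once(fn func()) func() {
	var done bool
	return func() {
		if done {
			return
		}
		done = true
		fn()
	}
}

// loadValue shows the locking shape the cache.go changes adopt: defer a
// once-wrapped unlock for panic safety, but call it early so the slow
// load() runs outside the critical section.
func loadValue(mu *sync.Mutex, cached map[string]string, key string, load func() string) string {
	mu.Lock()
	unlock := once(mu.Unlock)
	defer unlock() // safety net on panic / early return

	if v, ok := cached[key]; ok {
		return v // deferred unlock releases the mutex
	}

	// Done with the lock before the slow load.
	unlock()

	v := load()

	// Re-acquire to store the loaded value; the deferred
	// unlock() at return is now a no-op.
	mu.Lock()
	cached[key] = v
	mu.Unlock()

	return v
}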
vendor/codeberg.org/gruf/go-structr/queue_ctx.go (generated, vendored, new file): 134 lines
@@ -0,0 +1,134 @@
package structr

import (
	"context"
)

// QueueCtx is a context-aware form of Queue{}.
type QueueCtx[StructType any] struct {
	Queue[StructType]
	ch chan struct{}
}

// PopFront pops the current value at front of the queue, else blocking on ctx.
func (q *QueueCtx[T]) PopFront(ctx context.Context) (T, bool) {
	return q.pop(ctx, func() *list_elem {
		return q.queue.head
	})
}

// PopBack pops the current value at back of the queue, else blocking on ctx.
func (q *QueueCtx[T]) PopBack(ctx context.Context) (T, bool) {
	return q.pop(ctx, func() *list_elem {
		return q.queue.tail
	})
}

// PushFront pushes values to front of queue.
func (q *QueueCtx[T]) PushFront(values ...T) {
	q.mutex.Lock()
	for i := range values {
		item := q.index(values[i])
		q.queue.push_front(&item.elem)
	}
	if q.ch != nil {
		close(q.ch)
		q.ch = nil
	}
	q.mutex.Unlock()
}

// PushBack pushes values to back of queue.
func (q *QueueCtx[T]) PushBack(values ...T) {
	q.mutex.Lock()
	for i := range values {
		item := q.index(values[i])
		q.queue.push_back(&item.elem)
	}
	if q.ch != nil {
		close(q.ch)
		q.ch = nil
	}
	q.mutex.Unlock()
}

// Wait returns a ptr to the current ctx channel,
// this will block until next push to the queue.
func (q *QueueCtx[T]) Wait() <-chan struct{} {
	q.mutex.Lock()
	if q.ch == nil {
		q.ch = make(chan struct{})
	}
	ctx := q.ch
	q.mutex.Unlock()
	return ctx
}

func (q *QueueCtx[T]) pop(ctx context.Context, next func() *list_elem) (T, bool) {
	if next == nil {
		panic("nil fn")
	} else if ctx == nil {
		panic("nil ctx")
	}

	// Acquire lock.
	q.mutex.Lock()

	var elem *list_elem

	for {
		// Get element.
		elem = next()
		if elem != nil {
			break
		}

		if q.ch == nil {
			// Allocate new ctx channel.
			q.ch = make(chan struct{})
		}

		// Get current
		// ch pointer.
		ch := q.ch

		// Unlock queue.
		q.mutex.Unlock()

		select {
		// Ctx cancelled.
		case <-ctx.Done():
			var z T
			return z, false

		// Pushed!
		case <-ch:
		}

		// Relock queue.
		q.mutex.Lock()
	}

	// Cast the indexed item from elem.
	item := (*indexed_item)(elem.data)

	// Extract item value.
	value := item.data.(T)

	// Delete queued.
	q.delete(item)

	// Get func ptrs.
	pop := q.Queue.pop

	// Done with lock.
	q.mutex.Unlock()

	if pop != nil {
		// Pass to
		// user hook.
		pop(value)
	}

	return value, true
}
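Wait() is the piece that lets external code block on queue activity without popping. A small hypothetical helper showing how a caller might combine it with context cancellation; note that by the time the returned channel is closed another consumer may already have taken the item, which is why pop() above re-checks in a loop.

package workers

import (
	"context"

	"codeberg.org/gruf/go-structr"
)

// waitForPush blocks until something is pushed onto the queue or the
// context is cancelled, reporting true only in the former case. This
// mirrors what QueueCtx.pop does internally between pop attempts.
func waitForPush[T any](ctx context.Context, q *structr.QueueCtx[T]) bool {
	select {
	case <-ctx.Done():
		return false
	case <-q.Wait():
		return true
	}
}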
vendor/codeberg.org/gruf/go-structr/util.go (generated, vendored, new file): 13 lines
@@ -0,0 +1,13 @@
package structr

// once only executes 'fn' once.
func once(fn func()) func() {
	var once int32
	return func() {
		if once != 0 {
			return
		}
		once = 1
		fn()
	}
}
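This helper backs the unlock := once(c.mutex.Unlock) pattern in the cache.go hunks above: the wrapped unlock can be deferred for panic safety and also called explicitly once the critical section ends, without ever double-unlocking. The int32 flag is read and written without atomics, so the closure is only safe for a single goroutine at a time; that holds in cache.go, where it is only ever invoked by the goroutine currently holding the lock, and is presumably why the plain flag was preferred over sync.Once.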