updates go-mutexes to no longer rely on unsafe linkname (#3027)
vendor/codeberg.org/gruf/go-mutexes/cond.go: 86 changed lines (generated, vendored)
@@ -2,86 +2,66 @@ package mutexes

 import (
 	"sync"
-	"unsafe"
 )

 // Cond is similar to a sync.Cond{}, but
 // it encompasses the Mutex{} within itself.
 type Cond struct {
-	notify notifyList
+	c sync.Cond
 	sync.Mutex
 }

 // See: sync.Cond{}.Wait().
 func (c *Cond) Wait() {
-	t := runtime_notifyListAdd(&c.notify)
-	c.Mutex.Unlock()
-	runtime_notifyListWait(&c.notify, t)
-	c.Mutex.Lock()
+	if c.c.L == nil {
+		c.c.L = &c.Mutex
+	}
+	c.c.Wait()
 }

 // See: sync.Cond{}.Signal().
-func (c *Cond) Signal() { runtime_notifyListNotifyOne(&c.notify) }
+func (c *Cond) Signal() {
+	if c.c.L == nil {
+		c.c.L = &c.Mutex
+	}
+	c.c.Signal()
+}

 // See: sync.Cond{}.Broadcast().
-func (c *Cond) Broadcast() { runtime_notifyListNotifyAll(&c.notify) }
+func (c *Cond) Broadcast() {
+	if c.c.L == nil {
+		c.c.L = &c.Mutex
+	}
+	c.c.Broadcast()
+}

 // RWCond is similar to a sync.Cond{}, but
 // it encompasses the RWMutex{} within itself.
 type RWCond struct {
-	notify notifyList
+	c sync.Cond
 	sync.RWMutex
 }

 // See: sync.Cond{}.Wait().
 func (c *RWCond) Wait() {
-	t := runtime_notifyListAdd(&c.notify)
-	c.RWMutex.Unlock()
-	runtime_notifyListWait(&c.notify, t)
-	c.RWMutex.Lock()
+	if c.c.L == nil {
+		c.c.L = &c.RWMutex
+	}
+	c.c.Wait()
 }

 // See: sync.Cond{}.Signal().
-func (c *RWCond) Signal() { runtime_notifyListNotifyOne(&c.notify) }
+func (c *RWCond) Signal() {
+	if c.c.L == nil {
+		c.c.L = &c.RWMutex
+	}
+	c.c.Signal()
+}

 // See: sync.Cond{}.Broadcast().
-func (c *RWCond) Broadcast() { runtime_notifyListNotifyAll(&c.notify) }
-
-// unused fields left
-// un-named for safety.
-type notifyList struct {
-	_      uint32         // wait uint32
-	notify uint32         // notify uint32
-	_      uintptr        // lock mutex
-	_      unsafe.Pointer // head *sudog
-	_      unsafe.Pointer // tail *sudog
-}
-
-// See runtime/sema.go for documentation.
-//
-//go:linkname runtime_notifyListAdd sync.runtime_notifyListAdd
-func runtime_notifyListAdd(l *notifyList) uint32
-
-// See runtime/sema.go for documentation.
-//
-//go:linkname runtime_notifyListWait sync.runtime_notifyListWait
-func runtime_notifyListWait(l *notifyList, t uint32)
-
-// See runtime/sema.go for documentation.
-//
-//go:linkname runtime_notifyListNotifyOne sync.runtime_notifyListNotifyOne
-func runtime_notifyListNotifyOne(l *notifyList)
-
-// See runtime/sema.go for documentation.
-//
-//go:linkname runtime_notifyListNotifyAll sync.runtime_notifyListNotifyAll
-func runtime_notifyListNotifyAll(l *notifyList)
-
-// Ensure that sync and runtime agree on size of notifyList.
-//
-//go:linkname runtime_notifyListCheck sync.runtime_notifyListCheck
-func runtime_notifyListCheck(size uintptr)
-func init() {
-	var n notifyList
-	runtime_notifyListCheck(unsafe.Sizeof(n))
+func (c *RWCond) Broadcast() {
+	if c.c.L == nil {
+		c.c.L = &c.RWMutex
+	}
+	c.c.Broadcast()
 }
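The reworked Cond keeps the same calling convention as sync.Cond: hold the embedded mutex, check your predicate in a loop around Wait(), and Signal()/Broadcast() from whoever changes it. Below is a small usage sketch, not part of the commit; it assumes the codeberg.org/gruf/go-mutexes import path used by the vendored package above.

// Usage sketch for the reworked Cond: wait for a predicate under the
// embedded mutex, and signal from another goroutine once it changes.
package main

import (
	"fmt"

	"codeberg.org/gruf/go-mutexes"
)

func main() {
	var (
		cond mutexes.Cond
		done bool
	)

	go func() {
		cond.Lock()
		done = true
		cond.Signal() // wake the waiter, if any
		cond.Unlock()
	}()

	cond.Lock()
	for !done {
		cond.Wait() // releases the embedded mutex while blocked
	}
	cond.Unlock()

	fmt.Println("predicate observed")
}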
vendor/codeberg.org/gruf/go-mutexes/hash_map.go: 56 changed lines (new file, generated, vendored)
@@ -0,0 +1,56 @@
+package mutexes
+
+type hashmap struct {
+	m map[string]*rwmutex
+	n int
+}
+
+func (m *hashmap) init(cap int) {
+	m.m = make(map[string]*rwmutex, cap)
+	m.n = cap
+}
+
+func (m *hashmap) Get(key string) *rwmutex { return m.m[key] }
+
+func (m *hashmap) Put(key string, mu *rwmutex) {
+	m.m[key] = mu
+	if n := len(m.m); n > m.n {
+		m.n = n
+	}
+}
+
+func (m *hashmap) Delete(key string) {
+	delete(m.m, key)
+}
+
+func (m *hashmap) Compact() {
+	// Noop when hashmap size
+	// is too small to matter.
+	if m.n < 2048 {
+		return
+	}
+
+	// Difference between maximum map
+	// size and the current map size.
+	diff := m.n - len(m.m)
+
+	// Maximum load factor before
+	// runtime allocates new hmap:
+	// maxLoad = 13 / 16
+	//
+	// So we apply the inverse/2, once
+	// $maxLoad/2 % of hmap is empty we
+	// compact the map to drop buckets.
+	if 2*16*diff > m.n*13 {
+
+		// Create new map only as big as required.
+		m2 := make(map[string]*rwmutex, len(m.m))
+		for k, v := range m.m {
+			m2[k] = v
+		}
+
+		// Set new.
+		m.m = m2
+		m.n = len(m2)
+	}
+}
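For intuition on the Compact() threshold: with the runtime's map load factor of 13/16, the inequality 2*16*diff > m.n*13 rearranges to diff > m.n*13/32, so a rebuild kicks in once a bit more than 40% of the map's high-water size (m.n, as tracked by Put, and only once it has reached 2048) has been deleted. A tiny standalone check with hypothetical sizes, not part of the commit:

// Hypothetical sizes plugged into the same inequality used by
// hashmap.Compact(); the cutoff sits at 13/32, about 40.6% emptied.
package main

import "fmt"

// wouldCompact reports whether Compact() would rebuild a map whose
// high-water size is high and whose current length is cur.
func wouldCompact(high, cur int) bool {
	diff := high - cur         // entries removed since the high-water mark
	return 2*16*diff > high*13 // same condition as in Compact()
}

func main() {
	fmt.Println(wouldCompact(4096, 3000)) // false: only ~27% emptied
	fmt.Println(wouldCompact(4096, 2000)) // true: ~51% emptied
}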
vendor/codeberg.org/gruf/go-mutexes/map.go: 92 changed lines (generated, vendored)
@@ -6,7 +6,6 @@ import (
 	"unsafe"

 	"codeberg.org/gruf/go-mempool"
-	"github.com/dolthub/swiss"
 )

 const (
@@ -27,14 +26,14 @@
 // like structures for sleeping / awaking awaiting goroutines.
 type MutexMap struct {
 	mapmu  sync.Mutex
-	mumap  *swiss.Map[string, *rwmutex]
+	mumap  hashmap
 	mupool mempool.UnsafePool
 }

 // checkInit ensures MutexMap is initialized (UNSAFE).
 func (mm *MutexMap) checkInit() {
-	if mm.mumap == nil {
-		mm.mumap = swiss.NewMap[string, *rwmutex](0)
+	if mm.mumap.m == nil {
+		mm.mumap.init(0)
 		mm.mupool.DirtyFactor = 256
 	}
 }
@@ -58,7 +57,7 @@ func (mm *MutexMap) lock(key string, lt uint8) func() {

 	for {
 		// Check map for mutex.
-		mu, _ := mm.mumap.Get(key)
+		mu := mm.mumap.Get(key)

 		if mu == nil {
 			// Allocate mutex.
@@ -69,7 +68,7 @@ func (mm *MutexMap) lock(key string, lt uint8) func() {
 		if !mu.Lock(lt) {
 			// Wait on mutex unlock, after
 			// immediately relocking map mu.
-			mu.WaitRelock(&mm.mapmu)
+			mu.WaitRelock()
 			continue
 		}

@@ -100,27 +99,9 @@ func (mm *MutexMap) unlock(key string, mu *rwmutex) {
 	mm.mumap.Delete(key)
 	mm.release(mu)

-	// Maximum load factor before
-	// 'swiss' allocates new hmap:
-	// maxLoad = 7 / 8
-	//
-	// So we apply the inverse/2, once
-	// $maxLoad/2 % of hmap is empty we
-	// compact the map to drop buckets.
-	len := mm.mumap.Count()
-	cap := mm.mumap.Capacity()
-	if cap-len > (cap*7)/(8*2) {
-
-		// Create a new map only as big as required.
-		mumap := swiss.NewMap[string, *rwmutex](uint32(len))
-		mm.mumap.Iter(func(k string, v *rwmutex) (stop bool) {
-			mumap.Put(k, v)
-			return false
-		})
-
-		// Set new map.
-		mm.mumap = mumap
-	}
+	// Check if compaction
+	// needed.
+	mm.mumap.Compact()

 	// Done with map.
 	mm.mapmu.Unlock()
@@ -131,7 +112,9 @@ func (mm *MutexMap) acquire() *rwmutex {
 	if ptr := mm.mupool.Get(); ptr != nil {
 		return (*rwmutex)(ptr)
 	}
-	return new(rwmutex)
+	mu := new(rwmutex)
+	mu.c.L = &mm.mapmu
+	return mu
 }

 // release will release given mutex to memory pool.
@@ -152,9 +135,9 @@ func (mm *MutexMap) release(mu *rwmutex) {
 // mechanism we use, otherwise all Cond{}.L would reference
 // the same outer map mutex.
 type rwmutex struct {
-	n notifyList // 'trigger' mechanism
-	l int32      // no. locks
-	t uint8      // lock type
+	c sync.Cond // 'trigger' mechanism
+	l int32     // no. locks
+	t uint8     // lock type
 }

 // Lock will lock the mutex for given lock type, in the
@@ -202,11 +185,34 @@ func (mu *rwmutex) Unlock() bool {
 	// Fully unlocked.
 	mu.t = 0

+	// NOTE: must remain in
+	// sync with runtime.notifyList{}.
+	//
+	// goexperiment.staticlockranking
+	// does change it slightly, but
+	// this does not alter the first
+	// 2 fields which are all we need.
+	type notifyList struct {
+		_      uint32
+		notify uint32
+		// ... other fields
+	}
+
+	// NOTE: must remain in
+	// sync with sync.Cond{}.
+	type syncCond struct {
+		_ struct{}
+		L sync.Locker
+		n notifyList
+		// ... other fields
+	}
+
 	// Awake all blocked goroutines and check
 	// for change in the last notified ticket.
-	before := atomic.LoadUint32(&mu.n.notify)
-	runtime_notifyListNotifyAll(&mu.n)
-	after := atomic.LoadUint32(&mu.n.notify)
+	cptr := (*syncCond)(unsafe.Pointer(&mu.c))
+	before := atomic.LoadUint32(&cptr.n.notify)
+	mu.c.Broadcast() // awakes all blocked!
+	after := atomic.LoadUint32(&cptr.n.notify)

 	// If ticket changed, this indicates
 	// AT LEAST one goroutine was awoken.
@@ -226,20 +232,4 @@ func (mu *rwmutex) Unlock() bool {
 // locked state. It incr the notifyList waiter count before
 // unlocking the outer mutex and blocking on notifyList wait.
 // On awake it will decr wait count and relock outer mutex.
-func (mu *rwmutex) WaitRelock(outer *sync.Mutex) {
-
-	// add ourselves to list while still
-	// under protection of outer map lock.
-	t := runtime_notifyListAdd(&mu.n)
-
-	// Finished with
-	// outer map lock.
-	outer.Unlock()
-
-	// Block until awoken by another
-	// goroutine within mu.Unlock().
-	runtime_notifyListWait(&mu.n, t)
-
-	// Relock!
-	outer.Lock()
-}
+func (mu *rwmutex) WaitRelock() { mu.c.Wait() }
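The Unlock() hunk above still peeks inside sync.Cond: it casts the embedded Cond to a locally declared mirror struct so it can read the notifyList ticket before and after Broadcast and learn whether any waiter was actually woken. The standalone sketch below reproduces that trick outside the vendored code; it depends on the current unexported layout of sync.Cond and of sync's notifyList, so treat it as illustrative rather than a stable technique.

// Illustrative reproduction of the ticket-reading trick used in
// rwmutex.Unlock(); relies on sync.Cond's unexported field layout.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"
)

// notifyList mirrors the first two fields of sync's runtime-backed notifyList.
type notifyList struct {
	_      uint32
	notify uint32
	// ... other fields
}

// syncCond mirrors sync.Cond just far enough to reach its notifyList.
type syncCond struct {
	_ struct{}
	L sync.Locker
	n notifyList
	// ... other fields
}

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)

	// Park one goroutine on the condition variable.
	go func() {
		mu.Lock()
		cond.Wait()
		mu.Unlock()
	}()
	time.Sleep(100 * time.Millisecond) // crude: give it time to reach Wait

	cptr := (*syncCond)(unsafe.Pointer(cond))
	before := atomic.LoadUint32(&cptr.n.notify)
	cond.Broadcast()
	after := atomic.LoadUint32(&cptr.n.notify)

	// The ticket only advances when at least one waiter was woken.
	fmt.Println("woke a waiter:", before != after)
}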