[feature] add per-uri dereferencer locks (#2291)

kim
2023-10-31 11:12:22 +00:00
committed by GitHub
parent 51d0a0bba5
commit ce71a5a790
54 changed files with 2432 additions and 2719 deletions


@@ -1,9 +0,0 @@
MIT License
Copyright (c) 2021 gruf
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -1 +0,0 @@
Alternative path library with a `strings.Builder`-like path builder.

(binary image file removed; was 106 KiB)


@@ -1,319 +0,0 @@
package fastpath
import (
"unsafe"
)
// allocate these just once
var (
dot = []byte(dotStr)
dotStr = "."
)
type Builder struct {
B []byte // B is the underlying byte buffer
dd int // pos of last '..' appended to builder
abs bool // abs stores whether path passed to first .Append() is absolute
set bool // set stores whether b.abs has been set i.e. not first call to .Append()
}
// NewBuilder returns a new Builder object using the
// supplied byte slice as the underlying buffer
func NewBuilder(b []byte) Builder {
if b != nil {
b = b[:0]
}
return Builder{
B: b,
dd: 0,
abs: false,
set: false,
}
}
// Reset resets the Builder object
func (b *Builder) Reset() {
b.B = b.B[:0]
b.dd = 0
b.abs = false
b.set = false
}
// Len returns the number of accumulated bytes in the Builder
func (b *Builder) Len() int {
return len(b.B)
}
// Cap returns the capacity of the underlying Builder buffer
func (b *Builder) Cap() int {
return cap(b.B)
}
// Bytes returns the accumulated path bytes.
func (b *Builder) Bytes() []byte {
if len(b.B) < 1 {
return dot
}
return b.B
}
// String returns the accumulated path string.
func (b *Builder) String() string {
if len(b.B) < 1 {
return dotStr
}
return string(b.B)
}
// StringPtr returns a ptr to the accumulated path string.
//
// Please note the underlying byte slice for this string is
// tied to the builder, so any changes will result in the
// returned string changing. Consider using .String() if
// this is undesired behaviour.
func (b *Builder) StringPtr() string {
if len(b.B) < 1 {
return dotStr
}
return *(*string)(unsafe.Pointer(&b.B))
}
// Absolute returns whether current path is absolute (not relative)
func (b *Builder) Absolute() bool {
return b.abs
}
// SetAbsolute converts the current path to / from absolute
func (b *Builder) SetAbsolute(val bool) {
if !b.set {
if val {
// .Append() has not been
// called, add a '/' and set abs
b.Guarantee(1)
b.appendByte('/')
b.abs = true
}
// Set as having been set
b.set = true
return
}
if !val && b.abs {
// Already set and absolute. Update
b.abs = false
// If not empty (i.e. not just '/'),
// then shift bytes 1 left
if len(b.B) > 1 {
copy(b.B, b.B[1:])
}
// Truncate 1 byte. In the case of empty,
// i.e. just '/' then it will drop this
b.truncate(1)
} else if val && !b.abs {
// Already set but NOT abs. Update
b.abs = true
// Guarantee 1 byte available
b.Guarantee(1)
// If empty, just append '/'
if len(b.B) < 1 {
b.appendByte('/')
return
}
// Increase length
l := len(b.B)
b.B = b.B[:l+1]
// Shift bytes 1 right
copy(b.B[1:], b.B[:l])
// Set first byte '/'
b.B[0] = '/'
}
}
// Append adds and cleans the supplied path bytes to the
// builder's internal buffer, growing the buffer if necessary
// to accommodate the extra path length
func (b *Builder) Append(p []byte) {
b.AppendString(*(*string)(unsafe.Pointer(&p)))
}
// AppendString adds and cleans the supplied path string to the
// builder's internal buffer, growing the buffer if necessary
// to accommodate the extra path length
func (b *Builder) AppendString(path string) {
defer func() {
// If buffer is empty, and an absolute
// path, ensure it starts with a '/'
if len(b.B) < 1 && b.abs {
b.appendByte('/')
}
}()
// Empty path, nothing to do
if len(path) == 0 {
return
}
// Guarantee at least the total length
// of supplied path available in the buffer
b.Guarantee(len(path))
// Try store if absolute
if !b.set {
b.abs = len(path) > 0 && path[0] == '/'
b.set = true
}
i := 0
for i < len(path) {
switch {
// Empty path segment
case path[i] == '/':
i++
// Singular '.' path segment, treat as empty
case path[i] == '.' && (i+1 == len(path) || path[i+1] == '/'):
i++
// Backtrack segment
case path[i] == '.' && path[i+1] == '.' && (i+2 == len(path) || path[i+2] == '/'):
i += 2
switch {
// Check if it's possible to backtrack with
// our current state of the buffer. i.e. is
// our buffer length longer than the last
// '..' we placed?
case len(b.B) > b.dd:
b.backtrack()
// b.cp = b.lp
// b.lp = 0
// If we reached here, need to check if
// we can append '..' to the path buffer,
// which is ONLY when path is NOT absolute
case !b.abs:
if len(b.B) > 0 {
b.appendByte('/')
}
b.appendByte('.')
b.appendByte('.')
b.dd = len(b.B)
// b.lp = lp - 2
// b.cp = b.dd
}
default:
if (b.abs && len(b.B) != 1) || (!b.abs && len(b.B) > 0) {
b.appendByte('/')
}
// b.lp = b.cp
// b.cp = len(b.B)
i += b.appendSlice(path[i:])
}
}
}
// Clean creates the shortest possible functional equivalent
// to the supplied path, resetting the builder before performing
// this operation. The builder object is NOT reset after return
func (b *Builder) Clean(path string) string {
b.Reset()
b.AppendString(path)
return b.String()
}
// Join connects and cleans multiple paths, resetting the builder before
// performing this operation and returning the shortest possible combination
// of all the supplied paths. The builder object is NOT reset after return
func (b *Builder) Join(base string, paths ...string) string {
b.Reset()
b.AppendString(base)
size := len(base)
for i := 0; i < len(paths); i++ {
b.AppendString(paths[i])
size += len(paths[i])
}
if size < 1 {
return ""
} else if len(b.B) < 1 {
return dotStr
}
return string(b.B)
}
// Guarantee ensures there are at least the requested number of
// free bytes available in the buffer, reallocating if necessary
func (b *Builder) Guarantee(size int) {
if size > cap(b.B)-len(b.B) {
nb := make([]byte, 2*cap(b.B)+size)
copy(nb, b.B)
b.B = nb[:len(b.B)]
}
}
// Truncate reduces the length of the buffer by the requested
// number of bytes. If the builder is set to absolute, the first
// byte (i.e. '/') will never be truncated
func (b *Builder) Truncate(size int) {
// If absolute and just '/', do nothing
if b.abs && len(b.B) == 1 {
return
}
// Truncate requested bytes
b.truncate(size)
}
// truncate reduces the length of the buffer by the requested
// size, no sanity checks are performed
func (b *Builder) truncate(size int) {
b.B = b.B[:len(b.B)-size]
}
// appendByte appends the supplied byte to the end of
// the buffer. appending is achieved by continually reslicing the
// buffer and setting the next byte-at-index, this is safe as guarantee()
// will have been called beforehand
func (b *Builder) appendByte(c byte) {
b.B = b.B[:len(b.B)+1]
b.B[len(b.B)-1] = c
}
// appendSlice appends the supplied string slice to
// the end of the buffer and returns the number of indices
// we were able to iterate before hitting a path separator '/'.
// appending is achieved by continually reslicing the buffer
// and setting the next byte-at-index, this is safe as guarantee()
// will have been called beforehand
func (b *Builder) appendSlice(slice string) int {
i := 0
for i < len(slice) && slice[i] != '/' {
b.B = b.B[:len(b.B)+1]
b.B[len(b.B)-1] = slice[i]
i++
}
return i
}
// backtrack reduces the end of the buffer back to the last
// separating '/', or end of buffer
func (b *Builder) backtrack() {
b.B = b.B[:len(b.B)-1]
for len(b.B)-1 > b.dd && b.B[len(b.B)-1] != '/' {
b.B = b.B[:len(b.B)-1]
}
if len(b.B) > 0 {
b.B = b.B[:len(b.B)-1]
}
}
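
For reference, a minimal usage sketch of the (removed) fastpath Builder API; the expected outputs assume semantics compatible with the standard library's path.Clean / path.Join, as the docs above describe:

package main

import (
	"fmt"

	"codeberg.org/gruf/go-fastpath"
)

func main() {
	// The zero value Builder is usable; Clean resets it first.
	var pb fastpath.Builder

	// Clean produces the shortest functional equivalent of a path.
	fmt.Println(pb.Clean("/a/b/../c//./d")) // expected: /a/c/d

	// Join connects and cleans multiple paths in one pass.
	fmt.Println(pb.Join("base", "..", "x/y")) // expected: x/y
}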


@@ -1,9 +0,0 @@
MIT License
Copyright (c) 2021 gruf
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -1 +0,0 @@
HashEncoder provides a means of quickly hash-summing and encoding data


@@ -1,42 +0,0 @@
package hashenc
import (
"encoding/base32"
"encoding/base64"
"encoding/hex"
)
// Encoder defines an interface for encoding binary data.
type Encoder interface {
// Encode encodes the data at src into dst
Encode(dst []byte, src []byte)
// EncodedLen returns the encoded length for input data of supplied length
EncodedLen(int) int
}
// Base32 returns a new base32 Encoder (StdEncoding, no padding).
func Base32() Encoder {
return base32.StdEncoding.WithPadding(base32.NoPadding)
}
// Base64 returns a new base64 Encoder (URLEncoding, no padding).
func Base64() Encoder {
return base64.URLEncoding.WithPadding(base64.NoPadding)
}
// Hex returns a new hex Encoder.
func Hex() Encoder {
return &hexEncoder{}
}
// hexEncoder simply provides an empty receiver to satisfy Encoder.
type hexEncoder struct{}
func (*hexEncoder) Encode(dst []byte, src []byte) {
hex.Encode(dst, src)
}
func (*hexEncoder) EncodedLen(len int) int {
return hex.EncodedLen(len)
}
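
A short sketch of the (removed) Encoder interface in use; the standard library encodings returned by Base32/Base64 satisfy it directly:

package main

import (
	"fmt"

	"codeberg.org/gruf/go-hashenc"
)

func main() {
	enc := hashenc.Base64()
	src := []byte("hello world")

	// Allocate exactly the encoded length, then encode in place.
	dst := make([]byte, enc.EncodedLen(len(src)))
	enc.Encode(dst, src)

	fmt.Println(string(dst)) // aGVsbG8gd29ybGQ (URL-safe base64, no padding)
}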


@@ -1,58 +0,0 @@
package hashenc
import (
"hash"
"codeberg.org/gruf/go-bytes"
)
// HashEncoder defines an interface for calculating encoded hash sums of binary data
type HashEncoder interface {
// EncodeSum calculates the hash sum of src and encodes (at most) Size() into dst
EncodeSum(dst []byte, src []byte)
// EncodedSum calculates the encoded hash sum of src and returns data in a newly allocated bytes.Bytes
EncodedSum(src []byte) bytes.Bytes
// Size returns the expected length of encoded hashes
Size() int
}
// New returns a new HashEncoder instance based on supplied hash.Hash and Encoder supplying functions.
func New(hash hash.Hash, enc Encoder) HashEncoder {
hashSize := hash.Size()
return &henc{
hash: hash,
hbuf: make([]byte, hashSize),
enc: enc,
size: enc.EncodedLen(hashSize),
}
}
// henc is the HashEncoder implementation.
type henc struct {
hash hash.Hash
hbuf []byte
enc Encoder
size int
}
func (henc *henc) EncodeSum(dst []byte, src []byte) {
// Hash supplied bytes
henc.hash.Reset()
henc.hash.Write(src)
henc.hbuf = henc.hash.Sum(henc.hbuf[:0])
// Encode the hashsum and return a copy
henc.enc.Encode(dst, henc.hbuf)
}
func (henc *henc) EncodedSum(src []byte) bytes.Bytes {
dst := make([]byte, henc.size)
henc.EncodeSum(dst, src)
return bytes.ToBytes(dst)
}
func (henc *henc) Size() int {
return henc.size
}
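
And a sketch of the (removed) HashEncoder in use. Note the shared internal hash buffer means a single instance is not safe for concurrent use, hence the pooling seen later in this diff:

package main

import (
	"crypto/sha256"
	"fmt"

	"codeberg.org/gruf/go-hashenc"
)

func main() {
	// Hex-encoded SHA256 sums, reusing one internal hash buffer.
	henc := hashenc.New(sha256.New(), hashenc.Hex())

	dst := make([]byte, henc.Size())
	henc.EncodeSum(dst, []byte("hello"))
	fmt.Println(string(dst))
}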


@@ -12,6 +12,14 @@ func (r ReaderFunc) Read(b []byte) (int, error) {
return r(b)
}
// ReaderFromFunc is a function signature which allows
// a function to implement the io.ReaderFrom type.
type ReaderFromFunc func(io.Reader) (int64, error)
func (rf ReaderFromFunc) ReadFrom(r io.Reader) (int64, error) {
return rf(r)
}
// ReadCloser wraps an io.Reader and io.Closer in order to implement io.ReadCloser.
func ReadCloser(r io.Reader, c io.Closer) io.ReadCloser {
return &struct {

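A sketch of the newly added ReaderFromFunc adapter (the iotools import path is taken from the go-iotools usage elsewhere in this diff):

package main

import (
	"fmt"
	"io"
	"strings"

	"codeberg.org/gruf/go-iotools"
)

func main() {
	var total int64

	// Adapt a closure into an io.ReaderFrom that counts drained bytes.
	rf := iotools.ReaderFromFunc(func(r io.Reader) (int64, error) {
		n, err := io.Copy(io.Discard, r)
		total += n
		return n, err
	})

	n, err := rf.ReadFrom(strings.NewReader("hello"))
	fmt.Println(n, err, total) // 5 <nil> 5
}
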

@@ -10,6 +10,14 @@ func (w WriterFunc) Write(b []byte) (int, error) {
return w(b)
}
// WriterToFunc is a function signature which allows
// a function to implement the io.WriterTo type.
type WriterToFunc func(io.Writer) (int64, error)
func (wt WriterToFunc) WriteTo(w io.Writer) (int64, error) {
return wt(w)
}
// WriteCloser wraps an io.Writer and io.Closer in order to implement io.WriteCloser.
func WriteCloser(w io.Writer, c io.Closer) io.WriteCloser {
return &struct {

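And the mirror-image sketch for the newly added WriterToFunc adapter:

package main

import (
	"io"
	"os"

	"codeberg.org/gruf/go-iotools"
)

func main() {
	// Adapt a closure into an io.WriterTo.
	wt := iotools.WriterToFunc(func(w io.Writer) (int64, error) {
		n, err := io.WriteString(w, "hello\n")
		return int64(n), err
	})

	_, _ = wt.WriteTo(os.Stdout) // prints "hello"
}
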

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2021 gruf
Copyright (c) gruf
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:


@@ -1,9 +1,8 @@
package mutexes
import (
"runtime"
"sync"
"sync/atomic"
"unsafe"
)
const (
@@ -12,452 +11,253 @@ const (
lockTypeWrite = uint8(1) << 1
lockTypeMap = uint8(1) << 2
// possible mutexmap states.
stateUnlockd = uint8(0)
stateRLocked = uint8(1)
stateLocked = uint8(2)
stateInUse = uint8(3)
// default values.
defaultWake = 1024
// frequency of GC cycles
// per no. unlocks. i.e.
// every 'gcfreq' unlocks.
gcfreq = 1024
)
// acquireState attempts to acquire required map state for lockType.
func acquireState(state uint8, lt uint8) (uint8, bool) {
switch state {
// Unlocked state
// (all allowed)
case stateUnlockd:
// Keys locked, no state lock.
// (don't allow map locks)
case stateInUse:
if lt&lockTypeMap != 0 {
return 0, false
}
// Read locked
// (only allow read locks)
case stateRLocked:
if lt&lockTypeRead == 0 {
return 0, false
}
// Write locked
// (none allowed)
case stateLocked:
return 0, false
// shouldn't reach here
default:
panic("unexpected state")
}
switch {
// If unlocked and not a map
// lock request, set in use
case lt&lockTypeMap == 0:
if state == stateUnlockd {
state = stateInUse
}
// Set read lock state
case lt&lockTypeRead != 0:
state = stateRLocked
// Set write lock state
case lt&lockTypeWrite != 0:
state = stateLocked
default:
panic("unexpected lock type")
}
return state, true
}
// MutexMap is a structure that allows read / write locking per key, performing
// as you'd expect a map[string]*sync.RWMutex to perform. The differences
// being that the entire map can itself be read / write locked, it uses memory
// pooling for the mutex (not quite) structures, and it is self-evicting. The
// core configurations of maximum no. open locks and wake modulus* are user
// definable.
// MutexMap is a structure that allows read / write locking
// per key, performing as you'd expect a map[string]*RWMutex
// to perform, without you needing to worry about deadlocks
// between competing read / write locks and the map's own mutex.
// It uses memory pooling for the internal "mutex" (ish) types
// and performs self-eviction of keys.
//
// * The wake modulus is the number that the current number of open locks is
// modulused against to determine how often to notify sleeping goroutines.
// These are goroutines that are attempting to lock a key / whole map and are
// awaiting a permissible state (e.g. no key write locks allowed when the
// map is read locked).
// Under the hood this is achieved using a single mutex for the
// map, state tracking for individual keys, and some simple waitgroup
// type structures to park / block goroutines waiting for keys.
type MutexMap struct {
queue *sync.WaitGroup
qucnt int32
mumap map[string]*rwmutex
mpool pool
evict []*rwmutex
count int32
maxmu int32
wake int32
mapmu sync.Mutex
state uint8
mapmu sync.Mutex
mumap map[string]*rwmutexish
mupool rwmutexPool
count uint32
}
// NewMap returns a new MutexMap instance with the provided max no. open mutexes and wake modulus.
func NewMap(max, wake int32) MutexMap {
// Determine wake mod.
if wake < 1 {
wake = defaultWake
}
// Determine max no. mutexes
if max < 1 {
procs := runtime.GOMAXPROCS(0)
max = wake * int32(procs)
}
return MutexMap{
queue: &sync.WaitGroup{},
mumap: make(map[string]*rwmutex, max),
maxmu: max,
wake: wake,
// checkInit ensures MutexMap is initialized (UNSAFE).
func (mm *MutexMap) checkInit() {
if mm.mumap == nil {
mm.mumap = make(map[string]*rwmutexish)
}
}
// SET sets the MutexMap max open locks and wake modulus, returning the current values.
// For values less than zero defaults are set, and zero is a no-op.
func (mm *MutexMap) SET(max, wake int32) (int32, int32) {
mm.mapmu.Lock()
switch {
// Set default wake
case wake < 0:
mm.wake = defaultWake
// Set supplied wake
case wake > 0:
mm.wake = wake
}
switch {
// Set default max
case max < 0:
procs := runtime.GOMAXPROCS(0)
mm.maxmu = wake * int32(procs)
// Set supplied max
case max > 0:
mm.maxmu = max
}
// Fetch values
max = mm.maxmu
wake = mm.wake
mm.mapmu.Unlock()
return max, wake
}
// spinLock will wait (using a mutex to sleep the thread) until the conditional returns true.
func (mm *MutexMap) spinLock(cond func() bool) {
for {
// Acquire map lock
mm.mapmu.Lock()
if cond() {
return
}
// Current queue ptr
queue := mm.queue
// Queue ourselves
queue.Add(1)
mm.qucnt++
// Unlock map
mm.mapmu.Unlock()
// Wait on notify
mm.queue.Wait()
}
}
// lock will acquire a lock of given type on the 'mutex' at key.
func (mm *MutexMap) lock(key string, lt uint8) func() {
var ok bool
var mu *rwmutex
// Spin lock until returns true
mm.spinLock(func() bool {
// Check not overloaded
if !(mm.count < mm.maxmu) {
return false
}
// Attempt to acquire usable map state
state, ok := acquireState(mm.state, lt)
if !ok {
return false
}
// Update state
mm.state = state
// Ensure mutex at key
// is in lockable state
mu, ok = mm.mumap[key]
return !ok || mu.CanLock(lt)
})
// Incr count
mm.count++
if !ok {
// No mutex found for key
// Alloc mu from pool
mu = mm.mpool.Acquire()
mm.mumap[key] = mu
// Set our key
mu.key = key
// Queue for eviction
mm.evict = append(mm.evict, mu)
}
// Lock mutex
mu.Lock(lt)
// Unlock map
mm.mapmu.Unlock()
return func() {
mm.mapmu.Lock()
mu.Unlock()
mm.cleanup()
}
}
// lockMap will lock the whole map under given lock type.
func (mm *MutexMap) lockMap(lt uint8) {
// Spin lock until returns true
mm.spinLock(func() bool {
// Attempt to acquire usable map state
state, ok := acquireState(mm.state, lt)
if !ok {
return false
}
// Update state
mm.state = state
return true
})
// Incr count
mm.count++
// State acquired, unlock
mm.mapmu.Unlock()
}
// cleanup is performed as the final stage of unlocking a locked key / map state, and finally unlocks the map.
func (mm *MutexMap) cleanup() {
// Decr count
mm.count--
// Calculate current wake modulus
wakemod := mm.count % mm.wake
if mm.count != 0 && wakemod != 0 {
// Fast path => no cleanup.
// Unlock, return early
mm.mapmu.Unlock()
return
}
go func() {
if wakemod == 0 {
// Release queued goroutines
mm.queue.Add(-int(mm.qucnt))
// Allocate new queue and reset
mm.queue = &sync.WaitGroup{}
mm.qucnt = 0
}
if mm.count == 0 {
// Perform evictions
for _, mu := range mm.evict {
key := mu.key
mu.key = ""
delete(mm.mumap, key)
mm.mpool.Release(mu)
}
// Reset map state
mm.evict = mm.evict[:0]
mm.state = stateUnlockd
mm.mpool.GC()
}
// Unlock map
mm.mapmu.Unlock()
}()
}
// RLockMap acquires a read lock over the entire map, returning a lock state for acquiring key read locks.
// Please note that the 'unlock()' function will block until all keys locked from this state are unlocked.
func (mm *MutexMap) RLockMap() *LockState {
mm.lockMap(lockTypeRead | lockTypeMap)
return &LockState{
mmap: mm,
ltyp: lockTypeRead,
}
}
// LockMap acquires a write lock over the entire map, returning a lock state for acquiring key read/write locks.
// Please note that the 'unlock()' function will block until all keys locked from this state are unlocked.
func (mm *MutexMap) LockMap() *LockState {
mm.lockMap(lockTypeWrite | lockTypeMap)
return &LockState{
mmap: mm,
ltyp: lockTypeWrite,
}
}
// RLock acquires a mutex read lock for supplied key, returning an RUnlock function.
func (mm *MutexMap) RLock(key string) (runlock func()) {
return mm.lock(key, lockTypeRead)
}
// Lock acquires a mutex write lock for supplied key, returning an Unlock function.
func (mm *MutexMap) Lock(key string) (unlock func()) {
// Lock acquires a write lock on key in map, returning unlock function.
func (mm *MutexMap) Lock(key string) func() {
return mm.lock(key, lockTypeWrite)
}
// LockState represents a window to a locked MutexMap.
type LockState struct {
wait sync.WaitGroup
mmap *MutexMap
done uint32
ltyp uint8
// RLock acquires a read lock on key in map, returning runlock function.
func (mm *MutexMap) RLock(key string) func() {
return mm.lock(key, lockTypeRead)
}
// Lock: see MutexMap.Lock() definition. Will panic if map only read locked.
func (st *LockState) Lock(key string) (unlock func()) {
return st.lock(key, lockTypeWrite)
func (mm *MutexMap) lock(key string, lt uint8) func() {
// Perform first map lock
// and check initialization
// OUTSIDE the main loop.
mm.mapmu.Lock()
mm.checkInit()
for {
// Check map for mu.
mu := mm.mumap[key]
if mu == nil {
// Allocate new mutex.
mu = mm.mupool.Acquire()
mm.mumap[key] = mu
}
if !mu.Lock(lt) {
// Wait on mutex unlock, after
// immediately relocking map mu.
mu.WaitRelock(&mm.mapmu)
continue
}
// Done with map.
mm.mapmu.Unlock()
// Return mutex unlock function.
return func() { mm.unlock(key, mu) }
}
}
// RLock: see MutexMap.RLock() definition.
func (st *LockState) RLock(key string) (runlock func()) {
return st.lock(key, lockTypeRead)
}
func (mm *MutexMap) unlock(key string, mu *rwmutexish) {
// Get map lock.
mm.mapmu.Lock()
// lock: see MutexMap.lock() definition.
func (st *LockState) lock(key string, lt uint8) func() {
st.wait.Add(1) // track lock
// Unlock mutex.
if mu.Unlock() {
if atomic.LoadUint32(&st.done) == 1 {
panic("called (r)lock on unlocked state")
} else if lt&lockTypeWrite != 0 &&
st.ltyp&lockTypeWrite == 0 {
panic("called lock on rlocked map")
// Mutex fully unlocked
// with zero waiters. Self
// evict and release it.
delete(mm.mumap, key)
mm.mupool.Release(mu)
}
var ok bool
var mu *rwmutex
if mm.count++; mm.count%gcfreq == 0 {
// Every 'gcfreq' unlocks perform
// a garbage collection to keep
// us squeaky clean :]
mm.mupool.GC()
}
// Spin lock until returns true
st.mmap.spinLock(func() bool {
// Check not overloaded
if !(st.mmap.count < st.mmap.maxmu) {
// Done with map.
mm.mapmu.Unlock()
}
// rwmutexPool is a very simple memory pool.
type rwmutexPool struct {
current []*rwmutexish
victim []*rwmutexish
}
// Acquire returns a rwmutexish from the pool (or allocates a new one).
func (p *rwmutexPool) Acquire() *rwmutexish {
// First try the current queue
if l := len(p.current) - 1; l >= 0 {
mu := p.current[l]
p.current = p.current[:l]
return mu
}
// Next try the victim queue.
if l := len(p.victim) - 1; l >= 0 {
mu := p.victim[l]
p.victim = p.victim[:l]
return mu
}
// Lastly, alloc new.
mu := new(rwmutexish)
return mu
}
// Release places a rwmutexish back in the pool.
func (p *rwmutexPool) Release(mu *rwmutexish) {
p.current = append(p.current, mu)
}
// GC will clear out unused entries from the rwmutexPool.
func (p *rwmutexPool) GC() {
current := p.current
p.current = nil
p.victim = current
}
// rwmutexish is a RW mutex (ish), i.e. the representation
// of one, only to be accessed while holding the map's mutex.
type rwmutexish struct {
tr trigger
ln int32 // no. locks
wn int32 // no. waiters
lt uint8 // lock type
}
// Lock will lock the mutex for given lock type, in the
// sense that it will update the internal state tracker
// accordingly. Return value is true on successful lock.
func (mu *rwmutexish) Lock(lt uint8) bool {
switch mu.lt {
case lockTypeRead:
// already read locked,
// only permit more reads.
if lt != lockTypeRead {
return false
}
// Ensure mutex at key
// is in lockable state
mu, ok = st.mmap.mumap[key]
return !ok || mu.CanLock(lt)
})
case lockTypeWrite:
// already write locked,
// no other locks allowed.
return false
// Incr count
st.mmap.count++
if !ok {
// No mutex found for key
// Alloc mu from pool
mu = st.mmap.mpool.Acquire()
st.mmap.mumap[key] = mu
// Set our key
mu.key = key
// Queue for eviction
st.mmap.evict = append(st.mmap.evict, mu)
default:
// Fully unlocked.
mu.lt = lt
}
// Lock mutex
mu.Lock(lt)
// Update
// count.
mu.ln++
// Unlock map
st.mmap.mapmu.Unlock()
return func() {
st.mmap.mapmu.Lock()
mu.Unlock()
st.mmap.cleanup()
st.wait.Add(-1)
}
return true
}
// UnlockMap will close this state and release the currently locked map.
func (st *LockState) UnlockMap() {
if !atomic.CompareAndSwapUint32(&st.done, 0, 1) {
panic("called unlockmap on expired state")
}
st.wait.Wait()
st.mmap.mapmu.Lock()
st.mmap.cleanup()
}
// Unlock will unlock the mutex, in the sense that
// it will update the internal state tracker accordingly.
// On any unlock it will awaken sleeping waiting threads.
// The returned boolean is true only when fully unlocked AND with zero waiters.
func (mu *rwmutexish) Unlock() bool {
var ok bool
// rwmutex is a very simple *representation* of a read-write
// mutex, though not one in implementation. It works by
// tracking the lock state for a given map key, which is
// protected by the map's mutex.
type rwmutex struct {
rcnt int32 // read lock count
lock uint8 // lock type
key string // map key
}
switch mu.ln--; {
case mu.ln > 0 && mu.lt == lockTypeWrite:
panic("BUG: multiple writer locks")
case mu.ln < 0:
panic("BUG: negative lock count")
case mu.ln == 0:
// Fully unlocked.
mu.lt = 0
func (mu *rwmutex) CanLock(lt uint8) bool {
return mu.lock == 0 ||
(mu.lock&lockTypeRead != 0 && lt&lockTypeRead != 0)
}
func (mu *rwmutex) Lock(lt uint8) {
// Set lock type
mu.lock = lt
if lt&lockTypeRead != 0 {
// RLock, increment
mu.rcnt++
}
}
func (mu *rwmutex) Unlock() {
if mu.rcnt > 0 {
// RUnlock
mu.rcnt--
// Only return true
// with no waiters.
ok = (mu.wn == 0)
}
if mu.rcnt == 0 {
// Total unlock
mu.lock = 0
}
// Awake all waiting
// goroutines for mu.
mu.tr.Trigger()
return ok
}
// WaitRelock expects a mutex to be passed in already in
// the locked state. It increments the rwmutexish waiter count before
// unlocking the outer mutex and blocking on the internal trigger.
// On wake it relocks the outer mutex and decrements the wait count.
func (mu *rwmutexish) WaitRelock(outer *sync.Mutex) {
mu.wn++
outer.Unlock()
mu.tr.Wait()
outer.Lock()
mu.wn--
}
// trigger uses the internals of sync.Cond to provide
// a waitgroup type structure (including goroutine parks)
// without such a heavy reliance on a delta value.
type trigger struct{ notifyList }
func (t *trigger) Trigger() {
runtime_notifyListNotifyAll(&t.notifyList)
}
func (t *trigger) Wait() {
v := runtime_notifyListAdd(&t.notifyList)
runtime_notifyListWait(&t.notifyList, v)
}
// Approximation of notifyList in runtime/sema.go.
type notifyList struct {
wait uint32
notify uint32
lock uintptr // key field of the mutex
head unsafe.Pointer
tail unsafe.Pointer
}
// See runtime/sema.go for documentation.
//
//go:linkname runtime_notifyListAdd sync.runtime_notifyListAdd
func runtime_notifyListAdd(l *notifyList) uint32
// See runtime/sema.go for documentation.
//
//go:linkname runtime_notifyListWait sync.runtime_notifyListWait
func runtime_notifyListWait(l *notifyList, t uint32)
// See runtime/sema.go for documentation.
//
//go:linkname runtime_notifyListNotifyAll sync.runtime_notifyListNotifyAll
func runtime_notifyListNotifyAll(l *notifyList)
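
Tying this back to the commit title, a sketch of how per-URI dereferencer locks look against the new MutexMap API (the URI key is illustrative):

package main

import (
	"codeberg.org/gruf/go-mutexes"
)

func main() {
	// The zero value is usable; the map is lazily initialized.
	var mm mutexes.MutexMap

	uri := "https://example.org/users/kim" // illustrative key

	// Exclusive lock while dereferencing this URI; other lockers
	// of the same key park on the rwmutexish trigger until done.
	unlock := mm.Lock(uri)
	// ... dereference and store ...
	unlock()

	// Concurrent readers of the same key may proceed together.
	runlock := mm.RLock(uri)
	defer runlock()
}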


@@ -1,39 +0,0 @@
package mutexes
// pool is a very simple memory pool.
type pool struct {
current []*rwmutex
victim []*rwmutex
}
// Acquire returns a rwmutex from the pool (or allocates a new one).
func (p *pool) Acquire() *rwmutex {
// First try the current queue
if l := len(p.current) - 1; l >= 0 {
mu := p.current[l]
p.current = p.current[:l]
return mu
}
// Next try the victim queue.
if l := len(p.victim) - 1; l >= 0 {
mu := p.victim[l]
p.victim = p.victim[:l]
return mu
}
// Lastly, alloc new.
return &rwmutex{}
}
// Release places a rwmutex back in the pool.
func (p *pool) Release(mu *rwmutex) {
p.current = append(p.current, mu)
}
// GC will clear out unused entries from the pool.
func (p *pool) GC() {
current := p.current
p.current = nil
p.victim = current
}


@@ -1,9 +0,0 @@
MIT License
Copyright (c) 2021 gruf
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -1,2 +0,0 @@
A selection of type-defined `sync.Pool` implementations with redefined "getter" and "putter"
methods to handle their appropriate types.


@@ -1,89 +0,0 @@
package pools
import (
"bufio"
"io"
"sync"
)
// BufioReaderPool is a pooled allocator for bufio.Reader objects.
type BufioReaderPool interface {
// Get fetches a bufio.Reader from pool and resets to supplied reader
Get(io.Reader) *bufio.Reader
// Put places supplied bufio.Reader back in pool
Put(*bufio.Reader)
}
// NewBufioReaderPool returns a newly instantiated bufio.Reader pool.
func NewBufioReaderPool(size int) BufioReaderPool {
return &bufioReaderPool{
pool: sync.Pool{
New: func() interface{} {
return bufio.NewReaderSize(nil, size)
},
},
size: size,
}
}
// bufioReaderPool is our implementation of BufioReaderPool.
type bufioReaderPool struct {
pool sync.Pool
size int
}
func (p *bufioReaderPool) Get(r io.Reader) *bufio.Reader {
br := p.pool.Get().(*bufio.Reader)
br.Reset(r)
return br
}
func (p *bufioReaderPool) Put(br *bufio.Reader) {
if br.Size() < p.size {
return
}
br.Reset(nil)
p.pool.Put(br)
}
// BufioWriterPool is a pooled allocator for bufio.Writer objects.
type BufioWriterPool interface {
// Get fetches a bufio.Writer from pool and resets to supplied writer
Get(io.Writer) *bufio.Writer
// Put places supplied bufio.Writer back in pool
Put(*bufio.Writer)
}
// NewBufioWriterPool returns a newly instantiated bufio.Writer pool.
func NewBufioWriterPool(size int) BufioWriterPool {
return &bufioWriterPool{
pool: sync.Pool{
New: func() interface{} {
return bufio.NewWriterSize(nil, size)
},
},
size: size,
}
}
// bufioWriterPool is our implementation of BufioWriterPool.
type bufioWriterPool struct {
pool sync.Pool
size int
}
func (p *bufioWriterPool) Get(w io.Writer) *bufio.Writer {
bw := p.pool.Get().(*bufio.Writer)
bw.Reset(w)
return bw
}
func (p *bufioWriterPool) Put(bw *bufio.Writer) {
if bw.Size() < p.size {
return
}
bw.Reset(nil)
p.pool.Put(bw)
}
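
A usage sketch for the (removed) bufio pools; the import path is assumed from the vendored go-pools module:

package main

import (
	"fmt"
	"strings"

	"codeberg.org/gruf/go-pools"
)

func main() {
	pool := pools.NewBufioReaderPool(4096)

	// Get resets a pooled bufio.Reader to the supplied source.
	br := pool.Get(strings.NewReader("line one\n"))
	line, _ := br.ReadString('\n')
	fmt.Print(line)

	// Return the reader for reuse once done with it.
	pool.Put(br)
}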


@@ -1,46 +0,0 @@
package pools
import (
"sync"
"codeberg.org/gruf/go-byteutil"
)
// BufferPool is a pooled allocator for bytes.Buffer objects
type BufferPool interface {
// Get fetches a bytes.Buffer from pool
Get() *byteutil.Buffer
// Put places supplied bytes.Buffer in pool
Put(*byteutil.Buffer)
}
// NewBufferPool returns a newly instantiated bytes.Buffer pool
func NewBufferPool(size int) BufferPool {
return &bufferPool{
pool: sync.Pool{
New: func() interface{} {
return &byteutil.Buffer{B: make([]byte, 0, size)}
},
},
size: size,
}
}
// bufferPool is our implementation of BufferPool
type bufferPool struct {
pool sync.Pool
size int
}
func (p *bufferPool) Get() *byteutil.Buffer {
return p.pool.Get().(*byteutil.Buffer)
}
func (p *bufferPool) Put(buf *byteutil.Buffer) {
if buf.Cap() < p.size {
return
}
buf.Reset()
p.pool.Put(buf)
}


@@ -1,46 +0,0 @@
package pools
import (
"sync"
"codeberg.org/gruf/go-fastpath"
)
// PathBuilderPool is a pooled allocator for fastpath.Builder objects
type PathBuilderPool interface {
// Get fetches a fastpath.Builder from pool
Get() *fastpath.Builder
// Put places supplied fastpath.Builder back in pool
Put(*fastpath.Builder)
}
// NewPathBuilderPool returns a newly instantiated fastpath.Builder pool
func NewPathBuilderPool(size int) PathBuilderPool {
return &pathBuilderPool{
pool: sync.Pool{
New: func() interface{} {
return &fastpath.Builder{B: make([]byte, 0, size)}
},
},
size: size,
}
}
// pathBuilderPool is our implementation of PathBuilderPool
type pathBuilderPool struct {
pool sync.Pool
size int
}
func (p *pathBuilderPool) Get() *fastpath.Builder {
return p.pool.Get().(*fastpath.Builder)
}
func (p *pathBuilderPool) Put(pb *fastpath.Builder) {
if pb.Cap() < p.size {
return
}
pb.Reset()
p.pool.Put(pb)
}


@@ -1,46 +0,0 @@
package pools
import (
"hash"
"sync"
"codeberg.org/gruf/go-hashenc"
)
// HashEncoderPool is a pooled allocator for hashenc.HashEncoder objects.
type HashEncoderPool interface {
// Get fetches a hashenc.HashEncoder from pool
Get() hashenc.HashEncoder
// Put places supplied hashenc.HashEncoder back in pool
Put(hashenc.HashEncoder)
}
// NewHashEncoderPool returns a newly instantiated hashenc.HashEncoder pool.
func NewHashEncoderPool(hash func() hash.Hash, enc func() hashenc.Encoder) HashEncoderPool {
return &hencPool{
pool: sync.Pool{
New: func() interface{} {
return hashenc.New(hash(), enc())
},
},
size: hashenc.New(hash(), enc()).Size(),
}
}
// hencPool is our implementation of HashEncoderPool.
type hencPool struct {
pool sync.Pool
size int
}
func (p *hencPool) Get() hashenc.HashEncoder {
return p.pool.Get().(hashenc.HashEncoder)
}
func (p *hencPool) Put(henc hashenc.HashEncoder) {
if henc.Size() < p.size {
return
}
p.pool.Put(henc)
}


@@ -1,387 +0,0 @@
package pools
import (
"runtime"
"sync"
"sync/atomic"
"unsafe"
)
type Pool struct {
// New is used to instantiate new items
New func() interface{}
// Evict is called on evicted items during pool .Clean()
Evict func(interface{})
local unsafe.Pointer // ptr to []_ppool
localSz int64 // count of all elems in local
victim unsafe.Pointer // ptr to []_ppool
victimSz int64 // count of all elems in victim
mutex sync.Mutex // mutex protects new cleanups, and new allocations of local
}
// Get attempts to fetch an item from the pool, failing that, allocates one with the supplied .New() function
func (p *Pool) Get() interface{} {
// Get local pool for proc
// (also pins proc)
pool, pid := p.pin()
if v := pool.getPrivate(); v != nil {
// local _ppool private elem acquired
runtime_procUnpin()
atomic.AddInt64(&p.localSz, -1)
return v
}
if v := pool.get(); v != nil {
// local _ppool queue elem acquired
runtime_procUnpin()
atomic.AddInt64(&p.localSz, -1)
return v
}
// Unpin before attempting slow
runtime_procUnpin()
if v := p.getSlow(pid); v != nil {
// note size decrementing
// is handled within p.getSlow()
// as we don't know if it came
// from the local or victim pools
return v
}
// Alloc new
return p.New()
}
// Put places supplied item in the proc local pool
func (p *Pool) Put(v interface{}) {
// Don't store nil
if v == nil {
return
}
// Get proc local pool
// (also pins proc)
pool, _ := p.pin()
// first try private, then queue
if !pool.setPrivate(v) {
pool.put(v)
}
runtime_procUnpin()
// Increment local pool size
atomic.AddInt64(&p.localSz, 1)
}
// Clean will drop the current victim pools, move the current local pools to their
// place, and reset the local pools ptr so that they are regenerated
func (p *Pool) Clean() {
p.mutex.Lock()
// victim becomes local, local becomes nil
localPtr := atomic.SwapPointer(&p.local, nil)
victimPtr := atomic.SwapPointer(&p.victim, localPtr)
localSz := atomic.SwapInt64(&p.localSz, 0)
atomic.StoreInt64(&p.victimSz, localSz)
var victim []ppool
if victimPtr != nil {
victim = *(*[]ppool)(victimPtr)
}
// drain each of the vict _ppool items
for i := 0; i < len(victim); i++ {
ppool := &victim[i]
ppool.evict(p.Evict)
}
p.mutex.Unlock()
}
// LocalSize returns the total number of elements in all the proc-local pools
func (p *Pool) LocalSize() int64 {
return atomic.LoadInt64(&p.localSz)
}
// VictimSize returns the total number of elements in all the victim (old proc-local) pools
func (p *Pool) VictimSize() int64 {
return atomic.LoadInt64(&p.victimSz)
}
// getSlow is the slow path for fetching an element, attempting to steal from other procs'
// local pools, and failing that, from the aging-out victim pools. pid is still passed so
// not all procs start iterating from the same index
func (p *Pool) getSlow(pid int) interface{} {
// get local pools
local := p.localPools()
// Try to steal from other proc locals
for i := 0; i < len(local); i++ {
pool := &local[(pid+i+1)%len(local)]
if v := pool.get(); v != nil {
atomic.AddInt64(&p.localSz, -1)
return v
}
}
// get victim pools
victim := p.victimPools()
// Attempt to steal from victim pools
for i := 0; i < len(victim); i++ {
pool := &victim[(pid+i+1)%len(victim)]
if v := pool.get(); v != nil {
atomic.AddInt64(&p.victimSz, -1)
return v
}
}
// Set victim pools to nil (none found)
atomic.StorePointer(&p.victim, nil)
return nil
}
// localPools safely loads slice of local _ppools
func (p *Pool) localPools() []ppool {
local := atomic.LoadPointer(&p.local)
if local == nil {
return nil
}
return *(*[]ppool)(local)
}
// victimPools safely loads slice of victim _ppools
func (p *Pool) victimPools() []ppool {
victim := atomic.LoadPointer(&p.victim)
if victim == nil {
return nil
}
return *(*[]ppool)(victim)
}
// pin will pin the current proc, returning the proc-local _ppool and the PID we're pinned to
func (p *Pool) pin() (*ppool, int) {
for {
// get local pools
local := p.localPools()
if len(local) > 0 {
// local already initialized
// pin to current proc
pid := runtime_procPin()
// check for pid local pool
if pid < len(local) {
return &local[pid], pid
}
// unpin from proc
runtime_procUnpin()
} else {
// local not yet initialized
// Check functions are set
if p.New == nil {
panic("new func must not be nil")
}
if p.Evict == nil {
panic("evict func must not be nil")
}
}
// allocate local
p.allocLocal()
}
}
// allocLocal allocates a new local pool slice, checking the old length to determine
// whether the pool was previously nil, or whether a change in GOMAXPROCS occurred
func (p *Pool) allocLocal() {
// get pool lock
p.mutex.Lock()
// Calculate new size to use
size := runtime.GOMAXPROCS(0)
local := p.localPools()
if len(local) != size {
// GOMAXPROCS changed, reallocate
pools := make([]ppool, size)
atomic.StorePointer(&p.local, unsafe.Pointer(&pools))
// Evict old local elements
for i := 0; i < len(local); i++ {
pool := &local[i]
pool.evict(p.Evict)
}
}
// Unlock pool
p.mutex.Unlock()
}
// _ppool is a proc local pool
type _ppool struct {
// root is the root element of the _ppool queue,
// and protects concurrent access to the queue
root unsafe.Pointer
// private is a proc private member accessible
// only to the pid this _ppool is assigned to,
// except during evict (hence the unsafe pointer)
private unsafe.Pointer
}
// ppool wraps _ppool with pad.
type ppool struct {
_ppool
// Prevents false sharing on widespread platforms with
// 128 mod (cache line size) = 0 .
pad [128 - unsafe.Sizeof(_ppool{})%128]byte
}
// getPrivate gets the proc private member
func (pp *_ppool) getPrivate() interface{} {
ptr := atomic.SwapPointer(&pp.private, nil)
if ptr == nil {
return nil
}
return *(*interface{})(ptr)
}
// setPrivate sets the proc private member (only if unset)
func (pp *_ppool) setPrivate(v interface{}) bool {
return atomic.CompareAndSwapPointer(&pp.private, nil, unsafe.Pointer(&v))
}
// get fetches an element from the queue
func (pp *_ppool) get() interface{} {
for {
// Attempt to load root elem
root := atomic.LoadPointer(&pp.root)
if root == nil {
return nil
}
// Attempt to consume root elem
if root == inUsePtr ||
!atomic.CompareAndSwapPointer(&pp.root, root, inUsePtr) {
continue
}
// Root becomes next in chain
e := (*elem)(root)
v := e.value
// Place new root back in the chain
atomic.StorePointer(&pp.root, unsafe.Pointer(e.next))
putElem(e)
return v
}
}
// put places an element in the queue
func (pp *_ppool) put(v interface{}) {
// Prepare next elem
e := getElem()
e.value = v
for {
// Attempt to load root elem
root := atomic.LoadPointer(&pp.root)
if root == inUsePtr {
continue
}
// Set the next elem value (might be nil)
e.next = (*elem)(root)
// Attempt to store this new value at root
if atomic.CompareAndSwapPointer(&pp.root, root, unsafe.Pointer(e)) {
break
}
}
}
// evict drains all entries from the pool, calling hook on each
func (pp *_ppool) evict(hook func(interface{})) {
if v := pp.getPrivate(); v != nil {
hook(v)
}
for {
v := pp.get()
if v == nil {
break
}
hook(v)
}
}
// inUsePtr is a ptr used to indicate _ppool is in use
var inUsePtr = unsafe.Pointer(&elem{
next: nil,
value: "in_use",
})
// elem defines an element in the _ppool queue
type elem struct {
next *elem
value interface{}
}
// elemPool is a simple pool of unused elements
var elemPool = struct {
root unsafe.Pointer
}{}
// getElem fetches a new elem from pool, or creates new
func getElem() *elem {
// Attempt to load root elem
root := atomic.LoadPointer(&elemPool.root)
if root == nil {
return &elem{}
}
// Attempt to consume root elem
if root == inUsePtr ||
!atomic.CompareAndSwapPointer(&elemPool.root, root, inUsePtr) {
return &elem{}
}
// Root becomes next in chain
e := (*elem)(root)
atomic.StorePointer(&elemPool.root, unsafe.Pointer(e.next))
e.next = nil
return e
}
// putElem will place element in the pool
func putElem(e *elem) {
e.value = nil
// Attempt to load root elem
root := atomic.LoadPointer(&elemPool.root)
if root == inUsePtr {
return // drop
}
// Set the next elem value (might be nil)
e.next = (*elem)(root)
// Attempt to store this new value at root
atomic.CompareAndSwapPointer(&elemPool.root, root, unsafe.Pointer(e))
}
//go:linkname runtime_procPin sync.runtime_procPin
func runtime_procPin() int
//go:linkname runtime_procUnpin sync.runtime_procUnpin
func runtime_procUnpin()
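
A sketch of the (removed) generic Pool above; note that pin() panics unless both New and Evict are set:

package main

import (
	"fmt"

	"codeberg.org/gruf/go-pools"
)

func main() {
	p := pools.Pool{
		// New instantiates an item on pool miss.
		New: func() interface{} { return make([]byte, 0, 512) },
		// Evict must be non-nil even if it does nothing.
		Evict: func(interface{}) {},
	}

	buf := p.Get().([]byte)
	buf = append(buf, "scratch work"...)
	p.Put(buf[:0]) // place cleared item in the proc-local pool

	fmt.Println(p.LocalSize()) // 1
}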


@@ -1,63 +0,0 @@
package kv
import (
"context"
"errors"
"codeberg.org/gruf/go-mutexes"
"codeberg.org/gruf/go-store/v2/storage"
)
var ErrIteratorClosed = errors.New("store/kv: iterator closed")
// Iterator provides a read-only iterator to all the key-value
// pairs in a KVStore. While the iterator is open the store is read
// locked; you MUST release the iterator when you are finished with
// it.
//
// Please note:
// individual iterators are NOT concurrency safe, though it is safe to
// have multiple iterators running concurrently.
type Iterator struct {
store *KVStore // store is the linked KVStore
state *mutexes.LockState
entries []storage.Entry
index int
key string
}
// Next attempts to fetch the next key-value pair, the
// return value indicates whether another pair remains.
func (i *Iterator) Next() bool {
next := i.index + 1
if next >= len(i.entries) {
i.key = ""
return false
}
i.key = i.entries[next].Key
i.index = next
return true
}
// Key returns the current iterator key.
func (i *Iterator) Key() string {
return i.key
}
// Value returns the current iterator value at key.
func (i *Iterator) Value(ctx context.Context) ([]byte, error) {
if i.store == nil {
return nil, ErrIteratorClosed
}
return i.store.get(i.state.RLock, ctx, i.key)
}
// Release will release the store read-lock, and close this iterator.
func (i *Iterator) Release() {
i.state.UnlockMap()
i.state = nil
i.store = nil
i.key = ""
i.entries = nil
i.index = 0
}
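
A sketch of iterating a KVStore with the (removed) Iterator type; the store-wide read lock is held until Release:

package example

import (
	"context"
	"fmt"
	"strings"

	"codeberg.org/gruf/go-store/v2/kv"
)

// dumpUserKeys iterates all "user:"-prefixed entries (prefix illustrative).
func dumpUserKeys(ctx context.Context, store *kv.KVStore) error {
	iter, err := store.Iterator(ctx, func(key string) bool {
		return strings.HasPrefix(key, "user:")
	})
	if err != nil {
		return err
	}
	// Release drops the store-wide read lock; it MUST be called.
	defer iter.Release()

	for iter.Next() {
		val, err := iter.Value(ctx)
		if err != nil {
			return err
		}
		fmt.Println(iter.Key(), len(val))
	}
	return nil
}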


@@ -1,116 +0,0 @@
package kv
import (
"context"
"errors"
"io"
"codeberg.org/gruf/go-mutexes"
)
// ErrStateClosed is returned on further calls to states after calling Release().
var ErrStateClosed = errors.New("store/kv: state closed")
// StateRO provides a read-only window to the store. While this
// state is active during the Read() function window, the entire
// store will be read-locked. The state is thread-safe for concurrent
// use UNTIL the moment that your supplied function to Read() returns.
type StateRO struct {
store *KVStore
state *mutexes.LockState
}
// Get: see KVStore.Get(). Returns error if state already closed.
func (st *StateRO) Get(ctx context.Context, key string) ([]byte, error) {
if st.store == nil {
return nil, ErrStateClosed
}
return st.store.get(st.state.RLock, ctx, key)
}
// GetStream: see KVStore.GetStream(). Returns error if state already closed.
func (st *StateRO) GetStream(ctx context.Context, key string) (io.ReadCloser, error) {
if st.store == nil {
return nil, ErrStateClosed
}
return st.store.getStream(st.state.RLock, ctx, key)
}
// Has: see KVStore.Has(). Returns error if state already closed.
func (st *StateRO) Has(ctx context.Context, key string) (bool, error) {
if st.store == nil {
return false, ErrStateClosed
}
return st.store.has(st.state.RLock, ctx, key)
}
// Release will release the store read-lock, and close this state.
func (st *StateRO) Release() {
st.state.UnlockMap()
st.state = nil
st.store = nil
}
// StateRW provides a read-write window to the store. While this
// state is active during the Update() function window, the entire
// store will be locked. The state is thread-safe for concurrent
// use UNTIL the moment that your supplied function to Update() returns.
type StateRW struct {
store *KVStore
state *mutexes.LockState
}
// Get: see KVStore.Get(). Returns error if state already closed.
func (st *StateRW) Get(ctx context.Context, key string) ([]byte, error) {
if st.store == nil {
return nil, ErrStateClosed
}
return st.store.get(st.state.RLock, ctx, key)
}
// GetStream: see KVStore.GetStream(). Returns error if state already closed.
func (st *StateRW) GetStream(ctx context.Context, key string) (io.ReadCloser, error) {
if st.store == nil {
return nil, ErrStateClosed
}
return st.store.getStream(st.state.RLock, ctx, key)
}
// Put: see KVStore.Put(). Returns error if state already closed.
func (st *StateRW) Put(ctx context.Context, key string, value []byte) (int, error) {
if st.store == nil {
return 0, ErrStateClosed
}
return st.store.put(st.state.Lock, ctx, key, value)
}
// PutStream: see KVStore.PutStream(). Returns error if state already closed.
func (st *StateRW) PutStream(ctx context.Context, key string, r io.Reader) (int64, error) {
if st.store == nil {
return 0, ErrStateClosed
}
return st.store.putStream(st.state.Lock, ctx, key, r)
}
// Has: see KVStore.Has(). Returns error if state already closed.
func (st *StateRW) Has(ctx context.Context, key string) (bool, error) {
if st.store == nil {
return false, ErrStateClosed
}
return st.store.has(st.state.RLock, ctx, key)
}
// Delete: see KVStore.Delete(). Returns error if state already closed.
func (st *StateRW) Delete(ctx context.Context, key string) error {
if st.store == nil {
return ErrStateClosed
}
return st.store.delete(st.state.Lock, ctx, key)
}
// Release will release the store lock, and close this state.
func (st *StateRW) Release() {
st.state.UnlockMap()
st.state = nil
st.store = nil
}
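
A sketch of the read-only state in use via KVStore.Read() (defined in the next file), performing several reads under one store-wide read lock:

package example

import (
	"context"

	"codeberg.org/gruf/go-store/v2/kv"
)

// readBoth fetches two values under a single store read lock.
func readBoth(ctx context.Context, store *kv.KVStore) ([]byte, []byte, error) {
	state := store.Read()
	defer state.Release() // unlocks the store; the state is unusable after

	a, err := state.Get(ctx, "key-a")
	if err != nil {
		return nil, nil, err
	}
	b, err := state.Get(ctx, "key-b")
	if err != nil {
		return nil, nil, err
	}
	return a, b, nil
}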


@@ -1,267 +0,0 @@
package kv
import (
"context"
"io"
"codeberg.org/gruf/go-iotools"
"codeberg.org/gruf/go-mutexes"
"codeberg.org/gruf/go-store/v2/storage"
)
// KVStore is a very simple, yet performant key-value store
type KVStore struct {
mu mutexes.MutexMap // map of keys to mutexes to protect key access
st storage.Storage // underlying storage implementation
}
func OpenDisk(path string, cfg *storage.DiskConfig) (*KVStore, error) {
// Attempt to open disk storage
storage, err := storage.OpenDisk(path, cfg)
if err != nil {
return nil, err
}
// Return new KVStore
return OpenStorage(storage)
}
func OpenBlock(path string, cfg *storage.BlockConfig) (*KVStore, error) {
// Attempt to open block storage
storage, err := storage.OpenBlock(path, cfg)
if err != nil {
return nil, err
}
// Return new KVStore
return OpenStorage(storage)
}
func OpenMemory(overwrites bool) *KVStore {
return New(storage.OpenMemory(100, overwrites))
}
func OpenS3(endpoint string, bucket string, cfg *storage.S3Config) (*KVStore, error) {
// Attempt to open S3 storage
storage, err := storage.OpenS3(endpoint, bucket, cfg)
if err != nil {
return nil, err
}
// Return new KVStore
return OpenStorage(storage)
}
// OpenStorage will return a new KVStore instance based on Storage, performing an initial storage.Clean().
func OpenStorage(storage storage.Storage) (*KVStore, error) {
// Perform initial storage clean
err := storage.Clean(context.Background())
if err != nil {
return nil, err
}
// Return new KVStore
return New(storage), nil
}
// New will simply return a new KVStore instance based on Storage.
func New(storage storage.Storage) *KVStore {
if storage == nil {
panic("nil storage")
}
return &KVStore{
mu: mutexes.NewMap(-1, -1),
st: storage,
}
}
// RLock acquires a read-lock on supplied key, returning unlock function.
func (st *KVStore) RLock(key string) (runlock func()) {
return st.mu.RLock(key)
}
// Lock acquires a write-lock on supplied key, returning unlock function.
func (st *KVStore) Lock(key string) (unlock func()) {
return st.mu.Lock(key)
}
// Get fetches the bytes for supplied key in the store.
func (st *KVStore) Get(ctx context.Context, key string) ([]byte, error) {
return st.get(st.RLock, ctx, key)
}
// get performs the underlying logic for KVStore.Get(), using supplied read lock func to allow use with states.
func (st *KVStore) get(rlock func(string) func(), ctx context.Context, key string) ([]byte, error) {
// Acquire read lock for key
runlock := rlock(key)
defer runlock()
// Read file bytes from storage
return st.st.ReadBytes(ctx, key)
}
// GetStream fetches a ReadCloser for the bytes at the supplied key in the store.
func (st *KVStore) GetStream(ctx context.Context, key string) (io.ReadCloser, error) {
return st.getStream(st.RLock, ctx, key)
}
// getStream performs the underlying logic for KVStore.GetStream(), using supplied read lock func to allow use with states.
func (st *KVStore) getStream(rlock func(string) func(), ctx context.Context, key string) (io.ReadCloser, error) {
// Acquire read lock for key
runlock := rlock(key)
// Attempt to open stream for read
rd, err := st.st.ReadStream(ctx, key)
if err != nil {
runlock()
return nil, err
}
var unlocked bool
// Wrap readcloser to call our own callback
return iotools.ReadCloser(rd, iotools.CloserFunc(func() error {
if !unlocked {
unlocked = true
defer runlock()
}
return rd.Close()
})), nil
}
// Put places the bytes at the supplied key in the store.
func (st *KVStore) Put(ctx context.Context, key string, value []byte) (int, error) {
return st.put(st.Lock, ctx, key, value)
}
// put performs the underlying logic for KVStore.Put(), using supplied lock func to allow use with states.
func (st *KVStore) put(lock func(string) func(), ctx context.Context, key string, value []byte) (int, error) {
// Acquire write lock for key
unlock := lock(key)
defer unlock()
// Write file bytes to storage
return st.st.WriteBytes(ctx, key, value)
}
// PutStream writes the bytes from the supplied Reader at the supplied key in the store.
func (st *KVStore) PutStream(ctx context.Context, key string, r io.Reader) (int64, error) {
return st.putStream(st.Lock, ctx, key, r)
}
// putStream performs the underlying logic for KVStore.PutStream(), using supplied lock func to allow use with states.
func (st *KVStore) putStream(lock func(string) func(), ctx context.Context, key string, r io.Reader) (int64, error) {
// Acquire write lock for key
unlock := lock(key)
defer unlock()
// Write file stream to storage
return st.st.WriteStream(ctx, key, r)
}
// Has checks whether the supplied key exists in the store.
func (st *KVStore) Has(ctx context.Context, key string) (bool, error) {
return st.has(st.RLock, ctx, key)
}
// has performs the underlying logic for KVStore.Has(), using supplied read lock func to allow use with states.
func (st *KVStore) has(rlock func(string) func(), ctx context.Context, key string) (bool, error) {
// Acquire read lock for key
runlock := rlock(key)
defer runlock()
// Stat file in storage
return st.st.Stat(ctx, key)
}
// Delete removes value at supplied key from the store.
func (st *KVStore) Delete(ctx context.Context, key string) error {
return st.delete(st.Lock, ctx, key)
}
// delete performs the underlying logic for KVStore.Delete(), using supplied lock func to allow use with states.
func (st *KVStore) delete(lock func(string) func(), ctx context.Context, key string) error {
// Acquire write lock for key
unlock := lock(key)
defer unlock()
// Remove file from storage
return st.st.Remove(ctx, key)
}
// Iterator returns an Iterator for key-value pairs in the store, using supplied match function
func (st *KVStore) Iterator(ctx context.Context, matchFn func(string) bool) (*Iterator, error) {
if matchFn == nil {
// By default simply match all keys
matchFn = func(string) bool { return true }
}
// Get store read lock state
state := st.mu.RLockMap()
var entries []storage.Entry
walkFn := func(ctx context.Context, entry storage.Entry) error {
// Ignore unmatched entries
if !matchFn(entry.Key) {
return nil
}
// Add to entries
entries = append(entries, entry)
return nil
}
// Collate keys in storage with our walk function
err := st.st.WalkKeys(ctx, storage.WalkKeysOptions{WalkFn: walkFn})
if err != nil {
state.UnlockMap()
return nil, err
}
// Return new iterator
return &Iterator{
store: st,
state: state,
entries: entries,
index: -1,
key: "",
}, nil
}
// Read provides a read-only window to the store, holding it in a read-locked state until release.
func (st *KVStore) Read() *StateRO {
state := st.mu.RLockMap()
return &StateRO{store: st, state: state}
}
// ReadFn provides a read-only window to the store, holding it in a read-locked state until fn returns.
func (st *KVStore) ReadFn(fn func(*StateRO)) {
// Acquire read-only state
state := st.Read()
defer state.Release()
// Pass to fn
fn(state)
}
// Update provides a read-write window to the store, holding it in a write-locked state until release.
func (st *KVStore) Update() *StateRW {
state := st.mu.LockMap()
return &StateRW{store: st, state: state}
}
// UpdateFn provides a read-write window to the store, holding it in a write-locked state until fn return.
func (st *KVStore) UpdateFn(fn func(*StateRW)) {
// Acquire read-write state
state := st.Update()
defer state.Release()
// Pass to fn
fn(state)
}
// Close will close the underlying storage; the mutex map locking (e.g. RLock(), Lock()) will continue to function.
func (st *KVStore) Close() error {
return st.st.Close()
}
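
Finally, a sketch showing the per-key lock lifetime of GetStream: the read lock taken on the key is only released when the returned ReadCloser is closed:

package example

import (
	"context"
	"io"
	"os"

	"codeberg.org/gruf/go-store/v2/kv"
)

// streamOut copies a stored value to stdout.
func streamOut(ctx context.Context, store *kv.KVStore, key string) error {
	rc, err := store.GetStream(ctx, key)
	if err != nil {
		return err
	}
	// Close both closes the stream and releases the per-key read lock.
	defer rc.Close()

	_, err = io.Copy(os.Stdout, rc)
	return err
}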


@@ -0,0 +1,38 @@
package storage_test
import (
"os"
"testing"
"codeberg.org/gruf/go-store/v2/storage"
)
func TestBlockStorage(t *testing.T) {
// Set test path, defer deleting it
testPath := "blockstorage.test"
t.Cleanup(func() {
os.RemoveAll(testPath)
})
// Open new blockstorage instance
st, err := storage.OpenBlock(testPath, nil)
if err != nil {
t.Fatalf("Failed opening storage: %v", err)
}
// Attempt multi open of same instance
_, err = storage.OpenBlock(testPath, nil)
if err == nil {
t.Fatal("Successfully opened a locked storage instance")
}
// Run the storage tests
testStorage(t, st)
// Test reopen storage path
st, err = storage.OpenBlock(testPath, nil)
if err != nil {
t.Fatalf("Failed opening storage: %v", err)
}
st.Close()
}