diff --git a/go.mod b/go.mod index dc216caef..90f37953c 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( codeberg.org/gruf/go-runners v1.6.3 codeberg.org/gruf/go-sched v1.2.4 codeberg.org/gruf/go-storage v0.2.0 - codeberg.org/gruf/go-structr v0.8.11 + codeberg.org/gruf/go-structr v0.9.0 codeberg.org/superseriousbusiness/activity v1.12.0-gts codeberg.org/superseriousbusiness/exif-terminator v0.10.0 codeberg.org/superseriousbusiness/httpsig v1.3.0-SSB @@ -95,7 +95,7 @@ require ( require ( codeberg.org/gruf/go-fastpath/v2 v2.0.0 // indirect - codeberg.org/gruf/go-mangler v1.4.1 // indirect + codeberg.org/gruf/go-mangler v1.4.3 // indirect codeberg.org/gruf/go-maps v1.0.4 // indirect codeberg.org/superseriousbusiness/go-jpeg-image-structure/v2 v2.1.0-SSB // indirect codeberg.org/superseriousbusiness/go-png-image-structure/v2 v2.1.0-SSB // indirect diff --git a/go.sum b/go.sum index da7496636..5c7f9966a 100644 --- a/go.sum +++ b/go.sum @@ -24,8 +24,8 @@ codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f h1:Ss6Z+vygy+jOGhj9 codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f/go.mod h1:F9pl4h34iuVN7kucKam9fLwsItTc+9mmaKt7pNXRd/4= codeberg.org/gruf/go-loosy v0.0.0-20231007123304-bb910d1ab5c4 h1:IXwfoU7f2whT6+JKIKskNl/hBlmWmnF1vZd84Eb3cyA= codeberg.org/gruf/go-loosy v0.0.0-20231007123304-bb910d1ab5c4/go.mod h1:fiO8HE1wjZCephcYmRRsVnNI/i0+mhy44Z5dQalS0rM= -codeberg.org/gruf/go-mangler v1.4.1 h1:Dv58jFfy9On49L11ji6tpADUknwoJA46iaiZvnNXecs= -codeberg.org/gruf/go-mangler v1.4.1/go.mod h1:mDmW8Ia352RvNFaXoP9K60TgcmCZJtX0j6wm3vjAsJE= +codeberg.org/gruf/go-mangler v1.4.3 h1:mdtcbGDyj0AS9LE/H1imQreICVn6BQiks554jzdAozc= +codeberg.org/gruf/go-mangler v1.4.3/go.mod h1:mDmW8Ia352RvNFaXoP9K60TgcmCZJtX0j6wm3vjAsJE= codeberg.org/gruf/go-maps v1.0.4 h1:K+Ww4vvR3TZqm5jqrKVirmguZwa3v1VUvmig2SE8uxY= codeberg.org/gruf/go-maps v1.0.4/go.mod h1:ASX7osM7kFwt5O8GfGflcFjrwYGD8eIuRLl/oMjhEi8= codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760 h1:m2/UCRXhjDwAg4vyji6iKCpomKw6P4PmBOUi5DvAMH4= @@ -38,8 +38,8 @@ codeberg.org/gruf/go-sched v1.2.4 h1:ddBB9o0D/2oU8NbQ0ldN5aWxogpXPRBATWi58+p++Hw codeberg.org/gruf/go-sched v1.2.4/go.mod h1:wad6l+OcYGWMA2TzNLMmLObsrbBDxdJfEy5WvTgBjNk= codeberg.org/gruf/go-storage v0.2.0 h1:mKj3Lx6AavEkuXXtxqPhdq+akW9YwrnP16yQBF7K5ZI= codeberg.org/gruf/go-storage v0.2.0/go.mod h1:o3GzMDE5QNUaRnm/daUzFqvuAaC4utlgXDXYO79sWKU= -codeberg.org/gruf/go-structr v0.8.11 h1:I3cQCHpK3fQSXWaaUfksAJRN4+efULiuF11Oi/m8c+o= -codeberg.org/gruf/go-structr v0.8.11/go.mod h1:zkoXVrAnKosh8VFAsbP/Hhs8FmLBjbVVy5w/Ngm8ApM= +codeberg.org/gruf/go-structr v0.9.0 h1:UYw8igp3I4UBnlsRyDR2AbF3g7NPEP7HBrQs1I15218= +codeberg.org/gruf/go-structr v0.9.0/go.mod h1:mUvBvn4q1iM/I+d3Fj1w/gxGUU/Ve9GpiNo6dPmBJnk= codeberg.org/superseriousbusiness/activity v1.12.0-gts h1:frNGTENLmOIQHKfOw/jj3UVj/GjHBljDx+CFAAK+m6Q= codeberg.org/superseriousbusiness/activity v1.12.0-gts/go.mod h1:enxU1Lva4OcK6b/NBXscoHSEgEMsKJvdHrQFifQxp4o= codeberg.org/superseriousbusiness/exif-terminator v0.10.0 h1:FiLX/AK07tzceS36I+kOP2aEH+aytjPSIlFoYePMEyg= diff --git a/vendor/codeberg.org/gruf/go-mangler/helpers.go b/vendor/codeberg.org/gruf/go-mangler/helpers.go index 4e37e1344..f64663e62 100644 --- a/vendor/codeberg.org/gruf/go-mangler/helpers.go +++ b/vendor/codeberg.org/gruf/go-mangler/helpers.go @@ -1,3 +1,5 @@ +//go:build go1.19 || go1.20 || go1.21 || go1.22 || go1.23 + package mangler import ( @@ -35,8 +37,17 @@ func append_uint64(b []byte, u uint64) []byte { } type typecontext struct { - ntype reflect.Type - rtype 
reflect.Type + isptr bool + direct bool + ntype reflect.Type + rtype reflect.Type +} + +func (ctx *typecontext) set_nested(direct bool) { + ctx.direct = ctx.direct && direct && !ctx.isptr + ctx.ntype = ctx.rtype + ctx.rtype = nil + ctx.isptr = false } func deref_ptr_mangler(ctx typecontext, mangle Mangler, n uint) Mangler { @@ -44,16 +55,14 @@ func deref_ptr_mangler(ctx typecontext, mangle Mangler, n uint) Mangler { panic("bad input") } - // Non-nested value types, - // i.e. just direct ptrs to - // primitives require one - // less dereference to ptr. - if ctx.ntype == nil { + // If this is a direct value type, i.e. non-nested primitive, + // or part of a single-field struct / single element array + // then it can be treated as a direct ptr with 1 less deref. + if ctx.direct { n-- } return func(buf []byte, ptr unsafe.Pointer) []byte { - // Deref n number times. for i := n; i > 0; i-- { @@ -117,6 +126,15 @@ func iter_array_mangler(ctx typecontext, mangle Mangler) Mangler { // no. array elements. n := ctx.ntype.Len() + // Optimize + // easy cases. + switch n { + case 0: + return empty_mangler + case 1: + return mangle + } + // memory size of elem. esz := ctx.rtype.Size() @@ -139,19 +157,27 @@ func iter_array_mangler(ctx typecontext, mangle Mangler) Mangler { } func iter_struct_mangler(ctx typecontext, manglers []Mangler) Mangler { - if ctx.rtype == nil || len(manglers) != ctx.rtype.NumField() { + if ctx.rtype == nil || len(manglers) != ctx.ntype.NumField() { panic("bad input") } + // Optimized easy cases. + switch len(manglers) { + case 0: + return empty_mangler + case 1: + return manglers[0] + } + type field struct { mangle Mangler offset uintptr } // Bundle together the fields and manglers. - fields := make([]field, ctx.rtype.NumField()) + fields := make([]field, ctx.ntype.NumField()) for i := range fields { - rfield := ctx.rtype.FieldByIndex([]int{i}) + rfield := ctx.ntype.Field(i) fields[i].offset = rfield.Offset fields[i].mangle = manglers[i] if fields[i].mangle == nil { @@ -178,6 +204,10 @@ func iter_struct_mangler(ctx typecontext, manglers []Mangler) Mangler { } } +func empty_mangler(buf []byte, _ unsafe.Pointer) []byte { + return buf +} + // array_at returns ptr to index in array at ptr, given element size. func array_at(ptr unsafe.Pointer, esz uintptr, i int) unsafe.Pointer { return unsafe.Pointer(uintptr(ptr) + esz*uintptr(i)) diff --git a/vendor/codeberg.org/gruf/go-mangler/load.go b/vendor/codeberg.org/gruf/go-mangler/load.go index bc79d381e..ba2b9351b 100644 --- a/vendor/codeberg.org/gruf/go-mangler/load.go +++ b/vendor/codeberg.org/gruf/go-mangler/load.go @@ -8,6 +8,7 @@ import ( // function will be returned for given value interface{} and reflected type. Else panics. func loadMangler(t reflect.Type) Mangler { ctx := typecontext{rtype: t} + ctx.direct = true // Load mangler fn mng := load(ctx) @@ -103,6 +104,9 @@ func loadReflectPtr(ctx typecontext) Mangler { n++ } + // Set ptr type. + ctx.isptr = true + // Search for elemn type mangler. if mng := load(ctx); mng != nil { return deref_ptr_mangler(ctx, mng, n) @@ -157,11 +161,13 @@ func loadReflectKnownSlice(ctx typecontext) Mangler { // loadReflectSlice ... func loadReflectSlice(ctx typecontext) Mangler { - // Set nesting type. - ctx.ntype = ctx.rtype // Get nested element type. - ctx.rtype = ctx.rtype.Elem() + elem := ctx.rtype.Elem() + + // Set this as nested type. 
+	ctx.set_nested(false)
+	ctx.rtype = elem
 
 	// Preferably look for known slice mangler func
 	if mng := loadReflectKnownSlice(ctx); mng != nil {
@@ -178,11 +184,14 @@
 
 // loadReflectArray ...
 func loadReflectArray(ctx typecontext) Mangler {
-	// Set nesting type.
-	ctx.ntype = ctx.rtype
 
 	// Get nested element type.
-	ctx.rtype = ctx.rtype.Elem()
+	elem := ctx.rtype.Elem()
+
+	// Set this as a nested value type.
+	direct := ctx.rtype.Len() <= 1
+	ctx.set_nested(direct)
+	ctx.rtype = elem
 
 	// Use manglers for nested iteration.
 	if mng := load(ctx); mng != nil {
@@ -196,17 +205,15 @@ func loadReflectStruct(ctx typecontext) Mangler {
 	var mngs []Mangler
 
-	// Set nesting type.
-	ctx.ntype = ctx.rtype
+	// Set this as a nested value type.
+	direct := ctx.rtype.NumField() <= 1
+	ctx.set_nested(direct)
 
 	// Gather manglers for all fields.
 	for i := 0; i < ctx.ntype.NumField(); i++ {
 
-		// Field typectx.
-		ctx := typecontext{
-			ntype: ctx.ntype,
-			rtype: ctx.ntype.Field(i).Type,
-		}
+		// Update context with field at index.
+		ctx.rtype = ctx.ntype.Field(i).Type
 
 		// Load mangler.
 		mng := load(ctx)
diff --git a/vendor/codeberg.org/gruf/go-structr/README.md b/vendor/codeberg.org/gruf/go-structr/README.md
index c8e1585b3..f0b235c98 100644
--- a/vendor/codeberg.org/gruf/go-structr/README.md
+++ b/vendor/codeberg.org/gruf/go-structr/README.md
@@ -2,141 +2,9 @@
 
 A library with a series of performant data types with automated struct value indexing. Indexing is supported via arbitrary combinations of fields, and in the case of the cache type, negative results (errors!) are also supported.
 
-Under the hood, go-structr maintains a hashmap per index, where each hashmap is a hashmap keyed by serialized input key type. This is handled by the incredibly performant serialization library [go-mangler](https://codeberg.org/gruf/go-mangler), which at this point in time supports just about **any** arbitrary type, so feel free to index by *anything*!
+Under the hood, go-structr maintains a hashmap per index, where each hashmap is keyed by a serialized form of the input key type. This is handled by the incredibly performant serialization library [go-mangler](https://codeberg.org/gruf/go-mangler), which at this point in time supports *most* arbitrary types (other than maps, channels, and functions), so feel free to index by almost *anything*!
-
-## Cache example
-
-```golang
-type Cached struct {
-	Username    string
-	Domain      string
-	URL         string
-	CountryCode int
-}
-
-var c structr.Cache[*Cached]
-
-c.Init(structr.CacheConfig[*Cached]{
-
-	// Fields this cached struct type
-	// will be indexed and stored under.
-	Indices: []structr.IndexConfig{
-		{Fields: "Username,Domain", AllowZero: true},
-		{Fields: "URL"},
-		{Fields: "CountryCode", Multiple: true},
-	},
-
-	// Maximum LRU cache size before
-	// new entries cause evictions.
-	MaxSize: 1000,
-
-	// User provided value copy function to
-	// reduce need for reflection + ensure
-	// concurrency safety for returned values.
-	Copy: func(c *Cached) *Cached {
-		c2 := new(Cached)
-		*c2 = *c
-		return c2
-	},
-
-	// User defined invalidation hook.
-	Invalidate: func(c *Cached) {
-		log.Println("invalidated:", c)
-	},
-})
-
-// Access and store indexes ahead-of-time for perf.
-usernameDomainIndex := c.Index("Username,Domain")
-urlIndex := c.Index("URL")
-countryCodeIndex := c.Index("CountryCode")
-
-var url string
-
-// Generate URL index key.
-urlKey := urlIndex.Key(url) - -// Load value from cache, with callback function to hydrate -// cache if value cannot be found under index name with key. -// Negative (error) results are also cached, with user definable -// errors to ignore from caching (e.g. context deadline errs). -value, err := c.LoadOne(urlIndex, func() (*Cached, error) { - return dbType.SelectByURL(url) -}, urlKey) -if err != nil { - return nil, err -} - -// Store value in cache, only if provided callback -// function returns without error. Passes value through -// invalidation hook regardless of error return value. -// -// On success value will be automatically added to and -// accessible under all initially configured indices. -if err := c.Store(value, func() error { - return dbType.Insert(value) -}); err != nil { - return nil, err -} - -// Generate country code index key. -countryCodeKey := countryCodeIndex.Key(42) - -// Invalidate all cached results stored under -// provided index name with give field value(s). -c.Invalidate(countryCodeIndex, countryCodeKey) -``` - -## Queue example - -```golang - -type Queued struct{ - Username string - Domain string - URL string - CountryCode int -} - -var q structr.Queue[*Queued] - -q.Init(structr.QueueConfig[*Cached]{ - - // Fields this queued struct type - // will be indexed and stored under. - Indices: []structr.IndexConfig{ - {Fields: "Username,Domain", AllowZero: true}, - {Fields: "URL"}, - {Fields: "CountryCode", Multiple: true}, - }, - - // User defined pop hook. - Pop: func(c *Cached) { - log.Println("popped:", c) - }, -}) - -// Access and store indexes ahead-of-time for perf. -usernameDomainIndex := q.Index("Username,Domain") -urlIndex := q.Index("URL") -countryCodeIndex := q.Index("CountryCode") - -// ... -q.PushBack(Queued{ - Username: "billybob", - Domain: "google.com", - URL: "https://some.website.here", - CountryCode: 42, -}) - -// ... -queued, ok := q.PopFront() - -// Generate country code index key. -countryCodeKey := countryCodeIndex.Key(42) - -// ... -queuedByCountry := q.Pop(countryCodeIndex, countryCodeKey) -``` +See the [docs](https://pkg.go.dev/codeberg.org/gruf/go-structr) for more API information. ## Notes diff --git a/vendor/codeberg.org/gruf/go-structr/TODO.md b/vendor/codeberg.org/gruf/go-structr/TODO.md new file mode 100644 index 000000000..41f99a619 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-structr/TODO.md @@ -0,0 +1,5 @@ +## Timeline Todos + +- optimize store() to operate on sorted list + +- finish writing code comments \ No newline at end of file diff --git a/vendor/codeberg.org/gruf/go-structr/cache.go b/vendor/codeberg.org/gruf/go-structr/cache.go index 3705d7c7c..4b32d6821 100644 --- a/vendor/codeberg.org/gruf/go-structr/cache.go +++ b/vendor/codeberg.org/gruf/go-structr/cache.go @@ -17,7 +17,7 @@ func DefaultIgnoreErr(err error) bool { } // CacheConfig defines config vars -// for initializing a struct cache. +// for initializing a Cache{} type. type CacheConfig[StructType any] struct { // IgnoreErr defines which errors to @@ -70,14 +70,13 @@ type Cache[StructType any] struct { indices []Index // max cache size, imposes size - // limit on the lruList in order + // limit on the lru list in order // to evict old entries. maxSize int // protective mutex, guards: - // - Cache{}.lruList + // - Cache{}.* // - Index{}.data - // - Cache{} hook fns mutex sync.Mutex } @@ -105,6 +104,7 @@ func (c *Cache[T]) Init(config CacheConfig[T]) { // Safely copy over // provided config. 
c.mutex.Lock() + defer c.mutex.Unlock() c.indices = make([]Index, len(config.Indices)) for i, cfg := range config.Indices { c.indices[i].ptr = unsafe.Pointer(c) @@ -114,7 +114,6 @@ func (c *Cache[T]) Init(config CacheConfig[T]) { c.copy = config.Copy c.invalid = config.Invalidate c.maxSize = config.MaxSize - c.mutex.Unlock() } // Index selects index with given name from cache, else panics. @@ -161,6 +160,7 @@ func (c *Cache[T]) Get(index *Index, keys ...Key) []T { // Concatenate all *values* from cached items. index.get(keys[i].key, func(item *indexed_item) { if value, ok := item.data.(T); ok { + // Append value COPY. value = c.copy(value) values = append(values, value) @@ -431,6 +431,7 @@ func (c *Cache[T]) Store(value T, store func() error) error { } // Invalidate invalidates all results stored under index keys. +// Note that if set, this will call the invalidate hook on each. func (c *Cache[T]) Invalidate(index *Index, keys ...Key) { if index == nil { panic("no index given") @@ -455,7 +456,7 @@ func (c *Cache[T]) Invalidate(index *Index, keys ...Key) { values = append(values, value) } - // Delete cached. + // Delete item. c.delete(item) }) } @@ -478,6 +479,7 @@ func (c *Cache[T]) Invalidate(index *Index, keys ...Key) { // Trim will truncate the cache to ensure it // stays within given percentage of MaxSize. func (c *Cache[T]) Trim(perc float64) { + // Acquire lock. c.mutex.Lock() @@ -572,7 +574,14 @@ func (c *Cache[T]) store_value(index *Index, key string, value T) { if index != nil { // Append item to index a key // was already generated for. - index.append(&c.lru, key, item) + evicted := index.append(key, item) + if evicted != nil { + + // This item is no longer + // indexed, remove from list. + c.lru.remove(&evicted.elem) + free_indexed_item(evicted) + } } // Get ptr to value data. @@ -593,9 +602,6 @@ func (c *Cache[T]) store_value(index *Index, key string, value T) { // Extract fields comprising index key. parts := extract_fields(ptr, idx.fields) - if parts == nil { - continue - } // Calculate index key. key := idx.key(buf, parts) @@ -604,15 +610,29 @@ func (c *Cache[T]) store_value(index *Index, key string, value T) { } // Append item to this index. - idx.append(&c.lru, key, item) + evicted := idx.append(key, item) + if evicted != nil { + + // This item is no longer + // indexed, remove from list. + c.lru.remove(&evicted.elem) + free_indexed_item(evicted) + } + } + + // Done with buf. + free_buffer(buf) + + if len(item.indexed) == 0 { + // Item was not stored under + // any index. Drop this item. + free_indexed_item(item) + return } // Add item to main lru list. c.lru.push_front(&item.elem) - // Done with buf. - free_buffer(buf) - if c.lru.len > c.maxSize { // Cache has hit max size! // Drop the oldest element. @@ -643,7 +663,14 @@ func (c *Cache[T]) store_error(index *Index, key string, err error) { // Append item to index a key // was already generated for. - index.append(&c.lru, key, item) + evicted := index.append(key, item) + if evicted != nil { + + // This item is no longer + // indexed, remove from list. + c.lru.remove(&evicted.elem) + free_indexed_item(evicted) + } // Add item to main lru list. c.lru.push_front(&item.elem) @@ -657,19 +684,23 @@ func (c *Cache[T]) store_error(index *Index, key string, err error) { } } -func (c *Cache[T]) delete(item *indexed_item) { - for len(item.indexed) != 0 { +func (c *Cache[T]) delete(i *indexed_item) { + for len(i.indexed) != 0 { // Pop last indexed entry from list. 
- entry := item.indexed[len(item.indexed)-1] - item.indexed = item.indexed[:len(item.indexed)-1] + entry := i.indexed[len(i.indexed)-1] + i.indexed[len(i.indexed)-1] = nil + i.indexed = i.indexed[:len(i.indexed)-1] - // Drop index_entry from index. - entry.index.delete_entry(entry) + // Get entry's index. + index := entry.index + + // Drop this index_entry. + index.delete_entry(entry) } - // Drop entry from lru list. - c.lru.remove(&item.elem) + // Drop from lru list. + c.lru.remove(&i.elem) - // Free now-unused item. - free_indexed_item(item) + // Free unused item. + free_indexed_item(i) } diff --git a/vendor/codeberg.org/gruf/go-structr/index.go b/vendor/codeberg.org/gruf/go-structr/index.go index 558832da9..eed2e4eea 100644 --- a/vendor/codeberg.org/gruf/go-structr/index.go +++ b/vendor/codeberg.org/gruf/go-structr/index.go @@ -168,7 +168,7 @@ func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) { // Initialize store for // index_entry lists. - i.data.init(cap) + i.data.Init(cap) } // get_one will fetch one indexed item under key. @@ -248,7 +248,7 @@ func (i *Index) key(buf *byteutil.Buffer, parts []unsafe.Pointer) string { // doubly-linked-list in index hashmap. this handles case of // overwriting "unique" index entries, and removes from given // outer linked-list in the case that it is no longer indexed. -func (i *Index) append(ll *list, key string, item *indexed_item) { +func (i *Index) append(key string, item *indexed_item) (evicted *indexed_item) { // Look for existing. l := i.data.Get(key) @@ -267,17 +267,16 @@ func (i *Index) append(ll *list, key string, item *indexed_item) { // Drop index from inner item, // catching the evicted item. e := (*index_entry)(elem.data) - evicted := e.item + evicted = e.item evicted.drop_index(e) // Free unused entry. free_index_entry(e) - if len(evicted.indexed) == 0 { - // Evicted item is not indexed, - // remove from outer linked list. - ll.remove(&evicted.elem) - free_indexed_item(evicted) + if len(evicted.indexed) != 0 { + // Evicted is still stored + // under index, don't return. + evicted = nil } } @@ -292,6 +291,7 @@ func (i *Index) append(ll *list, key string, item *indexed_item) { // Add entry to index list. l.push_front(&entry.elem) + return } // delete will remove all indexed items under key, passing each to hook. @@ -403,7 +403,7 @@ func new_index_entry() *index_entry { func free_index_entry(entry *index_entry) { if entry.elem.next != nil || entry.elem.prev != nil { - should_not_reach() + should_not_reach(false) return } entry.key = "" diff --git a/vendor/codeberg.org/gruf/go-structr/item.go b/vendor/codeberg.org/gruf/go-structr/item.go index 6178e18e3..12700fa87 100644 --- a/vendor/codeberg.org/gruf/go-structr/item.go +++ b/vendor/codeberg.org/gruf/go-structr/item.go @@ -37,7 +37,7 @@ func free_indexed_item(item *indexed_item) { if len(item.indexed) > 0 || item.elem.next != nil || item.elem.prev != nil { - should_not_reach() + should_not_reach(false) return } item.data = nil diff --git a/vendor/codeberg.org/gruf/go-structr/key.go b/vendor/codeberg.org/gruf/go-structr/key.go index 65bdba455..11e595b80 100644 --- a/vendor/codeberg.org/gruf/go-structr/key.go +++ b/vendor/codeberg.org/gruf/go-structr/key.go @@ -33,7 +33,7 @@ func (k Key) Values() []any { // Zero indicates a zero value key. 
func (k Key) Zero() bool { - return (k.raw == nil) + return (k.key == "") } var buf_pool sync.Pool diff --git a/vendor/codeberg.org/gruf/go-structr/list.go b/vendor/codeberg.org/gruf/go-structr/list.go index bf380aa26..7657472c7 100644 --- a/vendor/codeberg.org/gruf/go-structr/list.go +++ b/vendor/codeberg.org/gruf/go-structr/list.go @@ -43,7 +43,7 @@ func free_list(list *list) { if list.head != nil || list.tail != nil || list.len != 0 { - should_not_reach() + should_not_reach(false) return } list_pool.Put(list) @@ -107,6 +107,32 @@ func (l *list) move_back(elem *list_elem) { l.push_back(elem) } +// insert will insert given element at given location in list. +func (l *list) insert(elem *list_elem, at *list_elem) { + if elem == at { + return + } + + // Set new 'next'. + oldNext := at.next + at.next = elem + + // Link to 'at'. + elem.prev = at + + if oldNext == nil { + // Set new tail + l.tail = elem + } else { + // Link to 'prev'. + oldNext.prev = elem + elem.next = oldNext + } + + // Incr count + l.len++ +} + // remove will remove given elem from list. func (l *list) remove(elem *list_elem) { // Get linked elems. @@ -149,3 +175,11 @@ func (l *list) remove(elem *list_elem) { // Decr count l.len-- } + +// func (l *list) range_up(yield func(*list_elem) bool) { + +// } + +// func (l *list) range_down(yield func(*list_elem) bool) { + +// } diff --git a/vendor/codeberg.org/gruf/go-structr/map.go b/vendor/codeberg.org/gruf/go-structr/map.go index 316a8e528..6a718eae1 100644 --- a/vendor/codeberg.org/gruf/go-structr/map.go +++ b/vendor/codeberg.org/gruf/go-structr/map.go @@ -5,7 +5,7 @@ type hashmap struct { n int } -func (m *hashmap) init(cap int) { +func (m *hashmap) Init(cap int) { m.m = make(map[string]*list, cap) m.n = cap } @@ -43,6 +43,10 @@ func (m *hashmap) Compact() { // So we apply the inverse/2, once // $maxLoad/2 % of hmap is empty we // compact the map to drop buckets. + // + // TODO: this is still a relatively + // good approximation, but it has + // changed a little with swiss maps. if 2*16*diff > m.n*13 { // Create new map only big as required. diff --git a/vendor/codeberg.org/gruf/go-structr/ordered_list.bak b/vendor/codeberg.org/gruf/go-structr/ordered_list.bak deleted file mode 100644 index 46b56853f..000000000 --- a/vendor/codeberg.org/gruf/go-structr/ordered_list.bak +++ /dev/null @@ -1,180 +0,0 @@ -package structr - -import "sync" - -type Timeline[StructType any, PK comparable] struct { - - // hook functions. - pkey func(StructType) PK - gte func(PK, PK) bool - lte func(PK, PK) bool - copy func(StructType) StructType - - // main underlying - // ordered item list. - list list - - // indices used in storing passed struct - // types by user defined sets of fields. - indices []Index - - // protective mutex, guards: - // - TODO - mutex sync.Mutex -} - -func (t *Timeline[T, PK]) Init(config any) { - -} - -func (t *Timeline[T, PK]) Index(name string) *Index { - for i := range t.indices { - if t.indices[i].name == name { - return &t.indices[i] - } - } - panic("unknown index: " + name) -} - -func (t *Timeline[T, PK]) Insert(values ...T) { - -} - -func (t *Timeline[T, PK]) LoadTop(min, max PK, length int, load func(min, max PK, length int) ([]T, error)) ([]T, error) { - // Allocate expected no. values. - values := make([]T, 0, length) - - // Acquire lock. - t.mutex.Lock() - - // Wrap unlock to only do once. - unlock := once(t.mutex.Unlock) - defer unlock() - - // Check init'd. - if t.copy == nil { - panic("not initialized") - } - - // Iterate through linked list from top (i.e. head). 
- for next := t.list.head; next != nil; next = next.next { - - // Check if we've gathered - // enough values from timeline. - if len(values) >= length { - return values, nil - } - - item := (*indexed_item)(next.data) - value := item.data.(T) - pkey := t.pkey(value) - - // Check if below min. - if t.lte(pkey, min) { - continue - } - - // Update min. - min = pkey - - // Check if above max. - if t.gte(pkey, max) { - break - } - - // Append value copy. - value = t.copy(value) - values = append(values, value) - } -} - -func (t *Timeline[T, PK]) LoadBottom(min, max PK, length int, load func(min, max PK, length int) ([]T, error)) ([]T, error) { - // Allocate expected no. values. - values := make([]T, 0, length) - - // Acquire lock. - t.mutex.Lock() - - // Wrap unlock to only do once. - unlock := once(t.mutex.Unlock) - defer unlock() - - // Check init'd. - if t.copy == nil { - panic("not initialized") - } - - // Iterate through linked list from bottom (i.e. tail). - for next := t.list.tail; next != nil; next = next.prev { - - // Check if we've gathered - // enough values from timeline. - if len(values) >= length { - return values, nil - } - - item := (*indexed_item)(next.data) - value := item.data.(T) - pkey := t.pkey(value) - - // Check if above max. - if t.gte(pkey, max) { - continue - } - - // Update max. - max = pkey - - // Check if below min. - if t.lte(pkey, min) { - break - } - - // Append value copy. - value = t.copy(value) - values = append(values, value) - } - - // Done with - // the lock. - unlock() - - // Attempt to load values up to given length. - next, err := load(min, max, length-len(values)) - if err != nil { - return nil, err - } - - // Acquire lock. - t.mutex.Lock() - - // Store uncached values. - for i := range next { - t.store_value( - nil, "", - uncached[i], - ) - } - - // Done with lock. - t.mutex.Unlock() - - // Append uncached to return values. - values = append(values, next...) - - return values, nil -} - -func (t *Timeline[T, PK]) index(value T) *indexed_item { - pk := t.pkey(value) - - switch { - case t.list.len == 0: - - case pk < t.list.head.data: - } -} - -func (t *Timeline[T, PK]) delete(item *indexed_item) { - -} diff --git a/vendor/codeberg.org/gruf/go-structr/queue.go b/vendor/codeberg.org/gruf/go-structr/queue.go index dab925f95..2860e5f4b 100644 --- a/vendor/codeberg.org/gruf/go-structr/queue.go +++ b/vendor/codeberg.org/gruf/go-structr/queue.go @@ -57,13 +57,13 @@ func (q *Queue[T]) Init(config QueueConfig[T]) { // Safely copy over // provided config. q.mutex.Lock() + defer q.mutex.Unlock() q.indices = make([]Index, len(config.Indices)) for i, cfg := range config.Indices { q.indices[i].ptr = unsafe.Pointer(q) q.indices[i].init(t, cfg, 0) } q.pop = config.Pop - q.mutex.Unlock() } // Index selects index with given name from queue, else panics. @@ -133,7 +133,7 @@ func (q *Queue[T]) Pop(index *Index, keys ...Key) []T { value := item.data.(T) values = append(values, value) - // Delete queued. + // Delete item. q.delete(item) }) } @@ -253,7 +253,7 @@ func (q *Queue[T]) pop_n(n int, next func() *list_elem) []T { value := item.data.(T) values = append(values, value) - // Delete queued. + // Delete item. q.delete(item) } @@ -298,9 +298,6 @@ func (q *Queue[T]) index(value T) *indexed_item { // Extract fields comprising index key. parts := extract_fields(ptr, idx.fields) - if parts == nil { - continue - } // Calculate index key. key := idx.key(buf, parts) @@ -309,7 +306,14 @@ func (q *Queue[T]) index(value T) *indexed_item { } // Append item to this index. 
- idx.append(&q.queue, key, item) + evicted := idx.append(key, item) + if evicted != nil { + + // This item is no longer + // indexed, remove from list. + q.queue.remove(&evicted.elem) + free_indexed_item(evicted) + } } // Done with buf. @@ -318,11 +322,12 @@ func (q *Queue[T]) index(value T) *indexed_item { return item } -func (q *Queue[T]) delete(item *indexed_item) { - for len(item.indexed) != 0 { +func (q *Queue[T]) delete(i *indexed_item) { + for len(i.indexed) != 0 { // Pop last indexed entry from list. - entry := item.indexed[len(item.indexed)-1] - item.indexed = item.indexed[:len(item.indexed)-1] + entry := i.indexed[len(i.indexed)-1] + i.indexed[len(i.indexed)-1] = nil + i.indexed = i.indexed[:len(i.indexed)-1] // Get entry's index. index := entry.index @@ -330,13 +335,13 @@ func (q *Queue[T]) delete(item *indexed_item) { // Drop this index_entry. index.delete_entry(entry) - // Check compact map. + // Compact index map. index.data.Compact() } - // Drop entry from queue list. - q.queue.remove(&item.elem) + // Drop from queue list. + q.queue.remove(&i.elem) - // Free now-unused item. - free_indexed_item(item) + // Free unused item. + free_indexed_item(i) } diff --git a/vendor/codeberg.org/gruf/go-structr/queue_ctx.go b/vendor/codeberg.org/gruf/go-structr/queue_ctx.go index 1dac46349..9a9c615e2 100644 --- a/vendor/codeberg.org/gruf/go-structr/queue_ctx.go +++ b/vendor/codeberg.org/gruf/go-structr/queue_ctx.go @@ -133,7 +133,7 @@ func (q *QueueCtx[T]) pop(ctx context.Context, next func() *list_elem) (T, bool) // Extract item value. value := item.data.(T) - // Delete queued. + // Delete item. q.delete(item) // Get func ptrs. diff --git a/vendor/codeberg.org/gruf/go-structr/runtime.go b/vendor/codeberg.org/gruf/go-structr/runtime.go index 6e8af83dd..cd7f8d7a1 100644 --- a/vendor/codeberg.org/gruf/go-structr/runtime.go +++ b/vendor/codeberg.org/gruf/go-structr/runtime.go @@ -1,3 +1,5 @@ +//go:build go1.22 || go1.23 + package structr import ( @@ -36,6 +38,11 @@ type struct_field struct { // offsets defines whereabouts in // memory this field is located. offsets []next_offset + + // determines whether field type + // is ptr-like in-memory, and so + // requires a further dereference. + likeptr bool } // next_offset defines a next offset location @@ -107,6 +114,9 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) { t = field.Type } + // Check if ptr-like in-memory. + sfield.likeptr = like_ptr(t) + // Set final type. sfield.rtype = t @@ -126,10 +136,14 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) { // extract_fields extracts given structfields from the provided value type, // this is done using predetermined struct field memory offset locations. func extract_fields(ptr unsafe.Pointer, fields []struct_field) []unsafe.Pointer { + // Prepare slice of field value pointers. ptrs := make([]unsafe.Pointer, len(fields)) - for i, field := range fields { + if len(ptrs) != len(fields) { + panic("BCE") + } + for i, field := range fields { // loop scope. fptr := ptr @@ -145,7 +159,7 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []unsafe.Pointer offset.offset) } - if like_ptr(field.rtype) && fptr != nil { + if field.likeptr && fptr != nil { // Further dereference value ptr. fptr = *(*unsafe.Pointer)(fptr) } @@ -162,9 +176,63 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []unsafe.Pointer return ptrs } -// like_ptr returns whether type's kind is ptr-like. 
+// pkey_field contains pre-prepared type +// information about a primary key struct's +// field member, including memory offset. +type pkey_field struct { + rtype reflect.Type + + // offsets defines whereabouts in + // memory this field is located. + offsets []next_offset + + // determines whether field type + // is ptr-like in-memory, and so + // requires a further dereference. + likeptr bool +} + +// extract_pkey will extract a pointer from 'ptr', to +// the primary key struct field defined by 'field'. +func extract_pkey(ptr unsafe.Pointer, field pkey_field) unsafe.Pointer { + for _, offset := range field.offsets { + // Dereference any ptrs to offset. + ptr = deref(ptr, offset.derefs) + if ptr == nil { + return nil + } + + // Jump forward by offset to next ptr. + ptr = unsafe.Pointer(uintptr(ptr) + + offset.offset) + } + + if field.likeptr && ptr != nil { + // Further dereference value ptr. + ptr = *(*unsafe.Pointer)(ptr) + } + + return ptr +} + +// like_ptr returns whether type's kind is ptr-like in-memory, +// which indicates it may need a final additional dereference. func like_ptr(t reflect.Type) bool { switch t.Kind() { + case reflect.Array: + switch n := t.Len(); n { + case 1: + // specifically single elem arrays + // follow like_ptr for contained type. + return like_ptr(t.Elem()) + } + case reflect.Struct: + switch n := t.NumField(); n { + case 1: + // specifically single field structs + // follow like_ptr for contained type. + return like_ptr(t.Field(0).Type) + } case reflect.Pointer, reflect.Map, reflect.Chan, @@ -201,7 +269,7 @@ func panicf(format string, args ...any) { // else it prints callsite info with a BUG report. // //go:noinline -func should_not_reach() { +func should_not_reach(exit bool) { pcs := make([]uintptr, 1) _ = runtime.Callers(2, pcs) fn := runtime.FuncForPC(pcs[0]) @@ -212,5 +280,9 @@ func should_not_reach() { funcname = funcname[i+1:] } } - os.Stderr.WriteString("BUG: assertion failed in " + funcname + "\n") + if exit { + panic("BUG: assertion failed in " + funcname) + } else { + os.Stderr.WriteString("BUG: assertion failed in " + funcname + "\n") + } } diff --git a/vendor/codeberg.org/gruf/go-structr/test.sh b/vendor/codeberg.org/gruf/go-structr/test.sh deleted file mode 100644 index 554417df7..000000000 --- a/vendor/codeberg.org/gruf/go-structr/test.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -set -e -go test -v -tags=structr_32bit_hash . -go test -v -tags=structr_48bit_hash . -go test -v -tags=structr_64bit_hash . \ No newline at end of file diff --git a/vendor/codeberg.org/gruf/go-structr/timeline.go b/vendor/codeberg.org/gruf/go-structr/timeline.go new file mode 100644 index 000000000..7a9c17f70 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-structr/timeline.go @@ -0,0 +1,1008 @@ +package structr + +import ( + "cmp" + "reflect" + "slices" + "sync" + "unsafe" +) + +// Direction defines a direction +// to iterate entries in a Timeline. +type Direction bool + +const ( + // Asc = ascending, i.e. bottom-up. + Asc = Direction(true) + + // Desc = descending, i.e. top-down. + Desc = Direction(false) +) + +// TimelineConfig defines config vars for initializing a Timeline{}. +type TimelineConfig[StructType any, PK cmp.Ordered] struct { + + // Copy provides a means of copying + // timelined values, to ensure returned values + // do not share memory with those in timeline. + Copy func(StructType) StructType + + // Invalidate is called when timelined + // values are invalidated, either as passed + // to Insert(), or by calls to Invalidate(). 
+	Invalidate func(StructType)
+
+	// PKey defines the generic parameter StructType's
+	// field to use as the primary key for this timeline.
+	// It must be ordered so that the timeline can
+	// maintain correct sorting of inserted values.
+	//
+	// Field selection logic follows the same path as
+	// with IndexConfig{}.Fields. Note that in this
+	// case only a single field is permitted, though
+	// it may be nested, and as described above its
+	// type must conform to cmp.Ordered.
+	PKey string
+
+	// Indices defines indices to create
+	// in the Timeline for the receiving
+	// generic struct type parameter.
+	Indices []IndexConfig
+}
+
+// Timeline provides an ordered-list-like cache of structures,
+// with automated indexing and invalidation by any initialization
+// defined combination of fields. The list order is maintained
+// according to the configured struct primary key.
+type Timeline[StructType any, PK cmp.Ordered] struct {
+
+	// hook functions.
+	invalid func(StructType)
+	copy    func(StructType) StructType
+
+	// main underlying
+	// timeline list.
+	//
+	// where:
+	// - head = top = largest
+	// - tail = btm = smallest
+	list list
+
+	// contains struct field information of
+	// the field used as the primary key for
+	// this timeline. it can also be found
+	// under indices[0]
+	pkey pkey_field
+
+	// indices used in storing passed struct
+	// types by user defined sets of fields.
+	indices []Index
+
+	// protective mutex, guards:
+	// - Timeline{}.*
+	// - Index{}.data
+	mutex sync.Mutex
+}
+
+// Init initializes the timeline with given configuration
+// including struct fields to index, and necessary fns.
+func (t *Timeline[T, PK]) Init(config TimelineConfig[T, PK]) {
+	rt := reflect.TypeOf((*T)(nil)).Elem()
+
+	if len(config.Indices) == 0 {
+		panic("no indices provided")
+	}
+
+	if config.Copy == nil {
+		panic("copy function must be provided")
+	}
+
+	// Safely copy over
+	// provided config.
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	// The first index is created from PKey.
+	t.indices = make([]Index, len(config.Indices)+1)
+	t.indices[0].ptr = unsafe.Pointer(t)
+	t.indices[0].init(rt, IndexConfig{
+		Fields:    config.PKey,
+		AllowZero: true,
+		Multiple:  true,
+	}, 0)
+	if len(t.indices[0].fields) > 1 {
+		panic("primary key must contain only 1 field")
+	}
+	for i, cfg := range config.Indices {
+		t.indices[i+1].ptr = unsafe.Pointer(t)
+		t.indices[i+1].init(rt, cfg, 0)
+	}
+
+	// Extract pkey struct field info
+	// from the first (pkey) index.
+	field := t.indices[0].fields[0]
+	t.pkey = pkey_field{
+		rtype:   field.rtype,
+		offsets: field.offsets,
+		likeptr: field.likeptr,
+	}
+
+	// Copy over remaining.
+	t.copy = config.Copy
+	t.invalid = config.Invalidate
+}
+
+// Index selects index with given name from timeline, else panics.
+func (t *Timeline[T, PK]) Index(name string) *Index {
+	for i, idx := range t.indices {
+		if idx.name == name {
+			return &(t.indices[i])
+		}
+	}
+	panic("unknown index: " + name)
+}
+
+// Select allows you to retrieve a slice of values, in order, from the timeline.
+// This slice is defined by the minimum and maximum primary key parameters, up to
+// a given length in size. The direction in which you select determines which of
+// the min / max primary key values is used as the *cursor* to mark the start of
+// the selection, and which is used as the *boundary* to mark the end, if set.
+// In either case, the length parameter is always optional.
+//
+// dir = Asc : cursors up from 'min' (required), with boundary 'max' (optional).
+// dir = Desc : cursors down from 'max' (required), with boundary 'min' (optional).
+func (t *Timeline[T, PK]) Select(min, max *PK, length *int, dir Direction) (values []T) {
+
+	// Acquire lock.
+	t.mutex.Lock()
+
+	// Check init'd.
+	if t.copy == nil {
+		t.mutex.Unlock()
+		panic("not initialized")
+	}
+
+	switch dir {
+	case Asc:
+		// Verify args.
+		if min == nil {
+			t.mutex.Unlock()
+			panic("min must be provided when selecting asc")
+		}
+
+		// Select determined values ASCENDING.
+		values = t.select_asc(*min, max, length)
+
+	case Desc:
+		// Verify args.
+		if max == nil {
+			t.mutex.Unlock()
+			panic("max must be provided when selecting desc")
+		}
+
+		// Select determined values DESCENDING.
+		values = t.select_desc(min, *max, length)
+	}
+
+	// Done with lock.
+	t.mutex.Unlock()
+
+	return values
+}
+
+// Insert will insert the given values into the timeline,
+// calling any set invalidate hook on each inserted value.
+func (t *Timeline[T, PK]) Insert(values ...T) {
+
+	// Acquire lock.
+	t.mutex.Lock()
+
+	// Check init'd.
+	if t.copy == nil {
+		t.mutex.Unlock()
+		panic("not initialized")
+	}
+
+	// Allocate a slice of our value wrapping struct type.
+	with_keys := make([]value_with_pk[T, PK], len(values))
+	if len(with_keys) != len(values) {
+		panic("BCE")
+	}
+
+	// Range the provided values.
+	for i, value := range values {
+
+		// Create our own copy
+		// of value to work with.
+		value = t.copy(value)
+
+		// Take ptr to the value copy.
+		vptr := unsafe.Pointer(&value)
+
+		// Extract primary key from vptr.
+		kptr := extract_pkey(vptr, t.pkey)
+
+		var pkey PK
+		if kptr != nil {
+			// Cast as PK type.
+			pkey = *(*PK)(kptr)
+		} else {
+			// Use zero value pointer.
+			kptr = unsafe.Pointer(&pkey)
+		}
+
+		// Append wrapped value to slice with
+		// the acquired pointers and primary key.
+		with_keys[i] = value_with_pk[T, PK]{
+			k: pkey,
+			v: value,
+
+			kptr: kptr,
+			vptr: vptr,
+		}
+	}
+
+	var last *list_elem
+
+	// BEFORE inserting the prepared slice of value copies w/ primary
+	// keys, sort them by their primary key, descending. This permits
+	// us to re-use the 'last' timeline position as next insert cursor.
+	// Otherwise we would have to iterate from 'head' every single time.
+	slices.SortFunc(with_keys, func(a, b value_with_pk[T, PK]) int {
+		const k = +1
+		switch {
+		case a.k < b.k:
+			return +k
+		case b.k < a.k:
+			return -k
+		default:
+			return 0
+		}
+	})
+
+	// Store each value in the timeline,
+	// updating the last used list element
+	// each time so we don't have to iter
+	// down from head on every single store.
+	for _, value := range with_keys {
+		last = t.store_one(last, value)
+	}
+
+	// Get func ptrs.
+	invalid := t.invalid
+
+	// Done with lock.
+	t.mutex.Unlock()
+
+	if invalid != nil {
+		// Pass all invalidated values
+		// to given user hook (if set).
+		for _, value := range values {
+			invalid(value)
+		}
+	}
+}
+
+// Invalidate invalidates all entries stored in index under given keys.
+// Note that if set, this will call the invalidate hook on each value.
+func (t *Timeline[T, PK]) Invalidate(index *Index, keys ...Key) {
+	if index == nil {
+		panic("no index given")
+	} else if index.ptr != unsafe.Pointer(t) {
+		panic("invalid index for timeline")
+	}
+
+	// Acquire lock.
+	t.mutex.Lock()
+
+	// Preallocate expected ret slice.
+	values := make([]T, 0, len(keys))
+
+	for i := range keys {
+		// Delete all items under key from index, collecting
+		// value items and dropping them from all their indices.
+ index.delete(keys[i].key, func(item *indexed_item) { + + // Cast to *actual* timeline item. + t_item := to_timeline_item(item) + + if value, ok := item.data.(T); ok { + // No need to copy, as item + // being deleted from cache. + values = append(values, value) + } + + // Delete item. + t.delete(t_item) + }) + } + + // Get func ptrs. + invalid := t.invalid + + // Done with lock. + t.mutex.Unlock() + + if invalid != nil { + // Pass all invalidated values + // to given user hook (if set). + for _, value := range values { + invalid(value) + } + } +} + +// Range will range over all values in the timeline in given direction. +// dir = Asc : ranges from the bottom-up. +// dir = Desc : ranges from the top-down. +// +// Please note that the entire Timeline{} will be locked for the duration of the range +// operation, i.e. from the beginning of the first yield call until the end of the last. +func (t *Timeline[T, PK]) Range(dir Direction) func(yield func(T) bool) { + return func(yield func(T) bool) { + if t.copy == nil { + panic("not initialized") + } else if yield == nil { + panic("nil func") + } + + // Acquire lock. + t.mutex.Lock() + defer t.mutex.Unlock() + + switch dir { + case Asc: + // Iterate through linked list from bottom (i.e. tail). + for prev := t.list.tail; prev != nil; prev = prev.prev { + + // Extract item from list element. + item := (*timeline_item)(prev.data) + + // Create copy of item value. + value := t.copy(item.data.(T)) + + // Pass to given function. + if !yield(value) { + break + } + } + + case Desc: + // Iterate through linked list from top (i.e. head). + for next := t.list.head; next != nil; next = next.next { + + // Extract item from list element. + item := (*timeline_item)(next.data) + + // Create copy of item value. + value := t.copy(item.data.(T)) + + // Pass to given function. + if !yield(value) { + break + } + } + } + } +} + +// RangeKeys will iterate over all values for given keys in the given index. +// +// Please note that the entire Timeline{} will be locked for the duration of the range +// operation, i.e. from the beginning of the first yield call until the end of the last. +func (t *Timeline[T, PK]) RangeKeys(index *Index, keys ...Key) func(yield func(T) bool) { + return func(yield func(T) bool) { + if t.copy == nil { + panic("not initialized") + } else if index == nil { + panic("no index given") + } else if index.ptr != unsafe.Pointer(t) { + panic("invalid index for timeline") + } else if yield == nil { + panic("nil func") + } + + // Acquire lock. + t.mutex.Lock() + defer t.mutex.Unlock() + + for _, key := range keys { + var done bool + + // Iterate over values in index under key. + index.get(key.key, func(i *indexed_item) { + + // Cast to timeline_item type. + item := to_timeline_item(i) + + // Create copy of item value. + value := t.copy(item.data.(T)) + + // Pass val to yield function. + done = done || !yield(value) + }) + + if done { + break + } + } + } +} + +// Trim will remove entries from the timeline in given +// direction, ensuring timeline is no larger than 'max'. +// If 'max' >= t.Len(), this function is a no-op. +// dir = Asc : trims from the bottom-up. +// dir = Desc : trims from the top-down. +func (t *Timeline[T, PK]) Trim(max int, dir Direction) { + // Acquire lock. + t.mutex.Lock() + + // Calculate number to drop. + diff := t.list.len - int(max) + if diff <= 0 { + + // Trim not needed. + t.mutex.Unlock() + return + } + + switch dir { + case Asc: + // Iterate over 'diff' items + // from bottom of timeline list. 
+		for range diff {
+
+			// Get bottom list elem.
+			bottom := t.list.tail
+			if bottom == nil {
+
+				// reached
+				// end.
+				break
+			}
+
+			// Drop bottom-most item from timeline.
+			item := (*timeline_item)(bottom.data)
+			t.delete(item)
+		}
+
+	case Desc:
+		// Iterate over 'diff' items
+		// from top of timeline list.
+		for range diff {
+
+			// Get top list elem.
+			top := t.list.head
+			if top == nil {
+
+				// reached
+				// end.
+				break
+			}
+
+			// Drop top-most item from timeline.
+			item := (*timeline_item)(top.data)
+			t.delete(item)
+		}
+	}
+
+	// Compact index data stores.
+	for _, idx := range t.indices {
+		(&idx).data.Compact()
+	}
+
+	// Done with lock.
+	t.mutex.Unlock()
+}
+
+// Clear empties the timeline by calling Trim(0, Desc).
+func (t *Timeline[T, PK]) Clear() { t.Trim(0, Desc) }
+
+// Len returns the current length of timeline.
+func (t *Timeline[T, PK]) Len() int {
+	t.mutex.Lock()
+	l := t.list.len
+	t.mutex.Unlock()
+	return l
+}
+
+// Debug returns debug stats about timeline.
+func (t *Timeline[T, PK]) Debug() map[string]any {
+	m := make(map[string]any, 2)
+	t.mutex.Lock()
+	m["list"] = t.list.len
+	indices := make(map[string]any, len(t.indices))
+	m["indices"] = indices
+	for _, idx := range t.indices {
+		var n uint64
+		for _, l := range idx.data.m {
+			n += uint64(l.len)
+		}
+		indices[idx.name] = n
+	}
+	t.mutex.Unlock()
+	return m
+}
+
+func (t *Timeline[T, PK]) select_asc(min PK, max *PK, length *int) (values []T) {
+	// Iterate through linked list
+	// from bottom (i.e. tail), asc.
+	prev := t.list.tail
+
+	// Iterate from 'prev' up, skipping all
+	// entries with pkey below cursor 'min'.
+	for ; prev != nil; prev = prev.prev {
+		item := (*timeline_item)(prev.data)
+		pkey := *(*PK)(item.pk)
+
+		// Check below min.
+		if pkey < min {
+			continue
+		}
+
+		// Reached
+		// cursor.
+		break
+	}
+
+	if prev == nil {
+		// No values
+		// remaining.
+		return
+	}
+
+	// Optimized switch case to handle
+	// each set of argument combinations
+	// separately, in order to minimize
+	// number of checks during loops.
+	switch {
+
+	case length != nil && max != nil:
+		// Deref arguments.
+		length := *length
+		max := *max
+
+		// Optimistic preallocate slice.
+		values = make([]T, 0, length)
+
+		// Both a length and maximum were given,
+		// select from cursor until either reached.
+		for ; prev != nil; prev = prev.prev {
+			item := (*timeline_item)(prev.data)
+			pkey := *(*PK)(item.pk)
+
+			// Check above max.
+			if pkey >= max {
+				break
+			}
+
+			// Append value copy.
+			value := item.data.(T)
+			value = t.copy(value)
+			values = append(values, value)
+
+			// Check if length reached.
+			if len(values) >= length {
+				break
+			}
+		}
+
+	case length != nil:
+		// Deref length.
+		length := *length
+
+		// Optimistic preallocate slice.
+		values = make([]T, 0, length)
+
+		// Only a length was given, select
+		// from cursor until length reached.
+		for ; prev != nil; prev = prev.prev {
+			item := (*timeline_item)(prev.data)
+
+			// Append value copy.
+			value := item.data.(T)
+			value = t.copy(value)
+			values = append(values, value)
+
+			// Check if length reached.
+			if len(values) >= length {
+				break
+			}
+		}
+
+	case max != nil:
+		// Deref max.
+		max := *max
+
+		// Only a maximum was given, select
+		// from cursor until max is reached.
+		for ; prev != nil; prev = prev.prev {
+			item := (*timeline_item)(prev.data)
+			pkey := *(*PK)(item.pk)
+
+			// Check above max.
+			if pkey >= max {
+				break
+			}
+
+			// Append value copy.
+ value := item.data.(T) + value = t.copy(value) + values = append(values, value) + } + + default: + // No maximum or length were given, + // ALL from cursor need selecting. + for ; prev != nil; prev = prev.prev { + item := (*timeline_item)(prev.data) + + // Append value copy. + value := item.data.(T) + value = t.copy(value) + values = append(values, value) + } + } + + return +} + +func (t *Timeline[T, PK]) select_desc(min *PK, max PK, length *int) (values []T) { + // Iterate through linked list + // from top (i.e. head), desc. + next := t.list.head + + // Iterate from 'next' down, skipping + // all entries with pkey above cursor 'max'. + for ; next != nil; next = next.next { + item := (*timeline_item)(next.data) + pkey := *(*PK)(item.pk) + + // Check above max. + if pkey > max { + continue + } + + // Reached + // cursor. + break + } + + if next == nil { + // No values + // remaining. + return + } + + // Optimized switch case to handle + // each set of argument combinations + // separately, in order to minimize + // number of checks during loops. + switch { + + case length != nil && min != nil: + // Deref arguments. + length := *length + min := *min + + // Optimistic preallocate slice. + values = make([]T, 0, length) + + // Both a length and minimum were given, + // select from cursor until either reached. + for ; next != nil; next = next.next { + item := (*timeline_item)(next.data) + pkey := *(*PK)(item.pk) + + // Check below min. + if pkey <= min { + break + } + + // Append value copy. + value := item.data.(T) + value = t.copy(value) + values = append(values, value) + + // Check if length reached. + if len(values) >= length { + break + } + } + + case length != nil: + // Deref length. + length := *length + + // Optimistic preallocate slice. + values = make([]T, 0, length) + + // Only a length was given, select + // from cursor until length reached. + for ; next != nil; next = next.next { + item := (*timeline_item)(next.data) + + // Append value copy. + value := item.data.(T) + value = t.copy(value) + values = append(values, value) + + // Check if length reached. + if len(values) >= length { + break + } + } + + case min != nil: + // Deref min. + min := *min + + // Only a minimum was given, select + // from cursor until minimum reached. + for ; next != nil; next = next.next { + item := (*timeline_item)(next.data) + pkey := *(*PK)(item.pk) + + // Check below min. + if pkey <= min { + break + } + + // Append value copy. + value := item.data.(T) + value = t.copy(value) + values = append(values, value) + } + + default: + // No minimum or length were given, + // ALL from cursor need selecting. + for ; next != nil; next = next.next { + item := (*timeline_item)(next.data) + + // Append value copy. + value := item.data.(T) + value = t.copy(value) + values = append(values, value) + } + } + + return +} + +// value_with_pk wraps an incoming value type, with +// its extracted primary key, and pointers to both. +// this encompasses all arguments related to a value +// required by store_one(), simplifying some logic. +// +// with all the primary keys extracted, it also +// makes it much easier to sort input before insert. +type value_with_pk[T any, PK comparable] struct { + k PK // primary key value + v T // value copy + + kptr unsafe.Pointer // primary key ptr + vptr unsafe.Pointer // value copy ptr +} + +func (t *Timeline[T, PK]) store_one(last *list_elem, value value_with_pk[T, PK]) *list_elem { + // NOTE: the value passed here should + // already be a copy of the original. 
+
+	// Alloc new index item.
+	t_item := new_timeline_item()
+	if cap(t_item.indexed) < len(t.indices) {
+
+		// Preallocate item indices slice to prevent Go auto
+		// allocating overly large slices we don't need.
+		t_item.indexed = make([]*index_entry, 0, len(t.indices))
+	}
+
+	// Set item value data.
+	t_item.data = value.v
+	t_item.pk = value.kptr
+
+	// Acquire key buf.
+	buf := new_buffer()
+
+	// Convert to indexed_item ptr.
+	i_item := from_timeline_item(t_item)
+
+	// Append already-extracted
+	// primary key to 0th index.
+	idx := (&t.indices[0])
+	partptrs := []unsafe.Pointer{value.kptr}
+	key := idx.key(buf, partptrs)
+	evicted := idx.append(key, i_item)
+	if evicted != nil {
+
+		// This item is no longer
+		// indexed, remove from list.
+		t.list.remove(&evicted.elem)
+
+		// Now convert from indexed_item ptr
+		// and release it to global mem pool.
+		evicted := to_timeline_item(evicted)
+		free_timeline_item(evicted)
+	}
+
+	for i := 1; i < len(t.indices); i++ {
+		// Get current index ptr.
+		idx := (&t.indices[i])
+
+		// Extract fields comprising index key from value.
+		parts := extract_fields(value.vptr, idx.fields)
+
+		// Calculate this index key.
+		key := idx.key(buf, parts)
+		if key == "" {
+			continue
+		}
+
+		// Append this item to index.
+		evicted := idx.append(key, i_item)
+		if evicted != nil {
+
+			// This item is no longer
+			// indexed, remove from list.
+			t.list.remove(&evicted.elem)
+
+			// Now convert from indexed_item ptr
+			// and release it to global mem pool.
+			evicted := to_timeline_item(evicted)
+			free_timeline_item(evicted)
+		}
+	}
+
+	// Done with buf.
+	free_buffer(buf)
+
+	if last == nil {
+		// No previous element was provided, this is
+		// first insert, we need to work from head.
+
+		// Check for empty head.
+		if t.list.head == nil {
+
+			// The easiest case, this will
+			// be the first item in list.
+			t.list.push_front(&t_item.elem)
+			return t.list.head
+		}
+
+		// Extract head item and its primary key.
+		headItem := (*timeline_item)(t.list.head.data)
+		headPK := *(*PK)(headItem.pk)
+		if value.k >= headPK {
+
+			// Another easier case, this also
+			// will be the first item in list.
+			t.list.push_front(&t_item.elem)
+			return t.list.head
+		}
+
+		// Set last=head
+		// to work from.
+		last = t.list.head
+	}
+
+	// Iterate through linked list
+	// from head to find location.
+	for next := last.next; //
+	next != nil; next = next.next {
+
+		// Extract item and its primary key.
+		nextItem := (*timeline_item)(next.data)
+		nextPK := *(*PK)(nextItem.pk)
+
+		// If pkey smaller than
+		// cursor's, keep going.
+		if value.k < nextPK {
+			continue
+		}
+
+		// New pkey is larger than (or equal
+		// to) cursor's, insert just before it.
+		t.list.insert(&t_item.elem, next.prev)
+		return next
+	}
+
+	// We reached the end of the
+	// list, insert at tail pos.
+	t.list.push_back(&t_item.elem)
+	return t.list.tail
+}
+
+func (t *Timeline[T, PK]) delete(i *timeline_item) {
+	for len(i.indexed) != 0 {
+		// Pop last indexed entry from list.
+		entry := i.indexed[len(i.indexed)-1]
+		i.indexed[len(i.indexed)-1] = nil
+		i.indexed = i.indexed[:len(i.indexed)-1]
+
+		// Get entry's index.
+		index := entry.index
+
+		// Drop this index_entry.
+		index.delete_entry(entry)
+	}
+
+	// Drop from main list.
+	t.list.remove(&i.elem)
+
+	// Free unused item.
+	free_timeline_item(i)
+}
+
+type timeline_item struct {
+	indexed_item
+
+	// retains fast ptr access
+	// to primary key value of
+	// above indexed_item{}.data
+	pk unsafe.Pointer
+
+	// check bits always all set
+	// to 1.
used to ensure cast + // from indexed_item to this + // type was originally a + // timeline_item to begin with. + ck uint +} + +func init() { + // ensure the embedded indexed_item struct is ALWAYS at zero offset. + // we rely on this to allow a ptr to one to be a ptr to either of them. + const off = unsafe.Offsetof(timeline_item{}.indexed_item) + if off != 0 { + panic("invalid timeline_item{}.indexed_item offset") + } +} + +// from_timeline_item converts a timeline_item ptr to indexed_item, given the above init() guarantee. +func from_timeline_item(item *timeline_item) *indexed_item { + return (*indexed_item)(unsafe.Pointer(item)) +} + +// to_timeline_item converts an indexed_item ptr to timeline_item, given the above init() guarantee. +// NOTE THIS MUST BE AN indexed_item THAT WAS INITIALLY CONVERTED WITH from_timeline_item(). +func to_timeline_item(item *indexed_item) *timeline_item { + to := (*timeline_item)(unsafe.Pointer(item)) + if to.ck != ^uint(0) { + // ensure check bits are + // set indicating it was a + // timeline_item originally. + should_not_reach(true) + } + return to +} + +var timeline_item_pool sync.Pool + +// new_timeline_item returns a new prepared timeline_item. +func new_timeline_item() *timeline_item { + v := timeline_item_pool.Get() + if v == nil { + i := new(timeline_item) + i.elem.data = unsafe.Pointer(i) + i.ck = ^uint(0) + v = i + } + item := v.(*timeline_item) + return item +} + +// free_timeline_item releases the timeline_item. +func free_timeline_item(item *timeline_item) { + if len(item.indexed) > 0 || + item.elem.next != nil || + item.elem.prev != nil { + should_not_reach(false) + return + } + item.data = nil + item.pk = nil + timeline_item_pool.Put(item) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index aaf95bc63..a04e736d0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -38,7 +38,7 @@ codeberg.org/gruf/go-kv/format # codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f ## explicit; go 1.21.3 codeberg.org/gruf/go-list -# codeberg.org/gruf/go-mangler v1.4.1 +# codeberg.org/gruf/go-mangler v1.4.3 ## explicit; go 1.19 codeberg.org/gruf/go-mangler # codeberg.org/gruf/go-maps v1.0.4 @@ -63,8 +63,8 @@ codeberg.org/gruf/go-storage/disk codeberg.org/gruf/go-storage/internal codeberg.org/gruf/go-storage/memory codeberg.org/gruf/go-storage/s3 -# codeberg.org/gruf/go-structr v0.8.11 -## explicit; go 1.21 +# codeberg.org/gruf/go-structr v0.9.0 +## explicit; go 1.22 codeberg.org/gruf/go-structr # codeberg.org/superseriousbusiness/activity v1.12.0-gts ## explicit; go 1.21
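
The headline addition in the go-structr v0.9.0 bump above is the new `Timeline{}` type (`vendor/codeberg.org/gruf/go-structr/timeline.go`). Since the upstream README now defers to the package docs, here is a minimal sketch of how the new API fits together, using only identifiers visible in this diff; the `Status` type and every field / ID / length value below are hypothetical placeholders, not code from this repository:

```golang
// Hypothetical timelined struct; the primary key
// field must satisfy cmp.Ordered (e.g. string IDs
// that sort chronologically, such as ULIDs).
type Status struct {
	ID        string
	AccountID string
}

var tl structr.Timeline[*Status, string]

tl.Init(structr.TimelineConfig[*Status, string]{

	// Primary key field: the underlying list
	// stays sorted on this (head = largest).
	PKey: "ID",

	// Secondary indices, configured exactly
	// as for the Cache{} and Queue{} types.
	Indices: []structr.IndexConfig{
		{Fields: "AccountID", Multiple: true},
	},

	// Same contract as CacheConfig{}.Copy: returned
	// values must not share memory with stored ones.
	Copy: func(s *Status) *Status {
		s2 := new(Status)
		*s2 = *s
		return s2
	},
})

// Insert copies each value and splices it into the
// list in primary-key order; input order is irrelevant
// as Insert sorts its batch before storing.
tl.Insert(
	&Status{ID: "0002", AccountID: "1"},
	&Status{ID: "0001", AccountID: "1"},
)

// Page down through the timeline: for Desc the 'max'
// cursor is required, while the 'min' boundary and
// the length are both optional.
maxID := "0003"
limit := 20
page := tl.Select(nil, &maxID, &limit, structr.Desc)
log.Println("selected:", page)

// Bound memory use: keep at most 1000 entries, trimming
// away the smallest (bottom-most) primary keys first.
tl.Trim(1000, structr.Asc)
```

Note the `Select()` cursor semantics this sketch relies on: the direction determines which bound is the required cursor (`Desc` walks down from 'max', `Asc` walks up from 'min'), with the opposite bound and the length always optional.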