[chore]: Bump codeberg.org/gruf/go-structr from 0.9.0 to 0.9.6 (#3973)

Bumps codeberg.org/gruf/go-structr from 0.9.0 to 0.9.6.

---
updated-dependencies:
- dependency-name: codeberg.org/gruf/go-structr
  dependency-version: 0.9.6
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
This commit is contained in:
dependabot[bot]
2025-04-07 11:03:57 +01:00
committed by GitHub
parent e263d23622
commit 2cc5d6269d
10 changed files with 280 additions and 116 deletions

4
go.mod
View File

@ -27,7 +27,7 @@ require (
codeberg.org/gruf/go-runners v1.6.3 codeberg.org/gruf/go-runners v1.6.3
codeberg.org/gruf/go-sched v1.2.4 codeberg.org/gruf/go-sched v1.2.4
codeberg.org/gruf/go-storage v0.2.0 codeberg.org/gruf/go-storage v0.2.0
codeberg.org/gruf/go-structr v0.9.0 codeberg.org/gruf/go-structr v0.9.6
codeberg.org/superseriousbusiness/activity v1.13.0-gts codeberg.org/superseriousbusiness/activity v1.13.0-gts
codeberg.org/superseriousbusiness/exif-terminator v0.10.0 codeberg.org/superseriousbusiness/exif-terminator v0.10.0
codeberg.org/superseriousbusiness/httpsig v1.3.0-SSB codeberg.org/superseriousbusiness/httpsig v1.3.0-SSB
@ -96,7 +96,7 @@ require (
require ( require (
codeberg.org/gruf/go-fastpath/v2 v2.0.0 // indirect codeberg.org/gruf/go-fastpath/v2 v2.0.0 // indirect
codeberg.org/gruf/go-mangler v1.4.3 // indirect codeberg.org/gruf/go-mangler v1.4.4 // indirect
codeberg.org/gruf/go-maps v1.0.4 // indirect codeberg.org/gruf/go-maps v1.0.4 // indirect
codeberg.org/superseriousbusiness/go-jpeg-image-structure/v2 v2.1.0-SSB // indirect codeberg.org/superseriousbusiness/go-jpeg-image-structure/v2 v2.1.0-SSB // indirect
codeberg.org/superseriousbusiness/go-png-image-structure/v2 v2.1.0-SSB // indirect codeberg.org/superseriousbusiness/go-png-image-structure/v2 v2.1.0-SSB // indirect

8
go.sum generated
View File

@ -24,8 +24,8 @@ codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f h1:Ss6Z+vygy+jOGhj9
codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f/go.mod h1:F9pl4h34iuVN7kucKam9fLwsItTc+9mmaKt7pNXRd/4= codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f/go.mod h1:F9pl4h34iuVN7kucKam9fLwsItTc+9mmaKt7pNXRd/4=
codeberg.org/gruf/go-loosy v0.0.0-20231007123304-bb910d1ab5c4 h1:IXwfoU7f2whT6+JKIKskNl/hBlmWmnF1vZd84Eb3cyA= codeberg.org/gruf/go-loosy v0.0.0-20231007123304-bb910d1ab5c4 h1:IXwfoU7f2whT6+JKIKskNl/hBlmWmnF1vZd84Eb3cyA=
codeberg.org/gruf/go-loosy v0.0.0-20231007123304-bb910d1ab5c4/go.mod h1:fiO8HE1wjZCephcYmRRsVnNI/i0+mhy44Z5dQalS0rM= codeberg.org/gruf/go-loosy v0.0.0-20231007123304-bb910d1ab5c4/go.mod h1:fiO8HE1wjZCephcYmRRsVnNI/i0+mhy44Z5dQalS0rM=
codeberg.org/gruf/go-mangler v1.4.3 h1:mdtcbGDyj0AS9LE/H1imQreICVn6BQiks554jzdAozc= codeberg.org/gruf/go-mangler v1.4.4 h1:moQl7FSSLLaByS7w5UP7b3Z7r2ex/F4IpvSp+PyRWK4=
codeberg.org/gruf/go-mangler v1.4.3/go.mod h1:mDmW8Ia352RvNFaXoP9K60TgcmCZJtX0j6wm3vjAsJE= codeberg.org/gruf/go-mangler v1.4.4/go.mod h1:mDmW8Ia352RvNFaXoP9K60TgcmCZJtX0j6wm3vjAsJE=
codeberg.org/gruf/go-maps v1.0.4 h1:K+Ww4vvR3TZqm5jqrKVirmguZwa3v1VUvmig2SE8uxY= codeberg.org/gruf/go-maps v1.0.4 h1:K+Ww4vvR3TZqm5jqrKVirmguZwa3v1VUvmig2SE8uxY=
codeberg.org/gruf/go-maps v1.0.4/go.mod h1:ASX7osM7kFwt5O8GfGflcFjrwYGD8eIuRLl/oMjhEi8= codeberg.org/gruf/go-maps v1.0.4/go.mod h1:ASX7osM7kFwt5O8GfGflcFjrwYGD8eIuRLl/oMjhEi8=
codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760 h1:m2/UCRXhjDwAg4vyji6iKCpomKw6P4PmBOUi5DvAMH4= codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760 h1:m2/UCRXhjDwAg4vyji6iKCpomKw6P4PmBOUi5DvAMH4=
@ -38,8 +38,8 @@ codeberg.org/gruf/go-sched v1.2.4 h1:ddBB9o0D/2oU8NbQ0ldN5aWxogpXPRBATWi58+p++Hw
codeberg.org/gruf/go-sched v1.2.4/go.mod h1:wad6l+OcYGWMA2TzNLMmLObsrbBDxdJfEy5WvTgBjNk= codeberg.org/gruf/go-sched v1.2.4/go.mod h1:wad6l+OcYGWMA2TzNLMmLObsrbBDxdJfEy5WvTgBjNk=
codeberg.org/gruf/go-storage v0.2.0 h1:mKj3Lx6AavEkuXXtxqPhdq+akW9YwrnP16yQBF7K5ZI= codeberg.org/gruf/go-storage v0.2.0 h1:mKj3Lx6AavEkuXXtxqPhdq+akW9YwrnP16yQBF7K5ZI=
codeberg.org/gruf/go-storage v0.2.0/go.mod h1:o3GzMDE5QNUaRnm/daUzFqvuAaC4utlgXDXYO79sWKU= codeberg.org/gruf/go-storage v0.2.0/go.mod h1:o3GzMDE5QNUaRnm/daUzFqvuAaC4utlgXDXYO79sWKU=
codeberg.org/gruf/go-structr v0.9.0 h1:UYw8igp3I4UBnlsRyDR2AbF3g7NPEP7HBrQs1I15218= codeberg.org/gruf/go-structr v0.9.6 h1:FSbJ1A0ubTQB82rC0K4o6qyiqrDGH1t9ivttm8Zy64o=
codeberg.org/gruf/go-structr v0.9.0/go.mod h1:mUvBvn4q1iM/I+d3Fj1w/gxGUU/Ve9GpiNo6dPmBJnk= codeberg.org/gruf/go-structr v0.9.6/go.mod h1:9k5hYztZ4PsBS+m1v5hUTeFiVUBTLF5VA7d9cd1OEMs=
codeberg.org/superseriousbusiness/activity v1.13.0-gts h1:4WZLc/SNt+Vt5x2UjL2n6V5dHlIL9ECudUPx8Ld5rxw= codeberg.org/superseriousbusiness/activity v1.13.0-gts h1:4WZLc/SNt+Vt5x2UjL2n6V5dHlIL9ECudUPx8Ld5rxw=
codeberg.org/superseriousbusiness/activity v1.13.0-gts/go.mod h1:enxU1Lva4OcK6b/NBXscoHSEgEMsKJvdHrQFifQxp4o= codeberg.org/superseriousbusiness/activity v1.13.0-gts/go.mod h1:enxU1Lva4OcK6b/NBXscoHSEgEMsKJvdHrQFifQxp4o=
codeberg.org/superseriousbusiness/exif-terminator v0.10.0 h1:FiLX/AK07tzceS36I+kOP2aEH+aytjPSIlFoYePMEyg= codeberg.org/superseriousbusiness/exif-terminator v0.10.0 h1:FiLX/AK07tzceS36I+kOP2aEH+aytjPSIlFoYePMEyg=

View File

@ -1,4 +1,4 @@
//go:build go1.19 || go1.20 || go1.21 || go1.22 || go1.23 //go:build go1.19 && !go1.25
package mangler package mangler

View File

@ -1,5 +0,0 @@
## Timeline Todos
- optimize store() to operate on sorted list
- finish writing code comments

View File

@ -1,6 +1,7 @@
package structr package structr
import ( import (
"os"
"reflect" "reflect"
"strings" "strings"
"sync" "sync"
@ -244,6 +245,39 @@ func (i *Index) key(buf *byteutil.Buffer, parts []unsafe.Pointer) string {
return string(buf.B) return string(buf.B)
} }
// add will attempt to add given index entry to appropriate
// doubly-linked-list in index hashmap. in the case of an
// existing entry in a "unique" index, it will return false.
func (i *Index) add(key string, item *indexed_item) bool {
// Look for existing.
l := i.data.Get(key)
if l == nil {
// Allocate new.
l = new_list()
i.data.Put(key, l)
} else if is_unique(i.flags) {
// Collision!
return false
}
// Prepare new index entry.
entry := new_index_entry()
entry.item = item
entry.key = key
entry.index = i
// Add ourselves to item's index tracker.
item.indexed = append(item.indexed, entry)
// Add entry to index list.
l.push_front(&entry.elem)
return true
}
// append will append the given index entry to appropriate // append will append the given index entry to appropriate
// doubly-linked-list in index hashmap. this handles case of // doubly-linked-list in index hashmap. this handles case of
// overwriting "unique" index entries, and removes from given // overwriting "unique" index entries, and removes from given
@ -403,7 +437,8 @@ func new_index_entry() *index_entry {
func free_index_entry(entry *index_entry) { func free_index_entry(entry *index_entry) {
if entry.elem.next != nil || if entry.elem.next != nil ||
entry.elem.prev != nil { entry.elem.prev != nil {
should_not_reach(false) msg := assert("entry not in use")
os.Stderr.WriteString(msg + "\n")
return return
} }
entry.key = "" entry.key = ""

View File

@ -1,6 +1,7 @@
package structr package structr
import ( import (
"os"
"sync" "sync"
"unsafe" "unsafe"
) )
@ -37,7 +38,8 @@ func free_indexed_item(item *indexed_item) {
if len(item.indexed) > 0 || if len(item.indexed) > 0 ||
item.elem.next != nil || item.elem.next != nil ||
item.elem.prev != nil { item.elem.prev != nil {
should_not_reach(false) msg := assert("item not in use")
os.Stderr.WriteString(msg + "\n")
return return
} }
item.data = nil item.data = nil

View File

@ -1,6 +1,7 @@
package structr package structr
import ( import (
"os"
"sync" "sync"
"unsafe" "unsafe"
) )
@ -43,7 +44,8 @@ func free_list(list *list) {
if list.head != nil || if list.head != nil ||
list.tail != nil || list.tail != nil ||
list.len != 0 { list.len != 0 {
should_not_reach(false) msg := assert("list not in use")
os.Stderr.WriteString(msg + "\n")
return return
} }
list_pool.Put(list) list_pool.Put(list)

View File

@ -1,10 +1,9 @@
//go:build go1.22 || go1.23 //go:build go1.22 && !go1.25
package structr package structr
import ( import (
"fmt" "fmt"
"os"
"reflect" "reflect"
"runtime" "runtime"
"strings" "strings"
@ -140,7 +139,7 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []unsafe.Pointer
// Prepare slice of field value pointers. // Prepare slice of field value pointers.
ptrs := make([]unsafe.Pointer, len(fields)) ptrs := make([]unsafe.Pointer, len(fields))
if len(ptrs) != len(fields) { if len(ptrs) != len(fields) {
panic("BCE") panic(assert("BCE"))
} }
for i, field := range fields { for i, field := range fields {
@ -264,12 +263,12 @@ func panicf(format string, args ...any) {
panic(fmt.Sprintf(format, args...)) panic(fmt.Sprintf(format, args...))
} }
// should_not_reach can be called to indicate a // assert can be called to indicate a block
// block of code should not be able to be reached, // of code should not be able to be reached,
// else it prints callsite info with a BUG report. // it returns a BUG report with callsite.
// //
//go:noinline //go:noinline
func should_not_reach(exit bool) { func assert(assert string) string {
pcs := make([]uintptr, 1) pcs := make([]uintptr, 1)
_ = runtime.Callers(2, pcs) _ = runtime.Callers(2, pcs)
fn := runtime.FuncForPC(pcs[0]) fn := runtime.FuncForPC(pcs[0])
@ -280,9 +279,11 @@ func should_not_reach(exit bool) {
funcname = funcname[i+1:] funcname = funcname[i+1:]
} }
} }
if exit { var buf strings.Builder
panic("BUG: assertion failed in " + funcname) buf.Grow(32 + len(assert) + len(funcname))
} else { buf.WriteString("BUG: assertion \"")
os.Stderr.WriteString("BUG: assertion failed in " + funcname + "\n") buf.WriteString(assert)
} buf.WriteString("\" failed in ")
buf.WriteString(funcname)
return buf.String()
} }

View File

@ -2,6 +2,7 @@ package structr
import ( import (
"cmp" "cmp"
"os"
"reflect" "reflect"
"slices" "slices"
"sync" "sync"
@ -43,7 +44,7 @@ type TimelineConfig[StructType any, PK cmp.Ordered] struct {
// case only a single field is permitted, though // case only a single field is permitted, though
// it may be nested, and as described above the // it may be nested, and as described above the
// type must conform to cmp.Ordered. // type must conform to cmp.Ordered.
PKey string PKey IndexConfig
// Indices defines indices to create // Indices defines indices to create
// in the Timeline for the receiving // in the Timeline for the receiving
@ -103,14 +104,11 @@ func (t *Timeline[T, PK]) Init(config TimelineConfig[T, PK]) {
t.mutex.Lock() t.mutex.Lock()
defer t.mutex.Unlock() defer t.mutex.Unlock()
// The first index is created from PKey. // The first index is created from PKey,
// other indices are created as expected.
t.indices = make([]Index, len(config.Indices)+1) t.indices = make([]Index, len(config.Indices)+1)
t.indices[0].ptr = unsafe.Pointer(t) t.indices[0].ptr = unsafe.Pointer(t)
t.indices[0].init(rt, IndexConfig{ t.indices[0].init(rt, config.PKey, 0)
Fields: config.PKey,
AllowZero: true,
Multiple: true,
}, 0)
if len(t.indices[0].fields) > 1 { if len(t.indices[0].fields) > 1 {
panic("primary key must contain only 1 field") panic("primary key must contain only 1 field")
} }
@ -119,8 +117,7 @@ func (t *Timeline[T, PK]) Init(config TimelineConfig[T, PK]) {
t.indices[i+1].init(rt, cfg, 0) t.indices[i+1].init(rt, cfg, 0)
} }
// Before extracting // Extract pkey details from index.
// first index for pkey.
field := t.indices[0].fields[0] field := t.indices[0].fields[0]
t.pkey = pkey_field{ t.pkey = pkey_field{
rtype: field.rtype, rtype: field.rtype,
@ -207,7 +204,7 @@ func (t *Timeline[T, PK]) Insert(values ...T) {
// Allocate a slice of our value wrapping struct type. // Allocate a slice of our value wrapping struct type.
with_keys := make([]value_with_pk[T, PK], len(values)) with_keys := make([]value_with_pk[T, PK], len(values))
if len(with_keys) != len(values) { if len(with_keys) != len(values) {
panic("BCE") panic(assert("BCE"))
} }
// Range the provided values. // Range the provided values.
@ -387,6 +384,54 @@ func (t *Timeline[T, PK]) Range(dir Direction) func(yield func(T) bool) {
} }
} }
// RangeUnsafe is functionally similar to Range(), except it does not pass *copies* of
// data. It allows you to operate on the data directly and modify it. As such it can also
// be more performant to use this function, even for read-write operations.
//
// Please note that the entire Timeline{} will be locked for the duration of the range
// operation, i.e. from the beginning of the first yield call until the end of the last.
func (t *Timeline[T, PK]) RangeUnsafe(dir Direction) func(yield func(T) bool) {
return func(yield func(T) bool) {
if t.copy == nil {
panic("not initialized")
} else if yield == nil {
panic("nil func")
}
// Acquire lock.
t.mutex.Lock()
defer t.mutex.Unlock()
switch dir {
case Asc:
// Iterate through linked list from bottom (i.e. tail).
for prev := t.list.tail; prev != nil; prev = prev.prev {
// Extract item from list element.
item := (*timeline_item)(prev.data)
// Pass to given function.
if !yield(item.data.(T)) {
break
}
}
case Desc:
// Iterate through linked list from top (i.e. head).
for next := t.list.head; next != nil; next = next.next {
// Extract item from list element.
item := (*timeline_item)(next.data)
// Pass to given function.
if !yield(item.data.(T)) {
break
}
}
}
}
}
// RangeKeys will iterate over all values for given keys in the given index. // RangeKeys will iterate over all values for given keys in the given index.
// //
// Please note that the entire Timeline{} will be locked for the duration of the range // Please note that the entire Timeline{} will be locked for the duration of the range
@ -430,6 +475,48 @@ func (t *Timeline[T, PK]) RangeKeys(index *Index, keys ...Key) func(yield func(T
} }
} }
// RangeKeysUnsafe is functionally similar to RangeKeys(), except it does not pass *copies*
// of data. It allows you to operate on the data directly and modify it. As such it can also
// be more performant to use this function, even for read-write operations.
//
// Please note that the entire Timeline{} will be locked for the duration of the range
// operation, i.e. from the beginning of the first yield call until the end of the last.
func (t *Timeline[T, PK]) RangeKeysUnsafe(index *Index, keys ...Key) func(yield func(T) bool) {
return func(yield func(T) bool) {
if t.copy == nil {
panic("not initialized")
} else if index == nil {
panic("no index given")
} else if index.ptr != unsafe.Pointer(t) {
panic("invalid index for timeline")
} else if yield == nil {
panic("nil func")
}
// Acquire lock.
t.mutex.Lock()
defer t.mutex.Unlock()
for _, key := range keys {
var done bool
// Iterate over values in index under key.
index.get(key.key, func(i *indexed_item) {
// Cast to timeline_item type.
item := to_timeline_item(i)
// Pass value data to yield function.
done = done || !yield(item.data.(T))
})
if done {
break
}
}
}
}
// Trim will remove entries from the timeline in given // Trim will remove entries from the timeline in given
// direction, ensuring timeline is no larger than 'max'. // direction, ensuring timeline is no larger than 'max'.
// If 'max' >= t.Len(), this function is a no-op. // If 'max' >= t.Len(), this function is a no-op.
@ -538,7 +625,7 @@ func (t *Timeline[T, PK]) select_asc(min PK, max *PK, length *int) (values []T)
pkey := *(*PK)(item.pk) pkey := *(*PK)(item.pk)
// Check below min. // Check below min.
if pkey < min { if pkey <= min {
continue continue
} }
@ -661,7 +748,7 @@ func (t *Timeline[T, PK]) select_desc(min *PK, max PK, length *int) (values []T)
pkey := *(*PK)(item.pk) pkey := *(*PK)(item.pk)
// Check above max. // Check above max.
if pkey > max { if pkey >= max {
continue continue
} }
@ -804,60 +891,25 @@ func (t *Timeline[T, PK]) store_one(last *list_elem, value value_with_pk[T, PK])
t_item.data = value.v t_item.data = value.v
t_item.pk = value.kptr t_item.pk = value.kptr
// Get zero'th index, i.e.
// the primary key index.
idx0 := (&t.indices[0])
// Acquire key buf. // Acquire key buf.
buf := new_buffer() buf := new_buffer()
// Convert to indexed_item ptr. // Calculate index key from already extracted
i_item := from_timeline_item(t_item) // primary key, checking for zero return value.
// Append already-extracted
// primary key to 0th index.
idx := (&t.indices[0])
partptrs := []unsafe.Pointer{value.kptr} partptrs := []unsafe.Pointer{value.kptr}
key := idx.key(buf, partptrs) key := idx0.key(buf, partptrs)
evicted := idx.append(key, i_item) if key == "" { // i.e. (!allow_zero && pkey == zero)
if evicted != nil { free_timeline_item(t_item)
// This item is no longer
// indexed, remove from list.
t.list.remove(&evicted.elem)
// Now convert from index_item ptr
// and release it to global mem pool.
evicted := to_timeline_item(evicted)
free_timeline_item(evicted)
}
for i := 1; i < len(t.indices); i++ {
// Get current index ptr.
idx := (&t.indices[i])
// Extract fields comprising index key from value.
parts := extract_fields(value.vptr, idx.fields)
// Calculate this index key.
key := idx.key(buf, parts)
if key == "" {
continue
}
// Append this item to index.
evicted := idx.append(key, i_item)
if evicted != nil {
// This item is no longer
// indexed, remove from list.
t.list.remove(&evicted.elem)
// Now convert from index_item ptr
// and release it to global mem pool.
evicted := to_timeline_item(evicted)
free_timeline_item(evicted)
}
}
// Done with buf.
free_buffer(buf) free_buffer(buf)
return last
}
// Convert to indexed_item pointer.
i_item := from_timeline_item(t_item)
if last == nil { if last == nil {
// No previous element was provided, this is // No previous element was provided, this is
@ -869,28 +921,67 @@ func (t *Timeline[T, PK]) store_one(last *list_elem, value value_with_pk[T, PK])
// The easiest case, this will // The easiest case, this will
// be the first item in list. // be the first item in list.
t.list.push_front(&t_item.elem) t.list.push_front(&t_item.elem)
return t.list.head last = t.list.head // return value
goto indexing
} }
// Extract head item and its primary key. // Extract head item and its primary key.
headItem := (*timeline_item)(t.list.head.data) headItem := (*timeline_item)(t.list.head.data)
headPK := *(*PK)(headItem.pk) headPK := *(*PK)(headItem.pk)
if value.k >= headPK { if value.k > headPK {
// Another easier case, this also // Another easier case, this also
// will be the first item in list. // will be the first item in list.
t.list.push_front(&t_item.elem) t.list.push_front(&t_item.elem)
last = t.list.head // return value
goto indexing
}
// Check (and drop) if pkey is a collision!
if value.k == headPK && is_unique(idx0.flags) {
free_timeline_item(t_item)
free_buffer(buf)
return t.list.head return t.list.head
} }
// Set last=head // Set last = head.next
// to work from. // as next to work from.
last = t.list.head last = t.list.head.next
} }
// Iterate through linked list // Iterate through list from head
// from head to find location. // to find location. Optimized into two
for next := last.next; // // cases to minimize loop CPU cycles.
if is_unique(idx0.flags) {
for next := last; //
next != nil; next = next.next {
// Extract item and its primary key.
nextItem := (*timeline_item)(next.data)
nextPK := *(*PK)(nextItem.pk)
// If pkey smaller than
// cursor's, keep going.
if value.k < nextPK {
continue
}
// Check (and drop) if
// pkey is a collision!
if value.k == nextPK {
free_timeline_item(t_item)
free_buffer(buf)
return next
}
// New pkey is larger than cursor,
// insert into list just before it.
t.list.insert(&t_item.elem, next.prev)
last = next // return value
goto indexing
}
} else {
for next := last; //
next != nil; next = next.next { next != nil; next = next.next {
// Extract item and its primary key. // Extract item and its primary key.
@ -906,13 +997,51 @@ func (t *Timeline[T, PK]) store_one(last *list_elem, value value_with_pk[T, PK])
// New pkey is larger than cursor, // New pkey is larger than cursor,
// insert into list just before it. // insert into list just before it.
t.list.insert(&t_item.elem, next.prev) t.list.insert(&t_item.elem, next.prev)
return next last = next // return value
goto indexing
}
} }
// We reached the end of the // We reached the end of the
// list, insert at tail pos. // list, insert at tail pos.
t.list.push_back(&t_item.elem) t.list.push_back(&t_item.elem)
return t.list.tail last = t.list.tail // return value
goto indexing
indexing:
// Append already-extracted
// primary key to 0th index.
_ = idx0.add(key, i_item)
// Insert item into each of indices.
for i := 1; i < len(t.indices); i++ {
// Get current index ptr.
idx := (&t.indices[i])
// Extract fields comprising index key from value.
parts := extract_fields(value.vptr, idx.fields)
// Calculate this index key,
// checking for zero values.
key := idx.key(buf, parts)
if key == "" {
continue
}
// Add this item to index,
// checking for collisions.
if !idx.add(key, i_item) {
t.delete(t_item)
free_buffer(buf)
return last
}
}
// Done with bufs.
free_buffer(buf)
return last
} }
func (t *Timeline[T, PK]) delete(i *timeline_item) { func (t *Timeline[T, PK]) delete(i *timeline_item) {
@ -957,7 +1086,7 @@ func init() {
// we rely on this to allow a ptr to one to be a ptr to either of them. // we rely on this to allow a ptr to one to be a ptr to either of them.
const off = unsafe.Offsetof(timeline_item{}.indexed_item) const off = unsafe.Offsetof(timeline_item{}.indexed_item)
if off != 0 { if off != 0 {
panic("invalid timeline_item{}.indexed_item offset") panic(assert("offset_of(timeline_item{}.indexed_item) = 0"))
} }
} }
@ -971,10 +1100,9 @@ func from_timeline_item(item *timeline_item) *indexed_item {
func to_timeline_item(item *indexed_item) *timeline_item { func to_timeline_item(item *indexed_item) *timeline_item {
to := (*timeline_item)(unsafe.Pointer(item)) to := (*timeline_item)(unsafe.Pointer(item))
if to.ck != ^uint(0) { if to.ck != ^uint(0) {
// ensure check bits are // ensure check bits are set indicating
// set indicating it was a // it was a timeline_item originally.
// timeline_item originally. panic(assert("check bits are set"))
should_not_reach(true)
} }
return to return to
} }
@ -999,7 +1127,8 @@ func free_timeline_item(item *timeline_item) {
if len(item.indexed) > 0 || if len(item.indexed) > 0 ||
item.elem.next != nil || item.elem.next != nil ||
item.elem.prev != nil { item.elem.prev != nil {
should_not_reach(false) msg := assert("item not in use")
os.Stderr.WriteString(msg + "\n")
return return
} }
item.data = nil item.data = nil

4
vendor/modules.txt vendored
View File

@ -38,7 +38,7 @@ codeberg.org/gruf/go-kv/format
# codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f # codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f
## explicit; go 1.21.3 ## explicit; go 1.21.3
codeberg.org/gruf/go-list codeberg.org/gruf/go-list
# codeberg.org/gruf/go-mangler v1.4.3 # codeberg.org/gruf/go-mangler v1.4.4
## explicit; go 1.19 ## explicit; go 1.19
codeberg.org/gruf/go-mangler codeberg.org/gruf/go-mangler
# codeberg.org/gruf/go-maps v1.0.4 # codeberg.org/gruf/go-maps v1.0.4
@ -63,7 +63,7 @@ codeberg.org/gruf/go-storage/disk
codeberg.org/gruf/go-storage/internal codeberg.org/gruf/go-storage/internal
codeberg.org/gruf/go-storage/memory codeberg.org/gruf/go-storage/memory
codeberg.org/gruf/go-storage/s3 codeberg.org/gruf/go-storage/s3
# codeberg.org/gruf/go-structr v0.9.0 # codeberg.org/gruf/go-structr v0.9.6
## explicit; go 1.22 ## explicit; go 1.22
codeberg.org/gruf/go-structr codeberg.org/gruf/go-structr
# codeberg.org/superseriousbusiness/activity v1.13.0-gts # codeberg.org/superseriousbusiness/activity v1.13.0-gts