[chore]: Bump github.com/ulule/limiter/v3 from 3.11.1 to 3.11.2 (#1841)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Authored by dependabot[bot] on 2023-05-29 13:54:05 +01:00, committed by GitHub
parent 2e7043be95
commit e50b228539
6 changed files with 21 additions and 87 deletions

View File

@@ -17,7 +17,7 @@ _Dead simple rate limit middleware for Go._
Using [Go Modules](https://github.com/golang/go/wiki/Modules)
```bash
-$ go get github.com/ulule/limiter/v3@v3.11.1
+$ go get github.com/ulule/limiter/v3@v3.11.2
```
## Usage

View File

@@ -2,11 +2,11 @@ package memory
import (
    "context"
+    "strings"
    "time"

    "github.com/ulule/limiter/v3"
    "github.com/ulule/limiter/v3/drivers/store/common"
-    "github.com/ulule/limiter/v3/internal/bytebuffer"
)

// Store is the in-memory store.
@@ -35,11 +35,7 @@ func NewStoreWithOptions(options limiter.StoreOptions) limiter.Store {
// Get returns the limit for given identifier.
func (store *Store) Get(ctx context.Context, key string, rate limiter.Rate) (limiter.Context, error) {
-    buffer := bytebuffer.New()
-    defer buffer.Close()
-    buffer.Concat(store.Prefix, ":", key)
-    count, expiration := store.cache.Increment(buffer.String(), 1, rate.Period)
+    count, expiration := store.cache.Increment(store.getCacheKey(key), 1, rate.Period)

    lctx := common.GetContextFromState(time.Now(), rate, expiration, count)
    return lctx, nil
@@ -47,11 +43,7 @@ func (store *Store) Get(ctx context.Context, key string, rate limiter.Rate) (lim
// Increment increments the limit by given count & returns the new limit value for given identifier.
func (store *Store) Increment(ctx context.Context, key string, count int64, rate limiter.Rate) (limiter.Context, error) {
-    buffer := bytebuffer.New()
-    defer buffer.Close()
-    buffer.Concat(store.Prefix, ":", key)
-    newCount, expiration := store.cache.Increment(buffer.String(), count, rate.Period)
+    newCount, expiration := store.cache.Increment(store.getCacheKey(key), count, rate.Period)

    lctx := common.GetContextFromState(time.Now(), rate, expiration, newCount)
    return lctx, nil
@@ -59,11 +51,7 @@ func (store *Store) Increment(ctx context.Context, key string, count int64, rate
// Peek returns the limit for given identifier, without modification on current values.
func (store *Store) Peek(ctx context.Context, key string, rate limiter.Rate) (limiter.Context, error) {
-    buffer := bytebuffer.New()
-    defer buffer.Close()
-    buffer.Concat(store.Prefix, ":", key)
-    count, expiration := store.cache.Get(buffer.String(), rate.Period)
+    count, expiration := store.cache.Get(store.getCacheKey(key), rate.Period)

    lctx := common.GetContextFromState(time.Now(), rate, expiration, count)
    return lctx, nil
@@ -71,12 +59,17 @@ func (store *Store) Peek(ctx context.Context, key string, rate limiter.Rate) (li
// Reset returns the limit for given identifier.
func (store *Store) Reset(ctx context.Context, key string, rate limiter.Rate) (limiter.Context, error) {
-    buffer := bytebuffer.New()
-    defer buffer.Close()
-    buffer.Concat(store.Prefix, ":", key)
-    count, expiration := store.cache.Reset(buffer.String(), rate.Period)
+    count, expiration := store.cache.Reset(store.getCacheKey(key), rate.Period)

    lctx := common.GetContextFromState(time.Now(), rate, expiration, count)
    return lctx, nil
}

+// getCacheKey returns the full path for an identifier.
+func (store *Store) getCacheKey(key string) string {
+    buffer := strings.Builder{}
+    buffer.WriteString(store.Prefix)
+    buffer.WriteString(":")
+    buffer.WriteString(key)
+    return buffer.String()
+}
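The new getCacheKey helper builds the same "Prefix:key" string that the removed bytebuffer calls produced, so only the allocation strategy changes, not the cache keys themselves. For orientation, the store is normally reached through the limiter facade rather than called directly; a minimal usage sketch, assuming the public API from the library's README (limiter.Rate, memory.NewStore, limiter.New) and not anything shown in this diff:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ulule/limiter/v3"
	"github.com/ulule/limiter/v3/drivers/store/memory"
)

func main() {
	// Allow 10 requests per minute for each key.
	rate := limiter.Rate{Period: time.Minute, Limit: 10}

	// In-memory store: counters live under "<prefix>:<key>", built by getCacheKey.
	store := memory.NewStore()

	instance := limiter.New(store, rate)

	// Get increments and returns the counter state for this key.
	lctx, err := instance.Get(context.Background(), "client-ip")
	if err != nil {
		panic(err)
	}
	fmt.Println(lctx.Limit, lctx.Remaining, lctx.Reached)
}
```

With the in-memory store above, the counter for "client-ip" is kept under the store's prefix exactly as it was before this change, so callers see no behavioural difference.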

View File

@@ -1,58 +0,0 @@
-package bytebuffer
-
-import (
-    "sync"
-    "unsafe"
-)
-
-// ByteBuffer is a wrapper around a slice to reduce memory allocation while handling blob of data.
-type ByteBuffer struct {
-    blob []byte
-}
-
-// New creates a new ByteBuffer instance.
-func New() *ByteBuffer {
-    entry := bufferPool.Get().(*ByteBuffer)
-    entry.blob = entry.blob[:0]
-    return entry
-}
-
-// Bytes returns the content buffer.
-func (buffer *ByteBuffer) Bytes() []byte {
-    return buffer.blob
-}
-
-// String returns the content buffer.
-func (buffer *ByteBuffer) String() string {
-    // Copied from strings.(*Builder).String
-    return *(*string)(unsafe.Pointer(&buffer.blob)) // nolint: gosec
-}
-
-// Concat appends given arguments to blob content
-func (buffer *ByteBuffer) Concat(args ...string) {
-    for i := range args {
-        buffer.blob = append(buffer.blob, args[i]...)
-    }
-}
-
-// Close recycles underlying resources of encoder.
-func (buffer *ByteBuffer) Close() {
-    // Proper usage of a sync.Pool requires each entry to have approximately
-    // the same memory cost. To obtain this property when the stored type
-    // contains a variably-sized buffer, we add a hard limit on the maximum buffer
-    // to place back in the pool.
-    //
-    // See https://golang.org/issue/23199
-    if buffer != nil && cap(buffer.blob) < (1<<16) {
-        bufferPool.Put(buffer)
-    }
-}
-
-// A byte buffer pool to reduce memory allocation pressure.
-var bufferPool = &sync.Pool{
-    New: func() interface{} {
-        return &ByteBuffer{
-            blob: make([]byte, 0, 1024),
-        }
-    },
-}
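The Close method deleted above records the standard caveat for pooling variably sized buffers (https://golang.org/issue/23199): only hand a buffer back to the sync.Pool if its capacity is still under a hard cap, so one oversized request cannot pin a large allocation in the pool indefinitely. A standalone sketch of that pattern, using bytes.Buffer and illustrative names (bufpool, maxRecycledCap) that are not part of the limiter code:

```go
// Package bufpool sketches the capped sync.Pool pattern described above.
package bufpool

import (
	"bytes"
	"sync"
)

// maxRecycledCap mirrors the 1<<16 guard in the removed bytebuffer package:
// buffers that grew beyond this capacity are left to the garbage collector
// rather than returned to the pool (see https://golang.org/issue/23199).
const maxRecycledCap = 1 << 16

var pool = sync.Pool{
	New: func() interface{} {
		// Seed the pool with buffers that have a modest starting capacity.
		return bytes.NewBuffer(make([]byte, 0, 1024))
	},
}

// Get returns an empty buffer, reusing a pooled one when available.
func Get() *bytes.Buffer {
	buf := pool.Get().(*bytes.Buffer)
	buf.Reset()
	return buf
}

// Put recycles buf unless it has grown past maxRecycledCap, keeping the
// per-entry memory cost of the pool roughly uniform.
func Put(buf *bytes.Buffer) {
	if buf.Cap() < maxRecycledCap {
		pool.Put(buf)
	}
}
```

For key building, v3.11.2 sidesteps the pooling question entirely: the memory store now uses a short-lived strings.Builder per call (see the getCacheKey hunk above), which is why this whole internal package could be dropped.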