bump dependencies: minio-go, go-sqlite3, goldmark, otel, x/image/webp (#4075)

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4075
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
Author: kim <grufwub@gmail.com>
Committed-by: kim
Date: 2025-04-28 11:20:24 +00:00
Parent: 457ca3c9d3
Commit: 436765a6a2
31 changed files with 500 additions and 167 deletions

@ -0,0 +1,226 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2025 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"github.com/minio/minio-go/v7/pkg/s3utils"
)
// AppendObjectOptions https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-append.html
type AppendObjectOptions struct {
// Provide a progress reader to indicate the current append() progress.
Progress io.Reader
// ChunkSize indicates the maximum append() size,
// it is useful when you want to control how much data
// per append() you are interested in sending to server
// while keeping the input io.Reader of a longer length.
ChunkSize uint64
// Aggressively disable sha256 payload, it is automatically
// turned-off for TLS supporting endpoints, useful in benchmarks
// where you are interested in the peak() numbers.
DisableContentSha256 bool
customHeaders http.Header
checksumType ChecksumType
}
// Header returns the custom header for AppendObject API
func (opts AppendObjectOptions) Header() (header http.Header) {
header = make(http.Header)
for k, v := range opts.customHeaders {
header[k] = v
}
return header
}
func (opts *AppendObjectOptions) setWriteOffset(offset int64) {
if len(opts.customHeaders) == 0 {
opts.customHeaders = make(http.Header)
}
opts.customHeaders["x-amz-write-offset-bytes"] = []string{strconv.FormatInt(offset, 10)}
}
func (opts *AppendObjectOptions) setChecksumParams(info ObjectInfo) {
if len(opts.customHeaders) == 0 {
opts.customHeaders = make(http.Header)
}
fullObject := info.ChecksumMode == ChecksumFullObjectMode.String()
switch {
case info.ChecksumCRC32 != "":
if fullObject {
opts.checksumType = ChecksumFullObjectCRC32
}
case info.ChecksumCRC32C != "":
if fullObject {
opts.checksumType = ChecksumFullObjectCRC32C
}
case info.ChecksumCRC64NVME != "":
// CRC64NVME only has a full object variant
// so it does not carry any special full object
// modifier
opts.checksumType = ChecksumCRC64NVME
}
}
func (opts AppendObjectOptions) validate(c *Client) (err error) {
if opts.ChunkSize > maxPartSize {
return errInvalidArgument("Append chunkSize cannot be larger than max part size allowed")
}
switch {
case !c.trailingHeaderSupport:
return errInvalidArgument("AppendObject() requires Client with TrailingHeaders enabled")
case c.overrideSignerType.IsV2():
return errInvalidArgument("AppendObject() cannot be used with v2 signatures")
case s3utils.IsGoogleEndpoint(*c.endpointURL):
return errInvalidArgument("AppendObject() cannot be used with GCS endpoints")
}
return nil
}
// appendObjectDo - executes the append object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c *Client) appendObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts AppendObjectOptions) (UploadInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return UploadInfo{}, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return UploadInfo{}, err
}
// Set headers.
customHeader := opts.Header()
// Populate request metadata.
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeader,
contentBody: reader,
contentLength: size,
streamSha256: !opts.DisableContentSha256,
}
if opts.checksumType.IsSet() {
reqMetadata.addCrc = &opts.checksumType
}
// Execute PUT on objectName.
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
defer closeResponse(resp)
if err != nil {
return UploadInfo{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
h := resp.Header
// When AppendObject() is used, S3 Express will return final object size as x-amz-object-size
if amzSize := h.Get("x-amz-object-size"); amzSize != "" {
size, err = strconv.ParseInt(amzSize, 10, 64)
if err != nil {
return UploadInfo{}, err
}
}
return UploadInfo{
Bucket: bucketName,
Key: objectName,
ETag: trimEtag(h.Get("ETag")),
Size: size,
// Checksum values
ChecksumCRC32: h.Get(ChecksumCRC32.Key()),
ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()),
ChecksumSHA1: h.Get(ChecksumSHA1.Key()),
ChecksumSHA256: h.Get(ChecksumSHA256.Key()),
ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
ChecksumMode: h.Get(ChecksumFullObjectMode.Key()),
}, nil
}
// AppendObject - S3 Express Zone https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-append.html
func (c *Client) AppendObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
opts AppendObjectOptions,
) (info UploadInfo, err error) {
if objectSize < 0 && opts.ChunkSize == 0 {
return UploadInfo{}, errors.New("object size must be provided when no chunk size is provided")
}
if err = opts.validate(c); err != nil {
return UploadInfo{}, err
}
oinfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{Checksum: true})
if err != nil {
return UploadInfo{}, err
}
if oinfo.ChecksumMode != ChecksumFullObjectMode.String() {
return UploadInfo{}, fmt.Errorf("append API is not allowed on objects that are not full_object checksum type: %s", oinfo.ChecksumMode)
}
opts.setChecksumParams(oinfo) // set the appropriate checksum params based on the existing object checksum metadata.
opts.setWriteOffset(oinfo.Size) // First append must set the current object size as the offset.
if opts.ChunkSize > 0 {
finalObjSize := int64(-1)
if objectSize > 0 {
finalObjSize = info.Size + objectSize
}
totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(finalObjSize, opts.ChunkSize)
if err != nil {
return UploadInfo{}, err
}
buf := make([]byte, partSize)
var partNumber int
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
// Proceed to upload the part.
if partNumber == totalPartsCount {
partSize = lastPartSize
}
n, err := readFull(reader, buf)
if err != nil {
return info, err
}
if n != int(partSize) {
return info, io.ErrUnexpectedEOF
}
rd := newHook(bytes.NewReader(buf[:n]), opts.Progress)
uinfo, err := c.appendObjectDo(ctx, bucketName, objectName, rd, partSize, opts)
if err != nil {
return info, err
}
opts.setWriteOffset(uinfo.Size)
}
}
rd := newHook(reader, opts.Progress)
return c.appendObjectDo(ctx, bucketName, objectName, rd, objectSize, opts)
}
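
A minimal usage sketch for the new API above, assuming a hypothetical S3 Express One Zone endpoint, placeholder credentials, and an existing object created with a FULL_OBJECT checksum type. Note the TrailingHeaders option: validate() rejects any client created without it.

package main

import (
	"bytes"
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Endpoint, bucket, and keys below are placeholders for illustration.
	client, err := minio.New("s3express-usw2-az1.amazonaws.com", &minio.Options{
		Creds:           credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure:          true,
		TrailingHeaders: true, // required by AppendObjectOptions.validate()
	})
	if err != nil {
		log.Fatal(err)
	}

	data := []byte("appended bytes")
	// AppendObject stats the object first, derives the write offset from its
	// current size, and streams the new bytes with a trailing checksum.
	info, err := client.AppendObject(context.Background(),
		"example-bucket--usw2-az1--x-s3", "log.bin",
		bytes.NewReader(data), int64(len(data)), minio.AppendObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("object is now %d bytes", info.Size)
}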

@ -157,13 +157,6 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
return
}
// Continuously run and listen on bucket notification.
-// Create a done channel to control 'ListObjects' go routine.
-retryDoneCh := make(chan struct{}, 1)
-// Indicate to our routine to exit cleanly upon return.
-defer close(retryDoneCh)
// Prepare urlValues to pass into the request on every loop
urlValues := make(url.Values)
urlValues.Set("ping", "10")
@ -172,7 +165,7 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
urlValues["events"] = events
// Wait on the jitter retry loop.
-for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
+for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter) {
// Execute GET on bucket to list objects.
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
bucketName: bucketName,

@ -148,6 +148,7 @@ type UploadInfo struct {
ChecksumSHA1 string
ChecksumSHA256 string
ChecksumCRC64NVME string
+ChecksumMode string
}
// RestoreInfo contains information of the restore operation of an archived object
@ -223,6 +224,7 @@ type ObjectInfo struct {
ChecksumSHA1 string
ChecksumSHA256 string
ChecksumCRC64NVME string
+ChecksumMode string
Internal *struct {
K int // Data blocks

@ -457,5 +457,6 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32,
ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
ChecksumCRC64NVME: completeMultipartUploadResult.ChecksumCRC64NVME,
+ChecksumMode: completeMultipartUploadResult.ChecksumType,
}, nil
}

@ -805,5 +805,6 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
ChecksumSHA1: h.Get(ChecksumSHA1.Key()),
ChecksumSHA256: h.Get(ChecksumSHA256.Key()),
ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
+ChecksumMode: h.Get(ChecksumFullObjectMode.Key()),
}, nil
}

@ -366,6 +366,7 @@ type completeMultipartUploadResult struct {
ChecksumSHA1 string
ChecksumSHA256 string
ChecksumCRC64NVME string
+ChecksumType string
}
// CompletePart sub container lists individual part numbers and their

@ -155,7 +155,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.89"
libraryVersion = "v7.0.91"
)
// User Agent should always follow the below style.
@ -660,13 +660,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil)))
}
-// Create cancel context to control 'newRetryTimer' go routine.
-retryCtx, cancel := context.WithCancel(ctx)
-// Indicate to our routine to exit cleanly upon return.
-defer cancel()
-for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
+for range c.newRetryTimer(ctx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
// Retry executes the following function body if request has an
// error until maxRetries have been exhausted, retry attempts are
// performed after waiting for a given period of time in a
@ -779,7 +773,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
}
// Return an error when retry is canceled or deadlined
-if e := retryCtx.Err(); e != nil {
+if e := ctx.Err(); e != nil {
return nil, e
}
@ -909,6 +903,11 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
// For anonymous requests just return.
if signerType.IsAnonymous() {
+if len(metadata.trailer) > 0 {
+req.Header.Set("X-Amz-Content-Sha256", unsignedPayloadTrailer)
+return signer.UnsignedTrailer(*req, metadata.trailer), nil
+}
return req, nil
}
@ -1066,3 +1065,11 @@ func (c *Client) CredContext() *credentials.CredContext {
Endpoint: c.endpointURL.String(),
}
}
+// GetCreds returns the access creds for the client
+func (c *Client) GetCreds() (credentials.Value, error) {
+if c.credsProvider == nil {
+return credentials.Value{}, errors.New("no credentials provider")
+}
+return c.credsProvider.GetWithContext(c.CredContext())
+}
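
A short sketch of the new GetCreds accessor, reusing the hypothetical client from the earlier example; it resolves the configured credentials provider through CredContext, so it works the same for static, IAM, and STS providers.

val, err := client.GetCreds()
if err != nil {
	log.Fatal(err)
}
log.Println("resolved access key:", val.AccessKeyID)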

@ -34,6 +34,43 @@ import (
"github.com/minio/crc64nvme"
)
+// ChecksumMode contains information about the checksum mode on the object
+type ChecksumMode uint32
+const (
+// ChecksumFullObjectMode Full object checksum `csumCombine(csum1, csum2...)...), csumN...)`
+ChecksumFullObjectMode ChecksumMode = 1 << iota
+// ChecksumCompositeMode Composite checksum `csum([csum1 + csum2 ... + csumN])`
+ChecksumCompositeMode
+// Keep after all valid checksums
+checksumLastMode
+// checksumModeMask is a mask for valid checksum mode types.
+checksumModeMask = checksumLastMode - 1
+)
+// Is returns if c is all of t.
+func (c ChecksumMode) Is(t ChecksumMode) bool {
+return c&t == t
+}
+// Key returns the header key.
+func (c ChecksumMode) Key() string {
+return amzChecksumMode
+}
+func (c ChecksumMode) String() string {
+switch c & checksumModeMask {
+case ChecksumFullObjectMode:
+return "FULL_OBJECT"
+case ChecksumCompositeMode:
+return "COMPOSITE"
+}
+return ""
+}
// ChecksumType contains information about the checksum type.
type ChecksumType uint32
@ -75,6 +112,7 @@ const (
amzChecksumSHA1 = "x-amz-checksum-sha1"
amzChecksumSHA256 = "x-amz-checksum-sha256"
amzChecksumCRC64NVME = "x-amz-checksum-crc64nvme"
+amzChecksumMode = "x-amz-checksum-type"
)
// Base returns the base type, without modifiers.
@ -397,7 +435,7 @@ func addAutoChecksumHeaders(opts *PutObjectOptions) {
}
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
if opts.AutoChecksum.FullObjectRequested() {
opts.UserMetadata["X-Amz-Checksum-Type"] = "FULL_OBJECT"
opts.UserMetadata[amzChecksumMode] = ChecksumFullObjectMode.String()
}
}
@ -414,7 +452,10 @@ func applyAutoChecksum(opts *PutObjectOptions, allParts []ObjectPart) {
} else if opts.AutoChecksum.CanMergeCRC() {
crc, err := opts.AutoChecksum.FullObjectChecksum(allParts)
if err == nil {
-opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): crc.Encoded(), "X-Amz-Checksum-Type": "FULL_OBJECT"}
+opts.UserMetadata = map[string]string{
+opts.AutoChecksum.KeyCapitalized(): crc.Encoded(),
+amzChecksumMode: ChecksumFullObjectMode.String(),
+}
}
}
}
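
The ChecksumMode flags above stringify to the same values S3 uses in the x-amz-checksum-type header, which is what setChecksumParams and the append path compare against. A small sketch:

fmt.Println(minio.ChecksumFullObjectMode.Key())    // x-amz-checksum-type
fmt.Println(minio.ChecksumFullObjectMode.String()) // FULL_OBJECT
fmt.Println(minio.ChecksumCompositeMode.String())  // COMPOSITE
fmt.Println(minio.ChecksumFullObjectMode.Is(minio.ChecksumFullObjectMode)) // true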

@ -20,7 +20,6 @@ package minio
import (
"fmt"
"io"
"sync"
)
// hookReader hooks additional reader in the source stream. It is
@ -28,7 +27,6 @@ import (
// notified about the exact number of bytes read from the primary
// source on each Read operation.
type hookReader struct {
-mu sync.RWMutex
source io.Reader
hook io.Reader
}
@ -36,9 +34,6 @@ type hookReader struct {
// Seek implements io.Seeker. Seeks source first, and if necessary
// seeks hook if Seek method is appropriately found.
func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
-hr.mu.Lock()
-defer hr.mu.Unlock()
// Verify for source has embedded Seeker, use it.
sourceSeeker, ok := hr.source.(io.Seeker)
if ok {
@ -70,9 +65,6 @@ func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
// value 'n' number of bytes are reported through the hook. Returns
// error for all non io.EOF conditions.
func (hr *hookReader) Read(b []byte) (n int, err error) {
-hr.mu.RLock()
-defer hr.mu.RUnlock()
n, err = hr.source.Read(b)
if err != nil && err != io.EOF {
return n, err
@ -92,7 +84,7 @@ func (hr *hookReader) Read(b []byte) (n int, err error) {
// reports the data read from the source to the hook.
func newHook(source, hook io.Reader) io.Reader {
if hook == nil {
-return &hookReader{source: source}
+return source
}
return &hookReader{
source: source,
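
With the mutex gone, hookReader is a plain pass-through again, and newHook now skips the wrapper entirely when no hook is supplied. A hook only ever sees the bytes the source actually yielded, so a counter like the sketch below (type name is an assumption) is all the Progress option needs:

import "sync/atomic"

// progressCounter tallies bytes: minio replays each successful source
// Read through the hook, so Read only needs to count and never fail.
type progressCounter struct{ n atomic.Int64 }

func (p *progressCounter) Read(b []byte) (int, error) {
	p.n.Add(int64(len(b)))
	return len(b), nil
}

// e.g. minio.AppendObjectOptions{Progress: &progressCounter{}}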

@ -333,11 +333,34 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati
if len(trailer) > 0 {
// Use custom chunked encoding.
req.Trailer = trailer
-return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC())
+return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, t)
}
return &req
}
+// UnsignedTrailer will do chunked encoding with a custom trailer.
+func UnsignedTrailer(req http.Request, trailer http.Header) *http.Request {
+if len(trailer) == 0 {
+return &req
+}
+// Initial time.
+t := time.Now().UTC()
+// Set x-amz-date.
+req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+for k := range trailer {
+req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
+}
+req.Header.Set("Content-Encoding", "aws-chunked")
+req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
+// Use custom chunked encoding.
+req.Trailer = trailer
+return StreamingUnsignedV4(&req, "", req.ContentLength, t)
+}
// SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
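
UnsignedTrailer gives anonymous requests the same aws-chunked trailer mechanics signV4 uses above, and is what api.go now calls for unsigned clients with trailing checksums. A sketch of its effect on a request (req is assumed to be an *http.Request with ContentLength set):

trailer := http.Header{}
trailer.Set("x-amz-checksum-crc32c", "") // value streams after the body
req = signer.UnsignedTrailer(*req, trailer)
// req now carries Content-Encoding: aws-chunked, an X-Amz-Trailer entry
// naming the checksum header, and x-amz-decoded-content-length with the
// original body length; the body is wrapped in unsigned chunked encoding.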

@ -17,12 +17,14 @@
package minio
import "time"
import (
"iter"
"math"
"time"
)
// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
-func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
-attemptCh := make(chan int)
+func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitter float64) iter.Seq[int] {
// normalize jitter to the range [0, 1.0]
if jitter < NoJitter {
jitter = NoJitter
@ -44,26 +46,20 @@ func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitte
if sleep > maxSleep {
sleep = maxSleep
}
-if jitter != NoJitter {
+if math.Abs(jitter-NoJitter) > 1e-9 {
sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
}
return sleep
}
-go func() {
-defer close(attemptCh)
+return func(yield func(int) bool) {
var nextBackoff int
for {
-select {
-// Attempts starts.
-case attemptCh <- nextBackoff:
-nextBackoff++
-case <-doneCh:
-// Stop the routine.
+if !yield(nextBackoff) {
return
}
+nextBackoff++
time.Sleep(exponentialBackoffWait(nextBackoff))
}
-}()
-return attemptCh
+}
}
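
The rewrite replaces the goroutine-plus-channel pattern with a Go 1.23 range-over-func iterator: when the caller breaks, yield returns false and the producer simply returns, so there is no done channel to close and no goroutine to leak. A standalone sketch of the same shape (names are illustrative):

package main

import (
	"fmt"
	"iter"
	"time"
)

// backoff yields attempt numbers forever, sleeping with capped exponential
// growth between yields; breaking out of the range loop stops it cleanly.
func backoff(base, maxSleep time.Duration) iter.Seq[int] {
	return func(yield func(int) bool) {
		for attempt := 0; ; attempt++ {
			if !yield(attempt) {
				return
			}
			sleep := base << min(attempt, 16) // clamp the shift to avoid overflow
			if sleep > maxSleep {
				sleep = maxSleep
			}
			time.Sleep(sleep)
		}
	}
}

func main() {
	for attempt := range backoff(time.Millisecond, 8*time.Millisecond) {
		fmt.Println("attempt", attempt)
		if attempt == 3 {
			break // ends the iterator; no cleanup needed
		}
	}
}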

@ -21,6 +21,8 @@ import (
"context"
"crypto/x509"
"errors"
"iter"
"math"
"net/http"
"net/url"
"time"
@ -45,9 +47,7 @@ var DefaultRetryCap = time.Second
// newRetryTimer creates a timer with exponentially increasing
// delays until the maximum retry attempts are reached.
-func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, maxSleep time.Duration, jitter float64) <-chan int {
-attemptCh := make(chan int)
+func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, maxSleep time.Duration, jitter float64) iter.Seq[int] {
// computes the exponential backoff duration according to
// https://www.awsarchitectureblog.com/2015/03/backoff.html
exponentialBackoffWait := func(attempt int) time.Duration {
@ -64,18 +64,22 @@ func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, max
if sleep > maxSleep {
sleep = maxSleep
}
-if jitter != NoJitter {
+if math.Abs(jitter-NoJitter) > 1e-9 {
sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
}
return sleep
}
-go func() {
-defer close(attemptCh)
-for i := 0; i < maxRetry; i++ {
-select {
-case attemptCh <- i + 1:
-case <-ctx.Done():
+return func(yield func(int) bool) {
+// if context is already canceled, skip yield
+select {
+case <-ctx.Done():
+return
+default:
+}
+for i := range maxRetry {
+if !yield(i) {
return
}
@ -85,8 +89,7 @@ func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, max
return
}
}
-}()
-return attemptCh
+}
}
// List of AWS S3 error codes which are retryable.
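
Note the added pre-yield select: a context that is already canceled now produces zero attempts instead of one. A sketch of that behavior from inside the package (c is the unexported *Client):

ctx, cancel := context.WithCancel(context.Background())
cancel() // canceled before the first attempt
for range c.newRetryTimer(ctx, 5, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
	panic("unreachable: the iterator returns before its first yield")
}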

@ -168,6 +168,18 @@ var awsS3EndpointMap = map[string]awsS3Endpoint{
"s3.il-central-1.amazonaws.com",
"s3.dualstack.il-central-1.amazonaws.com",
},
"ap-southeast-5": {
"s3.ap-southeast-5.amazonaws.com",
"s3.dualstack.ap-southeast-5.amazonaws.com",
},
"ap-southeast-7": {
"s3.ap-southeast-7.amazonaws.com",
"s3.dualstack.ap-southeast-7.amazonaws.com",
},
"mx-central-1": {
"s3.mx-central-1.amazonaws.com",
"s3.dualstack.mx-central-1.amazonaws.com",
},
}
// getS3Endpoint get Amazon S3 endpoint based on the bucket location.

@ -390,6 +390,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
ChecksumSHA1: h.Get(ChecksumSHA1.Key()),
ChecksumSHA256: h.Get(ChecksumSHA256.Key()),
ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
+ChecksumMode: h.Get(ChecksumFullObjectMode.Key()),
}, nil
}