[chore]: Bump github.com/minio/minio-go/v7 from 7.0.75 to 7.0.76 (#3262)

Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.75 to 7.0.76.
- [Release notes](https://github.com/minio/minio-go/releases)
- [Commits](https://github.com/minio/minio-go/compare/v7.0.75...v7.0.76)

---
updated-dependencies:
- dependency-name: github.com/minio/minio-go/v7
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Author: dependabot[bot], committed by GitHub, 2024-09-02 13:08:54 +02:00
parent 0a1555521d, commit f924297af1
15 changed files with 224 additions and 134 deletions
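Editor's note: the functional change behind this patch release is that minio-go no longer hard-codes CRC32C for the automatic multipart checksum. `PutObjectOptions` gains an `AutoChecksum` field (defaulted to CRC32C via `SetDefault` in `putObjectCommon`), and the upload paths derive the hasher and header key from it. A minimal usage sketch, assuming a reachable endpoint and an existing bucket; the endpoint, credentials, and bucket/object names are placeholders:

```go
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	data := bytes.Repeat([]byte("x"), 10<<20)

	// As of 7.0.76, AutoChecksum selects the checksum added when no explicit
	// MD5/SHA256 checksum is requested; previously CRC32C was hard-coded.
	_, err = client.PutObject(context.Background(), "my-bucket", "my-object",
		bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{
			AutoChecksum: minio.ChecksumSHA256, // leave unset to keep the CRC32C default
		})
	if err != nil {
		log.Fatalln(err)
	}
}
```

Leaving `AutoChecksum` unset preserves the previous behaviour, since the client defaults it to `ChecksumCRC32C` before dispatching the upload.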

View File

@ -24,7 +24,6 @@ import (
"encoding/hex"
"encoding/xml"
"fmt"
"hash/crc32"
"io"
"net/http"
"net/url"
@ -87,7 +86,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
}
// Initiate a new multipart upload.
@ -116,7 +115,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
customHeader := make(http.Header)
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
crc := opts.AutoChecksum.Hasher()
for partNumber <= totalPartsCount {
length, rErr := readFull(reader, buf)
if rErr == io.EOF && partNumber > 1 {
@ -154,7 +153,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
@ -202,12 +201,13 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
AutoChecksum: opts.AutoChecksum,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {

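For reference, the per-part pattern introduced in this file, reduced to a standalone sketch: `ChecksumType.Hasher()` supplies the matching `hash.Hash`, `Key()` the `x-amz-checksum-*` header name, and the final "hash of hashes" is computed over the concatenated raw part sums. The part payloads and loop below are illustrative only.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"

	"github.com/minio/minio-go/v7"
)

func main() {
	cs := minio.ChecksumCRC32C // the default chosen by AutoChecksum
	h := cs.Hasher()

	var crcBytes []byte
	hdr := make(http.Header)
	for _, part := range [][]byte{[]byte("part-1"), []byte("part-2")} {
		h.Reset()
		h.Write(part)
		sum := h.Sum(nil)
		hdr.Set(cs.Key(), base64.StdEncoding.EncodeToString(sum)) // per-part checksum header
		crcBytes = append(crcBytes, sum...)
	}

	// "Hash of hashes", attached as user metadata when completing the multipart upload.
	h.Reset()
	h.Write(crcBytes)
	fmt.Println(cs.Key(), "=>", base64.StdEncoding.EncodeToString(h.Sum(nil)))
}
```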
View File

@ -22,7 +22,6 @@ import (
"context"
"encoding/base64"
"fmt"
"hash/crc32"
"io"
"net/http"
"net/url"
@ -115,7 +114,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
}
// Initiate a new multipart upload.
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
@ -195,10 +194,10 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
trailer := make(http.Header, 1)
if withChecksum {
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
crc := opts.AutoChecksum.Hasher()
trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil)))
sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) {
trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(hash))
})
}
@ -271,17 +270,18 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
AutoChecksum: opts.AutoChecksum,
}
if withChecksum {
// Add hash of hashes.
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
crc := opts.AutoChecksum.Hasher()
for _, part := range complMultipartUpload.Parts {
cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C)
cs, err := base64.StdEncoding.DecodeString(part.Checksum(opts.AutoChecksum))
if err == nil {
crc.Write(cs)
}
}
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
@ -308,7 +308,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
}
// Calculate the optimal parts info for a given size.
@ -337,7 +337,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
customHeader := make(http.Header)
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
crc := opts.AutoChecksum.Hasher()
md5Hash := c.md5Hasher()
defer md5Hash.Close()
@ -381,7 +381,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
customHeader.Set(opts.AutoChecksum.KeyCapitalized(), base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
@ -433,12 +433,13 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
AutoChecksum: opts.AutoChecksum,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
@ -467,7 +468,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
}
// Cancel all when an error occurs.
@ -500,7 +501,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
// Create checksums
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
crc := opts.AutoChecksum.Hasher()
// Total data read and written to server. should be equal to 'size' at the end of the call.
var totalUploadedSize int64
@ -558,7 +559,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
@ -639,12 +640,13 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
AutoChecksum: opts.AutoChecksum,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
@ -765,7 +767,10 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
contentMD5Base64: md5Base64,
contentSHA256Hex: sha256Hex,
streamSha256: !opts.DisableContentSha256,
addCrc: addCrc,
}
if addCrc {
opts.AutoChecksum.SetDefault(ChecksumCRC32C)
reqMetadata.addCrc = &opts.AutoChecksum
}
if opts.Internal.SourceVersionID != "" {
if opts.Internal.SourceVersionID != nullVersionID {

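In this file the request metadata now carries a `*ChecksumType` instead of a bare bool, and `SetDefault` assigns only when no checksum type has been chosen yet, so an explicitly configured `AutoChecksum` survives. A small sketch of that idiom (values illustrative):

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	var cs minio.ChecksumType           // zero value: no checksum selected
	cs.SetDefault(minio.ChecksumCRC32C) // unset, so this takes effect
	cs.SetDefault(minio.ChecksumSHA256) // no-op: already set
	fmt.Println(cs.String())            // prints "CRC32C"
}
```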
View File

@ -23,7 +23,6 @@ import (
"encoding/base64"
"errors"
"fmt"
"hash/crc32"
"io"
"net/http"
"sort"
@ -90,6 +89,11 @@ type PutObjectOptions struct {
DisableContentSha256 bool
DisableMultipart bool
// AutoChecksum is the type of checksum that will be added if no other checksum is added,
// like MD5 or SHA256 streaming checksum, and it is feasible for the upload type.
// If none is specified CRC32C is used, since it is generally the fastest.
AutoChecksum ChecksumType
// ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
// fill them serially and upload them in parallel.
// This can be used for faster uploads on non-seekable or slow-to-seek input.
@ -300,6 +304,7 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
if size > int64(maxMultipartPutObjectSize) {
return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}
opts.AutoChecksum.SetDefault(ChecksumCRC32C)
// NOTE: Streaming signature is not supported by GCS.
if s3utils.IsGoogleEndpoint(*c.endpointURL) {
@ -361,7 +366,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
}
// Initiate a new multipart upload.
@ -390,7 +395,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
customHeader := make(http.Header)
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
crc := opts.AutoChecksum.Hasher()
for partNumber <= totalPartsCount {
length, rerr := readFull(reader, buf)
@ -413,7 +418,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
@ -466,12 +471,13 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
AutoChecksum: opts.AutoChecksum,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {

View File

@ -340,6 +340,22 @@ type CompletePart struct {
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
}
// Checksum will return the checksum for the given type.
// Will return the empty string if not set.
func (c CompletePart) Checksum(t ChecksumType) string {
switch {
case t.Is(ChecksumCRC32C):
return c.ChecksumCRC32C
case t.Is(ChecksumCRC32):
return c.ChecksumCRC32
case t.Is(ChecksumSHA1):
return c.ChecksumSHA1
case t.Is(ChecksumSHA256):
return c.ChecksumSHA256
}
return ""
}
// completeMultipartUpload container for completing multipart upload.
type completeMultipartUpload struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`

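The new `CompletePart.Checksum` accessor lets the uploader pick whichever per-part checksum field matches the active `ChecksumType` when building the hash of hashes. A small illustration; the base64 value is a placeholder, not a real CRC:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	part := minio.CompletePart{
		PartNumber:     1,
		ETag:           "example-etag",
		ChecksumCRC32C: "AAAAAA==", // placeholder value
	}
	fmt.Println(part.Checksum(minio.ChecksumCRC32C)) // "AAAAAA=="
	fmt.Println(part.Checksum(minio.ChecksumSHA256)) // "" (not set on this part)
}
```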
View File

@ -23,7 +23,6 @@ import (
"encoding/base64"
"errors"
"fmt"
"hash/crc32"
"io"
"math/rand"
"net"
@ -129,7 +128,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.75"
libraryVersion = "v7.0.76"
)
// User Agent should always following the below style.
@ -471,7 +470,7 @@ type requestMetadata struct {
contentMD5Base64 string // carries base64 encoded md5sum
contentSHA256Hex string // carries hex encoded sha256sum
streamSha256 bool
addCrc bool
addCrc *ChecksumType
trailer http.Header // (http.Request).Trailer. Requires v4 signature.
}
@ -616,16 +615,16 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
}
}
if metadata.addCrc && metadata.contentLength > 0 {
if metadata.addCrc != nil && metadata.contentLength > 0 {
if metadata.trailer == nil {
metadata.trailer = make(http.Header, 1)
}
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
crc := metadata.addCrc.Hasher()
metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) {
// Update trailer when done.
metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(hash))
})
metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil)))
}
// Create cancel context to control 'newRetryTimer' go routine.

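Here `requestMetadata.addCrc` becomes a pointer to the selected `ChecksumType`, and `executeMethod` declares the matching trailer up front, filling it once the body has been streamed (via the client's internal `newHashReaderWrapper`). A rough sketch of the underlying HTTP-trailer mechanism using only the standard library; the request is constructed but not sent, and the URL is a placeholder:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"

	"github.com/minio/minio-go/v7"
)

func main() {
	cs := minio.ChecksumCRC32C
	body := bytes.NewReader([]byte("object payload"))

	req, err := http.NewRequest(http.MethodPut, "https://example.invalid/bucket/object", body)
	if err != nil {
		panic(err)
	}
	// Declare the checksum trailer key now; its value is supplied only after
	// the body has been fully read, which is what the hash reader wrapper does.
	req.Trailer = http.Header{}
	req.Trailer.Set(cs.Key(), "")

	fmt.Println("trailer key:", cs.Key())
}
```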
View File

@ -25,6 +25,7 @@ import (
"hash/crc32"
"io"
"math/bits"
"net/http"
)
// ChecksumType contains information about the checksum type.
@ -78,6 +79,11 @@ func (c ChecksumType) Key() string {
return ""
}
// KeyCapitalized returns the capitalized key as used in HTTP headers.
func (c ChecksumType) KeyCapitalized() string {
return http.CanonicalHeaderKey(c.Key())
}
// RawByteLen returns the size of the un-encoded checksum.
func (c ChecksumType) RawByteLen() int {
switch c & checksumMask {
@ -112,6 +118,13 @@ func (c ChecksumType) IsSet() bool {
return bits.OnesCount32(uint32(c)) == 1
}
// SetDefault will set the checksum if not already set.
func (c *ChecksumType) SetDefault(t ChecksumType) {
if !c.IsSet() {
*c = t
}
}
// String returns the type as a string.
// CRC32, CRC32C, SHA1, and SHA256 for valid values.
// Empty string for unset and "<invalid>" if not valid.

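The `ChecksumType` helpers used throughout this diff, shown together: `Key()` yields the lowercase `x-amz-checksum-*` header name, `KeyCapitalized()` its canonical HTTP form (used for user metadata keys), and `String()`/`RawByteLen()` describe the type. Output comments below assume SHA1:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	cs := minio.ChecksumSHA1
	fmt.Println(cs.Key())            // x-amz-checksum-sha1
	fmt.Println(cs.KeyCapitalized()) // X-Amz-Checksum-Sha1
	fmt.Println(cs.String())         // SHA1
	fmt.Println(cs.RawByteLen())     // 20 (un-encoded SHA-1 length)
}
```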
View File

@ -24,7 +24,6 @@ import (
"archive/zip"
"bytes"
"context"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"errors"
@ -166,7 +165,7 @@ func logError(testName, function string, args map[string]interface{}, startTime
}
}
// log failed test runs
// Log failed test runs, do not call this directly, use logError instead, as that correctly stops the test run
func logFailure(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) {
l := baseLogger(testName, function, args, startTime).With(
"status", "FAIL",
@ -2199,22 +2198,15 @@ func testPutObjectWithChecksums() {
defer cleanupBucket(bucketName, c)
tests := []struct {
header string
hasher hash.Hash
// Checksum values
ChecksumCRC32 string
ChecksumCRC32C string
ChecksumSHA1 string
ChecksumSHA256 string
cs minio.ChecksumType
}{
{header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()},
{header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))},
{header: "x-amz-checksum-sha1", hasher: sha1.New()},
{header: "x-amz-checksum-sha256", hasher: sha256.New()},
{cs: minio.ChecksumCRC32C},
{cs: minio.ChecksumCRC32},
{cs: minio.ChecksumSHA1},
{cs: minio.ChecksumSHA256},
}
for i, test := range tests {
for _, test := range tests {
bufSize := dataFileMap["datafile-10-kB"]
// Save the data
@ -2235,29 +2227,27 @@ func testPutObjectWithChecksums() {
logError(testName, function, args, startTime, "", "Read failed", err)
return
}
h := test.hasher
h := test.cs.Hasher()
h.Reset()
// Wrong CRC.
meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
// Test with Wrong CRC.
meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
args["metadata"] = meta
args["range"] = "false"
args["checksum"] = test.cs.String()
resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
DisableMultipart: true,
UserMetadata: meta,
})
if err == nil {
if i == 0 && resp.ChecksumCRC32 == "" {
logIgnored(testName, function, args, startTime, "Checksums does not appear to be supported by backend")
return
}
logError(testName, function, args, startTime, "", "PutObject failed", err)
logError(testName, function, args, startTime, "", "PutObject did not fail on wrong CRC", err)
return
}
// Set correct CRC.
h.Write(b)
meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
reader.Close()
resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
@ -2419,17 +2409,12 @@ func testPutMultipartObjectWithChecksums() {
}
defer cleanupBucket(bucketName, c)
tests := []struct {
header string
hasher hash.Hash
// Checksum values
ChecksumCRC32 string
ChecksumCRC32C string
ChecksumSHA1 string
ChecksumSHA256 string
cs minio.ChecksumType
}{
// Currently there is no way to override the checksum type.
{header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "OpEx0Q==-13"},
{cs: minio.ChecksumCRC32C},
{cs: minio.ChecksumCRC32},
{cs: minio.ChecksumSHA1},
{cs: minio.ChecksumSHA256},
}
for _, test := range tests {
@ -2438,11 +2423,12 @@ func testPutMultipartObjectWithChecksums() {
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
args["checksum"] = test.cs.String()
cmpChecksum := func(got, want string) {
if want != got {
// logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
fmt.Printf("want %s, got %s\n", want, got)
logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
//fmt.Printf("want %s, got %s\n", want, got)
return
}
}
@ -2455,9 +2441,9 @@ func testPutMultipartObjectWithChecksums() {
return
}
reader.Close()
h := test.hasher
h := test.cs.Hasher()
h.Reset()
test.ChecksumCRC32C = hashMultiPart(b, partSize, test.hasher)
want := hashMultiPart(b, partSize, test.cs.Hasher())
// Set correct CRC.
@ -2466,15 +2452,40 @@ func testPutMultipartObjectWithChecksums() {
DisableMultipart: false,
UserMetadata: nil,
PartSize: partSize,
AutoChecksum: test.cs,
})
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C)
switch test.cs {
case minio.ChecksumCRC32C:
cmpChecksum(resp.ChecksumCRC32C, want)
case minio.ChecksumCRC32:
cmpChecksum(resp.ChecksumCRC32, want)
case minio.ChecksumSHA1:
cmpChecksum(resp.ChecksumSHA1, want)
case minio.ChecksumSHA256:
cmpChecksum(resp.ChecksumSHA256, want)
}
s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err)
return
}
want = want[:strings.IndexByte(want, '-')]
switch test.cs {
case minio.ChecksumCRC32C:
cmpChecksum(s.Checksum.ChecksumCRC32C, want)
case minio.ChecksumCRC32:
cmpChecksum(s.Checksum.ChecksumCRC32, want)
case minio.ChecksumSHA1:
cmpChecksum(s.Checksum.ChecksumSHA1, want)
case minio.ChecksumSHA256:
cmpChecksum(s.Checksum.ChecksumSHA256, want)
}
// Read the data back
gopts := minio.GetObjectOptions{Checksum: true}
@ -2496,18 +2507,17 @@ func testPutMultipartObjectWithChecksums() {
// Test part 2 checksum...
h.Reset()
h.Write(b[partSize : 2*partSize])
got := base64.StdEncoding.EncodeToString(h.Sum(nil))
if test.ChecksumSHA256 != "" {
cmpChecksum(st.ChecksumSHA256, got)
}
if test.ChecksumSHA1 != "" {
cmpChecksum(st.ChecksumSHA1, got)
}
if test.ChecksumCRC32 != "" {
cmpChecksum(st.ChecksumCRC32, got)
}
if test.ChecksumCRC32C != "" {
cmpChecksum(st.ChecksumCRC32C, got)
want = base64.StdEncoding.EncodeToString(h.Sum(nil))
switch test.cs {
case minio.ChecksumCRC32C:
cmpChecksum(st.ChecksumCRC32C, want)
case minio.ChecksumCRC32:
cmpChecksum(st.ChecksumCRC32, want)
case minio.ChecksumSHA1:
cmpChecksum(st.ChecksumSHA1, want)
case minio.ChecksumSHA256:
cmpChecksum(st.ChecksumSHA256, want)
}
delete(args, "metadata")
@ -13500,7 +13510,7 @@ func testCors() {
Secure: mustParseBool(os.Getenv(enableHTTPS)),
})
if err != nil {
logFailure(testName, function, args, startTime, "", "MinIO client object creation failed", err)
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
return
}
@ -13516,7 +13526,7 @@ func testCors() {
bucketName = randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
if err != nil {
logFailure(testName, function, args, startTime, "", "MakeBucket failed", err)
logError(testName, function, args, startTime, "", "MakeBucket failed", err)
return
}
}
@ -13526,7 +13536,7 @@ func testCors() {
publicPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:*"],"Resource":["arn:aws:s3:::` + bucketName + `", "arn:aws:s3:::` + bucketName + `/*"]}]}`
err = c.SetBucketPolicy(ctx, bucketName, publicPolicy)
if err != nil {
logFailure(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
return
}
@ -13540,7 +13550,7 @@ func testCors() {
_, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
logFailure(testName, function, args, startTime, "", "PutObject call failed", err)
logError(testName, function, args, startTime, "", "PutObject call failed", err)
return
}
bucketURL := c.EndpointURL().String() + "/" + bucketName + "/"
@ -13548,7 +13558,7 @@ func testCors() {
transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
if err != nil {
logFailure(testName, function, args, startTime, "", "DefaultTransport failed", err)
logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
return
}
httpClient := &http.Client{
@ -14156,7 +14166,7 @@ func testCors() {
}
err = c.SetBucketCors(ctx, bucketName, corsConfig)
if err != nil {
logFailure(testName, function, args, startTime, "", "SetBucketCors failed to apply", err)
logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err)
return
}
}
@ -14165,7 +14175,7 @@ func testCors() {
if test.method != "" && test.url != "" {
req, err := http.NewRequestWithContext(ctx, test.method, test.url, nil)
if err != nil {
logFailure(testName, function, args, startTime, "", "HTTP request creation failed", err)
logError(testName, function, args, startTime, "", "HTTP request creation failed", err)
return
}
req.Header.Set("User-Agent", "MinIO-go-FunctionalTest/"+appVersion)
@ -14175,7 +14185,7 @@ func testCors() {
}
resp, err := httpClient.Do(req)
if err != nil {
logFailure(testName, function, args, startTime, "", "HTTP request failed", err)
logError(testName, function, args, startTime, "", "HTTP request failed", err)
return
}
defer resp.Body.Close()
@ -14183,7 +14193,7 @@ func testCors() {
// Check returned status code
if resp.StatusCode != test.wantStatus {
errStr := fmt.Sprintf(" incorrect status code in response, want: %d, got: %d", test.wantStatus, resp.StatusCode)
logFailure(testName, function, args, startTime, "", errStr, nil)
logError(testName, function, args, startTime, "", errStr, nil)
return
}
@ -14191,12 +14201,12 @@ func testCors() {
if test.wantBodyContains != "" {
body, err := io.ReadAll(resp.Body)
if err != nil {
logFailure(testName, function, args, startTime, "", "Failed to read response body", err)
logError(testName, function, args, startTime, "", "Failed to read response body", err)
return
}
if !strings.Contains(string(body), test.wantBodyContains) {
errStr := fmt.Sprintf(" incorrect body in response, want: %s, in got: %s", test.wantBodyContains, string(body))
logFailure(testName, function, args, startTime, "", errStr, nil)
logError(testName, function, args, startTime, "", errStr, nil)
return
}
}
@ -14213,7 +14223,7 @@ func testCors() {
gotVal = strings.ReplaceAll(gotVal, " ", "")
if gotVal != v {
errStr := fmt.Sprintf(" incorrect header in response, want: %s: '%s', got: '%s'", k, v, gotVal)
logFailure(testName, function, args, startTime, "", errStr, nil)
logError(testName, function, args, startTime, "", errStr, nil)
return
}
}
@ -14241,7 +14251,7 @@ func testCorsSetGetDelete() {
Secure: mustParseBool(os.Getenv(enableHTTPS)),
})
if err != nil {
logFailure(testName, function, args, startTime, "", "MinIO client object creation failed", err)
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
return
}
@ -14258,7 +14268,7 @@ func testCorsSetGetDelete() {
// Make a new bucket.
err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
if err != nil {
logFailure(testName, function, args, startTime, "", "MakeBucket failed", err)
logError(testName, function, args, startTime, "", "MakeBucket failed", err)
return
}
defer cleanupBucket(bucketName, c)
@ -14284,37 +14294,37 @@ func testCorsSetGetDelete() {
corsConfig := cors.NewConfig(corsRules)
err = c.SetBucketCors(ctx, bucketName, corsConfig)
if err != nil {
logFailure(testName, function, args, startTime, "", "SetBucketCors failed to apply", err)
logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err)
return
}
// Get the rules and check they match what we set
gotCorsConfig, err := c.GetBucketCors(ctx, bucketName)
if err != nil {
logFailure(testName, function, args, startTime, "", "GetBucketCors failed", err)
logError(testName, function, args, startTime, "", "GetBucketCors failed", err)
return
}
if !reflect.DeepEqual(corsConfig, gotCorsConfig) {
msg := fmt.Sprintf("GetBucketCors returned unexpected rules, expected: %+v, got: %+v", corsConfig, gotCorsConfig)
logFailure(testName, function, args, startTime, "", msg, nil)
logError(testName, function, args, startTime, "", msg, nil)
return
}
// Delete the rules
err = c.SetBucketCors(ctx, bucketName, nil)
if err != nil {
logFailure(testName, function, args, startTime, "", "SetBucketCors failed to delete", err)
logError(testName, function, args, startTime, "", "SetBucketCors failed to delete", err)
return
}
// Get the rules and check they are now empty
gotCorsConfig, err = c.GetBucketCors(ctx, bucketName)
if err != nil {
logFailure(testName, function, args, startTime, "", "GetBucketCors failed", err)
logError(testName, function, args, startTime, "", "GetBucketCors failed", err)
return
}
if gotCorsConfig != nil {
logFailure(testName, function, args, startTime, "", "GetBucketCors returned unexpected rules", nil)
logError(testName, function, args, startTime, "", "GetBucketCors returned unexpected rules", nil)
return
}