mirror of https://github.com/cwinfo/matterbridge.git synced 2025-07-04 05:27:44 +00:00

Update dependencies/vendor (#1659)

This commit is contained in:
Wim
2021-12-12 00:05:15 +01:00
committed by GitHub
parent 658bdd9faa
commit 3893a035be
214 changed files with 8892 additions and 5650 deletions

View File

@ -1,4 +1,4 @@
# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
@ -171,9 +171,9 @@ The full API Reference is available here.
* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy)
### API Reference : Client custom settings
* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn)
* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff)
* [`SetAppInfo`](https://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
* [`TraceOn`](https://docs.min.io/docs/golang-client-api-reference#TraceOn)
* [`TraceOff`](https://docs.min.io/docs/golang-client-api-reference#TraceOff)
## Full Examples
@ -248,4 +248,4 @@ The full API Reference is available here.
[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
## License
This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.
This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.

View File

@ -27,6 +27,7 @@ import (
"net/url"
"time"
"github.com/google/uuid"
"github.com/minio/minio-go/v7/pkg/replication"
"github.com/minio/minio-go/v7/pkg/s3utils"
)
@ -187,12 +188,39 @@ func (c Client) GetBucketReplicationMetrics(ctx context.Context, bucketName stri
return s, nil
}
// mustGetUUID - get a random UUID.
func mustGetUUID() string {
u, err := uuid.NewRandom()
if err != nil {
return ""
}
return u.String()
}
// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
// is enabled in the replication config
func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (resetID string, err error) {
func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) {
rID = mustGetUUID()
_, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID)
if err != nil {
return rID, err
}
return rID, nil
}
// ResetBucketReplicationOnTarget kicks off replication of previously replicated objects on
// the given target if ExistingObjectReplication is enabled in the replication config
func (c Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (rinfo replication.ResyncTargetsInfo, err error) {
rID := mustGetUUID()
return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, rID)
}
// resetBucketReplicationOnTarget kicks off replication of previously replicated objects on
// the given target (or all targets when tgtArn is empty) if ExistingObjectReplication is enabled
func (c Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string, resetID string) (rinfo replication.ResyncTargetsInfo, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return
}
// Get resources properly escaped and lined up before
// using them in http request.
@ -201,7 +229,10 @@ func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, o
if olderThan > 0 {
urlValues.Set("older-than", olderThan.String())
}
if tgtArn != "" {
urlValues.Set("arn", tgtArn)
}
urlValues.Set("reset-id", resetID)
// Execute GET on bucket to get replication config.
resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
bucketName: bucketName,
@ -210,19 +241,19 @@ func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, o
defer closeResponse(resp)
if err != nil {
return "", err
return rinfo, err
}
if resp.StatusCode != http.StatusOK {
return "", httpRespToErrorResponse(resp, bucketName, "")
return rinfo, httpRespToErrorResponse(resp, bucketName, "")
}
respBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
return rinfo, err
}
if err := json.Unmarshal(respBytes, &resetID); err != nil {
return "", err
if err := json.Unmarshal(respBytes, &rinfo); err != nil {
return rinfo, err
}
return resetID, nil
return rinfo, nil
}
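For illustration, a minimal usage sketch of the reworked reset API; the endpoint, credentials, bucket, and target ARN below are placeholders, not values from this diff. The per-target variant returns a replication.ResyncTargetsInfo rather than a bare reset ID:

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
    client, err := minio.New("play.min.io", &minio.Options{
        Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
        Secure: true,
    })
    if err != nil {
        log.Fatalln(err)
    }

    // Resync previously replicated objects older than a week on one target.
    rinfo, err := client.ResetBucketReplicationOnTarget(context.Background(),
        "my-bucket", 7*24*time.Hour, "arn:minio:replication::example:my-bucket")
    if err != nil {
        log.Fatalln(err)
    }
    fmt.Println("resync started:", rinfo)
}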

View File

@ -223,6 +223,16 @@ func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuck
if dstOpts.Internal.ReplicationRequest {
headers.Set(minIOBucketReplicationRequest, "")
}
if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
}
if !dstOpts.Internal.RetentionTimestamp.IsZero() {
headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
}
if !dstOpts.Internal.TaggingTimestamp.IsZero() {
headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
}
if len(dstOpts.UserTags) != 0 {
headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
}
@ -513,7 +523,7 @@ func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...
// 4. Make final complete-multipart request.
uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
completeMultipartUpload{Parts: objParts})
completeMultipartUpload{Parts: objParts}, PutObjectOptions{})
if err != nil {
return UploadInfo{}, err
}

View File

@ -64,8 +64,9 @@ func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// Owner name.
type Owner struct {
DisplayName string `json:"name"`
ID string `json:"id"`
XMLName xml.Name `xml:"Owner" json:"owner"`
DisplayName string `xml:"ID" json:"name"`
ID string `xml:"DisplayName" json:"id"`
}
// UploadInfo contains information about the
@ -85,6 +86,14 @@ type UploadInfo struct {
ExpirationRuleID string
}
// RestoreInfo contains information about the restore operation of an archived object
type RestoreInfo struct {
// Whether the restore operation is still ongoing
OngoingRestore bool
// When the restored copy of the archived object will be removed
ExpiryTime time.Time
}
// ObjectInfo container for object metadata.
type ObjectInfo struct {
// An ETag is optionally set to md5sum of an object. In case of multipart objects,
@ -115,14 +124,7 @@ type ObjectInfo struct {
Owner Owner
// ACL grant.
Grant []struct {
Grantee struct {
ID string `xml:"ID"`
DisplayName string `xml:"DisplayName"`
URI string `xml:"URI"`
} `xml:"Grantee"`
Permission string `xml:"Permission"`
} `xml:"Grant"`
Grant []Grant
// The class of storage used to store the object.
StorageClass string `json:"storageClass"`
@ -144,6 +146,8 @@ type ObjectInfo struct {
Expiration time.Time
ExpirationRuleID string
Restore *RestoreInfo
// Error
Err error `json:"-"`
}

View File

@ -19,25 +19,36 @@ package minio
import (
"context"
"encoding/xml"
"net/http"
"net/url"
)
// Grantee represents the person being granted permissions.
type Grantee struct {
XMLName xml.Name `xml:"Grantee"`
ID string `xml:"ID"`
DisplayName string `xml:"DisplayName"`
URI string `xml:"URI"`
}
// Grant holds grant information
type Grant struct {
XMLName xml.Name `xml:"Grant"`
Grantee Grantee
Permission string `xml:"Permission"`
}
// AccessControlList contains the set of grantees and the permissions assigned to each grantee.
type AccessControlList struct {
XMLName xml.Name `xml:"AccessControlList"`
Grant []Grant
Permission string `xml:"Permission"`
}
type accessControlPolicy struct {
Owner struct {
ID string `xml:"ID"`
DisplayName string `xml:"DisplayName"`
} `xml:"Owner"`
AccessControlList struct {
Grant []struct {
Grantee struct {
ID string `xml:"ID"`
DisplayName string `xml:"DisplayName"`
URI string `xml:"URI"`
} `xml:"Grantee"`
Permission string `xml:"Permission"`
} `xml:"Grant"`
} `xml:"AccessControlList"`
Owner
AccessControlList
}
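As a side note, the newly exported types map straight onto the S3 ACL XML; a small sketch with a made-up fragment:

package main

import (
    "encoding/xml"
    "fmt"
    "log"

    "github.com/minio/minio-go/v7"
)

func main() {
    // Made-up ACL fragment for illustration.
    doc := `<Grant>
      <Grantee><ID>abc</ID><DisplayName>alice</DisplayName></Grantee>
      <Permission>FULL_CONTROL</Permission>
    </Grant>`

    var g minio.Grant
    if err := xml.Unmarshal([]byte(doc), &g); err != nil {
        log.Fatalln(err)
    }
    fmt.Println(g.Grantee.DisplayName, "=>", g.Permission) // alice => FULL_CONTROL
}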
// GetObjectACL get object ACLs

View File

@ -56,14 +56,13 @@ func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
return listAllMyBucketsResult.Buckets.Bucket, nil
}
/// Bucket Read Operations.
func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix string, recursive, metadata bool, maxKeys int) <-chan ObjectInfo {
/// Bucket List Operations.
func (c Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
// Allocate new list objects channel.
objectStatCh := make(chan ObjectInfo, 1)
// Default listing is delimited at "/"
delimiter := "/"
if recursive {
if opts.Recursive {
// If recursive we do not delimit.
delimiter = ""
}
@ -81,7 +80,7 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix stri
}
// Validate incoming object prefix.
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
@ -96,8 +95,8 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix stri
var continuationToken string
for {
// Get list of objects a maximum of 1000 per request.
result, err := c.listObjectsV2Query(ctx, bucketName, objectPrefix, continuationToken,
fetchOwner, metadata, delimiter, maxKeys)
result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken,
fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers)
if err != nil {
objectStatCh <- ObjectInfo{
Err: err,
@ -148,12 +147,13 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix stri
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
// request parameters :-
// ---------
// ?continuation-token - Used to continue iterating over a set of objects
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
// ?continuation-token - Used to continue iterating over a set of objects
// ?metadata - Specifies if we want metadata for the objects as part of list operation.
func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
// ?delimiter - A delimiter is a character you use to group keys.
// ?start-after - Sets a marker to start listing lexically at this key onwards.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) {
// Validate bucket name.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListBucketV2Result{}, err
@ -173,6 +173,11 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix
urlValues.Set("metadata", "true")
}
// Set this conditionally if asked
if startAfter != "" {
urlValues.Set("start-after", startAfter)
}
// Always set encoding-type in ListObjects V2
urlValues.Set("encoding-type", "url")
@ -202,6 +207,7 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix
bucketName: bucketName,
queryValues: urlValues,
contentSHA256Hex: emptySHA256Hex,
customHeader: headers,
})
defer closeResponse(resp)
if err != nil {
@ -246,12 +252,12 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix
return listBucketResult, nil
}
func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string, recursive bool, maxKeys int) <-chan ObjectInfo {
func (c Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
// Allocate new list objects channel.
objectStatCh := make(chan ObjectInfo, 1)
// Default listing is delimited at "/"
delimiter := "/"
if recursive {
if opts.Recursive {
// If recursive we do not delimit.
delimiter = ""
}
@ -264,7 +270,7 @@ func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string
return objectStatCh
}
// Validate incoming object prefix.
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
@ -276,10 +282,10 @@ func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string
go func(objectStatCh chan<- ObjectInfo) {
defer close(objectStatCh)
marker := ""
marker := opts.StartAfter
for {
// Get list of objects a maximum of 1000 per request.
result, err := c.listObjectsQuery(ctx, bucketName, objectPrefix, marker, delimiter, maxKeys)
result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers)
if err != nil {
objectStatCh <- ObjectInfo{
Err: err,
@ -326,12 +332,12 @@ func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string
return objectStatCh
}
func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix string, recursive bool, maxKeys int) <-chan ObjectInfo {
func (c Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
// Allocate new list objects channel.
resultCh := make(chan ObjectInfo, 1)
// Default listing is delimited at "/"
delimiter := "/"
if recursive {
if opts.Recursive {
// If recursive we do not delimit.
delimiter = ""
}
@ -346,7 +352,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin
}
// Validate incoming object prefix.
if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil {
if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
defer close(resultCh)
resultCh <- ObjectInfo{
Err: err,
@ -365,7 +371,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin
for {
// Get list of objects a maximum of 1000 per request.
result, err := c.listObjectVersionsQuery(ctx, bucketName, prefix, keyMarker, versionIDMarker, delimiter, maxKeys)
result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers)
if err != nil {
resultCh <- ObjectInfo{
Err: err,
@ -376,15 +382,14 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin
// If contents are available loop through and send over channel.
for _, version := range result.Versions {
info := ObjectInfo{
ETag: trimEtag(version.ETag),
Key: version.Key,
LastModified: version.LastModified,
Size: version.Size,
Owner: version.Owner,
StorageClass: version.StorageClass,
IsLatest: version.IsLatest,
VersionID: version.VersionID,
ETag: trimEtag(version.ETag),
Key: version.Key,
LastModified: version.LastModified,
Size: version.Size,
Owner: version.Owner,
StorageClass: version.StorageClass,
IsLatest: version.IsLatest,
VersionID: version.VersionID,
IsDeleteMarker: version.isDeleteMarker,
}
select {
@ -438,7 +443,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int) (ListVersionsResult, error) {
func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int, headers http.Header) (ListVersionsResult, error) {
// Validate bucket name.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListVersionsResult{}, err
@ -483,6 +488,7 @@ func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix,
bucketName: bucketName,
queryValues: urlValues,
contentSHA256Hex: emptySHA256Hex,
customHeader: headers,
})
defer closeResponse(resp)
if err != nil {
@ -534,7 +540,7 @@ func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix,
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) {
// Validate bucket name.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListBucketResult{}, err
@ -571,6 +577,7 @@ func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix,
bucketName: bucketName,
queryValues: urlValues,
contentSHA256Hex: emptySHA256Hex,
customHeader: headers,
})
defer closeResponse(resp)
if err != nil {
@ -626,9 +633,25 @@ type ListObjectsOptions struct {
// batch, advanced use-case not useful for most
// applications
MaxKeys int
// StartAfter starts listing lexically at this
// object onwards; this value is also used as the
// Marker when `UseV1` is set to true.
StartAfter string
// Use the deprecated list objects V1 API
UseV1 bool
headers http.Header
}
// Set adds a key value pair to the options. The
// key-value pair will be part of the HTTP GET request
// headers.
func (o *ListObjectsOptions) Set(key, value string) {
if o.headers == nil {
o.headers = make(http.Header)
}
o.headers.Set(key, value)
}
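A short sketch of the new listing options in use, assuming a client configured as in the earlier example; the bucket name, prefix, and header are made up:

opts := minio.ListObjectsOptions{
    Prefix:     "logs/",
    Recursive:  true,
    StartAfter: "logs/2021-06-30", // skip lexically past this key
}
opts.Set("cache-control", "no-cache") // sent with every underlying list request

for object := range client.ListObjects(context.Background(), "my-bucket", opts) {
    if object.Err != nil {
        log.Fatalln(object.Err)
    }
    fmt.Println(object.Key)
}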
// ListObjects returns objects list after evaluating the passed options.
@ -640,22 +663,22 @@ type ListObjectsOptions struct {
//
func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
if opts.WithVersions {
return c.listObjectVersions(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
return c.listObjectVersions(ctx, bucketName, opts)
}
// Use legacy list objects v1 API
if opts.UseV1 {
return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
return c.listObjects(ctx, bucketName, opts)
}
// Check whether this is a Snowball region; if so, ListObjectsV2 doesn't work, so fall back to the V1 listObjects path.
if location, ok := c.bucketLocCache.Get(bucketName); ok {
if location == "snowball" {
return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
return c.listObjects(ctx, bucketName, opts)
}
}
return c.listObjectsV2(ctx, bucketName, opts.Prefix, opts.Recursive, opts.WithMetadata, opts.MaxKeys)
return c.listObjectsV2(ctx, bucketName, opts)
}
// ListIncompleteUploads - List incompletely uploaded multipart objects.

View File

@ -176,7 +176,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
if err != nil {
return UploadInfo{}, err
}
@ -309,7 +309,7 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
complete completeMultipartUpload) (UploadInfo, error) {
complete completeMultipartUpload, opts PutObjectOptions) (UploadInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return UploadInfo{}, err
@ -336,6 +336,7 @@ func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectN
contentBody: completeMultipartUploadBuffer,
contentLength: int64(len(completeMultipartUploadBytes)),
contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
customHeader: opts.Header(),
}
// Execute POST to complete multipart upload for an objectName.

View File

@ -231,7 +231,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
if err != nil {
return UploadInfo{}, err
}
@ -358,7 +358,7 @@ func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bu
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
if err != nil {
return UploadInfo{}, err
}

View File

@ -60,6 +60,9 @@ type AdvancedPutOptions struct {
ReplicationStatus ReplicationStatus
SourceMTime time.Time
ReplicationRequest bool
RetentionTimestamp time.Time
TaggingTimestamp time.Time
LegalholdTimestamp time.Time
}
// PutObjectOptions represents options specified by user for PutObject call
@ -156,6 +159,16 @@ func (opts PutObjectOptions) Header() (header http.Header) {
if opts.Internal.ReplicationRequest {
header.Set(minIOBucketReplicationRequest, "")
}
if !opts.Internal.LegalholdTimestamp.IsZero() {
header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
}
if !opts.Internal.RetentionTimestamp.IsZero() {
header.Set(minIOBucketReplicationObjectRetentionTimestamp, opts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
}
if !opts.Internal.TaggingTimestamp.IsZero() {
header.Set(minIOBucketReplicationTaggingTimestamp, opts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
}
if len(opts.UserTags) != 0 {
header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
}
@ -360,7 +373,7 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
if err != nil {
return UploadInfo{}, err
}

View File

@ -29,6 +29,50 @@ import (
"github.com/minio/minio-go/v7/pkg/s3utils"
)
// BucketOptions holds special headers to purge buckets, only
// useful when the endpoint is MinIO
type BucketOptions struct {
ForceDelete bool
}
// RemoveBucketWithOptions deletes the bucket.
//
// All objects (including all object versions and delete markers)
// in the bucket will be deleted forcibly if BucketOptions sets
// ForceDelete to 'true'.
func (c Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts BucketOptions) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// Build headers.
headers := make(http.Header)
if opts.ForceDelete {
headers.Set(minIOForceDelete, "true")
}
// Execute DELETE on bucket.
resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
bucketName: bucketName,
contentSHA256Hex: emptySHA256Hex,
customHeader: headers,
})
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp, bucketName, "")
}
}
// Remove the location from cache on a successful delete.
c.bucketLocCache.Delete(bucketName)
return nil
}
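A one-line sketch, assuming a configured client; note this force-delete path is MinIO-specific and irreversible:

err := client.RemoveBucketWithOptions(context.Background(), "my-bucket",
    minio.BucketOptions{ForceDelete: true})
if err != nil {
    log.Fatalln(err)
}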
// RemoveBucket deletes the bucket name.
//
// All objects (including all object versions and delete markers).
@ -69,6 +113,7 @@ type AdvancedRemoveOptions struct {
// RemoveObjectOptions represents options specified by user for RemoveObject call
type RemoveObjectOptions struct {
ForceDelete bool
GovernanceBypass bool
VersionID string
Internal AdvancedRemoveOptions
@ -116,6 +161,9 @@ func (c Client) removeObject(ctx context.Context, bucketName, objectName string,
if opts.Internal.ReplicationRequest {
headers.Set(minIOBucketReplicationRequest, "")
}
if opts.ForceDelete {
headers.Set(minIOForceDelete, "true")
}
// Execute DELETE on objectName.
resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
bucketName: bucketName,

vendor/github.com/minio/minio-go/v7/api-restore.go generated vendored Normal file
View File

@ -0,0 +1,182 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2018-2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"context"
"encoding/xml"
"net/http"
"net/url"
"github.com/minio/minio-go/v7/pkg/s3utils"
"github.com/minio/minio-go/v7/pkg/tags"
)
// RestoreType represents the restore request type
type RestoreType string
const (
// RestoreSelect represents the restore SELECT operation
RestoreSelect = RestoreType("SELECT")
)
// TierType represents a retrieval tier
type TierType string
const (
// TierStandard is the standard retrieval tier
TierStandard = TierType("Standard")
// TierBulk is the bulk retrieval tier
TierBulk = TierType("Bulk")
// TierExpedited is the expedited retrieval tier
TierExpedited = TierType("Expedited")
)
// GlacierJobParameters represents the retrieval tier parameter
type GlacierJobParameters struct {
Tier TierType
}
// Encryption contains the type of server-side encryption used during object retrieval
type Encryption struct {
EncryptionType string
KMSContext string
KMSKeyID string `xml:"KMSKeyId"`
}
// MetadataEntry represents metadata information of the restored object.
type MetadataEntry struct {
Name string
Value string
}
// S3 holds properties of the copy of the archived object
type S3 struct {
AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"`
BucketName string
Prefix string
CannedACL *string `xml:"CannedACL,omitempty"`
Encryption *Encryption `xml:"Encryption,omitempty"`
StorageClass *string `xml:"StorageClass,omitempty"`
Tagging *tags.Tags `xml:"Tagging,omitempty"`
UserMetadata *MetadataEntry `xml:"UserMetadata,omitempty"`
}
// SelectParameters holds the select request parameters
type SelectParameters struct {
XMLName xml.Name `xml:"SelectParameters"`
ExpressionType QueryExpressionType
Expression string
InputSerialization SelectObjectInputSerialization
OutputSerialization SelectObjectOutputSerialization
}
// OutputLocation holds properties of the copy of the archived object
type OutputLocation struct {
XMLName xml.Name `xml:"OutputLocation"`
S3 S3 `xml:"S3"`
}
// RestoreRequest holds properties of the restore object request
type RestoreRequest struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest"`
Type *RestoreType `xml:"Type,omitempty"`
Tier *TierType `xml:"Tier,omitempty"`
Days *int `xml:"Days,omitempty"`
GlacierJobParameters *GlacierJobParameters `xml:"GlacierJobParameters,omitempty"`
Description *string `xml:"Description,omitempty"`
SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"`
OutputLocation *OutputLocation `xml:"OutputLocation,omitempty"`
}
// SetDays sets the days parameter of the restore request
func (r *RestoreRequest) SetDays(v int) {
r.Days = &v
}
// SetGlacierJobParameters sets the GlacierJobParameters of the restore request
func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) {
r.GlacierJobParameters = &v
}
// SetType sets the type of the restore request
func (r *RestoreRequest) SetType(v RestoreType) {
r.Type = &v
}
// SetTier sets the retrieval tier of the restore request
func (r *RestoreRequest) SetTier(v TierType) {
r.Tier = &v
}
// SetDescription sets the description of the restore request
func (r *RestoreRequest) SetDescription(v string) {
r.Description = &v
}
// SetSelectParameters sets SelectParameters of the restore select request
func (r *RestoreRequest) SetSelectParameters(v SelectParameters) {
r.SelectParameters = &v
}
// SetOutputLocation sets the properties of the copy of the archived object
func (r *RestoreRequest) SetOutputLocation(v OutputLocation) {
r.OutputLocation = &v
}
// RestoreObject is an implementation of the AWS S3 RestoreObject API:
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
func (c Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
restoreRequestBytes, err := xml.Marshal(req)
if err != nil {
return err
}
urlValues := make(url.Values)
urlValues.Set("restore", "")
if versionID != "" {
urlValues.Set("versionId", versionID)
}
// Execute POST on bucket/object.
resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentMD5Base64: sumMD5Base64(restoreRequestBytes),
contentSHA256Hex: sum256Hex(restoreRequestBytes),
contentBody: bytes.NewReader(restoreRequestBytes),
contentLength: int64(len(restoreRequestBytes)),
})
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusAccepted {
return httpRespToErrorResponse(resp, bucketName, "")
}
return nil
}
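A minimal sketch of a restore call, assuming a configured client; bucket and object names are placeholders:

req := minio.RestoreRequest{}
req.SetDays(7) // keep the restored copy available for a week
req.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierStandard})

// Empty versionID targets the latest version of the object.
if err := client.RestoreObject(context.Background(), "my-bucket", "archived.csv", "", req); err != nil {
    log.Fatalln(err)
}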

View File

@ -54,6 +54,13 @@ const (
SelectCompressionNONE SelectCompressionType = "NONE"
SelectCompressionGZIP = "GZIP"
SelectCompressionBZIP = "BZIP2"
// Non-standard compression schemes, supported by MinIO hosts:
SelectCompressionZSTD = "ZSTD" // Zstandard compression.
SelectCompressionLZ4 = "LZ4" // LZ4 Stream
SelectCompressionS2 = "S2" // S2 Stream
SelectCompressionSNAPPY = "SNAPPY" // Snappy stream
)
// CSVQuoteFields - is the parameter for how CSV fields are quoted.
@ -330,10 +337,10 @@ func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) er
// SelectObjectInputSerialization - input serialization parameters
type SelectObjectInputSerialization struct {
CompressionType SelectCompressionType
Parquet *ParquetInputOptions `xml:"Parquet,omitempty"`
CSV *CSVInputOptions `xml:"CSV,omitempty"`
JSON *JSONInputOptions `xml:"JSON,omitempty"`
CompressionType SelectCompressionType `xml:"CompressionType,omitempty"`
Parquet *ParquetInputOptions `xml:"Parquet,omitempty"`
CSV *CSVInputOptions `xml:"CSV,omitempty"`
JSON *JSONInputOptions `xml:"JSON,omitempty"`
}
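A hedged sketch of selecting from a ZSTD-compressed CSV object via the existing SelectObjectContent API, assuming a configured client and a MinIO endpoint (standard S3 rejects the non-standard schemes); object name is made up:

opts := minio.SelectObjectOptions{
    Expression:     "select * from s3object",
    ExpressionType: minio.QueryExpressionTypeSQL,
    InputSerialization: minio.SelectObjectInputSerialization{
        CompressionType: minio.SelectCompressionZSTD, // MinIO-only scheme
        CSV:             &minio.CSVInputOptions{FileHeaderInfo: minio.CSVFileHeaderInfoUse},
    },
    OutputSerialization: minio.SelectObjectOutputSerialization{
        CSV: &minio.CSVOutputOptions{},
    },
}
res, err := client.SelectObjectContent(context.Background(), "my-bucket", "data.csv.zst", opts)
if err != nil {
    log.Fatalln(err)
}
defer res.Close()
if _, err := io.Copy(os.Stdout, res); err != nil {
    log.Fatalln(err)
}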
// SelectObjectOutputSerialization - output serialization parameters.

View File

@ -99,11 +99,11 @@ func (c Client) statObject(ctx context.Context, bucketName, objectName string, o
if err != nil {
return ObjectInfo{}, err
}
deleteMarker := resp.Header.Get(amzDeleteMarker) == "true"
if resp != nil {
deleteMarker := resp.Header.Get(amzDeleteMarker) == "true"
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
if resp.StatusCode == http.StatusBadRequest && opts.VersionID != "" && deleteMarker {
if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker {
errResp := ErrorResponse{
StatusCode: resp.StatusCode,
Code: "MethodNotAllowed",

View File

@ -34,6 +34,7 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
md5simd "github.com/minio/md5-simd"
@ -90,6 +91,10 @@ type Client struct {
// Factory for MD5 hash functions.
md5Hasher func() md5simd.Hasher
sha256Hasher func() md5simd.Hasher
healthCheckCh chan struct{}
healthCheck int32
lastOnline time.Time
}
// Options for New method
@ -108,7 +113,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.11"
libraryVersion = "v7.0.14"
)
// User Agent should always follow the below style.
@ -305,6 +310,10 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
// Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
// by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
clnt.lookup = opts.BucketLookup
// healthcheck is not initialized
clnt.healthCheck = unknown
// Return.
return clnt, nil
}
@ -387,6 +396,72 @@ func (c *Client) hashMaterials(isMd5Requested bool) (hashAlgos map[string]md5sim
return hashAlgos, hashSums
}
const (
unknown = -1
offline = 0
online = 1
)
// IsOnline returns true if healthcheck is enabled and the client is online
func (c *Client) IsOnline() bool {
switch atomic.LoadInt32(&c.healthCheck) {
case online, unknown:
return true
}
return false
}
// IsOffline returns true if healthcheck is enabled and the client is offline
func (c *Client) IsOffline() bool {
return !c.IsOnline()
}
// HealthCheck starts a healthcheck to see if the endpoint is up. Returns a context cancellation function
// and an error if the health check is already started
func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) {
if atomic.LoadInt32(&c.healthCheck) == online {
return nil, fmt.Errorf("health check running already")
}
if hcDuration < 1*time.Second {
return nil, fmt.Errorf("health check duration should be atleast 1 second")
}
ctx, cancelFn := context.WithCancel(context.Background())
c.healthCheckCh = make(chan struct{})
atomic.StoreInt32(&c.healthCheck, online)
probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-")
go func(duration time.Duration) {
timer := time.NewTimer(duration)
defer timer.Stop()
for {
select {
case <-ctx.Done():
close(c.healthCheckCh)
atomic.StoreInt32(&c.healthCheck, unknown)
return
case <-timer.C:
timer.Reset(duration)
// Do the health check the first time, and afterwards only when the connection is marked offline
if c.IsOffline() || c.lastOnline.IsZero() {
_, err := c.getBucketLocation(context.Background(), probeBucketName)
if err != nil && IsNetworkOrHostDown(err, false) {
atomic.StoreInt32(&c.healthCheck, offline)
}
switch ToErrorResponse(err).Code {
case "NoSuchBucket", "AccessDenied", "":
c.lastOnline = time.Now()
atomic.StoreInt32(&c.healthCheck, online)
}
}
case <-c.healthCheckCh:
// set offline if client saw a network error
atomic.StoreInt32(&c.healthCheck, offline)
}
}
}(hcDuration)
return cancelFn, nil
}
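Usage is a judgment call on the polling interval; a minimal sketch, assuming a configured client:

// Probe the endpoint every five seconds; durations under a second are rejected.
cancel, err := client.HealthCheck(5 * time.Second)
if err != nil {
    log.Fatalln(err)
}
defer cancel() // stops the background probe goroutine

if client.IsOnline() {
    fmt.Println("endpoint reachable")
}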
// requestMetadata - is a container for all the values to make a request.
type requestMetadata struct {
// If set newRequest presigns the URL.
@ -565,12 +640,25 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque
if isS3CodeRetryable(errResponse.Code) {
continue // Retry.
}
if atomic.LoadInt32(&c.healthCheck) != unknown && IsNetworkOrHostDown(err, false) {
select {
case c.healthCheckCh <- struct{}{}:
default:
}
}
return nil, err
}
// Initiate the request.
res, err = c.do(req)
if err != nil {
if atomic.LoadInt32(&c.healthCheck) != unknown && IsNetworkOrHostDown(err, false) {
select {
case c.healthCheckCh <- struct{}{}:
default:
}
}
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return nil, err
}

View File

@ -69,6 +69,7 @@ const (
amzVersionID = "X-Amz-Version-Id"
amzTaggingCount = "X-Amz-Tagging-Count"
amzExpiration = "X-Amz-Expiration"
amzRestore = "X-Amz-Restore"
amzReplicationStatus = "X-Amz-Replication-Status"
amzDeleteMarker = "X-Amz-Delete-Marker"
@ -89,4 +90,12 @@ const (
minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker"
minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request"
minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request"
// Header indicates last tag update time on source
minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp"
// Header indicates last retention update time on source
minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp"
// Header indicates last legalhold update time on source
minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp"
minIOForceDelete = "x-minio-force-delete"
)

View File

@ -46,13 +46,13 @@ func NewCore(endpoint string, opts *Options) (*Core, error) {
// ListObjects - lists all the objects at a prefix; optionally, with marker and delimiter
// you can further filter the results.
func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys)
return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil)
}
// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
// continuationToken instead of marker to support iteration over the results.
func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, fetchOwner, false, delimiter, maxkeys)
func (c Core) ListObjectsV2(bucketName, objectPrefix, startAfter, continuationToken, delimiter string, maxkeys int) (ListBucketV2Result, error) {
return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, true, false, delimiter, startAfter, maxkeys, nil)
}
// CopyObject - copies an object from source object to destination object on server side.
@ -97,10 +97,10 @@ func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID stri
}
// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart) (string, error) {
func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (string, error) {
res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{
Parts: parts,
})
}, opts)
return res.ETag, err
}
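Callers of the low-level Core API now thread PutObjectOptions through; a sketch, assuming a configured Core client and that uploadID and parts come from earlier NewMultipartUpload/PutObjectPart calls:

etag, err := core.CompleteMultipartUpload(context.Background(),
    "my-bucket", "my-object", uploadID, parts, minio.PutObjectOptions{})
if err != nil {
    log.Fatalln(err)
}
fmt.Println("completed, ETag:", etag)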

View File

@ -38,6 +38,7 @@ import (
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/dustin/go-humanize"
@ -1054,6 +1055,153 @@ func testGetObjectWithVersioning() {
successLogger(testName, function, args, startTime).Info()
}
func testPutObjectWithVersioning() {
// initialize logging params
startTime := time.Now()
testName := getFuncName()
function := "GetObject()"
args := map[string]interface{}{}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Instantiate new minio client object.
c, err := minio.New(os.Getenv(serverEndpoint),
&minio.Options{
Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
Secure: mustParseBool(os.Getenv(enableHTTPS)),
})
if err != nil {
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
return
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
if err != nil {
logError(testName, function, args, startTime, "", "Make bucket failed", err)
return
}
err = c.EnableVersioning(context.Background(), bucketName)
if err != nil {
logError(testName, function, args, startTime, "", "Enable versioning failed", err)
return
}
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
const n = 10
// Read input...
// Save the data concurrently.
var wg sync.WaitGroup
wg.Add(n)
var buffers = make([][]byte, n)
var errs [n]error
for i := 0; i < n; i++ {
r := newRandomReader(int64((1<<20)*i+i), int64(i))
buf, err := ioutil.ReadAll(r)
if err != nil {
logError(testName, function, args, startTime, "", "unexpected failure", err)
return
}
buffers[i] = buf
go func(i int) {
defer wg.Done()
_, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20})
}(i)
}
wg.Wait()
for _, err := range errs {
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
}
objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
var results []minio.ObjectInfo
for info := range objectsInfo {
if info.Err != nil {
logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
return
}
results = append(results, info)
}
if len(results) != n {
logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
return
}
sort.Slice(results, func(i, j int) bool {
return results[i].Size < results[j].Size
})
sort.Slice(buffers, func(i, j int) bool {
return len(buffers[i]) < len(buffers[j])
})
for i := 0; i < len(results); i++ {
opts := minio.GetObjectOptions{VersionID: results[i].VersionID}
reader, err := c.GetObject(context.Background(), bucketName, objectName, opts)
if err != nil {
logError(testName, function, args, startTime, "", "error during GET object", err)
return
}
statInfo, err := reader.Stat()
if err != nil {
logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err)
return
}
if statInfo.ETag != results[i].ETag {
logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err)
return
}
if statInfo.LastModified.Unix() != results[i].LastModified.Unix() {
logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err)
return
}
if statInfo.Size != results[i].Size {
logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err)
return
}
tmpBuffer := bytes.NewBuffer([]byte{})
_, err = io.Copy(tmpBuffer, reader)
if err != nil {
logError(testName, function, args, startTime, "", "unexpected io.Copy()", err)
return
}
if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) {
logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err)
return
}
}
// Delete all objects and their versions as well as the bucket itself
if err = cleanupVersionedBucket(bucketName, c); err != nil {
logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
return
}
successLogger(testName, function, args, startTime).Info()
}
func testCopyObjectWithVersioning() {
// initialize logging params
startTime := time.Now()
@ -1191,6 +1339,166 @@ func testCopyObjectWithVersioning() {
successLogger(testName, function, args, startTime).Info()
}
func testConcurrentCopyObjectWithVersioning() {
// initialize logging params
startTime := time.Now()
testName := getFuncName()
function := "CopyObject()"
args := map[string]interface{}{}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Instantiate new minio client object.
c, err := minio.New(os.Getenv(serverEndpoint),
&minio.Options{
Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
Secure: mustParseBool(os.Getenv(enableHTTPS)),
})
if err != nil {
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
return
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
if err != nil {
logError(testName, function, args, startTime, "", "Make bucket failed", err)
return
}
err = c.EnableVersioning(context.Background(), bucketName)
if err != nil {
logError(testName, function, args, startTime, "", "Enable versioning failed", err)
return
}
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
var testFiles = []string{"datafile-10-kB"}
for _, testFile := range testFiles {
r := getDataReader(testFile)
buf, err := ioutil.ReadAll(r)
if err != nil {
logError(testName, function, args, startTime, "", "unexpected failure", err)
return
}
r.Close()
_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
}
objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
var infos []minio.ObjectInfo
for info := range objectsInfo {
if info.Err != nil {
logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
return
}
infos = append(infos, info)
}
sort.Slice(infos, func(i, j int) bool {
return infos[i].Size < infos[j].Size
})
reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID})
if err != nil {
logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err)
return
}
oldestContent, err := ioutil.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
return
}
// Copy Source
srcOpts := minio.CopySrcOptions{
Bucket: bucketName,
Object: objectName,
VersionID: infos[0].VersionID,
}
args["src"] = srcOpts
dstOpts := minio.CopyDestOptions{
Bucket: bucketName,
Object: objectName + "-copy",
}
args["dst"] = dstOpts
// Perform the Copy concurrently
const n = 10
var wg sync.WaitGroup
wg.Add(n)
var errs [n]error
for i := 0; i < n; i++ {
go func(i int) {
defer wg.Done()
_, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts)
}(i)
}
wg.Wait()
for _, err := range errs {
if err != nil {
logError(testName, function, args, startTime, "", "CopyObject failed", err)
return
}
}
objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object})
infos = []minio.ObjectInfo{}
for info := range objectsInfo {
// Destination object
readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID})
if err != nil {
logError(testName, function, args, startTime, "", "GetObject failed", err)
return
}
defer readerCopy.Close()
newestContent, err := ioutil.ReadAll(readerCopy)
if err != nil {
logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
return
}
if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) {
logError(testName, function, args, startTime, "", "Unexpected destination object content", err)
return
}
infos = append(infos, info)
}
if len(infos) != n {
logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
return
}
// Delete all objects and their versions as well as the bucket itself
if err = cleanupVersionedBucket(bucketName, c); err != nil {
logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
return
}
successLogger(testName, function, args, startTime).Info()
}
func testComposeObjectWithVersioning() {
// initialize logging params
startTime := time.Now()
@ -7548,7 +7856,7 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() {
completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag})
// Complete the multipart upload
_, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts)
_, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
return
@ -7606,7 +7914,7 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() {
}
// Complete the multipart upload
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
return
@ -7783,7 +8091,7 @@ func testSSECEncryptedToSSECCopyObjectPart() {
}
// Complete the multipart upload
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
return
@ -7959,7 +8267,7 @@ func testSSECEncryptedToUnencryptedCopyPart() {
}
// Complete the multipart upload
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
return
@ -8138,7 +8446,7 @@ func testSSECEncryptedToSSES3CopyObjectPart() {
}
// Complete the multipart upload
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
return
@ -8312,7 +8620,7 @@ func testUnencryptedToSSECCopyObjectPart() {
}
// Complete the multipart upload
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
return
@ -8482,7 +8790,7 @@ func testUnencryptedToUnencryptedCopyPart() {
}
// Complete the multipart upload
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
return
@ -8654,7 +8962,7 @@ func testUnencryptedToSSES3CopyObjectPart() {
}
// Complete the multipart upload
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
return
@ -8829,7 +9137,7 @@ func testSSES3EncryptedToSSECCopyObjectPart() {
}
// Complete the multipart upload
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
return
@ -9000,7 +9308,7 @@ func testSSES3EncryptedToUnencryptedCopyPart() {
}
// Complete the multipart upload
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
return
@ -9174,7 +9482,7 @@ func testSSES3EncryptedToSSES3CopyObjectPart() {
}
// Complete the multipart upload
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
return
@ -11285,22 +11593,20 @@ func testRemoveObjects() {
var reader = getDataReader("datafile-129-MB")
defer reader.Close()
n, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
if err != nil {
log.Fatalln(err)
logError(testName, function, args, startTime, "", "Error uploading object", err)
}
log.Println("Uploaded", objectName, " of size: ", n, "to bucket: ", bucketName, "Successfully.")
// Replace with smaller...
bufSize = dataFileMap["datafile-10-kB"]
reader = getDataReader("datafile-10-kB")
defer reader.Close()
n, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
if err != nil {
log.Fatalln(err)
logError(testName, function, args, startTime, "", "Error uploading object", err)
}
log.Println("Uploaded", objectName, " of size: ", n, "to bucket: ", bucketName, "Successfully.")
t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
m := minio.RetentionMode(minio.Governance)
@ -11416,6 +11722,7 @@ func main() {
testFPutObjectContextV2()
testFGetObjectContextV2()
testPutObjectContextV2()
testPutObjectWithVersioning()
testMakeBucketError()
testMakeBucketRegions()
testPutObjectWithMetadata()
@ -11453,6 +11760,7 @@ func main() {
testStatObjectWithVersioning()
testGetObjectWithVersioning()
testCopyObjectWithVersioning()
testConcurrentCopyObjectWithVersioning()
testComposeObjectWithVersioning()
testRemoveObjectWithVersioning()
testRemoveObjectsWithVersioning()

View File

@ -22,8 +22,13 @@ import (
"time"
)
// STSVersion sts version string
const STSVersion = "2011-06-15"
const (
// STSVersion sts version string
STSVersion = "2011-06-15"
// Fraction of a credential's lifetime after which a refresh is triggered;
// the remainder is trimmed from the given expiration.
defaultExpiryWindow = 0.8
)
// A Value is the AWS credentials value for individual credential fields.
type Value struct {
@ -82,10 +87,15 @@ type Expiry struct {
// the expiration time given to ensure no requests are made with expired
// tokens.
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
e.expiration = expiration
if window > 0 {
e.expiration = e.expiration.Add(-window)
if e.CurrentTime == nil {
e.CurrentTime = time.Now
}
cut := window
if cut < 0 {
expireIn := expiration.Sub(e.CurrentTime())
cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow))
}
e.expiration = expiration.Add(-cut)
}
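To make the arithmetic concrete, here is a standalone sketch of the same logic (the `defaultExpiryWindow` constant and the negative-window convention are taken from the diff above; everything else is illustrative). A credential valid for one hour is treated as expired after 48 minutes when the window is negative:

```go
package main

import (
	"fmt"
	"time"
)

// effectiveExpiry mirrors SetExpiration above: a negative window means
// "trim (1 - 0.8) = 20% of the remaining lifetime"; a positive window is
// trimmed verbatim.
func effectiveExpiry(now, expiration time.Time, window time.Duration) time.Time {
	const defaultExpiryWindow = 0.8
	cut := window
	if cut < 0 {
		expireIn := expiration.Sub(now)
		cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow))
	}
	return expiration.Add(-cut)
}

func main() {
	now := time.Now()
	exp := now.Add(1 * time.Hour)
	fmt.Println(effectiveExpiry(now, exp, -1).Sub(now))             // 48m0s
	fmt.Println(effectiveExpiry(now, exp, 10*time.Minute).Sub(now)) // 50m0s
}
```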
// IsExpired returns if the credentials are expired.

View File

@ -38,7 +38,10 @@ import (
// prior to the credentials actually expiring. This is beneficial
// so race conditions with expiring credentials do not cause
// requests to fail unexpectedly due to ExpiredTokenException exceptions.
const DefaultExpiryWindow = time.Second * 10 // 10 secs
// DefaultExpiryWindow can be used as a parameter to (*Expiry).SetExpiration.
// When used, the token refresh is triggered once 80% of the time until the
// actual expiration has elapsed.
const DefaultExpiryWindow = -1
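A sketch of how a custom credentials provider would plug into this: the `Provider` interface shape and the `Expiry` embedding follow this package, while the provider itself and its one-hour token are hypothetical.

```go
package example

import (
	"time"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

// tokenProvider is a hypothetical Provider; the SetExpiration call is the
// point of the sketch.
type tokenProvider struct {
	credentials.Expiry // supplies IsExpired, driven by SetExpiration
}

func (p *tokenProvider) Retrieve() (credentials.Value, error) {
	// Pretend the backend issued a credential valid for one hour.
	expiration := time.Now().Add(1 * time.Hour)
	// With DefaultExpiryWindow (-1), IsExpired starts returning true once
	// 80% of that hour has elapsed, prompting an early refresh.
	p.SetExpiration(expiration, credentials.DefaultExpiryWindow)
	return credentials.Value{
		AccessKeyID:     "ACCESS",
		SecretAccessKey: "SECRET",
		SignerType:      credentials.SignatureV4,
	}, nil
}
```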
// An IAM retrieves credentials from the EC2 service, and keeps track of
// whether those credentials are expired.
@ -181,10 +184,6 @@ type ec2RoleCredRespBody struct {
// be sent to fetch the rolling access credentials.
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
func getIAMRoleURL(endpoint string) (*url.URL, error) {
if endpoint == "" {
endpoint = defaultIAMRoleEndpoint
}
u, err := url.Parse(endpoint)
if err != nil {
return nil, err
@ -281,6 +280,10 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
// If the credentials cannot be found, or there is an error
// reading the response an error will be returned.
func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
if endpoint == "" {
endpoint = defaultIAMRoleEndpoint
}
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
token, _ := fetchIMDSToken(client, endpoint)

View File

@ -1,6 +1,6 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
* Copyright 2019-2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -20,6 +20,7 @@ package credentials
import (
"encoding/xml"
"errors"
"fmt"
"net/http"
"net/url"
"time"
@ -60,26 +61,86 @@ type LDAPIdentity struct {
// LDAP username/password used to fetch LDAP STS credentials.
LDAPUsername, LDAPPassword string
// Session policy to apply to the generated credentials. Leave empty to
// use the full access policy available to the user.
Policy string
// RequestedExpiry is the configured expiry duration for credentials
// requested from LDAP.
RequestedExpiry time.Duration
}
// NewLDAPIdentity returns a new credentials object that uses LDAP
// Identity.
func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string) (*Credentials, error) {
func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) {
l := LDAPIdentity{
Client: &http.Client{Transport: http.DefaultTransport},
STSEndpoint: stsEndpoint,
LDAPUsername: ldapUsername,
LDAPPassword: ldapPassword,
}
for _, optFunc := range optFuncs {
optFunc(&l)
}
return New(&l), nil
}
// LDAPIdentityOpt is a function type used to configure the LDAPIdentity
// instance.
type LDAPIdentityOpt func(*LDAPIdentity)
// LDAPIdentityPolicyOpt sets the session policy for requested credentials.
func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt {
return func(k *LDAPIdentity) {
k.Policy = policy
}
}
// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials.
func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt {
return func(k *LDAPIdentity) {
k.RequestedExpiry = d
}
}
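A usage sketch of the new functional options (the endpoint, user, and policy document are placeholders):

```go
package example

import (
	"time"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

// ldapCreds requests LDAP STS credentials scoped by a session policy and a
// shortened expiry, using the option functions defined above.
func ldapCreds() (*credentials.Credentials, error) {
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",` +
		`"Action":["s3:GetObject"],"Resource":["arn:aws:s3:::mybucket/*"]}]}`
	return credentials.NewLDAPIdentity(
		"https://sts.example.com:9000", // placeholder STS endpoint
		"ldap-user", "ldap-secret",
		credentials.LDAPIdentityPolicyOpt(policy),
		credentials.LDAPIdentityExpiryOpt(30*time.Minute),
	)
}
```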
func stripPassword(err error) error {
urlErr, ok := err.(*url.Error)
if ok {
u, _ := url.Parse(urlErr.URL)
if u == nil {
return urlErr
}
values := u.Query()
values.Set("LDAPPassword", "xxxxx")
u.RawQuery = values.Encode()
urlErr.URL = u.String()
return urlErr
}
return err
}
// NewLDAPIdentityWithSessionPolicy returns a new credentials object that uses
// LDAP Identity with a specified session policy. The `policy` parameter must be
// a JSON string specifying the policy document.
//
// DEPRECATED: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) {
return New(&LDAPIdentity{
Client: &http.Client{Transport: http.DefaultTransport},
STSEndpoint: stsEndpoint,
LDAPUsername: ldapUsername,
LDAPPassword: ldapPassword,
Policy: policy,
}), nil
}
// Retrieve gets the credential by calling the MinIO STS API for
// LDAP on the configured stsEndpoint.
func (k *LDAPIdentity) Retrieve() (value Value, err error) {
u, kerr := url.Parse(k.STSEndpoint)
if kerr != nil {
err = kerr
return
u, err := url.Parse(k.STSEndpoint)
if err != nil {
return value, err
}
v := url.Values{}
@ -87,25 +148,28 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
v.Set("Version", STSVersion)
v.Set("LDAPUsername", k.LDAPUsername)
v.Set("LDAPPassword", k.LDAPPassword)
if k.Policy != "" {
v.Set("Policy", k.Policy)
}
if k.RequestedExpiry != 0 {
v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds())))
}
u.RawQuery = v.Encode()
req, kerr := http.NewRequest(http.MethodPost, u.String(), nil)
if kerr != nil {
err = kerr
return
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
if err != nil {
return value, stripPassword(err)
}
resp, kerr := k.Client.Do(req)
if kerr != nil {
err = kerr
return
resp, err := k.Client.Do(req)
if err != nil {
return value, stripPassword(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err = errors.New(resp.Status)
return
return value, errors.New(resp.Status)
}
r := AssumeRoleWithLDAPResponse{}

View File

@ -54,8 +54,9 @@ type WebIdentityResult struct {
// WebIdentityToken - web identity token with expiry.
type WebIdentityToken struct {
Token string
Expiry int
Token string
AccessToken string
Expiry int
}
// An STSWebIdentity retrieves credentials from the MinIO service, and keeps track if
@ -121,6 +122,10 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
v.Set("RoleSessionName", roleSessionName)
}
v.Set("WebIdentityToken", idToken.Token)
if idToken.AccessToken != "" {
// Usually set when the server is using the extended userInfo endpoint.
v.Set("WebIdentityAccessToken", idToken.AccessToken)
}
if idToken.Expiry > 0 {
v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
}
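A hedged usage sketch: assuming the callback-based `NewSTSWebIdentity` constructor defined elsewhere in this file, returning a `WebIdentityToken` with `AccessToken` set is what opts into the new `WebIdentityAccessToken` form field above.

```go
package example

import (
	"github.com/minio/minio-go/v7/pkg/credentials"
)

// webIDCreds forwards both the OIDC ID token and, when available, the access
// token; the endpoint is a placeholder.
func webIDCreds(idToken, accessToken string) (*credentials.Credentials, error) {
	return credentials.NewSTSWebIdentity(
		"https://sts.example.com:9000",
		func() (*credentials.WebIdentityToken, error) {
			return &credentials.WebIdentityToken{
				Token:       idToken,
				AccessToken: accessToken, // sent as WebIdentityAccessToken when non-empty
			}, nil
		},
	)
}
```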

View File

@ -19,6 +19,7 @@
package lifecycle
import (
"encoding/json"
"encoding/xml"
"time"
)
@ -116,6 +117,26 @@ type Transition struct {
Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
}
// MarshalJSON customizes json encoding by omitting empty values
func (t Transition) MarshalJSON() ([]byte, error) {
type transition struct {
Date *ExpirationDate `json:"Date,omitempty"`
StorageClass string `json:"StorageClass,omitempty"`
Days *ExpirationDays `json:"Days,omitempty"`
}
newt := transition{
StorageClass: t.StorageClass,
}
if !t.IsDaysNull() {
newt.Days = &t.Days
}
if !t.IsDateNull() {
newt.Date = &t.Date
}
return json.Marshal(newt)
}
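The pointer-field wrapper pattern here (repeated below for `Filter`, `Expiration`, and `Rule`) keeps zero values out of the encoded JSON. A small sketch of the observable effect, with the expected output noted as an assumption in the comment:

```go
package example

import (
	"encoding/json"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/lifecycle"
)

// demoTransitionJSON marshals a Transition with only Days set; the custom
// MarshalJSON above drops the zero Date instead of emitting a bogus value.
func demoTransitionJSON() {
	t := lifecycle.Transition{
		Days:         lifecycle.ExpirationDays(30),
		StorageClass: "GLACIER",
	}
	b, _ := json.Marshal(t)
	fmt.Println(string(b)) // expected: {"StorageClass":"GLACIER","Days":30}
}
```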
// IsDaysNull returns true if days field is null
func (t Transition) IsDaysNull() bool {
return t.Days == ExpirationDays(0)
@ -160,6 +181,31 @@ type Filter struct {
Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
}
// IsNull returns true if all Filter fields are empty.
func (f Filter) IsNull() bool {
return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == ""
}
// MarshalJSON customizes json encoding by removing empty values.
func (f Filter) MarshalJSON() ([]byte, error) {
type filter struct {
And *And `json:"And,omitempty"`
Prefix string `json:"Prefix,omitempty"`
Tag *Tag `json:"Tag,omitempty"`
}
newf := filter{
Prefix: f.Prefix,
}
if !f.Tag.IsEmpty() {
newf.Tag = &f.Tag
}
if !f.And.IsEmpty() {
newf.And = &f.And
}
return json.Marshal(newf)
}
// MarshalXML - produces the xml representation of the Filter struct
// only one of Prefix, And and Tag should be present in the output.
func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
@ -238,6 +284,26 @@ type Expiration struct {
DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"`
}
// MarshalJSON customizes json encoding by removing empty day/date specification.
func (e Expiration) MarshalJSON() ([]byte, error) {
type expiration struct {
Date *ExpirationDate `json:"Date,omitempty"`
Days *ExpirationDays `json:"Days,omitempty"`
DeleteMarker ExpireDeleteMarker
}
newexp := expiration{
DeleteMarker: e.DeleteMarker,
}
if !e.IsDaysNull() {
newexp.Days = &e.Days
}
if !e.IsDateNull() {
newexp.Date = &e.Date
}
return json.Marshal(newexp)
}
// IsDaysNull returns true if days field is null
func (e Expiration) IsDaysNull() bool {
return e.Days == ExpirationDays(0)
@ -267,6 +333,47 @@ func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) e
return en.EncodeElement(expirationWrapper(e), startElement)
}
// MarshalJSON customizes json encoding by omitting empty values
func (r Rule) MarshalJSON() ([]byte, error) {
type rule struct {
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"`
Expiration *Expiration `json:"Expiration,omitempty"`
ID string `json:"ID"`
RuleFilter *Filter `json:"Filter,omitempty"`
NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"`
NoncurrentVersionTransition *NoncurrentVersionTransition `json:"NoncurrentVersionTransition,omitempty"`
Prefix string `json:"Prefix,omitempty"`
Status string `json:"Status"`
Transition *Transition `json:"Transition,omitempty"`
}
newr := rule{
Prefix: r.Prefix,
Status: r.Status,
ID: r.ID,
}
if !r.RuleFilter.IsNull() {
newr.RuleFilter = &r.RuleFilter
}
if !r.AbortIncompleteMultipartUpload.IsDaysNull() {
newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload
}
if !r.Expiration.IsNull() {
newr.Expiration = &r.Expiration
}
if !r.Transition.IsNull() {
newr.Transition = &r.Transition
}
if !r.NoncurrentVersionExpiration.IsDaysNull() {
newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration
}
if !r.NoncurrentVersionTransition.IsDaysNull() {
newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition
}
return json.Marshal(newr)
}
// Rule represents a single rule in lifecycle configuration
type Rule struct {
XMLName xml.Name `xml:"Rule,omitempty" json:"-"`

View File

@ -47,13 +47,13 @@ const (
// Options represents options to set a replication configuration rule
type Options struct {
Op OptionType
RoleArn string
ID string
Prefix string
RuleStatus string
Priority string
TagString string
StorageClass string
RoleArn string
DestBucket string
IsTagSet bool
IsSCSet bool
@ -103,9 +103,17 @@ func (c *Config) AddRule(opts Options) error {
if err != nil {
return err
}
if opts.RoleArn != c.Role && c.Role != "" {
return fmt.Errorf("role ARN does not match existing configuration")
if opts.RoleArn != "" {
tokens := strings.Split(opts.RoleArn, ":")
if len(tokens) != 6 {
return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn)
}
if !strings.HasPrefix(opts.RoleArn, "arn:aws:iam") {
return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn)
}
c.Role = opts.RoleArn
}
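A standalone sketch of the same two checks, with an ARN that passes them (the account and role name are illustrative):

```go
package example

import (
	"fmt"
	"strings"
)

// validRoleArn mirrors the validation above: six colon-separated tokens and
// an arn:aws:iam prefix.
func validRoleArn(arn string) error {
	if len(strings.Split(arn, ":")) != 6 {
		return fmt.Errorf("invalid format for replication Role Arn: %v", arn)
	}
	if !strings.HasPrefix(arn, "arn:aws:iam") {
		return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", arn)
	}
	return nil
}

// validRoleArn("arn:aws:iam::123456789012:role/replication") == nil
// validRoleArn("my-role") != nil (only one token)
```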
var status Status
// toggle rule status for edit option
switch opts.RuleStatus {
@ -139,28 +147,11 @@ func (c *Config) AddRule(opts Options) error {
if opts.ID == "" {
opts.ID = xid.New().String()
}
arnStr := opts.RoleArn
if opts.RoleArn == "" {
arnStr = c.Role
}
if arnStr == "" {
return fmt.Errorf("role ARN required")
}
tokens := strings.Split(arnStr, ":")
if len(tokens) != 6 {
return fmt.Errorf("invalid format for replication Arn")
}
if c.Role == "" {
c.Role = arnStr
}
destBucket := opts.DestBucket
// ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 {
if len(btokens) == 1 {
destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
} else {
return fmt.Errorf("destination bucket needs to be in Arn format")
}
return fmt.Errorf("destination bucket needs to be in Arn format")
}
dmStatus := Disabled
if opts.ReplicateDeleteMarkers != "" {
@ -236,13 +227,18 @@ func (c *Config) AddRule(opts Options) error {
if err := newRule.Validate(); err != nil {
return err
}
// If the replication config uses RoleArn, migrate it to the destination element as the target ARN for the remote bucket (MinIO configuration).
if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") {
for i := range c.Rules {
c.Rules[i].Destination.Bucket = c.Role
}
c.Role = ""
}
for _, rule := range c.Rules {
if rule.Priority == newRule.Priority {
return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
}
if rule.Destination.Bucket != newRule.Destination.Bucket {
return fmt.Errorf("the destination bucket must be same for all rules")
}
if rule.ID == newRule.ID {
return fmt.Errorf("a rule exists with this ID")
}
@ -257,6 +253,14 @@ func (c *Config) EditRule(opts Options) error {
if opts.ID == "" {
return fmt.Errorf("rule ID missing")
}
// If the replication config uses RoleArn, migrate it to the destination element as the target ARN for the remote bucket (non-AWS configurations).
if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") {
for i := range c.Rules {
c.Rules[i].Destination.Bucket = c.Role
}
c.Role = ""
}
rIdx := -1
var newRule Rule
for i, rule := range c.Rules {
@ -351,7 +355,7 @@ func (c *Config) EditRule(opts Options) error {
return fmt.Errorf("replica metadata sync should be either [enable|disable]")
}
}
fmt.Println("opts.ExistingObjectReplicate>", opts.ExistingObjectReplicate)
if opts.ExistingObjectReplicate != "" {
switch opts.ExistingObjectReplicate {
case "enable":
@ -376,11 +380,7 @@ func (c *Config) EditRule(opts Options) error {
destBucket := opts.DestBucket
// ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 {
if len(btokens) == 1 {
destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
} else {
return fmt.Errorf("destination bucket needs to be in Arn format")
}
return fmt.Errorf("destination bucket needs to be in Arn format")
}
newRule.Destination.Bucket = destBucket
}
@ -393,8 +393,8 @@ func (c *Config) EditRule(opts Options) error {
if rule.Priority == newRule.Priority && rIdx != idx {
return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
}
if rule.Destination.Bucket != newRule.Destination.Bucket {
return fmt.Errorf("the destination bucket must be same for all rules")
if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID {
return fmt.Errorf("invalid destination bucket for this rule")
}
}
@ -678,9 +678,9 @@ func (e ExistingObjectReplication) Validate() error {
return nil
}
// Metrics represents inline replication metrics
// such as pending, failed and completed bytes in total for a bucket
type Metrics struct {
// TargetMetrics represents inline replication metrics
// such as pending, failed, and completed bytes in total for a bucket's remote target
type TargetMetrics struct {
// Pending size in bytes
PendingSize uint64 `json:"pendingReplicationSize"`
// Completed size in bytes
@ -694,3 +694,28 @@ type Metrics struct {
// Total number of failed operations including metadata updates
FailedCount uint64 `json:"failedReplicationCount"`
}
// Metrics represents inline replication metrics for a bucket.
type Metrics struct {
Stats map[string]TargetMetrics
// Total Pending size in bytes across targets
PendingSize uint64 `json:"pendingReplicationSize"`
// Completed size in bytes across targets
ReplicatedSize uint64 `json:"completedReplicationSize"`
// Total Replica size in bytes across targets
ReplicaSize uint64 `json:"replicaSize"`
// Failed size in bytes across targets
FailedSize uint64 `json:"failedReplicationSize"`
// Total number of pending operations including metadata updates across targets
PendingCount uint64 `json:"pendingReplicationCount"`
// Total number of failed operations including metadata updates across targets
FailedCount uint64 `json:"failedReplicationCount"`
}
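// ResyncTargetsInfo lists the remote targets for which a replication resync has been requested.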
type ResyncTargetsInfo struct {
Targets []ResyncTarget `json:"target,omitempty"`
}
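// ResyncTarget pairs a remote target ARN with the reset ID of its resync operation.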
type ResyncTarget struct {
Arn string `json:"arn"`
ResetID string `json:"resetid"`
}
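A decode sketch for the wire shape these types describe (the payload in the comment is illustrative):

```go
package example

import (
	"encoding/json"

	"github.com/minio/minio-go/v7/pkg/replication"
)

// decodeResync unmarshals a resync response such as:
//   {"target":[{"arn":"arn:minio:replication::1234:dest","resetid":"..."}]}
func decodeResync(raw []byte) (replication.ResyncTargetsInfo, error) {
	var info replication.ResyncTargetsInfo
	err := json.Unmarshal(raw, &info)
	return info, err
}
```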

View File

@ -18,14 +18,17 @@
package minio
import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
@ -58,6 +61,26 @@ func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) {
return time.Time{}, ""
}
var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`)
func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) {
matches := restoreRegex.FindStringSubmatch(restore)
if len(matches) != 4 {
return false, time.Time{}, errors.New("unexpected restore header")
}
ongoing, err = strconv.ParseBool(matches[1])
if err != nil {
return false, time.Time{}, err
}
if matches[3] != "" {
expTime, err = time.Parse(http.TimeFormat, matches[3])
if err != nil {
return false, time.Time{}, err
}
}
return
}
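`amzRestoreToStruct` is unexported, so here is a standalone sketch of the same regex against an illustrative `x-amz-restore` header: group 1 feeds `OngoingRestore`, and group 3, when present, parses with `http.TimeFormat` into `ExpiryTime`.

```go
package main

import (
	"fmt"
	"net/http"
	"regexp"
	"time"
)

var restoreRe = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`)

func main() {
	hdr := `ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`
	m := restoreRe.FindStringSubmatch(hdr)
	fmt.Println(m[1]) // "false" -> OngoingRestore
	exp, _ := time.Parse(http.TimeFormat, m[3])
	fmt.Println(exp.UTC()) // 2012-12-21 00:00:00 +0000 UTC
}
```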
// xmlDecoder decodes an xml body into the provided value.
func xmlDecoder(body io.Reader, v interface{}) error {
d := xml.NewDecoder(body)
@ -294,6 +317,16 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn
}
}
// Nil if not found
var restore *RestoreInfo
if restoreHdr := h.Get(amzRestore); restoreHdr != "" {
ongoing, expTime, err := amzRestoreToStruct(restoreHdr)
if err != nil {
return ObjectInfo{}, err
}
restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime}
}
// extract lifecycle expiry date and rule ID
expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration))
@ -319,6 +352,7 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn
UserMetadata: userMetadata,
UserTags: userTags,
UserTagCount: tagCount,
Restore: restore,
}, nil
}
@ -397,19 +431,20 @@ func getDefaultLocation(u url.URL, regionOverride string) (location string) {
return region
}
var supportedHeaders = []string{
"content-type",
"cache-control",
"content-encoding",
"content-disposition",
"content-language",
"x-amz-website-redirect-location",
"x-amz-object-lock-mode",
"x-amz-metadata-directive",
"x-amz-object-lock-retain-until-date",
"expires",
"x-amz-replication-status",
var supportedHeaders = map[string]bool{
"content-type": true,
"cache-control": true,
"content-encoding": true,
"content-disposition": true,
"content-language": true,
"x-amz-website-redirect-location": true,
"x-amz-object-lock-mode": true,
"x-amz-metadata-directive": true,
"x-amz-object-lock-retain-until-date": true,
"expires": true,
"x-amz-replication-status": true,
// Add more supported headers here.
// Must be lower case.
}
// isStorageClassHeader returns true if the header is a supported storage class header
@ -419,34 +454,24 @@ func isStorageClassHeader(headerKey string) bool {
// isStandardHeader returns true if header is a supported header and not a custom header
func isStandardHeader(headerKey string) bool {
key := strings.ToLower(headerKey)
for _, header := range supportedHeaders {
if strings.ToLower(header) == key {
return true
}
}
return false
return supportedHeaders[strings.ToLower(headerKey)]
}
// sseHeaders is the set of server-side encryption headers
var sseHeaders = []string{
"x-amz-server-side-encryption",
"x-amz-server-side-encryption-aws-kms-key-id",
"x-amz-server-side-encryption-context",
"x-amz-server-side-encryption-customer-algorithm",
"x-amz-server-side-encryption-customer-key",
"x-amz-server-side-encryption-customer-key-MD5",
var sseHeaders = map[string]bool{
"x-amz-server-side-encryption": true,
"x-amz-server-side-encryption-aws-kms-key-id": true,
"x-amz-server-side-encryption-context": true,
"x-amz-server-side-encryption-customer-algorithm": true,
"x-amz-server-side-encryption-customer-key": true,
"x-amz-server-side-encryption-customer-key-md5": true,
// Add more supported headers here.
// Must be lower case.
}
// isSSEHeader returns true if header is a server side encryption header.
func isSSEHeader(headerKey string) bool {
key := strings.ToLower(headerKey)
for _, h := range sseHeaders {
if strings.ToLower(h) == key {
return true
}
}
return false
return sseHeaders[strings.ToLower(headerKey)]
}
// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
@ -486,3 +511,79 @@ func (m hashWrapper) Close() {
}
m.Hash = nil
}
const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
const (
letterIdxBits = 6 // 6 bits to represent a letter index
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
)
// randString generates random names and prepends them with a known prefix.
func randString(n int, src rand.Source, prefix string) string {
b := make([]byte, n)
// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
if remain == 0 {
cache, remain = src.Int63(), letterIdxMax
}
if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
b[i] = letterBytes[idx]
i--
}
cache >>= letterIdxBits
remain--
}
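// NOTE: the result is always exactly 30 characters long: the prefix plus
// 30-len(prefix) random letters, so callers must pass n >= 30-len(prefix).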
return prefix + string(b[0:30-len(prefix)])
}
// IsNetworkOrHostDown - if there was a network error or if the host is down.
// expectTimeouts indicates that *context* timeouts are expected and does not
// indicate a downed host. Other timeouts are still treated as a downed host.
func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
if err == nil {
return false
}
if errors.Is(err, context.Canceled) {
return false
}
if expectTimeouts && errors.Is(err, context.DeadlineExceeded) {
return false
}
// We need to figure out whether the error is a timeout
// or a non-temporary error.
urlErr := &url.Error{}
if errors.As(err, &urlErr) {
switch urlErr.Err.(type) {
case *net.DNSError, *net.OpError, net.UnknownNetworkError:
return true
}
}
var e net.Error
if errors.As(err, &e) {
if e.Timeout() {
return true
}
}
// Fallback to other mechanisms.
switch {
case strings.Contains(err.Error(), "Connection closed by foreign host"):
return true
case strings.Contains(err.Error(), "TLS handshake timeout"):
// If error is - tlsHandshakeTimeoutError.
return true
case strings.Contains(err.Error(), "i/o timeout"):
// If error is - tcp timeoutError.
return true
case strings.Contains(err.Error(), "connection timed out"):
// If err is a net.Dial timeout.
return true
case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"):
// Denial errors
return true
}
return false
}
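A retry-loop sketch showing the intended use of this helper; the endpoint, attempt count, and backoff policy are all illustrative.

```go
package example

import (
	"net/http"
	"time"

	minio "github.com/minio/minio-go/v7"
)

// getWithRetry retries only when the failure looks like a connectivity
// problem, per IsNetworkOrHostDown above; everything else surfaces at once.
func getWithRetry(url string) (*http.Response, error) {
	var resp *http.Response
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		resp, err = http.Get(url)
		if err == nil {
			return resp, nil
		}
		if !minio.IsNetworkOrHostDown(err, false) {
			return nil, err // not a network/host problem; do not retry
		}
		time.Sleep(time.Second << attempt) // simple exponential backoff
	}
	return nil, err
}
```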