Mirror of https://github.com/cwinfo/matterbridge.git
Synced 2024-11-09 23:40:27 +00:00

Update dependencies/vendor (#1659)

parent 658bdd9faa
commit 3893a035be
42  go.mod
@@ -6,30 +6,30 @@ require (
 github.com/Benau/tgsconverter v0.0.0-20210809170556-99f4a4f6337f
 github.com/Philipp15b/go-steam v1.0.1-0.20200727090957-6ae9b3c0a560
 github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c
-github.com/SevereCloud/vksdk/v2 v2.10.0
+github.com/SevereCloud/vksdk/v2 v2.11.0
-github.com/d5/tengo/v2 v2.8.0
+github.com/d5/tengo/v2 v2.10.0
 github.com/davecgh/go-spew v1.1.1
 github.com/fsnotify/fsnotify v1.5.1
 github.com/go-telegram-bot-api/telegram-bot-api v1.0.1-0.20200524105306-7434b0456e81
-github.com/gomarkdown/markdown v0.0.0-20210918233619-6c1113f12c4a
+github.com/gomarkdown/markdown v0.0.0-20211207152620-5d6539fd8bfc
 github.com/google/gops v0.3.22
 github.com/gorilla/schema v1.2.0
 github.com/gorilla/websocket v1.4.2
 github.com/hashicorp/golang-lru v0.5.4
 github.com/jpillora/backoff v1.0.0
-github.com/keybase/go-keybase-chat-bot v0.0.0-20211004153716-fd2ee4d6be11
+github.com/keybase/go-keybase-chat-bot v0.0.0-20211201215354-ee4b23828b55
 github.com/kyokomi/emoji/v2 v2.2.8
 github.com/labstack/echo/v4 v4.6.1
-github.com/lrstanley/girc v0.0.0-20210611213246-771323f1624b
+github.com/lrstanley/girc v0.0.0-20211023233735-147f0ff77566
 github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16
 github.com/matterbridge/Rocket.Chat.Go.SDK v0.0.0-20211016222428-79310a412696
 github.com/matterbridge/discordgo v0.21.2-0.20210201201054-fb39a175b4f7
 github.com/matterbridge/go-xmpp v0.0.0-20210731150933-5702291c239f
 github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75
 github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba
-github.com/matterbridge/matterclient v0.0.0-20211024214211-22e762684b4a
+github.com/matterbridge/matterclient v0.0.0-20211107234719-faca3cd42315
 github.com/mattermost/mattermost-server/v5 v5.39.0
-github.com/mattermost/mattermost-server/v6 v6.0.2
+github.com/mattermost/mattermost-server/v6 v6.1.0
 github.com/mattn/godown v0.0.1
 github.com/missdeer/golib v1.0.4
 github.com/nelsonken/gomf v0.0.0-20180504123937-a9dd2f9deae9
@@ -39,15 +39,15 @@ require (
 github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca
 github.com/shazow/ssh-chat v1.10.1
 github.com/sirupsen/logrus v1.8.1
-github.com/slack-go/slack v0.9.5
+github.com/slack-go/slack v0.10.0
 github.com/spf13/viper v1.9.0
 github.com/stretchr/testify v1.7.0
-github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50
+github.com/vincent-petithory/dataurl v1.0.0
 github.com/writeas/go-strip-markdown v2.0.1+incompatible
 github.com/yaegashi/msgraph.go v0.1.4
 github.com/zfjagann/golang-ring v0.0.0-20210116075443-7c86fdb43134
 golang.org/x/image v0.0.0-20211028202545-6944b10bf410
-golang.org/x/oauth2 v0.0.0-20211028175245-ba495a64dcb5
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
 gomod.garykim.dev/nc-talk v0.3.0
 gopkg.in/olahol/melody.v1 v1.0.0-20170518105555-d52139073376
 layeh.com/gumble v0.0.0-20200818122324-146f9205029b
@@ -65,31 +65,31 @@ require (
 github.com/go-asn1-ber/asn1-ber v1.5.3 // indirect
 github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
 github.com/golang/protobuf v1.5.2 // indirect
-github.com/google/uuid v1.2.0 // indirect
+github.com/google/uuid v1.3.0 // indirect
 github.com/gopackage/ddp v0.0.3 // indirect
 github.com/hashicorp/errwrap v1.1.0 // indirect
 github.com/hashicorp/go-multierror v1.1.1 // indirect
 github.com/hashicorp/hcl v1.0.0 // indirect
-github.com/json-iterator/go v1.1.11 // indirect
+github.com/json-iterator/go v1.1.12 // indirect
 github.com/kettek/apng v0.0.0-20191108220231-414630eed80f // indirect
-github.com/klauspost/cpuid/v2 v2.0.6 // indirect
+github.com/klauspost/cpuid/v2 v2.0.9 // indirect
 github.com/labstack/gommon v0.3.0 // indirect
 github.com/magiconair/properties v1.8.5 // indirect
 github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 // indirect
 github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d // indirect
 github.com/mattermost/logr v1.0.13 // indirect
-github.com/mattermost/logr/v2 v2.0.10 // indirect
+github.com/mattermost/logr/v2 v2.0.15 // indirect
-github.com/mattn/go-colorable v0.1.8 // indirect
+github.com/mattn/go-colorable v0.1.11 // indirect
 github.com/mattn/go-isatty v0.0.14 // indirect
 github.com/mattn/go-runewidth v0.0.13 // indirect
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
 github.com/minio/md5-simd v1.1.2 // indirect
-github.com/minio/minio-go/v7 v7.0.11 // indirect
+github.com/minio/minio-go/v7 v7.0.14 // indirect
 github.com/minio/sha256-simd v1.0.0 // indirect
 github.com/mitchellh/go-homedir v1.1.0 // indirect
 github.com/mitchellh/mapstructure v1.4.2 // indirect
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-github.com/modern-go/reflect2 v1.0.1 // indirect
+github.com/modern-go/reflect2 v1.0.2 // indirect
 github.com/monaco-io/request v1.0.5 // indirect
 github.com/mreiferson/go-httpclient v0.0.0-20201222173833-5e475fde3a4d // indirect
 github.com/mrexodia/wray v0.0.0-20160318003008-78a2c1f284ff // indirect
@@ -116,12 +116,12 @@ require (
 github.com/wiggin77/cfg v1.0.2 // indirect
 github.com/wiggin77/merror v1.0.3 // indirect
 github.com/wiggin77/srslog v1.0.1 // indirect
-go.uber.org/atomic v1.8.0 // indirect
+go.uber.org/atomic v1.9.0 // indirect
 go.uber.org/multierr v1.7.0 // indirect
 go.uber.org/zap v1.17.0 // indirect
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
-golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect
+golang.org/x/net v0.0.0-20211006190231-62292e806868 // indirect
-golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0 // indirect
+golang.org/x/sys v0.0.0-20211006225509-1a26e0398eed // indirect
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
 golang.org/x/text v0.3.7 // indirect
 golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect
2  vendor/github.com/Rhymen/go-whatsapp/README.md (generated, vendored)
@@ -70,7 +70,7 @@ func (myHandler) HandleContactMessage(message whatsapp.ContactMessage) {
 fmt.Println(message)
 }
 
-func (myHandler) HandleBatteryMessage(msg whatsapp.BatteryMessage) {
+func (myHandler) HandleBatteryMessage(message whatsapp.BatteryMessage) {
 fmt.Println(message)
 }
 
2  vendor/github.com/Rhymen/go-whatsapp/session.go (generated, vendored)
@@ -526,5 +526,7 @@ func (wac *Conn) Logout() error {
 return fmt.Errorf("error writing logout: %v\n", err)
 }
 
+wac.loggedIn = false
+
 return nil
 }
7  vendor/github.com/SevereCloud/vksdk/v2/.golangci.yml (generated, vendored)
@@ -48,6 +48,11 @@ linters:
 - nilerr
 - revive
 - wastedassign
+- bidichk
+- contextcheck
+- ireturn
+- nilnil
+- tenv
 
 # - wrapcheck # TODO: v3 Fix
 # - testpackage # TODO: Fix testpackage
@@ -75,6 +80,8 @@ linters:
 # - cyclop
 # - promlinter
 # - tagliatelle
+# - errname
+# - varnamelen
 
 # depricated
 # - maligned
20  vendor/github.com/SevereCloud/vksdk/v2/.travis.yml (generated, vendored)
@@ -1,20 +0,0 @@
----
-language: go
-
-cache:
-directories:
-- $HOME/.cache/go-build
-- $HOME/gopath/pkg/mod
-
-go:
-- 1.x
-
-before_script:
-- git fetch --depth=1 origin +refs/tags/*:refs/tags/*
-- git describe --tags $(git rev-list --tags --max-count=1) --always
-
-script:
-- go test -v -race -coverprofile=coverage.txt -covermode=atomic -p=1 ./...
-
-after_success:
-- bash <(curl -s https://codecov.io/bash)
2  vendor/github.com/SevereCloud/vksdk/v2/CONTRIBUTING.md (generated, vendored)
@@ -39,6 +39,7 @@ golangci-lint run
 # CLIENT_SECRET=""
 # USER_TOKEN=""
 # WIDGET_TOKEN=""
+# MARUSIA_TOKEN=""
 # CLIENT_ID="123456"
 # GROUP_ID="123456"
 # ACCOUNT_ID="123456"
@@ -56,6 +57,7 @@ go test ./...
 "go.testEnvVars": {
 "SERVICE_TOKEN": "",
 "WIDGET_TOKEN": "",
+"MARUSIA_TOKEN": "",
 "GROUP_TOKEN": "",
 "CLIENT_SECRET": "",
 "USER_TOKEN": "",
1  vendor/github.com/SevereCloud/vksdk/v2/README.md (generated, vendored)
@@ -1,6 +1,5 @@
 # VK SDK for Golang
 
-[![Build Status](https://travis-ci.com/SevereCloud/vksdk.svg?branch=master)](https://travis-ci.com/SevereCloud/vksdk)
 [![PkgGoDev](https://pkg.go.dev/badge/github.com/SevereCloud/vksdk/v2/v2)](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2?tab=subdirectories)
 [![VK Developers](https://img.shields.io/badge/developers-%234a76a8.svg?logo=VK&logoColor=white)](https://vk.com/dev/)
 [![codecov](https://codecov.io/gh/SevereCloud/vksdk/branch/master/graph/badge.svg)](https://codecov.io/gh/SevereCloud/vksdk)
48  vendor/github.com/SevereCloud/vksdk/v2/api/errors.go (generated, vendored)
@@ -159,6 +159,9 @@ const (
 ErrRateLimit ErrorType = 29
 ErrPrivateProfile ErrorType = 30 // This profile is private
 
+// Client version deprecated.
+ErrClientVersionDeprecated ErrorType = 34
+
 // Method execution was interrupted due to timeout.
 ErrExecutionTimeout ErrorType = 36
 
@@ -177,6 +180,9 @@ const (
 // Additional signup required.
 ErrAdditionalSignupRequired ErrorType = 41
 
+// IP is not allowed.
+ErrIPNotAllowed ErrorType = 42
+
 // One of the parameters specified was missing or invalid
 //
 // Check the required parameters list and their format on a method
@@ -586,6 +592,12 @@ const (
 // Can't send message, reply timed out.
 ErrMessagesReplyTimedOut ErrorType = 950
 
+// You can't access donut chat without subscription.
+ErrMessagesAccessDonutChat ErrorType = 962
+
+// This user can't be added to the work chat, as they aren't an employe.
+ErrMessagesAccessWorkChat ErrorType = 967
+
 // Invalid phone number.
 ErrParamPhone ErrorType = 1000
 
@@ -598,6 +610,12 @@ const (
 // Processing.. Try later.
 ErrAuthDelay ErrorType = 1112
 
+// Anonymous token has expired.
+ErrAnonymousTokenExpired ErrorType = 1114
+
+// Anonymous token is invalid.
+ErrAnonymousTokenInvalid ErrorType = 1116
+
 // Invalid document id.
 ErrParamDocID ErrorType = 1150
 
@@ -724,6 +742,9 @@ const (
 // Market was already disabled in this group.
 ErrMarketAlreadyDisabled ErrorType = 1432
 
+// Main album can not be hidden.
+ErrMainAlbumCantHidden ErrorType = 1446
+
 // Story has already expired.
 ErrStoryExpired ErrorType = 1600
 
@@ -783,6 +804,33 @@ const (
 
 // Can't set AliExpress tag to this type of object.
 ErrAliExpressTag ErrorType = 3800
+
+// Invalid upload response.
+ErrInvalidUploadResponse ErrorType = 5701
+
+// Invalid upload hash.
+ErrInvalidUploadHash ErrorType = 5702
+
+// Invalid upload user.
+ErrInvalidUploadUser ErrorType = 5703
+
+// Invalid upload group.
+ErrInvalidUploadGroup ErrorType = 5704
+
+// Invalid crop data.
+ErrInvalidCropData ErrorType = 5705
+
+// To small avatar.
+ErrToSmallAvatar ErrorType = 5706
+
+// Photo not found.
+ErrPhotoNotFound ErrorType = 5708
+
+// Invalid Photo.
+ErrInvalidPhoto ErrorType = 5709
+
+// Invalid hash.
+ErrInvalidHash ErrorType = 5710
 )
 
 // ErrorSubtype is the subtype of an error.
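The constants added above are ordinary ErrorType values, so calling code can test for them the same way it already tests for existing VK API errors. A minimal, hedged sketch; the surrounding call, peer id, and token handling are illustrative and not part of this diff:

package main

import (
    "errors"
    "log"
    "os"

    "github.com/SevereCloud/vksdk/v2/api"
)

func main() {
    vk := api.NewVK(os.Getenv("USER_TOKEN")) // token source is an assumption

    _, err := vk.MessagesSend(api.Params{
        "peer_id":   2000000001,
        "random_id": 0,
        "message":   "hello",
    })

    // ErrMessagesAccessDonutChat (code 962) is one of the error types added here.
    if errors.Is(err, api.ErrMessagesAccessDonutChat) {
        log.Println("donut chat requires a subscription")
    } else if err != nil {
        log.Println("messages.send failed:", err)
    }
}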
3  vendor/github.com/SevereCloud/vksdk/v2/api/execute.go (generated, vendored)
@@ -22,6 +22,9 @@ func (vk *VK) ExecuteWithArgs(code string, params Params, obj interface{}) error
 }
 
 resp, err := vk.Handler("execute", params, reqParams)
+if err != nil {
+return err
+}
 
 jsonErr := json.Unmarshal(resp.Response, &obj)
 if jsonErr != nil {
16  vendor/github.com/SevereCloud/vksdk/v2/api/market.go (generated, vendored)
@@ -318,3 +318,19 @@ func (vk *VK) MarketSearch(params Params) (response MarketSearchResponse, err er
 err = vk.RequestUnmarshal("market.search", &response, params)
 return
 }
+
+// MarketSearchItemsResponse struct.
+type MarketSearchItemsResponse struct {
+Count int `json:"count"`
+ViewType int `json:"view_type"`
+Items []object.MarketMarketItem `json:"items"`
+Groups []object.GroupsGroup `json:"groups,omitempty"`
+}
+
+// MarketSearchItems method.
+//
+// https://vk.com/dev/market.searchItems
+func (vk *VK) MarketSearchItems(params Params) (response MarketSearchItemsResponse, err error) {
+err = vk.RequestUnmarshal("market.searchItems", &response, params)
+return
+}
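A hedged sketch of calling the newly added MarketSearchItems wrapper; the token handling and the query parameters are illustrative assumptions, not part of the vendored change:

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/SevereCloud/vksdk/v2/api"
)

func main() {
    vk := api.NewVK(os.Getenv("USER_TOKEN")) // token source is an assumption

    // market.searchItems is the VK method wrapped by the new helper;
    // parameter names follow the VK docs linked in the diff.
    res, err := vk.MarketSearchItems(api.Params{
        "q":     "guitar",
        "count": 10,
    })
    if err != nil {
        log.Fatal(err)
    }

    for _, item := range res.Items {
        fmt.Println(item.Title)
    }
}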
103  vendor/github.com/SevereCloud/vksdk/v2/api/marusia.go (generated, vendored, new file)
@@ -0,0 +1,103 @@
+package api // import "github.com/SevereCloud/vksdk/v2/api"
+
+import (
+"github.com/SevereCloud/vksdk/v2/object"
+)
+
+// MarusiaGetPictureUploadLinkResponse struct.
+type MarusiaGetPictureUploadLinkResponse struct {
+PictureUploadLink string `json:"picture_upload_link"` // Link
+}
+
+// MarusiaGetPictureUploadLink method.
+//
+// https://vk.com/dev/marusia_skill_docs10
+func (vk *VK) MarusiaGetPictureUploadLink(params Params) (response MarusiaGetPictureUploadLinkResponse, err error) {
+err = vk.RequestUnmarshal("marusia.getPictureUploadLink", &response, params)
+return
+}
+
+// MarusiaSavePictureResponse struct.
+type MarusiaSavePictureResponse struct {
+AppID int `json:"app_id"`
+PhotoID int `json:"photo_id"`
+}
+
+// MarusiaSavePicture method.
+//
+// https://vk.com/dev/marusia_skill_docs10
+func (vk *VK) MarusiaSavePicture(params Params) (response MarusiaSavePictureResponse, err error) {
+err = vk.RequestUnmarshal("marusia.savePicture", &response, params)
+return
+}
+
+// MarusiaGetPicturesResponse struct.
+type MarusiaGetPicturesResponse struct {
+Count int `json:"count"`
+Items []object.MarusiaPicture `json:"items"`
+}
+
+// MarusiaGetPictures method.
+//
+// https://vk.com/dev/marusia_skill_docs10
+func (vk *VK) MarusiaGetPictures(params Params) (response MarusiaGetPicturesResponse, err error) {
+err = vk.RequestUnmarshal("marusia.getPictures", &response, params)
+return
+}
+
+// MarusiaDeletePicture delete picture.
+//
+// https://vk.com/dev/marusia_skill_docs10
+func (vk *VK) MarusiaDeletePicture(params Params) (response int, err error) {
+err = vk.RequestUnmarshal("marusia.deletePicture", &response, params)
+return
+}
+
+// MarusiaGetAudioUploadLinkResponse struct.
+type MarusiaGetAudioUploadLinkResponse struct {
+AudioUploadLink string `json:"audio_upload_link"` // Link
+}
+
+// MarusiaGetAudioUploadLink method.
+//
+// https://vk.com/dev/marusia_skill_docs10
+func (vk *VK) MarusiaGetAudioUploadLink(params Params) (response MarusiaGetAudioUploadLinkResponse, err error) {
+err = vk.RequestUnmarshal("marusia.getAudioUploadLink", &response, params)
+return
+}
+
+// MarusiaCreateAudioResponse struct.
+type MarusiaCreateAudioResponse struct {
+ID int `json:"id"`
+Title string `json:"title"`
+}
+
+// MarusiaCreateAudio method.
+//
+// https://vk.com/dev/marusia_skill_docs10
+func (vk *VK) MarusiaCreateAudio(params Params) (response MarusiaCreateAudioResponse, err error) {
+err = vk.RequestUnmarshal("marusia.createAudio", &response, params)
+return
+}
+
+// MarusiaGetAudiosResponse struct.
+type MarusiaGetAudiosResponse struct {
+Count int `json:"count"`
+Audios []object.MarusiaAudio `json:"audios"`
+}
+
+// MarusiaGetAudios method.
+//
+// https://vk.com/dev/marusia_skill_docs10
+func (vk *VK) MarusiaGetAudios(params Params) (response MarusiaGetAudiosResponse, err error) {
+err = vk.RequestUnmarshal("marusia.getAudios", &response, params)
+return
+}
+
+// MarusiaDeleteAudio delete audio.
+//
+// https://vk.com/dev/marusia_skill_docs10
+func (vk *VK) MarusiaDeleteAudio(params Params) (response int, err error) {
+err = vk.RequestUnmarshal("marusia.deleteAudio", &response, params)
+return
+}
54  vendor/github.com/SevereCloud/vksdk/v2/api/upload.go (generated, vendored)
@@ -959,3 +959,57 @@ func (vk *VK) UploadGroupImage(imageType string, file io.Reader) (response objec
 
 return
 }
+
+// UploadMarusiaPicture uploading picture.
+//
+// Limits: height not more than 600 px,
+// aspect ratio of at least 2:1.
+func (vk *VK) UploadMarusiaPicture(file io.Reader) (response MarusiaSavePictureResponse, err error) {
+uploadServer, err := vk.MarusiaGetPictureUploadLink(nil)
+if err != nil {
+return
+}
+
+bodyContent, err := vk.UploadFile(uploadServer.PictureUploadLink, file, "photo", "photo.jpg")
+if err != nil {
+return
+}
+
+var handler object.MarusiaPictureUploadResponse
+
+err = json.Unmarshal(bodyContent, &handler)
+if err != nil {
+return
+}
+
+photo, _ := json.Marshal(handler.Photo)
+
+response, err = vk.MarusiaSavePicture(Params{
+"server": handler.Server,
+"photo": string(photo),
+"hash": handler.Hash,
+})
+
+return
+}
+
+// UploadMarusiaAudio uploading audio.
+//
+// https://vk.com/dev/marusia_skill_docs10
+func (vk *VK) UploadMarusiaAudio(file io.Reader) (response MarusiaCreateAudioResponse, err error) {
+uploadServer, err := vk.MarusiaGetAudioUploadLink(nil)
+if err != nil {
+return
+}
+
+bodyContent, err := vk.UploadFile(uploadServer.AudioUploadLink, file, "file", "audio.mp3")
+if err != nil {
+return
+}
+
+response, err = vk.MarusiaCreateAudio(Params{
+"audio_meta": string(bodyContent),
+})
+
+return
+}
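A hedged sketch of how the new Marusia upload helper might be driven from application code; the file name and token handling are assumptions, not part of the vendored change:

package main

import (
    "log"
    "os"

    "github.com/SevereCloud/vksdk/v2/api"
)

func main() {
    vk := api.NewVK(os.Getenv("GROUP_TOKEN")) // token source is an assumption

    f, err := os.Open("cover.jpg") // any io.Reader works
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // UploadMarusiaPicture wraps marusia.getPictureUploadLink, the raw file
    // upload, and marusia.savePicture, as shown in the diff above.
    pic, err := vk.UploadMarusiaPicture(f)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("uploaded picture: app_id=%d photo_id=%d", pic.AppID, pic.PhotoID)
}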
2  vendor/github.com/SevereCloud/vksdk/v2/doc.go (generated, vendored)
@@ -7,6 +7,6 @@ package vksdk
 
 // Module constants.
 const (
-Version = "2.10.0"
+Version = "2.11.0"
 API = "5.131"
 )
2  vendor/github.com/SevereCloud/vksdk/v2/object/market.go (generated, vendored)
@@ -28,6 +28,8 @@ type MarketMarketAlbum struct {
 Photo PhotosPhoto `json:"photo"`
 Title string `json:"title"` // Market album title
 UpdatedTime int `json:"updated_time"` // Date when album has been updated last time in Unixtime
+IsMain BaseBoolInt `json:"is_main"`
+IsHidden BaseBoolInt `json:"is_hidden"`
 }
 
 // ToAttachment return attachment format.
52  vendor/github.com/SevereCloud/vksdk/v2/object/marusia.go (generated, vendored, new file)
@@ -0,0 +1,52 @@
+package object // import "github.com/SevereCloud/vksdk/v2/object"
+
+import (
+"encoding/json"
+)
+
+// MarusiaPicture struct.
+type MarusiaPicture struct {
+ID int `json:"id"`
+OwnerID int `json:"owner_id"`
+}
+
+// MarusiaPictureUploadResponse struct.
+type MarusiaPictureUploadResponse struct {
+Hash string `json:"hash"` // Uploading hash
+Photo json.RawMessage `json:"photo"` // Uploaded photo data
+Server int `json:"server"` // Upload server number
+AID int `json:"aid"`
+MessageCode int `json:"message_code"`
+}
+
+// MarusiaAudio struct.
+type MarusiaAudio struct {
+ID int `json:"id"`
+Title string `json:"title"`
+OwnerID int `json:"owner_id"`
+}
+
+// MarusiaAudioUploadResponse struct.
+type MarusiaAudioUploadResponse struct {
+Sha string `json:"sha"`
+Secret string `json:"secret"`
+Meta MarusiaAudioMeta `json:"meta"`
+Hash string `json:"hash"`
+Server string `json:"server"`
+UserID int `json:"user_id"`
+RequestID string `json:"request_id"`
+}
+
+// MarusiaAudioMeta struct.
+type MarusiaAudioMeta struct {
+Album string `json:"album"`
+Artist string `json:"artist"`
+Bitrate string `json:"bitrate"`
+Duration string `json:"duration"`
+Genre string `json:"genre"`
+Kad string `json:"kad"`
+Md5 string `json:"md5"`
+Md5DataSize string `json:"md5_data_size"`
+Samplerate string `json:"samplerate"`
+Title string `json:"title"`
+}
13  vendor/github.com/SevereCloud/vksdk/v2/object/messages.go (generated, vendored)
@@ -375,17 +375,17 @@ type MessagesTemplateElement struct {
 
 // MessagesTemplateElementCarousel struct.
 type MessagesTemplateElementCarousel struct {
-Title string `json:"title"`
+Title string `json:"title,omitempty"`
-Action MessagesTemplateElementCarouselAction `json:"action"`
+Action MessagesTemplateElementCarouselAction `json:"action,omitempty"`
-Description string `json:"description"`
+Description string `json:"description,omitempty"`
-Photo PhotosPhoto `json:"photo"`
+Photo *PhotosPhoto `json:"photo,omitempty"`
-Buttons []MessagesKeyboardButton `json:"buttons"`
+Buttons []MessagesKeyboardButton `json:"buttons,omitempty"`
 }
 
 // MessagesTemplateElementCarouselAction struct.
 type MessagesTemplateElementCarouselAction struct {
 Type string `json:"type"`
-Link string `json:"link"`
+Link string `json:"link,omitempty"`
 }
 
 // MessageContentSourceMessage ...
@@ -443,6 +443,7 @@ type MessagesChat struct {
 AdminID int `json:"admin_id"` // Chat creator ID
 ID int `json:"id"` // Chat ID
 IsDefaultPhoto BaseBoolInt `json:"is_default_photo"`
+IsGroupChannel BaseBoolInt `json:"is_group_channel"`
 Photo100 string `json:"photo_100"` // URL of the preview image with 100 px in width
 Photo200 string `json:"photo_200"` // URL of the preview image with 200 px in width
 Photo50 string `json:"photo_50"` // URL of the preview image with 50 px in width
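A small hedged illustration of what the carousel changes above mean for serialization when only a title is set; the values are made up:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/SevereCloud/vksdk/v2/object"
)

func main() {
    el := object.MessagesTemplateElementCarousel{
        Title: "Card 1",
    }

    b, _ := json.Marshal(el)
    fmt.Println(string(b))
    // Before this change an unset Photo was serialized as a full empty
    // PhotosPhoto object; with the *PhotosPhoto pointer plus omitempty it is
    // now left out, as are the empty description and buttons fields.
}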
31  vendor/github.com/SevereCloud/vksdk/v2/object/stories.go (generated, vendored)
@@ -251,8 +251,10 @@ type StoriesClickableSticker struct { // nolint: maligned
 StickerID int `json:"sticker_id,omitempty"`
 StickerPackID int `json:"sticker_pack_id,omitempty"`
 
-// type=place
+// type=place or geo
 PlaceID int `json:"place_id,omitempty"`
+// Title
+CategoryID int `json:"category_id,omitempty"`
 
 // type=question
 Question string `json:"question,omitempty"`
@@ -269,6 +271,12 @@ type StoriesClickableSticker struct { // nolint: maligned
 // type=link
 LinkObject BaseLink `json:"link_object,omitempty"`
 TooltipText string `json:"tooltip_text,omitempty"`
+TooltipTextKey string `json:"tooltip_text_key,omitempty"`
+
+// type=time
+TimestampMs int64 `json:"timestamp_ms,omitempty"`
+Date string `json:"date,omitempty"`
+Title string `json:"title,omitempty"`
 
 // type=market_item
 Subtype string `json:"subtype,omitempty"`
@@ -290,10 +298,19 @@ type StoriesClickableSticker struct { // nolint: maligned
 AudioStartTime int `json:"audio_start_time,omitempty"`
 
 // type=app
-App AppsApp `json:"app"`
+App AppsApp `json:"app,omitempty"`
-AppContext string `json:"app_context"`
+AppContext string `json:"app_context,omitempty"`
-HasNewInteractions BaseBoolInt `json:"has_new_interactions"`
+HasNewInteractions BaseBoolInt `json:"has_new_interactions,omitempty"`
-IsBroadcastNotifyAllowed BaseBoolInt `json:"is_broadcast_notify_allowed"`
+IsBroadcastNotifyAllowed BaseBoolInt `json:"is_broadcast_notify_allowed,omitempty"`
+
+// type=emoji
+Emoji string `json:"emoji,omitempty"`
+
+// type=text
+Text string `json:"text,omitempty"`
+BackgroundStyle string `json:"background_style,omitempty"`
+Alignment string `json:"alignment,omitempty"`
+SelectionColor string `json:"selection_color,omitempty"`
 }
 
 // TODO: сделать несколько структур для кликабельного стикера
@@ -313,6 +330,10 @@ const (
 ClickableStickerPoll = "poll"
 ClickableStickerMusic = "music"
 ClickableStickerApp = "app"
+ClickableStickerTime = "time"
+ClickableStickerEmoji = "emoji"
+ClickableStickerGeo = "geo"
+ClickableStickerText = "text"
 )
 
 // Subtype of clickable sticker.
1  vendor/github.com/SevereCloud/vksdk/v2/object/video.go (generated, vendored)
@@ -213,6 +213,7 @@ type VideoVideoFull struct {
 Description string `json:"description"` // Video description
 Duration int `json:"duration"` // Video duration in seconds
 Files VideoVideoFiles `json:"files"`
+Trailer VideoVideoFiles `json:"trailer"`
 ID int `json:"id"` // Video ID
 Likes BaseLikes `json:"likes"`
 Live int `json:"live"` // Returns if the video is live translation
4  vendor/github.com/d5/tengo/v2/README.md (generated, vendored)
@@ -1,7 +1,3 @@
-<p align="center">
-<img src="https://raw.githubusercontent.com/d5/tengolang-share/master/logo_400.png" width="200" height="200">
-</p>
-
 # The Tengo Language
 
 [![GoDoc](https://godoc.org/github.com/d5/tengo/v2?status.svg)](https://godoc.org/github.com/d5/tengo/v2)
70  vendor/github.com/d5/tengo/v2/compiler.go (generated, vendored)
@@ -1,9 +1,11 @@
 package tengo
 
 import (
+"errors"
 "fmt"
 "io"
 "io/ioutil"
+"os"
 "path/filepath"
 "reflect"
 "strings"
@@ -45,11 +47,12 @@ type Compiler struct {
 parent *Compiler
 modulePath string
 importDir string
+importFileExt []string
 constants []Object
 symbolTable *SymbolTable
 scopes []compilationScope
 scopeIndex int
-modules *ModuleMap
+modules ModuleGetter
 compiledModules map[string]*CompiledFunction
 allowFileImport bool
 loops []*loop
@@ -63,7 +66,7 @@ func NewCompiler(
 file *parser.SourceFile,
 symbolTable *SymbolTable,
 constants []Object,
-modules *ModuleMap,
+modules ModuleGetter,
 trace io.Writer,
 ) *Compiler {
 mainScope := compilationScope{
@@ -96,6 +99,7 @@ func NewCompiler(
 trace: trace,
 modules: modules,
 compiledModules: make(map[string]*CompiledFunction),
+importFileExt: []string{SourceFileExtDefault},
 }
 }
 
@@ -538,12 +542,8 @@ func (c *Compiler) Compile(node parser.Node) error {
 }
 } else if c.allowFileImport {
 moduleName := node.ModuleName
-if !strings.HasSuffix(moduleName, ".tengo") {
-moduleName += ".tengo"
-}
 
-modulePath, err := filepath.Abs(
-filepath.Join(c.importDir, moduleName))
+modulePath, err := c.getPathModule(moduleName)
 if err != nil {
 return c.errorf(node, "module file path error: %s",
 err.Error())
@@ -640,6 +640,39 @@ func (c *Compiler) SetImportDir(dir string) {
 c.importDir = dir
 }
 
+// SetImportFileExt sets the extension name of the source file for loading
+// local module files.
+//
+// Use this method if you want other source file extension than ".tengo".
+//
+// // this will search for *.tengo, *.foo, *.bar
+// err := c.SetImportFileExt(".tengo", ".foo", ".bar")
+//
+// This function requires at least one argument, since it will replace the
+// current list of extension name.
+func (c *Compiler) SetImportFileExt(exts ...string) error {
+if len(exts) == 0 {
+return fmt.Errorf("missing arg: at least one argument is required")
+}
+
+for _, ext := range exts {
+if ext != filepath.Ext(ext) || ext == "" {
+return fmt.Errorf("invalid file extension: %s", ext)
+}
+}
+
+c.importFileExt = exts // Replace the hole current extension list
+
+return nil
+}
+
+// GetImportFileExt returns the current list of extension name.
+// Thease are the complementary suffix of the source file to search and load
+// local module files.
+func (c *Compiler) GetImportFileExt() []string {
+return c.importFileExt
+}
+
 func (c *Compiler) compileAssign(
 node parser.Node,
 lhs, rhs []parser.Expr,
@@ -1098,6 +1131,7 @@ func (c *Compiler) fork(
 child.parent = c // parent to set to current compiler
 child.allowFileImport = c.allowFileImport
 child.importDir = c.importDir
+child.importFileExt = c.importFileExt
 if isFile && c.importDir != "" {
 child.importDir = filepath.Dir(modulePath)
 }
@@ -1287,6 +1321,28 @@ func (c *Compiler) printTrace(a ...interface{}) {
 _, _ = fmt.Fprintln(c.trace, a...)
 }
 
+func (c *Compiler) getPathModule(moduleName string) (pathFile string, err error) {
+for _, ext := range c.importFileExt {
+nameFile := moduleName
+
+if !strings.HasSuffix(nameFile, ext) {
+nameFile += ext
+}
+
+pathFile, err = filepath.Abs(filepath.Join(c.importDir, nameFile))
+if err != nil {
+continue
+}
+
+// Check if file exists
+if _, err := os.Stat(pathFile); !errors.Is(err, os.ErrNotExist) {
+return pathFile, nil
+}
+}
+
+return "", fmt.Errorf("module '%s' not found at: %s", moduleName, pathFile)
+}
+
 func resolveAssignLHS(
 expr parser.Expr,
 ) (name string, selectors []parser.Expr) {
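Based on the doc comment introduced above, a hedged sketch of wiring the new import-extension hooks into a compiler instance; the directory, extension names, and script content are illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/d5/tengo/v2"
    "github.com/d5/tengo/v2/parser"
)

func main() {
    src := []byte(`out := import("util")`)
    file := parser.NewFileSet().AddFile("main", -1, len(src))

    c := tengo.NewCompiler(file, nil, nil, nil, nil)
    c.EnableFileImport(true)
    c.SetImportDir("./scripts")

    // With this change the compiler can resolve import("util") to
    // util.mylang as well as util.tengo.
    if err := c.SetImportFileExt(".tengo", ".mylang"); err != nil {
        log.Fatal(err)
    }

    fmt.Println(c.GetImportFileExt()) // [.tengo .mylang]
}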
5  vendor/github.com/d5/tengo/v2/modules.go (generated, vendored)
@@ -6,6 +6,11 @@ type Importable interface {
 Import(moduleName string) (interface{}, error)
 }
 
+// ModuleGetter enables implementing dynamic module loading.
+type ModuleGetter interface {
+Get(name string) Importable
+}
+
 // ModuleMap represents a set of named modules. Use NewModuleMap to create a
 // new module map.
 type ModuleMap struct {
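Because the compiler and Script.SetImports now accept this ModuleGetter interface instead of the concrete *ModuleMap (see the compiler.go and script.go hunks), an embedding application can supply its own module resolution. A hedged sketch; the fallback behaviour below is invented for illustration:

package main

import (
    "fmt"

    "github.com/d5/tengo/v2"
    "github.com/d5/tengo/v2/stdlib"
)

// lazyModules serves stdlib modules first and falls back to a custom map.
// The type and its behaviour are illustrative, not part of the vendored code.
type lazyModules struct {
    extra *tengo.ModuleMap
}

func (m lazyModules) Get(name string) tengo.Importable {
    if mod := stdlib.GetModuleMap(name).Get(name); mod != nil {
        return mod
    }
    return m.extra.Get(name)
}

func main() {
    extra := tengo.NewModuleMap()
    extra.AddSourceModule("answer", []byte(`export 42`))

    s := tengo.NewScript([]byte(`
fmt := import("fmt")
answer := import("answer")
fmt.println(answer)
`))
    s.SetImports(lazyModules{extra: extra}) // any ModuleGetter now works here

    if _, err := s.Run(); err != nil {
        fmt.Println("run error:", err)
    }
}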
16  vendor/github.com/d5/tengo/v2/script.go (generated, vendored)
@@ -12,7 +12,7 @@ import (
 // Script can simplify compilation and execution of embedded scripts.
 type Script struct {
 variables map[string]*Variable
-modules *ModuleMap
+modules ModuleGetter
 input []byte
 maxAllocs int64
 maxConstObjects int
@@ -54,7 +54,7 @@ func (s *Script) Remove(name string) bool {
 }
 
 // SetImports sets import modules.
-func (s *Script) SetImports(modules *ModuleMap) {
+func (s *Script) SetImports(modules ModuleGetter) {
 s.modules = modules
 }
 
@@ -219,6 +219,18 @@ func (c *Compiled) RunContext(ctx context.Context) (err error) {
 v := NewVM(c.bytecode, c.globals, c.maxAllocs)
 ch := make(chan error, 1)
 go func() {
+defer func() {
+if r := recover(); r != nil {
+switch e := r.(type) {
+case string:
+ch <- fmt.Errorf(e)
+case error:
+ch <- e
+default:
+ch <- fmt.Errorf("unknown panic: %v", e)
+}
+}
+}()
 ch <- v.Run()
 }()
 
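The recover block added to RunContext means a panic inside the VM goroutine now reaches the caller as an ordinary error instead of crashing the embedding program. A hedged usage sketch; the script body and timeout are illustrative:

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/d5/tengo/v2"
)

func main() {
    s := tengo.NewScript([]byte(`
a := 1
b := a + 2
c := b * b
`))

    compiled, err := s.Compile()
    if err != nil {
        panic(err)
    }

    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    // A VM panic would now surface here as err rather than killing the process.
    if err := compiled.RunContext(ctx); err != nil {
        fmt.Println("script failed:", err)
        return
    }
    fmt.Println("c =", compiled.Get("c").Int())
}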
3  vendor/github.com/d5/tengo/v2/tengo.go (generated, vendored)
@@ -26,6 +26,9 @@ const (
 
 // MaxFrames is the maximum number of function frames for a VM.
 MaxFrames = 1024
+
+// SourceFileExtDefault is the default extension for source files.
+SourceFileExtDefault = ".tengo"
 )
 
 // CallableFunc is a function signature for the callable functions.
2  vendor/github.com/d5/tengo/v2/vm.go (generated, vendored)
@@ -293,7 +293,7 @@ func (v *VM) run() {
 case parser.OpMap:
 v.ip += 2
 numElements := int(v.curInsts[v.ip]) | int(v.curInsts[v.ip-1])<<8
-kv := make(map[string]Object)
+kv := make(map[string]Object, numElements)
 for i := v.sp - numElements; i < v.sp; i += 2 {
 key := v.stack[i]
 value := v.stack[i+1]
59  vendor/github.com/gomarkdown/markdown/parser/block.go (generated, vendored)
@@ -17,6 +17,12 @@ const (
 escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]"
 )
 
+const (
+captionTable = "Table: "
+captionFigure = "Figure: "
+captionQuote = "Quote: "
+)
+
 var (
 reBackslashOrAmp = regexp.MustCompile("[\\&]")
 reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity)
@@ -125,6 +131,16 @@ func (p *Parser) block(data []byte) {
 }
 if consumed > 0 {
 included := f(p.includeStack.Last(), path, address)
+
+// if we find a caption below this, we need to include it in 'included', so
+// that the caption will be part of the include text. (+1 to skip newline)
+for _, caption := range []string{captionFigure, captionTable, captionQuote} {
+if _, _, capcon := p.caption(data[consumed+1:], []byte(caption)); capcon > 0 {
+included = append(included, data[consumed+1:consumed+1+capcon]...)
+consumed += 1 + capcon
+break // there can only be 1 caption.
+}
+}
 p.includeStack.Push(path)
 p.block(included)
 p.includeStack.Pop()
@@ -295,7 +311,7 @@ func (p *Parser) block(data []byte) {
 //
 // also works with + or -
 if p.uliPrefix(data) > 0 {
-data = data[p.list(data, 0, 0):]
+data = data[p.list(data, 0, 0, '.'):]
 continue
 }
 
@@ -305,14 +321,18 @@ func (p *Parser) block(data []byte) {
 // 2. Item 2
 if i := p.oliPrefix(data); i > 0 {
 start := 0
-if i > 2 && p.extensions&OrderedListStart != 0 {
+delim := byte('.')
+if i > 2 {
+if p.extensions&OrderedListStart != 0 {
 s := string(data[:i-2])
 start, _ = strconv.Atoi(s)
 if start == 1 {
 start = 0
 }
 }
-data = data[p.list(data, ast.ListTypeOrdered, start):]
+delim = data[i-2]
+}
+data = data[p.list(data, ast.ListTypeOrdered, start, delim):]
 continue
 }
 
@@ -326,7 +346,7 @@ func (p *Parser) block(data []byte) {
 // : Definition c
 if p.extensions&DefinitionLists != 0 {
 if p.dliPrefix(data) > 0 {
-data = data[p.list(data, ast.ListTypeDefinition, 0):]
+data = data[p.list(data, ast.ListTypeDefinition, 0, '.'):]
 continue
 }
 }
@@ -950,7 +970,7 @@ func (p *Parser) fencedCodeBlock(data []byte, doRender bool) int {
 }
 
 // Check for caption and if found make it a figure.
-if captionContent, id, consumed := p.caption(data[beg:], []byte("Figure: ")); consumed > 0 {
+if captionContent, id, consumed := p.caption(data[beg:], []byte(captionFigure)); consumed > 0 {
 figure := &ast.CaptionFigure{}
 caption := &ast.Caption{}
 figure.HeadingID = id
@@ -1070,7 +1090,7 @@ func (p *Parser) quote(data []byte) int {
 return end
 }
 
-if captionContent, id, consumed := p.caption(data[end:], []byte("Quote: ")); consumed > 0 {
+if captionContent, id, consumed := p.caption(data[end:], []byte(captionQuote)); consumed > 0 {
 figure := &ast.CaptionFigure{}
 caption := &ast.Caption{}
 figure.HeadingID = id
@@ -1190,7 +1210,7 @@ func (p *Parser) oliPrefix(data []byte) int {
 }
 
 // we need >= 1 digits followed by a dot and a space or a tab
-if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') {
+if data[i] != '.' && data[i] != ')' || !(data[i+1] == ' ' || data[i+1] == '\t') {
 return 0
 }
 return i + 2
@@ -1210,13 +1230,14 @@ func (p *Parser) dliPrefix(data []byte) int {
 }
 
 // parse ordered or unordered list block
-func (p *Parser) list(data []byte, flags ast.ListType, start int) int {
+func (p *Parser) list(data []byte, flags ast.ListType, start int, delim byte) int {
 i := 0
 flags |= ast.ListItemBeginningOfList
 list := &ast.List{
 ListFlags: flags,
 Tight: true,
 Start: start,
+Delimiter: delim,
 }
 block := p.addBlock(list)
 
@@ -1305,10 +1326,16 @@ func (p *Parser) listItem(data []byte, flags *ast.ListType) int {
 }
 }
 
-var bulletChar byte = '*'
+var (
+bulletChar byte = '*'
+delimiter byte = '.'
+)
 i := p.uliPrefix(data)
 if i == 0 {
 i = p.oliPrefix(data)
+if i > 0 {
+delimiter = data[i-2]
+}
 } else {
 bulletChar = data[i-2]
 }
@@ -1468,7 +1495,7 @@ gatherlines:
 ListFlags: *flags,
 Tight: false,
 BulletChar: bulletChar,
-Delimiter: '.', // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark
+Delimiter: delimiter,
 }
 p.addBlock(listItem)
 
@@ -1574,7 +1601,7 @@ func (p *Parser) paragraph(data []byte) int {
 // did this blank line followed by a definition list item?
 if p.extensions&DefinitionLists != 0 {
 if i < len(data)-1 && data[i+1] == ':' {
-listLen := p.list(data[prev:], ast.ListTypeDefinition, 0)
+listLen := p.list(data[prev:], ast.ListTypeDefinition, 0, '.')
 return prev + listLen
 }
 }
@@ -1645,10 +1672,18 @@ func (p *Parser) paragraph(data []byte) int {
 }
 }
 
+// if there's a table, paragraph is over
+if p.extensions&Tables != 0 {
+if j, _, _ := p.tableHeader(current, false); j > 0 {
+p.renderParagraph(data[:i])
+return i
+}
+}
+
 // if there's a definition list item, prev line is a definition term
 if p.extensions&DefinitionLists != 0 {
 if p.dliPrefix(current) != 0 {
-ret := p.list(data[prev:], ast.ListTypeDefinition, 0)
+ret := p.list(data[prev:], ast.ListTypeDefinition, 0, '.')
 return ret + prev
 }
 }
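The practical effect of threading delim through list and listItem above is that ")"-delimited ordered lists such as "1)" are now recognized and the delimiter is recorded on the AST node. A hedged sketch of exercising it through the public API; the exact HTML output is only indicative:

package main

import (
    "fmt"

    "github.com/gomarkdown/markdown"
    "github.com/gomarkdown/markdown/parser"
)

func main() {
    src := []byte("1) first\n2) second\n")

    // CommonExtensions covers the ordered-list handling touched by this change.
    p := parser.NewWithExtensions(parser.CommonExtensions)
    html := markdown.ToHTML(src, p, nil)

    fmt.Println(string(html)) // expected to contain an <ol> with two items
}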
8 vendor/github.com/gomarkdown/markdown/parser/block_table.go generated vendored

@@ -105,7 +105,7 @@ func (p *Parser) tableFooter(data []byte) bool {
 }

 // tableHeaders parses the header. If recognized it will also add a table.
-func (p *Parser) tableHeader(data []byte) (size int, columns []ast.CellAlignFlags, table ast.Node) {
+func (p *Parser) tableHeader(data []byte, doRender bool) (size int, columns []ast.CellAlignFlags, table ast.Node) {
 	i := 0
 	colCount := 1
 	headerIsUnderline := true

@@ -236,12 +236,14 @@ func (p *Parser) tableHeader(data []byte) (size int, columns []ast.CellAlignFlag
 		return
 	}

+	if doRender {
 		table = &ast.Table{}
 		p.addBlock(table)
 		if header != nil {
 			p.addBlock(&ast.TableHeader{})
 			p.tableRow(header, columns, true)
 		}
+	}
 	size = skipCharN(data, i, '\n', 1)
 	return
 }

@@ -255,7 +257,7 @@ Bob | 31 | 555-1234
 Alice | 27 | 555-4321
 */
 func (p *Parser) table(data []byte) int {
-	i, columns, table := p.tableHeader(data)
+	i, columns, table := p.tableHeader(data, true)
 	if i == 0 {
 		return 0
 	}

@@ -284,7 +286,7 @@ func (p *Parser) table(data []byte) int {

 		p.tableRow(data[rowStart:i], columns, false)
 	}
-	if captionContent, id, consumed := p.caption(data[i:], []byte("Table: ")); consumed > 0 {
+	if captionContent, id, consumed := p.caption(data[i:], []byte(captionTable)); consumed > 0 {
 		caption := &ast.Caption{}
 		p.Inline(caption, captionContent)
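The new doRender flag lets the paragraph parser probe whether the next lines form a table header without emitting any AST nodes; only the real table() call passes true. A minimal sketch of the resulting behaviour through gomarkdown's public entry points (ToHTML and NewWithExtensions are the package's usual API; the sample input is illustrative):

package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/parser"
)

func main() {
	// A paragraph immediately followed by a table: with the probe in place,
	// the paragraph ends where the table header starts instead of swallowing it.
	md := []byte("some text\nName | Age\n-----|----\nBob  | 31\n")
	p := parser.NewWithExtensions(parser.Tables)
	fmt.Println(string(markdown.ToHTML(md, p, nil)))
}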
17 vendor/github.com/gomarkdown/markdown/parser/inline.go generated vendored

@@ -766,7 +766,22 @@ func entity(p *Parser, data []byte, offset int) (int, ast.Node) {
 	// undo &amp; escaping or it will be converted to &amp;amp; by another
 	// escaper in the renderer
 	if bytes.Equal(ent, []byte("&amp;")) {
-		ent = []byte{'&'}
+		return end, newTextNode([]byte{'&'})
+	}
+	if len(ent) < 4 {
+		return end, newTextNode(ent)
+	}
+
+	// if ent consists solely out of numbers (hex or decimal) convert that unicode codepoint to actual rune
+	codepoint := uint64(0)
+	var err error
+	if ent[2] == 'x' || ent[2] == 'X' { // hexadecimal
+		codepoint, err = strconv.ParseUint(string(ent[3:len(ent)-1]), 16, 64)
+	} else {
+		codepoint, err = strconv.ParseUint(string(ent[2:len(ent)-1]), 10, 64)
+	}
+	if err == nil { // only if conversion was valid return here.
+		return end, newTextNode([]byte(string(codepoint)))
 	}

 	return end, newTextNode(ent)
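The new branch turns purely numeric entities into the actual rune instead of passing the raw entity through. A small standalone sketch of the same conversion idea (my own snippet, not part of the vendored file):

package main

import (
	"fmt"
	"strconv"
)

// decodeNumericEntity mimics the conversion above: "&#65;" or "&#x41;" -> "A".
func decodeNumericEntity(ent string) (string, bool) {
	if len(ent) < 4 || ent[0] != '&' || ent[1] != '#' || ent[len(ent)-1] != ';' {
		return "", false
	}
	var cp uint64
	var err error
	if ent[2] == 'x' || ent[2] == 'X' {
		cp, err = strconv.ParseUint(ent[3:len(ent)-1], 16, 64) // hexadecimal form
	} else {
		cp, err = strconv.ParseUint(ent[2:len(ent)-1], 10, 64) // decimal form
	}
	if err != nil {
		return "", false
	}
	return string(rune(cp)), true
}

func main() {
	fmt.Println(decodeNumericEntity("&#65;"))  // A true
	fmt.Println(decodeNumericEntity("&#x41;")) // A true
}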
118 vendor/github.com/google/uuid/null.go generated vendored Normal file

@@ -0,0 +1,118 @@
+// Copyright 2021 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/json"
+	"fmt"
+)
+
+var jsonNull = []byte("null")
+
+// NullUUID represents a UUID that may be null.
+// NullUUID implements the SQL driver.Scanner interface so
+// it can be used as a scan destination:
+//
+//    var u uuid.NullUUID
+//    err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
+//    ...
+//    if u.Valid {
+//       // use u.UUID
+//    } else {
+//       // NULL value
+//    }
+//
+type NullUUID struct {
+	UUID  UUID
+	Valid bool // Valid is true if UUID is not NULL
+}
+
+// Scan implements the SQL driver.Scanner interface.
+func (nu *NullUUID) Scan(value interface{}) error {
+	if value == nil {
+		nu.UUID, nu.Valid = Nil, false
+		return nil
+	}
+
+	err := nu.UUID.Scan(value)
+	if err != nil {
+		nu.Valid = false
+		return err
+	}
+
+	nu.Valid = true
+	return nil
+}
+
+// Value implements the driver Valuer interface.
+func (nu NullUUID) Value() (driver.Value, error) {
+	if !nu.Valid {
+		return nil, nil
+	}
+	// Delegate to UUID Value function
+	return nu.UUID.Value()
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (nu NullUUID) MarshalBinary() ([]byte, error) {
+	if nu.Valid {
+		return nu.UUID[:], nil
+	}
+
+	return []byte(nil), nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (nu *NullUUID) UnmarshalBinary(data []byte) error {
+	if len(data) != 16 {
+		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+	}
+	copy(nu.UUID[:], data)
+	nu.Valid = true
+	return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (nu NullUUID) MarshalText() ([]byte, error) {
+	if nu.Valid {
+		return nu.UUID.MarshalText()
+	}
+
+	return jsonNull, nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (nu *NullUUID) UnmarshalText(data []byte) error {
+	id, err := ParseBytes(data)
+	if err != nil {
+		nu.Valid = false
+		return err
+	}
+	nu.UUID = id
+	nu.Valid = true
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (nu NullUUID) MarshalJSON() ([]byte, error) {
+	if nu.Valid {
+		return json.Marshal(nu.UUID)
+	}
+
+	return jsonNull, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (nu *NullUUID) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, jsonNull) {
+		*nu = NullUUID{}
+		return nil // valid null UUID
+	}
+	err := json.Unmarshal(data, &nu.UUID)
+	nu.Valid = err == nil
+	return err
+}
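A minimal usage sketch of the new NullUUID type for JSON round-tripping; database scanning works the same way through Scan/Value. Field and method names come from the file above, the sample values are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	var nu uuid.NullUUID

	// "null" unmarshals into an invalid (NULL) value.
	_ = json.Unmarshal([]byte(`null`), &nu)
	fmt.Println(nu.Valid) // false

	// A real UUID string sets both UUID and Valid.
	_ = json.Unmarshal([]byte(`"6ba7b810-9dad-11d1-80b4-00c04fd430c8"`), &nu)
	fmt.Println(nu.Valid, nu.UUID)
}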
45 vendor/github.com/google/uuid/uuid.go generated vendored

@@ -12,6 +12,7 @@ import (
 	"fmt"
 	"io"
 	"strings"
+	"sync"
 )

 // A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC

@@ -33,7 +34,15 @@ const (
 	Future // Reserved for future definition.
 )

-var rander = rand.Reader // random function
+const randPoolSize = 16 * 16
+
+var (
+	rander      = rand.Reader // random function
+	poolEnabled = false
+	poolMu      sync.Mutex
+	poolPos     = randPoolSize     // protected with poolMu
+	pool        [randPoolSize]byte // protected with poolMu
+)

 type invalidLengthError struct{ len int }

@@ -41,6 +50,12 @@ func (err invalidLengthError) Error() string {
 	return fmt.Sprintf("invalid UUID length: %d", err.len)
 }

+// IsInvalidLengthError is matcher function for custom error invalidLengthError
+func IsInvalidLengthError(err error) bool {
+	_, ok := err.(invalidLengthError)
+	return ok
+}
+
 // Parse decodes s into a UUID or returns an error. Both the standard UUID
 // forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
 // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the

@@ -249,3 +264,31 @@ func SetRand(r io.Reader) {
 	}
 	rander = r
 }
+
+// EnableRandPool enables internal randomness pool used for Random
+// (Version 4) UUID generation. The pool contains random bytes read from
+// the random number generator on demand in batches. Enabling the pool
+// may improve the UUID generation throughput significantly.
+//
+// Since the pool is stored on the Go heap, this feature may be a bad fit
+// for security sensitive applications.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func EnableRandPool() {
+	poolEnabled = true
+}
+
+// DisableRandPool disables the randomness pool if it was previously
+// enabled with EnableRandPool.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func DisableRandPool() {
+	poolEnabled = false
+	defer poolMu.Unlock()
+	poolMu.Lock()
+	poolPos = randPoolSize
+}
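A short sketch of opting into the new randomness pool before generating v4 UUIDs. Per the comments above, the pool trades heap-resident random bytes for throughput and must not be toggled while other goroutines may be generating UUIDs:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Must be called before any concurrent UUID generation starts.
	uuid.EnableRandPool()

	for i := 0; i < 3; i++ {
		id, err := uuid.NewRandom() // served from the 256-byte pool, 16 bytes at a time
		if err != nil {
			panic(err)
		}
		fmt.Println(id)
	}
}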
25 vendor/github.com/google/uuid/version4.go generated vendored

@@ -27,6 +27,8 @@ func NewString() string {
 // The strength of the UUIDs is based on the strength of the crypto/rand
 // package.
 //
+// Uses the randomness pool if it was enabled with EnableRandPool.
+//
 // A note about uniqueness derived from the UUID Wikipedia entry:
 //
 // Randomly generated UUIDs have 122 random bits. One's annual risk of being

@@ -35,8 +37,11 @@ func NewString() string {
 // equivalent to the odds of creating a few tens of trillions of UUIDs in a
 // year and having one duplicate.
 func NewRandom() (UUID, error) {
+	if !poolEnabled {
 		return NewRandomFromReader(rander)
 	}
+	return newRandomFromPool()
+}

 // NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
 func NewRandomFromReader(r io.Reader) (UUID, error) {

@@ -49,3 +54,23 @@ func NewRandomFromReader(r io.Reader) (UUID, error) {
 	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
 	return uuid, nil
 }
+
+func newRandomFromPool() (UUID, error) {
+	var uuid UUID
+	poolMu.Lock()
+	if poolPos == randPoolSize {
+		_, err := io.ReadFull(rander, pool[:])
+		if err != nil {
+			poolMu.Unlock()
+			return Nil, err
+		}
+		poolPos = 0
+	}
+	copy(uuid[:], pool[poolPos:(poolPos+16)])
+	poolPos += 16
+	poolMu.Unlock()
+
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
2 vendor/github.com/json-iterator/go/README.md generated vendored

@@ -8,8 +8,6 @@
 A high-performance 100% compatible drop-in replacement of "encoding/json"

-You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)
-
 # Benchmark

 ![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
21 vendor/github.com/keybase/go-keybase-chat-bot/kbchat/kbchat.go generated vendored

@@ -9,6 +9,7 @@ import (
 	"io/ioutil"
 	"os"
 	"os/exec"
+	"runtime"
 	"sync"
 	"time"

@@ -207,7 +208,9 @@ func (a *API) getUsername(runOpts RunOptions) (username string, err error) {
 	if err != nil {
 		return "", err
 	}
+	if runtime.GOOS != "windows" {
 		p.ExtraFiles = []*os.File{output.(*os.File)}
+	}
 	if err = p.Start(); err != nil {
 		return "", err
 	}

@@ -282,7 +285,7 @@ func (a *API) startPipes() (err error) {
 	defer a.Unlock()
 	if a.apiCmd != nil {
 		if err := a.apiCmd.Process.Kill(); err != nil {
-			return err
+			return fmt.Errorf("unable to kill previous API command %v", err)
 		}
 	}
 	a.apiCmd = nil

@@ -290,30 +293,32 @@ func (a *API) startPipes() (err error) {
 	if a.runOpts.StartService {
 		args := []string{fmt.Sprintf("-enable-bot-lite-mode=%v", a.runOpts.DisableBotLiteMode), "service"}
 		if err := a.runOpts.Command(args...).Start(); err != nil {
-			return err
+			return fmt.Errorf("unable to start service %v", err)
 		}
 	}

 	if a.username, err = a.auth(); err != nil {
-		return err
+		return fmt.Errorf("unable to auth: %v", err)
 	}

 	cmd := a.runOpts.Command("chat", "notification-settings", fmt.Sprintf("-disable-typing=%v", !a.runOpts.EnableTyping))
 	if err = cmd.Run(); err != nil {
-		return err
+		return fmt.Errorf("unable to set notifiation settings %v", err)
 	}

 	a.apiCmd = a.runOpts.Command("chat", "api")
 	if a.apiInput, err = a.apiCmd.StdinPipe(); err != nil {
-		return err
+		return fmt.Errorf("unable to get api stdin: %v", err)
 	}
 	output, err := a.apiCmd.StdoutPipe()
 	if err != nil {
-		return err
+		return fmt.Errorf("unabel to get api stdout: %v", err)
 	}
+	if runtime.GOOS != "windows" {
 		a.apiCmd.ExtraFiles = []*os.File{output.(*os.File)}
+	}
 	if err := a.apiCmd.Start(); err != nil {
-		return err
+		return fmt.Errorf("unable to run chat api cmd: %v", err)
 	}
 	a.apiOutput = bufio.NewReader(output)
 	return nil

@@ -508,7 +513,9 @@ func (a *API) Listen(opts ListenOptions) (*Subscription, error) {
 			time.Sleep(pause)
 			continue
 		}
+		if runtime.GOOS != "windows" {
 			p.ExtraFiles = []*os.File{stderr.(*os.File), output.(*os.File)}
+		}
 		boutput := bufio.NewScanner(output)
 		if err := p.Start(); err != nil {
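The keybase client now wraps each startup failure with fmt.Errorf("... %v", err), so callers can at least see which step failed. Note that %v flattens the cause into text, while %w would keep it available to errors.Is/errors.As; a generic sketch of the difference (not part of the vendored file):

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	cause := io.ErrUnexpectedEOF

	flat := fmt.Errorf("unable to get api stdout: %v", cause)    // message only
	wrapped := fmt.Errorf("unable to get api stdout: %w", cause) // keeps the error chain

	fmt.Println(errors.Is(flat, io.ErrUnexpectedEOF))    // false
	fmt.Println(errors.Is(wrapped, io.ErrUnexpectedEOF)) // true
}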
67 vendor/github.com/klauspost/cpuid/v2/.travis.yml generated vendored

@@ -1,67 +0,0 @@
-language: go
-
-os:
-  - linux
-  - osx
-  - windows
-
-arch:
-  - amd64
-  - arm64
-
-go:
-  - 1.13.x
-  - 1.14.x
-  - 1.15.x
-  - 1.16.x
-  - master
-
-env:
-  - CGO_ENABLED=0
-
-script:
-  - go vet ./...
-  - go test -test.v -test.run ^TestCPUID$
-  - CGO_ENABLED=1 go test -race ./...
-  - go test -tags=nounsafe -test.v -test.run ^TestCPUID$
-  - go test -tags=noasm ./...
-  - go run ./cmd/cpuid/main.go
-  - go run ./cmd/cpuid/main.go -json
-
-matrix:
-  allow_failures:
-    - go: 'master'
-  fast_finish: true
-  include:
-    - stage: other
-      go: 1.16.x
-      os: linux
-      arch: amd64
-      script:
-        - diff <(gofmt -d .) <(printf "")
-        - diff <(gofmt -d ./private) <(printf "")
-        - curl -sfL https://git.io/goreleaser | VERSION=v0.157.0 sh -s -- check # check goreleaser config for deprecations
-        - curl -sL https://git.io/goreleaser | VERSION=v0.157.0 sh -s -- --snapshot --skip-publish --rm-dist
-        - go get github.com/klauspost/asmfmt&&go install github.com/klauspost/asmfmt/cmd/asmfmt
-        - diff <(asmfmt -d .) <(printf "")
-        - GOOS=linux GOARCH=386 go test .
-        - ./test-architectures.sh
-    - stage: other
-      go: 1.15.x
-      os: linux
-      arch: amd64
-      script:
-        - ./test-architectures.sh
-
-deploy:
-  - provider: script
-    skip_cleanup: true
-    script: curl -sL https://git.io/goreleaser | VERSION=v0.157.0 bash || true
-    on:
-      tags: true
-      condition: ($TRAVIS_OS_NAME = linux) && ($TRAVIS_CPU_ARCH = amd64)
-      go: 1.16.x
-branches:
-  only:
-    - master
-    - /^v\d+\.\d+(\.\d+)?(-\S*)?$/
69 vendor/github.com/klauspost/cpuid/v2/cpuid.go generated vendored

@@ -83,6 +83,7 @@ const (
 	AVX512DQ   // AVX-512 Doubleword and Quadword Instructions
 	AVX512ER   // AVX-512 Exponential and Reciprocal Instructions
 	AVX512F    // AVX-512 Foundation
+	AVX512FP16 // AVX-512 FP16 Instructions
 	AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions
 	AVX512PF   // AVX-512 Prefetch Instructions
 	AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions

@@ -96,7 +97,9 @@ const (
 	BMI2     // Bit Manipulation Instruction Set 2
 	CLDEMOTE // Cache Line Demote
 	CLMUL    // Carry-less Multiplication
+	CLZERO   // CLZERO instruction supported
 	CMOV     // i686 CMOV
+	CPBOOST  // Core Performance Boost
 	CX16     // CMPXCHG16B Instruction
 	ENQCMD   // Enqueue Command
 	ERMS     // Enhanced REP MOVSB/STOSB

@@ -106,6 +109,7 @@ const (
 	GFNI       // Galois Field New Instructions
 	HLE        // Hardware Lock Elision
 	HTT        // Hyperthreading (enabled)
+	HWA        // Hardware assert supported. Indicates support for MSRC001_10
 	HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors
 	IBPB       // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
 	IBS        // Instruction Based Sampling (AMD)

@@ -117,18 +121,25 @@ const (
 	IBSOPSAM         // Instruction Based Sampling Feature (AMD)
 	IBSRDWROPCNT     // Instruction Based Sampling Feature (AMD)
 	IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD)
+	INT_WBINVD       // WBINVD/WBNOINVD are interruptible.
+	INVLPGB          // NVLPGB and TLBSYNC instruction supported
 	LZCNT            // LZCNT instruction
+	MCAOVERFLOW      // MCA overflow recovery support.
+	MCOMMIT          // MCOMMIT instruction supported
 	MMX              // standard MMX
 	MMXEXT           // SSE integer functions or AMD MMX ext
 	MOVDIR64B        // Move 64 Bytes as Direct Store
 	MOVDIRI          // Move Doubleword as Direct Store
 	MPX              // Intel MPX (Memory Protection Extensions)
+	MSRIRC           // Instruction Retired Counter MSR available
 	NX               // NX (No-Execute) bit
 	POPCNT           // POPCNT instruction
+	RDPRU            // RDPRU instruction supported
 	RDRAND           // RDRAND instruction is available
 	RDSEED           // RDSEED instruction is available
 	RDTSCP           // RDTSCP Instruction
 	RTM              // Restricted Transactional Memory
+	RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort.
 	SERIALIZE        // Serialize Instruction Execution
 	SGX              // Software Guard Extensions
 	SGXLC            // Software Guard Extensions Launch Control

@@ -141,6 +152,7 @@ const (
 	SSE4A    // AMD Barcelona microarchitecture SSE4a instructions
 	SSSE3    // Conroe SSSE3 functions
 	STIBP    // Single Thread Indirect Branch Predictors
+	SUCCOR   // Software uncorrectable error containment and recovery capability.
 	TBM      // AMD Trailing Bit Manipulation
 	TSXLDTRK // Intel TSX Suspend Load Address Tracking
 	VAES     // Vector AES

@@ -194,7 +206,8 @@ type CPUInfo struct {
 	Family    int // CPU family number
 	Model     int // CPU model number
 	CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
-	Hz        int64 // Clock speed, if known, 0 otherwise
+	Hz        int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed.
+	BoostFreq int64 // Max clock speed, if known, 0 otherwise
 	Cache     struct {
 		L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
 		L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected

@@ -363,25 +376,42 @@ func (c CPUInfo) LogicalCPU() int {
 	return int(ebx >> 24)
 }

-// hertz tries to compute the clock speed of the CPU. If leaf 15 is
+// frequencies tries to compute the clock speed of the CPU. If leaf 15 is
 // supported, use it, otherwise parse the brand string. Yes, really.
-func hertz(model string) int64 {
+func (c *CPUInfo) frequencies() {
+	c.Hz, c.BoostFreq = 0, 0
 	mfi := maxFunctionID()
 	if mfi >= 0x15 {
 		eax, ebx, ecx, _ := cpuid(0x15)
 		if eax != 0 && ebx != 0 && ecx != 0 {
-			return int64((int64(ecx) * int64(ebx)) / int64(eax))
+			c.Hz = (int64(ecx) * int64(ebx)) / int64(eax)
 		}
 	}
+	if mfi >= 0x16 {
+		a, b, _, _ := cpuid(0x16)
+		// Base...
+		if a&0xffff > 0 {
+			c.Hz = int64(a&0xffff) * 1_000_000
+		}
+		// Boost...
+		if b&0xffff > 0 {
+			c.BoostFreq = int64(b&0xffff) * 1_000_000
+		}
+	}
+	if c.Hz > 0 {
+		return
+	}

 	// computeHz determines the official rated speed of a CPU from its brand
 	// string. This insanity is *actually the official documented way to do
 	// this according to Intel*, prior to leaf 0x15 existing. The official
 	// documentation only shows this working for exactly `x.xx` or `xxxx`
 	// cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other
 	// sizes.
+	model := c.BrandName
 	hz := strings.LastIndex(model, "Hz")
 	if hz < 3 {
-		return 0
+		return
 	}
 	var multiplier int64
 	switch model[hz-1] {

@@ -393,7 +423,7 @@ func hertz(model string) int64 {
 		multiplier = 1000 * 1000 * 1000 * 1000
 	}
 	if multiplier == 0 {
-		return 0
+		return
 	}
 	freq := int64(0)
 	divisor := int64(0)

@@ -405,21 +435,22 @@ func hertz(model string) int64 {
 			decimalShift *= 10
 		} else if model[i] == '.' {
 			if divisor != 0 {
-				return 0
+				return
 			}
 			divisor = decimalShift
 		} else {
-			return 0
+			return
 		}
 	}
 	// we didn't find a space
 	if i < 0 {
-		return 0
+		return
 	}
 	if divisor != 0 {
-		return (freq * multiplier) / divisor
+		c.Hz = (freq * multiplier) / divisor
+		return
 	}
-	return freq * multiplier
+	c.Hz = freq * multiplier
 }

 // VM Will return true if the cpu id indicates we are in

@@ -911,6 +942,7 @@ func support() flagSet {
 		fs.setIf(ecx&(1<<29) != 0, ENQCMD)
 		fs.setIf(ecx&(1<<30) != 0, SGXLC)
 		// CPUID.(EAX=7, ECX=0).EDX
+		fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT)
 		fs.setIf(edx&(1<<14) != 0, SERIALIZE)
 		fs.setIf(edx&(1<<16) != 0, TSXLDTRK)
 		fs.setIf(edx&(1<<26) != 0, IBPB)

@@ -949,6 +981,7 @@ func support() flagSet {
 		// edx
 		fs.setIf(edx&(1<<8) != 0, AVX512VP2INTERSECT)
 		fs.setIf(edx&(1<<22) != 0, AMXBF16)
+		fs.setIf(edx&(1<<23) != 0, AVX512FP16)
 		fs.setIf(edx&(1<<24) != 0, AMXTILE)
 		fs.setIf(edx&(1<<25) != 0, AMXINT8)
 		// eax1 = CPUID.(EAX=7, ECX=1).EAX

@@ -980,9 +1013,23 @@ func support() flagSet {
 		}

 	}
+	if maxExtendedFunction() >= 0x80000007 {
+		_, b, _, d := cpuid(0x80000007)
+		fs.setIf((b&(1<<0)) != 0, MCAOVERFLOW)
+		fs.setIf((b&(1<<1)) != 0, SUCCOR)
+		fs.setIf((b&(1<<2)) != 0, HWA)
+		fs.setIf((d&(1<<9)) != 0, CPBOOST)
+	}
+
 	if maxExtendedFunction() >= 0x80000008 {
 		_, b, _, _ := cpuid(0x80000008)
 		fs.setIf((b&(1<<9)) != 0, WBNOINVD)
+		fs.setIf((b&(1<<8)) != 0, MCOMMIT)
+		fs.setIf((b&(1<<13)) != 0, INT_WBINVD)
+		fs.setIf((b&(1<<4)) != 0, RDPRU)
+		fs.setIf((b&(1<<3)) != 0, INVLPGB)
+		fs.setIf((b&(1<<1)) != 0, MSRIRC)
+		fs.setIf((b&(1<<0)) != 0, CLZERO)
 	}

 	if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) {
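With the standalone hertz helper replaced by CPUInfo.frequencies, the detected CPU now carries both a base and a boost clock. A minimal sketch reading them through the package's global CPU value; Supports and BrandName are existing cpuid APIs, Hz/BoostFreq and AVX512FP16 are the additions above:

package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	fmt.Println("brand:", cpuid.CPU.BrandName)
	fmt.Println("base Hz:", cpuid.CPU.Hz)         // 0 if undetectable
	fmt.Println("boost Hz:", cpuid.CPU.BoostFreq) // 0 if undetectable
	fmt.Println("AVX512FP16:", cpuid.CPU.Supports(cpuid.AVX512FP16))
}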
2 vendor/github.com/klauspost/cpuid/v2/detect_x86.go generated vendored

@@ -30,6 +30,6 @@ func addInfo(c *CPUInfo, safe bool) {
 	c.LogicalCores = logicalCores()
 	c.PhysicalCores = physicalCores()
 	c.VendorID, c.VendorString = vendorID()
-	c.Hz = hertz(c.BrandName)
 	c.cacheSize()
+	c.frequencies()
 }
198 vendor/github.com/klauspost/cpuid/v2/featureid_string.go generated vendored

@@ -24,103 +24,115 @@ func _() {
 	_ = x[AVX512DQ-14]
 	_ = x[AVX512ER-15]
 	_ = x[AVX512F-16]
-	_ = x[AVX512IFMA-17]
-	_ = x[AVX512PF-18]
-	_ = x[AVX512VBMI-19]
-	_ = x[AVX512VBMI2-20]
-	_ = x[AVX512VL-21]
-	_ = x[AVX512VNNI-22]
-	_ = x[AVX512VP2INTERSECT-23]
-	_ = x[AVX512VPOPCNTDQ-24]
-	_ = x[AVXSLOW-25]
-	_ = x[BMI1-26]
-	_ = x[BMI2-27]
-	_ = x[CLDEMOTE-28]
-	_ = x[CLMUL-29]
-	_ = x[CMOV-30]
-	_ = x[CX16-31]
-	_ = x[ENQCMD-32]
-	_ = x[ERMS-33]
-	_ = x[F16C-34]
-	_ = x[FMA3-35]
-	_ = x[FMA4-36]
-	_ = x[GFNI-37]
-	_ = x[HLE-38]
-	_ = x[HTT-39]
-	_ = x[HYPERVISOR-40]
-	_ = x[IBPB-41]
-	_ = x[IBS-42]
-	_ = x[IBSBRNTRGT-43]
-	_ = x[IBSFETCHSAM-44]
-	_ = x[IBSFFV-45]
-	_ = x[IBSOPCNT-46]
-	_ = x[IBSOPCNTEXT-47]
-	_ = x[IBSOPSAM-48]
-	_ = x[IBSRDWROPCNT-49]
-	_ = x[IBSRIPINVALIDCHK-50]
-	_ = x[LZCNT-51]
-	_ = x[MMX-52]
-	_ = x[MMXEXT-53]
-	_ = x[MOVDIR64B-54]
-	_ = x[MOVDIRI-55]
-	_ = x[MPX-56]
-	_ = x[NX-57]
-	_ = x[POPCNT-58]
-	_ = x[RDRAND-59]
-	_ = x[RDSEED-60]
-	_ = x[RDTSCP-61]
-	_ = x[RTM-62]
-	_ = x[SERIALIZE-63]
-	_ = x[SGX-64]
-	_ = x[SGXLC-65]
-	_ = x[SHA-66]
-	_ = x[SSE-67]
-	_ = x[SSE2-68]
-	_ = x[SSE3-69]
-	_ = x[SSE4-70]
-	_ = x[SSE42-71]
-	_ = x[SSE4A-72]
-	_ = x[SSSE3-73]
-	_ = x[STIBP-74]
-	_ = x[TBM-75]
-	_ = x[TSXLDTRK-76]
-	_ = x[VAES-77]
-	_ = x[VMX-78]
-	_ = x[VPCLMULQDQ-79]
-	_ = x[WAITPKG-80]
-	_ = x[WBNOINVD-81]
-	_ = x[XOP-82]
-	_ = x[AESARM-83]
-	_ = x[ARMCPUID-84]
-	_ = x[ASIMD-85]
-	_ = x[ASIMDDP-86]
-	_ = x[ASIMDHP-87]
-	_ = x[ASIMDRDM-88]
-	_ = x[ATOMICS-89]
-	_ = x[CRC32-90]
-	_ = x[DCPOP-91]
-	_ = x[EVTSTRM-92]
-	_ = x[FCMA-93]
-	_ = x[FP-94]
-	_ = x[FPHP-95]
-	_ = x[GPA-96]
-	_ = x[JSCVT-97]
-	_ = x[LRCPC-98]
-	_ = x[PMULL-99]
-	_ = x[SHA1-100]
-	_ = x[SHA2-101]
-	_ = x[SHA3-102]
-	_ = x[SHA512-103]
-	_ = x[SM3-104]
-	_ = x[SM4-105]
-	_ = x[SVE-106]
-	_ = x[lastID-107]
+	_ = x[AVX512FP16-17]
+	_ = x[AVX512IFMA-18]
+	_ = x[AVX512PF-19]
+	_ = x[AVX512VBMI-20]
+	_ = x[AVX512VBMI2-21]
+	_ = x[AVX512VL-22]
+	_ = x[AVX512VNNI-23]
+	_ = x[AVX512VP2INTERSECT-24]
+	_ = x[AVX512VPOPCNTDQ-25]
+	_ = x[AVXSLOW-26]
+	_ = x[BMI1-27]
+	_ = x[BMI2-28]
+	_ = x[CLDEMOTE-29]
+	_ = x[CLMUL-30]
+	_ = x[CLZERO-31]
+	_ = x[CMOV-32]
+	_ = x[CPBOOST-33]
+	_ = x[CX16-34]
+	_ = x[ENQCMD-35]
+	_ = x[ERMS-36]
+	_ = x[F16C-37]
+	_ = x[FMA3-38]
+	_ = x[FMA4-39]
+	_ = x[GFNI-40]
+	_ = x[HLE-41]
+	_ = x[HTT-42]
+	_ = x[HWA-43]
+	_ = x[HYPERVISOR-44]
+	_ = x[IBPB-45]
+	_ = x[IBS-46]
+	_ = x[IBSBRNTRGT-47]
+	_ = x[IBSFETCHSAM-48]
+	_ = x[IBSFFV-49]
+	_ = x[IBSOPCNT-50]
+	_ = x[IBSOPCNTEXT-51]
+	_ = x[IBSOPSAM-52]
+	_ = x[IBSRDWROPCNT-53]
+	_ = x[IBSRIPINVALIDCHK-54]
+	_ = x[INT_WBINVD-55]
+	_ = x[INVLPGB-56]
+	_ = x[LZCNT-57]
+	_ = x[MCAOVERFLOW-58]
+	_ = x[MCOMMIT-59]
+	_ = x[MMX-60]
+	_ = x[MMXEXT-61]
+	_ = x[MOVDIR64B-62]
+	_ = x[MOVDIRI-63]
+	_ = x[MPX-64]
+	_ = x[MSRIRC-65]
+	_ = x[NX-66]
+	_ = x[POPCNT-67]
+	_ = x[RDPRU-68]
+	_ = x[RDRAND-69]
+	_ = x[RDSEED-70]
+	_ = x[RDTSCP-71]
+	_ = x[RTM-72]
+	_ = x[RTM_ALWAYS_ABORT-73]
+	_ = x[SERIALIZE-74]
+	_ = x[SGX-75]
+	_ = x[SGXLC-76]
+	_ = x[SHA-77]
+	_ = x[SSE-78]
+	_ = x[SSE2-79]
+	_ = x[SSE3-80]
+	_ = x[SSE4-81]
+	_ = x[SSE42-82]
+	_ = x[SSE4A-83]
+	_ = x[SSSE3-84]
+	_ = x[STIBP-85]
+	_ = x[SUCCOR-86]
+	_ = x[TBM-87]
+	_ = x[TSXLDTRK-88]
+	_ = x[VAES-89]
+	_ = x[VMX-90]
+	_ = x[VPCLMULQDQ-91]
+	_ = x[WAITPKG-92]
+	_ = x[WBNOINVD-93]
+	_ = x[XOP-94]
+	_ = x[AESARM-95]
+	_ = x[ARMCPUID-96]
+	_ = x[ASIMD-97]
+	_ = x[ASIMDDP-98]
+	_ = x[ASIMDHP-99]
+	_ = x[ASIMDRDM-100]
+	_ = x[ATOMICS-101]
+	_ = x[CRC32-102]
+	_ = x[DCPOP-103]
+	_ = x[EVTSTRM-104]
+	_ = x[FCMA-105]
+	_ = x[FP-106]
+	_ = x[FPHP-107]
+	_ = x[GPA-108]
+	_ = x[JSCVT-109]
+	_ = x[LRCPC-110]
+	_ = x[PMULL-111]
+	_ = x[SHA1-112]
+	_ = x[SHA2-113]
+	_ = x[SHA3-114]
+	_ = x[SHA512-115]
+	_ = x[SM3-116]
+	_ = x[SM4-117]
+	_ = x[SVE-118]
+	_ = x[lastID-119]
 	_ = x[firstID-0]
 }

-const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWBMI1BMI2CLDEMOTECLMULCMOVCX16ENQCMDERMSF16CFMA3FMA4GFNIHLEHTTHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKLZCNTMMXMMXEXTMOVDIR64BMOVDIRIMPXNXPOPCNTRDRANDRDSEEDRDTSCPRTMSERIALIZESGXSGXLCSHASSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPTBMTSXLDTRKVAESVMXVPCLMULQDQWAITPKGWBNOINVDXOPAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
+const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWBMI1BMI2CLDEMOTECLMULCLZEROCMOVCPBOOSTCX16ENQCMDERMSF16CFMA3FMA4GFNIHLEHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKINT_WBINVDINVLPGBLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVDIR64BMOVDIRIMPXMSRIRCNXPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSERIALIZESGXSGXLCSHASSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSUCCORTBMTSXLDTRKVAESVMXVPCLMULQDQWAITPKGWBNOINVDXOPAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"

-var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 141, 151, 162, 170, 180, 198, 213, 220, 224, 228, 236, 241, 245, 249, 255, 259, 263, 267, 271, 275, 278, 281, 291, 295, 298, 308, 319, 325, 333, 344, 352, 364, 380, 385, 388, 394, 403, 410, 413, 415, 421, 427, 433, 439, 442, 451, 454, 459, 462, 465, 469, 473, 477, 482, 487, 492, 497, 500, 508, 512, 515, 525, 532, 540, 543, 549, 557, 562, 569, 576, 584, 591, 596, 601, 608, 612, 614, 618, 621, 626, 631, 636, 640, 644, 648, 654, 657, 660, 663, 669}
+var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 234, 238, 246, 251, 257, 261, 268, 272, 278, 282, 286, 290, 294, 298, 301, 304, 307, 317, 321, 324, 334, 345, 351, 359, 370, 378, 390, 406, 416, 423, 428, 439, 446, 449, 455, 464, 471, 474, 480, 482, 488, 493, 499, 505, 511, 514, 530, 539, 542, 547, 550, 553, 557, 561, 565, 570, 575, 580, 585, 591, 594, 602, 606, 609, 619, 626, 634, 637, 643, 651, 656, 663, 670, 678, 685, 690, 695, 702, 706, 708, 712, 715, 720, 725, 730, 734, 738, 742, 748, 751, 754, 757, 763}

 func (i FeatureID) String() string {
 	if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
5 vendor/github.com/lrstanley/girc/builtin.go generated vendored

@@ -108,7 +108,10 @@ func nickCollisionHandler(c *Client, e Event) {
 		return
 	}

-	c.Cmd.Nick(c.Config.HandleNickCollide(c.GetNick()))
+	newNick := c.Config.HandleNickCollide(c.GetNick())
+	if newNick != "" {
+		c.Cmd.Nick(newNick)
+	}
 }

 // handlePING helps respond to ping requests from the server.
3 vendor/github.com/lrstanley/girc/client.go generated vendored

@@ -168,6 +168,9 @@ type Config struct {
 	// an invalid nickname. For example, if "test" is already in use, or is
 	// blocked by the network/a service, the client will try and use "test_",
 	// then it will attempt "test__", "test___", and so on.
+	//
+	// If HandleNickCollide returns an empty string, the client will not
+	// attempt to fix nickname collisions, and you must handle this yourself.
 	HandleNickCollide func(oldNick string) (newNick string)
 }
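Because the client now skips Cmd.Nick when the callback returns an empty string, an application can opt out of automatic collision handling entirely. A hedged sketch; the Config fields other than HandleNickCollide are the usual girc connection settings and are assumptions here:

package main

import "github.com/lrstanley/girc"

func main() {
	client := girc.New(girc.Config{
		Server: "irc.example.org", // assumed connection settings
		Port:   6667,
		Nick:   "mybot",
		User:   "mybot",
		// Returning "" tells girc not to rename automatically;
		// collisions are then left to the application.
		HandleNickCollide: func(oldNick string) string { return "" },
	})
	_ = client
}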
12 vendor/github.com/matterbridge/matterclient/matterclient.go generated vendored

@@ -71,6 +71,7 @@ type Client struct {
 	WsConnected   bool
 	OnWsConnect   func()
 	reconnectBusy bool
+	Timeout       int

 	logger     *logrus.Entry
 	rootLogger *logrus.Logger

@@ -80,6 +81,8 @@ type Client struct {
 	lastPong time.Time
 }

+var Matterircd bool
+
 func New(login string, pass string, team string, server string, mfatoken string) *Client {
 	rootLogger := logrus.New()
 	rootLogger.SetFormatter(&prefixed.TextFormatter{

@@ -229,7 +232,12 @@ func (m *Client) initClient(b *backoff.Backoff) error {
 		},
 		Proxy: http.ProxyFromEnvironment,
 	}
-	m.Client.HTTPClient.Timeout = time.Second * 10
+
+	if m.Timeout == 0 {
+		m.Timeout = 10
+	}
+
+	m.Client.HTTPClient.Timeout = time.Second * time.Duration(m.Timeout)

 	// handle MMAUTHTOKEN and personal token
 	if err := m.handleLoginToken(); err != nil {

@@ -613,7 +621,9 @@ func (m *Client) WsReceiver(ctx context.Context) {
 				Team: m.Credentials.Team,
 			}

+			if !Matterircd {
 				m.parseMessage(msg)
+			}

 			m.MessageChan <- msg
 		case response := <-m.WsClient.ResponseChannel:
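The new exported Timeout field (in seconds) now feeds the HTTP client timeout in initClient, with 10 seconds still the default when left at zero. A hedged usage sketch; New's signature comes from the file above, everything after it is an assumption about how the client is normally driven:

package main

import "github.com/matterbridge/matterclient"

func main() {
	m := matterclient.New("user", "password", "team", "mattermost.example.org", "")
	m.Timeout = 30 // seconds; 0 keeps the default of 10
	// ... log in / connect as usual; the HTTP client created during
	// initialization will then use the 30s timeout.
	_ = m
}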
28 vendor/github.com/mattermost/logr/v2/buffer.go generated vendored Normal file

@@ -0,0 +1,28 @@
+package logr
+
+import (
+	"bytes"
+	"sync"
+)
+
+// Buffer provides a thread-safe buffer useful for logging to memory in unit tests.
+type Buffer struct {
+	buf bytes.Buffer
+	mux sync.Mutex
+}
+
+func (b *Buffer) Read(p []byte) (n int, err error) {
+	b.mux.Lock()
+	defer b.mux.Unlock()
+	return b.buf.Read(p)
+}
+
+func (b *Buffer) Write(p []byte) (n int, err error) {
+	b.mux.Lock()
+	defer b.mux.Unlock()
+	return b.buf.Write(p)
+}
+
+func (b *Buffer) String() string {
+	b.mux.Lock()
+	defer b.mux.Unlock()
+	return b.buf.String()
+}
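Buffer is just a mutex-guarded bytes.Buffer, so anything that writes log output from several goroutines can target it and a test can read the result back afterwards. A minimal standalone sketch of that property:

package main

import (
	"fmt"
	"sync"

	"github.com/mattermost/logr/v2"
)

func main() {
	var buf logr.Buffer
	var wg sync.WaitGroup

	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			fmt.Fprintf(&buf, "line %d\n", n) // safe: Write is mutex-protected
		}(i)
	}
	wg.Wait()
	fmt.Print(buf.String())
}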
8 vendor/github.com/mattermost/logr/v2/config/config.go generated vendored

@@ -31,8 +31,8 @@ type TargetFactory func(targetType string, options json.RawMessage) (logr.Target
 type FormatterFactory func(format string, options json.RawMessage) (logr.Formatter, error)

 type Factories struct {
-	targetFactory    TargetFactory    // can be nil
-	formatterFactory FormatterFactory // can be nil
+	TargetFactory    TargetFactory    // can be nil
+	FormatterFactory FormatterFactory // can be nil
 }

 var removeAll = func(ti logr.TargetInfo) bool { return true }

@@ -56,7 +56,7 @@ func ConfigureTargets(lgr *logr.Logr, config map[string]TargetCfg, factories *Fa
 	}

 	for name, tcfg := range config {
-		target, err := newTarget(tcfg.Type, tcfg.Options, factories.targetFactory)
+		target, err := newTarget(tcfg.Type, tcfg.Options, factories.TargetFactory)
 		if err != nil {
 			return fmt.Errorf("error creating log target %s: %w", name, err)
 		}

@@ -65,7 +65,7 @@ func ConfigureTargets(lgr *logr.Logr, config map[string]TargetCfg, factories *Fa
 			continue
 		}

-		formatter, err := newFormatter(tcfg.Format, tcfg.FormatOptions, factories.formatterFactory)
+		formatter, err := newFormatter(tcfg.Format, tcfg.FormatOptions, factories.FormatterFactory)
 		if err != nil {
 			return fmt.Errorf("error creating formatter for log target %s: %w", name, err)
 		}
vendor/github.com/mattermost/logr/v2/field.go
generated
vendored
2
vendor/github.com/mattermost/logr/v2/field.go
generated
vendored
@ -15,7 +15,7 @@ var (
|
|||||||
Space = []byte{' '}
|
Space = []byte{' '}
|
||||||
Newline = []byte{'\n'}
|
Newline = []byte{'\n'}
|
||||||
Quote = []byte{'"'}
|
Quote = []byte{'"'}
|
||||||
Colon = []byte{'"'}
|
Colon = []byte{':'}
|
||||||
)
|
)
|
||||||
|
|
||||||
// LogCloner is implemented by `Any` types that require a clone to be provided
|
// LogCloner is implemented by `Any` types that require a clone to be provided
|
||||||
|
6 vendor/github.com/mattermost/logr/v2/filterstd.go generated vendored

@@ -11,6 +11,7 @@ type StdFilter struct {
 // is enabled for this filter.
 func (lt StdFilter) GetEnabledLevel(level Level) (Level, bool) {
 	enabled := level.ID <= lt.Lvl.ID
+	stackTrace := level.ID <= lt.Stacktrace.ID
 	var levelEnabled Level

 	if enabled {

@@ -33,6 +34,11 @@ func (lt StdFilter) GetEnabledLevel(level Level) (Level, bool) {
 			levelEnabled = level
 		}
 	}
+
+	if stackTrace {
+		levelEnabled.Stacktrace = true
+	}
+
 	return levelEnabled, enabled
 }
78 vendor/github.com/mattermost/logr/v2/sugar.go generated vendored

@@ -117,3 +117,81 @@ func (s Sugar) Fatalf(format string, args ...interface{}) {
 func (s Sugar) Panicf(format string, args ...interface{}) {
 	s.Logf(Panic, format, args...)
 }
+
+//
+// K/V style
+//
+
+// With returns a new Sugar logger with the specified key/value pairs added to the
+// fields list.
+func (s Sugar) With(keyValuePairs ...interface{}) Sugar {
+	return s.logger.With(s.argsToFields(keyValuePairs)...).Sugar()
+}
+
+// Tracew outputs at trace level with the specified key/value pairs converted to fields.
+func (s Sugar) Tracew(msg string, keyValuePairs ...interface{}) {
+	s.logger.Log(Trace, msg, s.argsToFields(keyValuePairs)...)
+}
+
+// Debugw outputs at debug level with the specified key/value pairs converted to fields.
+func (s Sugar) Debugw(msg string, keyValuePairs ...interface{}) {
+	s.logger.Log(Debug, msg, s.argsToFields(keyValuePairs)...)
+}
+
+// Infow outputs at info level with the specified key/value pairs converted to fields.
+func (s Sugar) Infow(msg string, keyValuePairs ...interface{}) {
+	s.logger.Log(Info, msg, s.argsToFields(keyValuePairs)...)
+}
+
+// Warnw outputs at warn level with the specified key/value pairs converted to fields.
+func (s Sugar) Warnw(msg string, keyValuePairs ...interface{}) {
+	s.logger.Log(Warn, msg, s.argsToFields(keyValuePairs)...)
+}
+
+// Errorw outputs at error level with the specified key/value pairs converted to fields.
+func (s Sugar) Errorw(msg string, keyValuePairs ...interface{}) {
+	s.logger.Log(Error, msg, s.argsToFields(keyValuePairs)...)
+}
+
+// Fatalw outputs at fatal level with the specified key/value pairs converted to fields.
+func (s Sugar) Fatalw(msg string, keyValuePairs ...interface{}) {
+	s.logger.Log(Fatal, msg, s.argsToFields(keyValuePairs)...)
+}
+
+// Panicw outputs at panic level with the specified key/value pairs converted to fields.
+func (s Sugar) Panicw(msg string, keyValuePairs ...interface{}) {
+	s.logger.Log(Panic, msg, s.argsToFields(keyValuePairs)...)
+}
+
+// argsToFields converts an array of args, possibly containing name/value pairs
+// into a []Field.
+func (s Sugar) argsToFields(keyValuePairs []interface{}) []Field {
+	if len(keyValuePairs) == 0 {
+		return nil
+	}
+
+	fields := make([]Field, 0, len(keyValuePairs))
+	count := len(keyValuePairs)
+
+	for i := 0; i < count; {
+		if fld, ok := keyValuePairs[i].(Field); ok {
+			fields = append(fields, fld)
+			i++
+			continue
+		}
+
+		if i == count-1 {
+			s.logger.Error("invalid key/value pair", Any("arg", keyValuePairs[i]))
+			break
+		}
+
+		// we should have a key/value pair now. The key must be a string.
+		if key, ok := keyValuePairs[i].(string); !ok {
+			s.logger.Error("invalid key for key/value pair", Int("pos", i))
+		} else {
+			fields = append(fields, Any(key, keyValuePairs[i+1]))
+		}
+		i += 2
+	}
+	return fields
+}
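A short sketch of the new key/value helpers, assuming you already have a logr.Logger (for example from the testing target below); keys must be strings, and ready-made Fields are passed through unchanged:

package example

import "github.com/mattermost/logr/v2"

func logWithSugar(lgr logr.Logger) {
	sugar := lgr.Sugar()

	// Key/value pairs are converted to fields.
	sugar.Infow("user logged in", "user_id", 42, "team", "core")

	// With returns a sugar logger that carries the fields on every record.
	reqLog := sugar.With("request_id", "abc123")
	reqLog.Debugw("handling request", "path", "/status")
}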
72 vendor/github.com/mattermost/logr/v2/targets/testing.go generated vendored Normal file

@@ -0,0 +1,72 @@
+package targets
+
+import (
+	"strings"
+	"sync"
+	"testing"
+
+	"github.com/mattermost/logr/v2"
+	"github.com/mattermost/logr/v2/formatters"
+)
+
+// Testing is a simple log target that writes to a (*testing.T) log.
+type Testing struct {
+	mux sync.Mutex
+	t   *testing.T
+}
+
+func NewTestingTarget(t *testing.T) *Testing {
+	return &Testing{
+		t: t,
+	}
+}
+
+// Init is called once to initialize the target.
+func (tt *Testing) Init() error {
+	return nil
+}
+
+// Write outputs bytes to this file target.
+func (tt *Testing) Write(p []byte, rec *logr.LogRec) (int, error) {
+	tt.mux.Lock()
+	defer tt.mux.Unlock()
+
+	if tt.t != nil {
+		s := strings.TrimSpace(string(p))
+		tt.t.Log(s)
+	}
+	return len(p), nil
+}
+
+// Shutdown is called once to free/close any resources.
+// Target queue is already drained when this is called.
+func (tt *Testing) Shutdown() error {
+	tt.mux.Lock()
+	defer tt.mux.Unlock()
+
+	tt.t = nil
+	return nil
+}
+
+// CreateTestLogger creates a logger for unit tests. Log records are output to `(*testing.T).Log`.
+// A new logger is returned along with a method to shutdown the new logger.
+func CreateTestLogger(t *testing.T, levels ...logr.Level) (logger logr.Logger, shutdown func() error) {
+	lgr, _ := logr.New()
+	filter := logr.NewCustomFilter(levels...)
+	formatter := &formatters.Plain{EnableCaller: true}
+	target := NewTestingTarget(t)
+
+	if err := lgr.AddTarget(target, "test", filter, formatter, 1000); err != nil {
+		t.Fail()
+	}
+	shutdown = func() error {
+		err := lgr.Shutdown()
+		if err != nil {
+			target.mux.Lock()
+			target.t.Error("error shutting down test logger", err)
+			target.mux.Unlock()
+		}
+		return err
+	}
+	return lgr.NewLogger(), shutdown
+}
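A usage sketch of the new test target in a unit test. CreateTestLogger and the level names come straight from the package above; the logging call on the returned logger is the package's usual level method and the test body itself is illustrative:

package mypkg

import (
	"testing"

	"github.com/mattermost/logr/v2"
	"github.com/mattermost/logr/v2/targets"
)

func TestSomething(t *testing.T) {
	logger, shutdown := targets.CreateTestLogger(t, logr.Info, logr.Error)
	defer func() {
		if err := shutdown(); err != nil {
			t.Fatal(err)
		}
	}()

	logger.Info("this line ends up in the go test output via t.Log")
}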
7 vendor/github.com/mattermost/mattermost-server/v6/model/auditconv.go generated vendored

@@ -4,6 +4,8 @@
 package model

 import (
+	"strings"
+
 	"github.com/francoispqt/gojay"
 )

@@ -268,7 +270,10 @@ func newAuditCommandArgs(ca *CommandArgs) auditCommandArgs {
 		cmdargs.ChannelID = ca.ChannelId
 		cmdargs.TeamID = ca.TeamId
 		cmdargs.TriggerID = ca.TriggerId
-		cmdargs.Command = ca.Command
+		cmdFields := strings.Fields(ca.Command)
+		if len(cmdFields) > 0 {
+			cmdargs.Command = cmdFields[0]
+		}
 	}
 	return cmdargs
 }
20 vendor/github.com/mattermost/mattermost-server/v6/model/bot.go generated vendored

@@ -63,12 +63,8 @@ func (b *Bot) Clone() *Bot {
 	return &copy
 }

-// IsValid validates the bot and returns an error if it isn't configured correctly.
-func (b *Bot) IsValid() *AppError {
-	if !IsValidId(b.UserId) {
-		return NewAppError("Bot.IsValid", "model.bot.is_valid.user_id.app_error", b.Trace(), "", http.StatusBadRequest)
-	}
-
+// IsValidCreate validates bot for Create call. This skips validations of fields that are auto-filled on Create
+func (b *Bot) IsValidCreate() *AppError {
 	if !IsValidUsername(b.Username) {
 		return NewAppError("Bot.IsValid", "model.bot.is_valid.username.app_error", b.Trace(), "", http.StatusBadRequest)
 	}

@@ -85,6 +81,15 @@ func (b *Bot) IsValid() *AppError {
 		return NewAppError("Bot.IsValid", "model.bot.is_valid.creator_id.app_error", b.Trace(), "", http.StatusBadRequest)
 	}

+	return nil
+}
+
+// IsValid validates the bot and returns an error if it isn't configured correctly.
+func (b *Bot) IsValid() *AppError {
+	if !IsValidId(b.UserId) {
+		return NewAppError("Bot.IsValid", "model.bot.is_valid.user_id.app_error", b.Trace(), "", http.StatusBadRequest)
+	}
+
 	if b.CreateAt == 0 {
 		return NewAppError("Bot.IsValid", "model.bot.is_valid.create_at.app_error", b.Trace(), "", http.StatusBadRequest)
 	}

@@ -92,8 +97,7 @@ func (b *Bot) IsValid() *AppError {
 	if b.UpdateAt == 0 {
 		return NewAppError("Bot.IsValid", "model.bot.is_valid.update_at.app_error", b.Trace(), "", http.StatusBadRequest)
 	}
-
-	return nil
+	return b.IsValidCreate()
 }

 // PreSave should be run before saving a new bot to the database.
1  vendor/github.com/mattermost/mattermost-server/v6/model/channel.go  (generated, vendored)
@@ -56,6 +56,7 @@ type Channel struct {
 	Shared            *bool   `json:"shared"`
 	TotalMsgCountRoot int64   `json:"total_msg_count_root"`
 	PolicyID          *string `json:"policy_id" db:"-"`
+	LastRootPostAt    int64   `json:"last_root_post_at"`
 }
 
 type ChannelWithTeamData struct {
6  vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go  (generated, vendored)
@@ -69,7 +69,6 @@ type ChannelMemberForExport struct {
 }
 
 func (o *ChannelMember) IsValid() *AppError {
-
 	if !IsValidId(o.ChannelId) {
 		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest)
 	}
@@ -106,6 +105,11 @@ func (o *ChannelMember) IsValid() *AppError {
 		}
 	}
 
+	if len(o.Roles) > UserRolesMaxLength {
+		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.roles_limit.app_error",
+			map[string]interface{}{"Limit": UserRolesMaxLength}, "", http.StatusBadRequest)
+	}
+
 	return nil
 }
 
16  vendor/github.com/mattermost/mattermost-server/v6/model/client4.go  (generated, vendored)
@@ -3899,7 +3899,13 @@ func (c *Client4) SearchPostsWithParams(teamId string, params *SearchParameter)
 	if jsonErr != nil {
 		return nil, nil, NewAppError("SearchFilesWithParams", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
 	}
-	r, err := c.DoAPIPost(c.teamRoute(teamId)+"/posts/search", string(js))
+	var route string
+	if teamId == "" {
+		route = c.postsRoute() + "/search"
+	} else {
+		route = c.teamRoute(teamId) + "/posts/search"
+	}
+	r, err := c.DoAPIPost(route, string(js))
 	if err != nil {
 		return nil, BuildResponse(r), err
 	}
@@ -3917,7 +3923,13 @@ func (c *Client4) SearchPostsWithParams(teamId string, params *SearchParameter)
 // SearchPostsWithMatches returns any posts with matching terms string, including.
 func (c *Client4) SearchPostsWithMatches(teamId string, terms string, isOrSearch bool) (*PostSearchResults, *Response, error) {
 	requestBody := map[string]interface{}{"terms": terms, "is_or_search": isOrSearch}
-	r, err := c.DoAPIPost(c.teamRoute(teamId)+"/posts/search", StringInterfaceToJSON(requestBody))
+	var route string
+	if teamId == "" {
+		route = c.postsRoute() + "/search"
+	} else {
+		route = c.teamRoute(teamId) + "/posts/search"
+	}
+	r, err := c.DoAPIPost(route, StringInterfaceToJSON(requestBody))
 	if err != nil {
 		return nil, BuildResponse(r), err
 	}
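The two hunks above make an empty teamId route the search to the global posts endpoint instead of building a per-team route. A hedged usage sketch of that behaviour from a caller's point of view; the server URL and token below are placeholders, not part of the change.

package main

import (
	"fmt"
	"log"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	client := model.NewAPIv4Client("https://mattermost.example.com") // placeholder URL
	client.SetToken("personal-access-token")                         // placeholder token

	// With an empty teamId, the patched client posts to the /posts/search route.
	results, _, err := client.SearchPostsWithMatches("", "deploy failed", true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d matching posts\n", len(results.Order))
}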
5  vendor/github.com/mattermost/mattermost-server/v6/model/cloud.go  (generated, vendored)
@@ -22,6 +22,7 @@ type BillingScheme string
 const (
 	BillingSchemePerSeat    = BillingScheme("per_seat")
 	BillingSchemeFlatFee    = BillingScheme("flat_fee")
+	BillingSchemeSalesServe = BillingScheme("sales_serve")
 )
 
 type RecurringInterval string
@@ -104,7 +105,7 @@ type Address struct {
 // PaymentMethod represents methods of payment for a customer.
 type PaymentMethod struct {
 	Type      string `json:"type"`
-	LastFour  int    `json:"last_four"`
+	LastFour  string `json:"last_four"`
 	ExpMonth  int    `json:"exp_month"`
 	ExpYear   int    `json:"exp_year"`
 	CardBrand string `json:"card_brand"`
@@ -169,7 +170,7 @@ type CWSWebhookPayload struct {
 
 type FailedPayment struct {
 	CardBrand      string `json:"card_brand"`
-	LastFour       int    `json:"last_four"`
+	LastFour       string `json:"last_four"`
 	FailureMessage string `json:"failure_message"`
 }
 
31  vendor/github.com/mattermost/mattermost-server/v6/model/config.go  (generated, vendored)
@@ -352,6 +352,7 @@ type ServiceSettings struct {
 	EnableBotAccountCreation *bool `access:"integrations_bot_accounts"`
 	EnableSVGs               *bool `access:"site_posts"`
 	EnableLatex              *bool `access:"site_posts"`
+	EnableInlineLatex        *bool `access:"site_posts"`
 	EnableAPIChannelDeletion *bool
 	EnableLocalMode          *bool
 	LocalModeSocketLocation  *string // telemetry: none
@@ -736,6 +737,10 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) {
 		}
 	}
 
+	if s.EnableInlineLatex == nil {
+		s.EnableInlineLatex = NewBool(true)
+	}
+
 	if s.EnableLocalMode == nil {
 		s.EnableLocalMode = NewBool(false)
 	}
@@ -2612,6 +2617,7 @@ func (s *DataRetentionSettings) SetDefaults() {
 type JobSettings struct {
 	RunJobs                  *bool `access:"write_restrictable,cloud_restrictable"`
 	RunScheduler             *bool `access:"write_restrictable,cloud_restrictable"`
+	CleanupJobsThresholdDays *int  `access:"write_restrictable,cloud_restrictable"`
 }
 
 func (s *JobSettings) SetDefaults() {
@@ -2622,6 +2628,10 @@ func (s *JobSettings) SetDefaults() {
 	if s.RunScheduler == nil {
 		s.RunScheduler = NewBool(true)
 	}
+
+	if s.CleanupJobsThresholdDays == nil {
+		s.CleanupJobsThresholdDays = NewInt(-1)
+	}
 }
 
 type CloudSettings struct {
@@ -3736,9 +3746,11 @@ func (o *Config) Sanitize() {
 		*o.LdapSettings.BindPassword = FakeSetting
 	}
 
+	if o.FileSettings.PublicLinkSalt != nil {
 		*o.FileSettings.PublicLinkSalt = FakeSetting
+	}
 
-	if *o.FileSettings.AmazonS3SecretAccessKey != "" {
+	if o.FileSettings.AmazonS3SecretAccessKey != nil && *o.FileSettings.AmazonS3SecretAccessKey != "" {
 		*o.FileSettings.AmazonS3SecretAccessKey = FakeSetting
 	}
 
@@ -3746,7 +3758,7 @@ func (o *Config) Sanitize() {
 		*o.EmailSettings.SMTPPassword = FakeSetting
 	}
 
-	if *o.GitLabSettings.Secret != "" {
+	if o.GitLabSettings.Secret != nil && *o.GitLabSettings.Secret != "" {
 		*o.GitLabSettings.Secret = FakeSetting
 	}
 
@@ -3762,10 +3774,17 @@ func (o *Config) Sanitize() {
 		*o.OpenIdSettings.Secret = FakeSetting
 	}
 
+	if o.SqlSettings.DataSource != nil {
 		*o.SqlSettings.DataSource = FakeSetting
-	*o.SqlSettings.AtRestEncryptKey = FakeSetting
+	}
+
+	if o.SqlSettings.AtRestEncryptKey != nil {
+		*o.SqlSettings.AtRestEncryptKey = FakeSetting
+	}
 
+	if o.ElasticsearchSettings.Password != nil {
 		*o.ElasticsearchSettings.Password = FakeSetting
+	}
 
 	for i := range o.SqlSettings.DataSourceReplicas {
 		o.SqlSettings.DataSourceReplicas[i] = FakeSetting
@@ -3775,7 +3794,9 @@ func (o *Config) Sanitize() {
 		o.SqlSettings.DataSourceSearchReplicas[i] = FakeSetting
 	}
 
-	if o.MessageExportSettings.GlobalRelaySettings.SMTPPassword != nil && *o.MessageExportSettings.GlobalRelaySettings.SMTPPassword != "" {
+	if o.MessageExportSettings.GlobalRelaySettings != nil &&
+		o.MessageExportSettings.GlobalRelaySettings.SMTPPassword != nil &&
+		*o.MessageExportSettings.GlobalRelaySettings.SMTPPassword != "" {
 		*o.MessageExportSettings.GlobalRelaySettings.SMTPPassword = FakeSetting
 	}
 
@@ -3783,8 +3804,10 @@ func (o *Config) Sanitize() {
 		*o.ServiceSettings.GfycatAPISecret = FakeSetting
 	}
 
+	if o.ServiceSettings.SplitKey != nil {
 		*o.ServiceSettings.SplitKey = FakeSetting
 	}
+}
 
 // structToMapFilteredByTag converts a struct into a map removing those fields that has the tag passed
 // as argument
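Most of the Sanitize hunks above follow the same shape: check the optional config pointer before dereferencing it. A generic sketch of that pattern; the helper name is ours for illustration and is not part of mattermost-server.

// sanitizeSecret mirrors the nil-guard pattern added in the hunks above:
// only blank out a secret when the config pointer is actually set.
func sanitizeSecret(field *string, fake string) {
	if field != nil && *field != "" {
		*field = fake
	}
}

// usage (sketch): sanitizeSecret(cfg.SqlSettings.DataSource, FakeSetting)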
27  vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go  (generated, vendored)
@@ -33,9 +33,6 @@ type FeatureFlags struct {
 	PluginApps       string `plugin_id:"com.mattermost.apps"`
 	PluginFocalboard string `plugin_id:"focalboard"`
 
-	// Enable timed dnd support for user status
-	TimedDND bool
-
 	PermalinkPreviews bool
 
 	// Enable the Global Header
@@ -43,6 +40,23 @@ type FeatureFlags struct {
 
 	// Enable different team menu button treatments, possible values = ("none", "by_team_name", "inverted_sidebar_bg_color")
 	AddChannelButton string
+
+	// Enable different treatments for first time users, possible values = ("none", "tour_point", "around_input")
+	PrewrittenMessages string
+
+	// Enable different treatments for first time users, possible values = ("none", "tips_and_next_steps")
+	DownloadAppsCTA string
+
+	// Determine whether when a user gets created, they'll have noisy notifications e.g. Send desktop notifications for all activity
+	NewAccountNoisy bool
+	// Enable Boards Unfurl Preview
+	BoardsUnfurl bool
+
+	// Enable Calls plugin support in the mobile app
+	CallsMobile bool
+
+	// Start A/B tour tips automatically, possible values = ("none", "auto")
+	AutoTour string
 }
 
 func (f *FeatureFlags) SetDefaults() {
@@ -54,10 +68,15 @@ func (f *FeatureFlags) SetDefaults() {
 	f.AppsEnabled = false
 	f.PluginApps = ""
 	f.PluginFocalboard = ""
-	f.TimedDND = false
 	f.PermalinkPreviews = true
 	f.GlobalHeader = true
 	f.AddChannelButton = "by_team_name"
+	f.PrewrittenMessages = "tour_point"
+	f.DownloadAppsCTA = "tips_and_next_steps"
+	f.NewAccountNoisy = false
+	f.BoardsUnfurl = true
+	f.CallsMobile = false
+	f.AutoTour = "none"
 }
 
 func (f *FeatureFlags) Plugins() map[string]string {
8  vendor/github.com/mattermost/mattermost-server/v6/model/integration_action.go  (generated, vendored)
@@ -115,6 +115,14 @@ func (p *PostAction) Equals(input *PostAction) bool {
 	}
 
 	// Compare PostActionIntegration
+
+	// If input is nil, then return true if original is also nil.
+	// Else return false.
+	if input.Integration == nil {
+		return p.Integration == nil
+	}
+
+	// Both are unequal and not nil.
 	if p.Integration.URL != input.Integration.URL {
 		return false
 	}
1  vendor/github.com/mattermost/mattermost-server/v6/model/post_embed.go  (generated, vendored)
@@ -9,6 +9,7 @@ const (
 	PostEmbedOpengraph PostEmbedType = "opengraph"
 	PostEmbedLink      PostEmbedType = "link"
 	PostEmbedPermalink PostEmbedType = "permalink"
+	PostEmbedBoards    PostEmbedType = "boards"
 )
 
 type PostEmbedType string
22  vendor/github.com/mattermost/mattermost-server/v6/model/session.go  (generated, vendored)
@@ -4,6 +4,7 @@
 package model
 
 import (
+	"net/http"
 	"strconv"
 	"strings"
 
@@ -78,6 +79,27 @@ func (s *Session) DeepCopy() *Session {
 	return &copySession
 }
 
+func (s *Session) IsValid() *AppError {
+	if !IsValidId(s.Id) {
+		return NewAppError("Session.IsValid", "model.session.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	if !IsValidId(s.UserId) {
+		return NewAppError("Session.IsValid", "model.session.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	if s.CreateAt == 0 {
+		return NewAppError("Session.IsValid", "model.session.is_valid.create_at.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	if len(s.Roles) > UserRolesMaxLength {
+		return NewAppError("Session.IsValid", "model.session.is_valid.roles_limit.app_error",
+			map[string]interface{}{"Limit": UserRolesMaxLength}, "session_id="+s.Id, http.StatusBadRequest)
+	}
+
+	return nil
+}
+
 func (s *Session) PreSave() {
 	if s.Id == "" {
 		s.Id = NewId()
1  vendor/github.com/mattermost/mattermost-server/v6/model/shared_channel.go  (generated, vendored)
@@ -238,6 +238,7 @@ func (scf *SharedChannelAttachment) IsValid() *AppError {
 type SharedChannelFilterOpts struct {
 	TeamId        string
 	CreatorId     string
+	MemberId      string
 	ExcludeHome   bool
 	ExcludeRemote bool
 }
6  vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go  (generated, vendored)
@@ -98,7 +98,6 @@ func TeamMemberWithErrorToString(o *TeamMemberWithError) string {
 }
 
 func (o *TeamMember) IsValid() *AppError {
-
 	if !IsValidId(o.TeamId) {
 		return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.team_id.app_error", nil, "", http.StatusBadRequest)
 	}
@@ -107,6 +106,11 @@ func (o *TeamMember) IsValid() *AppError {
 		return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
 	}
 
+	if len(o.Roles) > UserRolesMaxLength {
+		return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.roles_limit.app_error",
+			map[string]interface{}{"Limit": UserRolesMaxLength}, "", http.StatusBadRequest)
+	}
+
 	return nil
 }
 
7  vendor/github.com/mattermost/mattermost-server/v6/model/user.go  (generated, vendored)
@@ -60,6 +60,7 @@ const (
 	UserPasswordMaxLength = 72
 	UserLocaleMaxLength   = 5
 	UserTimezoneMaxRunes  = 256
+	UserRolesMaxLength    = 256
 )
 
 //msgp:tuple User
@@ -261,7 +262,6 @@ func (u *User) DeepCopy() *User {
 // IsValid validates the user and returns an error if it isn't configured
 // correctly.
 func (u *User) IsValid() *AppError {
-
 	if !IsValidId(u.Id) {
 		return InvalidUserError("id", "")
 	}
@@ -332,6 +332,11 @@ func (u *User) IsValid() *AppError {
 		}
 	}
 
+	if len(u.Roles) > UserRolesMaxLength {
+		return NewAppError("User.IsValid", "model.user.is_valid.roles_limit.app_error",
+			map[string]interface{}{"Limit": UserRolesMaxLength}, "user_id="+u.Id, http.StatusBadRequest)
+	}
+
 	return nil
 }
 
26  vendor/github.com/mattermost/mattermost-server/v6/model/utils.go  (generated, vendored)
@@ -6,6 +6,7 @@ package model
 import (
 	"bytes"
 	"crypto/rand"
+	"database/sql/driver"
 	"encoding/base32"
 	"encoding/json"
 	"fmt"
@@ -24,6 +25,7 @@ import (
 
 	"github.com/mattermost/mattermost-server/v6/shared/i18n"
 	"github.com/pborman/uuid"
+	"github.com/pkg/errors"
 )
 
 const (
@@ -72,6 +74,30 @@ func (sa StringArray) Equals(input StringArray) bool {
 	return true
 }
 
+// Value converts StringArray to database value
+func (sa StringArray) Value() (driver.Value, error) {
+	return json.Marshal(sa)
+}
+
+// Scan converts database column value to StringArray
+func (sa *StringArray) Scan(value interface{}) error {
+	if value == nil {
+		return nil
+	}
+
+	buf, ok := value.([]byte)
+	if ok {
+		return json.Unmarshal(buf, sa)
+	}
+
+	str, ok := value.(string)
+	if ok {
+		return json.Unmarshal([]byte(str), sa)
+	}
+
+	return errors.New("received value is neither a byte slice nor string")
+}
+
 var translateFunc i18n.TranslateFunc
 var translateFuncOnce sync.Once
 
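The Value/Scan pair added above makes StringArray usable directly as a SQL column value, serialized as JSON. A small hedged round-trip sketch of those two methods outside of any real database:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	roles := model.StringArray{"system_user", "system_admin"}

	// driver.Valuer: what the SQL driver would receive on write (JSON bytes).
	v, err := roles.Value()
	if err != nil {
		panic(err)
	}
	fmt.Printf("stored as: %s\n", v)

	// sql.Scanner: what happens when the column is read back.
	var decoded model.StringArray
	if err := decoded.Scan(v); err != nil {
		panic(err)
	}
	fmt.Println(decoded)
}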
3  vendor/github.com/mattermost/mattermost-server/v6/model/version.go  (generated, vendored)
@@ -13,8 +13,7 @@ import (
 // It should be maintained in chronological order with most current
 // release at the front of the list.
 var versions = []string{
-	"6.0.2",
-	"6.0.1",
+	"6.1.0",
 	"6.0.0",
 	"5.39.0",
 	"5.38.0",
32  vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/mlog.go  (generated, vendored)
@@ -49,6 +49,9 @@ type LogRec = logr.LogRec
 type LogCloner = logr.LogCloner
 type MetricsCollector = logr.MetricsCollector
 type TargetCfg = logrcfg.TargetCfg
+type TargetFactory = logrcfg.TargetFactory
+type FormatterFactory = logrcfg.FormatterFactory
+type Factories = logrcfg.Factories
 type Sugar = logr.Sugar
 
 // LoggerConfiguration is a map of LogTarget configurations.
@@ -179,7 +182,10 @@ func NewLogger(options ...Option) (*Logger, error) {
 // For each case JSON containing log targets is provided. Target name collisions are resolved
 // using the following precedence:
 // cfgFile > cfgEscaped
-func (l *Logger) Configure(cfgFile string, cfgEscaped string) error {
+//
+// An optional set of factories can be provided which will be called to create any target
+// types or formatters not built-in.
+func (l *Logger) Configure(cfgFile string, cfgEscaped string, factories *Factories) error {
 	if atomic.LoadInt32(l.lockConfig) != 0 {
 		return ErrConfigurationLock
 	}
@@ -213,16 +219,18 @@ func (l *Logger) Configure(cfgFile string, cfgEscaped string) error {
 		return nil
 	}
 
-	return logrcfg.ConfigureTargets(l.log.Logr(), cfgMap.toTargetCfg(), nil)
+	return logrcfg.ConfigureTargets(l.log.Logr(), cfgMap.toTargetCfg(), factories)
 }
 
 // ConfigureTargets provides a new configuration for this logger via a `LoggerConfig` map.
 // Typically `mlog.Configure` is used instead which accepts JSON formatted configuration.
-func (l *Logger) ConfigureTargets(cfg LoggerConfiguration) error {
+// An optional set of factories can be provided which will be called to create any target
+// types or formatters not built-in.
+func (l *Logger) ConfigureTargets(cfg LoggerConfiguration, factories *Factories) error {
 	if atomic.LoadInt32(l.lockConfig) != 0 {
 		return ErrConfigurationLock
 	}
-	return logrcfg.ConfigureTargets(l.log.Logr(), cfg.toTargetCfg(), nil)
+	return logrcfg.ConfigureTargets(l.log.Logr(), cfg.toTargetCfg(), factories)
 }
 
 // LockConfiguration disallows further configuration changes until `UnlockConfiguration`
@@ -405,6 +413,22 @@ func GetPackageName(f string) string {
 	return f
 }
 
+// ShouldQuote returns true if val contains any characters that might be unsafe
+// when injecting log output into an aggregator, viewer or report.
+// Returning true means that val should be surrounded by quotation marks before being
+// output into logs.
+func ShouldQuote(val string) bool {
+	for _, c := range val {
+		if !((c >= '0' && c <= '9') ||
+			(c >= 'a' && c <= 'z') ||
+			(c >= 'A' && c <= 'Z') ||
+			c == '-' || c == '.' || c == '_' || c == '/' || c == '@' || c == '^' || c == '+') {
+			return true
+		}
+	}
+	return false
+}
+
 type logWriter struct {
 	logger *Logger
 }
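ShouldQuote, added above, only reports whether quoting is needed; the caller decides how to quote. A small hedged sketch of how a formatter might use it; the wrapper function is ours, not part of mlog.

package main

import (
	"fmt"
	"strconv"

	"github.com/mattermost/mattermost-server/v6/shared/mlog"
)

// renderValue quotes a field value only when mlog.ShouldQuote flags it as unsafe.
func renderValue(val string) string {
	if mlog.ShouldQuote(val) {
		return strconv.Quote(val)
	}
	return val
}

func main() {
	fmt.Println(renderValue("plain-value"))          // printed as-is
	fmt.Println(renderValue("needs quoting: a b;c")) // wrapped in quotes
}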
15  vendor/github.com/mattn/go-colorable/.travis.yml  (generated, vendored)
@@ -1,15 +0,0 @@
-language: go
-sudo: false
-go:
-  - 1.13.x
-  - tip
-
-before_install:
-  - go get -t -v ./...
-
-script:
-  - ./go.test.sh
-
-after_success:
-  - bash <(curl -s https://codecov.io/bash)
2  vendor/github.com/mattn/go-colorable/README.md  (generated, vendored)
@@ -1,6 +1,6 @@
 # go-colorable
 
-[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable)
+[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest)
 [![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable)
 [![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
 [![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
1  vendor/github.com/mattn/go-colorable/colorable_appengine.go  (generated, vendored)
@@ -1,3 +1,4 @@
+//go:build appengine
 // +build appengine
 
 package colorable
4  vendor/github.com/mattn/go-colorable/colorable_others.go  (generated, vendored)
@@ -1,5 +1,5 @@
-// +build !windows
-// +build !appengine
+//go:build !windows && !appengine
+// +build !windows,!appengine
 
 package colorable
 
14  vendor/github.com/mattn/go-colorable/colorable_windows.go  (generated, vendored)
@@ -1,5 +1,5 @@
-// +build windows
-// +build !appengine
+//go:build windows && !appengine
+// +build windows,!appengine
 
 package colorable
 
@@ -452,18 +452,22 @@ func (w *Writer) Write(data []byte) (n int, err error) {
 	} else {
 		er = bytes.NewReader(data)
 	}
-	var bw [1]byte
+	var plaintext bytes.Buffer
 loop:
 	for {
 		c1, err := er.ReadByte()
 		if err != nil {
+			plaintext.WriteTo(w.out)
 			break loop
 		}
 		if c1 != 0x1b {
-			bw[0] = c1
-			w.out.Write(bw[:])
+			plaintext.WriteByte(c1)
 			continue
 		}
+		_, err = plaintext.WriteTo(w.out)
+		if err != nil {
+			break loop
+		}
 		c2, err := er.ReadByte()
 		if err != nil {
 			break loop
10  vendor/github.com/mattn/go-colorable/noncolorable.go  (generated, vendored)
@@ -18,18 +18,22 @@ func NewNonColorable(w io.Writer) io.Writer {
 // Write writes data on console
 func (w *NonColorable) Write(data []byte) (n int, err error) {
 	er := bytes.NewReader(data)
-	var bw [1]byte
+	var plaintext bytes.Buffer
 loop:
 	for {
 		c1, err := er.ReadByte()
 		if err != nil {
+			plaintext.WriteTo(w.out)
 			break loop
 		}
		if c1 != 0x1b {
-			bw[0] = c1
-			w.out.Write(bw[:])
+			plaintext.WriteByte(c1)
 			continue
 		}
+		_, err = plaintext.WriteTo(w.out)
+		if err != nil {
+			break loop
+		}
 		c2, err := er.ReadByte()
 		if err != nil {
 			break loop
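Both go-colorable writers now accumulate plain text in a bytes.Buffer and flush it in batches instead of writing one byte at a time; behaviour is unchanged for callers. A small sketch of the existing NewNonColorable entry point, which strips ANSI escape sequences from the output:

package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-colorable"
)

func main() {
	// The escape sequences below are removed; the plain text between them is
	// now buffered and written out in larger chunks by the patched Write.
	w := colorable.NewNonColorable(os.Stdout)
	fmt.Fprintln(w, "\x1b[31mred\x1b[0m plain")
}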
10  vendor/github.com/minio/minio-go/v7/README.md  (generated, vendored)
@@ -1,4 +1,4 @@
-# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
+# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
 
 The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
 
@@ -171,9 +171,9 @@ The full API Reference is available here.
 * [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy)
 
 ### API Reference : Client custom settings
-* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
-* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn)
-* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff)
+* [`SetAppInfo`](https://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
+* [`TraceOn`](https://docs.min.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](https://docs.min.io/docs/golang-client-api-reference#TraceOff)
 
 ## Full Examples
 
@@ -248,4 +248,4 @@ The full API Reference is available here.
 [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
 
 ## License
-This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.
+This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.
51  vendor/github.com/minio/minio-go/v7/api-bucket-replication.go  (generated, vendored)
@@ -27,6 +27,7 @@ import (
 	"net/url"
 	"time"
 
+	"github.com/google/uuid"
 	"github.com/minio/minio-go/v7/pkg/replication"
 	"github.com/minio/minio-go/v7/pkg/s3utils"
 )
@@ -187,12 +188,39 @@ func (c Client) GetBucketReplicationMetrics(ctx context.Context, bucketName stri
 	return s, nil
 }
 
+// mustGetUUID - get a random UUID.
+func mustGetUUID() string {
+	u, err := uuid.NewRandom()
+	if err != nil {
+		return ""
+	}
+	return u.String()
+}
+
 // ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
 // is enabled in the replication config
-func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (resetID string, err error) {
+func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) {
+	rID = mustGetUUID()
+	_, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID)
+	if err != nil {
+		return rID, err
+	}
+	return rID, nil
+}
+
+// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
+// is enabled in the replication config
+func (c Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (rinfo replication.ResyncTargetsInfo, err error) {
+	rID := mustGetUUID()
+	return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, rID)
+}
+
+// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
+// is enabled in the replication config
+func (c Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string, resetID string) (rinfo replication.ResyncTargetsInfo, err error) {
 	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return "", err
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return
 	}
 	// Get resources properly escaped and lined up before
 	// using them in http request.
@@ -201,7 +229,10 @@ func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, o
 	if olderThan > 0 {
 		urlValues.Set("older-than", olderThan.String())
 	}
+	if tgtArn != "" {
+		urlValues.Set("arn", tgtArn)
+	}
+	urlValues.Set("reset-id", resetID)
 	// Execute GET on bucket to get replication config.
 	resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
 		bucketName: bucketName,
@@ -210,19 +241,19 @@ func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, o
 
 	defer closeResponse(resp)
 	if err != nil {
-		return "", err
+		return rinfo, err
 	}
 
 	if resp.StatusCode != http.StatusOK {
-		return "", httpRespToErrorResponse(resp, bucketName, "")
+		return rinfo, httpRespToErrorResponse(resp, bucketName, "")
 	}
 	respBytes, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		return "", err
+		return rinfo, err
 	}
 
-	if err := json.Unmarshal(respBytes, &resetID); err != nil {
-		return "", err
+	if err := json.Unmarshal(respBytes, &rinfo); err != nil {
+		return rinfo, err
 	}
-	return resetID, nil
+	return rinfo, nil
 }
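The newly exported ResetBucketReplicationOnTarget scopes a reset to a single replication target ARN while reusing the same internal helper. A hedged usage sketch; the endpoint, credentials and ARN below are placeholders.

package main

import (
	"context"
	"log"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials, not part of the change above.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Re-replicate objects older than 24h to one target only (placeholder ARN).
	info, err := client.ResetBucketReplicationOnTarget(context.Background(),
		"source-bucket", 24*time.Hour, "arn:minio:replication::placeholder:dest-bucket")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("reset started: %+v", info)
}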
12  vendor/github.com/minio/minio-go/v7/api-compose-object.go  (generated, vendored)
@@ -223,6 +223,16 @@ func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuck
 	if dstOpts.Internal.ReplicationRequest {
 		headers.Set(minIOBucketReplicationRequest, "")
 	}
+	if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
+		headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
+	}
+	if !dstOpts.Internal.RetentionTimestamp.IsZero() {
+		headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
+	}
+	if !dstOpts.Internal.TaggingTimestamp.IsZero() {
+		headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
+	}
+
 	if len(dstOpts.UserTags) != 0 {
 		headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
 	}
@@ -513,7 +523,7 @@ func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...
 
 	// 4. Make final complete-multipart request.
 	uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
-		completeMultipartUpload{Parts: objParts})
+		completeMultipartUpload{Parts: objParts}, PutObjectOptions{})
 	if err != nil {
 		return UploadInfo{}, err
 	}
24  vendor/github.com/minio/minio-go/v7/api-datatypes.go  (generated, vendored)
@@ -64,8 +64,9 @@ func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
 
 // Owner name.
 type Owner struct {
-	DisplayName string `json:"name"`
-	ID          string `json:"id"`
+	XMLName     xml.Name `xml:"Owner" json:"owner"`
+	DisplayName string   `xml:"ID" json:"name"`
+	ID          string   `xml:"DisplayName" json:"id"`
 }
 
 // UploadInfo contains information about the
@@ -85,6 +86,14 @@ type UploadInfo struct {
 	ExpirationRuleID string
 }
 
+// RestoreInfo contains information of the restore operation of an archived object
+type RestoreInfo struct {
+	// Is the restoring operation is still ongoing
+	OngoingRestore bool
+	// When the restored copy of the archived object will be removed
+	ExpiryTime time.Time
+}
+
 // ObjectInfo container for object metadata.
 type ObjectInfo struct {
 	// An ETag is optionally set to md5sum of an object. In case of multipart objects,
@@ -115,14 +124,7 @@ type ObjectInfo struct {
 	Owner Owner
 
 	// ACL grant.
-	Grant []struct {
-		Grantee struct {
-			ID          string `xml:"ID"`
-			DisplayName string `xml:"DisplayName"`
-			URI         string `xml:"URI"`
-		} `xml:"Grantee"`
-		Permission string `xml:"Permission"`
-	} `xml:"Grant"`
+	Grant []Grant
 
 	// The class of storage used to store the object.
 	StorageClass string `json:"storageClass"`
@@ -144,6 +146,8 @@ type ObjectInfo struct {
 	Expiration       time.Time
 	ExpirationRuleID string
 
+	Restore *RestoreInfo
+
 	// Error
 	Err error `json:"-"`
 }
33  vendor/github.com/minio/minio-go/v7/api-get-object-acl.go  (generated, vendored)
@@ -19,25 +19,36 @@ package minio
 
 import (
 	"context"
+	"encoding/xml"
 	"net/http"
 	"net/url"
 )
 
-type accessControlPolicy struct {
-	Owner struct {
-		ID          string `xml:"ID"`
-		DisplayName string `xml:"DisplayName"`
-	} `xml:"Owner"`
-	AccessControlList struct {
-		Grant []struct {
-			Grantee struct {
-				ID          string `xml:"ID"`
-				DisplayName string `xml:"DisplayName"`
-				URI         string `xml:"URI"`
-			} `xml:"Grantee"`
-			Permission string `xml:"Permission"`
-		} `xml:"Grant"`
-	} `xml:"AccessControlList"`
+// Grantee represents the person being granted permissions.
+type Grantee struct {
+	XMLName     xml.Name `xml:"Grantee"`
+	ID          string   `xml:"ID"`
+	DisplayName string   `xml:"DisplayName"`
+	URI         string   `xml:"URI"`
+}
+
+// Grant holds grant information
+type Grant struct {
+	XMLName    xml.Name `xml:"Grant"`
+	Grantee    Grantee
+	Permission string `xml:"Permission"`
+}
+
+// AccessControlList contains the set of grantees and the permissions assigned to each grantee.
+type AccessControlList struct {
+	XMLName    xml.Name `xml:"AccessControlList"`
+	Grant      []Grant
+	Permission string `xml:"Permission"`
+}
+
+type accessControlPolicy struct {
+	Owner
+	AccessControlList
 }
 
 // GetObjectACL get object ACLs
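With Grantee, Grant and AccessControlList now exported named types, the ACL returned by GetObjectACL can be ranged over directly. A hedged sketch; the bucket and object names are placeholders and the client is assumed to be constructed elsewhere (imports: context, fmt, github.com/minio/minio-go/v7).

// listGrants prints each grant on an object's ACL using the exported types above.
func listGrants(ctx context.Context, client *minio.Client) error {
	objInfo, err := client.GetObjectACL(ctx, "my-bucket", "report.csv") // placeholder names
	if err != nil {
		return err
	}
	for _, g := range objInfo.Grant {
		fmt.Printf("grantee=%q permission=%s\n", g.Grantee.DisplayName, g.Permission)
	}
	return nil
}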
77  vendor/github.com/minio/minio-go/v7/api-list.go  (generated, vendored)
@@ -56,14 +56,13 @@ func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
 	return listAllMyBucketsResult.Buckets.Bucket, nil
 }
 
-/// Bucket Read Operations.
-
-func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix string, recursive, metadata bool, maxKeys int) <-chan ObjectInfo {
+/// Bucket List Operations.
+func (c Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
 	// Allocate new list objects channel.
 	objectStatCh := make(chan ObjectInfo, 1)
 	// Default listing is delimited at "/"
 	delimiter := "/"
-	if recursive {
+	if opts.Recursive {
 		// If recursive we do not delimit.
 		delimiter = ""
 	}
@@ -81,7 +80,7 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix stri
 	}
 
 	// Validate incoming object prefix.
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+	if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
 		defer close(objectStatCh)
 		objectStatCh <- ObjectInfo{
 			Err: err,
@@ -96,8 +95,8 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix stri
 		var continuationToken string
 		for {
 			// Get list of objects a maximum of 1000 per request.
-			result, err := c.listObjectsV2Query(ctx, bucketName, objectPrefix, continuationToken,
-				fetchOwner, metadata, delimiter, maxKeys)
+			result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken,
+				fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers)
 			if err != nil {
 				objectStatCh <- ObjectInfo{
 					Err: err,
@@ -148,12 +147,13 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix stri
 // You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
 // request parameters :-
 // ---------
-// ?continuation-token - Used to continue iterating over a set of objects
-// ?delimiter - A delimiter is a character you use to group keys.
 // ?prefix - Limits the response to keys that begin with the specified prefix.
-// ?max-keys - Sets the maximum number of keys returned in the response body.
+// ?continuation-token - Used to continue iterating over a set of objects
 // ?metadata - Specifies if we want metadata for the objects as part of list operation.
-func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?start-after - Sets a marker to start listing lexically at this key onwards.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) {
 	// Validate bucket name.
 	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return ListBucketV2Result{}, err
@@ -173,6 +173,11 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix
 		urlValues.Set("metadata", "true")
 	}
 
+	// Set this conditionally if asked
+	if startAfter != "" {
+		urlValues.Set("start-after", startAfter)
+	}
+
 	// Always set encoding-type in ListObjects V2
 	urlValues.Set("encoding-type", "url")
 
@@ -202,6 +207,7 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix
 		bucketName:       bucketName,
 		queryValues:      urlValues,
 		contentSHA256Hex: emptySHA256Hex,
+		customHeader:     headers,
 	})
 	defer closeResponse(resp)
 	if err != nil {
@@ -246,12 +252,12 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix
 	return listBucketResult, nil
 }
 
-func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string, recursive bool, maxKeys int) <-chan ObjectInfo {
+func (c Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
 	// Allocate new list objects channel.
 	objectStatCh := make(chan ObjectInfo, 1)
 	// Default listing is delimited at "/"
 	delimiter := "/"
-	if recursive {
+	if opts.Recursive {
 		// If recursive we do not delimit.
 		delimiter = ""
 	}
@@ -264,7 +270,7 @@ func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string
 		return objectStatCh
 	}
 	// Validate incoming object prefix.
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+	if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
 		defer close(objectStatCh)
 		objectStatCh <- ObjectInfo{
 			Err: err,
@@ -276,10 +282,10 @@ func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string
 	go func(objectStatCh chan<- ObjectInfo) {
 		defer close(objectStatCh)
 
-		marker := ""
+		marker := opts.StartAfter
 		for {
 			// Get list of objects a maximum of 1000 per request.
-			result, err := c.listObjectsQuery(ctx, bucketName, objectPrefix, marker, delimiter, maxKeys)
+			result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers)
 			if err != nil {
 				objectStatCh <- ObjectInfo{
 					Err: err,
@@ -326,12 +332,12 @@ func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string
 	return objectStatCh
 }
 
-func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix string, recursive bool, maxKeys int) <-chan ObjectInfo {
+func (c Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
 	// Allocate new list objects channel.
 	resultCh := make(chan ObjectInfo, 1)
 	// Default listing is delimited at "/"
 	delimiter := "/"
-	if recursive {
+	if opts.Recursive {
 		// If recursive we do not delimit.
 		delimiter = ""
 	}
@@ -346,7 +352,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin
 	}
 
 	// Validate incoming object prefix.
-	if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil {
+	if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
 		defer close(resultCh)
 		resultCh <- ObjectInfo{
 			Err: err,
@@ -365,7 +371,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin
 
 		for {
 			// Get list of objects a maximum of 1000 per request.
-			result, err := c.listObjectVersionsQuery(ctx, bucketName, prefix, keyMarker, versionIDMarker, delimiter, maxKeys)
+			result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers)
 			if err != nil {
 				resultCh <- ObjectInfo{
 					Err: err,
@@ -384,7 +390,6 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin
 					StorageClass: version.StorageClass,
 					IsLatest:     version.IsLatest,
 					VersionID:    version.VersionID,
-
 					IsDeleteMarker: version.isDeleteMarker,
 				}
 				select {
@@ -438,7 +443,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin
 // ?delimiter - A delimiter is a character you use to group keys.
 // ?prefix - Limits the response to keys that begin with the specified prefix.
 // ?max-keys - Sets the maximum number of keys returned in the response body.
-func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int) (ListVersionsResult, error) {
+func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int, headers http.Header) (ListVersionsResult, error) {
 	// Validate bucket name.
 	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return ListVersionsResult{}, err
@@ -483,6 +488,7 @@ func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix,
 		bucketName: bucketName,
queryValues: urlValues,
|
queryValues: urlValues,
|
||||||
contentSHA256Hex: emptySHA256Hex,
|
contentSHA256Hex: emptySHA256Hex,
|
||||||
|
customHeader: headers,
|
||||||
})
|
})
|
||||||
defer closeResponse(resp)
|
defer closeResponse(resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -534,7 +540,7 @@ func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix,
|
|||||||
// ?delimiter - A delimiter is a character you use to group keys.
|
// ?delimiter - A delimiter is a character you use to group keys.
|
||||||
// ?prefix - Limits the response to keys that begin with the specified prefix.
|
// ?prefix - Limits the response to keys that begin with the specified prefix.
|
||||||
// ?max-keys - Sets the maximum number of keys returned in the response body.
|
// ?max-keys - Sets the maximum number of keys returned in the response body.
|
||||||
func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
|
func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) {
|
||||||
// Validate bucket name.
|
// Validate bucket name.
|
||||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||||
return ListBucketResult{}, err
|
return ListBucketResult{}, err
|
||||||
@ -571,6 +577,7 @@ func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix,
|
|||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
queryValues: urlValues,
|
queryValues: urlValues,
|
||||||
contentSHA256Hex: emptySHA256Hex,
|
contentSHA256Hex: emptySHA256Hex,
|
||||||
|
customHeader: headers,
|
||||||
})
|
})
|
||||||
defer closeResponse(resp)
|
defer closeResponse(resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -626,9 +633,25 @@ type ListObjectsOptions struct {
|
|||||||
// batch, advanced use-case not useful for most
|
// batch, advanced use-case not useful for most
|
||||||
// applications
|
// applications
|
||||||
MaxKeys int
|
MaxKeys int
|
||||||
|
// StartAfter start listing lexically at this
|
||||||
|
// object onwards, this value can also be set
|
||||||
|
// for Marker when `UseV1` is set to true.
|
||||||
|
StartAfter string
|
||||||
|
|
||||||
// Use the deprecated list objects V1 API
|
// Use the deprecated list objects V1 API
|
||||||
UseV1 bool
|
UseV1 bool
|
||||||
|
|
||||||
|
headers http.Header
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set adds a key value pair to the options. The
|
||||||
|
// key-value pair will be part of the HTTP GET request
|
||||||
|
// headers.
|
||||||
|
func (o *ListObjectsOptions) Set(key, value string) {
|
||||||
|
if o.headers == nil {
|
||||||
|
o.headers = make(http.Header)
|
||||||
|
}
|
||||||
|
o.headers.Set(key, value)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListObjects returns objects list after evaluating the passed options.
|
// ListObjects returns objects list after evaluating the passed options.
|
||||||
@ -640,22 +663,22 @@ type ListObjectsOptions struct {
|
|||||||
//
|
//
|
||||||
func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
|
func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
|
||||||
if opts.WithVersions {
|
if opts.WithVersions {
|
||||||
return c.listObjectVersions(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
|
return c.listObjectVersions(ctx, bucketName, opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use legacy list objects v1 API
|
// Use legacy list objects v1 API
|
||||||
if opts.UseV1 {
|
if opts.UseV1 {
|
||||||
return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
|
return c.listObjects(ctx, bucketName, opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check whether this is snowball region, if yes ListObjectsV2 doesn't work, fallback to listObjectsV1.
|
// Check whether this is snowball region, if yes ListObjectsV2 doesn't work, fallback to listObjectsV1.
|
||||||
if location, ok := c.bucketLocCache.Get(bucketName); ok {
|
if location, ok := c.bucketLocCache.Get(bucketName); ok {
|
||||||
if location == "snowball" {
|
if location == "snowball" {
|
||||||
return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
|
return c.listObjects(ctx, bucketName, opts)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.listObjectsV2(ctx, bucketName, opts.Prefix, opts.Recursive, opts.WithMetadata, opts.MaxKeys)
|
return c.listObjectsV2(ctx, bucketName, opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListIncompleteUploads - List incompletely uploaded multipart objects.
|
// ListIncompleteUploads - List incompletely uploaded multipart objects.
|
||||||
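As a usage sketch (not part of the vendored diff itself), the new StartAfter field and the Set header hook on ListObjectsOptions could be exercised as below; the endpoint, credentials, bucket, prefix and header values are placeholders:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials; replace with real values.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	opts := minio.ListObjectsOptions{
		Prefix:     "logs/",
		Recursive:  true,
		StartAfter: "logs/2021-11-01", // begin listing lexically after this key
	}
	// Optional extra request header, carried on every list request internally.
	opts.Set("X-Request-Id", "example-123")

	for obj := range client.ListObjects(context.Background(), "my-bucket", opts) {
		if obj.Err != nil {
			log.Fatal(obj.Err)
		}
		log.Println(obj.Key, obj.Size)
	}
}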
vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go (5 changes; generated, vendored)

@@ -176,7 +176,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje
 	// Sort all completed parts.
 	sort.Sort(completedParts(complMultipartUpload.Parts))

-	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
+	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
 	if err != nil {
 		return UploadInfo{}, err
 	}

@@ -309,7 +309,7 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID

 // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
 func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
-	complete completeMultipartUpload) (UploadInfo, error) {
+	complete completeMultipartUpload, opts PutObjectOptions) (UploadInfo, error) {
 	// Input validation.
 	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return UploadInfo{}, err

@@ -336,6 +336,7 @@ func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectN
 		contentBody:      completeMultipartUploadBuffer,
 		contentLength:    int64(len(completeMultipartUploadBytes)),
 		contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
+		customHeader:     opts.Header(),
 	}

 	// Execute POST to complete multipart upload for an objectName.
vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go (4 changes; generated, vendored)

@@ -231,7 +231,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa
 	// Sort all completed parts.
 	sort.Sort(completedParts(complMultipartUpload.Parts))

-	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
+	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
 	if err != nil {
 		return UploadInfo{}, err
 	}

@@ -358,7 +358,7 @@ func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bu
 	// Sort all completed parts.
 	sort.Sort(completedParts(complMultipartUpload.Parts))

-	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
+	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
 	if err != nil {
 		return UploadInfo{}, err
 	}
vendor/github.com/minio/minio-go/v7/api-put-object.go (15 changes; generated, vendored)

@@ -60,6 +60,9 @@ type AdvancedPutOptions struct {
 	ReplicationStatus  ReplicationStatus
 	SourceMTime        time.Time
 	ReplicationRequest bool
+	RetentionTimestamp time.Time
+	TaggingTimestamp   time.Time
+	LegalholdTimestamp time.Time
 }

 // PutObjectOptions represents options specified by user for PutObject call

@@ -156,6 +159,16 @@ func (opts PutObjectOptions) Header() (header http.Header) {
 	if opts.Internal.ReplicationRequest {
 		header.Set(minIOBucketReplicationRequest, "")
 	}
+	if !opts.Internal.LegalholdTimestamp.IsZero() {
+		header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
+	}
+	if !opts.Internal.RetentionTimestamp.IsZero() {
+		header.Set(minIOBucketReplicationObjectRetentionTimestamp, opts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
+	}
+	if !opts.Internal.TaggingTimestamp.IsZero() {
+		header.Set(minIOBucketReplicationTaggingTimestamp, opts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
+	}
+
 	if len(opts.UserTags) != 0 {
 		header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
 	}

@@ -360,7 +373,7 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName
 	// Sort all completed parts.
 	sort.Sort(completedParts(complMultipartUpload.Parts))

-	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
+	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
 	if err != nil {
 		return UploadInfo{}, err
 	}
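A short sketch of how the new internal replication timestamps surface as request headers through Header(); the timestamp values are illustrative and the Internal options are only meaningful for server-side replication traffic:

import (
	"time"

	"github.com/minio/minio-go/v7"
)

func replicationTimestampHeaders() {
	opts := minio.PutObjectOptions{
		Internal: minio.AdvancedPutOptions{
			RetentionTimestamp: time.Now().UTC(), // illustrative values
			TaggingTimestamp:   time.Now().UTC(),
			LegalholdTimestamp: time.Now().UTC(),
		},
	}
	// Header() now also emits the X-Minio-Source-Replication-*-Timestamp
	// headers in RFC3339Nano format alongside the existing replication headers.
	_ = opts.Header()
}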
vendor/github.com/minio/minio-go/v7/api-remove.go (48 changes; generated, vendored)

@@ -29,6 +29,50 @@ import (
 	"github.com/minio/minio-go/v7/pkg/s3utils"
 )

+// BucketOptions special headers to purge buckets, only
+// useful when endpoint is MinIO
+type BucketOptions struct {
+	ForceDelete bool
+}
+
+// RemoveBucketWithOptions deletes the bucket name.
+//
+// All objects (including all object versions and delete markers)
+// in the bucket will be deleted forcibly if bucket options set
+// ForceDelete to 'true'.
+func (c Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts BucketOptions) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// Build headers.
+	headers := make(http.Header)
+	if opts.ForceDelete {
+		headers.Set(minIOForceDelete, "true")
+	}
+
+	// Execute DELETE on bucket.
+	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+		bucketName:       bucketName,
+		contentSHA256Hex: emptySHA256Hex,
+		customHeader:     headers,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+
+	// Remove the location from cache on a successful delete.
+	c.bucketLocCache.Delete(bucketName)
+	return nil
+}
+
 // RemoveBucket deletes the bucket name.
 //
 // All objects (including all object versions and delete markers).

@@ -69,6 +113,7 @@ type AdvancedRemoveOptions struct {

 // RemoveObjectOptions represents options specified by user for RemoveObject call
 type RemoveObjectOptions struct {
+	ForceDelete      bool
 	GovernanceBypass bool
 	VersionID        string
 	Internal         AdvancedRemoveOptions

@@ -116,6 +161,9 @@ func (c Client) removeObject(ctx context.Context, bucketName, objectName string,
 	if opts.Internal.ReplicationRequest {
 		headers.Set(minIOBucketReplicationRequest, "")
 	}
+	if opts.ForceDelete {
+		headers.Set(minIOForceDelete, "true")
+	}
 	// Execute DELETE on objectName.
 	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
 		bucketName: bucketName,
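A minimal usage sketch of the new force-delete options (MinIO-specific extension); `client` and `ctx` are assumed to exist already and the bucket/object names are placeholders:

// Force-delete a bucket together with all its objects and versions.
err := client.RemoveBucketWithOptions(ctx, "scratch-bucket", minio.BucketOptions{ForceDelete: true})
if err != nil {
	log.Fatal(err)
}

// Force-delete a single object using the new RemoveObjectOptions field.
err = client.RemoveObject(ctx, "my-bucket", "tmp/report.csv", minio.RemoveObjectOptions{ForceDelete: true})
if err != nil {
	log.Fatal(err)
}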
vendor/github.com/minio/minio-go/v7/api-restore.go (new file, 182 lines; generated, vendored)

@@ -0,0 +1,182 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2018-2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"net/http"
+	"net/url"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+	"github.com/minio/minio-go/v7/pkg/tags"
+)
+
+// RestoreType represents the restore request type
+type RestoreType string
+
+const (
+	// RestoreSelect represents the restore SELECT operation
+	RestoreSelect = RestoreType("SELECT")
+)
+
+// TierType represents a retrieval tier
+type TierType string
+
+const (
+	// TierStandard is the standard retrieval tier
+	TierStandard = TierType("Standard")
+	// TierBulk is the bulk retrieval tier
+	TierBulk = TierType("Bulk")
+	// TierExpedited is the expedited retrieval tier
+	TierExpedited = TierType("Expedited")
+)
+
+// GlacierJobParameters represents the retrieval tier parameter
+type GlacierJobParameters struct {
+	Tier TierType
+}
+
+// Encryption contains the type of server-side encryption used during object retrieval
+type Encryption struct {
+	EncryptionType string
+	KMSContext     string
+	KMSKeyID       string `xml:"KMSKeyId"`
+}
+
+// MetadataEntry represents a metadata information of the restored object.
+type MetadataEntry struct {
+	Name  string
+	Value string
+}
+
+// S3 holds properties of the copy of the archived object
+type S3 struct {
+	AccessControlList *AccessControlList `xml:"AccessControlList,omiempty"`
+	BucketName        string
+	Prefix            string
+	CannedACL         *string        `xml:"CannedACL,omitempty"`
+	Encryption        *Encryption    `xml:"Encryption,omitempty"`
+	StorageClass      *string        `xml:"StorageClass,omitempty"`
+	Tagging           *tags.Tags     `xml:"Tagging,omitempty"`
+	UserMetadata      *MetadataEntry `xml:"UserMetadata,omitempty"`
+}
+
+// SelectParameters holds the select request parameters
+type SelectParameters struct {
+	XMLName             xml.Name `xml:"SelectParameters"`
+	ExpressionType      QueryExpressionType
+	Expression          string
+	InputSerialization  SelectObjectInputSerialization
+	OutputSerialization SelectObjectOutputSerialization
+}
+
+// OutputLocation holds properties of the copy of the archived object
+type OutputLocation struct {
+	XMLName xml.Name `xml:"OutputLocation"`
+	S3      S3       `xml:"S3"`
+}
+
+// RestoreRequest holds properties of the restore object request
+type RestoreRequest struct {
+	XMLName              xml.Name              `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest"`
+	Type                 *RestoreType          `xml:"Type,omitempty"`
+	Tier                 *TierType             `xml:"Tier,omitempty"`
+	Days                 *int                  `xml:"Days,omitempty"`
+	GlacierJobParameters *GlacierJobParameters `xml:"GlacierJobParameters,omitempty"`
+	Description          *string               `xml:"Description,omitempty"`
+	SelectParameters     *SelectParameters     `xml:"SelectParameters,omitempty"`
+	OutputLocation       *OutputLocation       `xml:"OutputLocation,omitempty"`
+}
+
+// SetDays sets the days parameter of the restore request
+func (r *RestoreRequest) SetDays(v int) {
+	r.Days = &v
+}
+
+// SetDays sets the GlacierJobParameters of the restore request
+func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) {
+	r.GlacierJobParameters = &v
+}
+
+// SetType sets the type of the restore request
+func (r *RestoreRequest) SetType(v RestoreType) {
+	r.Type = &v
+}
+
+// SetTier sets the retrieval tier of the restore request
+func (r *RestoreRequest) SetTier(v TierType) {
+	r.Tier = &v
+}
+
+// SetDescription sets the description of the restore request
+func (r *RestoreRequest) SetDescription(v string) {
+	r.Description = &v
+}
+
+// SetSelectParameters sets SelectParameters of the restore select request
+func (r *RestoreRequest) SetSelectParameters(v SelectParameters) {
+	r.SelectParameters = &v
+}
+
+// SetOutputLocation sets the properties of the copy of the archived object
+func (r *RestoreRequest) SetOutputLocation(v OutputLocation) {
+	r.OutputLocation = &v
+}
+
+// RestoreObject is a implementation of https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html AWS S3 API
+func (c Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return err
+	}
+
+	restoreRequestBytes, err := xml.Marshal(req)
+	if err != nil {
+		return err
+	}
+
+	urlValues := make(url.Values)
+	urlValues.Set("restore", "")
+	if versionID != "" {
+		urlValues.Set("versionId", versionID)
+	}
+
+	// Execute POST on bucket/object.
+	resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentMD5Base64: sumMD5Base64(restoreRequestBytes),
+		contentSHA256Hex: sum256Hex(restoreRequestBytes),
+		contentBody:      bytes.NewReader(restoreRequestBytes),
+		contentLength:    int64(len(restoreRequestBytes)),
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != http.StatusAccepted {
+		return httpRespToErrorResponse(resp, bucketName, "")
+	}
+	return nil
+}
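A usage sketch for the new RestoreObject API; `client` and `ctx` are assumed to exist, the bucket/object names are placeholders, and the 7-day standard-tier restore parameters are illustrative:

req := minio.RestoreRequest{}
req.SetDays(7) // keep the restored copy available for 7 days
req.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierStandard})

// Empty versionID restores the latest version of the archived object.
if err := client.RestoreObject(ctx, "archive-bucket", "backups/2021-11.tar", "", req); err != nil {
	log.Fatal(err)
}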
vendor/github.com/minio/minio-go/v7/api-select.go (9 changes; generated, vendored)

@@ -54,6 +54,13 @@ const (
 	SelectCompressionNONE SelectCompressionType = "NONE"
 	SelectCompressionGZIP                       = "GZIP"
 	SelectCompressionBZIP                       = "BZIP2"
+
+	// Non-standard compression schemes, supported by MinIO hosts:
+
+	SelectCompressionZSTD   = "ZSTD"   // Zstandard compression.
+	SelectCompressionLZ4    = "LZ4"    // LZ4 Stream
+	SelectCompressionS2     = "S2"     // S2 Stream
+	SelectCompressionSNAPPY = "SNAPPY" // Snappy stream
 )

 // CSVQuoteFields - is the parameter for how CSV fields are quoted.

@@ -330,7 +337,7 @@ func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) er

 // SelectObjectInputSerialization - input serialization parameters
 type SelectObjectInputSerialization struct {
-	CompressionType SelectCompressionType
+	CompressionType SelectCompressionType `xml:"CompressionType,omitempty"`
 	Parquet         *ParquetInputOptions  `xml:"Parquet,omitempty"`
 	CSV             *CSVInputOptions      `xml:"CSV,omitempty"`
 	JSON            *JSONInputOptions     `xml:"JSON,omitempty"`
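A brief sketch of selecting from an object compressed with one of the MinIO-specific schemes added above; the CSV options are left at their defaults for illustration:

input := minio.SelectObjectInputSerialization{
	// Zstandard input, one of the non-standard schemes supported by MinIO hosts.
	CompressionType: minio.SelectCompressionZSTD,
	CSV:             &minio.CSVInputOptions{},
}
_ = input // pass as the InputSerialization of a SelectObjects request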
vendor/github.com/minio/minio-go/v7/api-stat.go (4 changes; generated, vendored)

@@ -99,11 +99,11 @@ func (c Client) statObject(ctx context.Context, bucketName, objectName string, o
 	if err != nil {
 		return ObjectInfo{}, err
 	}
-	deleteMarker := resp.Header.Get(amzDeleteMarker) == "true"

 	if resp != nil {
+		deleteMarker := resp.Header.Get(amzDeleteMarker) == "true"
 		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
-			if resp.StatusCode == http.StatusBadRequest && opts.VersionID != "" && deleteMarker {
+			if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker {
 				errResp := ErrorResponse{
 					StatusCode: resp.StatusCode,
 					Code:       "MethodNotAllowed",
vendor/github.com/minio/minio-go/v7/api.go (92 changes; generated, vendored)

@@ -34,6 +34,7 @@ import (
 	"runtime"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"

 	md5simd "github.com/minio/md5-simd"

@@ -90,6 +91,10 @@ type Client struct {
 	// Factory for MD5 hash functions.
 	md5Hasher    func() md5simd.Hasher
 	sha256Hasher func() md5simd.Hasher
+
+	healthCheckCh chan struct{}
+	healthCheck   int32
+	lastOnline    time.Time
 }

 // Options for New method

@@ -108,7 +113,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.11"
+	libraryVersion = "v7.0.14"
 )

 // User Agent should always following the below style.

@@ -305,6 +310,10 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
 	// Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
 	// by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
 	clnt.lookup = opts.BucketLookup
+
+	// healthcheck is not initialized
+	clnt.healthCheck = unknown
+
 	// Return.
 	return clnt, nil
 }

@@ -387,6 +396,72 @@ func (c *Client) hashMaterials(isMd5Requested bool) (hashAlgos map[string]md5sim
 	return hashAlgos, hashSums
 }

+const (
+	unknown = -1
+	offline = 0
+	online  = 1
+)
+
+// IsOnline returns true if healthcheck enabled and client is online
+func (c *Client) IsOnline() bool {
+	switch atomic.LoadInt32(&c.healthCheck) {
+	case online, unknown:
+		return true
+	}
+	return false
+}
+
+// IsOffline returns true if healthcheck enabled and client is offline
+func (c *Client) IsOffline() bool {
+	return !c.IsOnline()
+}
+
+// HealthCheck starts a healthcheck to see if endpoint is up. Returns a context cancellation function
+// and and error if health check is already started
+func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) {
+	if atomic.LoadInt32(&c.healthCheck) == online {
+		return nil, fmt.Errorf("health check running already")
+	}
+	if hcDuration < 1*time.Second {
+		return nil, fmt.Errorf("health check duration should be atleast 1 second")
+	}
+	ctx, cancelFn := context.WithCancel(context.Background())
+	c.healthCheckCh = make(chan struct{})
+	atomic.StoreInt32(&c.healthCheck, online)
+	probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-")
+	go func(duration time.Duration) {
+		timer := time.NewTimer(duration)
+		defer timer.Stop()
+		for {
+			select {
+			case <-ctx.Done():
+				close(c.healthCheckCh)
+				atomic.StoreInt32(&c.healthCheck, unknown)
+				return
+			case <-timer.C:
+
+				timer.Reset(duration)
+				// Do health check the first time and ONLY if the connection is marked offline
+				if c.IsOffline() || c.lastOnline.IsZero() {
+					_, err := c.getBucketLocation(context.Background(), probeBucketName)
+					if err != nil && IsNetworkOrHostDown(err, false) {
+						atomic.StoreInt32(&c.healthCheck, offline)
+					}
+					switch ToErrorResponse(err).Code {
+					case "NoSuchBucket", "AccessDenied", "":
+						c.lastOnline = time.Now()
+						atomic.StoreInt32(&c.healthCheck, online)
+					}
+				}
+			case <-c.healthCheckCh:
+				// set offline if client saw a network error
+				atomic.StoreInt32(&c.healthCheck, offline)
+			}
+		}
+	}(hcDuration)
+	return cancelFn, nil
+}
+
 // requestMetadata - is container for all the values to make a request.
 type requestMetadata struct {
 	// If set newRequest presigns the URL.

@@ -565,12 +640,25 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque
 		if isS3CodeRetryable(errResponse.Code) {
 			continue // Retry.
 		}
+
+		if atomic.LoadInt32(&c.healthCheck) != unknown && IsNetworkOrHostDown(err, false) {
+			select {
+			case c.healthCheckCh <- struct{}{}:
+			default:
+			}
+		}
 		return nil, err
 	}

 	// Initiate the request.
 	res, err = c.do(req)
 	if err != nil {
+		if atomic.LoadInt32(&c.healthCheck) != unknown && IsNetworkOrHostDown(err, false) {
+			select {
+			case c.healthCheckCh <- struct{}{}:
+			default:
+			}
+		}
+
 		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
 			return nil, err
 		}
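A minimal sketch of the new health-check hooks on the client; `client` is assumed to be an existing *minio.Client and the two-second probe interval is illustrative:

// Start a background health check that probes the endpoint periodically.
cancel, err := client.HealthCheck(2 * time.Second)
if err != nil {
	log.Fatal(err)
}
defer cancel() // stops the health-check goroutine

if client.IsOnline() {
	log.Println("endpoint reachable")
}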
vendor/github.com/minio/minio-go/v7/constants.go (9 changes; generated, vendored)

@@ -69,6 +69,7 @@ const (
 	amzVersionID         = "X-Amz-Version-Id"
 	amzTaggingCount      = "X-Amz-Tagging-Count"
 	amzExpiration        = "X-Amz-Expiration"
+	amzRestore           = "X-Amz-Restore"
 	amzReplicationStatus = "X-Amz-Replication-Status"
 	amzDeleteMarker      = "X-Amz-Delete-Marker"


@@ -89,4 +90,12 @@ const (
 	minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker"
 	minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request"
 	minIOBucketReplicationRequest      = "X-Minio-Source-Replication-Request"
+	// Header indicates last tag update time on source
+	minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp"
+	// Header indicates last retention update time on source
+	minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp"
+	// Header indicates last legalhold update time on source
+	minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp"
+
+	minIOForceDelete = "x-minio-force-delete"
 )
vendor/github.com/minio/minio-go/v7/core.go (10 changes; generated, vendored)

@@ -46,13 +46,13 @@ func NewCore(endpoint string, opts *Options) (*Core, error) {
 // ListObjects - List all the objects at a prefix, optionally with marker and delimiter
 // you can further filter the results.
 func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
-	return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys)
+	return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil)
 }

 // ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
 // continuationToken instead of marker to support iteration over the results.
-func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
-	return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, fetchOwner, false, delimiter, maxkeys)
+func (c Core) ListObjectsV2(bucketName, objectPrefix, startAfter, continuationToken, delimiter string, maxkeys int) (ListBucketV2Result, error) {
+	return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, true, false, delimiter, startAfter, maxkeys, nil)
 }

 // CopyObject - copies an object from source object to destination object on server side.

@@ -97,10 +97,10 @@ func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID stri
 }

 // CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
-func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart) (string, error) {
+func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (string, error) {
 	res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{
 		Parts: parts,
-	})
+	}, opts)
 	return res.ETag, err
 }
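A sketch of the updated low-level Core calls, which now take a startAfter key in ListObjectsV2 and thread PutObjectOptions into CompleteMultipartUpload; `core`, `ctx`, `uploadID` and `parts` are assumed to exist and all names are placeholders:

etag, err := core.CompleteMultipartUpload(ctx, "my-bucket", "big-object", uploadID, parts, minio.PutObjectOptions{})
if err != nil {
	log.Fatal(err)
}
log.Println("completed with ETag", etag)

// List keys under a prefix, starting lexically after a given key.
res, err := core.ListObjectsV2("my-bucket", "logs/", "logs/2021-11-01", "", "", 1000)
if err != nil {
	log.Fatal(err)
}
log.Println("keys returned:", len(res.Contents))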
vendor/github.com/minio/minio-go/v7/functional_tests.go (342 changes; generated, vendored)

@@ -38,6 +38,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	"github.com/dustin/go-humanize"

@@ -1054,6 +1055,153 @@ func testGetObjectWithVersioning() {
 	successLogger(testName, function, args, startTime).Info()
 }

+func testPutObjectWithVersioning() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject()"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	err = c.EnableVersioning(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+		return
+	}
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	const n = 10
+	// Read input...
+
+	// Save the data concurrently.
+	var wg sync.WaitGroup
+	wg.Add(n)
+	var buffers = make([][]byte, n)
+	var errs [n]error
+	for i := 0; i < n; i++ {
+		r := newRandomReader(int64((1<<20)*i+i), int64(i))
+		buf, err := ioutil.ReadAll(r)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "unexpected failure", err)
+			return
+		}
+		buffers[i] = buf
+
+		go func(i int) {
+			defer wg.Done()
+			_, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20})
+		}(i)
+	}
+	wg.Wait()
+	for _, err := range errs {
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutObject failed", err)
+			return
+		}
+	}
+
+	objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+	var results []minio.ObjectInfo
+	for info := range objectsInfo {
+		if info.Err != nil {
+			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+			return
+		}
+		results = append(results, info)
+	}
+
+	if len(results) != n {
+		logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
+		return
+	}
+
+	sort.Slice(results, func(i, j int) bool {
+		return results[i].Size < results[j].Size
+	})
+
+	sort.Slice(buffers, func(i, j int) bool {
+		return len(buffers[i]) < len(buffers[j])
+	})
+
+	for i := 0; i < len(results); i++ {
+		opts := minio.GetObjectOptions{VersionID: results[i].VersionID}
+		reader, err := c.GetObject(context.Background(), bucketName, objectName, opts)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "error during GET object", err)
+			return
+		}
+		statInfo, err := reader.Stat()
+		if err != nil {
+			logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err)
+			return
+		}
+		if statInfo.ETag != results[i].ETag {
+			logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err)
+			return
+		}
+		if statInfo.LastModified.Unix() != results[i].LastModified.Unix() {
+			logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err)
+			return
+		}
+		if statInfo.Size != results[i].Size {
+			logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err)
+			return
+		}
+
+		tmpBuffer := bytes.NewBuffer([]byte{})
+		_, err = io.Copy(tmpBuffer, reader)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "unexpected io.Copy()", err)
+			return
+		}
+
+		if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) {
+			logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err)
+			return
+		}
+	}
+
+	// Delete all objects and their versions as long as the bucket itself
+	if err = cleanupVersionedBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
 func testCopyObjectWithVersioning() {
 	// initialize logging params
 	startTime := time.Now()

@@ -1191,6 +1339,166 @@ func testCopyObjectWithVersioning() {
 	successLogger(testName, function, args, startTime).Info()
 }

+func testConcurrentCopyObjectWithVersioning() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "CopyObject()"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	err = c.EnableVersioning(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+		return
+	}
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	var testFiles = []string{"datafile-10-kB"}
+	for _, testFile := range testFiles {
+		r := getDataReader(testFile)
+		buf, err := ioutil.ReadAll(r)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "unexpected failure", err)
+			return
+		}
+		r.Close()
+		_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutObject failed", err)
+			return
+		}
+	}
+
+	objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+	var infos []minio.ObjectInfo
+	for info := range objectsInfo {
+		if info.Err != nil {
+			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+			return
+		}
+		infos = append(infos, info)
+	}
+
+	sort.Slice(infos, func(i, j int) bool {
+		return infos[i].Size < infos[j].Size
+	})
+
+	reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err)
+		return
+	}
+
+	oldestContent, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
+		return
+	}
+
+	// Copy Source
+	srcOpts := minio.CopySrcOptions{
+		Bucket:    bucketName,
+		Object:    objectName,
+		VersionID: infos[0].VersionID,
+	}
+	args["src"] = srcOpts
+
+	dstOpts := minio.CopyDestOptions{
+		Bucket: bucketName,
+		Object: objectName + "-copy",
+	}
+	args["dst"] = dstOpts
+
+	// Perform the Copy concurrently
+	const n = 10
+	var wg sync.WaitGroup
+	wg.Add(n)
+	var errs [n]error
+	for i := 0; i < n; i++ {
+		go func(i int) {
+			defer wg.Done()
+			_, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts)
+		}(i)
+	}
+	wg.Wait()
+	for _, err := range errs {
+		if err != nil {
+			logError(testName, function, args, startTime, "", "CopyObject failed", err)
+			return
+		}
+	}
+
+	objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object})
+	infos = []minio.ObjectInfo{}
+	for info := range objectsInfo {
+		// Destination object
+		readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetObject failed", err)
+			return
+		}
+		defer readerCopy.Close()
+
+		newestContent, err := ioutil.ReadAll(readerCopy)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
+			return
+		}
+
+		if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) {
+			logError(testName, function, args, startTime, "", "Unexpected destination object content", err)
+			return
+		}
+		infos = append(infos, info)
+	}
+
+	if len(infos) != n {
+		logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil)
+		return
+	}
+
+	// Delete all objects and their versions as long as the bucket itself
+	if err = cleanupVersionedBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
 func testComposeObjectWithVersioning() {
 	// initialize logging params
 	startTime := time.Now()

@@ -7548,7 +7856,7 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() {
 	completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag})

 	// Complete the multipart upload
-	_, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts)
+	_, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
 		return

@@ -7606,7 +7914,7 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() {
 	}

 	// Complete the multipart upload
-	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
+	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
 		return

@@ -7783,7 +8091,7 @@ func testSSECEncryptedToSSECCopyObjectPart() {
 	}

 	// Complete the multipart upload
-	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
+	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
 		return

@@ -7959,7 +8267,7 @@ func testSSECEncryptedToUnencryptedCopyPart() {
 	}

 	// Complete the multipart upload
-	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
+	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
 		return

@@ -8138,7 +8446,7 @@ func testSSECEncryptedToSSES3CopyObjectPart() {
 	}

 	// Complete the multipart upload
-	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
+	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
 		return

@@ -8312,7 +8620,7 @@ func testUnencryptedToSSECCopyObjectPart() {
 	}

 	// Complete the multipart upload
-	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
+	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
 		return

@@ -8482,7 +8790,7 @@ func testUnencryptedToUnencryptedCopyPart() {
 	}

 	// Complete the multipart upload
-	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
+	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
 		return

@@ -8654,7 +8962,7 @@ func testUnencryptedToSSES3CopyObjectPart() {
 	}

 	// Complete the multipart upload
-	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
+	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
 		return

@@ -8829,7 +9137,7 @@ func testSSES3EncryptedToSSECCopyObjectPart() {
 	}

 	// Complete the multipart upload
-	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
+	_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
|
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
|
||||||
return
|
return
|
||||||
@ -9000,7 +9308,7 @@ func testSSES3EncryptedToUnencryptedCopyPart() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Complete the multipart upload
|
// Complete the multipart upload
|
||||||
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
|
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
|
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
|
||||||
return
|
return
|
||||||
@ -9174,7 +9482,7 @@ func testSSES3EncryptedToSSES3CopyObjectPart() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Complete the multipart upload
|
// Complete the multipart upload
|
||||||
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
|
_, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
|
logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
|
||||||
return
|
return
|
||||||
@ -11285,22 +11593,20 @@ func testRemoveObjects() {
|
|||||||
var reader = getDataReader("datafile-129-MB")
|
var reader = getDataReader("datafile-129-MB")
|
||||||
defer reader.Close()
|
defer reader.Close()
|
||||||
|
|
||||||
n, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
|
_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
logError(testName, function, args, startTime, "", "Error uploading object", err)
|
||||||
}
|
}
|
||||||
log.Println("Uploaded", objectName, " of size: ", n, "to bucket: ", bucketName, "Successfully.")
|
|
||||||
|
|
||||||
// Replace with smaller...
|
// Replace with smaller...
|
||||||
bufSize = dataFileMap["datafile-10-kB"]
|
bufSize = dataFileMap["datafile-10-kB"]
|
||||||
reader = getDataReader("datafile-10-kB")
|
reader = getDataReader("datafile-10-kB")
|
||||||
defer reader.Close()
|
defer reader.Close()
|
||||||
|
|
||||||
n, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
|
_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
logError(testName, function, args, startTime, "", "Error uploading object", err)
|
||||||
}
|
}
|
||||||
log.Println("Uploaded", objectName, " of size: ", n, "to bucket: ", bucketName, "Successfully.")
|
|
||||||
|
|
||||||
t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
|
t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
|
||||||
m := minio.RetentionMode(minio.Governance)
|
m := minio.RetentionMode(minio.Governance)
|
||||||
@ -11416,6 +11722,7 @@ func main() {
|
|||||||
testFPutObjectContextV2()
|
testFPutObjectContextV2()
|
||||||
testFGetObjectContextV2()
|
testFGetObjectContextV2()
|
||||||
testPutObjectContextV2()
|
testPutObjectContextV2()
|
||||||
|
testPutObjectWithVersioning()
|
||||||
testMakeBucketError()
|
testMakeBucketError()
|
||||||
testMakeBucketRegions()
|
testMakeBucketRegions()
|
||||||
testPutObjectWithMetadata()
|
testPutObjectWithMetadata()
|
||||||
@ -11453,6 +11760,7 @@ func main() {
|
|||||||
testStatObjectWithVersioning()
|
testStatObjectWithVersioning()
|
||||||
testGetObjectWithVersioning()
|
testGetObjectWithVersioning()
|
||||||
testCopyObjectWithVersioning()
|
testCopyObjectWithVersioning()
|
||||||
|
testConcurrentCopyObjectWithVersioning()
|
||||||
testComposeObjectWithVersioning()
|
testComposeObjectWithVersioning()
|
||||||
testRemoveObjectWithVersioning()
|
testRemoveObjectWithVersioning()
|
||||||
testRemoveObjectsWithVersioning()
|
testRemoveObjectsWithVersioning()
|
||||||
|
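All of the hunks above make the same mechanical change: the Core client's CompleteMultipartUpload call in these functional tests now takes a trailing minio.PutObjectOptions argument. A minimal sketch of a caller updated for the new signature, assuming the minio.Core client as used in these tests; the endpoint, keys, bucket, upload ID and part values below are placeholders, not from the diff:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	core, err := minio.NewCore("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// parts would come from earlier PutObjectPart / CopyObjectPart calls.
	parts := []minio.CompletePart{{PartNumber: 1, ETag: "etag-from-upload"}}
	// The trailing PutObjectOptions{} argument is the new part of the signature.
	_, err = core.CompleteMultipartUpload(context.Background(), "my-bucket", "my-object",
		"upload-id", parts, minio.PutObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}
}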
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go (generated, vendored): 18 lines changed
@@ -22,8 +22,13 @@ import (
 "time"
 )

+const (
 // STSVersion sts version string
-const STSVersion = "2011-06-15"
+STSVersion = "2011-06-15"
+
+// How much duration to slash from the given expiration duration
+defaultExpiryWindow = 0.8
+)

 // A Value is the AWS credentials value for individual credential fields.
 type Value struct {
@@ -82,10 +87,15 @@ type Expiry struct {
 // the expiration time given to ensure no requests are made with expired
 // tokens.
 func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
-e.expiration = expiration
-if window > 0 {
-e.expiration = e.expiration.Add(-window)
+if e.CurrentTime == nil {
+e.CurrentTime = time.Now
 }
+cut := window
+if cut < 0 {
+expireIn := expiration.Sub(e.CurrentTime())
+cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow))
+}
+e.expiration = expiration.Add(-cut)
 }

 // IsExpired returns if the credentials are expired.
vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go (generated, vendored): 13 lines changed
@@ -38,7 +38,10 @@ import (
 // prior to the credentials actually expiring. This is beneficial
 // so race conditions with expiring credentials do not cause
 // request to fail unexpectedly due to ExpiredTokenException exceptions.
-const DefaultExpiryWindow = time.Second * 10 // 10 secs
+// DefaultExpiryWindow can be used as parameter to (*Expiry).SetExpiration.
+// When used the tokens refresh will be triggered when 80% of the elapsed
+// time until the actual expiration time is passed.
+const DefaultExpiryWindow = -1

 // A IAM retrieves credentials from the EC2 service, and keeps track if
 // those credentials are expired.
@@ -181,10 +184,6 @@ type ec2RoleCredRespBody struct {
 // be sent to fetch the rolling access credentials.
 // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
 func getIAMRoleURL(endpoint string) (*url.URL, error) {
-if endpoint == "" {
-endpoint = defaultIAMRoleEndpoint
-}
-
 u, err := url.Parse(endpoint)
 if err != nil {
 return nil, err
@@ -281,6 +280,10 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
 // If the credentials cannot be found, or there is an error
 // reading the response an error will be returned.
 func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
+if endpoint == "" {
+endpoint = defaultIAMRoleEndpoint
+}
+
 // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
 token, _ := fetchIMDSToken(client, endpoint)
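Taken together, the credentials.go and iam_aws.go changes above replace the fixed 10-second refresh margin with a proportional one: passing a negative window (the new DefaultExpiryWindow) makes SetExpiration cut the credential lifetime at the 80% mark (defaultExpiryWindow = 0.8). A minimal sketch of that behaviour, using only the exported Expiry API shown in this diff; the one-hour lifetime is an arbitrary example value:

package main

import (
	"fmt"
	"time"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	var e credentials.Expiry
	// With DefaultExpiryWindow (-1) the effective expiry is set to roughly
	// 80% of the remaining lifetime, i.e. about 48 minutes for a 1h token,
	// instead of "expiration minus a fixed 10 seconds".
	e.SetExpiration(time.Now().Add(1*time.Hour), credentials.DefaultExpiryWindow)
	fmt.Println("already expired?", e.IsExpired()) // false until ~48 minutes pass
}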
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go (generated, vendored): 96 lines changed
@@ -1,6 +1,6 @@
 /*
 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
-* Copyright 2019 MinIO, Inc.
+* Copyright 2019-2021 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@ package credentials
 import (
 "encoding/xml"
 "errors"
+"fmt"
 "net/http"
 "net/url"
 "time"
@@ -60,26 +61,86 @@ type LDAPIdentity struct {

 // LDAP username/password used to fetch LDAP STS credentials.
 LDAPUsername, LDAPPassword string

+// Session policy to apply to the generated credentials. Leave empty to
+// use the full access policy available to the user.
+Policy string
+
+// RequestedExpiry is the configured expiry duration for credentials
+// requested from LDAP.
+RequestedExpiry time.Duration
 }

 // NewLDAPIdentity returns new credentials object that uses LDAP
 // Identity.
-func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string) (*Credentials, error) {
+func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) {
+l := LDAPIdentity{
+Client: &http.Client{Transport: http.DefaultTransport},
+STSEndpoint: stsEndpoint,
+LDAPUsername: ldapUsername,
+LDAPPassword: ldapPassword,
+}
+for _, optFunc := range optFuncs {
+optFunc(&l)
+}
+return New(&l), nil
+}
+
+// LDAPIdentityOpt is a function type used to configured the LDAPIdentity
+// instance.
+type LDAPIdentityOpt func(*LDAPIdentity)
+
+// LDAPIdentityPolicyOpt sets the session policy for requested credentials.
+func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt {
+return func(k *LDAPIdentity) {
+k.Policy = policy
+}
+}
+
+// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials.
+func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt {
+return func(k *LDAPIdentity) {
+k.RequestedExpiry = d
+}
+}
+
+func stripPassword(err error) error {
+urlErr, ok := err.(*url.Error)
+if ok {
+u, _ := url.Parse(urlErr.URL)
+if u == nil {
+return urlErr
+}
+values := u.Query()
+values.Set("LDAPPassword", "xxxxx")
+u.RawQuery = values.Encode()
+urlErr.URL = u.String()
+return urlErr
+}
+return err
+}
+
+// NewLDAPIdentityWithSessionPolicy returns new credentials object that uses
+// LDAP Identity with a specified session policy. The `policy` parameter must be
+// a JSON string specifying the policy document.
+//
+// DEPRECATED: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
+func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) {
 return New(&LDAPIdentity{
 Client: &http.Client{Transport: http.DefaultTransport},
 STSEndpoint: stsEndpoint,
 LDAPUsername: ldapUsername,
 LDAPPassword: ldapPassword,
+Policy: policy,
 }), nil
 }

 // Retrieve gets the credential by calling the MinIO STS API for
 // LDAP on the configured stsEndpoint.
 func (k *LDAPIdentity) Retrieve() (value Value, err error) {
-u, kerr := url.Parse(k.STSEndpoint)
-if kerr != nil {
-err = kerr
-return
+u, err := url.Parse(k.STSEndpoint)
+if err != nil {
+return value, err
 }

 v := url.Values{}
@@ -87,25 +148,28 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
 v.Set("Version", STSVersion)
 v.Set("LDAPUsername", k.LDAPUsername)
 v.Set("LDAPPassword", k.LDAPPassword)
+if k.Policy != "" {
+v.Set("Policy", k.Policy)
+}
+if k.RequestedExpiry != 0 {
+v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds())))
+}

 u.RawQuery = v.Encode()

-req, kerr := http.NewRequest(http.MethodPost, u.String(), nil)
-if kerr != nil {
-err = kerr
-return
+req, err := http.NewRequest(http.MethodPost, u.String(), nil)
+if err != nil {
+return value, stripPassword(err)
 }

-resp, kerr := k.Client.Do(req)
-if kerr != nil {
-err = kerr
-return
+resp, err := k.Client.Do(req)
+if err != nil {
+return value, stripPassword(err)
 }

 defer resp.Body.Close()
 if resp.StatusCode != http.StatusOK {
-err = errors.New(resp.Status)
-return
+return value, errors.New(resp.Status)
 }

 r := AssumeRoleWithLDAPResponse{}
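The functional options added above extend NewLDAPIdentity without breaking its existing callers. A usage sketch based on the signatures shown in this diff; the STS endpoint, LDAP user and policy document are placeholders for illustration only:

package main

import (
	"log"
	"time"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Hypothetical session policy restricting the credentials to read-only access.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:GetObject"],"Resource":["arn:aws:s3:::mybucket/*"]}]}`

	creds, err := credentials.NewLDAPIdentity(
		"https://sts.example.com:9000",
		"ldap-user", "ldap-password",
		credentials.LDAPIdentityPolicyOpt(policy),        // scope the session
		credentials.LDAPIdentityExpiryOpt(30*time.Minute), // request a shorter lifetime
	)
	if err != nil {
		log.Fatal(err)
	}

	v, err := creds.Get() // triggers Retrieve() against the STS endpoint
	if err != nil {
		log.Fatal(err)
	}
	log.Println("temporary access key:", v.AccessKeyID)
}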
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go (generated, vendored): 5 lines changed
@@ -55,6 +55,7 @@ type WebIdentityResult struct {
 // WebIdentityToken - web identity token with expiry.
 type WebIdentityToken struct {
 Token string
+AccessToken string
 Expiry int
 }

@@ -121,6 +122,10 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
 v.Set("RoleSessionName", roleSessionName)
 }
 v.Set("WebIdentityToken", idToken.Token)
+if idToken.AccessToken != "" {
+// Usually set when server is using extended userInfo endpoint.
+v.Set("WebIdentityAccessToken", idToken.AccessToken)
+}
 if idToken.Expiry > 0 {
 v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
 }
vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go (generated, vendored): 107 lines changed
@@ -19,6 +19,7 @@
 package lifecycle

 import (
+"encoding/json"
 "encoding/xml"
 "time"
 )
@@ -116,6 +117,26 @@ type Transition struct {
 Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
 }

+// MarshalJSON customizes json encoding by omitting empty values
+func (t Transition) MarshalJSON() ([]byte, error) {
+type transition struct {
+Date *ExpirationDate `json:"Date,omitempty"`
+StorageClass string `json:"StorageClass,omitempty"`
+Days *ExpirationDays `json:"Days,omitempty"`
+}
+
+newt := transition{
+StorageClass: t.StorageClass,
+}
+if !t.IsDaysNull() {
+newt.Days = &t.Days
+}
+if !t.IsDateNull() {
+newt.Date = &t.Date
+}
+return json.Marshal(newt)
+}
+
 // IsDaysNull returns true if days field is null
 func (t Transition) IsDaysNull() bool {
 return t.Days == ExpirationDays(0)
@@ -160,6 +181,31 @@ type Filter struct {
 Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
 }

+// IsNull returns true if all Filter fields are empty.
+func (f Filter) IsNull() bool {
+return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == ""
+}
+
+// MarshalJSON customizes json encoding by removing empty values.
+func (f Filter) MarshalJSON() ([]byte, error) {
+type filter struct {
+And *And `json:"And,omitempty"`
+Prefix string `json:"Prefix,omitempty"`
+Tag *Tag `json:"Tag,omitempty"`
+}
+
+newf := filter{
+Prefix: f.Prefix,
+}
+if !f.Tag.IsEmpty() {
+newf.Tag = &f.Tag
+}
+if !f.And.IsEmpty() {
+newf.And = &f.And
+}
+return json.Marshal(newf)
+}
+
 // MarshalXML - produces the xml representation of the Filter struct
 // only one of Prefix, And and Tag should be present in the output.
 func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
@@ -238,6 +284,26 @@ type Expiration struct {
 DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"`
 }

+// MarshalJSON customizes json encoding by removing empty day/date specification.
+func (e Expiration) MarshalJSON() ([]byte, error) {
+type expiration struct {
+Date *ExpirationDate `json:"Date,omitempty"`
+Days *ExpirationDays `json:"Days,omitempty"`
+DeleteMarker ExpireDeleteMarker
+}
+
+newexp := expiration{
+DeleteMarker: e.DeleteMarker,
+}
+if !e.IsDaysNull() {
+newexp.Days = &e.Days
+}
+if !e.IsDateNull() {
+newexp.Date = &e.Date
+}
+return json.Marshal(newexp)
+}
+
 // IsDaysNull returns true if days field is null
 func (e Expiration) IsDaysNull() bool {
 return e.Days == ExpirationDays(0)
@@ -267,6 +333,47 @@ func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) e
 return en.EncodeElement(expirationWrapper(e), startElement)
 }

+// MarshalJSON customizes json encoding by omitting empty values
+func (r Rule) MarshalJSON() ([]byte, error) {
+type rule struct {
+AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"`
+Expiration *Expiration `json:"Expiration,omitempty"`
+ID string `json:"ID"`
+RuleFilter *Filter `json:"Filter,omitempty"`
+NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"`
+NoncurrentVersionTransition *NoncurrentVersionTransition `json:"NoncurrentVersionTransition,omitempty"`
+Prefix string `json:"Prefix,omitempty"`
+Status string `json:"Status"`
+Transition *Transition `json:"Transition,omitempty"`
+}
+newr := rule{
+Prefix: r.Prefix,
+Status: r.Status,
+ID: r.ID,
+}
+
+if !r.RuleFilter.IsNull() {
+newr.RuleFilter = &r.RuleFilter
+}
+if !r.AbortIncompleteMultipartUpload.IsDaysNull() {
+newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload
+}
+if !r.Expiration.IsNull() {
+newr.Expiration = &r.Expiration
+}
+if !r.Transition.IsNull() {
+newr.Transition = &r.Transition
+}
+if !r.NoncurrentVersionExpiration.IsDaysNull() {
+newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration
+}
+if !r.NoncurrentVersionTransition.IsDaysNull() {
+newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition
+}
+
+return json.Marshal(newr)
+}
+
 // Rule represents a single rule in lifecycle configuration
 type Rule struct {
 XMLName xml.Name `xml:"Rule,omitempty" json:"-"`
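The MarshalJSON methods added above mirror the existing MarshalXML behaviour: zero-valued Days, Date and Filter blocks are dropped from the JSON output instead of being encoded as zero values. A short sketch of the effect, with made-up rule values for illustration:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/lifecycle"
)

func main() {
	cfg := lifecycle.NewConfiguration()
	cfg.Rules = []lifecycle.Rule{{
		ID:     "expire-logs",
		Status: "Enabled",
		Expiration: lifecycle.Expiration{
			Days: lifecycle.ExpirationDays(30),
		},
	}}
	// With the custom marshalers, unset Transition/Filter/Date fields are
	// omitted rather than serialized as empty objects or zero dates.
	out, _ := json.Marshal(cfg)
	fmt.Println(string(out))
}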
vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go (generated, vendored): 93 lines changed
@@ -47,13 +47,13 @@ const (
 // Options represents options to set a replication configuration rule
 type Options struct {
 Op OptionType
+RoleArn string
 ID string
 Prefix string
 RuleStatus string
 Priority string
 TagString string
 StorageClass string
-RoleArn string
 DestBucket string
 IsTagSet bool
 IsSCSet bool
@@ -103,9 +103,17 @@ func (c *Config) AddRule(opts Options) error {
 if err != nil {
 return err
 }
-if opts.RoleArn != c.Role && c.Role != "" {
-return fmt.Errorf("role ARN does not match existing configuration")
+if opts.RoleArn != "" {
+tokens := strings.Split(opts.RoleArn, ":")
+if len(tokens) != 6 {
+return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn)
 }
+if !strings.HasPrefix(opts.RoleArn, "arn:aws:iam") {
+return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn)
+}
+c.Role = opts.RoleArn
+}

 var status Status
 // toggle rule status for edit option
 switch opts.RuleStatus {
@@ -139,29 +147,12 @@ func (c *Config) AddRule(opts Options) error {
 if opts.ID == "" {
 opts.ID = xid.New().String()
 }
-arnStr := opts.RoleArn
-if opts.RoleArn == "" {
-arnStr = c.Role
-}
-if arnStr == "" {
-return fmt.Errorf("role ARN required")
-}
-tokens := strings.Split(arnStr, ":")
-if len(tokens) != 6 {
-return fmt.Errorf("invalid format for replication Arn")
-}
-if c.Role == "" {
-c.Role = arnStr
-}
 destBucket := opts.DestBucket
 // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
 if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 {
-if len(btokens) == 1 {
-destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
-} else {
 return fmt.Errorf("destination bucket needs to be in Arn format")
 }
-}
 dmStatus := Disabled
 if opts.ReplicateDeleteMarkers != "" {
 switch opts.ReplicateDeleteMarkers {
@@ -236,13 +227,18 @@ func (c *Config) AddRule(opts Options) error {
 if err := newRule.Validate(); err != nil {
 return err
 }
+// if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration
+if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") {
+for i := range c.Rules {
+c.Rules[i].Destination.Bucket = c.Role
+}
+c.Role = ""
+}

 for _, rule := range c.Rules {
 if rule.Priority == newRule.Priority {
 return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
 }
-if rule.Destination.Bucket != newRule.Destination.Bucket {
-return fmt.Errorf("the destination bucket must be same for all rules")
-}
 if rule.ID == newRule.ID {
 return fmt.Errorf("a rule exists with this ID")
 }
@@ -257,6 +253,14 @@ func (c *Config) EditRule(opts Options) error {
 if opts.ID == "" {
 return fmt.Errorf("rule ID missing")
 }
+// if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS.
+if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") {
+for i := range c.Rules {
+c.Rules[i].Destination.Bucket = c.Role
+}
+c.Role = ""
+}

 rIdx := -1
 var newRule Rule
 for i, rule := range c.Rules {
@@ -351,7 +355,7 @@ func (c *Config) EditRule(opts Options) error {
 return fmt.Errorf("replica metadata sync should be either [enable|disable]")
 }
 }
-fmt.Println("opts.ExistingObjectReplicate>", opts.ExistingObjectReplicate)
 if opts.ExistingObjectReplicate != "" {
 switch opts.ExistingObjectReplicate {
 case "enable":
@@ -376,12 +380,8 @@ func (c *Config) EditRule(opts Options) error {
 destBucket := opts.DestBucket
 // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
 if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 {
-if len(btokens) == 1 {
-destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
-} else {
 return fmt.Errorf("destination bucket needs to be in Arn format")
 }
-}
 newRule.Destination.Bucket = destBucket
 }
 // validate rule
@@ -393,8 +393,8 @@ func (c *Config) EditRule(opts Options) error {
 if rule.Priority == newRule.Priority && rIdx != idx {
 return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
 }
-if rule.Destination.Bucket != newRule.Destination.Bucket {
-return fmt.Errorf("the destination bucket must be same for all rules")
+if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID {
+return fmt.Errorf("invalid destination bucket for this rule")
 }
 }

@@ -678,9 +678,9 @@ func (e ExistingObjectReplication) Validate() error {
 return nil
 }

-// Metrics represents inline replication metrics
-// such as pending, failed and completed bytes in total for a bucket
-type Metrics struct {
+// TargetMetrics represents inline replication metrics
+// such as pending, failed and completed bytes in total for a bucket remote target
+type TargetMetrics struct {
 // Pending size in bytes
 PendingSize uint64 `json:"pendingReplicationSize"`
 // Completed size in bytes
@@ -694,3 +694,28 @@ type Metrics struct {
 // Total number of failed operations including metadata updates
 FailedCount uint64 `json:"failedReplicationCount"`
 }
+
+// Metrics represents inline replication metrics for a bucket.
+type Metrics struct {
+Stats map[string]TargetMetrics
+// Total Pending size in bytes across targets
+PendingSize uint64 `json:"pendingReplicationSize"`
+// Completed size in bytes across targets
+ReplicatedSize uint64 `json:"completedReplicationSize"`
+// Total Replica size in bytes across targets
+ReplicaSize uint64 `json:"replicaSize"`
+// Failed size in bytes across targets
+FailedSize uint64 `json:"failedReplicationSize"`
+// Total number of pending operations including metadata updates across targets
+PendingCount uint64 `json:"pendingReplicationCount"`
+// Total number of failed operations including metadata updates across targets
+FailedCount uint64 `json:"failedReplicationCount"`
+}
+
+type ResyncTargetsInfo struct {
+Targets []ResyncTarget `json:"target,omitempty"`
+}
+type ResyncTarget struct {
+Arn string `json:"arn"`
+ResetID string `json:"resetid"`
+}
|
167
vendor/github.com/minio/minio-go/v7/utils.go
generated
vendored
167
vendor/github.com/minio/minio-go/v7/utils.go
generated
vendored
@ -18,14 +18,17 @@
|
|||||||
package minio
|
package minio
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"crypto/md5"
|
"crypto/md5"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"hash"
|
"hash"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
@ -58,6 +61,26 @@ func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) {
|
|||||||
return time.Time{}, ""
|
return time.Time{}, ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`)
|
||||||
|
|
||||||
|
func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) {
|
||||||
|
matches := restoreRegex.FindStringSubmatch(restore)
|
||||||
|
if len(matches) != 4 {
|
||||||
|
return false, time.Time{}, errors.New("unexpected restore header")
|
||||||
|
}
|
||||||
|
ongoing, err = strconv.ParseBool(matches[1])
|
||||||
|
if err != nil {
|
||||||
|
return false, time.Time{}, err
|
||||||
|
}
|
||||||
|
if matches[3] != "" {
|
||||||
|
expTime, err = time.Parse(http.TimeFormat, matches[3])
|
||||||
|
if err != nil {
|
||||||
|
return false, time.Time{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// xmlDecoder provide decoded value in xml.
|
// xmlDecoder provide decoded value in xml.
|
||||||
func xmlDecoder(body io.Reader, v interface{}) error {
|
func xmlDecoder(body io.Reader, v interface{}) error {
|
||||||
d := xml.NewDecoder(body)
|
d := xml.NewDecoder(body)
|
||||||
@ -294,6 +317,16 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Nil if not found
|
||||||
|
var restore *RestoreInfo
|
||||||
|
if restoreHdr := h.Get(amzRestore); restoreHdr != "" {
|
||||||
|
ongoing, expTime, err := amzRestoreToStruct(restoreHdr)
|
||||||
|
if err != nil {
|
||||||
|
return ObjectInfo{}, err
|
||||||
|
}
|
||||||
|
restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime}
|
||||||
|
}
|
||||||
|
|
||||||
// extract lifecycle expiry date and rule ID
|
// extract lifecycle expiry date and rule ID
|
||||||
expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration))
|
expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration))
|
||||||
|
|
||||||
@ -319,6 +352,7 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn
|
|||||||
UserMetadata: userMetadata,
|
UserMetadata: userMetadata,
|
||||||
UserTags: userTags,
|
UserTags: userTags,
|
||||||
UserTagCount: tagCount,
|
UserTagCount: tagCount,
|
||||||
|
Restore: restore,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -397,19 +431,20 @@ func getDefaultLocation(u url.URL, regionOverride string) (location string) {
|
|||||||
return region
|
return region
|
||||||
}
|
}
|
||||||
|
|
||||||
var supportedHeaders = []string{
|
var supportedHeaders = map[string]bool{
|
||||||
"content-type",
|
"content-type": true,
|
||||||
"cache-control",
|
"cache-control": true,
|
||||||
"content-encoding",
|
"content-encoding": true,
|
||||||
"content-disposition",
|
"content-disposition": true,
|
||||||
"content-language",
|
"content-language": true,
|
||||||
"x-amz-website-redirect-location",
|
"x-amz-website-redirect-location": true,
|
||||||
"x-amz-object-lock-mode",
|
"x-amz-object-lock-mode": true,
|
||||||
"x-amz-metadata-directive",
|
"x-amz-metadata-directive": true,
|
||||||
"x-amz-object-lock-retain-until-date",
|
"x-amz-object-lock-retain-until-date": true,
|
||||||
"expires",
|
"expires": true,
|
||||||
"x-amz-replication-status",
|
"x-amz-replication-status": true,
|
||||||
// Add more supported headers here.
|
// Add more supported headers here.
|
||||||
|
// Must be lower case.
|
||||||
}
|
}
|
||||||
|
|
||||||
// isStorageClassHeader returns true if the header is a supported storage class header
|
// isStorageClassHeader returns true if the header is a supported storage class header
|
||||||
@ -419,34 +454,24 @@ func isStorageClassHeader(headerKey string) bool {
|
|||||||
|
|
||||||
// isStandardHeader returns true if header is a supported header and not a custom header
|
// isStandardHeader returns true if header is a supported header and not a custom header
|
||||||
func isStandardHeader(headerKey string) bool {
|
func isStandardHeader(headerKey string) bool {
|
||||||
key := strings.ToLower(headerKey)
|
return supportedHeaders[strings.ToLower(headerKey)]
|
||||||
for _, header := range supportedHeaders {
|
|
||||||
if strings.ToLower(header) == key {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// sseHeaders is list of server side encryption headers
|
// sseHeaders is list of server side encryption headers
|
||||||
var sseHeaders = []string{
|
var sseHeaders = map[string]bool{
|
||||||
"x-amz-server-side-encryption",
|
"x-amz-server-side-encryption": true,
|
||||||
"x-amz-server-side-encryption-aws-kms-key-id",
|
"x-amz-server-side-encryption-aws-kms-key-id": true,
|
||||||
"x-amz-server-side-encryption-context",
|
"x-amz-server-side-encryption-context": true,
|
||||||
"x-amz-server-side-encryption-customer-algorithm",
|
"x-amz-server-side-encryption-customer-algorithm": true,
|
||||||
"x-amz-server-side-encryption-customer-key",
|
"x-amz-server-side-encryption-customer-key": true,
|
||||||
"x-amz-server-side-encryption-customer-key-MD5",
|
"x-amz-server-side-encryption-customer-key-md5": true,
|
||||||
|
// Add more supported headers here.
|
||||||
|
// Must be lower case.
|
||||||
}
|
}
|
||||||
|
|
||||||
// isSSEHeader returns true if header is a server side encryption header.
|
// isSSEHeader returns true if header is a server side encryption header.
|
||||||
func isSSEHeader(headerKey string) bool {
|
func isSSEHeader(headerKey string) bool {
|
||||||
key := strings.ToLower(headerKey)
|
return sseHeaders[strings.ToLower(headerKey)]
|
||||||
for _, h := range sseHeaders {
|
|
||||||
if strings.ToLower(h) == key {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
|
// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
|
||||||
@ -486,3 +511,79 @@ func (m hashWrapper) Close() {
|
|||||||
}
|
}
|
||||||
m.Hash = nil
|
m.Hash = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
|
||||||
|
const (
|
||||||
|
letterIdxBits = 6 // 6 bits to represent a letter index
|
||||||
|
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
|
||||||
|
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
|
||||||
|
)
|
||||||
|
|
||||||
|
// randString generates random names and prepends them with a known prefix.
|
||||||
|
func randString(n int, src rand.Source, prefix string) string {
|
||||||
|
b := make([]byte, n)
|
||||||
|
// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
|
||||||
|
for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
|
||||||
|
if remain == 0 {
|
||||||
|
cache, remain = src.Int63(), letterIdxMax
|
||||||
|
}
|
||||||
|
if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
|
||||||
|
b[i] = letterBytes[idx]
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
cache >>= letterIdxBits
|
||||||
|
remain--
|
||||||
|
}
|
||||||
|
return prefix + string(b[0:30-len(prefix)])
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNetworkOrHostDown - if there was a network error or if the host is down.
|
||||||
|
// expectTimeouts indicates that *context* timeouts are expected and does not
|
||||||
|
// indicate a downed host. Other timeouts still returns down.
|
||||||
|
func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, context.Canceled) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if expectTimeouts && errors.Is(err, context.DeadlineExceeded) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// We need to figure if the error either a timeout
|
||||||
|
// or a non-temporary error.
|
||||||
|
urlErr := &url.Error{}
|
||||||
|
if errors.As(err, &urlErr) {
|
||||||
|
switch urlErr.Err.(type) {
|
||||||
|
case *net.DNSError, *net.OpError, net.UnknownNetworkError:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var e net.Error
|
||||||
|
if errors.As(err, &e) {
|
||||||
|
if e.Timeout() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback to other mechanisms.
|
||||||
|
switch {
|
||||||
|
case strings.Contains(err.Error(), "Connection closed by foreign host"):
|
||||||
|
return true
|
||||||
|
case strings.Contains(err.Error(), "TLS handshake timeout"):
|
||||||
|
// If error is - tlsHandshakeTimeoutError.
|
||||||
|
return true
|
||||||
|
case strings.Contains(err.Error(), "i/o timeout"):
|
||||||
|
// If error is - tcp timeoutError.
|
||||||
|
return true
|
||||||
|
case strings.Contains(err.Error(), "connection timed out"):
|
||||||
|
// If err is a net.Dial timeout.
|
||||||
|
return true
|
||||||
|
case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"):
|
||||||
|
// Denial errors
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
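IsNetworkOrHostDown, newly added above, classifies transport-level failures (DNS, dial, timeout, 503) separately from context cancellation. A retry helper built on it might look roughly like this; the loop and the dialed endpoint are illustrative, only the IsNetworkOrHostDown call comes from the diff:

package main

import (
	"log"
	"net"
	"time"

	"github.com/minio/minio-go/v7"
)

// withRetry retries op only when the failure looks like a network or host outage.
func withRetry(op func() error) error {
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		if !minio.IsNetworkOrHostDown(err, false) {
			return err // not a transport problem, do not retry
		}
		time.Sleep(time.Second << attempt) // simple exponential backoff
	}
	return err
}

func main() {
	err := withRetry(func() error {
		_, e := net.DialTimeout("tcp", "203.0.113.1:9000", 2*time.Second)
		return e
	})
	if err != nil {
		log.Println("gave up:", err)
	}
}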
vendor/github.com/modern-go/reflect2/.travis.yml (generated, vendored): 2 lines changed
@@ -1,7 +1,7 @@
 language: go

 go:
-- 1.8.x
+- 1.9.x
 - 1.x

 before_install:
vendor/github.com/modern-go/reflect2/Gopkg.lock (generated, vendored): 8 lines changed
@@ -1,15 +1,9 @@
 # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.

-
-[[projects]]
-name = "github.com/modern-go/concurrent"
-packages = ["."]
-revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
-version = "1.0.0"

 [solve-meta]
 analyzer-name = "dep"
 analyzer-version = 1
-inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7"
+input-imports = []
 solver-name = "gps-cdcl"
 solver-version = 1
vendor/github.com/modern-go/reflect2/Gopkg.toml (generated, vendored): 4 lines changed
@@ -26,10 +26,6 @@

 ignored = []

-[[constraint]]
-name = "github.com/modern-go/concurrent"
-version = "1.0.0"
-
 [prune]
 go-tests = true
 unused-packages = true
vendor/github.com/modern-go/reflect2/go_above_118.go (new file, generated, vendored): 23 lines
@@ -0,0 +1,23 @@
+//+build go1.18
+
+package reflect2
+
+import (
+"unsafe"
+)
+
+// m escapes into the return value, but the caller of mapiterinit
+// doesn't let the return value escape.
+//go:noescape
+//go:linkname mapiterinit reflect.mapiterinit
+func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer, it *hiter)
+
+func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
+var it hiter
+mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj), &it)
+return &UnsafeMapIterator{
+hiter: &it,
+pKeyRType: type2.pKeyRType,
+pElemRType: type2.pElemRType,
+}
+}
vendor/github.com/modern-go/reflect2/go_above_17.go (deleted file, generated, vendored): 8 lines
@@ -1,8 +0,0 @@
-//+build go1.7
-
-package reflect2
-
-import "unsafe"
-
-//go:linkname resolveTypeOff reflect.resolveTypeOff
-func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
vendor/github.com/modern-go/reflect2/go_above_19.go (generated, vendored): 3 lines changed
@@ -6,6 +6,9 @@ import (
 "unsafe"
 )

+//go:linkname resolveTypeOff reflect.resolveTypeOff
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
 //go:linkname makemap reflect.makemap
 func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer)

vendor/github.com/modern-go/reflect2/go_below_118.go (new file, generated, vendored): 21 lines
@@ -0,0 +1,21 @@
+//+build !go1.18
+
+package reflect2
+
+import (
+"unsafe"
+)
+
+// m escapes into the return value, but the caller of mapiterinit
+// doesn't let the return value escape.
+//go:noescape
+//go:linkname mapiterinit reflect.mapiterinit
+func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) (val *hiter)
+
+func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
+return &UnsafeMapIterator{
+hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)),
+pKeyRType: type2.pKeyRType,
+pElemRType: type2.pElemRType,
+}
+}