From 075a84427f6332aab707d283ad770d69f8816032 Mon Sep 17 00:00:00 2001
From: Wim
Date: Mon, 19 Oct 2020 23:40:00 +0200
Subject: [PATCH] Update vendor (#1265)

---
 go.mod | 8 +-
 go.sum | 335 +++-
 .../go-asn1-ber/asn1-ber/.travis.yml | 73 +-
 vendor/github.com/go-asn1-ber/asn1-ber/ber.go | 198 +-
 .../go-asn1-ber/asn1-ber/content_int.go | 2 +-
 .../go-asn1-ber/asn1-ber/generalizedTime.go | 105 ++
 .../github.com/go-asn1-ber/asn1-ber/header.go | 23 +-
 .../github.com/go-asn1-ber/asn1-ber/length.go | 12 +-
 .../github.com/go-asn1-ber/asn1-ber/real.go | 157 ++
 .../github.com/go-asn1-ber/asn1-ber/util.go | 2 +-
 vendor/github.com/hashicorp/errwrap/LICENSE | 354 ++++
 vendor/github.com/hashicorp/errwrap/README.md | 89 +
 .../github.com/hashicorp/errwrap/errwrap.go | 169 ++
 vendor/github.com/hashicorp/errwrap/go.mod | 1 +
 .../hashicorp/go-multierror/.travis.yml | 12 +
 .../hashicorp/go-multierror/LICENSE | 353 ++++
 .../hashicorp/go-multierror/Makefile | 31 +
 .../hashicorp/go-multierror/README.md | 131 ++
 .../hashicorp/go-multierror/append.go | 41 +
 .../hashicorp/go-multierror/flatten.go | 26 +
 .../hashicorp/go-multierror/format.go | 27 +
 .../github.com/hashicorp/go-multierror/go.mod | 5 +
 .../github.com/hashicorp/go-multierror/go.sum | 2 +
 .../hashicorp/go-multierror/group.go | 38 +
 .../hashicorp/go-multierror/multierror.go | 118 ++
 .../hashicorp/go-multierror/prefix.go | 37 +
 .../hashicorp/go-multierror/sort.go | 16 +
 .../go-windows-terminal-sequences/go.mod | 1 -
 .../sequences.go | 35 -
 .../sequences_dummy.go | 11 -
 vendor/github.com/mattermost/logr/.gitignore | 36 +
 vendor/github.com/mattermost/logr/.travis.yml | 4 +
 vendor/github.com/mattermost/logr/LICENSE | 21 +
 vendor/github.com/mattermost/logr/README.md | 193 ++
 vendor/github.com/mattermost/logr/config.go | 11 +
 vendor/github.com/mattermost/logr/const.go | 34 +
 vendor/github.com/mattermost/logr/filter.go | 26 +
 .../github.com/mattermost/logr/format/json.go | 273 +++
 .../mattermost/logr/format/plain.go | 75 +
 .../github.com/mattermost/logr/formatter.go | 119 ++
 vendor/github.com/mattermost/logr/go.mod | 11 +
 vendor/github.com/mattermost/logr/go.sum | 174 ++
 .../github.com/mattermost/logr/levelcache.go | 98 +
 .../github.com/mattermost/logr/levelcustom.go | 45 +
 vendor/github.com/mattermost/logr/levelstd.go | 37 +
 vendor/github.com/mattermost/logr/logger.go | 218 +++
 vendor/github.com/mattermost/logr/logr.go | 664 +++++++
 vendor/github.com/mattermost/logr/logrec.go | 189 ++
 vendor/github.com/mattermost/logr/metrics.go | 117 ++
 vendor/github.com/mattermost/logr/target.go | 299 +++
 .../github.com/mattermost/logr/target/file.go | 87 +
 .../mattermost/logr/target/syslog.go | 89 +
 .../mattermost/logr/target/writer.go | 40 +
 vendor/github.com/mattermost/logr/timeout.go | 34 +
 .../mattermost-server/v5/LICENSE.txt | 2 +-
 .../mattermost-server/v5/NOTICE.txt | 142 ++
 .../mattermost-server/v5/mlog/default.go | 44 +
 .../mattermost-server/v5/mlog/errors.go | 30 +
 .../mattermost-server/v5/mlog/global.go | 53 +-
 .../mattermost-server/v5/mlog/levels.go | 39 +
 .../mattermost-server/v5/mlog/log.go | 161 +-
 .../mattermost-server/v5/mlog/logr.go | 247 +++
 .../mattermost-server/v5/mlog/syslog.go | 142 ++
 .../mattermost-server/v5/mlog/tcp.go | 274 +++
 .../v5/mlog/test-tls-client-cert.pem | 43 +
 .../mattermost-server/v5/mlog/testing.go | 3 +-
 .../mattermost-server/v5/model/bot.go | 7 +-
 .../mattermost-server/v5/model/channel.go | 18 +-
 .../v5/model/channel_member_history_result.go | 7 +-
 .../v5/model/channel_search.go | 17 +-
 .../v5/model/channel_sidebar.go | 111 ++
.../mattermost-server/v5/model/client4.go | 573 +++++- .../v5/model/cluster_message.go | 10 + .../mattermost-server/v5/model/command.go | 49 +- .../v5/model/command_args.go | 4 +- .../mattermost-server/v5/model/config.go | 1033 ++++++----- .../mattermost-server/v5/model/file_info.go | 12 +- .../mattermost-server/v5/model/group.go | 11 + .../mattermost-server/v5/model/integrity.go | 58 + .../mattermost-server/v5/model/job.go | 6 + .../mattermost-server/v5/model/ldap.go | 4 +- .../mattermost-server/v5/model/license.go | 14 +- .../v5/model/link_metadata.go | 4 +- .../mattermost-server/v5/model/migration.go | 4 + .../v5/model/outgoing_webhook.go | 3 + .../mattermost-server/v5/model/permission.go | 618 +++++-- .../mattermost-server/v5/model/post.go | 16 +- .../mattermost-server/v5/model/preference.go | 1 + .../v5/model/product_notices.go | 213 +++ .../v5/model/push_notification.go | 1 + .../mattermost-server/v5/model/role.go | 277 ++- .../mattermost-server/v5/model/saml.go | 1 + .../v5/model/search_params.go | 11 + .../v5/model/serialized_gen.go | 1622 +++++++++++++++++ .../mattermost-server/v5/model/session.go | 24 +- .../mattermost-server/v5/model/status.go | 3 +- .../mattermost-server/v5/model/system.go | 151 +- .../mattermost-server/v5/model/team_search.go | 9 +- .../v5/model/typing_request.go | 25 + .../v5/model/upload_session.go | 141 ++ .../mattermost-server/v5/model/user.go | 6 + .../mattermost-server/v5/model/user_count.go | 8 + .../mattermost-server/v5/model/user_get.go | 8 + .../mattermost-server/v5/model/user_search.go | 30 +- .../mattermost-server/v5/model/utils.go | 8 +- .../mattermost-server/v5/model/version.go | 3 + .../v5/model/websocket_message.go | 6 + .../mitchellh/mapstructure/CHANGELOG.md | 17 + .../mitchellh/mapstructure/mapstructure.go | 159 +- vendor/github.com/pborman/uuid/time.go | 2 +- vendor/github.com/pborman/uuid/version4.go | 2 +- vendor/github.com/pelletier/go-toml/README.md | 12 +- .../pelletier/go-toml/example-crlf.toml | 1 + .../github.com/pelletier/go-toml/example.toml | 1 + vendor/github.com/pelletier/go-toml/go.mod | 2 +- vendor/github.com/pelletier/go-toml/go.sum | 2 + .../pelletier/go-toml/keysparsing.go | 3 +- vendor/github.com/pelletier/go-toml/lexer.go | 59 +- .../github.com/pelletier/go-toml/marshal.go | 442 ++++- vendor/github.com/pelletier/go-toml/parser.go | 24 +- vendor/github.com/pelletier/go-toml/token.go | 9 +- vendor/github.com/pelletier/go-toml/toml.go | 2 + .../pelletier/go-toml/tomltree_write.go | 60 +- .../LICENSE => philhofer/fwd/LICENSE.md} | 8 +- vendor/github.com/philhofer/fwd/README.md | 315 ++++ vendor/github.com/philhofer/fwd/reader.go | 383 ++++ vendor/github.com/philhofer/fwd/writer.go | 224 +++ .../philhofer/fwd/writer_appengine.go | 5 + .../github.com/philhofer/fwd/writer_unsafe.go | 18 + vendor/github.com/sirupsen/logrus/.gitignore | 2 + .../github.com/sirupsen/logrus/buffer_pool.go | 52 + vendor/github.com/sirupsen/logrus/entry.go | 14 +- vendor/github.com/sirupsen/logrus/exported.go | 45 + vendor/github.com/sirupsen/logrus/go.mod | 3 +- vendor/github.com/sirupsen/logrus/go.sum | 6 +- vendor/github.com/sirupsen/logrus/logger.go | 54 +- .../sirupsen/logrus/terminal_check_windows.go | 29 +- vendor/github.com/slack-go/slack/apps.go | 43 + vendor/github.com/slack-go/slack/dialog.go | 4 +- .../github.com/slack-go/slack/interactions.go | 59 + vendor/github.com/slack-go/slack/reminders.go | 30 + vendor/github.com/slack-go/slack/slack.go | 16 +- vendor/github.com/slack-go/slack/slash.go | 2 + vendor/github.com/spf13/afero/.gitignore 
| 2 + vendor/github.com/spf13/afero/.travis.yml | 43 +- vendor/github.com/spf13/afero/README.md | 32 +- vendor/github.com/spf13/afero/appveyor.yml | 4 +- vendor/github.com/spf13/afero/basepath.go | 26 + vendor/github.com/spf13/afero/const_bsds.go | 2 +- .../github.com/spf13/afero/const_win_unix.go | 1 + .../github.com/spf13/afero/copyOnWriteFs.go | 20 + vendor/github.com/spf13/afero/go.mod | 8 +- vendor/github.com/spf13/afero/go.sum | 27 + vendor/github.com/spf13/afero/ioutil.go | 32 +- vendor/github.com/spf13/afero/match.go | 2 +- vendor/github.com/spf13/afero/mem/file.go | 8 +- vendor/github.com/spf13/afero/memmap.go | 48 +- vendor/github.com/spf13/afero/os.go | 8 + vendor/github.com/spf13/afero/readonlyfs.go | 12 + vendor/github.com/spf13/afero/regexpfs.go | 3 + vendor/github.com/spf13/afero/symlink.go | 55 + .../README.md => tinylib/msgp/LICENSE} | 42 +- .../tinylib/msgp/msgp/advise_linux.go | 24 + .../tinylib/msgp/msgp/advise_other.go | 17 + .../github.com/tinylib/msgp/msgp/circular.go | 39 + vendor/github.com/tinylib/msgp/msgp/defs.go | 142 ++ vendor/github.com/tinylib/msgp/msgp/edit.go | 242 +++ vendor/github.com/tinylib/msgp/msgp/elsize.go | 99 + vendor/github.com/tinylib/msgp/msgp/errors.go | 314 ++++ .../github.com/tinylib/msgp/msgp/extension.go | 549 ++++++ vendor/github.com/tinylib/msgp/msgp/file.go | 92 + .../github.com/tinylib/msgp/msgp/file_port.go | 47 + .../github.com/tinylib/msgp/msgp/integers.go | 174 ++ vendor/github.com/tinylib/msgp/msgp/json.go | 568 ++++++ .../tinylib/msgp/msgp/json_bytes.go | 363 ++++ vendor/github.com/tinylib/msgp/msgp/number.go | 267 +++ vendor/github.com/tinylib/msgp/msgp/purego.go | 15 + vendor/github.com/tinylib/msgp/msgp/read.go | 1358 ++++++++++++++ .../tinylib/msgp/msgp/read_bytes.go | 1197 ++++++++++++ vendor/github.com/tinylib/msgp/msgp/size.go | 38 + vendor/github.com/tinylib/msgp/msgp/unsafe.go | 41 + vendor/github.com/tinylib/msgp/msgp/write.go | 845 +++++++++ .../tinylib/msgp/msgp/write_bytes.go | 411 +++++ vendor/github.com/wiggin77/cfg/.gitignore | 12 + vendor/github.com/wiggin77/cfg/.travis.yml | 5 + vendor/github.com/wiggin77/cfg/LICENSE | 21 + vendor/github.com/wiggin77/cfg/README.md | 43 + vendor/github.com/wiggin77/cfg/config.go | 366 ++++ vendor/github.com/wiggin77/cfg/go.mod | 5 + vendor/github.com/wiggin77/cfg/go.sum | 2 + vendor/github.com/wiggin77/cfg/ini/ini.go | 167 ++ vendor/github.com/wiggin77/cfg/ini/parser.go | 142 ++ vendor/github.com/wiggin77/cfg/ini/section.go | 109 ++ vendor/github.com/wiggin77/cfg/listener.go | 11 + vendor/github.com/wiggin77/cfg/nocopy.go | 11 + vendor/github.com/wiggin77/cfg/source.go | 58 + vendor/github.com/wiggin77/cfg/srcfile.go | 63 + vendor/github.com/wiggin77/cfg/srcmap.go | 78 + .../github.com/wiggin77/cfg/timeconv/parse.go | 108 ++ vendor/github.com/wiggin77/merror/.gitignore | 12 + vendor/github.com/wiggin77/merror/LICENSE | 21 + vendor/github.com/wiggin77/merror/README.md | 2 + vendor/github.com/wiggin77/merror/format.go | 43 + vendor/github.com/wiggin77/merror/go.mod | 1 + vendor/github.com/wiggin77/merror/merror.go | 87 + vendor/github.com/wiggin77/srslog/.gitignore | 1 + vendor/github.com/wiggin77/srslog/.travis.yml | 15 + .../wiggin77/srslog/CODE_OF_CONDUCT.md | 50 + vendor/github.com/wiggin77/srslog/LICENSE | 27 + vendor/github.com/wiggin77/srslog/README.md | 147 ++ .../github.com/wiggin77/srslog/constants.go | 68 + vendor/github.com/wiggin77/srslog/dialer.go | 104 ++ .../github.com/wiggin77/srslog/formatter.go | 58 + vendor/github.com/wiggin77/srslog/framer.go | 24 + 
vendor/github.com/wiggin77/srslog/go.mod | 3 + vendor/github.com/wiggin77/srslog/logger.go | 13 + vendor/github.com/wiggin77/srslog/net_conn.go | 76 + vendor/github.com/wiggin77/srslog/srslog.go | 125 ++ .../github.com/wiggin77/srslog/srslog_unix.go | 54 + vendor/github.com/wiggin77/srslog/writer.go | 201 ++ vendor/golang.org/x/image/webp/decode.go | 17 +- vendor/gopkg.in/ini.v1/.travis.yml | 21 - vendor/gopkg.in/ini.v1/README.md | 6 +- vendor/gopkg.in/ini.v1/codecov.yml | 9 + vendor/gopkg.in/ini.v1/data_source.go | 4 +- vendor/gopkg.in/ini.v1/file.go | 11 +- vendor/gopkg.in/ini.v1/ini.go | 12 +- vendor/gopkg.in/ini.v1/key.go | 120 +- vendor/gopkg.in/ini.v1/parser.go | 21 +- vendor/gopkg.in/ini.v1/section.go | 10 +- vendor/gopkg.in/ini.v1/struct.go | 24 +- vendor/gopkg.in/yaml.v2/apic.go | 1 + vendor/gopkg.in/yaml.v3/.travis.yml | 1 + vendor/gopkg.in/yaml.v3/apic.go | 1 + vendor/gopkg.in/yaml.v3/decode.go | 63 +- vendor/gopkg.in/yaml.v3/emitterc.go | 54 +- vendor/gopkg.in/yaml.v3/encode.go | 25 +- vendor/gopkg.in/yaml.v3/parserc.go | 48 +- vendor/gopkg.in/yaml.v3/scannerc.go | 21 +- vendor/gopkg.in/yaml.v3/yaml.go | 35 +- vendor/gopkg.in/yaml.v3/yamlh.go | 2 + vendor/modules.txt | 46 +- 242 files changed, 22338 insertions(+), 1486 deletions(-) create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/real.go create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE create mode 100644 vendor/github.com/hashicorp/errwrap/README.md create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go create mode 100644 vendor/github.com/hashicorp/errwrap/go.mod create mode 100644 vendor/github.com/hashicorp/go-multierror/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go create mode 100644 vendor/github.com/hashicorp/go-multierror/go.mod create mode 100644 vendor/github.com/hashicorp/go-multierror/go.sum create mode 100644 vendor/github.com/hashicorp/go-multierror/group.go create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go delete mode 100644 vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod delete mode 100644 vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go delete mode 100644 vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go create mode 100644 vendor/github.com/mattermost/logr/.gitignore create mode 100644 vendor/github.com/mattermost/logr/.travis.yml create mode 100644 vendor/github.com/mattermost/logr/LICENSE create mode 100644 vendor/github.com/mattermost/logr/README.md create mode 100644 vendor/github.com/mattermost/logr/config.go create mode 100644 vendor/github.com/mattermost/logr/const.go create mode 100644 vendor/github.com/mattermost/logr/filter.go create mode 100644 vendor/github.com/mattermost/logr/format/json.go create mode 100644 vendor/github.com/mattermost/logr/format/plain.go create mode 100644 vendor/github.com/mattermost/logr/formatter.go create mode 100644 vendor/github.com/mattermost/logr/go.mod 
create mode 100644 vendor/github.com/mattermost/logr/go.sum create mode 100644 vendor/github.com/mattermost/logr/levelcache.go create mode 100644 vendor/github.com/mattermost/logr/levelcustom.go create mode 100644 vendor/github.com/mattermost/logr/levelstd.go create mode 100644 vendor/github.com/mattermost/logr/logger.go create mode 100644 vendor/github.com/mattermost/logr/logr.go create mode 100644 vendor/github.com/mattermost/logr/logrec.go create mode 100644 vendor/github.com/mattermost/logr/metrics.go create mode 100644 vendor/github.com/mattermost/logr/target.go create mode 100644 vendor/github.com/mattermost/logr/target/file.go create mode 100644 vendor/github.com/mattermost/logr/target/syslog.go create mode 100644 vendor/github.com/mattermost/logr/target/writer.go create mode 100644 vendor/github.com/mattermost/logr/timeout.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/integrity.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/product_notices.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/serialized_gen.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/upload_session.go rename vendor/github.com/{konsorten/go-windows-terminal-sequences/LICENSE => philhofer/fwd/LICENSE.md} (79%) create mode 100644 vendor/github.com/philhofer/fwd/README.md create mode 100644 vendor/github.com/philhofer/fwd/reader.go create mode 100644 vendor/github.com/philhofer/fwd/writer.go create mode 100644 vendor/github.com/philhofer/fwd/writer_appengine.go create mode 100644 vendor/github.com/philhofer/fwd/writer_unsafe.go create mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go create mode 100644 vendor/github.com/slack-go/slack/apps.go create mode 100644 vendor/github.com/spf13/afero/.gitignore create mode 100644 vendor/github.com/spf13/afero/symlink.go rename vendor/github.com/{konsorten/go-windows-terminal-sequences/README.md => tinylib/msgp/LICENSE} (50%) create mode 100644 vendor/github.com/tinylib/msgp/msgp/advise_linux.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/advise_other.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/circular.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/defs.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/edit.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/elsize.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/errors.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/extension.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/file.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/file_port.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/integers.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/json.go create mode 100644 
vendor/github.com/tinylib/msgp/msgp/json_bytes.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/number.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/purego.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/read.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/read_bytes.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/size.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/unsafe.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/write.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/write_bytes.go create mode 100644 vendor/github.com/wiggin77/cfg/.gitignore create mode 100644 vendor/github.com/wiggin77/cfg/.travis.yml create mode 100644 vendor/github.com/wiggin77/cfg/LICENSE create mode 100644 vendor/github.com/wiggin77/cfg/README.md create mode 100644 vendor/github.com/wiggin77/cfg/config.go create mode 100644 vendor/github.com/wiggin77/cfg/go.mod create mode 100644 vendor/github.com/wiggin77/cfg/go.sum create mode 100644 vendor/github.com/wiggin77/cfg/ini/ini.go create mode 100644 vendor/github.com/wiggin77/cfg/ini/parser.go create mode 100644 vendor/github.com/wiggin77/cfg/ini/section.go create mode 100644 vendor/github.com/wiggin77/cfg/listener.go create mode 100644 vendor/github.com/wiggin77/cfg/nocopy.go create mode 100644 vendor/github.com/wiggin77/cfg/source.go create mode 100644 vendor/github.com/wiggin77/cfg/srcfile.go create mode 100644 vendor/github.com/wiggin77/cfg/srcmap.go create mode 100644 vendor/github.com/wiggin77/cfg/timeconv/parse.go create mode 100644 vendor/github.com/wiggin77/merror/.gitignore create mode 100644 vendor/github.com/wiggin77/merror/LICENSE create mode 100644 vendor/github.com/wiggin77/merror/README.md create mode 100644 vendor/github.com/wiggin77/merror/format.go create mode 100644 vendor/github.com/wiggin77/merror/go.mod create mode 100644 vendor/github.com/wiggin77/merror/merror.go create mode 100644 vendor/github.com/wiggin77/srslog/.gitignore create mode 100644 vendor/github.com/wiggin77/srslog/.travis.yml create mode 100644 vendor/github.com/wiggin77/srslog/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/wiggin77/srslog/LICENSE create mode 100644 vendor/github.com/wiggin77/srslog/README.md create mode 100644 vendor/github.com/wiggin77/srslog/constants.go create mode 100644 vendor/github.com/wiggin77/srslog/dialer.go create mode 100644 vendor/github.com/wiggin77/srslog/formatter.go create mode 100644 vendor/github.com/wiggin77/srslog/framer.go create mode 100644 vendor/github.com/wiggin77/srslog/go.mod create mode 100644 vendor/github.com/wiggin77/srslog/logger.go create mode 100644 vendor/github.com/wiggin77/srslog/net_conn.go create mode 100644 vendor/github.com/wiggin77/srslog/srslog.go create mode 100644 vendor/github.com/wiggin77/srslog/srslog_unix.go create mode 100644 vendor/github.com/wiggin77/srslog/writer.go delete mode 100644 vendor/gopkg.in/ini.v1/.travis.yml create mode 100644 vendor/gopkg.in/ini.v1/codecov.yml diff --git a/go.mod b/go.mod index 8cf37bb5..f5da29b0 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/matterbridge/go-xmpp v0.0.0-20200418225040-c8a3a57b4050 github.com/matterbridge/gozulipbot v0.0.0-20200820220548-be5824faa913 github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba - github.com/mattermost/mattermost-server/v5 v5.25.2 + github.com/mattermost/mattermost-server/v5 v5.28.1 github.com/mattn/godown v0.0.0-20200217152941-afc959f6a561 github.com/mgutz/ansi 
v0.0.0-20170206155736-9520e82c474b // indirect github.com/missdeer/golib v1.0.3 @@ -39,8 +39,8 @@ require ( github.com/russross/blackfriday v1.5.2 github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca github.com/shazow/ssh-chat v1.8.3-0.20200308224626-80ddf1f43a98 - github.com/sirupsen/logrus v1.6.0 - github.com/slack-go/slack v0.7.0 + github.com/sirupsen/logrus v1.7.0 + github.com/slack-go/slack v0.7.2 github.com/spf13/viper v1.7.1 github.com/stretchr/testify v1.6.1 github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50 @@ -48,7 +48,7 @@ require ( github.com/x-cray/logrus-prefixed-formatter v0.5.2 // indirect github.com/yaegashi/msgraph.go v0.1.4 github.com/zfjagann/golang-ring v0.0.0-20190304061218-d34796e0a6c2 - golang.org/x/image v0.0.0-20200801110659-972c09e46d76 + golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 gomod.garykim.dev/nc-talk v0.1.5 gopkg.in/olahol/melody.v1 v1.0.0-20170518105555-d52139073376 diff --git a/go.sum b/go.sum index e0169839..b4ac8f1b 100644 --- a/go.sum +++ b/go.sum @@ -44,6 +44,7 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/42wim/go-gitter v0.0.0-20170828205020-017310c2d557 h1:IZtuWGfzQnKnCSu+vl8WGLhpVQ5Uvy3rlSwqXSg+sQg= github.com/42wim/go-gitter v0.0.0-20170828205020-017310c2d557/go.mod h1:jL0YSXMs/txjtGJ4PWrmETOk6KUHMDPMshgQZlTeB3Y= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-sdk-for-go v26.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-autorest v11.5.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Baozisoftware/qrcode-terminal-go v0.0.0-20170407111555-c0650d8dff0f h1:2dk3eOnYllh+wUOuDhOoC2vUVoJF/5z478ryJ+wzEII= @@ -51,12 +52,18 @@ github.com/Baozisoftware/qrcode-terminal-go v0.0.0-20170407111555-c0650d8dff0f/g github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= +github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E= github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/squirrel v1.2.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA= +github.com/Masterminds/semver/v3 v3.1.0/go.mod 
h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/squirrel v1.4.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA= github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -71,25 +78,36 @@ github.com/Rhymen/go-whatsapp/examples/restoreSession v0.0.0-20190325075644-cc25 github.com/Rhymen/go-whatsapp/examples/sendImage v0.0.0-20190325075644-cc2581bbf24d/go.mod h1:RdiyhanVEGXTam+mZ3k6Y3VDCCvXYCwReOoxGozqhHw= github.com/Rhymen/go-whatsapp/examples/sendTextMessages v0.0.0-20190325075644-cc2581bbf24d/go.mod h1:suwzklatySS3Q0+NCxCDh5hYfgXdQUWU1DNcxwAxStM= github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= +github.com/RoaringBitmap/roaring v0.5.0/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexcesaro/log v0.0.0-20150915221235-61e686294e58/go.mod h1:YNfsMyWSs+h+PaYkxGeMVmVCX75Zj/pqdjbu12ciCYE= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/armon/go-metrics v0.3.4/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/avct/uasurfer v0.0.0-20191028135549-26b5daa857f1/go.mod h1:noBAuukeYOXa0aXGqxr24tADqkwDO2KRD15FsuaZ5a8= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.19.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -98,17 +116,21 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blevesearch/bleve v1.0.7/go.mod h1:3xvmBtaw12Y4C9iA1RTzwWCof5j5HjydjCTiDE2TeE0= +github.com/blevesearch/bleve v1.0.9/go.mod h1:tb04/rbU29clbtNgorgFd8XdJea4x3ybYaOjWKr+UBU= github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ= github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M= github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ= github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs= -github.com/blevesearch/zap/v11 v11.0.7/go.mod h1:bJoY56fdU2m/IP4LLz/1h4jY2thBoREvoqbuJ8zhm9k= -github.com/blevesearch/zap/v12 v12.0.7/go.mod h1:70DNK4ZN4tb42LubeDbfpp6xnm8g3ROYVvvZ6pEoXD8= +github.com/blevesearch/zap/v11 v11.0.9/go.mod h1:47hzinvmY2EvvJruzsSCJpro7so8L1neseaGjrtXHOY= +github.com/blevesearch/zap/v12 v12.0.9/go.mod h1:paQuvxy7yXor+0Mx8p2KNmJgygQbQNN+W6HRfL5Hvwc= +github.com/blevesearch/zap/v13 v13.0.1/go.mod h1:XmyNLMvMf8Z5FjLANXwUeDW3e1+o77TTGUWrth7T9WI= +github.com/blevesearch/zap/v14 v14.0.0/go.mod h1:sUc/gPGJlFbSQ2ZUh/wGRYwkKx+Dg/5p+dd+eq6QMXk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -118,25 +140,32 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= github.com/couchbase/vellum v1.0.1/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= @@ -146,54 +175,73 @@ 
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dchote/go-openal v0.0.0-20171116030048-f4a9a141d372/go.mod h1:74z+CYu2/mx4N+mcIS/rsvfAxBPBV9uv8zRAnwyFkdI= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/dgoogauth v0.0.0-20190221195224-5a805980a5f3/go.mod h1:hEfFauPHz7+NnjR/yHJGhrKo1Za+zStgwUETx3yzqgY= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/die-net/lrucache v0.0.0-20181227122439-19a39ef22a11/go.mod h1:ew0MSjCVDdtGMjF3kzLK9hwdgF5mOE8SbYVF3Rc7mkU= github.com/disintegration/imaging v1.6.0/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ= github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dyatlov/go-opengraph v0.0.0-20180429202543-816b6608b3c8 h1:6muCmMJat6z7qptVrIf/+OWPxsjAfvhw5/6t+FwEkgg= github.com/dyatlov/go-opengraph v0.0.0-20180429202543-816b6608b3c8/go.mod h1:nYia/MIs9OyvXXYboPmNOj0gVWo97Wx0sde+ZuKkoM4= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod 
h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/getsentry/sentry-go v0.7.0/go.mod h1:pLFpD2Y5RHIKF9Bw3KH6/68DeN2K/XBJd8awjdPnUwg= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-asn1-ber/asn1-ber v1.3.2-0.20191121212151-29be175fc3a3/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-asn1-ber/asn1-ber v1.4.1 h1:qP/QDxOtmMoJVgXHCXNzDpA0+wkgYB2x5QoLMVOciyw= -github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= +github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gorp/gorp v2.0.0+incompatible/go.mod h1:7IfkAQnO7jfT/9IQ3R9wL1dFhukN6aQxzKTHnkxzA/E= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= @@ -203,11 +251,16 @@ github.com/go-telegram-bot-api/telegram-bot-api v1.0.1-0.20200524105306-7434b045 github.com/go-telegram-bot-api/telegram-bot-api v1.0.1-0.20200524105306-7434b0456e81/go.mod h1:lDm2E64X4OjFdBUA4hlN4mEvbSitvhJdKw7rsA8KHgI= github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -240,6 +293,7 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 h1:LP/6EfrZ/LyCc+SXvANDrIJ4sP9u2NAtqyv6QknetNQ= github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -280,14 +334,18 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa github.com/gopherjs/gopherjs 
v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/schema v1.1.0 h1:CamqUDOFUBqzrvxuz2vEwo8+SUdwsluFh7IlzJh30LY= github.com/gorilla/schema v1.1.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU= github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc= github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= @@ -296,27 +354,33 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c/go.mod h1:ObS/W+h8RYb1Y7fYivughjxojTmIu5iAIjSrSLCLeqE= -github.com/hako/durafmt v0.0.0-20191009132224-3f39dc1ed9f4/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE= +github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= 
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-plugin v1.2.2/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= +github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -324,6 +388,7 @@ github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjG github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -337,13 +402,20 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod 
h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jamiealquiza/envy v1.1.0/go.mod h1:MP36BriGCLwEHhi1OU8E9569JNZrjWfCvzG7RsPnHus= github.com/jaytaylor/html2text v0.0.0-20200412013138-3577fbdbcff7/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= @@ -353,26 +425,44 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod 
h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= +github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= +github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= +github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0= github.com/keybase/go-keybase-chat-bot v0.0.0-20200505163032-5cacf52379da h1:LK+8uBG3kNikj664cjFt88RBmuGmonxkXv2rUVfbqz4= github.com/keybase/go-keybase-chat-bot v0.0.0-20200505163032-5cacf52379da/go.mod h1:xJA+X9ZVyT/irGldcb7q1XnJBq5F9s5H9h2L44Y+poY= github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19 h1:WjT3fLi9n8YWh/Ih8Q1LHAPsTqGddPcHqscN+PJ3i68= github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19/go.mod h1:hY+WOq6m2FpbvyrI93sMaypsttvaIL5nhVR92dTMUcQ= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -380,6 +470,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= github.com/labstack/echo/v4 v4.1.17 h1:PQIBaRplyRy3OjwILGkPg89JRtH2x5bssi59G2EL3fo= github.com/labstack/echo/v4 v4.1.17/go.mod h1:Tn2yRQL/UclUalpb5rPdXDevbkJ+lp/2svdyFBg6CHQ= github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= @@ -387,15 +478,19 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod 
h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.4.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lrstanley/girc v0.0.0-20190801035559-4fc93959e1a7 h1:BS9tqL0OCiOGuy/CYYk2gc33fxqaqh5/rhqMKu4tcYA= github.com/lrstanley/girc v0.0.0-20190801035559-4fc93959e1a7/go.mod h1:liX5MxHPrwgHaKowoLkYGwbXfYABh1jbZ6FpElbGF1I= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.3/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/marstr/guid v0.0.0-20170427235115-8bdf7d1a087c/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd h1:xVrqJK3xHREMNjwjljkAUaadalWc0rRbmVuQatzmgwg= github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s= @@ -413,47 +508,55 @@ github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d97130 github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba/go.mod h1:iXGEotOvwI1R1SjLxRc+BF5rUORTMtE0iMZBT2lxqAU= github.com/mattermost/go-i18n v1.11.0 h1:1hLKqn/ZvhZ80OekjVPGYcCrBfMz+YxNNgqS+beL7zE= github.com/mattermost/go-i18n v1.11.0/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34= -github.com/mattermost/gorp v2.0.1-0.20190301154413-3b31e9a39d05+incompatible/go.mod h1:0kX1qa3DOpaPJyOdMLeo7TcBN0QmUszj9a/VygOhDe0= +github.com/mattermost/gorp v1.6.2-0.20200624165429-2595d5e54111/go.mod h1:QCQ3U0M9T/BlAdjKFJo0I1oe/YAgbyjNdhU8bpOLafk= github.com/mattermost/gosaml2 v0.3.2/go.mod h1:Z429EIOiEi9kbq6yHoApfzlcXpa6dzRDc6pO+Vy2Ksk= github.com/mattermost/ldap v0.0.0-20191128190019-9f62ba4b8d4d h1:2DV7VIlEv6J5R5o6tUcb3ZMKJYeeZuWZL7Rv1m23TgQ= github.com/mattermost/ldap v0.0.0-20191128190019-9f62ba4b8d4d/go.mod h1:HLbgMEI5K131jpxGazJ97AxfPDt31osq36YS1oxFQPQ= -github.com/mattermost/mattermost-server/v5 v5.25.2 h1:A1nyhIbRgY6NoSqg5zQP47F3zt2KEDEBcQs0sy5fAmw= -github.com/mattermost/mattermost-server/v5 v5.25.2/go.mod h1:TVkOfVyk4wGw8j5J2IX3PDCP5R7j20IEP4FAezDK8Wk= +github.com/mattermost/logr v1.0.13 h1:6F/fM3csvH6Oy5sUpJuW7YyZSzZZAhJm5VcgKMxA2P8= +github.com/mattermost/logr v1.0.13/go.mod h1:Mt4DPu1NXMe6JxPdwCC0XBoxXmN9eXOIRPoZarU2PXs= +github.com/mattermost/mattermost-server/v5 v5.28.1 h1:zCPvSlEYCs0REHzGxaf06Dha4UnFlhBVutH3Fthw7J0= +github.com/mattermost/mattermost-server/v5 v5.28.1/go.mod h1:9FfgZY9Ywx64bzPBYo4mmR05ApyOxO+tr43eDhpWups= 
github.com/mattermost/rsc v0.0.0-20160330161541-bbaefb05eaa0/go.mod h1:nV5bfVpT//+B1RPD2JvRnxbkLmJEYXmRaaVl15fsXjs= github.com/mattermost/viper v1.0.4/go.mod h1:uc5hKG9lv4/KRwPOt2c1omOyirS/UnuA2TytiZQSFHM= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0= github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/godown v0.0.0-20200217152941-afc959f6a561 h1:0YGo77enc6tJpXQxUeQWs9bPIQPTH1lbOmc5tgRuq4o= github.com/mattn/godown v0.0.0-20200217152941-afc959f6a561/go.mod h1:/ivCKurgV/bx6yqtP/Jtc2Xmrv3beCYBvlfAUl4X5g4= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= +github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday 
v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/minio/minio-go/v6 v6.0.55/go.mod h1:KQMM+/44DSlSGSQWSfRrAZ12FVMmpWNuX37i2AX0jfI= +github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= +github.com/minio/minio-go/v7 v7.0.4/go.mod h1:CSt2ETZNs+bIIhWTse0mcZKZWMGrFU7Er7RR0TmkDYk= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/missdeer/golib v1.0.3 h1:+kz/tn1lXlPS8i+gjHHVAZC8YcgrmfiMTqELyvOwI4g= github.com/missdeer/golib v1.0.3/go.mod h1:Cys1ITPPZxIk2eTcQcKT3jDsBdhICAfrrw+ki/eRXxA= @@ -469,14 +572,15 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.2.3 h1:f/MjBEBDLttYCGfRaKBbKSRVF5aV2O6fnBpzknuE3jU= -github.com/mitchellh/mapstructure v1.2.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/monaco-io/request v1.0.4 h1:AbogA+IvPOWqyGZIFU7kSb8YS2Jv5Dnl5ncMj8cQV+o= github.com/monaco-io/request v1.0.4/go.mod h1:EmggwHktBsbJmCgwZXqy7o0H1NNsAstQBWZrFVd3xtQ= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474 h1:oKIteTqeSpenyTrOVj5zkiyCaflLa8B+CD0324otT+o= github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= github.com/mrexodia/wray v0.0.0-20160318003008-78a2c1f284ff h1:HLGD5/9UxxfEuO9DtP8gnTmNtMxbPyhYltfxsITel8g= @@ -486,79 +590,122 @@ github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOl github.com/muesli/smartcrop v0.2.1-0.20181030220600-548bbf0c0965/go.mod h1:i2fCI/UorTfgEpPPLWiFBv4pye+YAG78RwcQLUkocpI= github.com/muesli/smartcrop v0.3.0/go.mod h1:i2fCI/UorTfgEpPPLWiFBv4pye+YAG78RwcQLUkocpI= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= 
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nelsonken/gomf v0.0.0-20180504123937-a9dd2f9deae9 h1:mp6tU1r0xLostUGLkTspf/9/AiHuVD7ptyXhySkDEsE= github.com/nelsonken/gomf v0.0.0-20180504123937-a9dd2f9deae9/go.mod h1:A5SRAcpTemjGgIuBq6Kic2yHcoeUFWUinOAlMP/i9xo= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= -github.com/olivere/elastic v6.2.30+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/olivere/elastic v6.2.34+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0 h1:M76yO2HkZASFjXL0HSoZJ1AYEmQxNJmY41Jx1zNUq1Y= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/oov/psd v0.0.0-20200705094106-99303fb2511f/go.mod h1:GHI1bnmAcbp96z6LNfBJvtrjxhaXGkbsk967utPlvL8= +github.com/op/go-logging 
v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/paulrosania/go-charset v0.0.0-20190326053356-55c9d7a5834c h1:P6XGcuPTigoHf4TSu+3D/7QOQ1MbL6alNwrGhcW7sKw= github.com/paulrosania/go-charset v0.0.0-20190326053356-55c9d7a5834c/go.mod h1:YnNlZP7l4MhyGQ4CBRwv6ohZTPrUJJZtEv4ZgADkbs4= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= +github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw= +github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v0.0.0-20171120014656-2973218375c3/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.12.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/reflog/dateconstraints v0.2.1/go.mod h1:Ax8AxTBcJc3E/oVS2hd2j7RDM/5MDtuPwuR7lIHtPLo= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rickb777/date v1.12.4 h1:+6IzcCCS/1t17DrmnEvrznyq7nM8vPwir6/UhlyohKw= github.com/rickb777/date v1.12.4/go.mod h1:xP0eo/I5qmUt97yRGClHZfyLZ3ikMw6v6SU5MOGZTE0= @@ -579,9 +726,10 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI= github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v0.0.0-20180103174451-36e9d2ebbde5/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/segmentio/analytics-go v3.1.0+incompatible/go.mod h1:C7CYBtQWk4vRk2RyLu0qOcbHJ18E3F1HV2C/8JvKN48= github.com/segmentio/backo-go v0.0.0-20200129164019-23eae7c10bd3/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shazow/rateio v0.0.0-20150116013248-e8e00881e5c1 h1:Lx3BlDGFElJt4u/zKc9A3BuGYbQAGlEFyPuUA3jeMD0= @@ -615,39 +763,41 @@ github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYED github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod 
h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9 h1:lpEzuenPuO1XNTeikEmvqYFcU37GVLl8SRNblzyvGBE= github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9/go.mod h1:PLPIyL7ikehBD1OAjmKKiOEhbvWyHGaNDjquXMcYABo= -github.com/slack-go/slack v0.7.0 h1:0t+Hh446VqaazWkaCuoyayHanTi7BJKY/GFSMMBcmEA= -github.com/slack-go/slack v0.7.0/go.mod h1:FGqNzJBmxIsZURAxh2a8D21AnOVvvXZvGligs4npPUM= +github.com/slack-go/slack v0.7.2 h1:oLy2a2YqrtoHSSxbjRhrtLDGbCKcZJwgbuQ826BWxaI= +github.com/slack-go/slack v0.7.2/go.mod h1:FGqNzJBmxIsZURAxh2a8D21AnOVvvXZvGligs4npPUM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.4 h1:8q6vk3hthlpb2SouZcnBVKboxWQWMDNF38bwholZrJc= +github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= 
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -658,9 +808,12 @@ github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -675,34 +828,52 @@ github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cb github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/technoweenie/multipartstreamer v1.0.1 h1:XRztA5MXiR1TIRHxH2uNxXxaIkKQDeX7m2XsSOlQEnM= github.com/technoweenie/multipartstreamer v1.0.1/go.mod h1:jNVxdtShOxzAsukZwTSw6MDx5eUJoiEBsSvzDU9uzog= -github.com/throttled/throttled v2.2.4+incompatible/go.mod h1:0BjlrEGQmvxps+HuXLsyRdqpSRvJpq0PNIsOtqP9Nos= +github.com/throttled/throttled v2.2.5+incompatible/go.mod h1:0BjlrEGQmvxps+HuXLsyRdqpSRvJpq0PNIsOtqP9Nos= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tylerb/graceful v1.2.15/go.mod h1:LPYTbOYmUTdabwRt0TGhLllQ0MUNbs0Y5q1WXJOI9II= -github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go 
v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50 h1:uxE3GYdXIOfhMv3unJKETJEhw78gvzuQqRX/rVirc2A= github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= +github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/msgpack/v5 v5.0.0-beta.1/go.mod h1:xlngVLeyQ/Qi05oQxhQ+oTuqa03RjMwMfk/7/TCs+QI= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/wiggin77/cfg v1.0.2 h1:NBUX+iJRr+RTncTqTNvajHwzduqbhCQjEqxLHr6Fk7A= github.com/wiggin77/cfg v1.0.2/go.mod h1:b3gotba2e5bXTqTW48DwIFoLc+4lWKP7WPi/CdvZ4aE= -github.com/wiggin77/logr v1.0.4/go.mod h1:h98FF6GPfThhDrHCg063hZA1sIyOEzQ/P85wgqI0IqE= +github.com/wiggin77/merror v1.0.2 h1:V0nH9eFp64ASyaXC+pB5WpvBoCg7NUwvaCSKdzlcHqw= github.com/wiggin77/merror v1.0.2/go.mod h1:uQTcIU0Z6jRK4OwqganPYerzQxSFJ4GSHM3aurxxQpg= +github.com/wiggin77/srslog v1.0.1 h1:gA2XjSMy3DrRdX9UqLuDtuVAAshb8bE1NhX1YK0Qe+8= github.com/wiggin77/srslog v1.0.1/go.mod h1:fehkyYDq1QfuYn60TDPu9YdY2bB85VUW2mvN1WynEls= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/writeas/go-strip-markdown v2.0.1+incompatible h1:IIqxTM5Jr7RzhigcL6FkrCNfXkvbR+Nbu1ls48pXYcw= github.com/writeas/go-strip-markdown v2.0.1+incompatible/go.mod h1:Rsyu10ZhbEK9pXdk8V6MVnZmTzRG0alMNLMwa0J01fE= github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod 
h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.0.0 h1:J0TkWtiuYgtdlrkkrDLISYBQ92M+X5m4LrIIMKrbDTs= github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= @@ -711,6 +882,10 @@ github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63M github.com/yaegashi/msgraph.go v0.1.4 h1:leDXSczAbwBpYFSmmZrdByTiPoUw8dbTfNMetAjJvbw= github.com/yaegashi/msgraph.go v0.1.4/go.mod h1:vgeYhHa5skJt/3lTyjGXThTZhwbhRnGo6uUxzoJIGME= github.com/yaegashi/wtz.go v0.0.2/go.mod h1:nOLA5QXsmdkRxBkP5tljhua13ADHCKirLBrzPf4PEJc= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -718,27 +893,36 @@ github.com/zfjagann/golang-ring v0.0.0-20190304061218-d34796e0a6c2 h1:UQwvu7FjUE github.com/zfjagann/golang-ring v0.0.0-20190304061218-d34796e0a6c2/go.mod h1:0MsIttMJIF/8Y7x0XjonJP7K99t3sR6bjj4m5S4JmqU= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 
h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= @@ -754,14 +938,15 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 h1:DZhuSZLsGlFL4CmhA8BcRA0mnthyA/nZ00AqCUo7vHg= golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -779,9 +964,9 @@ golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMx golang.org/x/image v0.0.0-20190321063152-3fc05d484e9f/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200801110659-972c09e46d76 h1:U7GPaoQyQmX+CBRWXKrvRzWTbd+slqeSh8uARsIyhAw= golang.org/x/image v0.0.0-20200801110659-972c09e46d76/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image 
v0.0.0-20200927104501-e162460cd6b5 h1:QelT11PB4FXiDEXucrfNckHoFxwt8USGY1ajP1ZF5lM= +golang.org/x/image v0.0.0-20200927104501-e162460cd6b5/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -820,6 +1005,7 @@ golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -829,6 +1015,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -842,9 +1030,11 @@ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 h1:WQ8q63x+f/zpC8Ac1s9wLElVo golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -890,15 +1080,20 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -914,8 +1109,12 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9 h1:YTzHMGlqJu67/uEo1lBv0n3wBXhXNeUbB1XfN2vmTm0= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 h1:DvY3Zkh7KabQE/kfzMvYvKirSiguP9Q/veMtkYyf0o8= golang.org/x/sys 
v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -934,11 +1133,13 @@ golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -962,6 +1163,7 @@ golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -973,7 +1175,6 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200428021058-7ae4988eb4d9/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -983,6 +1184,7 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY 
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200731060945-b5fad4ed8dd6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200818005847-188abfa75333/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d h1:W07d4xkoAUSNOkOzdzXCdFGxT7o2rW4q8M34tB2i//k= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -991,8 +1193,6 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IV golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomod.garykim.dev/nc-talk v0.1.4 h1:U9viudEgq/biocorgWvZRVR+27IPEczYl/yszSvzN+8= -gomod.garykim.dev/nc-talk v0.1.4/go.mod h1:zKg8yxCk2KaTy6aPDEfRac0Jik72czX+nRsG8CZuhtc= gomod.garykim.dev/nc-talk v0.1.5 h1:zZ/FviVpwJuhD/YrKiAvs6Z3Oew/DL/w6RKbKaanhFA= gomod.garykim.dev/nc-talk v0.1.5/go.mod h1:zKg8yxCk2KaTy6aPDEfRac0Jik72czX+nRsG8CZuhtc= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= @@ -1000,6 +1200,7 @@ google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+ google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1038,6 +1239,7 @@ google.golang.org/genproto v0.0.0-20190321212433-e79c0c59cdb5/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1054,7 +1256,6 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto 
v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200424135956-bca184e23272/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= @@ -1062,6 +1263,7 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -1069,10 +1271,13 @@ google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9M google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1100,24 +1305,30 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod 
h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.55.0 h1:E8yzL5unfpW3M6fz/eB7Cb5MQAYSZ7GKo4Qth+N2sgQ= -gopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.60.0 h1:P5ZzC7RJO04094NJYlEnBdFK2wwmnCAy/+7sAzvWs60= +gopkg.in/ini.v1 v1.60.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/olahol/melody.v1 v1.0.0-20170518105555-d52139073376 h1:sY2a+y0j4iDrajJcorb+a0hJIQ6uakU5gybjfLWHlXo= gopkg.in/olahol/melody.v1 v1.0.0-20170518105555-d52139073376/go.mod h1:BHKOc1m5wm8WwQkMqYBoo4vNxhmF7xg8+xhG8L+Cy3M= -gopkg.in/olivere/elastic.v6 v6.2.30/go.mod h1:2cTT8Z+/LcArSWpCgvZqBgt3VOqXiy7v00w12Lz8bd4= +gopkg.in/olivere/elastic.v6 v6.2.34/go.mod h1:2cTT8Z+/LcArSWpCgvZqBgt3VOqXiy7v00w12Lz8bd4= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1125,8 +1336,12 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1147,6 +1362,8 @@ rsc.io/goversion v1.2.0 h1:SPn+NLTiAG7w30IRK/DKp1BjvpWabYgxlLp/+kx5J8w= rsc.io/goversion v1.2.0/go.mod h1:Eih9y/uIBS3ulggl7KNJ09xGSLcuNaLgmvvqa07sgfo= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= willnorris.com/go/gifresize v1.0.0/go.mod h1:eBM8gogBGCcaH603vxSpnfjwXIpq6nmnj/jauBDKtAk= diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/.travis.yml b/vendor/github.com/go-asn1-ber/asn1-ber/.travis.yml index 4ad2067b..8bffb901 100644 --- a/vendor/github.com/go-asn1-ber/asn1-ber/.travis.yml +++ b/vendor/github.com/go-asn1-ber/asn1-ber/.travis.yml @@ -1,38 +1,39 @@ language: go -matrix: - include: - - go: 1.2.x - env: GOOS=linux GOARCH=amd64 - - go: 1.2.x - env: GOOS=linux GOARCH=386 - - go: 1.2.x - env: GOOS=windows GOARCH=amd64 - - go: 1.2.x - env: GOOS=windows GOARCH=386 - - go: 1.3.x - - go: 1.4.x - - go: 1.5.x - - go: 1.6.x - - go: 1.7.x - - go: 1.8.x - - go: 1.9.x - - go: 1.10.x - - go: 1.11.x - - go: 1.12.x - - go: 1.13.x - env: GOOS=linux GOARCH=amd64 - - go: 1.13.x - env: GOOS=linux GOARCH=386 - - go: 1.13.x - env: GOOS=windows GOARCH=amd64 - - go: 1.13.x - env: GOOS=windows GOARCH=386 - - go: tip -go_import_path: gopkg.in/asn-ber.v1 -install: - - go list -f '{{range .Imports}}{{.}} {{end}}' ./... | xargs go get -v - - go list -f '{{range .TestImports}}{{.}} {{end}}' ./... | xargs go get -v - - go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover - - go build -v ./... + +go: + - 1.2.x + - 1.6.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.14.x + - tip + +os: + - linux + +arch: + - amd64 + +dist: xenial + +env: + - GOARCH=amd64 + +jobs: + include: + - os: windows + go: 1.14.x + - os: osx + go: 1.14.x + - os: linux + go: 1.14.x + arch: arm64 + - os: linux + go: 1.14.x + env: + - GOARCH=386 + script: - - go test -v -cover ./... || go test -v ./... + - go test -v -cover ./... || go test -v ./... diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/ber.go b/vendor/github.com/go-asn1-ber/asn1-ber/ber.go index 1e186cb8..4fd7a66e 100644 --- a/vendor/github.com/go-asn1-ber/asn1-ber/ber.go +++ b/vendor/github.com/go-asn1-ber/asn1-ber/ber.go @@ -8,6 +8,8 @@ import ( "math" "os" "reflect" + "time" + "unicode/utf8" ) // MaxPacketLengthBytes specifies the maximum allowed packet size when calling ReadPacket or DecodePacket. 
Set to 0 for @@ -143,20 +145,20 @@ var TypeMap = map[Type]string{ TypeConstructed: "Constructed", } -var Debug bool = false +var Debug = false func PrintBytes(out io.Writer, buf []byte, indent string) { - data_lines := make([]string, (len(buf)/30)+1) - num_lines := make([]string, (len(buf)/30)+1) + dataLines := make([]string, (len(buf)/30)+1) + numLines := make([]string, (len(buf)/30)+1) for i, b := range buf { - data_lines[i/30] += fmt.Sprintf("%02x ", b) - num_lines[i/30] += fmt.Sprintf("%02d ", (i+1)%100) + dataLines[i/30] += fmt.Sprintf("%02x ", b) + numLines[i/30] += fmt.Sprintf("%02d ", (i+1)%100) } - for i := 0; i < len(data_lines); i++ { - out.Write([]byte(indent + data_lines[i] + "\n")) - out.Write([]byte(indent + num_lines[i] + "\n\n")) + for i := 0; i < len(dataLines); i++ { + _, _ = out.Write([]byte(indent + dataLines[i] + "\n")) + _, _ = out.Write([]byte(indent + numLines[i] + "\n\n")) } } @@ -169,20 +171,20 @@ func PrintPacket(p *Packet) { } func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) { - indent_str := "" + indentStr := "" - for len(indent_str) != indent { - indent_str += " " + for len(indentStr) != indent { + indentStr += " " } - class_str := ClassMap[p.ClassType] + classStr := ClassMap[p.ClassType] - tagtype_str := TypeMap[p.TagType] + tagTypeStr := TypeMap[p.TagType] - tag_str := fmt.Sprintf("0x%02X", p.Tag) + tagStr := fmt.Sprintf("0x%02X", p.Tag) if p.ClassType == ClassUniversal { - tag_str = tagMap[p.Tag] + tagStr = tagMap[p.Tag] } value := fmt.Sprint(p.Value) @@ -192,10 +194,10 @@ func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) { description = p.Description + ": " } - fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indent_str, description, class_str, tagtype_str, tag_str, p.Data.Len(), value) + _, _ = fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indentStr, description, classStr, tagTypeStr, tagStr, p.Data.Len(), value) if printBytes { - PrintBytes(out, p.Bytes(), indent_str) + PrintBytes(out, p.Bytes(), indentStr) } for _, child := range p.Children { @@ -203,7 +205,7 @@ func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) { } } -// ReadPacket reads a single Packet from the reader +// ReadPacket reads a single Packet from the reader. func ReadPacket(reader io.Reader) (*Packet, error) { p, _, err := readPacket(reader) if err != nil { @@ -239,7 +241,7 @@ func encodeInteger(i int64) []byte { var j int for ; n > 0; n-- { - out[j] = (byte(i >> uint((n-1)*8))) + out[j] = byte(i >> uint((n-1)*8)) j++ } @@ -271,7 +273,7 @@ func DecodePacket(data []byte) *Packet { } // DecodePacketErr decodes the given bytes into a single Packet -// If a decode error is encountered, nil is returned +// If a decode error is encountered, nil is returned. func DecodePacketErr(data []byte) (*Packet, error) { p, _, err := readPacket(bytes.NewBuffer(data)) if err != nil { @@ -280,7 +282,7 @@ func DecodePacketErr(data []byte) (*Packet, error) { return p, nil } -// readPacket reads a single Packet from the reader, returning the number of bytes read +// readPacket reads a single Packet from the reader, returning the number of bytes read. 
func readPacket(reader io.Reader) (*Packet, int, error) { identifier, length, read, err := readHeader(reader) if err != nil { @@ -342,7 +344,7 @@ func readPacket(reader io.Reader) (*Packet, int, error) { if MaxPacketLengthBytes > 0 && int64(length) > MaxPacketLengthBytes { return nil, read, fmt.Errorf("length %d greater than maximum %d", length, MaxPacketLengthBytes) } - content := make([]byte, length, length) + content := make([]byte, length) if length > 0 { _, err := io.ReadFull(reader, content) if err != nil { @@ -377,22 +379,42 @@ func readPacket(reader io.Reader) (*Packet, int, error) { case TagObjectDescriptor: case TagExternal: case TagRealFloat: + p.Value, err = ParseReal(content) case TagEnumerated: p.Value, _ = ParseInt64(content) case TagEmbeddedPDV: case TagUTF8String: - p.Value = DecodeString(content) + val := DecodeString(content) + if !utf8.Valid([]byte(val)) { + err = errors.New("invalid UTF-8 string") + } else { + p.Value = val + } case TagRelativeOID: case TagSequence: case TagSet: case TagNumericString: case TagPrintableString: - p.Value = DecodeString(content) + val := DecodeString(content) + if err = isPrintableString(val); err == nil { + p.Value = val + } case TagT61String: case TagVideotexString: case TagIA5String: + val := DecodeString(content) + for i, c := range val { + if c >= 0x7F { + err = fmt.Errorf("invalid character for IA5String at pos %d: %c", i, c) + break + } + } + if err == nil { + p.Value = val + } case TagUTCTime: case TagGeneralizedTime: + p.Value, err = ParseGeneralizedTime(content) case TagGraphicString: case TagVisibleString: case TagGeneralString: @@ -404,7 +426,24 @@ func readPacket(reader io.Reader) (*Packet, int, error) { p.Data.Write(content) } - return p, read, nil + return p, read, err +} + +func isPrintableString(val string) error { + for i, c := range val { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c >= '0' && c <= '9': + default: + switch c { + case '\'', '(', ')', '+', ',', '-', '.', '=', '/', ':', '?', ' ': + default: + return fmt.Errorf("invalid character in position %d", i) + } + } + } + return nil } func (p *Packet) Bytes() []byte { @@ -422,77 +461,99 @@ func (p *Packet) AppendChild(child *Packet) { p.Children = append(p.Children, child) } -func Encode(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet { +func Encode(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet { p := new(Packet) - p.ClassType = ClassType - p.TagType = TagType - p.Tag = Tag + p.ClassType = classType + p.TagType = tagType + p.Tag = tag p.Data = new(bytes.Buffer) p.Children = make([]*Packet, 0, 2) - p.Value = Value - p.Description = Description + p.Value = value + p.Description = description - if Value != nil { - v := reflect.ValueOf(Value) + if value != nil { + v := reflect.ValueOf(value) - if ClassType == ClassUniversal { - switch Tag { + if classType == ClassUniversal { + switch tag { case TagOctetString: sv, ok := v.Interface().(string) if ok { p.Data.Write([]byte(sv)) } + case TagEnumerated: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + case TagEmbeddedPDV: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + } + } else if classType == ClassContext { + switch tag { + case TagEnumerated: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + case TagEmbeddedPDV: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } } } } - return p } -func NewSequence(Description string) *Packet { - 
return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, Description) +func NewSequence(description string) *Packet { + return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, description) } -func NewBoolean(ClassType Class, TagType Type, Tag Tag, Value bool, Description string) *Packet { +func NewBoolean(classType Class, tagType Type, tag Tag, value bool, description string) *Packet { intValue := int64(0) - if Value { + if value { intValue = 1 } - p := Encode(ClassType, TagType, Tag, nil, Description) + p := Encode(classType, tagType, tag, nil, description) - p.Value = Value + p.Value = value p.Data.Write(encodeInteger(intValue)) return p } -// NewLDAPBoolean returns a RFC 4511-compliant Boolean packet -func NewLDAPBoolean(Value bool, Description string) *Packet { +// NewLDAPBoolean returns a RFC 4511-compliant Boolean packet. +func NewLDAPBoolean(classType Class, tagType Type, tag Tag, value bool, description string) *Packet { intValue := int64(0) - if Value { + if value { intValue = 255 } - p := Encode(ClassUniversal, TypePrimitive, TagBoolean, nil, Description) + p := Encode(classType, tagType, tag, nil, description) - p.Value = Value + p.Value = value p.Data.Write(encodeInteger(intValue)) return p } -func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet { - p := Encode(ClassType, TagType, Tag, nil, Description) +func NewInteger(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) - p.Value = Value - switch v := Value.(type) { + p.Value = value + switch v := value.(type) { case int: p.Data.Write(encodeInteger(int64(v))) case uint: @@ -522,11 +583,38 @@ func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Descr return p } -func NewString(ClassType Class, TagType Type, Tag Tag, Value, Description string) *Packet { - p := Encode(ClassType, TagType, Tag, nil, Description) +func NewString(classType Class, tagType Type, tag Tag, value, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) - p.Value = Value - p.Data.Write([]byte(Value)) + p.Value = value + p.Data.Write([]byte(value)) return p } + +func NewGeneralizedTime(classType Class, tagType Type, tag Tag, value time.Time, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) + var s string + if value.Nanosecond() != 0 { + s = value.Format(`20060102150405.000000000Z`) + } else { + s = value.Format(`20060102150405Z`) + } + p.Value = s + p.Data.Write([]byte(s)) + return p +} + +func NewReal(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) + + switch v := value.(type) { + case float64: + p.Data.Write(encodeFloat(v)) + case float32: + p.Data.Write(encodeFloat(float64(v))) + default: + panic(fmt.Sprintf("Invalid type %T, expected float{64|32}", v)) + } + return p +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go b/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go index 1858b74b..20b500f5 100644 --- a/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go +++ b/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go @@ -6,7 +6,7 @@ func encodeUnsignedInteger(i uint64) []byte { var j int for ; n > 0; n-- { - out[j] = (byte(i >> uint((n-1)*8))) + out[j] = byte(i >> uint((n-1)*8)) j++ } diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go 
b/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go new file mode 100644 index 00000000..51215f06 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go @@ -0,0 +1,105 @@ +package ber + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "time" +) + +// ErrInvalidTimeFormat is returned when the generalizedTime string was not correct. +var ErrInvalidTimeFormat = errors.New("invalid time format") + +var zeroTime = time.Time{} + +// ParseGeneralizedTime parses a string value and if it conforms to +// GeneralizedTime[^0] format, will return a time.Time for that value. +// +// [^0]: https://www.itu.int/rec/T-REC-X.690-201508-I/en Section 11.7 +func ParseGeneralizedTime(v []byte) (time.Time, error) { + var format string + var fract time.Duration + + str := []byte(DecodeString(v)) + tzIndex := bytes.IndexAny(str, "Z+-") + if tzIndex < 0 { + return zeroTime, ErrInvalidTimeFormat + } + + dot := bytes.IndexAny(str, ".,") + switch dot { + case -1: + switch tzIndex { + case 10: + format = `2006010215Z` + case 12: + format = `200601021504Z` + case 14: + format = `20060102150405Z` + default: + return zeroTime, ErrInvalidTimeFormat + } + + case 10, 12: + if tzIndex < dot { + return zeroTime, ErrInvalidTimeFormat + } + // a "," is also allowed, but would not be parsed by time.Parse(): + str[dot] = '.' + + // If is omitted, then represents a fraction of an + // hour; otherwise, if and are omitted, then + // represents a fraction of a minute; otherwise, + // represents a fraction of a second. + + // parse as float from dot to timezone + f, err := strconv.ParseFloat(string(str[dot:tzIndex]), 64) + if err != nil { + return zeroTime, fmt.Errorf("failed to parse float: %s", err) + } + // ...and strip that part + str = append(str[:dot], str[tzIndex:]...) + tzIndex = dot + + if dot == 10 { + fract = time.Duration(int64(f * float64(time.Hour))) + format = `2006010215Z` + } else { + fract = time.Duration(int64(f * float64(time.Minute))) + format = `200601021504Z` + } + + case 14: + if tzIndex < dot { + return zeroTime, ErrInvalidTimeFormat + } + str[dot] = '.' + // no need for fractional seconds, time.Parse() handles that + format = `20060102150405Z` + + default: + return zeroTime, ErrInvalidTimeFormat + } + + l := len(str) + switch l - tzIndex { + case 1: + if str[l-1] != 'Z' { + return zeroTime, ErrInvalidTimeFormat + } + case 3: + format += `0700` + str = append(str, []byte("00")...) 
+ case 5: + format += `0700` + default: + return zeroTime, ErrInvalidTimeFormat + } + + t, err := time.Parse(format, string(str)) + if err != nil { + return zeroTime, fmt.Errorf("%s: %s", ErrInvalidTimeFormat, err) + } + return t.Add(fract), nil +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/header.go b/vendor/github.com/go-asn1-ber/asn1-ber/header.go index 71615621..7dfa6b9a 100644 --- a/vendor/github.com/go-asn1-ber/asn1-ber/header.go +++ b/vendor/github.com/go-asn1-ber/asn1-ber/header.go @@ -7,19 +7,22 @@ import ( ) func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) { - if i, c, err := readIdentifier(reader); err != nil { - return Identifier{}, 0, read, err - } else { - identifier = i - read += c - } + var ( + c, l int + i Identifier + ) - if l, c, err := readLength(reader); err != nil { + if i, c, err = readIdentifier(reader); err != nil { return Identifier{}, 0, read, err - } else { - length = l - read += c } + identifier = i + read += c + + if l, c, err = readLength(reader); err != nil { + return Identifier{}, 0, read, err + } + length = l + read += c // Validate length type with identifier (x.600, 8.1.3.2.a) if length == LengthIndefinite && identifier.TagType == TypePrimitive { diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/length.go b/vendor/github.com/go-asn1-ber/asn1-ber/length.go index 750e8f44..9cc195d0 100644 --- a/vendor/github.com/go-asn1-ber/asn1-ber/length.go +++ b/vendor/github.com/go-asn1-ber/asn1-ber/length.go @@ -71,11 +71,11 @@ func readLength(reader io.Reader) (length int, read int, err error) { } func encodeLength(length int) []byte { - length_bytes := encodeUnsignedInteger(uint64(length)) - if length > 127 || len(length_bytes) > 1 { - longFormBytes := []byte{(LengthLongFormBitmask | byte(len(length_bytes)))} - longFormBytes = append(longFormBytes, length_bytes...) - length_bytes = longFormBytes + lengthBytes := encodeUnsignedInteger(uint64(length)) + if length > 127 || len(lengthBytes) > 1 { + longFormBytes := []byte{LengthLongFormBitmask | byte(len(lengthBytes))} + longFormBytes = append(longFormBytes, lengthBytes...) + lengthBytes = longFormBytes } - return length_bytes + return lengthBytes } diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/real.go b/vendor/github.com/go-asn1-ber/asn1-ber/real.go new file mode 100644 index 00000000..610a003a --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/real.go @@ -0,0 +1,157 @@ +package ber + +import ( + "bytes" + "errors" + "fmt" + "math" + "strconv" + "strings" +) + +func encodeFloat(v float64) []byte { + switch { + case math.IsInf(v, 1): + return []byte{0x40} + case math.IsInf(v, -1): + return []byte{0x41} + case math.IsNaN(v): + return []byte{0x42} + case v == 0.0: + if math.Signbit(v) { + return []byte{0x43} + } + return []byte{} + default: + // we take the easy part ;-) + value := []byte(strconv.FormatFloat(v, 'G', -1, 64)) + var ret []byte + if bytes.Contains(value, []byte{'E'}) { + ret = []byte{0x03} + } else { + ret = []byte{0x02} + } + ret = append(ret, value...) 
+ return ret + } +} + +func ParseReal(v []byte) (val float64, err error) { + if len(v) == 0 { + return 0.0, nil + } + switch { + case v[0]&0x80 == 0x80: + val, err = parseBinaryFloat(v) + case v[0]&0xC0 == 0x40: + val, err = parseSpecialFloat(v) + case v[0]&0xC0 == 0x0: + val, err = parseDecimalFloat(v) + default: + return 0.0, fmt.Errorf("invalid info block") + } + if err != nil { + return 0.0, err + } + + if val == 0.0 && !math.Signbit(val) { + return 0.0, errors.New("REAL value +0 must be encoded with zero-length value block") + } + return val, nil +} + +func parseBinaryFloat(v []byte) (float64, error) { + var info byte + var buf []byte + + info, v = v[0], v[1:] + + var base int + switch info & 0x30 { + case 0x00: + base = 2 + case 0x10: + base = 8 + case 0x20: + base = 16 + case 0x30: + return 0.0, errors.New("bits 6 and 5 of information octet for REAL are equal to 11") + } + + scale := uint((info & 0x0c) >> 2) + + var expLen int + switch info & 0x03 { + case 0x00: + expLen = 1 + case 0x01: + expLen = 2 + case 0x02: + expLen = 3 + case 0x03: + expLen = int(v[0]) + if expLen > 8 { + return 0.0, errors.New("too big value of exponent") + } + v = v[1:] + } + buf, v = v[:expLen], v[expLen:] + exponent, err := ParseInt64(buf) + if err != nil { + return 0.0, err + } + + if len(v) > 8 { + return 0.0, errors.New("too big value of mantissa") + } + + mant, err := ParseInt64(v) + if err != nil { + return 0.0, err + } + mantissa := mant << scale + + if info&0x40 == 0x40 { + mantissa = -mantissa + } + + return float64(mantissa) * math.Pow(float64(base), float64(exponent)), nil +} + +func parseDecimalFloat(v []byte) (val float64, err error) { + switch v[0] & 0x3F { + case 0x01: // NR form 1 + var iVal int64 + iVal, err = strconv.ParseInt(strings.TrimLeft(string(v[1:]), " "), 10, 64) + val = float64(iVal) + case 0x02, 0x03: // NR form 2, 3 + val, err = strconv.ParseFloat(strings.Replace(strings.TrimLeft(string(v[1:]), " "), ",", ".", -1), 64) + default: + err = errors.New("incorrect NR form") + } + if err != nil { + return 0.0, err + } + + if val == 0.0 && math.Signbit(val) { + return 0.0, errors.New("REAL value -0 must be encoded as a special value") + } + return val, nil +} + +func parseSpecialFloat(v []byte) (float64, error) { + if len(v) != 1 { + return 0.0, errors.New(`encoding of "special value" must not contain exponent and mantissa`) + } + switch v[0] { + case 0x40: + return math.Inf(1), nil + case 0x41: + return math.Inf(-1), nil + case 0x42: + return math.NaN(), nil + case 0x43: + return math.Copysign(0, -1), nil + } + return 0.0, errors.New(`encoding of "special value" not from ASN.1 standard`) +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/util.go b/vendor/github.com/go-asn1-ber/asn1-ber/util.go index 3e56b66c..14dc87d7 100644 --- a/vendor/github.com/go-asn1-ber/asn1-ber/util.go +++ b/vendor/github.com/go-asn1-ber/asn1-ber/util.go @@ -3,7 +3,7 @@ package ber import "io" func readByte(reader io.Reader) (byte, error) { - bytes := make([]byte, 1, 1) + bytes := make([]byte, 1) _, err := io.ReadFull(reader, bytes) if err != nil { if err == io.EOF { diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
“Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md new file mode 100644 index 00000000..444df08f --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -0,0 +1,89 @@ +# errwrap + +`errwrap` is a package for Go that formalizes the pattern of wrapping errors +and checking if an error contains another error. + +There is a common pattern in Go of taking a returned `error` value and +then wrapping it (such as with `fmt.Errorf`) before returning it. The problem +with this pattern is that you completely lose the original `error` structure. + +Arguably the _correct_ approach is that you should make a custom structure +implementing the `error` interface, and have the original error as a field +on that structure, such [as this example](http://golang.org/pkg/os/#PathError). +This is a good approach, but you have to know the entire chain of possible +rewrapping that happens, when you might just care about one. + +`errwrap` formalizes this pattern (it doesn't matter what approach you use +above) by giving a single interface for wrapping errors, checking if a specific +error is wrapped, and extracting that error. + +## Installation and Docs + +Install using `go get github.com/hashicorp/errwrap`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/errwrap + +## Usage + +#### Basic Usage + +Below is a very basic example of its usage: + +```go +// A function that always returns an error, but wraps it, like a real +// function might. +func tryOpen() error { + _, err := os.Open("/i/dont/exist") + if err != nil { + return errwrap.Wrapf("Doesn't exist: {{err}}", err) + } + + return nil +} + +func main() { + err := tryOpen() + + // We can use the Contains helpers to check if an error contains + // another error. It is safe to do this with a nil error, or with + // an error that doesn't even use the errwrap package. + if errwrap.Contains(err, "does not exist") { + // Do something + } + if errwrap.ContainsType(err, new(os.PathError)) { + // Do something + } + + // Or we can use the associated `Get` functions to just extract + // a specific error. 
This would return nil if that specific error doesn't + // exist. + perr := errwrap.GetType(err, new(os.PathError)) +} +``` + +#### Custom Types + +If you're already making custom types that properly wrap errors, then +you can get all the functionality of `errwraps.Contains` and such by +implementing the `Wrapper` interface with just one function. Example: + +```go +type AppError { + Code ErrorCode + Err error +} + +func (e *AppError) WrappedErrors() []error { + return []error{e.Err} +} +``` + +Now this works: + +```go +err := &AppError{Err: fmt.Errorf("an error")} +if errwrap.ContainsType(err, fmt.Errorf("")) { + // This will work! +} +``` diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go new file mode 100644 index 00000000..a733bef1 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -0,0 +1,169 @@ +// Package errwrap implements methods to formalize error wrapping in Go. +// +// All of the top-level functions that take an `error` are built to be able +// to take any error, not just wrapped errors. This allows you to use errwrap +// without having to type-check and type-cast everywhere. +package errwrap + +import ( + "errors" + "reflect" + "strings" +) + +// WalkFunc is the callback called for Walk. +type WalkFunc func(error) + +// Wrapper is an interface that can be implemented by custom types to +// have all the Contains, Get, etc. functions in errwrap work. +// +// When Walk reaches a Wrapper, it will call the callback for every +// wrapped error in addition to the wrapper itself. Since all the top-level +// functions in errwrap use Walk, this means that all those functions work +// with your custom type. +type Wrapper interface { + WrappedErrors() []error +} + +// Wrap defines that outer wraps inner, returning an error type that +// can be cleanly used with the other methods in this package, such as +// Contains, GetAll, etc. +// +// This function won't modify the error message at all (the outer message +// will be used). +func Wrap(outer, inner error) error { + return &wrappedError{ + Outer: outer, + Inner: inner, + } +} + +// Wrapf wraps an error with a formatting message. This is similar to using +// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap +// errors, you should replace it with this. +// +// format is the format of the error message. The string '{{err}}' will +// be replaced with the original error message. +func Wrapf(format string, err error) error { + outerMsg := "" + if err != nil { + outerMsg = err.Error() + } + + outer := errors.New(strings.Replace( + format, "{{err}}", outerMsg, -1)) + + return Wrap(outer, err) +} + +// Contains checks if the given error contains an error with the +// message msg. If err is not a wrapped error, this will always return +// false unless the error itself happens to match this msg. +func Contains(err error, msg string) bool { + return len(GetAll(err, msg)) > 0 +} + +// ContainsType checks if the given error contains an error with +// the same concrete type as v. If err is not a wrapped error, this will +// check the err itself. +func ContainsType(err error, v interface{}) bool { + return len(GetAllType(err, v)) > 0 +} + +// Get is the same as GetAll but returns the deepest matching error. +func Get(err error, msg string) error { + es := GetAll(err, msg) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetType is the same as GetAllType but returns the deepest matching error. 
+func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} diff --git a/vendor/github.com/hashicorp/errwrap/go.mod b/vendor/github.com/hashicorp/errwrap/go.mod new file mode 100644 index 00000000..c9b84022 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/errwrap diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml new file mode 100644 index 00000000..24b80388 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.x + +branches: + only: + - master + +script: env GO111MODULE=on make test testrace diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 00000000..82b4de97 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 00000000..b97cd6ed --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... \ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 00000000..e92fa614 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,131 @@ +# go-multierror + +[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: https://travis-ci.org/hashicorp/go-multierror +[godocs]: https://godoc.org/github.com/hashicorp/go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` is fully compatible with the Go standard library +[errors](https://golang.org/pkg/errors/) package, including the +functions `As`, `Is`, and `Unwrap`. This provides a standardized approach +for introspecting on error values. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. 
+ +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-multierror + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. + +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. But if you're aware a multierror might +be returned, you can use type switches to access the list of errors: + +```go +if err := something(); err != nil { + if merr, ok := err.(*multierror.Error); ok { + // Use merr.Errors + } +} +``` + +You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) +function. This will continue to unwrap into subsequent errors until none exist. + +**Extracting an error** + +The standard library [`errors.As`](https://golang.org/pkg/errors/#As) +function can be used directly with a multierror to extract a specific error: + +```go +// Assume err is a multierror value +err := somefunc() + +// We want to know if "err" has a "RichErrorType" in it and extract it. +var errRich RichErrorType +if errors.As(err, &errRich) { + // It has it, and now errRich is populated. +} +``` + +**Checking for an exact error value** + +Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) +error in the `os` package. You can check if this error is present by using +the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. + +```go +// Assume err is a multierror value +err := somefunc() +if errors.Is(err, os.ErrNotExist) { + // err contains os.ErrNotExist +} +``` + +**Returning a multierror only if there are errors** + +If you build a `multierror.Error`, you can use the `ErrorOrNil` function +to return an `error` implementation only if there are errors to return: + +```go +var result *multierror.Error + +// ... accumulate errors here + +// Return the `error` only if errors were added to the multierror, otherwise +// return nil since there are no errors. +return result.ErrorOrNil() +``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go new file mode 100644 index 00000000..775b6e75 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -0,0 +1,41 @@ +package multierror + +// Append is a helper function that will append more errors +// onto an Error in order to create a larger multi-error. +// +// If err is not a multierror.Error, then it will be turned into +// one. If any of the errs are multierr.Error, they will be flattened +// one level into err. 
+func Append(err error, errs ...error) *Error { + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Go through each error and flatten + for _, e := range errs { + switch e := e.(type) { + case *Error: + if e != nil { + err.Errors = append(err.Errors, e.Errors...) + } + default: + if e != nil { + err.Errors = append(err.Errors, e) + } + } + } + + return err + default: + newErrs := make([]error, 0, len(errs)+1) + if err != nil { + newErrs = append(newErrs, err) + } + newErrs = append(newErrs, errs...) + + return Append(&Error{}, newErrs...) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go new file mode 100644 index 00000000..aab8e9ab --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/flatten.go @@ -0,0 +1,26 @@ +package multierror + +// Flatten flattens the given error, merging any *Errors together into +// a single *Error. +func Flatten(err error) error { + // If it isn't an *Error, just return the error as-is + if _, ok := err.(*Error); !ok { + return err + } + + // Otherwise, make the result and flatten away! + flatErr := new(Error) + flatten(err, flatErr) + return flatErr +} + +func flatten(err error, flatErr *Error) { + switch err := err.(type) { + case *Error: + for _, e := range err.Errors { + flatten(e, flatErr) + } + default: + flatErr.Errors = append(flatErr.Errors, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go new file mode 100644 index 00000000..47f13c49 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -0,0 +1,27 @@ +package multierror + +import ( + "fmt" + "strings" +) + +// ErrorFormatFunc is a function callback that is called by Error to +// turn the list of errors into a string. +type ErrorFormatFunc func([]error) string + +// ListFormatFunc is a basic formatter that outputs the number of errors +// that occurred along with a bullet point list of the errors. 
+func ListFormatFunc(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) + } + + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) +} diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod new file mode 100644 index 00000000..0afe8e6f --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.mod @@ -0,0 +1,5 @@ +module github.com/hashicorp/go-multierror + +go 1.14 + +require github.com/hashicorp/errwrap v1.0.0 diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum new file mode 100644 index 00000000..e8238e9e --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.sum @@ -0,0 +1,2 @@ +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go new file mode 100644 index 00000000..9c29efb7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/group.go @@ -0,0 +1,38 @@ +package multierror + +import "sync" + +// Group is a collection of goroutines which return errors that need to be +// coalesced. +type Group struct { + mutex sync.Mutex + err *Error + wg sync.WaitGroup +} + +// Go calls the given function in a new goroutine. +// +// If the function returns an error it is added to the group multierror which +// is returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.mutex.Lock() + g.err = Append(g.err, err) + g.mutex.Unlock() + } + }() +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the multierror. +func (g *Group) Wait() *Error { + g.wg.Wait() + g.mutex.Lock() + defer g.mutex.Unlock() + return g.err +} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go new file mode 100644 index 00000000..d05dd926 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -0,0 +1,118 @@ +package multierror + +import ( + "errors" + "fmt" +) + +// Error is an error type to track multiple errors. This is used to +// accumulate errors in cases and return them as a single "error". +type Error struct { + Errors []error + ErrorFormat ErrorFormatFunc +} + +func (e *Error) Error() string { + fn := e.ErrorFormat + if fn == nil { + fn = ListFormatFunc + } + + return fn(e.Errors) +} + +// ErrorOrNil returns an error interface if this Error represents +// a list of errors, or returns nil if the list of errors is empty. This +// function is useful at the end of accumulation to make sure that the value +// returned represents the existence of errors. +func (e *Error) ErrorOrNil() error { + if e == nil { + return nil + } + if len(e.Errors) == 0 { + return nil + } + + return e +} + +func (e *Error) GoString() string { + return fmt.Sprintf("*%#v", *e) +} + +// WrappedErrors returns the list of errors that this Error is wrapping. +// It is an implementation of the errwrap.Wrapper interface so that +// multierror.Error can be used with that library. 
+// +// This method is not safe to be called concurrently and is no different +// than accessing the Errors field directly. It is implemented only to +// satisfy the errwrap.Wrapper interface. +func (e *Error) WrappedErrors() []error { + return e.Errors +} + +// Unwrap returns an error from Error (or nil if there are no errors). +// This error returned will further support Unwrap to get the next error, +// etc. The order will match the order of Errors in the multierror.Error +// at the time of calling. +// +// The resulting error supports errors.As/Is/Unwrap so you can continue +// to use the stdlib errors package to introspect further. +// +// This will perform a shallow copy of the errors slice. Any errors appended +// to this error after calling Unwrap will not be available until a new +// Unwrap is called on the multierror.Error. +func (e *Error) Unwrap() error { + // If we have no errors then we do nothing + if e == nil || len(e.Errors) == 0 { + return nil + } + + // If we have exactly one error, we can just return that directly. + if len(e.Errors) == 1 { + return e.Errors[0] + } + + // Shallow copy the slice + errs := make([]error, len(e.Errors)) + copy(errs, e.Errors) + return chain(errs) +} + +// chain implements the interfaces necessary for errors.Is/As/Unwrap to +// work in a deterministic way with multierror. A chain tracks a list of +// errors while accounting for the current represented error. This lets +// Is/As be meaningful. +// +// Unwrap returns the next error. In the cleanest form, Unwrap would return +// the wrapped error here but we can't do that if we want to properly +// get access to all the errors. Instead, users are recommended to use +// Is/As to get the correct error type out. +// +// Precondition: []error is non-empty (len > 0) +type chain []error + +// Error implements the error interface +func (e chain) Error() string { + return e[0].Error() +} + +// Unwrap implements errors.Unwrap by returning the next error in the +// chain or nil if there are no more errors. +func (e chain) Unwrap() error { + if len(e) == 1 { + return nil + } + + return e[1:] +} + +// As implements errors.As by attempting to map to the current value. +func (e chain) As(target interface{}) bool { + return errors.As(e[0], target) +} + +// Is implements errors.Is by comparing the current value directly. +func (e chain) Is(target error) bool { + return errors.Is(e[0], target) +} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go new file mode 100644 index 00000000..5c477abe --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/prefix.go @@ -0,0 +1,37 @@ +package multierror + +import ( + "fmt" + + "github.com/hashicorp/errwrap" +) + +// Prefix is a helper function that will prefix some text +// to the given error. If the error is a multierror.Error, then +// it will be prefixed to each wrapped error. +// +// This is useful to use when appending multiple multierrors +// together in order to give better scoping. 
+func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go new file mode 100644 index 00000000..fecb14e8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/sort.go @@ -0,0 +1,16 @@ +package multierror + +// Len implements sort.Interface function for length +func (err Error) Len() int { + return len(err.Errors) +} + +// Swap implements sort.Interface function for swapping elements +func (err Error) Swap(i, j int) { + err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] +} + +// Less implements sort.Interface function for determining order +func (err Error) Less(i, j int) bool { + return err.Errors[i].Error() < err.Errors[j].Error() +} diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod deleted file mode 100644 index 716c6131..00000000 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/konsorten/go-windows-terminal-sequences diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go deleted file mode 100644 index 57f530ae..00000000 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build windows - -package sequences - -import ( - "syscall" -) - -var ( - kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll") - setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode") -) - -func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error { - const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4 - - var mode uint32 - err := syscall.GetConsoleMode(syscall.Stdout, &mode) - if err != nil { - return err - } - - if enable { - mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING - } else { - mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING - } - - ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode)) - if ret == 0 { - return err - } - - return nil -} diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go deleted file mode 100644 index df61a6f2..00000000 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux darwin - -package sequences - -import ( - "fmt" -) - -func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error { - return fmt.Errorf("windows only package") -} diff --git a/vendor/github.com/mattermost/logr/.gitignore b/vendor/github.com/mattermost/logr/.gitignore new file mode 100644 index 00000000..c2c0a9e2 --- /dev/null +++ b/vendor/github.com/mattermost/logr/.gitignore @@ -0,0 +1,36 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib +debug +dynip + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Output of profiler +*.prof 
+ +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# IntelliJ config +.idea + +# log files +*.log + +# transient directories +vendor +output +build +app +logs + +# test apps +test/cmd/testapp1/testapp1 +test/cmd/simple/simple diff --git a/vendor/github.com/mattermost/logr/.travis.yml b/vendor/github.com/mattermost/logr/.travis.yml new file mode 100644 index 00000000..e6c7caf1 --- /dev/null +++ b/vendor/github.com/mattermost/logr/.travis.yml @@ -0,0 +1,4 @@ +language: go +sudo: false +go: + - 1.x \ No newline at end of file diff --git a/vendor/github.com/mattermost/logr/LICENSE b/vendor/github.com/mattermost/logr/LICENSE new file mode 100644 index 00000000..3bea6788 --- /dev/null +++ b/vendor/github.com/mattermost/logr/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 wiggin77 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattermost/logr/README.md b/vendor/github.com/mattermost/logr/README.md new file mode 100644 index 00000000..a25d6de0 --- /dev/null +++ b/vendor/github.com/mattermost/logr/README.md @@ -0,0 +1,193 @@ +# logr + +[![GoDoc](https://godoc.org/github.com/mattermost/logr?status.svg)](http://godoc.org/github.com/mattermost/logr) +[![Report Card](https://goreportcard.com/badge/github.com/mattermost/logr)](https://goreportcard.com/report/github.com/mattermost/logr) + +Logr is a fully asynchronous, contextual logger for Go. + +It is very much inspired by [Logrus](https://github.com/sirupsen/logrus) but addresses two issues: + +1. Logr is fully asynchronous, meaning that all formatting and writing is done in the background. Latency sensitive applications benefit from not waiting for logging to complete. + +2. Logr provides custom filters which provide more flexibility than Trace, Debug, Info... levels. If you need to temporarily increase verbosity of logging while tracking down a problem you can avoid the fire-hose that typically comes from Debug or Trace by using custom filters. + +## Concepts + + +| entity | description | +| ------ | ----------- | +| Logr | Engine instance typically instantiated once; used to configure logging.
```lgr := &Logr{}```| +| Logger | Provides contextual logging via fields; lightweight, can be created once and accessed globally or created on demand.
```logger := lgr.NewLogger()```
```logger2 := logger.WithField("user", "Sam")```| +| Target | A destination for log items such as console, file, database or just about anything that can be written to. Each target has its own filter/level and formatter, and any number of targets can be added to a Logr. Targets for syslog and any io.Writer are built-in and it is easy to create your own. You can also use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) via a simple [adapter](https://github.com/wiggin77/logrus4logr).| +| Filter | Determines which logging calls get written versus filtered out. Also determines which logging calls generate a stack trace.
```filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Fatal}```| +| Formatter | Formats the output. Logr includes built-in formatters for JSON and plain text with delimiters. It is easy to create your own formatters, or you can use any [Logrus formatters](https://github.com/sirupsen/logrus#formatters) via a simple [adapter](https://github.com/wiggin77/logrus4logr).
```formatter := &format.Plain{Delim: " \| "}```| + +## Usage + +```go +// Create Logr instance. +lgr := &logr.Logr{} + +// Create a filter and formatter. Both can be shared by multiple +// targets. +filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Error} +formatter := &format.Plain{Delim: " | "} + +// WriterTarget outputs to any io.Writer +t := target.NewWriterTarget(filter, formatter, os.StdOut, 1000) +lgr.AddTarget(t) + +// One or more Loggers can be created, shared, used concurrently, +// or created on demand. +logger := lgr.NewLogger().WithField("user", "Sarah") + +// Now we can log to the target(s). +logger.Debug("login attempt") +logger.Error("login failed") + +// Ensure targets are drained before application exit. +lgr.Shutdown() +``` + +## Fields + +Fields allow for contextual logging, meaning information can be added to log statements without changing the statements themselves. Information can be shared across multiple logging statements thus allowing log analysis tools to group them. + +Fields are added via Loggers: + +```go +lgr := &Logr{} +// ... add targets ... +logger := lgr.NewLogger().WithFields(logr.Fields{ + "user": user, + "role": role}) +logger.Info("login attempt") +// ... later ... +logger.Info("login successful") +``` + +`Logger.WithFields` can be used to create additional Loggers that add more fields. + +Logr fields are inspired by and work the same as [Logrus fields](https://github.com/sirupsen/logrus#fields). + +## Filters + +Logr supports the traditional seven log levels via `logr.StdFilter`: Panic, Fatal, Error, Warning, Info, Debug, and Trace. + +```go +// When added to a target, this filter will only allow +// log statements with level severity Warn or higher. +// It will also generate stack traces for Error or higher. +filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Error} +``` + +Logr also supports custom filters (logr.CustomFilter) which allow fine grained inclusion of log items without turning on the fire-hose. + +```go + // create custom levels; use IDs > 10. + LoginLevel := logr.Level{ID: 100, Name: "login ", Stacktrace: false} + LogoutLevel := logr.Level{ID: 101, Name: "logout", Stacktrace: false} + + lgr := &logr.Logr{} + + // create a custom filter with custom levels. + filter := &logr.CustomFilter{} + filter.Add(LoginLevel, LogoutLevel) + + formatter := &format.Plain{Delim: " | "} + tgr := target.NewWriterTarget(filter, formatter, os.StdOut, 1000) + lgr.AddTarget(tgr) + logger := lgr.NewLogger().WithFields(logr.Fields{"user": "Bob", "role": "admin"}) + + logger.Log(LoginLevel, "this item will get logged") + logger.Debug("won't be logged since Debug wasn't added to custom filter") +``` + +Both filter types allow you to determine which levels require a stack trace to be output. Note that generating stack traces cannot happen fully asynchronously and thus add latency to the calling goroutine. + +## Targets + +There are built-in targets for outputting to syslog, file, or any `io.Writer`. More will be added. + +You can use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) via a simple [adapter](https://github.com/wiggin77/logrus4logr). + +You can create your own target by implementing the [Target](./target.go) interface. + +An easier method is to use the [logr.Basic](./target.go) type target and build your functionality on that. Basic handles all the queuing and other plumbing so you only need to implement two methods. 
Example target that outputs to `io.Writer`: + +```go +type Writer struct { + logr.Basic + out io.Writer +} + +func NewWriterTarget(filter logr.Filter, formatter logr.Formatter, out io.Writer, maxQueue int) *Writer { + w := &Writer{out: out} + w.Basic.Start(w, w, filter, formatter, maxQueue) + return w +} + +// Write will always be called by a single goroutine, so no locking needed. +// Just convert a log record to a []byte using the formatter and output the +// bytes to your sink. +func (w *Writer) Write(rec *logr.LogRec) error { + _, stacktrace := w.IsLevelEnabled(rec.Level()) + + // take a buffer from the pool to avoid allocations or just allocate a new one. + buf := rec.Logger().Logr().BorrowBuffer() + defer rec.Logger().Logr().ReleaseBuffer(buf) + + buf, err := w.Formatter().Format(rec, stacktrace, buf) + if err != nil { + return err + } + _, err = w.out.Write(buf.Bytes()) + return err +} +``` + +## Formatters + +Logr has two built-in formatters, one for JSON and the other plain, delimited text. + +You can use any [Logrus formatters](https://github.com/sirupsen/logrus#formatters) via a simple [adapter](https://github.com/wiggin77/logrus4logr). + +You can create your own formatter by implementing the [Formatter](./formatter.go) interface: + +```go +Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) +``` + +## Handlers + +When creating the Logr instance, you can add several handlers that get called when exceptional events occur: + +### ```Logr.OnLoggerError(err error)``` + +Called any time an internal logging error occurs. For example, this can happen when a target cannot connect to its data sink. + +It may be tempting to log this error, however there is a danger that logging this will simply generate another error and so on. If you must log it, use a target and custom level specifically for this event and ensure it cannot generate more errors. + +### ```Logr.OnQueueFull func(rec *LogRec, maxQueueSize int) bool``` + +Called on an attempt to add a log record to a full Logr queue. This generally means the Logr maximum queue size is too small, or at least one target is very slow. Logr maximum queue size can be changed before adding any targets via: + +```go +lgr := logr.Logr{MaxQueueSize: 10000} +``` + +Returning true will drop the log record. False will block until the log record can be added, which creates a natural throttle at the expense of latency for the calling goroutine. The default is to block. + +### ```Logr.OnTargetQueueFull func(target Target, rec *LogRec, maxQueueSize int) bool``` + +Called on an attempt to add a log record to a full target queue. This generally means your target's max queue size is too small, or the target is very slow to output. + +As with the Logr queue, returning true will drop the log record. False will block until the log record can be added, which creates a natural throttle at the expense of latency for the calling goroutine. The default is to block. + +### ```Logr.OnExit func(code int) and Logr.OnPanic func(err interface{})``` + +OnExit and OnPanic are called when the Logger.FatalXXX and Logger.PanicXXX functions are called respectively. + +In both cases the default behavior is to shut down gracefully, draining all targets, and calling `os.Exit` or `panic` respectively. + +When adding your own handlers, be sure to call `Logr.Shutdown` before exiting the application to avoid losing log records. 
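For reference, a minimal sketch of wiring these handlers together is shown below. It is not part of the upstream README; it assumes the handlers are plain assignable fields on `logr.Logr` with the signatures listed above.

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattermost/logr"
)

func main() {
	lgr := &logr.Logr{MaxQueueSize: 10000}

	// Report internal logging failures somewhere that cannot itself fail.
	lgr.OnLoggerError = func(err error) {
		fmt.Fprintln(os.Stderr, "logr error:", err)
	}

	// Drop records rather than blocking callers when the Logr queue fills up.
	lgr.OnQueueFull = func(rec *logr.LogRec, maxQueueSize int) bool {
		return true // true == drop this record
	}

	// Custom exit handler: drain all targets before the process exits.
	lgr.OnExit = func(code int) {
		lgr.Shutdown()
		os.Exit(code)
	}

	// ... add targets and create loggers as shown in Usage above ...
}
```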
diff --git a/vendor/github.com/mattermost/logr/config.go b/vendor/github.com/mattermost/logr/config.go new file mode 100644 index 00000000..83d4b0c1 --- /dev/null +++ b/vendor/github.com/mattermost/logr/config.go @@ -0,0 +1,11 @@ +package logr + +import ( + "fmt" + + "github.com/wiggin77/cfg" +) + +func ConfigLogger(config *cfg.Config) error { + return fmt.Errorf("Not implemented yet") +} diff --git a/vendor/github.com/mattermost/logr/const.go b/vendor/github.com/mattermost/logr/const.go new file mode 100644 index 00000000..704d0507 --- /dev/null +++ b/vendor/github.com/mattermost/logr/const.go @@ -0,0 +1,34 @@ +package logr + +import "time" + +// Defaults. +const ( + // DefaultMaxQueueSize is the default maximum queue size for Logr instances. + DefaultMaxQueueSize = 1000 + + // DefaultMaxStackFrames is the default maximum max number of stack frames collected + // when generating stack traces for logging. + DefaultMaxStackFrames = 30 + + // MaxLevelID is the maximum value of a level ID. Some level cache implementations will + // allocate a cache of this size. Cannot exceed uint. + MaxLevelID = 256 + + // DefaultEnqueueTimeout is the default amount of time a log record can take to be queued. + // This only applies to blocking enqueue which happen after `logr.OnQueueFull` is called + // and returns false. + DefaultEnqueueTimeout = time.Second * 30 + + // DefaultShutdownTimeout is the default amount of time `logr.Shutdown` can execute before + // timing out. + DefaultShutdownTimeout = time.Second * 30 + + // DefaultFlushTimeout is the default amount of time `logr.Flush` can execute before + // timing out. + DefaultFlushTimeout = time.Second * 30 + + // DefaultMaxPooledBuffer is the maximum size a pooled buffer can be. + // Buffers that grow beyond this size are garbage collected. + DefaultMaxPooledBuffer = 1024 * 1024 +) diff --git a/vendor/github.com/mattermost/logr/filter.go b/vendor/github.com/mattermost/logr/filter.go new file mode 100644 index 00000000..6e654cd7 --- /dev/null +++ b/vendor/github.com/mattermost/logr/filter.go @@ -0,0 +1,26 @@ +package logr + +// LevelID is the unique id of each level. +type LevelID uint + +// Level provides a mechanism to enable/disable specific log lines. +type Level struct { + ID LevelID + Name string + Stacktrace bool +} + +// String returns the name of this level. +func (level Level) String() string { + return level.Name +} + +// Filter allows targets to determine which Level(s) are active +// for logging and which Level(s) require a stack trace to be output. +// A default implementation using "panic, fatal..." is provided, and +// a more flexible alternative implementation is also provided that +// allows any number of custom levels. +type Filter interface { + IsEnabled(Level) bool + IsStacktraceEnabled(Level) bool +} diff --git a/vendor/github.com/mattermost/logr/format/json.go b/vendor/github.com/mattermost/logr/format/json.go new file mode 100644 index 00000000..8f56c6cb --- /dev/null +++ b/vendor/github.com/mattermost/logr/format/json.go @@ -0,0 +1,273 @@ +package format + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "sync" + "time" + + "github.com/francoispqt/gojay" + "github.com/mattermost/logr" +) + +// ContextField is a name/value pair within the context fields. +type ContextField struct { + Key string + Val interface{} +} + +// JSON formats log records as JSON. +type JSON struct { + // DisableTimestamp disables output of timestamp field. + DisableTimestamp bool + // DisableLevel disables output of level field. 
+ DisableLevel bool + // DisableMsg disables output of msg field. + DisableMsg bool + // DisableContext disables output of all context fields. + DisableContext bool + // DisableStacktrace disables output of stack trace. + DisableStacktrace bool + + // TimestampFormat is an optional format for timestamps. If empty + // then DefTimestampFormat is used. + TimestampFormat string + + // Deprecated: this has no effect. + Indent string + + // EscapeHTML determines if certain characters (e.g. `<`, `>`, `&`) + // are escaped. + EscapeHTML bool + + // KeyTimestamp overrides the timestamp field key name. + KeyTimestamp string + + // KeyLevel overrides the level field key name. + KeyLevel string + + // KeyMsg overrides the msg field key name. + KeyMsg string + + // KeyContextFields when not empty will group all context fields + // under this key. + KeyContextFields string + + // KeyStacktrace overrides the stacktrace field key name. + KeyStacktrace string + + // ContextSorter allows custom sorting for the context fields. + ContextSorter func(fields logr.Fields) []ContextField + + once sync.Once +} + +// Format converts a log record to bytes in JSON format. +func (j *JSON) Format(rec *logr.LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) { + j.once.Do(j.applyDefaultKeyNames) + + if buf == nil { + buf = &bytes.Buffer{} + } + enc := gojay.BorrowEncoder(buf) + defer func() { + enc.Release() + }() + + sorter := j.ContextSorter + if sorter == nil { + sorter = j.defaultContextSorter + } + + jlr := JSONLogRec{ + LogRec: rec, + JSON: j, + stacktrace: stacktrace, + sorter: sorter, + } + + err := enc.EncodeObject(jlr) + if err != nil { + return nil, err + } + buf.WriteByte('\n') + return buf, nil +} + +func (j *JSON) applyDefaultKeyNames() { + if j.KeyTimestamp == "" { + j.KeyTimestamp = "timestamp" + } + if j.KeyLevel == "" { + j.KeyLevel = "level" + } + if j.KeyMsg == "" { + j.KeyMsg = "msg" + } + if j.KeyStacktrace == "" { + j.KeyStacktrace = "stacktrace" + } +} + +// defaultContextSorter sorts the context fields alphabetically by key. +func (j *JSON) defaultContextSorter(fields logr.Fields) []ContextField { + keys := make([]string, 0, len(fields)) + for k := range fields { + keys = append(keys, k) + } + sort.Strings(keys) + + cf := make([]ContextField, 0, len(keys)) + for _, k := range keys { + cf = append(cf, ContextField{Key: k, Val: fields[k]}) + } + return cf +} + +// JSONLogRec decorates a LogRec adding JSON encoding. +type JSONLogRec struct { + *logr.LogRec + *JSON + stacktrace bool + sorter func(fields logr.Fields) []ContextField +} + +// MarshalJSONObject encodes the LogRec as JSON. 
+func (rec JSONLogRec) MarshalJSONObject(enc *gojay.Encoder) { + if !rec.DisableTimestamp { + timestampFmt := rec.TimestampFormat + if timestampFmt == "" { + timestampFmt = logr.DefTimestampFormat + } + time := rec.Time() + enc.AddTimeKey(rec.KeyTimestamp, &time, timestampFmt) + } + if !rec.DisableLevel { + enc.AddStringKey(rec.KeyLevel, rec.Level().Name) + } + if !rec.DisableMsg { + enc.AddStringKey(rec.KeyMsg, rec.Msg()) + } + if !rec.DisableContext { + ctxFields := rec.sorter(rec.Fields()) + if rec.KeyContextFields != "" { + enc.AddObjectKey(rec.KeyContextFields, jsonFields(ctxFields)) + } else { + if len(ctxFields) > 0 { + for _, cf := range ctxFields { + key := rec.prefixCollision(cf.Key) + encodeField(enc, key, cf.Val) + } + } + } + } + if rec.stacktrace && !rec.DisableStacktrace { + frames := rec.StackFrames() + if len(frames) > 0 { + enc.AddArrayKey(rec.KeyStacktrace, stackFrames(frames)) + } + } + +} + +// IsNil returns true if the LogRec pointer is nil. +func (rec JSONLogRec) IsNil() bool { + return rec.LogRec == nil +} + +func (rec JSONLogRec) prefixCollision(key string) string { + switch key { + case rec.KeyTimestamp, rec.KeyLevel, rec.KeyMsg, rec.KeyStacktrace: + return rec.prefixCollision("_" + key) + } + return key +} + +type stackFrames []runtime.Frame + +// MarshalJSONArray encodes stackFrames slice as JSON. +func (s stackFrames) MarshalJSONArray(enc *gojay.Encoder) { + for _, frame := range s { + enc.AddObject(stackFrame(frame)) + } +} + +// IsNil returns true if stackFrames is empty slice. +func (s stackFrames) IsNil() bool { + return len(s) == 0 +} + +type stackFrame runtime.Frame + +// MarshalJSONArray encodes stackFrame as JSON. +func (f stackFrame) MarshalJSONObject(enc *gojay.Encoder) { + enc.AddStringKey("Function", f.Function) + enc.AddStringKey("File", f.File) + enc.AddIntKey("Line", f.Line) +} + +func (f stackFrame) IsNil() bool { + return false +} + +type jsonFields []ContextField + +// MarshalJSONObject encodes Fields map to JSON. +func (f jsonFields) MarshalJSONObject(enc *gojay.Encoder) { + for _, ctxField := range f { + encodeField(enc, ctxField.Key, ctxField.Val) + } +} + +// IsNil returns true if map is nil. 
+func (f jsonFields) IsNil() bool { + return f == nil +} + +func encodeField(enc *gojay.Encoder, key string, val interface{}) { + switch vt := val.(type) { + case gojay.MarshalerJSONObject: + enc.AddObjectKey(key, vt) + case gojay.MarshalerJSONArray: + enc.AddArrayKey(key, vt) + case string: + enc.AddStringKey(key, vt) + case error: + enc.AddStringKey(key, vt.Error()) + case bool: + enc.AddBoolKey(key, vt) + case int: + enc.AddIntKey(key, vt) + case int64: + enc.AddInt64Key(key, vt) + case int32: + enc.AddIntKey(key, int(vt)) + case int16: + enc.AddIntKey(key, int(vt)) + case int8: + enc.AddIntKey(key, int(vt)) + case uint64: + enc.AddIntKey(key, int(vt)) + case uint32: + enc.AddIntKey(key, int(vt)) + case uint16: + enc.AddIntKey(key, int(vt)) + case uint8: + enc.AddIntKey(key, int(vt)) + case float64: + enc.AddFloatKey(key, vt) + case float32: + enc.AddFloat32Key(key, vt) + case *gojay.EmbeddedJSON: + enc.AddEmbeddedJSONKey(key, vt) + case time.Time: + enc.AddTimeKey(key, &vt, logr.DefTimestampFormat) + case *time.Time: + enc.AddTimeKey(key, vt, logr.DefTimestampFormat) + default: + s := fmt.Sprintf("%v", vt) + enc.AddStringKey(key, s) + } +} diff --git a/vendor/github.com/mattermost/logr/format/plain.go b/vendor/github.com/mattermost/logr/format/plain.go new file mode 100644 index 00000000..3fa92b49 --- /dev/null +++ b/vendor/github.com/mattermost/logr/format/plain.go @@ -0,0 +1,75 @@ +package format + +import ( + "bytes" + "fmt" + + "github.com/mattermost/logr" +) + +// Plain is the simplest formatter, outputting only text with +// no colors. +type Plain struct { + // DisableTimestamp disables output of timestamp field. + DisableTimestamp bool + // DisableLevel disables output of level field. + DisableLevel bool + // DisableMsg disables output of msg field. + DisableMsg bool + // DisableContext disables output of all context fields. + DisableContext bool + // DisableStacktrace disables output of stack trace. + DisableStacktrace bool + + // Delim is an optional delimiter output between each log field. + // Defaults to a single space. + Delim string + + // TimestampFormat is an optional format for timestamps. If empty + // then DefTimestampFormat is used. + TimestampFormat string +} + +// Format converts a log record to bytes. 
+func (p *Plain) Format(rec *logr.LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) { + delim := p.Delim + if delim == "" { + delim = " " + } + if buf == nil { + buf = &bytes.Buffer{} + } + + timestampFmt := p.TimestampFormat + if timestampFmt == "" { + timestampFmt = logr.DefTimestampFormat + } + + if !p.DisableTimestamp { + var arr [128]byte + tbuf := rec.Time().AppendFormat(arr[:0], timestampFmt) + buf.Write(tbuf) + buf.WriteString(delim) + } + if !p.DisableLevel { + fmt.Fprintf(buf, "%v%s", rec.Level().Name, delim) + } + if !p.DisableMsg { + fmt.Fprint(buf, rec.Msg(), delim) + } + if !p.DisableContext { + ctx := rec.Fields() + if len(ctx) > 0 { + logr.WriteFields(buf, ctx, " ") + } + } + if stacktrace && !p.DisableStacktrace { + frames := rec.StackFrames() + if len(frames) > 0 { + buf.WriteString("\n") + logr.WriteStacktrace(buf, rec.StackFrames()) + } + } + buf.WriteString("\n") + return buf, nil +} diff --git a/vendor/github.com/mattermost/logr/formatter.go b/vendor/github.com/mattermost/logr/formatter.go new file mode 100644 index 00000000..bb8df2d4 --- /dev/null +++ b/vendor/github.com/mattermost/logr/formatter.go @@ -0,0 +1,119 @@ +package logr + +import ( + "bytes" + "fmt" + "io" + "runtime" + "sort" +) + +// Formatter turns a LogRec into a formatted string. +type Formatter interface { + // Format converts a log record to bytes. If buf is not nil then it will be + // be filled with the formatted results, otherwise a new buffer will be allocated. + Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) +} + +const ( + // DefTimestampFormat is the default time stamp format used by + // Plain formatter and others. + DefTimestampFormat = "2006-01-02 15:04:05.000 Z07:00" +) + +// DefaultFormatter is the default formatter, outputting only text with +// no colors and a space delimiter. Use `format.Plain` instead. +type DefaultFormatter struct { +} + +// Format converts a log record to bytes. +func (p *DefaultFormatter) Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) { + if buf == nil { + buf = &bytes.Buffer{} + } + delim := " " + timestampFmt := DefTimestampFormat + + fmt.Fprintf(buf, "%s%s", rec.Time().Format(timestampFmt), delim) + fmt.Fprintf(buf, "%v%s", rec.Level(), delim) + fmt.Fprint(buf, rec.Msg(), delim) + + ctx := rec.Fields() + if len(ctx) > 0 { + WriteFields(buf, ctx, " ") + } + + if stacktrace { + frames := rec.StackFrames() + if len(frames) > 0 { + buf.WriteString("\n") + WriteStacktrace(buf, rec.StackFrames()) + } + } + buf.WriteString("\n") + + return buf, nil +} + +// WriteFields writes zero or more name value pairs to the io.Writer. +// The pairs are sorted by key name and output in key=value format +// with optional separator between fields. 
+func WriteFields(w io.Writer, flds Fields, separator string) { + keys := make([]string, 0, len(flds)) + for k := range flds { + keys = append(keys, k) + } + sort.Strings(keys) + sep := "" + for _, key := range keys { + writeField(w, key, flds[key], sep) + sep = separator + } +} + +func writeField(w io.Writer, key string, val interface{}, sep string) { + var template string + switch v := val.(type) { + case error: + val := v.Error() + if shouldQuote(val) { + template = "%s%s=%q" + } else { + template = "%s%s=%s" + } + case string: + if shouldQuote(v) { + template = "%s%s=%q" + } else { + template = "%s%s=%s" + } + default: + template = "%s%s=%v" + } + fmt.Fprintf(w, template, sep, key, val) +} + +// shouldQuote returns true if val contains any characters that might be unsafe +// when injecting log output into an aggregator, viewer or report. +func shouldQuote(val string) bool { + for _, c := range val { + if !((c >= '0' && c <= '9') || + (c >= 'a' && c <= 'z') || + (c >= 'A' && c <= 'Z')) { + return true + } + } + return false +} + +// WriteStacktrace formats and outputs a stack trace to an io.Writer. +func WriteStacktrace(w io.Writer, frames []runtime.Frame) { + for _, frame := range frames { + if frame.Function != "" { + fmt.Fprintf(w, " %s\n", frame.Function) + } + if frame.File != "" { + fmt.Fprintf(w, " %s:%d\n", frame.File, frame.Line) + } + } +} diff --git a/vendor/github.com/mattermost/logr/go.mod b/vendor/github.com/mattermost/logr/go.mod new file mode 100644 index 00000000..e8e8acfb --- /dev/null +++ b/vendor/github.com/mattermost/logr/go.mod @@ -0,0 +1,11 @@ +module github.com/mattermost/logr + +go 1.12 + +require ( + github.com/francoispqt/gojay v1.2.13 + github.com/stretchr/testify v1.2.2 + github.com/wiggin77/cfg v1.0.2 + github.com/wiggin77/merror v1.0.2 + gopkg.in/natefinch/lumberjack.v2 v2.0.0 +) diff --git a/vendor/github.com/mattermost/logr/go.sum b/vendor/github.com/mattermost/logr/go.sum new file mode 100644 index 00000000..ea688513 --- /dev/null +++ b/vendor/github.com/mattermost/logr/go.sum @@ -0,0 +1,174 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= 
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror 
v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/wiggin77/cfg v1.0.2 h1:NBUX+iJRr+RTncTqTNvajHwzduqbhCQjEqxLHr6Fk7A= +github.com/wiggin77/cfg v1.0.2/go.mod h1:b3gotba2e5bXTqTW48DwIFoLc+4lWKP7WPi/CdvZ4aE= +github.com/wiggin77/merror v1.0.2 h1:V0nH9eFp64ASyaXC+pB5WpvBoCg7NUwvaCSKdzlcHqw= +github.com/wiggin77/merror v1.0.2/go.mod h1:uQTcIU0Z6jRK4OwqganPYerzQxSFJ4GSHM3aurxxQpg= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine 
v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/github.com/mattermost/logr/levelcache.go b/vendor/github.com/mattermost/logr/levelcache.go new file mode 100644 index 00000000..2cefb61d --- /dev/null +++ b/vendor/github.com/mattermost/logr/levelcache.go @@ -0,0 +1,98 @@ +package logr + +import ( + "fmt" + "sync" +) + +// LevelStatus represents whether a level is enabled and +// requires a stack trace. +type LevelStatus struct { + Enabled bool + Stacktrace bool + empty bool +} + +type levelCache interface { + setup() + get(id LevelID) (LevelStatus, bool) + put(id LevelID, status LevelStatus) error + clear() +} + +// syncMapLevelCache uses sync.Map which may better handle large concurrency +// scenarios. 
+type syncMapLevelCache struct { + m sync.Map +} + +func (c *syncMapLevelCache) setup() { + c.clear() +} + +func (c *syncMapLevelCache) get(id LevelID) (LevelStatus, bool) { + if id > MaxLevelID { + return LevelStatus{}, false + } + s, _ := c.m.Load(id) + status := s.(LevelStatus) + return status, !status.empty +} + +func (c *syncMapLevelCache) put(id LevelID, status LevelStatus) error { + if id > MaxLevelID { + return fmt.Errorf("level id cannot exceed MaxLevelID (%d)", MaxLevelID) + } + c.m.Store(id, status) + return nil +} + +func (c *syncMapLevelCache) clear() { + var i LevelID + for i = 0; i < MaxLevelID; i++ { + c.m.Store(i, LevelStatus{empty: true}) + } +} + +// arrayLevelCache using array and a mutex. +type arrayLevelCache struct { + arr [MaxLevelID + 1]LevelStatus + mux sync.RWMutex +} + +func (c *arrayLevelCache) setup() { + c.clear() +} + +//var dummy = LevelStatus{} + +func (c *arrayLevelCache) get(id LevelID) (LevelStatus, bool) { + if id > MaxLevelID { + return LevelStatus{}, false + } + c.mux.RLock() + status := c.arr[id] + ok := !status.empty + c.mux.RUnlock() + return status, ok +} + +func (c *arrayLevelCache) put(id LevelID, status LevelStatus) error { + if id > MaxLevelID { + return fmt.Errorf("level id cannot exceed MaxLevelID (%d)", MaxLevelID) + } + c.mux.Lock() + defer c.mux.Unlock() + + c.arr[id] = status + return nil +} + +func (c *arrayLevelCache) clear() { + c.mux.Lock() + defer c.mux.Unlock() + + for i := range c.arr { + c.arr[i] = LevelStatus{empty: true} + } +} diff --git a/vendor/github.com/mattermost/logr/levelcustom.go b/vendor/github.com/mattermost/logr/levelcustom.go new file mode 100644 index 00000000..384fe4e9 --- /dev/null +++ b/vendor/github.com/mattermost/logr/levelcustom.go @@ -0,0 +1,45 @@ +package logr + +import ( + "sync" +) + +// CustomFilter allows targets to enable logging via a list of levels. +type CustomFilter struct { + mux sync.RWMutex + levels map[LevelID]Level +} + +// IsEnabled returns true if the specified Level exists in this list. +func (st *CustomFilter) IsEnabled(level Level) bool { + st.mux.RLock() + defer st.mux.RUnlock() + _, ok := st.levels[level.ID] + return ok +} + +// IsStacktraceEnabled returns true if the specified Level requires a stack trace. +func (st *CustomFilter) IsStacktraceEnabled(level Level) bool { + st.mux.RLock() + defer st.mux.RUnlock() + lvl, ok := st.levels[level.ID] + if ok { + return lvl.Stacktrace + } + return false +} + +// Add adds one or more levels to the list. Adding a level enables logging for +// that level on any targets using this CustomFilter. +func (st *CustomFilter) Add(levels ...Level) { + st.mux.Lock() + defer st.mux.Unlock() + + if st.levels == nil { + st.levels = make(map[LevelID]Level) + } + + for _, s := range levels { + st.levels[s.ID] = s + } +} diff --git a/vendor/github.com/mattermost/logr/levelstd.go b/vendor/github.com/mattermost/logr/levelstd.go new file mode 100644 index 00000000..f5e0fa46 --- /dev/null +++ b/vendor/github.com/mattermost/logr/levelstd.go @@ -0,0 +1,37 @@ +package logr + +// StdFilter allows targets to filter via classic log levels where any level +// beyond a certain verbosity/severity is enabled. +type StdFilter struct { + Lvl Level + Stacktrace Level +} + +// IsEnabled returns true if the specified Level is at or above this verbosity. Also +// determines if a stack trace is required. +func (lt StdFilter) IsEnabled(level Level) bool { + return level.ID <= lt.Lvl.ID +} + +// IsStacktraceEnabled returns true if the specified Level requires a stack trace. 
+func (lt StdFilter) IsStacktraceEnabled(level Level) bool { + return level.ID <= lt.Stacktrace.ID +} + +var ( + // Panic is the highest level of severity. Logs the message and then panics. + Panic = Level{ID: 0, Name: "panic"} + // Fatal designates a catastrophic error. Logs the message and then calls + // `logr.Exit(1)`. + Fatal = Level{ID: 1, Name: "fatal"} + // Error designates a serious but possibly recoverable error. + Error = Level{ID: 2, Name: "error"} + // Warn designates non-critical error. + Warn = Level{ID: 3, Name: "warn"} + // Info designates information regarding application events. + Info = Level{ID: 4, Name: "info"} + // Debug designates verbose information typically used for debugging. + Debug = Level{ID: 5, Name: "debug"} + // Trace designates the highest verbosity of log output. + Trace = Level{ID: 6, Name: "trace"} +) diff --git a/vendor/github.com/mattermost/logr/logger.go b/vendor/github.com/mattermost/logr/logger.go new file mode 100644 index 00000000..c2386312 --- /dev/null +++ b/vendor/github.com/mattermost/logr/logger.go @@ -0,0 +1,218 @@ +package logr + +import ( + "fmt" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Logger provides context for logging via fields. +type Logger struct { + logr *Logr + fields Fields +} + +// Logr returns the `Logr` instance that created this `Logger`. +func (logger Logger) Logr() *Logr { + return logger.logr +} + +// WithField creates a new `Logger` with any existing fields +// plus the new one. +func (logger Logger) WithField(key string, value interface{}) Logger { + return logger.WithFields(Fields{key: value}) +} + +// WithFields creates a new `Logger` with any existing fields +// plus the new ones. +func (logger Logger) WithFields(fields Fields) Logger { + l := Logger{logr: logger.logr} + // if parent has no fields then avoid creating a new map. + oldLen := len(logger.fields) + if oldLen == 0 { + l.fields = fields + return l + } + + l.fields = make(Fields, len(fields)+oldLen) + for k, v := range logger.fields { + l.fields[k] = v + } + for k, v := range fields { + l.fields[k] = v + } + return l +} + +// Log checks that the level matches one or more targets, and +// if so, generates a log record that is added to the Logr queue. +// Arguments are handled in the manner of fmt.Print. +func (logger Logger) Log(lvl Level, args ...interface{}) { + status := logger.logr.IsLevelEnabled(lvl) + if status.Enabled { + rec := NewLogRec(lvl, logger, "", args, status.Stacktrace) + logger.logr.enqueue(rec) + } +} + +// Trace is a convenience method equivalent to `Log(TraceLevel, args...)`. +func (logger Logger) Trace(args ...interface{}) { + logger.Log(Trace, args...) +} + +// Debug is a convenience method equivalent to `Log(DebugLevel, args...)`. +func (logger Logger) Debug(args ...interface{}) { + logger.Log(Debug, args...) +} + +// Print ensures compatibility with std lib logger. +func (logger Logger) Print(args ...interface{}) { + logger.Info(args...) +} + +// Info is a convenience method equivalent to `Log(InfoLevel, args...)`. +func (logger Logger) Info(args ...interface{}) { + logger.Log(Info, args...) +} + +// Warn is a convenience method equivalent to `Log(WarnLevel, args...)`. +func (logger Logger) Warn(args ...interface{}) { + logger.Log(Warn, args...) +} + +// Error is a convenience method equivalent to `Log(ErrorLevel, args...)`. +func (logger Logger) Error(args ...interface{}) { + logger.Log(Error, args...) 
+} + +// Fatal is a convenience method equivalent to `Log(FatalLevel, args...)` +// followed by a call to os.Exit(1). +func (logger Logger) Fatal(args ...interface{}) { + logger.Log(Fatal, args...) + logger.logr.exit(1) +} + +// Panic is a convenience method equivalent to `Log(PanicLevel, args...)` +// followed by a call to panic(). +func (logger Logger) Panic(args ...interface{}) { + logger.Log(Panic, args...) + panic(fmt.Sprint(args...)) +} + +// +// Printf style +// + +// Logf checks that the level matches one or more targets, and +// if so, generates a log record that is added to the main +// queue (channel). Arguments are handled in the manner of fmt.Printf. +func (logger Logger) Logf(lvl Level, format string, args ...interface{}) { + status := logger.logr.IsLevelEnabled(lvl) + if status.Enabled { + rec := NewLogRec(lvl, logger, format, args, status.Stacktrace) + logger.logr.enqueue(rec) + } +} + +// Tracef is a convenience method equivalent to `Logf(TraceLevel, args...)`. +func (logger Logger) Tracef(format string, args ...interface{}) { + logger.Logf(Trace, format, args...) +} + +// Debugf is a convenience method equivalent to `Logf(DebugLevel, args...)`. +func (logger Logger) Debugf(format string, args ...interface{}) { + logger.Logf(Debug, format, args...) +} + +// Infof is a convenience method equivalent to `Logf(InfoLevel, args...)`. +func (logger Logger) Infof(format string, args ...interface{}) { + logger.Logf(Info, format, args...) +} + +// Printf ensures compatibility with std lib logger. +func (logger Logger) Printf(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Warnf is a convenience method equivalent to `Logf(WarnLevel, args...)`. +func (logger Logger) Warnf(format string, args ...interface{}) { + logger.Logf(Warn, format, args...) +} + +// Errorf is a convenience method equivalent to `Logf(ErrorLevel, args...)`. +func (logger Logger) Errorf(format string, args ...interface{}) { + logger.Logf(Error, format, args...) +} + +// Fatalf is a convenience method equivalent to `Logf(FatalLevel, args...)` +// followed by a call to os.Exit(1). +func (logger Logger) Fatalf(format string, args ...interface{}) { + logger.Logf(Fatal, format, args...) + logger.logr.exit(1) +} + +// Panicf is a convenience method equivalent to `Logf(PanicLevel, args...)` +// followed by a call to panic(). +func (logger Logger) Panicf(format string, args ...interface{}) { + logger.Logf(Panic, format, args...) +} + +// +// Println style +// + +// Logln checks that the level matches one or more targets, and +// if so, generates a log record that is added to the main +// queue (channel). Arguments are handled in the manner of fmt.Println. +func (logger Logger) Logln(lvl Level, args ...interface{}) { + status := logger.logr.IsLevelEnabled(lvl) + if status.Enabled { + rec := NewLogRec(lvl, logger, "", args, status.Stacktrace) + rec.newline = true + logger.logr.enqueue(rec) + } +} + +// Traceln is a convenience method equivalent to `Logln(TraceLevel, args...)`. +func (logger Logger) Traceln(args ...interface{}) { + logger.Logln(Trace, args...) +} + +// Debugln is a convenience method equivalent to `Logln(DebugLevel, args...)`. +func (logger Logger) Debugln(args ...interface{}) { + logger.Logln(Debug, args...) +} + +// Infoln is a convenience method equivalent to `Logln(InfoLevel, args...)`. +func (logger Logger) Infoln(args ...interface{}) { + logger.Logln(Info, args...) +} + +// Println ensures compatibility with std lib logger. 
+func (logger Logger) Println(args ...interface{}) { + logger.Infoln(args...) +} + +// Warnln is a convenience method equivalent to `Logln(WarnLevel, args...)`. +func (logger Logger) Warnln(args ...interface{}) { + logger.Logln(Warn, args...) +} + +// Errorln is a convenience method equivalent to `Logln(ErrorLevel, args...)`. +func (logger Logger) Errorln(args ...interface{}) { + logger.Logln(Error, args...) +} + +// Fatalln is a convenience method equivalent to `Logln(FatalLevel, args...)` +// followed by a call to os.Exit(1). +func (logger Logger) Fatalln(args ...interface{}) { + logger.Logln(Fatal, args...) + logger.logr.exit(1) +} + +// Panicln is a convenience method equivalent to `Logln(PanicLevel, args...)` +// followed by a call to panic(). +func (logger Logger) Panicln(args ...interface{}) { + logger.Logln(Panic, args...) +} diff --git a/vendor/github.com/mattermost/logr/logr.go b/vendor/github.com/mattermost/logr/logr.go new file mode 100644 index 00000000..631366a5 --- /dev/null +++ b/vendor/github.com/mattermost/logr/logr.go @@ -0,0 +1,664 @@ +package logr + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/wiggin77/cfg" + "github.com/wiggin77/merror" +) + +// Logr maintains a list of log targets and accepts incoming +// log records. +type Logr struct { + tmux sync.RWMutex // target mutex + targets []Target + + mux sync.RWMutex + maxQueueSizeActual int + in chan *LogRec + done chan struct{} + once sync.Once + shutdown bool + lvlCache levelCache + + metricsInitOnce sync.Once + metricsCloseOnce sync.Once + metricsDone chan struct{} + metrics MetricsCollector + queueSizeGauge Gauge + loggedCounter Counter + errorCounter Counter + + bufferPool sync.Pool + + // MaxQueueSize is the maximum number of log records that can be queued. + // If exceeded, `OnQueueFull` is called which determines if the log + // record will be dropped or block until add is successful. + // If this is modified, it must be done before `Configure` or + // `AddTarget`. Defaults to DefaultMaxQueueSize. + MaxQueueSize int + + // OnLoggerError, when not nil, is called any time an internal + // logging error occurs. For example, this can happen when a + // target cannot connect to its data sink. + OnLoggerError func(error) + + // OnQueueFull, when not nil, is called on an attempt to add + // a log record to a full Logr queue. + // `MaxQueueSize` can be used to modify the maximum queue size. + // This function should return quickly, with a bool indicating whether + // the log record should be dropped (true) or block until the log record + // is successfully added (false). If nil then blocking (false) is assumed. + OnQueueFull func(rec *LogRec, maxQueueSize int) bool + + // OnTargetQueueFull, when not nil, is called on an attempt to add + // a log record to a full target queue provided the target supports reporting + // this condition. + // This function should return quickly, with a bool indicating whether + // the log record should be dropped (true) or block until the log record + // is successfully added (false). If nil then blocking (false) is assumed. + OnTargetQueueFull func(target Target, rec *LogRec, maxQueueSize int) bool + + // OnExit, when not nil, is called when a FatalXXX style log API is called. + // When nil, then the default behavior is to cleanly shut down this Logr and + // call `os.Exit(code)`. + OnExit func(code int) + + // OnPanic, when not nil, is called when a PanicXXX style log API is called. 
+ // When nil, then the default behavior is to cleanly shut down this Logr and + // call `panic(err)`. + OnPanic func(err interface{}) + + // EnqueueTimeout is the amount of time a log record can take to be queued. + // This only applies to blocking enqueue which happen after `logr.OnQueueFull` + // is called and returns false. + EnqueueTimeout time.Duration + + // ShutdownTimeout is the amount of time `logr.Shutdown` can execute before + // timing out. + ShutdownTimeout time.Duration + + // FlushTimeout is the amount of time `logr.Flush` can execute before + // timing out. + FlushTimeout time.Duration + + // UseSyncMapLevelCache can be set to true before the first target is added + // when high concurrency (e.g. >32 cores) is expected. This may improve + // performance with large numbers of cores - benchmark for your use case. + UseSyncMapLevelCache bool + + // MaxPooledFormatBuffer determines the maximum size of a buffer that can be + // pooled. To reduce allocations, the buffers needed during formatting (etc) + // are pooled. A very large log item will grow a buffer that could stay in + // memory indefinitely. This settings lets you control how big a pooled buffer + // can be - anything larger will be garbage collected after use. + // Defaults to 1MB. + MaxPooledBuffer int + + // DisableBufferPool when true disables the buffer pool. See MaxPooledBuffer. + DisableBufferPool bool + + // MetricsUpdateFreqMillis determines how often polled metrics are updated + // when metrics are enabled. + MetricsUpdateFreqMillis int64 +} + +// Configure adds/removes targets via the supplied `Config`. +func (logr *Logr) Configure(config *cfg.Config) error { + // TODO + return fmt.Errorf("not implemented yet") +} + +func (logr *Logr) ensureInit() { + logr.once.Do(func() { + defer func() { + go logr.start() + }() + + logr.mux.Lock() + defer logr.mux.Unlock() + + logr.maxQueueSizeActual = logr.MaxQueueSize + if logr.maxQueueSizeActual == 0 { + logr.maxQueueSizeActual = DefaultMaxQueueSize + } + + if logr.maxQueueSizeActual < 0 { + logr.maxQueueSizeActual = 0 + } + + logr.in = make(chan *LogRec, logr.maxQueueSizeActual) + logr.done = make(chan struct{}) + + if logr.UseSyncMapLevelCache { + logr.lvlCache = &syncMapLevelCache{} + } else { + logr.lvlCache = &arrayLevelCache{} + } + + if logr.MaxPooledBuffer == 0 { + logr.MaxPooledBuffer = DefaultMaxPooledBuffer + } + logr.bufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + logr.lvlCache.setup() + }) +} + +// AddTarget adds one or more targets to the logger which will receive +// log records for outputting. +func (logr *Logr) AddTarget(targets ...Target) error { + if logr.IsShutdown() { + return fmt.Errorf("AddTarget called after Logr shut down") + } + + logr.ensureInit() + metrics := logr.getMetricsCollector() + defer logr.ResetLevelCache() // call this after tmux is released + + logr.tmux.Lock() + defer logr.tmux.Unlock() + + errs := merror.New() + for _, t := range targets { + if t == nil { + continue + } + + logr.targets = append(logr.targets, t) + if metrics != nil { + if tm, ok := t.(TargetWithMetrics); ok { + if err := tm.EnableMetrics(metrics, logr.MetricsUpdateFreqMillis); err != nil { + errs.Append(err) + } + } + } + } + return errs.ErrorOrNil() +} + +// NewLogger creates a Logger using defaults. A `Logger` is light-weight +// enough to create on-demand, but typically one or more Loggers are +// created and re-used. 
+func (logr *Logr) NewLogger() Logger { + logger := Logger{logr: logr} + return logger +} + +var levelStatusDisabled = LevelStatus{} + +// IsLevelEnabled returns true if at least one target has the specified +// level enabled. The result is cached so that subsequent checks are fast. +func (logr *Logr) IsLevelEnabled(lvl Level) LevelStatus { + status, ok := logr.isLevelEnabledFromCache(lvl) + if ok { + return status + } + + // Check each target. + logr.tmux.RLock() + for _, t := range logr.targets { + e, s := t.IsLevelEnabled(lvl) + if e { + status.Enabled = true + if s { + status.Stacktrace = true + break // if both enabled then no sense checking more targets + } + } + } + logr.tmux.RUnlock() + + // Cache and return the result. + if err := logr.updateLevelCache(lvl.ID, status); err != nil { + logr.ReportError(err) + return LevelStatus{} + } + return status +} + +func (logr *Logr) isLevelEnabledFromCache(lvl Level) (LevelStatus, bool) { + logr.mux.RLock() + defer logr.mux.RUnlock() + + // Don't accept new log records after shutdown. + if logr.shutdown { + return levelStatusDisabled, true + } + + // Check cache. lvlCache may still be nil if no targets added. + if logr.lvlCache == nil { + return levelStatusDisabled, true + } + status, ok := logr.lvlCache.get(lvl.ID) + if ok { + return status, true + } + return LevelStatus{}, false +} + +func (logr *Logr) updateLevelCache(id LevelID, status LevelStatus) error { + logr.mux.RLock() + defer logr.mux.RUnlock() + if logr.lvlCache != nil { + return logr.lvlCache.put(id, status) + } + return nil +} + +// HasTargets returns true only if at least one target exists within the Logr. +func (logr *Logr) HasTargets() bool { + logr.tmux.RLock() + defer logr.tmux.RUnlock() + return len(logr.targets) > 0 +} + +// TargetInfo provides name and type for a Target. +type TargetInfo struct { + Name string + Type string +} + +// TargetInfos enumerates all the targets added to this Logr. +// The resulting slice represents a snapshot at time of calling. +func (logr *Logr) TargetInfos() []TargetInfo { + logr.tmux.RLock() + defer logr.tmux.RUnlock() + + infos := make([]TargetInfo, 0) + + for _, t := range logr.targets { + inf := TargetInfo{ + Name: fmt.Sprintf("%v", t), + Type: fmt.Sprintf("%T", t), + } + infos = append(infos, inf) + } + return infos +} + +// RemoveTargets safely removes one or more targets based on the filtering method. +// f should return true to delete the target, false to keep it. +// When removing a target, best effort is made to write any queued log records before +// closing, with cxt determining how much time can be spent in total. +// Note, keep the timeout short since this method blocks certain logging operations. +func (logr *Logr) RemoveTargets(cxt context.Context, f func(ti TargetInfo) bool) error { + var removed bool + defer func() { + if removed { + // call this after tmux is released since + // it will lock mux and we don't want to + // introduce possible deadlock. + logr.ResetLevelCache() + } + }() + + errs := merror.New() + + logr.tmux.Lock() + defer logr.tmux.Unlock() + + cp := make([]Target, 0) + + for _, t := range logr.targets { + inf := TargetInfo{ + Name: fmt.Sprintf("%v", t), + Type: fmt.Sprintf("%T", t), + } + if f(inf) { + if err := t.Shutdown(cxt); err != nil { + errs.Append(err) + } + removed = true + } else { + cp = append(cp, t) + } + } + logr.targets = cp + return errs.ErrorOrNil() +} + +// ResetLevelCache resets the cached results of `IsLevelEnabled`. 
This is +// called any time a Target is added or a target's level is changed. +func (logr *Logr) ResetLevelCache() { + // Write lock so that new cache entries cannot be stored while we + // clear the cache. + logr.mux.Lock() + defer logr.mux.Unlock() + logr.resetLevelCache() +} + +// resetLevelCache empties the level cache without locking. +// mux.Lock must be held before calling this function. +func (logr *Logr) resetLevelCache() { + // lvlCache may still be nil if no targets added. + if logr.lvlCache != nil { + logr.lvlCache.clear() + } +} + +// enqueue adds a log record to the logr queue. If the queue is full then +// this function either blocks or the log record is dropped, depending on +// the result of calling `OnQueueFull`. +func (logr *Logr) enqueue(rec *LogRec) { + if logr.in == nil { + logr.ReportError(fmt.Errorf("AddTarget or Configure must be called before enqueue")) + } + + select { + case logr.in <- rec: + default: + if logr.OnQueueFull != nil && logr.OnQueueFull(rec, logr.maxQueueSizeActual) { + return // drop the record + } + select { + case <-time.After(logr.enqueueTimeout()): + logr.ReportError(fmt.Errorf("enqueue timed out for log rec [%v]", rec)) + case logr.in <- rec: // block until success or timeout + } + } +} + +// exit is called by one of the FatalXXX style APIS. If `logr.OnExit` is not nil +// then that method is called, otherwise the default behavior is to shut down this +// Logr cleanly then call `os.Exit(code)`. +func (logr *Logr) exit(code int) { + if logr.OnExit != nil { + logr.OnExit(code) + return + } + + if err := logr.Shutdown(); err != nil { + logr.ReportError(err) + } + os.Exit(code) +} + +// panic is called by one of the PanicXXX style APIS. If `logr.OnPanic` is not nil +// then that method is called, otherwise the default behavior is to shut down this +// Logr cleanly then call `panic(err)`. +func (logr *Logr) panic(err interface{}) { + if logr.OnPanic != nil { + logr.OnPanic(err) + return + } + + if err := logr.Shutdown(); err != nil { + logr.ReportError(err) + } + panic(err) +} + +// Flush blocks while flushing the logr queue and all target queues, by +// writing existing log records to valid targets. +// Any attempts to add new log records will block until flush is complete. +// `logr.FlushTimeout` determines how long flush can execute before +// timing out. Use `IsTimeoutError` to determine if the returned error is +// due to a timeout. +func (logr *Logr) Flush() error { + ctx, cancel := context.WithTimeout(context.Background(), logr.flushTimeout()) + defer cancel() + return logr.FlushWithTimeout(ctx) +} + +// Flush blocks while flushing the logr queue and all target queues, by +// writing existing log records to valid targets. +// Any attempts to add new log records will block until flush is complete. +// Use `IsTimeoutError` to determine if the returned error is +// due to a timeout. +func (logr *Logr) FlushWithTimeout(ctx context.Context) error { + if !logr.HasTargets() { + return nil + } + + if logr.IsShutdown() { + return errors.New("Flush called on shut down Logr") + } + + rec := newFlushLogRec(logr.NewLogger()) + logr.enqueue(rec) + + select { + case <-ctx.Done(): + return newTimeoutError("logr queue shutdown timeout") + case <-rec.flush: + } + return nil +} + +// IsShutdown returns true if this Logr instance has been shut down. +// No further log records can be enqueued and no targets added after +// shutdown. 
+func (logr *Logr) IsShutdown() bool { + logr.mux.Lock() + defer logr.mux.Unlock() + return logr.shutdown +} + +// Shutdown cleanly stops the logging engine after making best efforts +// to flush all targets. Call this function right before application +// exit - logr cannot be restarted once shut down. +// `logr.ShutdownTimeout` determines how long shutdown can execute before +// timing out. Use `IsTimeoutError` to determine if the returned error is +// due to a timeout. +func (logr *Logr) Shutdown() error { + ctx, cancel := context.WithTimeout(context.Background(), logr.shutdownTimeout()) + defer cancel() + return logr.ShutdownWithTimeout(ctx) +} + +// Shutdown cleanly stops the logging engine after making best efforts +// to flush all targets. Call this function right before application +// exit - logr cannot be restarted once shut down. +// Use `IsTimeoutError` to determine if the returned error is due to a +// timeout. +func (logr *Logr) ShutdownWithTimeout(ctx context.Context) error { + logr.mux.Lock() + if logr.shutdown { + logr.mux.Unlock() + return errors.New("Shutdown called again after shut down") + } + logr.shutdown = true + logr.resetLevelCache() + logr.mux.Unlock() + + logr.metricsCloseOnce.Do(func() { + if logr.metricsDone != nil { + close(logr.metricsDone) + } + }) + + errs := merror.New() + + // close the incoming channel and wait for read loop to exit. + if logr.in != nil { + close(logr.in) + select { + case <-ctx.Done(): + errs.Append(newTimeoutError("logr queue shutdown timeout")) + case <-logr.done: + } + } + + // logr.in channel should now be drained to targets and no more log records + // can be added. + logr.tmux.RLock() + defer logr.tmux.RUnlock() + for _, t := range logr.targets { + err := t.Shutdown(ctx) + if err != nil { + errs.Append(err) + } + } + return errs.ErrorOrNil() +} + +// ReportError is used to notify the host application of any internal logging errors. +// If `OnLoggerError` is not nil, it is called with the error, otherwise the error is +// output to `os.Stderr`. +func (logr *Logr) ReportError(err interface{}) { + logr.incErrorCounter() + + if logr.OnLoggerError == nil { + fmt.Fprintln(os.Stderr, err) + return + } + logr.OnLoggerError(fmt.Errorf("%v", err)) +} + +// BorrowBuffer borrows a buffer from the pool. Release the buffer to reduce garbage collection. +func (logr *Logr) BorrowBuffer() *bytes.Buffer { + if logr.DisableBufferPool { + return &bytes.Buffer{} + } + return logr.bufferPool.Get().(*bytes.Buffer) +} + +// ReleaseBuffer returns a buffer to the pool to reduce garbage collection. The buffer is only +// retained if less than MaxPooledBuffer. +func (logr *Logr) ReleaseBuffer(buf *bytes.Buffer) { + if !logr.DisableBufferPool && buf.Cap() < logr.MaxPooledBuffer { + buf.Reset() + logr.bufferPool.Put(buf) + } +} + +// enqueueTimeout returns amount of time a log record can take to be queued. +// This only applies to blocking enqueue which happen after `logr.OnQueueFull` is called +// and returns false. +func (logr *Logr) enqueueTimeout() time.Duration { + if logr.EnqueueTimeout == 0 { + return DefaultEnqueueTimeout + } + return logr.EnqueueTimeout +} + +// shutdownTimeout returns the timeout duration for `logr.Shutdown`. +func (logr *Logr) shutdownTimeout() time.Duration { + if logr.ShutdownTimeout == 0 { + return DefaultShutdownTimeout + } + return logr.ShutdownTimeout +} + +// flushTimeout returns the timeout duration for `logr.Flush`. 
+func (logr *Logr) flushTimeout() time.Duration { + if logr.FlushTimeout == 0 { + return DefaultFlushTimeout + } + return logr.FlushTimeout +} + +// start selects on incoming log records until done channel signals. +// Incoming log records are fanned out to all log targets. +func (logr *Logr) start() { + defer func() { + if r := recover(); r != nil { + logr.ReportError(r) + go logr.start() + } + }() + + for rec := range logr.in { + if rec.flush != nil { + logr.flush(rec.flush) + } else { + rec.prep() + logr.fanout(rec) + } + } + close(logr.done) +} + +// startMetricsUpdater updates the metrics for any polled values every `MetricsUpdateFreqSecs` seconds until +// logr is closed. +func (logr *Logr) startMetricsUpdater() { + for { + updateFreq := logr.getMetricsUpdateFreqMillis() + if updateFreq == 0 { + updateFreq = DefMetricsUpdateFreqMillis + } + if updateFreq < 250 { + updateFreq = 250 // don't peg the CPU + } + + select { + case <-logr.metricsDone: + return + case <-time.After(time.Duration(updateFreq) * time.Millisecond): + logr.setQueueSizeGauge(float64(len(logr.in))) + } + } +} + +func (logr *Logr) getMetricsUpdateFreqMillis() int64 { + logr.mux.RLock() + defer logr.mux.RUnlock() + return logr.MetricsUpdateFreqMillis +} + +// fanout pushes a LogRec to all targets. +func (logr *Logr) fanout(rec *LogRec) { + var target Target + defer func() { + if r := recover(); r != nil { + logr.ReportError(fmt.Errorf("fanout failed for target %s, %v", target, r)) + } + }() + + var logged bool + defer func() { + if logged { + logr.incLoggedCounter() // call this after tmux is released + } + }() + + logr.tmux.RLock() + defer logr.tmux.RUnlock() + for _, target = range logr.targets { + if enabled, _ := target.IsLevelEnabled(rec.Level()); enabled { + target.Log(rec) + logged = true + } + } +} + +// flush drains the queue and notifies when done. +func (logr *Logr) flush(done chan<- struct{}) { + // first drain the logr queue. +loop: + for { + var rec *LogRec + select { + case rec = <-logr.in: + if rec.flush == nil { + rec.prep() + logr.fanout(rec) + } + default: + break loop + } + } + + logger := logr.NewLogger() + + // drain all the targets; block until finished. + logr.tmux.RLock() + defer logr.tmux.RUnlock() + for _, target := range logr.targets { + rec := newFlushLogRec(logger) + target.Log(rec) + <-rec.flush + } + done <- struct{}{} +} diff --git a/vendor/github.com/mattermost/logr/logrec.go b/vendor/github.com/mattermost/logr/logrec.go new file mode 100644 index 00000000..9428aaec --- /dev/null +++ b/vendor/github.com/mattermost/logr/logrec.go @@ -0,0 +1,189 @@ +package logr + +import ( + "fmt" + "runtime" + "strings" + "sync" + "time" +) + +var ( + logrPkg string +) + +func init() { + // Calc current package name + pcs := make([]uintptr, 2) + _ = runtime.Callers(0, pcs) + tmp := runtime.FuncForPC(pcs[1]).Name() + logrPkg = getPackageName(tmp) +} + +// LogRec collects raw, unformatted data to be logged. +// TODO: pool these? how to reliably know when targets are done with them? Copy for each target? +type LogRec struct { + mux sync.RWMutex + time time.Time + + level Level + logger Logger + + template string + newline bool + args []interface{} + + stackPC []uintptr + stackCount int + + // flushes Logr and target queues when not nil. + flush chan struct{} + + // remaining fields calculated by `prep` + msg string + frames []runtime.Frame +} + +// NewLogRec creates a new LogRec with the current time and optional stack trace. 
+func NewLogRec(lvl Level, logger Logger, template string, args []interface{}, incStacktrace bool) *LogRec { + rec := &LogRec{time: time.Now(), logger: logger, level: lvl, template: template, args: args} + if incStacktrace { + rec.stackPC = make([]uintptr, DefaultMaxStackFrames) + rec.stackCount = runtime.Callers(2, rec.stackPC) + } + return rec +} + +// newFlushLogRec creates a LogRec that flushes the Logr queue and +// any target queues that support flushing. +func newFlushLogRec(logger Logger) *LogRec { + return &LogRec{logger: logger, flush: make(chan struct{})} +} + +// prep resolves all args and field values to strings, and +// resolves stack trace to frames. +func (rec *LogRec) prep() { + rec.mux.Lock() + defer rec.mux.Unlock() + + // resolve args + if rec.template == "" { + if rec.newline { + rec.msg = fmt.Sprintln(rec.args...) + } else { + rec.msg = fmt.Sprint(rec.args...) + } + } else { + rec.msg = fmt.Sprintf(rec.template, rec.args...) + } + + // resolve stack trace + if rec.stackCount > 0 { + frames := runtime.CallersFrames(rec.stackPC[:rec.stackCount]) + for { + f, more := frames.Next() + rec.frames = append(rec.frames, f) + if !more { + break + } + } + + // remove leading logr package entries. + var start int + for i, frame := range rec.frames { + pkg := getPackageName(frame.Function) + if pkg != "" && pkg != logrPkg { + start = i + break + } + } + rec.frames = rec.frames[start:] + } +} + +// WithTime returns a shallow copy of the log record while replacing +// the time. This can be used by targets and formatters to adjust +// the time, or take ownership of the log record. +func (rec *LogRec) WithTime(time time.Time) *LogRec { + rec.mux.RLock() + defer rec.mux.RUnlock() + + return &LogRec{ + time: time, + level: rec.level, + logger: rec.logger, + template: rec.template, + newline: rec.newline, + args: rec.args, + msg: rec.msg, + stackPC: rec.stackPC, + stackCount: rec.stackCount, + frames: rec.frames, + } +} + +// Logger returns the `Logger` that created this `LogRec`. +func (rec *LogRec) Logger() Logger { + return rec.logger +} + +// Time returns this log record's time stamp. +func (rec *LogRec) Time() time.Time { + // no locking needed as this field is not mutated. + return rec.time +} + +// Level returns this log record's Level. +func (rec *LogRec) Level() Level { + // no locking needed as this field is not mutated. + return rec.level +} + +// Fields returns this log record's Fields. +func (rec *LogRec) Fields() Fields { + // no locking needed as this field is not mutated. + return rec.logger.fields +} + +// Msg returns this log record's message text. +func (rec *LogRec) Msg() string { + rec.mux.RLock() + defer rec.mux.RUnlock() + return rec.msg +} + +// StackFrames returns this log record's stack frames or +// nil if no stack trace was required. +func (rec *LogRec) StackFrames() []runtime.Frame { + rec.mux.RLock() + defer rec.mux.RUnlock() + return rec.frames +} + +// String returns a string representation of this log record. 
+func (rec *LogRec) String() string { + if rec.flush != nil { + return "[flusher]" + } + + f := &DefaultFormatter{} + buf := rec.logger.logr.BorrowBuffer() + defer rec.logger.logr.ReleaseBuffer(buf) + buf, _ = f.Format(rec, true, buf) + return strings.TrimSpace(buf.String()) +} + +// getPackageName reduces a fully qualified function name to the package name +// By sirupsen: https://github.com/sirupsen/logrus/blob/master/entry.go +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + return f +} diff --git a/vendor/github.com/mattermost/logr/metrics.go b/vendor/github.com/mattermost/logr/metrics.go new file mode 100644 index 00000000..24fe22b6 --- /dev/null +++ b/vendor/github.com/mattermost/logr/metrics.go @@ -0,0 +1,117 @@ +package logr + +import ( + "errors" + + "github.com/wiggin77/merror" +) + +const ( + DefMetricsUpdateFreqMillis = 15000 // 15 seconds +) + +// Counter is a simple metrics sink that can only increment a value. +// Implementations are external to Logr and provided via `MetricsCollector`. +type Counter interface { + // Inc increments the counter by 1. Use Add to increment it by arbitrary non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < 0. + Add(float64) +} + +// Gauge is a simple metrics sink that can receive values and increase or decrease. +// Implementations are external to Logr and provided via `MetricsCollector`. +type Gauge interface { + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Add adds the given value to the Gauge. (The value can be negative, resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be negative, resulting in an increase of the Gauge.) + Sub(float64) +} + +// MetricsCollector provides a way for users of this Logr package to have metrics pushed +// in an efficient way to any backend, e.g. Prometheus. +// For each target added to Logr, the supplied MetricsCollector will provide a Gauge +// and Counters that will be called frequently as logging occurs. +type MetricsCollector interface { + // QueueSizeGauge returns a Gauge that will be updated by the named target. + QueueSizeGauge(target string) (Gauge, error) + // LoggedCounter returns a Counter that will be incremented by the named target. + LoggedCounter(target string) (Counter, error) + // ErrorCounter returns a Counter that will be incremented by the named target. + ErrorCounter(target string) (Counter, error) + // DroppedCounter returns a Counter that will be incremented by the named target. + DroppedCounter(target string) (Counter, error) + // BlockedCounter returns a Counter that will be incremented by the named target. + BlockedCounter(target string) (Counter, error) +} + +// TargetWithMetrics is a target that provides metrics. +type TargetWithMetrics interface { + EnableMetrics(collector MetricsCollector, updateFreqMillis int64) error +} + +func (logr *Logr) getMetricsCollector() MetricsCollector { + logr.mux.RLock() + defer logr.mux.RUnlock() + return logr.metrics +} + +// SetMetricsCollector enables metrics collection by supplying a MetricsCollector. +// The MetricsCollector provides counters and gauges that are updated by log targets. 
+func (logr *Logr) SetMetricsCollector(collector MetricsCollector) error { + if collector == nil { + return errors.New("collector cannot be nil") + } + + logr.mux.Lock() + logr.metrics = collector + logr.queueSizeGauge, _ = collector.QueueSizeGauge("_logr") + logr.loggedCounter, _ = collector.LoggedCounter("_logr") + logr.errorCounter, _ = collector.ErrorCounter("_logr") + logr.mux.Unlock() + + logr.metricsInitOnce.Do(func() { + logr.metricsDone = make(chan struct{}) + go logr.startMetricsUpdater() + }) + + merr := merror.New() + + logr.tmux.RLock() + defer logr.tmux.RUnlock() + for _, target := range logr.targets { + if tm, ok := target.(TargetWithMetrics); ok { + if err := tm.EnableMetrics(collector, logr.MetricsUpdateFreqMillis); err != nil { + merr.Append(err) + } + } + + } + return merr.ErrorOrNil() +} + +func (logr *Logr) setQueueSizeGauge(val float64) { + logr.mux.RLock() + defer logr.mux.RUnlock() + if logr.queueSizeGauge != nil { + logr.queueSizeGauge.Set(val) + } +} + +func (logr *Logr) incLoggedCounter() { + logr.mux.RLock() + defer logr.mux.RUnlock() + if logr.loggedCounter != nil { + logr.loggedCounter.Inc() + } +} + +func (logr *Logr) incErrorCounter() { + logr.mux.RLock() + defer logr.mux.RUnlock() + if logr.errorCounter != nil { + logr.errorCounter.Inc() + } +} diff --git a/vendor/github.com/mattermost/logr/target.go b/vendor/github.com/mattermost/logr/target.go new file mode 100644 index 00000000..f8e7bf75 --- /dev/null +++ b/vendor/github.com/mattermost/logr/target.go @@ -0,0 +1,299 @@ +package logr + +import ( + "context" + "fmt" + "os" + "sync" + "time" +) + +// Target represents a destination for log records such as file, +// database, TCP socket, etc. +type Target interface { + // SetName provides an optional name for the target. + SetName(name string) + + // IsLevelEnabled returns true if this target should emit + // logs for the specified level. Also determines if + // a stack trace is required. + IsLevelEnabled(Level) (enabled bool, stacktrace bool) + + // Formatter returns the Formatter associated with this Target. + Formatter() Formatter + + // Log outputs the log record to this target's destination. + Log(rec *LogRec) + + // Shutdown makes best effort to flush target queue and + // frees/closes all resources. + Shutdown(ctx context.Context) error +} + +// RecordWriter can convert a LogRecord to bytes and output to some data sink. +type RecordWriter interface { + Write(rec *LogRec) error +} + +// Basic provides the basic functionality of a Target that can be used +// to more easily compose your own Targets. To use, just embed Basic +// in your target type, implement `RecordWriter`, and call `(*Basic).Start`. +type Basic struct { + target Target + + filter Filter + formatter Formatter + + in chan *LogRec + done chan struct{} + w RecordWriter + + mux sync.RWMutex + name string + + metrics bool + queueSizeGauge Gauge + loggedCounter Counter + errorCounter Counter + droppedCounter Counter + blockedCounter Counter + + metricsUpdateFreqMillis int64 +} + +// Start initializes this target helper and starts accepting log records for processing. 
+func (b *Basic) Start(target Target, rw RecordWriter, filter Filter, formatter Formatter, maxQueued int) { + if filter == nil { + filter = &StdFilter{Lvl: Fatal} + } + if formatter == nil { + formatter = &DefaultFormatter{} + } + + b.target = target + b.filter = filter + b.formatter = formatter + b.in = make(chan *LogRec, maxQueued) + b.done = make(chan struct{}, 1) + b.w = rw + go b.start() + + if b.hasMetrics() { + go b.startMetricsUpdater() + } +} + +func (b *Basic) SetName(name string) { + b.mux.Lock() + defer b.mux.Unlock() + b.name = name +} + +// IsLevelEnabled returns true if this target should emit +// logs for the specified level. Also determines if +// a stack trace is required. +func (b *Basic) IsLevelEnabled(lvl Level) (enabled bool, stacktrace bool) { + return b.filter.IsEnabled(lvl), b.filter.IsStacktraceEnabled(lvl) +} + +// Formatter returns the Formatter associated with this Target. +func (b *Basic) Formatter() Formatter { + return b.formatter +} + +// Shutdown stops processing log records after making best +// effort to flush queue. +func (b *Basic) Shutdown(ctx context.Context) error { + // close the incoming channel and wait for read loop to exit. + close(b.in) + select { + case <-ctx.Done(): + case <-b.done: + } + + // b.in channel should now be drained. + return nil +} + +// Log outputs the log record to this targets destination. +func (b *Basic) Log(rec *LogRec) { + lgr := rec.Logger().Logr() + select { + case b.in <- rec: + default: + handler := lgr.OnTargetQueueFull + if handler != nil && handler(b.target, rec, cap(b.in)) { + b.incDroppedCounter() + return // drop the record + } + b.incBlockedCounter() + + select { + case <-time.After(lgr.enqueueTimeout()): + lgr.ReportError(fmt.Errorf("target enqueue timeout for log rec [%v]", rec)) + case b.in <- rec: // block until success or timeout + } + } +} + +// Metrics enables metrics collection using the provided MetricsCollector. +func (b *Basic) EnableMetrics(collector MetricsCollector, updateFreqMillis int64) error { + name := fmt.Sprintf("%v", b) + + b.mux.Lock() + defer b.mux.Unlock() + + b.metrics = true + b.metricsUpdateFreqMillis = updateFreqMillis + + var err error + + if b.queueSizeGauge, err = collector.QueueSizeGauge(name); err != nil { + return err + } + if b.loggedCounter, err = collector.LoggedCounter(name); err != nil { + return err + } + if b.errorCounter, err = collector.ErrorCounter(name); err != nil { + return err + } + if b.droppedCounter, err = collector.DroppedCounter(name); err != nil { + return err + } + if b.blockedCounter, err = collector.BlockedCounter(name); err != nil { + return err + } + return nil +} + +func (b *Basic) hasMetrics() bool { + b.mux.RLock() + defer b.mux.RUnlock() + return b.metrics +} + +func (b *Basic) setQueueSizeGauge(val float64) { + b.mux.RLock() + defer b.mux.RUnlock() + if b.queueSizeGauge != nil { + b.queueSizeGauge.Set(val) + } +} + +func (b *Basic) incLoggedCounter() { + b.mux.RLock() + defer b.mux.RUnlock() + if b.loggedCounter != nil { + b.loggedCounter.Inc() + } +} + +func (b *Basic) incErrorCounter() { + b.mux.RLock() + defer b.mux.RUnlock() + if b.errorCounter != nil { + b.errorCounter.Inc() + } +} + +func (b *Basic) incDroppedCounter() { + b.mux.RLock() + defer b.mux.RUnlock() + if b.droppedCounter != nil { + b.droppedCounter.Inc() + } +} + +func (b *Basic) incBlockedCounter() { + b.mux.RLock() + defer b.mux.RUnlock() + if b.blockedCounter != nil { + b.blockedCounter.Inc() + } +} + +// String returns a name for this target. Use `SetName` to specify a name. 
+func (b *Basic) String() string { + b.mux.RLock() + defer b.mux.RUnlock() + + if b.name != "" { + return b.name + } + return fmt.Sprintf("%T", b.target) +} + +// Start accepts log records via In channel and writes to the +// supplied writer, until Done channel signaled. +func (b *Basic) start() { + defer func() { + if r := recover(); r != nil { + fmt.Fprintln(os.Stderr, "Basic.start -- ", r) + go b.start() + } + }() + + for rec := range b.in { + if rec.flush != nil { + b.flush(rec.flush) + } else { + err := b.w.Write(rec) + if err != nil { + b.incErrorCounter() + rec.Logger().Logr().ReportError(err) + } else { + b.incLoggedCounter() + } + } + } + close(b.done) +} + +// startMetricsUpdater updates the metrics for any polled values every `MetricsUpdateFreqSecs` seconds until +// target is closed. +func (b *Basic) startMetricsUpdater() { + for { + updateFreq := b.getMetricsUpdateFreqMillis() + if updateFreq == 0 { + updateFreq = DefMetricsUpdateFreqMillis + } + if updateFreq < 250 { + updateFreq = 250 // don't peg the CPU + } + + select { + case <-b.done: + return + case <-time.After(time.Duration(updateFreq) * time.Millisecond): + b.setQueueSizeGauge(float64(len(b.in))) + } + } +} + +func (b *Basic) getMetricsUpdateFreqMillis() int64 { + b.mux.RLock() + defer b.mux.RUnlock() + return b.metricsUpdateFreqMillis +} + +// flush drains the queue and notifies when done. +func (b *Basic) flush(done chan<- struct{}) { + for { + var rec *LogRec + var err error + select { + case rec = <-b.in: + // ignore any redundant flush records. + if rec.flush == nil { + err = b.w.Write(rec) + if err != nil { + b.incErrorCounter() + rec.Logger().Logr().ReportError(err) + } + } + default: + done <- struct{}{} + return + } + } +} diff --git a/vendor/github.com/mattermost/logr/target/file.go b/vendor/github.com/mattermost/logr/target/file.go new file mode 100644 index 00000000..0fd50768 --- /dev/null +++ b/vendor/github.com/mattermost/logr/target/file.go @@ -0,0 +1,87 @@ +package target + +import ( + "context" + "io" + + "github.com/mattermost/logr" + "github.com/wiggin77/merror" + "gopkg.in/natefinch/lumberjack.v2" +) + +type FileOptions struct { + // Filename is the file to write logs to. Backup log files will be retained + // in the same directory. It uses -lumberjack.log in + // os.TempDir() if empty. + Filename string + + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSize int + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is not to remove old log files + // based on age. + MaxAge int + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int + + // Compress determines if the rotated log files should be compressed + // using gzip. The default is not to perform compression. + Compress bool +} + +// File outputs log records to a file which can be log rotated based on size or age. +// Uses `https://github.com/natefinch/lumberjack` for rotation. +type File struct { + logr.Basic + out io.WriteCloser +} + +// NewFileTarget creates a target capable of outputting log records to a rotated file. 
+func NewFileTarget(filter logr.Filter, formatter logr.Formatter, opts FileOptions, maxQueue int) *File { + lumber := &lumberjack.Logger{ + Filename: opts.Filename, + MaxSize: opts.MaxSize, + MaxBackups: opts.MaxBackups, + MaxAge: opts.MaxAge, + Compress: opts.Compress, + } + f := &File{out: lumber} + f.Basic.Start(f, f, filter, formatter, maxQueue) + return f +} + +// Write converts the log record to bytes, via the Formatter, +// and outputs to a file. +func (f *File) Write(rec *logr.LogRec) error { + _, stacktrace := f.IsLevelEnabled(rec.Level()) + + buf := rec.Logger().Logr().BorrowBuffer() + defer rec.Logger().Logr().ReleaseBuffer(buf) + + buf, err := f.Formatter().Format(rec, stacktrace, buf) + if err != nil { + return err + } + _, err = f.out.Write(buf.Bytes()) + return err +} + +// Shutdown flushes any remaining log records and closes the file. +func (f *File) Shutdown(ctx context.Context) error { + errs := merror.New() + + err := f.Basic.Shutdown(ctx) + errs.Append(err) + + err = f.out.Close() + errs.Append(err) + + return errs.ErrorOrNil() +} diff --git a/vendor/github.com/mattermost/logr/target/syslog.go b/vendor/github.com/mattermost/logr/target/syslog.go new file mode 100644 index 00000000..1d2013b6 --- /dev/null +++ b/vendor/github.com/mattermost/logr/target/syslog.go @@ -0,0 +1,89 @@ +// +build !windows,!nacl,!plan9 + +package target + +import ( + "context" + "fmt" + "log/syslog" + + "github.com/mattermost/logr" + "github.com/wiggin77/merror" +) + +// Syslog outputs log records to local or remote syslog. +type Syslog struct { + logr.Basic + w *syslog.Writer +} + +// SyslogParams provides parameters for dialing a syslog daemon. +type SyslogParams struct { + Network string + Raddr string + Priority syslog.Priority + Tag string +} + +// NewSyslogTarget creates a target capable of outputting log records to remote or local syslog. +func NewSyslogTarget(filter logr.Filter, formatter logr.Formatter, params *SyslogParams, maxQueue int) (*Syslog, error) { + writer, err := syslog.Dial(params.Network, params.Raddr, params.Priority, params.Tag) + if err != nil { + return nil, err + } + + s := &Syslog{w: writer} + s.Basic.Start(s, s, filter, formatter, maxQueue) + + return s, nil +} + +// Shutdown stops processing log records after making best +// effort to flush queue. +func (s *Syslog) Shutdown(ctx context.Context) error { + errs := merror.New() + + err := s.Basic.Shutdown(ctx) + errs.Append(err) + + err = s.w.Close() + errs.Append(err) + + return errs.ErrorOrNil() +} + +// Write converts the log record to bytes, via the Formatter, +// and outputs to syslog. +func (s *Syslog) Write(rec *logr.LogRec) error { + _, stacktrace := s.IsLevelEnabled(rec.Level()) + + buf := rec.Logger().Logr().BorrowBuffer() + defer rec.Logger().Logr().ReleaseBuffer(buf) + + buf, err := s.Formatter().Format(rec, stacktrace, buf) + if err != nil { + return err + } + txt := buf.String() + + switch rec.Level() { + case logr.Panic, logr.Fatal: + err = s.w.Crit(txt) + case logr.Error: + err = s.w.Err(txt) + case logr.Warn: + err = s.w.Warning(txt) + case logr.Debug, logr.Trace: + err = s.w.Debug(txt) + default: + // logr.Info plus all custom levels. + err = s.w.Info(txt) + } + + if err != nil { + reporter := rec.Logger().Logr().ReportError + reporter(fmt.Errorf("syslog write fail: %w", err)) + // syslog writer will try to reconnect. 
+ } + return err +} diff --git a/vendor/github.com/mattermost/logr/target/writer.go b/vendor/github.com/mattermost/logr/target/writer.go new file mode 100644 index 00000000..2250da51 --- /dev/null +++ b/vendor/github.com/mattermost/logr/target/writer.go @@ -0,0 +1,40 @@ +package target + +import ( + "io" + "io/ioutil" + + "github.com/mattermost/logr" +) + +// Writer outputs log records to any `io.Writer`. +type Writer struct { + logr.Basic + out io.Writer +} + +// NewWriterTarget creates a target capable of outputting log records to an io.Writer. +func NewWriterTarget(filter logr.Filter, formatter logr.Formatter, out io.Writer, maxQueue int) *Writer { + if out == nil { + out = ioutil.Discard + } + w := &Writer{out: out} + w.Basic.Start(w, w, filter, formatter, maxQueue) + return w +} + +// Write converts the log record to bytes, via the Formatter, +// and outputs to the io.Writer. +func (w *Writer) Write(rec *logr.LogRec) error { + _, stacktrace := w.IsLevelEnabled(rec.Level()) + + buf := rec.Logger().Logr().BorrowBuffer() + defer rec.Logger().Logr().ReleaseBuffer(buf) + + buf, err := w.Formatter().Format(rec, stacktrace, buf) + if err != nil { + return err + } + _, err = w.out.Write(buf.Bytes()) + return err +} diff --git a/vendor/github.com/mattermost/logr/timeout.go b/vendor/github.com/mattermost/logr/timeout.go new file mode 100644 index 00000000..37737bcf --- /dev/null +++ b/vendor/github.com/mattermost/logr/timeout.go @@ -0,0 +1,34 @@ +package logr + +import "github.com/wiggin77/merror" + +// timeoutError is returned from functions that can timeout. +type timeoutError struct { + text string +} + +// newTimeoutError returns a TimeoutError. +func newTimeoutError(text string) timeoutError { + return timeoutError{text: text} +} + +// IsTimeoutError returns true if err is a TimeoutError. +func IsTimeoutError(err error) bool { + if _, ok := err.(timeoutError); ok { + return true + } + // if a multi-error, return true if any of the errors + // are TimeoutError + if merr, ok := err.(*merror.MError); ok { + for _, e := range merr.Errors() { + if IsTimeoutError(e) { + return true + } + } + } + return false +} + +func (err timeoutError) Error() string { + return err.text +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt index b40b5e58..8382687d 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt +++ b/vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt @@ -11,7 +11,7 @@ You may be licensed to use source code to create compiled versions not produced 1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or 2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com -You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/default.json, model/, +You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/default.json, i18n/, model/, plugin/ and all subdirectories thereof) under the Apache License v2.0. 
We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not diff --git a/vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt b/vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt index 0316f702..c067c12f 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt +++ b/vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt @@ -4132,3 +4132,145 @@ A caching, resizing image proxy written in Go of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS + +--- + +## oov/psd + +This product contains 'psd' by oov. + +A PSD/PSB file reader for go + +* HOMEPAGE: + * https://github.com/oov/psd + +* LICENSE: MIT + +MIT License + +Copyright (c) 2016 oov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--- + +## gopherjs + +This product contains 'gopherjs' by Richard Musiol. + +A Go code to javascript code compiler. + +* HOMEPAGE: + * https://github.com/gopherjs/gopherjs + +* LICENSE: + +Copyright (c) 2013 Richard Musiol. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--- + +## semver + +This product contains 'semver' by Masterminds. + +The semver package provides the ability to work with Semantic Versions in Go. 
+ +* HOMEPAGE: + * https://github.com/Masterminds/semver + +* LICENSE: + +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +--- + +## Date Constraints + +This product contains 'dateconstraints' by Eli Yukelzon. + +Go library to validate a date against constraints + +* HOMEPAGE: + * https://github.com/reflog/dateconstraints + +* LICENSE: + +MIT License + +Copyright (c) 2020 Eli Yukelzon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go index f356eec7..1e409b19 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go @@ -4,9 +4,13 @@ package mlog import ( + "context" "encoding/json" + "errors" "fmt" "os" + + "github.com/mattermost/logr" ) // defaultLog manually encodes the log to STDERR, providing a basic, default logging implementation @@ -49,3 +53,43 @@ func defaultCriticalLog(msg string, fields ...Field) { // We map critical to error in zap, so be consistent. defaultLog("error", msg, fields...) } + +func defaultCustomLog(lvl LogLevel, msg string, fields ...Field) { + // custom log levels are only output once log targets are configured. +} + +func defaultCustomMultiLog(lvl []LogLevel, msg string, fields ...Field) { + // custom log levels are only output once log targets are configured. 
+} + +func defaultFlush(ctx context.Context) error { + return nil +} + +func defaultAdvancedConfig(cfg LogTargetCfg) error { + // mlog.ConfigAdvancedConfig should not be called until default + // logger is replaced with mlog.Logger instance. + return errors.New("cannot config advanced logging on default logger") +} + +func defaultAdvancedShutdown(ctx context.Context) error { + return nil +} + +func defaultAddTarget(targets ...logr.Target) error { + // mlog.AddTarget should not be called until default + // logger is replaced with mlog.Logger instance. + return errors.New("cannot AddTarget on default logger") +} + +func defaultRemoveTargets(ctx context.Context, f func(TargetInfo) bool) error { + // mlog.RemoveTargets should not be called until default + // logger is replaced with mlog.Logger instance. + return errors.New("cannot RemoveTargets on default logger") +} + +func defaultEnableMetrics(collector logr.MetricsCollector) error { + // mlog.EnableMetrics should not be called until default + // logger is replaced with mlog.Logger instance. + return errors.New("cannot EnableMetrics on default logger") +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go new file mode 100644 index 00000000..f8d58968 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go @@ -0,0 +1,30 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import "github.com/mattermost/logr" + +// onLoggerError is called when the logging system encounters an error, +// such as a target not able to write records. The targets will keep trying +// however the error will be logged with a dedicated level that can be output +// to a safe/always available target for monitoring or alerting. +func onLoggerError(err error) { + Log(LvlLogError, "advanced logging error", Err(err)) +} + +// onQueueFull is called when the main logger queue is full, indicating the +// volume and frequency of log record creation is too high for the queue size +// and/or the target latencies. +func onQueueFull(rec *logr.LogRec, maxQueueSize int) bool { + Log(LvlLogError, "main queue full, dropping record", Any("rec", rec)) + return true // drop record +} + +// onTargetQueueFull is called when the main logger queue is full, indicating the +// volume and frequency of log record creation is too high for the target's queue size +// and/or the target latency. +func onTargetQueueFull(target logr.Target, rec *logr.LogRec, maxQueueSize int) bool { + Log(LvlLogError, "target queue full, dropping record", String("target", ""), Any("rec", rec)) + return true // drop record +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go index 73f40b2f..2986d92d 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go @@ -4,6 +4,11 @@ package mlog import ( + "context" + "log" + "sync/atomic" + + "github.com/mattermost/logr" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) @@ -11,6 +16,10 @@ import ( var globalLogger *Logger func InitGlobalLogger(logger *Logger) { + // Clean up previous instance. 
+ if globalLogger != nil && globalLogger.logrLogger != nil { + globalLogger.logrLogger.Logr().Shutdown() + } glob := *logger glob.zap = glob.zap.WithOptions(zap.AddCallerSkip(1)) globalLogger = &glob @@ -19,13 +28,46 @@ func InitGlobalLogger(logger *Logger) { Warn = globalLogger.Warn Error = globalLogger.Error Critical = globalLogger.Critical + Log = globalLogger.Log + LogM = globalLogger.LogM + Flush = globalLogger.Flush + ConfigAdvancedLogging = globalLogger.ConfigAdvancedLogging + ShutdownAdvancedLogging = globalLogger.ShutdownAdvancedLogging + AddTarget = globalLogger.AddTarget + RemoveTargets = globalLogger.RemoveTargets + EnableMetrics = globalLogger.EnableMetrics +} + +// logWriterFunc provides access to mlog via io.Writer, so the standard logger +// can be redirected to use mlog and whatever targets are defined. +type logWriterFunc func([]byte) (int, error) + +func (lw logWriterFunc) Write(p []byte) (int, error) { + return lw(p) } func RedirectStdLog(logger *Logger) { - zap.RedirectStdLogAt(logger.zap.With(zap.String("source", "stdlog")).WithOptions(zap.AddCallerSkip(-2)), zapcore.ErrorLevel) + if atomic.LoadInt32(&disableZap) == 0 { + zap.RedirectStdLogAt(logger.zap.With(zap.String("source", "stdlog")).WithOptions(zap.AddCallerSkip(-2)), zapcore.ErrorLevel) + return + } + + writer := func(p []byte) (int, error) { + Log(LvlStdLog, string(p)) + return len(p), nil + } + log.SetOutput(logWriterFunc(writer)) } type LogFunc func(string, ...Field) +type LogFuncCustom func(LogLevel, string, ...Field) +type LogFuncCustomMulti func([]LogLevel, string, ...Field) +type FlushFunc func(context.Context) error +type ConfigFunc func(cfg LogTargetCfg) error +type ShutdownFunc func(context.Context) error +type AddTargetFunc func(...logr.Target) error +type RemoveTargetsFunc func(context.Context, func(TargetInfo) bool) error +type EnableMetricsFunc func(logr.MetricsCollector) error // DON'T USE THIS Modify the level on the app logger func GloballyDisableDebugLogForTest() { @@ -42,3 +84,12 @@ var Info LogFunc = defaultInfoLog var Warn LogFunc = defaultWarnLog var Error LogFunc = defaultErrorLog var Critical LogFunc = defaultCriticalLog +var Log LogFuncCustom = defaultCustomLog +var LogM LogFuncCustomMulti = defaultCustomMultiLog +var Flush FlushFunc = defaultFlush + +var ConfigAdvancedLogging ConfigFunc = defaultAdvancedConfig +var ShutdownAdvancedLogging ShutdownFunc = defaultAdvancedShutdown +var AddTarget AddTargetFunc = defaultAddTarget +var RemoveTargets RemoveTargetsFunc = defaultRemoveTargets +var EnableMetrics EnableMetricsFunc = defaultEnableMetrics diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go new file mode 100644 index 00000000..54bd2549 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go @@ -0,0 +1,39 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
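Illustrative sketch (not part of the vendored diff): once InitGlobalLogger has installed a concrete *Logger, the package-level functions declared above can be called directly. A minimal flush-and-shutdown sequence, assuming the standard context, fmt and os imports; DefaultFlushTimeout is the constant defined further down in log.go.

ctx, cancel := context.WithTimeout(context.Background(), mlog.DefaultFlushTimeout)
defer cancel()
if err := mlog.Flush(ctx); err != nil {
	fmt.Fprintln(os.Stderr, "flush did not complete:", err)
}
if err := mlog.ShutdownAdvancedLogging(context.Background()); err != nil {
	fmt.Fprintln(os.Stderr, "advanced logging shutdown:", err)
}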
+ +package mlog + +// Standard levels +var ( + LvlPanic = LogLevel{ID: 0, Name: "panic", Stacktrace: true} + LvlFatal = LogLevel{ID: 1, Name: "fatal", Stacktrace: true} + LvlError = LogLevel{ID: 2, Name: "error"} + LvlWarn = LogLevel{ID: 3, Name: "warn"} + LvlInfo = LogLevel{ID: 4, Name: "info"} + LvlDebug = LogLevel{ID: 5, Name: "debug"} + LvlTrace = LogLevel{ID: 6, Name: "trace"} + // used by redirected standard logger + LvlStdLog = LogLevel{ID: 10, Name: "stdlog"} + // used only by the logger + LvlLogError = LogLevel{ID: 11, Name: "logerror", Stacktrace: true} +) + +// Register custom (discrete) levels here. +// !!!!! ID's must not exceed 32,768 !!!!!! +var ( + // used by the audit system + LvlAuditAPI = LogLevel{ID: 100, Name: "audit-api"} + LvlAuditContent = LogLevel{ID: 101, Name: "audit-content"} + LvlAuditPerms = LogLevel{ID: 102, Name: "audit-permissions"} + LvlAuditCLI = LogLevel{ID: 103, Name: "audit-cli"} + + // used by the TCP log target + LvlTcpLogTarget = LogLevel{ID: 120, Name: "TcpLogTarget"} + + // add more here ... +) + +// Combinations for LogM (log multi) +var ( + MLvlAuditAll = []LogLevel{LvlAuditAPI, LvlAuditContent, LvlAuditPerms, LvlAuditCLI} +) diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go index 1a6c2de9..eaa8c109 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go @@ -4,10 +4,15 @@ package mlog import ( + "context" + "fmt" "io" "log" "os" + "sync/atomic" + "time" + "github.com/mattermost/logr" "go.uber.org/zap" "go.uber.org/zap/zapcore" "gopkg.in/natefinch/lumberjack.v2" @@ -22,6 +27,19 @@ const ( LevelWarn = "warn" // Errors are messages about things we know are problems LevelError = "error" + + // DefaultFlushTimeout is the default amount of time mlog.Flush will wait + // before timing out. + DefaultFlushTimeout = time.Second * 5 +) + +var ( + // disableZap is set when Zap should be disabled and Logr used instead. + // This is needed for unit testing as Zap has no shutdown capabilities + // and holds file handles until process exit. Currently unit test create + // many server instances, and thus many Zap log files. + // This flag will be removed when Zap is permanently replaced. 
+ disableZap int32 ) // Type and function aliases from zap to limit the libraries scope into MM code @@ -38,6 +56,8 @@ var NamedErr = zap.NamedError var Bool = zap.Bool var Duration = zap.Duration +type TargetInfo logr.TargetInfo + type LoggerConfiguration struct { EnableConsole bool ConsoleJson bool @@ -52,6 +72,7 @@ type Logger struct { zap *zap.Logger consoleLevel zap.AtomicLevel fileLevel zap.AtomicLevel + logrLogger *logr.Logger } func getZapLevel(level string) zapcore.Level { @@ -84,6 +105,7 @@ func NewLogger(config *LoggerConfiguration) *Logger { logger := &Logger{ consoleLevel: zap.NewAtomicLevelAt(getZapLevel(config.ConsoleLevel)), fileLevel: zap.NewAtomicLevelAt(getZapLevel(config.FileLevel)), + logrLogger: newLogr(), } if config.EnableConsole { @@ -93,13 +115,33 @@ func NewLogger(config *LoggerConfiguration) *Logger { } if config.EnableFile { - writer := zapcore.AddSync(&lumberjack.Logger{ - Filename: config.FileLocation, - MaxSize: 100, - Compress: true, - }) - core := zapcore.NewCore(makeEncoder(config.FileJson), writer, logger.fileLevel) - cores = append(cores, core) + if atomic.LoadInt32(&disableZap) != 0 { + t := &LogTarget{ + Type: "file", + Format: "json", + Levels: mlogLevelToLogrLevels(config.FileLevel), + MaxQueueSize: DefaultMaxTargetQueue, + Options: []byte(fmt.Sprintf(`{"Filename":"%s", "MaxSizeMB":%d, "Compress":%t}`, + config.FileLocation, 100, true)), + } + if !config.FileJson { + t.Format = "plain" + } + if tgt, err := NewLogrTarget("mlogFile", t); err == nil { + logger.logrLogger.Logr().AddTarget(tgt) + } else { + Error("error creating mlogFile", Err(err)) + } + } else { + writer := zapcore.AddSync(&lumberjack.Logger{ + Filename: config.FileLocation, + MaxSize: 100, + Compress: true, + }) + + core := zapcore.NewCore(makeEncoder(config.FileJson), writer, logger.fileLevel) + cores = append(cores, core) + } } combinedCore := zapcore.NewTee(cores...) @@ -107,7 +149,6 @@ func NewLogger(config *LoggerConfiguration) *Logger { logger.zap = zap.New(combinedCore, zap.AddCaller(), ) - return logger } @@ -123,6 +164,10 @@ func (l *Logger) SetConsoleLevel(level string) { func (l *Logger) With(fields ...Field) *Logger { newlogger := *l newlogger.zap = newlogger.zap.With(fields...) + if newlogger.logrLogger != nil { + ll := newlogger.logrLogger.WithFields(zapToLogr(fields)) + newlogger.logrLogger = &ll + } return &newlogger } @@ -161,20 +206,120 @@ func (l *Logger) Sugar() *SugarLogger { func (l *Logger) Debug(message string, fields ...Field) { l.zap.Debug(message, fields...) + if isLevelEnabled(l.logrLogger, logr.Debug) { + l.logrLogger.WithFields(zapToLogr(fields)).Debug(message) + } } func (l *Logger) Info(message string, fields ...Field) { l.zap.Info(message, fields...) + if isLevelEnabled(l.logrLogger, logr.Info) { + l.logrLogger.WithFields(zapToLogr(fields)).Info(message) + } } func (l *Logger) Warn(message string, fields ...Field) { l.zap.Warn(message, fields...) + if isLevelEnabled(l.logrLogger, logr.Warn) { + l.logrLogger.WithFields(zapToLogr(fields)).Warn(message) + } } func (l *Logger) Error(message string, fields ...Field) { l.zap.Error(message, fields...) + if isLevelEnabled(l.logrLogger, logr.Error) { + l.logrLogger.WithFields(zapToLogr(fields)).Error(message) + } } func (l *Logger) Critical(message string, fields ...Field) { l.zap.Error(message, fields...) 
+ if isLevelEnabled(l.logrLogger, logr.Error) { + l.logrLogger.WithFields(zapToLogr(fields)).Error(message) + } +} + +func (l *Logger) Log(level LogLevel, message string, fields ...Field) { + l.logrLogger.WithFields(zapToLogr(fields)).Log(logr.Level(level), message) +} + +func (l *Logger) LogM(levels []LogLevel, message string, fields ...Field) { + var logger *logr.Logger + for _, lvl := range levels { + if isLevelEnabled(l.logrLogger, logr.Level(lvl)) { + // don't create logger with fields unless at least one level is active. + if logger == nil { + l := l.logrLogger.WithFields(zapToLogr(fields)) + logger = &l + } + logger.Log(logr.Level(lvl), message) + } + } +} + +func (l *Logger) Flush(cxt context.Context) error { + return l.logrLogger.Logr().FlushWithTimeout(cxt) +} + +// ShutdownAdvancedLogging stops the logger from accepting new log records and tries to +// flush queues within the context timeout. Once complete all targets are shutdown +// and any resources released. +func (l *Logger) ShutdownAdvancedLogging(cxt context.Context) error { + err := l.logrLogger.Logr().ShutdownWithTimeout(cxt) + l.logrLogger = newLogr() + return err +} + +// ConfigAdvancedLoggingConfig (re)configures advanced logging based on the +// specified log targets. This is the easiest way to get the advanced logger +// configured via a config source such as file. +func (l *Logger) ConfigAdvancedLogging(targets LogTargetCfg) error { + if err := l.ShutdownAdvancedLogging(context.Background()); err != nil { + Error("error shutting down previous logger", Err(err)) + } + + err := logrAddTargets(l.logrLogger, targets) + return err +} + +// AddTarget adds one or more logr.Target to the advanced logger. This is the preferred method +// to add custom targets or provide configuration that cannot be expressed via a +// config source. +func (l *Logger) AddTarget(targets ...logr.Target) error { + return l.logrLogger.Logr().AddTarget(targets...) +} + +// RemoveTargets selectively removes targets that were previously added to this logger instance +// using the passed in filter function. The filter function should return true to remove the target +// and false to keep it. +func (l *Logger) RemoveTargets(ctx context.Context, f func(ti TargetInfo) bool) error { + // Use locally defined TargetInfo type so we don't spread Logr dependencies. + fc := func(tic logr.TargetInfo) bool { + return f(TargetInfo(tic)) + } + return l.logrLogger.Logr().RemoveTargets(ctx, fc) +} + +// EnableMetrics enables metrics collection by supplying a MetricsCollector. +// The MetricsCollector provides counters and gauges that are updated by log targets. +func (l *Logger) EnableMetrics(collector logr.MetricsCollector) error { + return l.logrLogger.Logr().SetMetricsCollector(collector) +} + +// DisableZap is called to disable Zap, and Logr will be used instead. Any Logger +// instances created after this call will only use Logr. +// +// This is needed for unit testing as Zap has no shutdown capabilities +// and holds file handles until process exit. Currently unit tests create +// many server instances, and thus many Zap log file handles. +// +// This method will be removed when Zap is permanently replaced. +func DisableZap() { + atomic.StoreInt32(&disableZap, 1) +} + +// EnableZap re-enables Zap such that any Logger instances created after this +// call will allow Zap targets. 
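Illustrative sketch (not part of the vendored diff): using Log and LogM from above with the discrete levels defined in levels.go. The custom level, its ID, and the field values are assumptions for demonstration; per levels.go, custom level IDs must not exceed 32,768.

// LvlBridge is a hypothetical application-defined level.
var LvlBridge = mlog.LogLevel{ID: 200, Name: "bridge"}

func logExamples(logger *mlog.Logger) {
	// Single discrete level.
	logger.Log(LvlBridge, "message relayed", mlog.String("channel", "town-square"))

	// Several levels at once; fields are only materialized if at least one level is enabled.
	logger.LogM(mlog.MLvlAuditAll, "user setting changed", mlog.String("user_id", "abc123"))
}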
+func EnableZap() { + atomic.StoreInt32(&disableZap, 0) } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go new file mode 100644 index 00000000..01b39024 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go @@ -0,0 +1,247 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "encoding/json" + "fmt" + "io" + "os" + + "github.com/hashicorp/go-multierror" + "github.com/mattermost/logr" + logrFmt "github.com/mattermost/logr/format" + "github.com/mattermost/logr/target" + "go.uber.org/zap/zapcore" +) + +const ( + DefaultMaxTargetQueue = 1000 + DefaultSysLogPort = 514 +) + +type LogLevel struct { + ID logr.LevelID + Name string + Stacktrace bool +} + +type LogTarget struct { + Type string // one of "console", "file", "tcp", "syslog", "none". + Format string // one of "json", "plain" + Levels []LogLevel + Options json.RawMessage + MaxQueueSize int +} + +type LogTargetCfg map[string]*LogTarget +type LogrCleanup func() error + +func newLogr() *logr.Logger { + lgr := &logr.Logr{} + lgr.OnExit = func(int) {} + lgr.OnPanic = func(interface{}) {} + lgr.OnLoggerError = onLoggerError + lgr.OnQueueFull = onQueueFull + lgr.OnTargetQueueFull = onTargetQueueFull + + logger := lgr.NewLogger() + return &logger +} + +func logrAddTargets(logger *logr.Logger, targets LogTargetCfg) error { + lgr := logger.Logr() + var errs error + for name, t := range targets { + target, err := NewLogrTarget(name, t) + if err != nil { + errs = multierror.Append(err) + continue + } + if target != nil { + target.SetName(name) + lgr.AddTarget(target) + } + } + return errs +} + +// NewLogrTarget creates a `logr.Target` based on a target config. +// Can be used when parsing custom config files, or when programmatically adding +// built-in targets. Use `mlog.AddTarget` to add custom targets. 
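Illustrative sketch (not part of the vendored diff): a LogTargetCfg expressed as JSON and applied through ConfigAdvancedLogging. The target names, file path and level selection are placeholder choices; the field names (Type, Format, Levels, Options, MaxQueueSize) and the per-target Options keys ("Out" for console, "Filename"/"MaxSizeMB"/"Compress" for file) follow the structs defined in this file and below.

cfgJSON := `{
  "console-json": {
    "Type": "console",
    "Format": "json",
    "Levels": [{"ID": 2, "Name": "error", "Stacktrace": true}, {"ID": 4, "Name": "info"}],
    "Options": {"Out": "stdout"},
    "MaxQueueSize": 1000
  },
  "rotated-file": {
    "Type": "file",
    "Format": "plain",
    "Levels": [{"ID": 5, "Name": "debug"}],
    "Options": {"Filename": "/var/log/myapp/app.log", "MaxSizeMB": 100, "Compress": true}
  }
}`

var targets mlog.LogTargetCfg
if err := json.Unmarshal([]byte(cfgJSON), &targets); err != nil {
	panic(err)
}
// Valid only after InitGlobalLogger has replaced the default logger.
if err := mlog.ConfigAdvancedLogging(targets); err != nil {
	panic(err)
}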
+func NewLogrTarget(name string, t *LogTarget) (logr.Target, error) { + formatter, err := newFormatter(name, t.Format) + if err != nil { + return nil, err + } + filter, err := newFilter(name, t.Levels) + if err != nil { + return nil, err + } + + if t.MaxQueueSize == 0 { + t.MaxQueueSize = DefaultMaxTargetQueue + } + + switch t.Type { + case "console": + return newConsoleTarget(name, t, filter, formatter) + case "file": + return newFileTarget(name, t, filter, formatter) + case "syslog": + return newSyslogTarget(name, t, filter, formatter) + case "tcp": + return newTCPTarget(name, t, filter, formatter) + case "none": + return nil, nil + } + return nil, fmt.Errorf("invalid type '%s' for target %s", t.Type, name) +} + +func newFilter(name string, levels []LogLevel) (logr.Filter, error) { + filter := &logr.CustomFilter{} + for _, lvl := range levels { + filter.Add(logr.Level(lvl)) + } + return filter, nil +} + +func newFormatter(name string, format string) (logr.Formatter, error) { + switch format { + case "json", "": + return &logrFmt.JSON{}, nil + case "plain": + return &logrFmt.Plain{Delim: " | "}, nil + default: + return nil, fmt.Errorf("invalid format '%s' for target %s", format, name) + } +} + +func newConsoleTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { + type consoleOptions struct { + Out string `json:"Out"` + } + options := &consoleOptions{} + if err := json.Unmarshal(t.Options, options); err != nil { + return nil, err + } + + var w io.Writer + switch options.Out { + case "stdout", "": + w = os.Stdout + case "stderr": + w = os.Stderr + default: + return nil, fmt.Errorf("invalid out '%s' for target %s", options.Out, name) + } + + newTarget := target.NewWriterTarget(filter, formatter, w, t.MaxQueueSize) + return newTarget, nil +} + +func newFileTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { + type fileOptions struct { + Filename string `json:"Filename"` + MaxSize int `json:"MaxSizeMB"` + MaxAge int `json:"MaxAgeDays"` + MaxBackups int `json:"MaxBackups"` + Compress bool `json:"Compress"` + } + options := &fileOptions{} + if err := json.Unmarshal(t.Options, options); err != nil { + return nil, err + } + return newFileTargetWithOpts(name, t, target.FileOptions(*options), filter, formatter) +} + +func newFileTargetWithOpts(name string, t *LogTarget, opts target.FileOptions, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { + if opts.Filename == "" { + return nil, fmt.Errorf("missing 'Filename' option for target %s", name) + } + if err := checkFileWritable(opts.Filename); err != nil { + return nil, fmt.Errorf("error writing to 'Filename' for target %s: %w", name, err) + } + + newTarget := target.NewFileTarget(filter, formatter, opts, t.MaxQueueSize) + return newTarget, nil +} + +func newSyslogTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { + options := &SyslogParams{} + if err := json.Unmarshal(t.Options, options); err != nil { + return nil, err + } + + if options.IP == "" { + return nil, fmt.Errorf("missing 'IP' option for target %s", name) + } + if options.Port == 0 { + options.Port = DefaultSysLogPort + } + return NewSyslogTarget(filter, formatter, options, t.MaxQueueSize) +} + +func newTCPTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { + options := &TcpParams{} + if err := json.Unmarshal(t.Options, options); err != nil { + return nil, err + } + + if 
options.IP == "" { + return nil, fmt.Errorf("missing 'IP' option for target %s", name) + } + if options.Port == 0 { + return nil, fmt.Errorf("missing 'Port' option for target %s", name) + } + return NewTcpTarget(filter, formatter, options, t.MaxQueueSize) +} + +func checkFileWritable(filename string) error { + // try opening/creating the file for writing + file, err := os.OpenFile(filename, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) + if err != nil { + return err + } + file.Close() + return nil +} + +func isLevelEnabled(logger *logr.Logger, level logr.Level) bool { + if logger == nil || logger.Logr() == nil { + return false + } + + status := logger.Logr().IsLevelEnabled(level) + return status.Enabled +} + +// zapToLogr converts Zap fields to Logr fields. +// This will not be needed once Logr is used for all logging. +func zapToLogr(zapFields []Field) logr.Fields { + encoder := zapcore.NewMapObjectEncoder() + for _, zapField := range zapFields { + zapField.AddTo(encoder) + } + return logr.Fields(encoder.Fields) +} + +// mlogLevelToLogrLevel converts a mlog logger level to +// an array of discrete Logr levels. +func mlogLevelToLogrLevels(level string) []LogLevel { + levels := make([]LogLevel, 0) + levels = append(levels, LvlError, LvlPanic, LvlFatal, LvlStdLog) + + switch level { + case LevelDebug: + levels = append(levels, LvlDebug) + fallthrough + case LevelInfo: + levels = append(levels, LvlInfo) + fallthrough + case LevelWarn: + levels = append(levels, LvlWarn) + } + return levels +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go new file mode 100644 index 00000000..8766a964 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go @@ -0,0 +1,142 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + + "github.com/mattermost/logr" + "github.com/wiggin77/merror" + syslog "github.com/wiggin77/srslog" +) + +// Syslog outputs log records to local or remote syslog. +type Syslog struct { + logr.Basic + w *syslog.Writer +} + +// SyslogParams provides parameters for dialing a syslog daemon. +type SyslogParams struct { + IP string `json:"IP"` + Port int `json:"Port"` + Tag string `json:"Tag"` + TLS bool `json:"TLS"` + Cert string `json:"Cert"` + Insecure bool `json:"Insecure"` +} + +// NewSyslogTarget creates a target capable of outputting log records to remote or local syslog, with or without TLS. +func NewSyslogTarget(filter logr.Filter, formatter logr.Formatter, params *SyslogParams, maxQueue int) (*Syslog, error) { + network := "tcp" + var config *tls.Config + + if params.TLS { + network = "tcp+tls" + config = &tls.Config{InsecureSkipVerify: params.Insecure} + if params.Cert != "" { + pool, err := getCertPool(params.Cert) + if err != nil { + return nil, err + } + config.RootCAs = pool + } + } + raddr := fmt.Sprintf("%s:%d", params.IP, params.Port) + + writer, err := syslog.DialWithTLSConfig(network, raddr, syslog.LOG_INFO, params.Tag, config) + if err != nil { + return nil, err + } + + s := &Syslog{w: writer} + s.Basic.Start(s, s, filter, formatter, maxQueue) + + return s, nil +} + +// Shutdown stops processing log records after making best effort to flush queue. 
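Illustrative sketch (not part of the vendored diff): constructing the syslog target defined in this file programmatically and registering it via mlog.AddTarget. It assumes logr and logr/format are imported as in this file (the latter as logrFmt); the host, port, tag and certificate path are placeholder values, and Cert may also carry a base64-encoded certificate as handled by getCertPool.

params := &mlog.SyslogParams{
	IP:   "syslog.internal.example", // placeholder host
	Port: 6514,
	Tag:  "myapp",
	TLS:  true,
	Cert: "/etc/ssl/certs/syslog-ca.pem",
}

filter := &logr.CustomFilter{}
filter.Add(logr.Level(mlog.LvlError))
filter.Add(logr.Level(mlog.LvlWarn))

st, err := mlog.NewSyslogTarget(filter, &logrFmt.JSON{}, params, mlog.DefaultMaxTargetQueue)
if err != nil {
	panic(err)
}
_ = mlog.AddTarget(st)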
+func (s *Syslog) Shutdown(ctx context.Context) error { + errs := merror.New() + + err := s.Basic.Shutdown(ctx) + errs.Append(err) + + err = s.w.Close() + errs.Append(err) + + return errs.ErrorOrNil() +} + +// getCertPool returns a x509.CertPool containing the cert(s) +// from `cert`, which can be a path to a .pem or .crt file, +// or a base64 encoded cert. +func getCertPool(cert string) (*x509.CertPool, error) { + if cert == "" { + return nil, errors.New("no cert provided") + } + + // first treat as a file and try to read. + serverCert, err := ioutil.ReadFile(cert) + if err != nil { + // maybe it's a base64 encoded cert + serverCert, err = base64.StdEncoding.DecodeString(cert) + if err != nil { + return nil, errors.New("cert cannot be read") + } + } + + pool := x509.NewCertPool() + if ok := pool.AppendCertsFromPEM(serverCert); ok { + return pool, nil + } + return nil, errors.New("cannot parse cert") +} + +// Write converts the log record to bytes, via the Formatter, +// and outputs to syslog. +func (s *Syslog) Write(rec *logr.LogRec) error { + _, stacktrace := s.IsLevelEnabled(rec.Level()) + + buf := rec.Logger().Logr().BorrowBuffer() + defer rec.Logger().Logr().ReleaseBuffer(buf) + + buf, err := s.Formatter().Format(rec, stacktrace, buf) + if err != nil { + return err + } + txt := buf.String() + + switch rec.Level() { + case logr.Panic, logr.Fatal: + err = s.w.Crit(txt) + case logr.Error: + err = s.w.Err(txt) + case logr.Warn: + err = s.w.Warning(txt) + case logr.Debug, logr.Trace: + err = s.w.Debug(txt) + default: + // logr.Info plus all custom levels. + err = s.w.Info(txt) + } + + if err != nil { + reporter := rec.Logger().Logr().ReportError + reporter(fmt.Errorf("syslog write fail: %w", err)) + // syslog writer will try to reconnect. + } + return err +} + +// String returns a string representation of this target. +func (s *Syslog) String() string { + return "SyslogTarget" +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go new file mode 100644 index 00000000..dad20474 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go @@ -0,0 +1,274 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "sync" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/mattermost/logr" + + _ "net/http/pprof" +) + +const ( + DialTimeoutSecs = 30 + WriteTimeoutSecs = 30 + RetryBackoffMillis int64 = 100 + MaxRetryBackoffMillis int64 = 30 * 1000 // 30 seconds +) + +// Tcp outputs log records to raw socket server. +type Tcp struct { + logr.Basic + + params *TcpParams + addy string + + mutex sync.Mutex + conn net.Conn + monitor chan struct{} + shutdown chan struct{} +} + +// TcpParams provides parameters for dialing a socket server. +type TcpParams struct { + IP string `json:"IP"` + Port int `json:"Port"` + TLS bool `json:"TLS"` + Cert string `json:"Cert"` + Insecure bool `json:"Insecure"` +} + +// NewTcpTarget creates a target capable of outputting log records to a raw socket, with or without TLS. 
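Illustrative sketch (not part of the vendored diff): a TCP log target built through mlog.NewLogrTarget, the same path the config-driven setup in logr.go uses. The target name, host and port are placeholder values; per newTCPTarget, both IP and Port are required, and Cert may be a PEM file path or a base64-encoded certificate.

t := &mlog.LogTarget{
	Type:         "tcp",
	Format:       "json",
	Levels:       []mlog.LogLevel{mlog.LvlError, mlog.LvlLogError},
	Options:      json.RawMessage(`{"IP": "logs.internal.example", "Port": 6066, "TLS": false}`),
	MaxQueueSize: 1000,
}
tgt, err := mlog.NewLogrTarget("audit-tcp", t)
if err != nil {
	panic(err)
}
_ = mlog.AddTarget(tgt)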
+func NewTcpTarget(filter logr.Filter, formatter logr.Formatter, params *TcpParams, maxQueue int) (*Tcp, error) { + tcp := &Tcp{ + params: params, + addy: fmt.Sprintf("%s:%d", params.IP, params.Port), + monitor: make(chan struct{}), + shutdown: make(chan struct{}), + } + tcp.Basic.Start(tcp, tcp, filter, formatter, maxQueue) + + return tcp, nil +} + +// getConn provides a net.Conn. If a connection already exists, it is returned immediately, +// otherwise this method blocks until a new connection is created, timeout or shutdown. +func (tcp *Tcp) getConn() (net.Conn, error) { + tcp.mutex.Lock() + defer tcp.mutex.Unlock() + + Log(LvlTcpLogTarget, "getConn enter", String("addy", tcp.addy)) + defer Log(LvlTcpLogTarget, "getConn exit", String("addy", tcp.addy)) + + if tcp.conn != nil { + Log(LvlTcpLogTarget, "reusing existing conn", String("addy", tcp.addy)) // use "With" once Zap is removed + return tcp.conn, nil + } + + type result struct { + conn net.Conn + err error + } + + connChan := make(chan result) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*DialTimeoutSecs) + defer cancel() + + go func(ctx context.Context, ch chan result) { + Log(LvlTcpLogTarget, "dailing", String("addy", tcp.addy)) + conn, err := tcp.dial(ctx) + if err == nil { + tcp.conn = conn + tcp.monitor = make(chan struct{}) + go monitor(tcp.conn, tcp.monitor) + } + connChan <- result{conn: conn, err: err} + }(ctx, connChan) + + select { + case <-tcp.shutdown: + return nil, errors.New("shutdown") + case res := <-connChan: + return res.conn, res.err + } +} + +// dial connects to a TCP socket, and optionally performs a TLS handshake. +// A non-nil context must be provided which can cancel the dial. +func (tcp *Tcp) dial(ctx context.Context) (net.Conn, error) { + var dialer net.Dialer + dialer.Timeout = time.Second * DialTimeoutSecs + conn, err := dialer.DialContext(ctx, "tcp", fmt.Sprintf("%s:%d", tcp.params.IP, tcp.params.Port)) + if err != nil { + return nil, err + } + + if !tcp.params.TLS { + return conn, nil + } + + Log(LvlTcpLogTarget, "TLS handshake", String("addy", tcp.addy)) + + tlsconfig := &tls.Config{ + ServerName: tcp.params.IP, + InsecureSkipVerify: tcp.params.Insecure, + } + if tcp.params.Cert != "" { + pool, err := getCertPool(tcp.params.Cert) + if err != nil { + return nil, err + } + tlsconfig.RootCAs = pool + } + + tlsConn := tls.Client(conn, tlsconfig) + if err := tlsConn.Handshake(); err != nil { + return nil, err + } + return tlsConn, nil +} + +func (tcp *Tcp) close() error { + tcp.mutex.Lock() + defer tcp.mutex.Unlock() + + var err error + if tcp.conn != nil { + Log(LvlTcpLogTarget, "closing connection", String("addy", tcp.addy)) + close(tcp.monitor) + err = tcp.conn.Close() + tcp.conn = nil + } + return err +} + +// Shutdown stops processing log records after making best effort to flush queue. +func (tcp *Tcp) Shutdown(ctx context.Context) error { + errs := &multierror.Error{} + + Log(LvlTcpLogTarget, "shutting down", String("addy", tcp.addy)) + + if err := tcp.Basic.Shutdown(ctx); err != nil { + errs = multierror.Append(errs, err) + } + + if err := tcp.close(); err != nil { + errs = multierror.Append(errs, err) + } + + close(tcp.shutdown) + return errs.ErrorOrNil() +} + +// Write converts the log record to bytes, via the Formatter, and outputs to the socket. +// Called by dedicated target goroutine and will block until success or shutdown. 
+func (tcp *Tcp) Write(rec *logr.LogRec) error { + _, stacktrace := tcp.IsLevelEnabled(rec.Level()) + + buf := rec.Logger().Logr().BorrowBuffer() + defer rec.Logger().Logr().ReleaseBuffer(buf) + + buf, err := tcp.Formatter().Format(rec, stacktrace, buf) + if err != nil { + return err + } + + try := 1 + backoff := RetryBackoffMillis + for { + select { + case <-tcp.shutdown: + return err + default: + } + + conn, err := tcp.getConn() + if err != nil { + Log(LvlTcpLogTarget, "failed getting connection", String("addy", tcp.addy), Err(err)) + reporter := rec.Logger().Logr().ReportError + reporter(fmt.Errorf("log target %s connection error: %w", tcp.String(), err)) + backoff = tcp.sleep(backoff) + continue + } + + conn.SetWriteDeadline(time.Now().Add(time.Second * WriteTimeoutSecs)) + _, err = buf.WriteTo(conn) + if err == nil { + return nil + } + + Log(LvlTcpLogTarget, "write error", String("addy", tcp.addy), Err(err)) + reporter := rec.Logger().Logr().ReportError + reporter(fmt.Errorf("log target %s write error: %w", tcp.String(), err)) + + _ = tcp.close() + + backoff = tcp.sleep(backoff) + try++ + Log(LvlTcpLogTarget, "retrying write", String("addy", tcp.addy), Int("try", try)) + } +} + +// monitor continuously tries to read from the connection to detect socket close. +// This is needed because TCP target uses a write only socket and Linux systems +// take a long time to detect a loss of connectivity on a socket when only writing; +// the writes simply fail without an error returned. +func monitor(conn net.Conn, done <-chan struct{}) { + addy := conn.RemoteAddr().String() + defer Log(LvlTcpLogTarget, "monitor exiting", String("addy", addy)) + + buf := make([]byte, 1) + for { + Log(LvlTcpLogTarget, "monitor loop", String("addy", addy)) + + select { + case <-done: + return + case <-time.After(1 * time.Second): + } + + err := conn.SetReadDeadline(time.Now().Add(time.Second * 30)) + if err != nil { + continue + } + + _, err = conn.Read(buf) + + if errt, ok := err.(net.Error); ok && errt.Timeout() { + // read timeout is expected, keep looping. + continue + } + + // Any other error closes the connection, forcing a reconnect. + Log(LvlTcpLogTarget, "monitor closing connection", Err(err)) + conn.Close() + return + } +} + +// String returns a string representation of this target. 
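Illustrative sketch (not part of the vendored diff): the retry loop in Tcp.Write above backs off between attempts using the sleep helper defined just below, growing the delay by half each time and capping it at MaxRetryBackoffMillis. A standalone illustration of the resulting delays, using the exported constants from this file:

backoff := mlog.RetryBackoffMillis // starts at 100 ms
for attempt := 1; attempt <= 8; attempt++ {
	fmt.Printf("attempt %d: wait %d ms\n", attempt, backoff)
	// same growth rule as Tcp.sleep: roughly 1.5x, capped at 30s
	backoff = backoff + (backoff >> 1)
	if backoff > mlog.MaxRetryBackoffMillis {
		backoff = mlog.MaxRetryBackoffMillis
	}
}
// prints 100, 150, 225, 337, 505, 757, 1135, 1702 ms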
+func (tcp *Tcp) String() string { + return fmt.Sprintf("TcpTarget[%s:%d]", tcp.params.IP, tcp.params.Port) +} + +func (tcp *Tcp) sleep(backoff int64) int64 { + select { + case <-tcp.shutdown: + case <-time.After(time.Millisecond * time.Duration(backoff)): + } + + nextBackoff := backoff + (backoff >> 1) + if nextBackoff > MaxRetryBackoffMillis { + nextBackoff = MaxRetryBackoffMillis + } + return nextBackoff +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem b/vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem new file mode 100644 index 00000000..6ce8d042 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem @@ -0,0 +1,43 @@ +-----BEGIN CERTIFICATE----- +MIIDjzCCAnegAwIBAgIRAPYfRSwdzKopBKxYxKqslJUwDQYJKoZIhvcNAQELBQAw +JzElMCMGA1UEAwwcTWF0dGVybW9zdCwgSW5jLiBJbnRlcm5hbCBDQTAeFw0xOTAz +MjIwMDE0MTVaFw0yMjAzMDYwMDE0MTVaMDsxOTA3BgNVBAMTME1hdHRlcm1vc3Qs +IEluYy4gSW50ZXJuYWwgSW50ZXJtZWRpYXRlIEF1dGhvcml0eTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMjliRdmvnNL4u/Jr/M2dPwQmTJXEBY/Vq9Q +vAU52X3tRMCPxcaFz+x6ftuvdO2NdohXGAmtx9QU5LZcvFeTDpoVEBo9A+4jtLvD +DZYaTNLpJmoSoJHaDbdWX+OAOqyDiWS741LuiMKWHhew9QOisat2ZINPxjmAd9wE +xthTMgzsv7MUqnMer8U5OGQ0Qy7wAmNRc+2K3qPwkxe2RUvcte50DUFNgxEginsh +vrkOXR383vUCZfu72qu8oggjiQpyTllu5je2Ap6JLjYLkEMiMqrYADuWor/ZHwa6 +WrFqVETxWfAV5u9Eh0wZM/KKYwRQuw9y+Nans77FmUl1tVWWNN8CAwEAAaOBoTCB +njAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBQY4Uqswyr2hO/HetZt2RDxJdTIPjBi +BgNVHSMEWzBZgBRFZXVg2Z5tNIsWeWjBLEy2yzKbMKErpCkwJzElMCMGA1UEAwwc +TWF0dGVybW9zdCwgSW5jLiBJbnRlcm5hbCBDQYIUEifGUOM+bIFZo1tkjZB5YGBr +0xEwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQAEdexL30Q0zBHmPAH8 +LhdK7dbzW1CmILbxRZlKAwRN+hKRXiMW3MHIkhNuoV9Aev602Q+ja4lWsRi/ktOL +ni1FWx5gSScgdG8JGj47dOmoT3vXKX7+umiv4rQLPDl9/DKMuv204OYJq6VT+uNU +6C6kL157jGJEO76H4fMZ8oYsD7Sq0zjiNKtuCYii0ngH3j3gB1jACLqRgveU7MdT +pqOV2KfY31+h8VBtkUvljNztQ9xNY8Fjmt0SMf7E3FaUcaar3ZCr70G5aU3dKbe7 +47vGOBa5tCqw4YK0jgDKid3IJQul9a3J1mSsH8Wy3to9cAV4KGZBQLnzCX15a/+v +3yVh +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDfjCCAmagAwIBAgIUEifGUOM+bIFZo1tkjZB5YGBr0xEwDQYJKoZIhvcNAQEL +BQAwJzElMCMGA1UEAwwcTWF0dGVybW9zdCwgSW5jLiBJbnRlcm5hbCBDQTAeFw0x +OTAzMjEyMTI4NDNaFw0yOTAzMTgyMTI4NDNaMCcxJTAjBgNVBAMMHE1hdHRlcm1v +c3QsIEluYy4gSW50ZXJuYWwgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDH0Xq5rMBGpKOVWTpb5MnaJIWFP/vOtvEk+7hVrfOfe1/5x0Kk3UgAHj85 +otaEZD1Lhn/JLkEqCiE/UXMJFwJDlNcO4CkdKBSpYX4bKAqy5q/X3QwioMSNpJG1 ++YYrNGBH0sgKcKjyCaLhmqYLD0xZDVOmWIYBU9jUPyXw5U0tnsVrTqGMxVkm1xCY +krCWN1ZoUrLvL0MCZc5qpxoPTopr9UO9cqSBSuy6BVWVuEWBZhpqHt+ul8VxhzzY +q1k4l7r2qw+/wm1iJBedTeBVeWNag8JaVfLgu+/W7oJVlPO32Po7pnvHp8iJ3b4K +zXyVHaTX4S6Em+6LV8855TYrShzlAgMBAAGjgaEwgZ4wHQYDVR0OBBYEFEVldWDZ +nm00ixZ5aMEsTLbLMpswMGIGA1UdIwRbMFmAFEVldWDZnm00ixZ5aMEsTLbLMpsw +oSukKTAnMSUwIwYDVQQDDBxNYXR0ZXJtb3N0LCBJbmMuIEludGVybmFsIENBghQS +J8ZQ4z5sgVmjW2SNkHlgYGvTETAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjAN +BgkqhkiG9w0BAQsFAAOCAQEAPiCWFmopyAkY2T3Zyo4yaRPhX1+VOTMKJtY6EUhq +/GHz6kzEyvCUBf0N892cibGxekrEoItY9NqO6RQRfowg+Gn5kc13z4NyL2W8/eoT +Xy0ZvfaQbU++fQ6pVtWtMblDMU9xiYd7/MDvJpO328l1Vhcdp8kEi+lCvpy0sCRc +PxzPhbgCMAbZEGx+4TMQd4SZKzlRxW/2fflpReh6v1Dv0VDUSYQWwsUnaLpdKHfh +a5k0vuySYcszE4YKlY0zakeFlJfp7fBp1xTwcdW8aTfw15EicPMwTc6xxA4JJUJx +cddu817n1nayK5u6r9Qh1oIVkr0nC9YELMMy4dpPgJ88SA== +-----END CERTIFICATE----- diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go index bf1bcedf..1f2f437f 100644 --- 
a/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go @@ -32,9 +32,10 @@ func NewTestingLogger(tb testing.TB, writer io.Writer) *Logger { testingLogger := &Logger{ consoleLevel: zap.NewAtomicLevelAt(getZapLevel("debug")), fileLevel: zap.NewAtomicLevelAt(getZapLevel("info")), + logrLogger: newLogr(), } - logWriterCore := zapcore.NewCore(makeEncoder(true), logWriterSync, testingLogger.consoleLevel) + logWriterCore := zapcore.NewCore(makeEncoder(true), zapcore.Lock(logWriterSync), testingLogger.consoleLevel) testingLogger.zap = zap.New(logWriterCore, zap.AddCaller(), diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/bot.go b/vendor/github.com/mattermost/mattermost-server/v5/model/bot.go index 15ef6a70..fb46be49 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/bot.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/bot.go @@ -13,9 +13,10 @@ import ( ) const ( - BOT_DISPLAY_NAME_MAX_RUNES = USER_FIRST_NAME_MAX_RUNES - BOT_DESCRIPTION_MAX_RUNES = 1024 - BOT_CREATOR_ID_MAX_RUNES = KEY_VALUE_PLUGIN_ID_MAX_RUNES // UserId or PluginId + BOT_DISPLAY_NAME_MAX_RUNES = USER_FIRST_NAME_MAX_RUNES + BOT_DESCRIPTION_MAX_RUNES = 1024 + BOT_CREATOR_ID_MAX_RUNES = KEY_VALUE_PLUGIN_ID_MAX_RUNES // UserId or PluginId + BOT_WARN_METRIC_BOT_USERNAME = "mattermost-advisor" ) // Bot is a special type of User meant for programmatic interactions. diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel.go index 6a84b355..282271ad 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel.go @@ -120,12 +120,18 @@ type ChannelModeratedRolesPatch struct { // PerPage number of results per page, if paginated. 
// type ChannelSearchOpts struct { - NotAssociatedToGroup string - ExcludeDefaultChannels bool - IncludeDeleted bool - ExcludeChannelNames []string - Page *int - PerPage *int + NotAssociatedToGroup string + ExcludeDefaultChannels bool + IncludeDeleted bool + Deleted bool + ExcludeChannelNames []string + TeamIds []string + GroupConstrained bool + ExcludeGroupConstrained bool + Public bool + Private bool + Page *int + PerPage *int } type ChannelMemberCountByGroup struct { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go index 6197d410..8f43ca4e 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go @@ -10,7 +10,8 @@ type ChannelMemberHistoryResult struct { LeaveTime *int64 // these two fields are never set in the database - when we SELECT, we join on Users to get them - UserEmail string `db:"Email"` - Username string - IsBot bool + UserEmail string `db:"Email"` + Username string + IsBot bool + UserDeleteAt int64 } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go index 2e994227..87fd3aef 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go @@ -11,11 +11,18 @@ import ( const CHANNEL_SEARCH_DEFAULT_LIMIT = 50 type ChannelSearch struct { - Term string `json:"term"` - ExcludeDefaultChannels bool `json:"exclude_default_channels"` - NotAssociatedToGroup string `json:"not_associated_to_group"` - Page *int `json:"page,omitempty"` - PerPage *int `json:"per_page,omitempty"` + Term string `json:"term"` + ExcludeDefaultChannels bool `json:"exclude_default_channels"` + NotAssociatedToGroup string `json:"not_associated_to_group"` + TeamIds []string `json:"team_ids"` + GroupConstrained bool `json:"group_constrained"` + ExcludeGroupConstrained bool `json:"exclude_group_constrained"` + Public bool `json:"public"` + Private bool `json:"private"` + IncludeDeleted bool `json:"include_deleted"` + Deleted bool `json:"deleted"` + Page *int `json:"page,omitempty"` + PerPage *int `json:"per_page,omitempty"` } // ToJson convert a Channel to a json string diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go new file mode 100644 index 00000000..6a79593c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go @@ -0,0 +1,111 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type SidebarCategoryType string +type SidebarCategorySorting string + +const ( + // Each sidebar category has a 'type'. 
System categories are Channels, Favorites and DMs + // All user-created categories will have type Custom + SidebarCategoryChannels SidebarCategoryType = "channels" + SidebarCategoryDirectMessages SidebarCategoryType = "direct_messages" + SidebarCategoryFavorites SidebarCategoryType = "favorites" + SidebarCategoryCustom SidebarCategoryType = "custom" + // Increment to use when adding/reordering things in the sidebar + MinimalSidebarSortDistance = 10 + // Default Sort Orders for categories + DefaultSidebarSortOrderFavorites = 0 + DefaultSidebarSortOrderChannels = DefaultSidebarSortOrderFavorites + MinimalSidebarSortDistance + DefaultSidebarSortOrderDMs = DefaultSidebarSortOrderChannels + MinimalSidebarSortDistance + // Sorting modes + // default for all categories except DMs (behaves like manual) + SidebarCategorySortDefault SidebarCategorySorting = "" + // sort manually + SidebarCategorySortManual SidebarCategorySorting = "manual" + // sort by recency (default for DMs) + SidebarCategorySortRecent SidebarCategorySorting = "recent" + // sort by display name alphabetically + SidebarCategorySortAlphabetical SidebarCategorySorting = "alpha" +) + +// SidebarCategory represents the corresponding DB table +// SortOrder is never returned to the user and only used for queries +type SidebarCategory struct { + Id string `json:"id"` + UserId string `json:"user_id"` + TeamId string `json:"team_id"` + SortOrder int64 `json:"-"` + Sorting SidebarCategorySorting `json:"sorting"` + Type SidebarCategoryType `json:"type"` + DisplayName string `json:"display_name"` +} + +// SidebarCategoryWithChannels combines data from SidebarCategory table with the Channel IDs that belong to that category +type SidebarCategoryWithChannels struct { + SidebarCategory + Channels []string `json:"channel_ids"` +} + +type SidebarCategoryOrder []string + +// OrderedSidebarCategories combines categories, their channel IDs and an array of Category IDs, sorted +type OrderedSidebarCategories struct { + Categories SidebarCategoriesWithChannels `json:"categories"` + Order SidebarCategoryOrder `json:"order"` +} + +type SidebarChannel struct { + ChannelId string `json:"channel_id"` + UserId string `json:"user_id"` + CategoryId string `json:"category_id"` + SortOrder int64 `json:"-"` +} + +type SidebarChannels []*SidebarChannel +type SidebarCategoriesWithChannels []*SidebarCategoryWithChannels + +func SidebarCategoryFromJson(data io.Reader) (*SidebarCategoryWithChannels, error) { + var o *SidebarCategoryWithChannels + err := json.NewDecoder(data).Decode(&o) + return o, err +} + +func SidebarCategoriesFromJson(data io.Reader) ([]*SidebarCategoryWithChannels, error) { + var o []*SidebarCategoryWithChannels + err := json.NewDecoder(data).Decode(&o) + return o, err +} + +func OrderedSidebarCategoriesFromJson(data io.Reader) (*OrderedSidebarCategories, error) { + var o *OrderedSidebarCategories + err := json.NewDecoder(data).Decode(&o) + return o, err +} + +func (o SidebarCategoryWithChannels) ToJson() []byte { + b, _ := json.Marshal(o) + return b +} + +func SidebarCategoriesWithChannelsToJson(o []*SidebarCategoryWithChannels) []byte { + if b, err := json.Marshal(o); err != nil { + return []byte("[]") + } else { + return b + } +} + +func (o OrderedSidebarCategories) ToJson() []byte { + if b, err := json.Marshal(o); err != nil { + return []byte("[]") + } else { + return b + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go b/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go index 
b522ecb8..b3c34c39 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go @@ -10,6 +10,7 @@ import ( "io" "io/ioutil" "mime/multipart" + "net" "net/http" "net/url" "strconv" @@ -61,6 +62,40 @@ type Client4 struct { AuthToken string AuthType string HttpHeader map[string]string // Headers to be copied over for each request + + // TrueString is the string value sent to the server for true boolean query parameters. + trueString string + + // FalseString is the string value sent to the server for false boolean query parameters. + falseString string +} + +// SetBoolString is a helper method for overriding how true and false query string parameters are +// sent to the server. +// +// This method is only exposed for testing. It is never necessary to configure these values +// in production. +func (c *Client4) SetBoolString(value bool, valueStr string) { + if value { + c.trueString = valueStr + } else { + c.falseString = valueStr + } +} + +// boolString builds the query string parameter for boolean values. +func (c *Client4) boolString(value bool) string { + if value && c.trueString != "" { + return c.trueString + } else if !value && c.falseString != "" { + return c.falseString + } + + if value { + return "true" + } else { + return "false" + } } func closeBody(r *http.Response) { @@ -81,7 +116,21 @@ func (c *Client4) Must(result interface{}, resp *Response) interface{} { } func NewAPIv4Client(url string) *Client4 { - return &Client4{url, url + API_URL_SUFFIX, &http.Client{}, "", "", map[string]string{}} + url = strings.TrimRight(url, "/") + return &Client4{url, url + API_URL_SUFFIX, &http.Client{}, "", "", map[string]string{}, "", ""} +} + +func NewAPIv4SocketClient(socketPath string) *Client4 { + tr := &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + return net.Dial("unix", socketPath) + }, + } + + client := NewAPIv4Client("http://_") + client.HttpClient = &http.Client{Transport: tr} + + return client } func BuildErrorResponse(r *http.Response, err *AppError) *Response { @@ -140,6 +189,10 @@ func (c *Client4) GetUserRoute(userId string) string { return fmt.Sprintf(c.GetUsersRoute()+"/%v", userId) } +func (c *Client4) GetUserCategoryRoute(userID, teamID string) string { + return c.GetUserRoute(userID) + c.GetTeamRoute(teamID) + "/channels/categories" +} + func (c *Client4) GetUserAccessTokensRoute() string { return fmt.Sprintf(c.GetUsersRoute() + "/tokens") } @@ -261,6 +314,14 @@ func (c *Client4) GetFileRoute(fileId string) string { return fmt.Sprintf(c.GetFilesRoute()+"/%v", fileId) } +func (c *Client4) GetUploadsRoute() string { + return "/uploads" +} + +func (c *Client4) GetUploadRoute(uploadId string) string { + return fmt.Sprintf("%s/%s", c.GetUploadsRoute(), uploadId) +} + func (c *Client4) GetPluginsRoute() string { return "/plugins" } @@ -453,6 +514,10 @@ func (c *Client4) GetGroupsRoute() string { return "/groups" } +func (c *Client4) GetPublishUserTypingRoute(userId string) string { + return c.GetUserRoute(userId) + "/typing" +} + func (c *Client4) GetGroupRoute(groupID string) string { return fmt.Sprintf("%s/%s", c.GetGroupsRoute(), groupID) } @@ -650,7 +715,7 @@ func (c *Client4) LoginByLdap(loginId string, password string) (*User, *Response m := make(map[string]string) m["login_id"] = loginId m["password"] = password - m["ldap_only"] = "true" + m["ldap_only"] = c.boolString(true) return c.login(m) } @@ -967,6 +1032,17 @@ func (c *Client4) GetUsersWithoutTeam(page 
int, perPage int, etag string) ([]*Us return UserListFromJson(r.Body), BuildResponse(r) } +// GetUsersInGroup returns a page of users in a group. Page counting starts at 0. +func (c *Client4) GetUsersInGroup(groupID string, page int, perPage int, etag string) ([]*User, *Response) { + query := fmt.Sprintf("?in_group=%v&page=%v&per_page=%v", groupID, page, perPage) + r, err := c.DoApiGet(c.GetUsersRoute()+query, etag) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UserListFromJson(r.Body), BuildResponse(r) +} + // GetUsersByIds returns a list of users based on the provided user ids. func (c *Client4) GetUsersByIds(userIds []string) ([]*User, *Response) { r, err := c.DoApiPost(c.GetUsersRoute()+"/ids", ArrayToJson(userIds)) @@ -1119,6 +1195,17 @@ func (c *Client4) UpdateUserPassword(userId, currentPassword, newPassword string return CheckStatusOK(r), BuildResponse(r) } +// UpdateUserHashedPassword updates a user's password with an already-hashed password. Must be a system administrator. +func (c *Client4) UpdateUserHashedPassword(userId, newHashedPassword string) (bool, *Response) { + requestBody := map[string]string{"already_hashed": "true", "new_password": newHashedPassword} + r, err := c.DoApiPut(c.GetUserRoute(userId)+"/password", MapToJson(requestBody)) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + // PromoteGuestToUser convert a guest into a regular user func (c *Client4) PromoteGuestToUser(guestId string) (bool, *Response) { r, err := c.DoApiPost(c.GetUserRoute(guestId)+"/promote", "") @@ -1173,6 +1260,50 @@ func (c *Client4) DeleteUser(userId string) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// PermanentDeleteUser deletes a user in the system based on the provided user id string. +func (c *Client4) PermanentDeleteUser(userId string) (bool, *Response) { + r, err := c.DoApiDelete(c.GetUserRoute(userId) + "?permanent=" + c.boolString(true)) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// ConvertUserToBot converts a user to a bot user. +func (c *Client4) ConvertUserToBot(userId string) (*Bot, *Response) { + r, err := c.DoApiPost(c.GetUserRoute(userId)+"/convert_to_bot", "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return BotFromJson(r.Body), BuildResponse(r) +} + +// ConvertBotToUser converts a bot user to a user. +func (c *Client4) ConvertBotToUser(userId string, userPatch *UserPatch, setSystemAdmin bool) (*User, *Response) { + var query string + if setSystemAdmin { + query = "?set_system_admin=true" + } + r, err := c.DoApiPost(c.GetBotRoute(userId)+"/convert_to_user"+query, userPatch.ToJson()) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UserFromJson(r.Body), BuildResponse(r) +} + +// PermanentDeleteAll permanently deletes all users in the system. This is a local only endpoint +func (c *Client4) PermanentDeleteAllUsers() (bool, *Response) { + r, err := c.DoApiDelete(c.GetUsersRoute()) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + // SendPasswordResetEmail will send a link for password resetting to a user with the // provided email. 
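The hunks above introduce a small boolean-rendering helper on Client4: SetBoolString overrides how true/false query parameters are serialized, and boolString falls back to "true"/"false" when no override is set. The following sketch is illustrative only and not part of the vendored code; the server URL, token, and user id are placeholders.

package example

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

// boolStringExample shows how the new SetBoolString override changes the way
// boolean query parameters are rendered for calls such as PermanentDeleteUser.
func boolStringExample() {
	client := model.NewAPIv4Client("http://localhost:8065") // placeholder URL
	client.AuthToken = "session-token"                      // placeholder token
	client.AuthType = model.HEADER_BEARER

	// Send boolean query parameters as "1"/"0" instead of "true"/"false".
	// Only the query-string rendering changes, e.g. DELETE /users/{id}?permanent=1.
	client.SetBoolString(true, "1")
	client.SetBoolString(false, "0")

	ok, resp := client.PermanentDeleteUser("user-id") // placeholder id
	if resp.Error != nil {
		fmt.Println("delete failed:", resp.Error.Error())
		return
	}
	fmt.Println("deleted:", ok)
}

As the method's own comment notes, the override exists for testing; production callers can ignore it and get the default "true"/"false" strings.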
func (c *Client4) SendPasswordResetEmail(email string) (bool, *Response) { @@ -1287,6 +1418,16 @@ func (c *Client4) VerifyUserEmail(token string) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// VerifyUserEmailWithoutToken will verify a user's email by its Id. (Requires manage system role) +func (c *Client4) VerifyUserEmailWithoutToken(userId string) (*User, *Response) { + r, err := c.DoApiPost(c.GetUserRoute(userId)+"/email/verify/member", "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UserFromJson(r.Body), BuildResponse(r) +} + // SendVerificationEmail will send an email to the user with the provided email address, if // that user exists. The email will contain a link that can be used to verify the user's // email address. @@ -1487,7 +1628,7 @@ func (c *Client4) GetBot(userId string, etag string) (*Bot, *Response) { // GetBot fetches the given bot, even if it is deleted. func (c *Client4) GetBotIncludeDeleted(userId string, etag string) (*Bot, *Response) { - r, err := c.DoApiGet(c.GetBotRoute(userId)+"?include_deleted=true", etag) + r, err := c.DoApiGet(c.GetBotRoute(userId)+"?include_deleted="+c.boolString(true), etag) if err != nil { return nil, BuildErrorResponse(r, err) } @@ -1508,7 +1649,7 @@ func (c *Client4) GetBots(page, perPage int, etag string) ([]*Bot, *Response) { // GetBotsIncludeDeleted fetches the given page of bots, including deleted. func (c *Client4) GetBotsIncludeDeleted(page, perPage int, etag string) ([]*Bot, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted=true", page, perPage) + query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted="+c.boolString(true), page, perPage) r, err := c.DoApiGet(c.GetBotsRoute()+query, etag) if err != nil { return nil, BuildErrorResponse(r, err) @@ -1519,7 +1660,7 @@ func (c *Client4) GetBotsIncludeDeleted(page, perPage int, etag string) ([]*Bot, // GetBotsOrphaned fetches the given page of bots, only including orphanded bots. func (c *Client4) GetBotsOrphaned(page, perPage int, etag string) ([]*Bot, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v&only_orphaned=true", page, perPage) + query := fmt.Sprintf("?page=%v&per_page=%v&only_orphaned="+c.boolString(true), page, perPage) r, err := c.DoApiGet(c.GetBotsRoute()+query, etag) if err != nil { return nil, BuildErrorResponse(r, err) @@ -1659,7 +1800,7 @@ func (c *Client4) GetAllTeams(etag string, page int, perPage int) ([]*Team, *Res // GetAllTeamsWithTotalCount returns all teams based on permissions. func (c *Client4) GetAllTeamsWithTotalCount(etag string, page int, perPage int) ([]*Team, int64, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count=true", page, perPage) + query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count="+c.boolString(true), page, perPage) r, err := c.DoApiGet(c.GetTeamsRoute()+query, etag) if err != nil { return nil, 0, BuildErrorResponse(r, err) @@ -1811,7 +1952,7 @@ func (c *Client4) SoftDeleteTeam(teamId string) (bool, *Response) { // PermanentDeleteTeam deletes the team, should only be used when needed for // compliance and the like. 
func (c *Client4) PermanentDeleteTeam(teamId string) (bool, *Response) { - r, err := c.DoApiDelete(c.GetTeamRoute(teamId) + "?permanent=true") + r, err := c.DoApiDelete(c.GetTeamRoute(teamId) + "?permanent=" + c.boolString(true)) if err != nil { return false, BuildErrorResponse(r, err) } @@ -1931,7 +2072,7 @@ func (c *Client4) AddTeamMembersGracefully(teamId string, userIds []string) ([]* members = append(members, member) } - r, err := c.DoApiPost(c.GetTeamMembersRoute(teamId)+"/batch?graceful=true", TeamMembersToJson(members)) + r, err := c.DoApiPost(c.GetTeamMembersRoute(teamId)+"/batch?graceful="+c.boolString(true), TeamMembersToJson(members)) if err != nil { return nil, BuildErrorResponse(r, err) } @@ -2049,7 +2190,7 @@ func (c *Client4) InviteGuestsToTeam(teamId string, userEmails []string, channel // InviteUsersToTeam invite users by email to the team. func (c *Client4) InviteUsersToTeamGracefully(teamId string, userEmails []string) ([]*EmailInviteWithError, *Response) { - r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite/email?graceful=true", ArrayToJson(userEmails)) + r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite/email?graceful="+c.boolString(true), ArrayToJson(userEmails)) if err != nil { return nil, BuildErrorResponse(r, err) } @@ -2064,7 +2205,7 @@ func (c *Client4) InviteGuestsToTeamGracefully(teamId string, userEmails []strin Channels: channels, Message: message, } - r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite-guests/email?graceful=true", guestsInvite.ToJson()) + r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite-guests/email?graceful="+c.boolString(true), guestsInvite.ToJson()) if err != nil { return nil, BuildErrorResponse(r, err) } @@ -2163,7 +2304,16 @@ func (c *Client4) RemoveTeamIcon(teamId string) (bool, *Response) { // GetAllChannels get all the channels. Must be a system administrator. func (c *Client4) GetAllChannels(page int, perPage int, etag string) (*ChannelListWithTeamData, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + return c.getAllChannels(page, perPage, etag, false) +} + +// GetAllChannelsIncludeDeleted get all the channels. Must be a system administrator. +func (c *Client4) GetAllChannelsIncludeDeleted(page int, perPage int, etag string) (*ChannelListWithTeamData, *Response) { + return c.getAllChannels(page, perPage, etag, true) +} + +func (c *Client4) getAllChannels(page int, perPage int, etag string, includeDeleted bool) (*ChannelListWithTeamData, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted=%v", page, perPage, includeDeleted) r, err := c.DoApiGet(c.GetChannelsRoute()+query, etag) if err != nil { return nil, BuildErrorResponse(r, err) @@ -2174,7 +2324,7 @@ func (c *Client4) GetAllChannels(page int, perPage int, etag string) (*ChannelLi // GetAllChannelsWithCount get all the channels including the total count. Must be a system administrator. 
func (c *Client4) GetAllChannelsWithCount(page int, perPage int, etag string) (*ChannelListWithTeamData, int64, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count=true", page, perPage) + query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count="+c.boolString(true), page, perPage) r, err := c.DoApiGet(c.GetChannelsRoute()+query, etag) if err != nil { return nil, 0, BuildErrorResponse(r, err) @@ -2307,6 +2457,17 @@ func (c *Client4) GetPinnedPosts(channelId string, etag string) (*PostList, *Res return PostListFromJson(r.Body), BuildResponse(r) } +// GetPrivateChannelsForTeam returns a list of private channels based on the provided team id string. +func (c *Client4) GetPrivateChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) { + query := fmt.Sprintf("/private?page=%v&per_page=%v", page, perPage) + r, err := c.DoApiGet(c.GetChannelsForTeamRoute(teamId)+query, etag) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelSliceFromJson(r.Body), BuildResponse(r) +} + // GetPublicChannelsForTeam returns a list of public channels based on the provided team id string. func (c *Client4) GetPublicChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) { query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) @@ -2349,6 +2510,18 @@ func (c *Client4) GetChannelsForTeamForUser(teamId, userId string, includeDelete return ChannelSliceFromJson(r.Body), BuildResponse(r) } +// GetChannelsForTeamAndUserWithLastDeleteAt returns a list channels of a team for a user, additionally filtered with lastDeleteAt. This does not have any effect if includeDeleted is set to false. +func (c *Client4) GetChannelsForTeamAndUserWithLastDeleteAt(teamId, userId string, includeDeleted bool, lastDeleteAt int, etag string) ([]*Channel, *Response) { + route := fmt.Sprintf(c.GetUserRoute(userId) + c.GetTeamRoute(teamId) + "/channels") + route += fmt.Sprintf("?include_deleted=%v&last_delete_at=%d", includeDeleted, lastDeleteAt) + r, err := c.DoApiGet(route, etag) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelSliceFromJson(r.Body), BuildResponse(r) +} + // SearchChannels returns the channels on a team matching the provided search term. func (c *Client4) SearchChannels(teamId string, search *ChannelSearch) ([]*Channel, *Response) { r, err := c.DoApiPost(c.GetChannelsForTeamRoute(teamId)+"/search", search.ToJson()) @@ -2409,6 +2582,30 @@ func (c *Client4) DeleteChannel(channelId string) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// PermanentDeleteChannel deletes a channel based on the provided channel id string. +func (c *Client4) PermanentDeleteChannel(channelId string) (bool, *Response) { + r, err := c.DoApiDelete(c.GetChannelRoute(channelId) + "?permanent=" + c.boolString(true)) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// MoveChannel moves the channel to the destination team. 
+func (c *Client4) MoveChannel(channelId, teamId string, force bool) (*Channel, *Response) { + requestBody := map[string]interface{}{ + "team_id": teamId, + "force": force, + } + r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/move", StringInterfaceToJson(requestBody)) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) +} + // GetChannelByName returns a channel based on the provided channel name and team id strings. func (c *Client4) GetChannelByName(channelName, teamId string, etag string) (*Channel, *Response) { r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId), etag) @@ -2421,7 +2618,7 @@ func (c *Client4) GetChannelByName(channelName, teamId string, etag string) (*Ch // GetChannelByNameIncludeDeleted returns a channel based on the provided channel name and team id strings. Other then GetChannelByName it will also return deleted channels. func (c *Client4) GetChannelByNameIncludeDeleted(channelName, teamId string, etag string) (*Channel, *Response) { - r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId)+"?include_deleted=true", etag) + r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId)+"?include_deleted="+c.boolString(true), etag) if err != nil { return nil, BuildErrorResponse(r, err) } @@ -2441,7 +2638,7 @@ func (c *Client4) GetChannelByNameForTeamName(channelName, teamName string, etag // GetChannelByNameForTeamNameIncludeDeleted returns a channel based on the provided channel name and team name strings. Other then GetChannelByNameForTeamName it will also return deleted channels. func (c *Client4) GetChannelByNameForTeamNameIncludeDeleted(channelName, teamName string, etag string) (*Channel, *Response) { - r, err := c.DoApiGet(c.GetChannelByNameForTeamNameRoute(channelName, teamName)+"?include_deleted=true", etag) + r, err := c.DoApiGet(c.GetChannelByNameForTeamNameRoute(channelName, teamName)+"?include_deleted="+c.boolString(true), etag) if err != nil { return nil, BuildErrorResponse(r, err) } @@ -3059,7 +3256,7 @@ func (c *Client4) GetPing() (string, *Response) { // GetPingWithServerStatus will return ok if several basic server health checks // all pass successfully. func (c *Client4) GetPingWithServerStatus() (string, *Response) { - r, err := c.DoApiGet(c.GetSystemRoute()+"/ping?get_server_status=true", "") + r, err := c.DoApiGet(c.GetSystemRoute()+"/ping?get_server_status="+c.boolString(true), "") if r != nil && r.StatusCode == 500 { defer r.Body.Close() return STATUS_UNHEALTHY, BuildErrorResponse(r, err) @@ -3187,6 +3384,19 @@ func (c *Client4) UpdateConfig(config *Config) (*Config, *Response) { return ConfigFromJson(r.Body), BuildResponse(r) } +// MigrateConfig will migrate existing config to the new one. +func (c *Client4) MigrateConfig(from, to string) (bool, *Response) { + m := make(map[string]string, 2) + m["from"] = from + m["to"] = to + r, err := c.DoApiPost(c.GetConfigRoute()+"/migrate", MapToJson(m)) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return true, BuildResponse(r) +} + // UploadLicenseFile will add a license file to the system. 
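A minimal usage sketch of the MoveChannel call added above, not part of the vendored code. The ids are placeholders, and the exact semantics of the force flag are decided server-side; the client simply forwards it in the request body.

package example

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

// moveChannelExample moves a channel to another team using the new endpoint.
func moveChannelExample(client *model.Client4) error {
	channel, resp := client.MoveChannel("channel-id", "destination-team-id", true) // placeholder ids
	if resp.Error != nil {
		return resp.Error
	}
	fmt.Printf("channel %s now belongs to team %s\n", channel.Id, channel.TeamId)
	return nil
}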
func (c *Client4) UploadLicenseFile(data []byte) (bool, *Response) { body := &bytes.Buffer{} @@ -3470,7 +3680,7 @@ func (c *Client4) GetSamlMetadata() (string, *Response) { return buf.String(), BuildResponse(r) } -func samlFileToMultipart(data []byte, filename string) ([]byte, *multipart.Writer, error) { +func fileToMultipart(data []byte, filename string) ([]byte, *multipart.Writer, error) { body := &bytes.Buffer{} writer := multipart.NewWriter(body) @@ -3493,7 +3703,7 @@ func samlFileToMultipart(data []byte, filename string) ([]byte, *multipart.Write // UploadSamlIdpCertificate will upload an IDP certificate for SAML and set the config to use it. // The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk. func (c *Client4) UploadSamlIdpCertificate(data []byte, filename string) (bool, *Response) { - body, writer, err := samlFileToMultipart(data, filename) + body, writer, err := fileToMultipart(data, filename) if err != nil { return false, &Response{Error: NewAppError("UploadSamlIdpCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)} } @@ -3505,7 +3715,7 @@ func (c *Client4) UploadSamlIdpCertificate(data []byte, filename string) (bool, // UploadSamlPublicCertificate will upload a public certificate for SAML and set the config to use it. // The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk. func (c *Client4) UploadSamlPublicCertificate(data []byte, filename string) (bool, *Response) { - body, writer, err := samlFileToMultipart(data, filename) + body, writer, err := fileToMultipart(data, filename) if err != nil { return false, &Response{Error: NewAppError("UploadSamlPublicCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)} } @@ -3517,7 +3727,7 @@ func (c *Client4) UploadSamlPublicCertificate(data []byte, filename string) (boo // UploadSamlPrivateCertificate will upload a private key for SAML and set the config to use it. // The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk. func (c *Client4) UploadSamlPrivateCertificate(data []byte, filename string) (bool, *Response) { - body, writer, err := samlFileToMultipart(data, filename) + body, writer, err := fileToMultipart(data, filename) if err != nil { return false, &Response{Error: NewAppError("UploadSamlPrivateCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)} } @@ -3685,7 +3895,19 @@ func (c *Client4) GetLdapGroups() ([]*Group, *Response) { } defer closeBody(r) - return GroupsFromJson(r.Body), BuildResponse(r) + responseData := struct { + Count int `json:"count"` + Groups []*Group `json:"groups"` + }{} + if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil { + appErr := NewAppError("Api4.GetLdapGroups", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + return nil, BuildErrorResponse(r, appErr) + } + for i := range responseData.Groups { + responseData.Groups[i].DisplayName = *responseData.Groups[i].Name + } + + return responseData.Groups, BuildResponse(r) } // LinkLdapGroup creates or undeletes a Mattermost group and associates it to the given LDAP group DN. 
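The GetLdapGroups change above switches the client from decoding a bare group array to decoding a {"count": N, "groups": [...]} envelope, copying each group's Name into DisplayName before returning. A hypothetical caller, not part of the vendored code, would be unaffected apart from DisplayName now being populated:

package example

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

// ldapGroupsExample lists the LDAP groups known to the server; DisplayName is
// filled in by the updated client from the group's Name field.
func ldapGroupsExample(client *model.Client4) error {
	groups, resp := client.GetLdapGroups()
	if resp.Error != nil {
		return resp.Error
	}
	for _, g := range groups {
		fmt.Println(g.DisplayName)
	}
	return nil
}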
@@ -3714,6 +3936,18 @@ func (c *Client4) UnlinkLdapGroup(dn string) (*Group, *Response) { return GroupFromJson(r.Body), BuildResponse(r) } +// MigrateIdLdap migrates the LDAP enabled users to given attribute +func (c *Client4) MigrateIdLdap(toAttribute string) (bool, *Response) { + r, err := c.DoApiPost(c.GetLdapRoute()+"/migrateid", MapToJson(map[string]string{ + "toAttribute": toAttribute, + })) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + // GetGroupsByChannel retrieves the Mattermost Groups associated with a given channel func (c *Client4) GetGroupsByChannel(channelId string, opts GroupSearchOpts) ([]*GroupWithSchemeAdmin, int, *Response) { path := fmt.Sprintf("%s/groups?q=%v&include_member_count=%v&filter_allow_reference=%v", c.GetChannelRoute(channelId), opts.Q, opts.IncludeMemberCount, opts.FilterAllowReference) @@ -3828,6 +4062,74 @@ func (c *Client4) GetGroupsByUserId(userId string) ([]*Group, *Response) { return GroupsFromJson(r.Body), BuildResponse(r) } +func (c *Client4) MigrateAuthToLdap(fromAuthService string, matchField string, force bool) (bool, *Response) { + r, err := c.DoApiPost(c.GetUsersRoute()+"/migrate_auth/ldap", StringInterfaceToJson(map[string]interface{}{ + "from": fromAuthService, + "force": force, + "match_field": matchField, + })) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +func (c *Client4) MigrateAuthToSaml(fromAuthService string, usersMap map[string]string, auto bool) (bool, *Response) { + r, err := c.DoApiPost(c.GetUsersRoute()+"/migrate_auth/saml", StringInterfaceToJson(map[string]interface{}{ + "from": fromAuthService, + "auto": auto, + "matches": usersMap, + })) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// UploadLdapPublicCertificate will upload a public certificate for LDAP and set the config to use it. +func (c *Client4) UploadLdapPublicCertificate(data []byte) (bool, *Response) { + body, writer, err := fileToMultipart(data, LDAP_PUBIC_CERTIFICATE_NAME) + if err != nil { + return false, &Response{Error: NewAppError("UploadLdapPublicCertificate", "model.client.upload_ldap_cert.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + _, resp := c.DoUploadFile(c.GetLdapRoute()+"/certificate/public", body, writer.FormDataContentType()) + return resp.Error == nil, resp +} + +// UploadLdapPrivateCertificate will upload a private key for LDAP and set the config to use it. +func (c *Client4) UploadLdapPrivateCertificate(data []byte) (bool, *Response) { + body, writer, err := fileToMultipart(data, LDAP_PRIVATE_KEY_NAME) + if err != nil { + return false, &Response{Error: NewAppError("UploadLdapPrivateCertificate", "model.client.upload_Ldap_cert.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + _, resp := c.DoUploadFile(c.GetLdapRoute()+"/certificate/private", body, writer.FormDataContentType()) + return resp.Error == nil, resp +} + +// DeleteLdapPublicCertificate deletes the LDAP IDP certificate from the server and updates the config to not use it and disable LDAP. 
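A sketch of the new authentication-migration helpers added in the hunks above, not part of the vendored code. The auth-service names, match field, and user mapping are placeholders; the server decides which values are valid.

package example

import (
	"github.com/mattermost/mattermost-server/v5/model"
)

// authMigrationExample migrates accounts between auth providers using the new endpoints.
func authMigrationExample(client *model.Client4) error {
	// Migrate email/password accounts to LDAP, matching on a placeholder attribute.
	if _, resp := client.MigrateAuthToLdap("email", "email", false); resp.Error != nil {
		return resp.Error
	}
	// Migrate LDAP accounts to SAML using an explicit user mapping.
	matches := map[string]string{"user@example.com": "user@example.com"} // placeholder mapping
	if _, resp := client.MigrateAuthToSaml("ldap", matches, false); resp.Error != nil {
		return resp.Error
	}
	return nil
}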
+func (c *Client4) DeleteLdapPublicCertificate() (bool, *Response) { + r, err := c.DoApiDelete(c.GetLdapRoute() + "/certificate/public") + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// DeleteLDAPPrivateCertificate deletes the LDAP IDP certificate from the server and updates the config to not use it and disable LDAP. +func (c *Client4) DeleteLdapPrivateCertificate() (bool, *Response) { + r, err := c.DoApiDelete(c.GetLdapRoute() + "/certificate/private") + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + // Audits Section // GetAudits returns a list of audits for the whole system. @@ -4519,6 +4821,21 @@ func (c *Client4) CancelJob(jobId string) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// DownloadJob downloads the results of the job +func (c *Client4) DownloadJob(jobId string) ([]byte, *Response) { + r, appErr := c.DoApiGet(c.GetJobsRoute()+fmt.Sprintf("/%v/download", jobId), "") + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + data, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, BuildErrorResponse(r, NewAppError("GetFile", "model.client.read_job_result_file.app_error", nil, err.Error(), r.StatusCode)) + } + return data, BuildResponse(r) +} + // Roles Section // GetRole gets a single role by ID. @@ -4650,7 +4967,7 @@ func (c *Client4) uploadPlugin(file io.Reader, force bool) (*Manifest, *Response writer := multipart.NewWriter(body) if force { - err := writer.WriteField("force", "true") + err := writer.WriteField("force", c.boolString(true)) if err != nil { return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)} } @@ -4693,10 +5010,7 @@ func (c *Client4) uploadPlugin(file io.Reader, force bool) (*Manifest, *Response } func (c *Client4) InstallPluginFromUrl(downloadUrl string, force bool) (*Manifest, *Response) { - forceStr := "false" - if force { - forceStr = "true" - } + forceStr := c.boolString(force) url := fmt.Sprintf("%s?plugin_download_url=%s&force=%s", c.GetPluginsRoute()+"/install_from_url", url.QueryEscape(downloadUrl), forceStr) r, err := c.DoApiPost(url, "") @@ -5074,6 +5388,16 @@ func (c *Client4) GetKnownUsers() ([]string, *Response) { return userIds, BuildResponse(r) } +// PublishUserTyping publishes a user is typing websocket event based on the provided TypingRequest. 
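The DownloadJob call added above returns the raw bytes of a job's result. A hypothetical caller, not part of the vendored code, might persist them to disk; the job id and output filename are placeholders.

package example

import (
	"io/ioutil"

	"github.com/mattermost/mattermost-server/v5/model"
)

// downloadJobExample fetches a job's result and writes it to a local file.
func downloadJobExample(client *model.Client4) error {
	data, resp := client.DownloadJob("job-id") // placeholder id
	if resp.Error != nil {
		return resp.Error
	}
	return ioutil.WriteFile("job-result.zip", data, 0600) // placeholder path
}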
+func (c *Client4) PublishUserTyping(userID string, typingRequest TypingRequest) (bool, *Response) { + r, err := c.DoApiPost(c.GetPublishUserTypingRoute(userID), typingRequest.ToJson()) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + func (c *Client4) GetChannelMemberCountsByGroup(channelID string, includeTimezones bool, etag string) ([]*ChannelMemberCountByGroup, *Response) { r, err := c.DoApiGet(c.GetChannelRoute(channelID)+"/member_counts_by_group?include_timezones="+strconv.FormatBool(includeTimezones), etag) if err != nil { @@ -5093,3 +5417,202 @@ func (c *Client4) RequestTrialLicense(users int) (bool, *Response) { defer closeBody(r) return CheckStatusOK(r), BuildResponse(r) } + +// GetGroupStats retrieves stats for a Mattermost Group +func (c *Client4) GetGroupStats(groupID string) (*GroupStats, *Response) { + r, appErr := c.DoApiGet(c.GetGroupRoute(groupID)+"/stats", "") + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + return GroupStatsFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) GetSidebarCategoriesForTeamForUser(userID, teamID, etag string) (*OrderedSidebarCategories, *Response) { + route := c.GetUserCategoryRoute(userID, teamID) + r, appErr := c.DoApiGet(route, etag) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + cat, err := OrderedSidebarCategoriesFromJson(r.Body) + if err != nil { + return nil, BuildErrorResponse(r, NewAppError("Client4.GetSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)) + } + return cat, BuildResponse(r) +} + +func (c *Client4) CreateSidebarCategoryForTeamForUser(userID, teamID string, category *SidebarCategoryWithChannels) (*SidebarCategoryWithChannels, *Response) { + payload, _ := json.Marshal(category) + route := c.GetUserCategoryRoute(userID, teamID) + r, appErr := c.doApiPostBytes(route, payload) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + cat, err := SidebarCategoryFromJson(r.Body) + if err != nil { + return nil, BuildErrorResponse(r, NewAppError("Client4.CreateSidebarCategoryForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)) + } + return cat, BuildResponse(r) +} + +func (c *Client4) UpdateSidebarCategoriesForTeamForUser(userID, teamID string, categories []*SidebarCategoryWithChannels) ([]*SidebarCategoryWithChannels, *Response) { + payload, _ := json.Marshal(categories) + route := c.GetUserCategoryRoute(userID, teamID) + + r, appErr := c.doApiPutBytes(route, payload) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + categories, err := SidebarCategoriesFromJson(r.Body) + if err != nil { + return nil, BuildErrorResponse(r, NewAppError("Client4.UpdateSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)) + } + + return categories, BuildResponse(r) +} + +func (c *Client4) GetSidebarCategoryOrderForTeamForUser(userID, teamID, etag string) ([]string, *Response) { + route := c.GetUserCategoryRoute(userID, teamID) + "/order" + r, err := c.DoApiGet(route, etag) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ArrayFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) UpdateSidebarCategoryOrderForTeamForUser(userID, teamID string, order []string) ([]string, *Response) { + payload, _ := json.Marshal(order) + route := 
c.GetUserCategoryRoute(userID, teamID) + "/order" + r, err := c.doApiPutBytes(route, payload) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ArrayFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) GetSidebarCategoryForTeamForUser(userID, teamID, categoryID, etag string) (*SidebarCategoryWithChannels, *Response) { + route := c.GetUserCategoryRoute(userID, teamID) + "/" + categoryID + r, appErr := c.DoApiGet(route, etag) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + cat, err := SidebarCategoryFromJson(r.Body) + if err != nil { + return nil, BuildErrorResponse(r, NewAppError("Client4.UpdateSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)) + } + + return cat, BuildResponse(r) +} + +func (c *Client4) UpdateSidebarCategoryForTeamForUser(userID, teamID, categoryID string, category *SidebarCategoryWithChannels) (*SidebarCategoryWithChannels, *Response) { + payload, _ := json.Marshal(category) + route := c.GetUserCategoryRoute(userID, teamID) + "/" + categoryID + r, appErr := c.doApiPutBytes(route, payload) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + cat, err := SidebarCategoryFromJson(r.Body) + if err != nil { + return nil, BuildErrorResponse(r, NewAppError("Client4.UpdateSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)) + } + + return cat, BuildResponse(r) +} + +// CheckIntegrity performs a database integrity check. +func (c *Client4) CheckIntegrity() ([]IntegrityCheckResult, *Response) { + r, err := c.DoApiPost("/integrity", "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + var results []IntegrityCheckResult + if err := json.NewDecoder(r.Body).Decode(&results); err != nil { + appErr := NewAppError("Api4.CheckIntegrity", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + return nil, BuildErrorResponse(r, appErr) + } + return results, BuildResponse(r) +} + +func (c *Client4) GetNotices(lastViewed int64, teamId string, client NoticeClientType, clientVersion, locale, etag string) (NoticeMessages, *Response) { + url := fmt.Sprintf("/system/notices/%s?lastViewed=%d&client=%s&clientVersion=%s&locale=%s", teamId, lastViewed, client, clientVersion, locale) + r, appErr := c.DoApiGet(url, etag) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + notices, err := UnmarshalProductNoticeMessages(r.Body) + if err != nil { + return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)} + } + return notices, BuildResponse(r) +} + +func (c *Client4) MarkNoticesViewed(ids []string) *Response { + r, err := c.DoApiPut("/system/notices/view", ArrayToJson(ids)) + if err != nil { + return BuildErrorResponse(r, err) + } + defer closeBody(r) + return BuildResponse(r) +} + +// CreateUpload creates a new upload session. +func (c *Client4) CreateUpload(us *UploadSession) (*UploadSession, *Response) { + r, err := c.DoApiPost(c.GetUploadsRoute(), us.ToJson()) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UploadSessionFromJson(r.Body), BuildResponse(r) +} + +// GetUpload returns the upload session for the specified uploadId. 
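The sidebar-category client methods added above pair with the SidebarCategory types introduced earlier in this patch. The following sketch is illustrative only; user, team, and channel ids are placeholders.

package example

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

// sidebarCategoriesExample reads a user's sidebar categories for a team and
// creates a custom category containing two channels.
func sidebarCategoriesExample(client *model.Client4) error {
	ordered, resp := client.GetSidebarCategoriesForTeamForUser("user-id", "team-id", "")
	if resp.Error != nil {
		return resp.Error
	}
	fmt.Println("existing categories:", len(ordered.Categories))

	newCategory := &model.SidebarCategoryWithChannels{
		SidebarCategory: model.SidebarCategory{
			UserId:      "user-id",
			TeamId:      "team-id",
			Type:        model.SidebarCategoryCustom,
			DisplayName: "Projects",
		},
		Channels: []string{"channel-id-1", "channel-id-2"},
	}
	created, resp := client.CreateSidebarCategoryForTeamForUser("user-id", "team-id", newCategory)
	if resp.Error != nil {
		return resp.Error
	}
	fmt.Println("created category", created.Id)
	return nil
}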
+func (c *Client4) GetUpload(uploadId string) (*UploadSession, *Response) { + r, err := c.DoApiGet(c.GetUploadRoute(uploadId), "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UploadSessionFromJson(r.Body), BuildResponse(r) +} + +// GetUploadsForUser returns the upload sessions created by the specified +// userId. +func (c *Client4) GetUploadsForUser(userId string) ([]*UploadSession, *Response) { + r, err := c.DoApiGet(c.GetUserRoute(userId)+"/uploads", "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UploadSessionsFromJson(r.Body), BuildResponse(r) +} + +// UploadData performs an upload. On success it returns +// a FileInfo object. +func (c *Client4) UploadData(uploadId string, data io.Reader) (*FileInfo, *Response) { + url := c.GetUploadRoute(uploadId) + r, err := c.doApiRequestReader("POST", c.ApiUrl+url, data, "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return FileInfoFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) UpdatePassword(userId, currentPassword, newPassword string) *Response { + requestBody := map[string]string{"current_password": currentPassword, "new_password": newPassword} + r, err := c.DoApiPut(c.GetUserRoute(userId)+"/password", MapToJson(requestBody)) + if err != nil { + return BuildErrorResponse(r, err) + } + defer closeBody(r) + return BuildResponse(r) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go index 86113d78..529f4a93 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go @@ -43,6 +43,16 @@ const ( CLUSTER_EVENT_INVALIDATE_CACHE_FOR_TERMS_OF_SERVICE = "inv_terms_of_service" CLUSTER_EVENT_BUSY_STATE_CHANGED = "busy_state_change" + // Gossip communication + CLUSTER_GOSSIP_EVENT_REQUEST_GET_LOGS = "gossip_request_get_logs" + CLUSTER_GOSSIP_EVENT_RESPONSE_GET_LOGS = "gossip_response_get_logs" + CLUSTER_GOSSIP_EVENT_REQUEST_GET_CLUSTER_STATS = "gossip_request_cluster_stats" + CLUSTER_GOSSIP_EVENT_RESPONSE_GET_CLUSTER_STATS = "gossip_response_cluster_stats" + CLUSTER_GOSSIP_EVENT_REQUEST_GET_PLUGIN_STATUSES = "gossip_request_plugin_statuses" + CLUSTER_GOSSIP_EVENT_RESPONSE_GET_PLUGIN_STATUSES = "gossip_response_plugin_statuses" + CLUSTER_GOSSIP_EVENT_REQUEST_SAVE_CONFIG = "gossip_request_save_config" + CLUSTER_GOSSIP_EVENT_RESPONSE_SAVE_CONFIG = "gossip_response_save_config" + // SendTypes for ClusterMessage. 
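The upload-session methods added above (CreateUpload, GetUpload, GetUploadsForUser, UploadData) support the new resumable-upload API. A hypothetical two-step flow, not part of the vendored code, is sketched below; ids, the file path, and the UploadSession fields used are assumptions based on the UploadSession model added elsewhere in this patch.

package example

import (
	"fmt"
	"os"

	"github.com/mattermost/mattermost-server/v5/model"
)

// uploadSessionExample creates an upload session and then streams the file data to it.
func uploadSessionExample(client *model.Client4) error {
	file, err := os.Open("report.pdf") // placeholder path
	if err != nil {
		return err
	}
	defer file.Close()

	info, err := file.Stat()
	if err != nil {
		return err
	}

	// Create the session first; the server replies with its id.
	session, resp := client.CreateUpload(&model.UploadSession{
		Type:      model.UploadTypeAttachment, // assumed constant for attachment uploads
		ChannelId: "channel-id",               // placeholder id
		Filename:  "report.pdf",
		FileSize:  info.Size(),
	})
	if resp.Error != nil {
		return resp.Error
	}

	// Stream the data; on success the server returns the resulting FileInfo.
	fileInfo, resp := client.UploadData(session.Id, file)
	if resp.Error != nil {
		return resp.Error
	}
	fmt.Println("uploaded file id:", fileInfo.Id)
	return nil
}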
CLUSTER_SEND_BEST_EFFORT = "best_effort" CLUSTER_SEND_RELIABLE = "reliable" diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command.go b/vendor/github.com/mattermost/mattermost-server/v5/model/command.go index 6dcf52ae..0013046b 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/command.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/command.go @@ -18,23 +18,26 @@ const ( ) type Command struct { - Id string `json:"id"` - Token string `json:"token"` - CreateAt int64 `json:"create_at"` - UpdateAt int64 `json:"update_at"` - DeleteAt int64 `json:"delete_at"` - CreatorId string `json:"creator_id"` - TeamId string `json:"team_id"` - Trigger string `json:"trigger"` - Method string `json:"method"` - Username string `json:"username"` - IconURL string `json:"icon_url"` - AutoComplete bool `json:"auto_complete"` - AutoCompleteDesc string `json:"auto_complete_desc"` - AutoCompleteHint string `json:"auto_complete_hint"` - DisplayName string `json:"display_name"` - Description string `json:"description"` - URL string `json:"url"` + Id string `json:"id"` + Token string `json:"token"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + CreatorId string `json:"creator_id"` + TeamId string `json:"team_id"` + Trigger string `json:"trigger"` + Method string `json:"method"` + Username string `json:"username"` + IconURL string `json:"icon_url"` + AutoComplete bool `json:"auto_complete"` + AutoCompleteDesc string `json:"auto_complete_desc"` + AutoCompleteHint string `json:"auto_complete_hint"` + DisplayName string `json:"display_name"` + Description string `json:"description"` + URL string `json:"url"` + // PluginId records the id of the plugin that created this Command. If it is blank, the Command + // was not created by a plugin. + PluginId string `json:"plugin_id"` AutocompleteData *AutocompleteData `db:"-" json:"autocomplete_data,omitempty"` // AutocompleteIconData is a base64 encoded svg AutocompleteIconData string `db:"-" json:"autocomplete_icon_data,omitempty"` @@ -80,10 +83,20 @@ func (o *Command) IsValid() *AppError { return NewAppError("Command.IsValid", "model.command.is_valid.update_at.app_error", nil, "", http.StatusBadRequest) } - if !IsValidId(o.CreatorId) { + // If the CreatorId is blank, this should be a command created by a plugin. + if o.CreatorId == "" && !IsValidPluginId(o.PluginId) { + return NewAppError("Command.IsValid", "model.command.is_valid.plugin_id.app_error", nil, "", http.StatusBadRequest) + } + + // If the PluginId is blank, this should be a command associated with a userId. 
+ if o.PluginId == "" && !IsValidId(o.CreatorId) { return NewAppError("Command.IsValid", "model.command.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } + if o.CreatorId != "" && o.PluginId != "" { + return NewAppError("Command.IsValid", "model.command.is_valid.plugin_id.app_error", nil, "command cannot have both a CreatorId and a PluginId", http.StatusBadRequest) + } + if !IsValidId(o.TeamId) { return NewAppError("Command.IsValid", "model.command.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go b/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go index a3bbb4c9..15a6372a 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go @@ -20,9 +20,11 @@ type CommandArgs struct { Command string `json:"command"` SiteURL string `json:"-"` T goi18n.TranslateFunc `json:"-"` - Session Session `json:"-"` UserMentions UserMentionMap `json:"-"` ChannelMentions ChannelMentionMap `json:"-"` + + // DO NOT USE Session field is deprecated. MM-26398 + Session Session `json:"-"` } func (o *CommandArgs) ToJson() string { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/config.go b/vendor/github.com/mattermost/mattermost-server/v5/model/config.go index 4ca62e79..f50bbf29 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/config.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/config.go @@ -47,6 +47,7 @@ const ( GENERIC_NO_CHANNEL_NOTIFICATION = "generic_no_channel" GENERIC_NOTIFICATION = "generic" GENERIC_NOTIFICATION_SERVER = "https://push-test.mattermost.com" + MM_SUPPORT_ADDRESS = "support@mattermost.com" FULL_NOTIFICATION = "full" ID_LOADED_NOTIFICATION = "id_loaded" @@ -160,8 +161,10 @@ const ( ANALYTICS_SETTINGS_DEFAULT_MAX_USERS_FOR_STATISTICS = 2500 - ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR = "#f2a93b" - ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR = "#333333" + ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR = "#f2a93b" + ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR = "#333333" + ANNOUNCEMENT_SETTINGS_DEFAULT_NOTICES_JSON_URL = "https://notices.mattermost.com/" + ANNOUNCEMENT_SETTINGS_DEFAULT_NOTICES_FETCH_FREQUENCY_SECONDS = 3600 TEAM_SETTINGS_DEFAULT_TEAM_TEXT = "default" @@ -246,94 +249,97 @@ var ServerTLSSupportedCiphers = map[string]uint16{ } type ServiceSettings struct { - SiteURL *string `restricted:"true"` - WebsocketURL *string `restricted:"true"` - LicenseFileLocation *string `restricted:"true"` - ListenAddress *string `restricted:"true"` - ConnectionSecurity *string `restricted:"true"` - TLSCertFile *string `restricted:"true"` - TLSKeyFile *string `restricted:"true"` - TLSMinVer *string `restricted:"true"` - TLSStrictTransport *bool `restricted:"true"` - TLSStrictTransportMaxAge *int64 `restricted:"true"` - TLSOverwriteCiphers []string `restricted:"true"` - UseLetsEncrypt *bool `restricted:"true"` - LetsEncryptCertificateCacheFile *string `restricted:"true"` - Forward80To443 *bool `restricted:"true"` - TrustedProxyIPHeader []string `restricted:"true"` - ReadTimeout *int `restricted:"true"` - WriteTimeout *int `restricted:"true"` - IdleTimeout *int `restricted:"true"` - MaximumLoginAttempts *int `restricted:"true"` - GoroutineHealthThreshold *int `restricted:"true"` - GoogleDeveloperKey *string `restricted:"true"` - EnableOAuthServiceProvider *bool - EnableIncomingWebhooks *bool - EnableOutgoingWebhooks 
*bool - EnableCommands *bool - DEPRECATED_DO_NOT_USE_EnableOnlyAdminIntegrations *bool `json:"EnableOnlyAdminIntegrations" mapstructure:"EnableOnlyAdminIntegrations"` // This field is deprecated and must not be used. - EnablePostUsernameOverride *bool - EnablePostIconOverride *bool - EnableLinkPreviews *bool - EnableTesting *bool `restricted:"true"` - EnableDeveloper *bool `restricted:"true"` - EnableOpenTracing *bool `restricted:"true"` - EnableSecurityFixAlert *bool `restricted:"true"` - EnableInsecureOutgoingConnections *bool `restricted:"true"` - AllowedUntrustedInternalConnections *string `restricted:"true"` - EnableMultifactorAuthentication *bool - EnforceMultifactorAuthentication *bool - EnableUserAccessTokens *bool - AllowCorsFrom *string `restricted:"true"` - CorsExposedHeaders *string `restricted:"true"` - CorsAllowCredentials *bool `restricted:"true"` - CorsDebug *bool `restricted:"true"` - AllowCookiesForSubdomains *bool `restricted:"true"` - ExtendSessionLengthWithActivity *bool `restricted:"true"` - SessionLengthWebInDays *int `restricted:"true"` - SessionLengthMobileInDays *int `restricted:"true"` - SessionLengthSSOInDays *int `restricted:"true"` - SessionCacheInMinutes *int `restricted:"true"` - SessionIdleTimeoutInMinutes *int `restricted:"true"` - WebsocketSecurePort *int `restricted:"true"` - WebsocketPort *int `restricted:"true"` - WebserverMode *string `restricted:"true"` - EnableCustomEmoji *bool - EnableEmojiPicker *bool - EnableGifPicker *bool - GfycatApiKey *string - GfycatApiSecret *string - DEPRECATED_DO_NOT_USE_RestrictCustomEmojiCreation *string `json:"RestrictCustomEmojiCreation" mapstructure:"RestrictCustomEmojiCreation"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPostDelete *string `json:"RestrictPostDelete" mapstructure:"RestrictPostDelete"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_AllowEditPost *string `json:"AllowEditPost" mapstructure:"AllowEditPost"` // This field is deprecated and must not be used. - PostEditTimeLimit *int - TimeBetweenUserTypingUpdatesMilliseconds *int64 `restricted:"true"` - EnablePostSearch *bool `restricted:"true"` - MinimumHashtagLength *int `restricted:"true"` - EnableUserTypingMessages *bool `restricted:"true"` - EnableChannelViewedMessages *bool `restricted:"true"` - EnableUserStatuses *bool `restricted:"true"` - ExperimentalEnableAuthenticationTransfer *bool `restricted:"true"` - ClusterLogTimeoutMilliseconds *int `restricted:"true"` - CloseUnusedDirectMessages *bool - EnablePreviewFeatures *bool - EnableTutorial *bool - ExperimentalEnableDefaultChannelLeaveJoinMessages *bool - ExperimentalGroupUnreadChannels *string - ExperimentalChannelOrganization *bool - ExperimentalChannelSidebarOrganization *string - DEPRECATED_DO_NOT_USE_ImageProxyType *string `json:"ImageProxyType" mapstructure:"ImageProxyType"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_ImageProxyURL *string `json:"ImageProxyURL" mapstructure:"ImageProxyURL"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_ImageProxyOptions *string `json:"ImageProxyOptions" mapstructure:"ImageProxyOptions"` // This field is deprecated and must not be used. 
+ SiteURL *string `access:"environment,authentication,write_restrictable"` + WebsocketURL *string `access:"write_restrictable"` + LicenseFileLocation *string `access:"write_restrictable"` + ListenAddress *string `access:"environment,write_restrictable"` + ConnectionSecurity *string `access:"environment,write_restrictable"` + TLSCertFile *string `access:"environment,write_restrictable"` + TLSKeyFile *string `access:"environment,write_restrictable"` + TLSMinVer *string `access:"write_restrictable"` + TLSStrictTransport *bool `access:"write_restrictable"` + TLSStrictTransportMaxAge *int64 `access:"write_restrictable"` + TLSOverwriteCiphers []string `access:"write_restrictable"` + UseLetsEncrypt *bool `access:"environment,write_restrictable"` + LetsEncryptCertificateCacheFile *string `access:"environment,write_restrictable"` + Forward80To443 *bool `access:"environment,write_restrictable"` + TrustedProxyIPHeader []string `access:"write_restrictable"` + ReadTimeout *int `access:"environment,write_restrictable"` + WriteTimeout *int `access:"environment,write_restrictable"` + IdleTimeout *int `access:"write_restrictable"` + MaximumLoginAttempts *int `access:"authentication,write_restrictable"` + GoroutineHealthThreshold *int `access:"write_restrictable"` + GoogleDeveloperKey *string `access:"site,write_restrictable"` + EnableOAuthServiceProvider *bool `access:"integrations"` + EnableIncomingWebhooks *bool `access:"integrations"` + EnableOutgoingWebhooks *bool `access:"integrations"` + EnableCommands *bool `access:"integrations"` + DEPRECATED_DO_NOT_USE_EnableOnlyAdminIntegrations *bool `json:"EnableOnlyAdminIntegrations" mapstructure:"EnableOnlyAdminIntegrations"` // This field is deprecated and must not be used. + EnablePostUsernameOverride *bool `access:"integrations"` + EnablePostIconOverride *bool `access:"integrations"` + EnableLinkPreviews *bool `access:"site"` + EnableTesting *bool `access:"environment,write_restrictable"` + EnableDeveloper *bool `access:"environment,write_restrictable"` + EnableOpenTracing *bool `access:"write_restrictable"` + EnableSecurityFixAlert *bool `access:"environment,write_restrictable"` + EnableInsecureOutgoingConnections *bool `access:"environment,write_restrictable"` + AllowedUntrustedInternalConnections *string `access:"environment,write_restrictable"` + EnableMultifactorAuthentication *bool `access:"authentication"` + EnforceMultifactorAuthentication *bool `access:"authentication"` + EnableUserAccessTokens *bool `access:"integrations"` + AllowCorsFrom *string `access:"integrations,write_restrictable"` + CorsExposedHeaders *string `access:"integrations,write_restrictable"` + CorsAllowCredentials *bool `access:"integrations,write_restrictable"` + CorsDebug *bool `access:"integrations,write_restrictable"` + AllowCookiesForSubdomains *bool `access:"write_restrictable"` + ExtendSessionLengthWithActivity *bool `access:"environment,write_restrictable"` + SessionLengthWebInDays *int `access:"environment,write_restrictable"` + SessionLengthMobileInDays *int `access:"environment,write_restrictable"` + SessionLengthSSOInDays *int `access:"environment,write_restrictable"` + SessionCacheInMinutes *int `access:"environment,write_restrictable"` + SessionIdleTimeoutInMinutes *int `access:"environment,write_restrictable"` + WebsocketSecurePort *int `access:"write_restrictable"` + WebsocketPort *int `access:"write_restrictable"` + WebserverMode *string `access:"environment,write_restrictable"` + EnableCustomEmoji *bool `access:"site"` + EnableEmojiPicker *bool `access:"site"` 
+ EnableGifPicker *bool `access:"integrations"` + GfycatApiKey *string `access:"integrations"` + GfycatApiSecret *string `access:"integrations"` + DEPRECATED_DO_NOT_USE_RestrictCustomEmojiCreation *string `json:"RestrictCustomEmojiCreation" mapstructure:"RestrictCustomEmojiCreation"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPostDelete *string `json:"RestrictPostDelete" mapstructure:"RestrictPostDelete"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_AllowEditPost *string `json:"AllowEditPost" mapstructure:"AllowEditPost"` // This field is deprecated and must not be used. + PostEditTimeLimit *int `access:"user_management_permissions"` + TimeBetweenUserTypingUpdatesMilliseconds *int64 `access:"experimental,write_restrictable"` + EnablePostSearch *bool `access:"write_restrictable"` + MinimumHashtagLength *int `access:"environment,write_restrictable"` + EnableUserTypingMessages *bool `access:"experimental,write_restrictable"` + EnableChannelViewedMessages *bool `access:"experimental,write_restrictable"` + EnableUserStatuses *bool `access:"write_restrictable"` + ExperimentalEnableAuthenticationTransfer *bool `access:"experimental,write_restrictable"` + ClusterLogTimeoutMilliseconds *int `access:"write_restrictable"` + CloseUnusedDirectMessages *bool `access:"experimental"` + EnablePreviewFeatures *bool `access:"experimental"` + EnableTutorial *bool `access:"experimental"` + ExperimentalEnableDefaultChannelLeaveJoinMessages *bool `access:"experimental"` + ExperimentalGroupUnreadChannels *string `access:"experimental"` + ExperimentalChannelOrganization *bool `access:"experimental"` + ExperimentalChannelSidebarOrganization *string `access:"experimental"` + ExperimentalDataPrefetch *bool `access:"experimental"` + DEPRECATED_DO_NOT_USE_ImageProxyType *string `json:"ImageProxyType" mapstructure:"ImageProxyType"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_ImageProxyURL *string `json:"ImageProxyURL" mapstructure:"ImageProxyURL"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_ImageProxyOptions *string `json:"ImageProxyOptions" mapstructure:"ImageProxyOptions"` // This field is deprecated and must not be used. 
EnableAPITeamDeletion *bool - ExperimentalEnableHardenedMode *bool - DisableLegacyMFA *bool `restricted:"true"` - ExperimentalStrictCSRFEnforcement *bool `restricted:"true"` - EnableEmailInvitations *bool - DisableBotsWhenOwnerIsDeactivated *bool `restricted:"true"` - EnableBotAccountCreation *bool - EnableSVGs *bool - EnableLatex *bool + EnableAPIUserDeletion *bool + ExperimentalEnableHardenedMode *bool `access:"experimental"` + DisableLegacyMFA *bool `access:"write_restrictable"` + ExperimentalStrictCSRFEnforcement *bool `access:"experimental,write_restrictable"` + EnableEmailInvitations *bool `access:"authentication"` + DisableBotsWhenOwnerIsDeactivated *bool `access:"integrations,write_restrictable"` + EnableBotAccountCreation *bool `access:"integrations"` + EnableSVGs *bool `access:"site"` + EnableLatex *bool `access:"site"` + EnableAPIChannelDeletion *bool EnableLocalMode *bool LocalModeSocketLocation *string } @@ -614,7 +620,7 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { } if s.EnableCustomEmoji == nil { - s.EnableCustomEmoji = NewBool(false) + s.EnableCustomEmoji = NewBool(true) } if s.EnableEmojiPicker == nil { @@ -622,7 +628,7 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { } if s.EnableGifPicker == nil { - s.EnableGifPicker = NewBool(false) + s.EnableGifPicker = NewBool(true) } if s.GfycatApiKey == nil || *s.GfycatApiKey == "" { @@ -678,6 +684,10 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { s.ExperimentalChannelSidebarOrganization = NewString("disabled") } + if s.ExperimentalDataPrefetch == nil { + s.ExperimentalDataPrefetch = NewBool(true) + } + if s.DEPRECATED_DO_NOT_USE_ImageProxyType == nil { s.DEPRECATED_DO_NOT_USE_ImageProxyType = NewString("") } @@ -694,6 +704,14 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { s.EnableAPITeamDeletion = NewBool(false) } + if s.EnableAPIUserDeletion == nil { + s.EnableAPIUserDeletion = NewBool(false) + } + + if s.EnableAPIChannelDeletion == nil { + s.EnableAPIChannelDeletion = NewBool(false) + } + if s.ExperimentalEnableHardenedMode == nil { s.ExperimentalEnableHardenedMode = NewBool(false) } @@ -740,20 +758,21 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { } type ClusterSettings struct { - Enable *bool `restricted:"true"` - ClusterName *string `restricted:"true"` - OverrideHostname *string `restricted:"true"` - NetworkInterface *string `restricted:"true"` - BindAddress *string `restricted:"true"` - AdvertiseAddress *string `restricted:"true"` - UseIpAddress *bool `restricted:"true"` - UseExperimentalGossip *bool `restricted:"true"` - ReadOnlyConfig *bool `restricted:"true"` - GossipPort *int `restricted:"true"` - StreamingPort *int `restricted:"true"` - MaxIdleConns *int `restricted:"true"` - MaxIdleConnsPerHost *int `restricted:"true"` - IdleConnTimeoutMilliseconds *int `restricted:"true"` + Enable *bool `access:"environment,write_restrictable"` + ClusterName *string `access:"environment,write_restrictable"` + OverrideHostname *string `access:"environment,write_restrictable"` + NetworkInterface *string `access:"environment,write_restrictable"` + BindAddress *string `access:"environment,write_restrictable"` + AdvertiseAddress *string `access:"environment,write_restrictable"` + UseIpAddress *bool `access:"environment,write_restrictable"` + UseExperimentalGossip *bool `access:"environment,write_restrictable"` + EnableExperimentalGossipEncryption *bool `access:"environment,write_restrictable"` + ReadOnlyConfig *bool `access:"environment,write_restrictable"` + GossipPort *int 
`access:"environment,write_restrictable"` + StreamingPort *int `access:"environment,write_restrictable"` + MaxIdleConns *int `access:"environment,write_restrictable"` + MaxIdleConnsPerHost *int `access:"environment,write_restrictable"` + IdleConnTimeoutMilliseconds *int `access:"environment,write_restrictable"` } func (s *ClusterSettings) SetDefaults() { @@ -789,6 +808,10 @@ func (s *ClusterSettings) SetDefaults() { s.UseExperimentalGossip = NewBool(false) } + if s.EnableExperimentalGossipEncryption == nil { + s.EnableExperimentalGossipEncryption = NewBool(false) + } + if s.ReadOnlyConfig == nil { s.ReadOnlyConfig = NewBool(true) } @@ -815,9 +838,9 @@ func (s *ClusterSettings) SetDefaults() { } type MetricsSettings struct { - Enable *bool `restricted:"true"` - BlockProfileRate *int `restricted:"true"` - ListenAddress *string `restricted:"true"` + Enable *bool `access:"environment,write_restrictable"` + BlockProfileRate *int `access:"environment,write_restrictable"` + ListenAddress *string `access:"environment,write_restrictable"` } func (s *MetricsSettings) SetDefaults() { @@ -835,12 +858,14 @@ func (s *MetricsSettings) SetDefaults() { } type ExperimentalSettings struct { - ClientSideCertEnable *bool - ClientSideCertCheck *string - EnableClickToReply *bool `restricted:"true"` - LinkMetadataTimeoutMilliseconds *int64 `restricted:"true"` - RestrictSystemAdmin *bool `restricted:"true"` - UseNewSAMLLibrary *bool + ClientSideCertEnable *bool `access:"experimental"` + ClientSideCertCheck *string `access:"experimental"` + EnableClickToReply *bool `access:"experimental,write_restrictable"` + LinkMetadataTimeoutMilliseconds *int64 `access:"experimental,write_restrictable"` + RestrictSystemAdmin *bool `access:"experimental,write_restrictable"` + UseNewSAMLLibrary *bool `access:"experimental"` + CloudUserLimit *int64 `access:"experimental,write_restrictable"` + CloudBilling *bool `access:"experimental,write_restrictable"` } func (s *ExperimentalSettings) SetDefaults() { @@ -863,13 +888,23 @@ func (s *ExperimentalSettings) SetDefaults() { if s.RestrictSystemAdmin == nil { s.RestrictSystemAdmin = NewBool(false) } + + if s.CloudUserLimit == nil { + // User limit 0 is treated as no limit + s.CloudUserLimit = NewInt64(0) + } + + if s.CloudBilling == nil { + s.CloudBilling = NewBool(false) + } + if s.UseNewSAMLLibrary == nil { s.UseNewSAMLLibrary = NewBool(false) } } type AnalyticsSettings struct { - MaxUsersForStatistics *int `restricted:"true"` + MaxUsersForStatistics *int `access:"write_restrictable"` } func (s *AnalyticsSettings) SetDefaults() { @@ -879,13 +914,13 @@ func (s *AnalyticsSettings) SetDefaults() { } type SSOSettings struct { - Enable *bool - Secret *string - Id *string - Scope *string - AuthEndpoint *string - TokenEndpoint *string - UserApiEndpoint *string + Enable *bool `access:"authentication"` + Secret *string `access:"authentication"` + Id *string `access:"authentication"` + Scope *string `access:"authentication"` + AuthEndpoint *string `access:"authentication"` + TokenEndpoint *string `access:"authentication"` + UserApiEndpoint *string `access:"authentication"` } func (s *SSOSettings) setDefaults(scope, authEndpoint, tokenEndpoint, userApiEndpoint string) { @@ -919,14 +954,14 @@ func (s *SSOSettings) setDefaults(scope, authEndpoint, tokenEndpoint, userApiEnd } type Office365Settings struct { - Enable *bool - Secret *string - Id *string - Scope *string - AuthEndpoint *string - TokenEndpoint *string - UserApiEndpoint *string - DirectoryId *string + Enable *bool `access:"authentication"` 
+ Secret *string `access:"authentication"` + Id *string `access:"authentication"` + Scope *string `access:"authentication"` + AuthEndpoint *string `access:"authentication"` + TokenEndpoint *string `access:"authentication"` + UserApiEndpoint *string `access:"authentication"` + DirectoryId *string `access:"authentication"` } func (s *Office365Settings) setDefaults() { @@ -976,17 +1011,17 @@ func (s *Office365Settings) SSOSettings() *SSOSettings { } type SqlSettings struct { - DriverName *string `restricted:"true"` - DataSource *string `restricted:"true"` - DataSourceReplicas []string `restricted:"true"` - DataSourceSearchReplicas []string `restricted:"true"` - MaxIdleConns *int `restricted:"true"` - ConnMaxLifetimeMilliseconds *int `restricted:"true"` - MaxOpenConns *int `restricted:"true"` - Trace *bool `restricted:"true"` - AtRestEncryptKey *string `restricted:"true"` - QueryTimeout *int `restricted:"true"` - DisableDatabaseSearch *bool `restricted:"true"` + DriverName *string `access:"environment,write_restrictable"` + DataSource *string `access:"environment,write_restrictable"` + DataSourceReplicas []string `access:"environment,write_restrictable"` + DataSourceSearchReplicas []string `access:"environment,write_restrictable"` + MaxIdleConns *int `access:"environment,write_restrictable"` + ConnMaxLifetimeMilliseconds *int `access:"environment,write_restrictable"` + MaxOpenConns *int `access:"environment,write_restrictable"` + Trace *bool `access:"environment,write_restrictable"` + AtRestEncryptKey *string `access:"environment,write_restrictable"` + QueryTimeout *int `access:"environment,write_restrictable"` + DisableDatabaseSearch *bool `access:"environment,write_restrictable"` } func (s *SqlSettings) SetDefaults(isUpdate bool) { @@ -1042,15 +1077,17 @@ func (s *SqlSettings) SetDefaults(isUpdate bool) { } type LogSettings struct { - EnableConsole *bool `restricted:"true"` - ConsoleLevel *string `restricted:"true"` - ConsoleJson *bool `restricted:"true"` - EnableFile *bool `restricted:"true"` - FileLevel *string `restricted:"true"` - FileJson *bool `restricted:"true"` - FileLocation *string `restricted:"true"` - EnableWebhookDebugging *bool `restricted:"true"` - EnableDiagnostics *bool `restricted:"true"` + EnableConsole *bool `access:"environment,write_restrictable"` + ConsoleLevel *string `access:"environment,write_restrictable"` + ConsoleJson *bool `access:"environment,write_restrictable"` + EnableFile *bool `access:"environment,write_restrictable"` + FileLevel *string `access:"environment,write_restrictable"` + FileJson *bool `access:"environment,write_restrictable"` + FileLocation *string `access:"environment,write_restrictable"` + EnableWebhookDebugging *bool `access:"environment,write_restrictable"` + EnableDiagnostics *bool `access:"environment,write_restrictable"` + EnableSentry *bool `access:"environment,write_restrictable"` + AdvancedLoggingConfig *string `access:"environment,write_restrictable"` } func (s *LogSettings) SetDefaults() { @@ -1082,6 +1119,10 @@ func (s *LogSettings) SetDefaults() { s.EnableDiagnostics = NewBool(true) } + if s.EnableSentry == nil { + s.EnableSentry = NewBool(*s.EnableDiagnostics) + } + if s.ConsoleJson == nil { s.ConsoleJson = NewBool(true) } @@ -1089,55 +1130,24 @@ func (s *LogSettings) SetDefaults() { if s.FileJson == nil { s.FileJson = NewBool(true) } + + if s.AdvancedLoggingConfig == nil { + s.AdvancedLoggingConfig = NewString("") + } } type ExperimentalAuditSettings struct { - SysLogEnabled *bool `restricted:"true"` - SysLogIP *string 
`restricted:"true"` - SysLogPort *int `restricted:"true"` - SysLogTag *string `restricted:"true"` - SysLogCert *string `restricted:"true"` - SysLogInsecure *bool `restricted:"true"` - SysLogMaxQueueSize *int `restricted:"true"` - - FileEnabled *bool `restricted:"true"` - FileName *string `restricted:"true"` - FileMaxSizeMB *int `restricted:"true"` - FileMaxAgeDays *int `restricted:"true"` - FileMaxBackups *int `restricted:"true"` - FileCompress *bool `restricted:"true"` - FileMaxQueueSize *int `restricted:"true"` + FileEnabled *bool `access:"experimental,write_restrictable"` + FileName *string `access:"experimental,write_restrictable"` + FileMaxSizeMB *int `access:"experimental,write_restrictable"` + FileMaxAgeDays *int `access:"experimental,write_restrictable"` + FileMaxBackups *int `access:"experimental,write_restrictable"` + FileCompress *bool `access:"experimental,write_restrictable"` + FileMaxQueueSize *int `access:"experimental,write_restrictable"` + AdvancedLoggingConfig *string `access:"experimental,write_restrictable"` } func (s *ExperimentalAuditSettings) SetDefaults() { - if s.SysLogEnabled == nil { - s.SysLogEnabled = NewBool(false) - } - - if s.SysLogIP == nil { - s.SysLogIP = NewString("localhost") - } - - if s.SysLogPort == nil { - s.SysLogPort = NewInt(6514) - } - - if s.SysLogTag == nil { - s.SysLogTag = NewString("") - } - - if s.SysLogCert == nil { - s.SysLogCert = NewString("") - } - - if s.SysLogInsecure == nil { - s.SysLogInsecure = NewBool(false) - } - - if s.SysLogMaxQueueSize == nil { - s.SysLogMaxQueueSize = NewInt(1000) - } - if s.FileEnabled == nil { s.FileEnabled = NewBool(false) } @@ -1165,16 +1175,21 @@ func (s *ExperimentalAuditSettings) SetDefaults() { if s.FileMaxQueueSize == nil { s.FileMaxQueueSize = NewInt(1000) } + + if s.AdvancedLoggingConfig == nil { + s.AdvancedLoggingConfig = NewString("") + } } type NotificationLogSettings struct { - EnableConsole *bool `restricted:"true"` - ConsoleLevel *string `restricted:"true"` - ConsoleJson *bool `restricted:"true"` - EnableFile *bool `restricted:"true"` - FileLevel *string `restricted:"true"` - FileJson *bool `restricted:"true"` - FileLocation *string `restricted:"true"` + EnableConsole *bool `access:"write_restrictable"` + ConsoleLevel *string `access:"write_restrictable"` + ConsoleJson *bool `access:"write_restrictable"` + EnableFile *bool `access:"write_restrictable"` + FileLevel *string `access:"write_restrictable"` + FileJson *bool `access:"write_restrictable"` + FileLocation *string `access:"write_restrictable"` + AdvancedLoggingConfig *string `access:"write_restrictable"` } func (s *NotificationLogSettings) SetDefaults() { @@ -1205,14 +1220,18 @@ func (s *NotificationLogSettings) SetDefaults() { if s.FileJson == nil { s.FileJson = NewBool(true) } + + if s.AdvancedLoggingConfig == nil { + s.AdvancedLoggingConfig = NewString("") + } } type PasswordSettings struct { - MinimumLength *int - Lowercase *bool - Number *bool - Uppercase *bool - Symbol *bool + MinimumLength *int `access:"authentication"` + Lowercase *bool `access:"authentication"` + Number *bool `access:"authentication"` + Uppercase *bool `access:"authentication"` + Symbol *bool `access:"authentication"` } func (s *PasswordSettings) SetDefaults() { @@ -1238,24 +1257,25 @@ func (s *PasswordSettings) SetDefaults() { } type FileSettings struct { - EnableFileAttachments *bool - EnableMobileUpload *bool - EnableMobileDownload *bool - MaxFileSize *int64 - DriverName *string `restricted:"true"` - Directory *string `restricted:"true"` - 
EnablePublicLink *bool - PublicLinkSalt *string - InitialFont *string - AmazonS3AccessKeyId *string `restricted:"true"` - AmazonS3SecretAccessKey *string `restricted:"true"` - AmazonS3Bucket *string `restricted:"true"` - AmazonS3Region *string `restricted:"true"` - AmazonS3Endpoint *string `restricted:"true"` - AmazonS3SSL *bool `restricted:"true"` - AmazonS3SignV2 *bool `restricted:"true"` - AmazonS3SSE *bool `restricted:"true"` - AmazonS3Trace *bool `restricted:"true"` + EnableFileAttachments *bool `access:"site"` + EnableMobileUpload *bool `access:"site"` + EnableMobileDownload *bool `access:"site"` + MaxFileSize *int64 `access:"environment"` + DriverName *string `access:"environment,write_restrictable"` + Directory *string `access:"environment,write_restrictable"` + EnablePublicLink *bool `access:"site"` + PublicLinkSalt *string `access:"site"` + InitialFont *string `access:"environment"` + AmazonS3AccessKeyId *string `access:"environment,write_restrictable"` + AmazonS3SecretAccessKey *string `access:"environment,write_restrictable"` + AmazonS3Bucket *string `access:"environment,write_restrictable"` + AmazonS3PathPrefix *string `access:"environment,write_restrictable"` + AmazonS3Region *string `access:"environment,write_restrictable"` + AmazonS3Endpoint *string `access:"environment,write_restrictable"` + AmazonS3SSL *bool `access:"environment,write_restrictable"` + AmazonS3SignV2 *bool `access:"environment,write_restrictable"` + AmazonS3SSE *bool `access:"environment,write_restrictable"` + AmazonS3Trace *bool `access:"environment,write_restrictable"` } func (s *FileSettings) SetDefaults(isUpdate bool) { @@ -1279,7 +1299,7 @@ func (s *FileSettings) SetDefaults(isUpdate bool) { s.DriverName = NewString(IMAGE_DRIVER_LOCAL) } - if s.Directory == nil { + if s.Directory == nil || *s.Directory == "" { s.Directory = NewString(FILE_SETTINGS_DEFAULT_DIRECTORY) } @@ -1314,6 +1334,10 @@ func (s *FileSettings) SetDefaults(isUpdate bool) { s.AmazonS3Bucket = NewString("") } + if s.AmazonS3PathPrefix == nil { + s.AmazonS3PathPrefix = NewString("") + } + if s.AmazonS3Region == nil { s.AmazonS3Region = NewString("") } @@ -1342,35 +1366,36 @@ func (s *FileSettings) SetDefaults(isUpdate bool) { } type EmailSettings struct { - EnableSignUpWithEmail *bool - EnableSignInWithEmail *bool - EnableSignInWithUsername *bool - SendEmailNotifications *bool - UseChannelInEmailNotifications *bool - RequireEmailVerification *bool - FeedbackName *string - FeedbackEmail *string - ReplyToAddress *string - FeedbackOrganization *string - EnableSMTPAuth *bool `restricted:"true"` - SMTPUsername *string `restricted:"true"` - SMTPPassword *string `restricted:"true"` - SMTPServer *string `restricted:"true"` - SMTPPort *string `restricted:"true"` + EnableSignUpWithEmail *bool `access:"authentication"` + EnableSignInWithEmail *bool `access:"authentication"` + EnableSignInWithUsername *bool `access:"authentication"` + SendEmailNotifications *bool `access:"site"` + UseChannelInEmailNotifications *bool `access:"experimental"` + RequireEmailVerification *bool `access:"authentication"` + FeedbackName *string `access:"site"` + FeedbackEmail *string `access:"site"` + ReplyToAddress *string `access:"site"` + FeedbackOrganization *string `access:"site"` + EnableSMTPAuth *bool `access:"environment,write_restrictable"` + SMTPUsername *string `access:"environment,write_restrictable"` + SMTPPassword *string `access:"environment,write_restrictable"` + SMTPServer *string `access:"environment,write_restrictable"` + SMTPPort *string 
`access:"environment,write_restrictable"` SMTPServerTimeout *int - ConnectionSecurity *string `restricted:"true"` - SendPushNotifications *bool - PushNotificationServer *string - PushNotificationContents *string - EnableEmailBatching *bool - EmailBatchingBufferSize *int - EmailBatchingInterval *int - EnablePreviewModeBanner *bool - SkipServerCertificateVerification *bool `restricted:"true"` - EmailNotificationContentsType *string - LoginButtonColor *string - LoginButtonBorderColor *string - LoginButtonTextColor *string + ConnectionSecurity *string `access:"environment,write_restrictable"` + SendPushNotifications *bool `access:"environment"` + PushNotificationServer *string `access:"environment"` + PushNotificationContents *string `access:"site"` + PushNotificationBuffer *int + EnableEmailBatching *bool `access:"site"` + EmailBatchingBufferSize *int `access:"experimental"` + EmailBatchingInterval *int `access:"experimental"` + EnablePreviewModeBanner *bool `access:"site"` + SkipServerCertificateVerification *bool `access:"environment,write_restrictable"` + EmailNotificationContentsType *string `access:"site"` + LoginButtonColor *string `access:"experimental"` + LoginButtonBorderColor *string `access:"experimental"` + LoginButtonTextColor *string `access:"experimental"` } func (s *EmailSettings) SetDefaults(isUpdate bool) { @@ -1462,6 +1487,10 @@ func (s *EmailSettings) SetDefaults(isUpdate bool) { s.PushNotificationContents = NewString(FULL_NOTIFICATION) } + if s.PushNotificationBuffer == nil { + s.PushNotificationBuffer = NewInt(1000) + } + if s.EnableEmailBatching == nil { s.EnableEmailBatching = NewBool(false) } @@ -1512,13 +1541,13 @@ func (s *EmailSettings) SetDefaults(isUpdate bool) { } type RateLimitSettings struct { - Enable *bool `restricted:"true"` - PerSec *int `restricted:"true"` - MaxBurst *int `restricted:"true"` - MemoryStoreSize *int `restricted:"true"` - VaryByRemoteAddr *bool `restricted:"true"` - VaryByUser *bool `restricted:"true"` - VaryByHeader string `restricted:"true"` + Enable *bool `access:"environment,write_restrictable"` + PerSec *int `access:"environment,write_restrictable"` + MaxBurst *int `access:"environment,write_restrictable"` + MemoryStoreSize *int `access:"environment,write_restrictable"` + VaryByRemoteAddr *bool `access:"environment,write_restrictable"` + VaryByUser *bool `access:"environment,write_restrictable"` + VaryByHeader string `access:"environment,write_restrictable"` } func (s *RateLimitSettings) SetDefaults() { @@ -1548,8 +1577,8 @@ func (s *RateLimitSettings) SetDefaults() { } type PrivacySettings struct { - ShowEmailAddress *bool - ShowFullName *bool + ShowEmailAddress *bool `access:"site"` + ShowFullName *bool `access:"site"` } func (s *PrivacySettings) setDefaults() { @@ -1563,14 +1592,15 @@ func (s *PrivacySettings) setDefaults() { } type SupportSettings struct { - TermsOfServiceLink *string `restricted:"true"` - PrivacyPolicyLink *string `restricted:"true"` - AboutLink *string `restricted:"true"` - HelpLink *string `restricted:"true"` - ReportAProblemLink *string `restricted:"true"` - SupportEmail *string - CustomTermsOfServiceEnabled *bool - CustomTermsOfServiceReAcceptancePeriod *int + TermsOfServiceLink *string `access:"site,write_restrictable"` + PrivacyPolicyLink *string `access:"site,write_restrictable"` + AboutLink *string `access:"site,write_restrictable"` + HelpLink *string `access:"site,write_restrictable"` + ReportAProblemLink *string `access:"site,write_restrictable"` + SupportEmail *string `access:"site"` + 
CustomTermsOfServiceEnabled *bool `access:"compliance"` + CustomTermsOfServiceReAcceptancePeriod *int `access:"compliance"` + EnableAskCommunityLink *bool `access:"site"` } func (s *SupportSettings) SetDefaults() { @@ -1625,14 +1655,23 @@ func (s *SupportSettings) SetDefaults() { if s.CustomTermsOfServiceReAcceptancePeriod == nil { s.CustomTermsOfServiceReAcceptancePeriod = NewInt(SUPPORT_SETTINGS_DEFAULT_RE_ACCEPTANCE_PERIOD) } + + if s.EnableAskCommunityLink == nil { + s.EnableAskCommunityLink = NewBool(true) + } } type AnnouncementSettings struct { - EnableBanner *bool - BannerText *string - BannerColor *string - BannerTextColor *string - AllowBannerDismissal *bool + EnableBanner *bool `access:"site"` + BannerText *string `access:"site"` + BannerColor *string `access:"site"` + BannerTextColor *string `access:"site"` + AllowBannerDismissal *bool `access:"site"` + AdminNoticesEnabled *bool `access:"site"` + UserNoticesEnabled *bool `access:"site"` + NoticesURL *string `access:"site,write_restrictable"` + NoticesFetchFrequency *int `access:"site,write_restrictable"` + NoticesSkipCache *bool `access:"site,write_restrictable"` } func (s *AnnouncementSettings) SetDefaults() { @@ -1655,12 +1694,30 @@ func (s *AnnouncementSettings) SetDefaults() { if s.AllowBannerDismissal == nil { s.AllowBannerDismissal = NewBool(true) } + + if s.AdminNoticesEnabled == nil { + s.AdminNoticesEnabled = NewBool(true) + } + + if s.UserNoticesEnabled == nil { + s.UserNoticesEnabled = NewBool(true) + } + if s.NoticesURL == nil { + s.NoticesURL = NewString(ANNOUNCEMENT_SETTINGS_DEFAULT_NOTICES_JSON_URL) + } + if s.NoticesSkipCache == nil { + s.NoticesSkipCache = NewBool(false) + } + if s.NoticesFetchFrequency == nil { + s.NoticesFetchFrequency = NewInt(ANNOUNCEMENT_SETTINGS_DEFAULT_NOTICES_FETCH_FREQUENCY_SECONDS) + } + } type ThemeSettings struct { - EnableThemeSelection *bool - DefaultTheme *string - AllowCustomThemes *bool + EnableThemeSelection *bool `access:"experimental"` + DefaultTheme *string `access:"experimental"` + AllowCustomThemes *bool `access:"experimental"` AllowedThemes []string } @@ -1683,38 +1740,38 @@ func (s *ThemeSettings) SetDefaults() { } type TeamSettings struct { - SiteName *string - MaxUsersPerTeam *int - DEPRECATED_DO_NOT_USE_EnableTeamCreation *bool `json:"EnableTeamCreation" mapstructure:"EnableTeamCreation"` // This field is deprecated and must not be used. - EnableUserCreation *bool - EnableOpenServer *bool - EnableUserDeactivation *bool - RestrictCreationToDomains *string - EnableCustomBrand *bool - CustomBrandText *string - CustomDescriptionText *string - RestrictDirectMessage *string - DEPRECATED_DO_NOT_USE_RestrictTeamInvite *string `json:"RestrictTeamInvite" mapstructure:"RestrictTeamInvite"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPublicChannelManagement *string `json:"RestrictPublicChannelManagement" mapstructure:"RestrictPublicChannelManagement"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManagement *string `json:"RestrictPrivateChannelManagement" mapstructure:"RestrictPrivateChannelManagement"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPublicChannelCreation *string `json:"RestrictPublicChannelCreation" mapstructure:"RestrictPublicChannelCreation"` // This field is deprecated and must not be used. 
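[Editor's note - illustrative, not part of the vendored patch] The SetDefaults hunks in this file all follow the same pattern: pointer-typed config fields left nil are filled in with the model's NewBool/NewString/NewInt helpers, as with AllowBannerDismissal above. A self-contained miniature of that pattern, using a hypothetical struct and a local newBool helper standing in for the model's.

package main

import "fmt"

// newBool mirrors the model.NewBool helper used throughout the hunks above.
func newBool(b bool) *bool { return &b }

// exampleSettings is hypothetical; the real structs are the ones in this diff.
type exampleSettings struct {
	AllowBannerDismissal *bool
}

// SetDefaults fills nil pointer fields with their defaults, as the patch does.
func (s *exampleSettings) SetDefaults() {
	if s.AllowBannerDismissal == nil {
		s.AllowBannerDismissal = newBool(true)
	}
}

func main() {
	var s exampleSettings
	s.SetDefaults()
	fmt.Println(*s.AllowBannerDismissal) // true
}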
- DEPRECATED_DO_NOT_USE_RestrictPrivateChannelCreation *string `json:"RestrictPrivateChannelCreation" mapstructure:"RestrictPrivateChannelCreation"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPublicChannelDeletion *string `json:"RestrictPublicChannelDeletion" mapstructure:"RestrictPublicChannelDeletion"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPrivateChannelDeletion *string `json:"RestrictPrivateChannelDeletion" mapstructure:"RestrictPrivateChannelDeletion"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManageMembers *string `json:"RestrictPrivateChannelManageMembers" mapstructure:"RestrictPrivateChannelManageMembers"` // This field is deprecated and must not be used. - EnableXToLeaveChannelsFromLHS *bool - UserStatusAwayTimeout *int64 - MaxChannelsPerTeam *int64 - MaxNotificationsPerChannel *int64 - EnableConfirmNotificationsToChannel *bool - TeammateNameDisplay *string - ExperimentalViewArchivedChannels *bool - ExperimentalEnableAutomaticReplies *bool - ExperimentalHideTownSquareinLHS *bool - ExperimentalTownSquareIsReadOnly *bool - LockTeammateNameDisplay *bool - ExperimentalPrimaryTeam *string - ExperimentalDefaultChannels []string + SiteName *string `access:"site"` + MaxUsersPerTeam *int `access:"site"` + DEPRECATED_DO_NOT_USE_EnableTeamCreation *bool `json:"EnableTeamCreation" mapstructure:"EnableTeamCreation"` // This field is deprecated and must not be used. + EnableUserCreation *bool `access:"authentication"` + EnableOpenServer *bool `access:"authentication"` + EnableUserDeactivation *bool `access:"experimental"` + RestrictCreationToDomains *string `access:"authentication"` + EnableCustomBrand *bool `access:"site"` + CustomBrandText *string `access:"site"` + CustomDescriptionText *string `access:"site"` + RestrictDirectMessage *string `access:"site"` + DEPRECATED_DO_NOT_USE_RestrictTeamInvite *string `json:"RestrictTeamInvite" mapstructure:"RestrictTeamInvite"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPublicChannelManagement *string `json:"RestrictPublicChannelManagement" mapstructure:"RestrictPublicChannelManagement"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManagement *string `json:"RestrictPrivateChannelManagement" mapstructure:"RestrictPrivateChannelManagement"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPublicChannelCreation *string `json:"RestrictPublicChannelCreation" mapstructure:"RestrictPublicChannelCreation"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPrivateChannelCreation *string `json:"RestrictPrivateChannelCreation" mapstructure:"RestrictPrivateChannelCreation"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPublicChannelDeletion *string `json:"RestrictPublicChannelDeletion" mapstructure:"RestrictPublicChannelDeletion"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPrivateChannelDeletion *string `json:"RestrictPrivateChannelDeletion" mapstructure:"RestrictPrivateChannelDeletion"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManageMembers *string `json:"RestrictPrivateChannelManageMembers" mapstructure:"RestrictPrivateChannelManageMembers"` // This field is deprecated and must not be used. 
+ EnableXToLeaveChannelsFromLHS *bool `access:"experimental"` + UserStatusAwayTimeout *int64 `access:"experimental"` + MaxChannelsPerTeam *int64 `access:"site"` + MaxNotificationsPerChannel *int64 `access:"environment"` + EnableConfirmNotificationsToChannel *bool `access:"site"` + TeammateNameDisplay *string `access:"site"` + ExperimentalViewArchivedChannels *bool `access:"experimental,site"` + ExperimentalEnableAutomaticReplies *bool `access:"experimental"` + ExperimentalHideTownSquareinLHS *bool `access:"experimental"` + ExperimentalTownSquareIsReadOnly *bool `access:"experimental"` + LockTeammateNameDisplay *bool `access:"site"` + ExperimentalPrimaryTeam *string `access:"experimental"` + ExperimentalDefaultChannels []string `access:"experimental"` } func (s *TeamSettings) SetDefaults() { @@ -1857,7 +1914,7 @@ func (s *TeamSettings) SetDefaults() { } if s.ExperimentalViewArchivedChannels == nil { - s.ExperimentalViewArchivedChannels = NewBool(false) + s.ExperimentalViewArchivedChannels = NewBool(true) } if s.LockTeammateNameDisplay == nil { @@ -1866,63 +1923,65 @@ func (s *TeamSettings) SetDefaults() { } type ClientRequirements struct { - AndroidLatestVersion string `restricted:"true"` - AndroidMinVersion string `restricted:"true"` - DesktopLatestVersion string `restricted:"true"` - DesktopMinVersion string `restricted:"true"` - IosLatestVersion string `restricted:"true"` - IosMinVersion string `restricted:"true"` + AndroidLatestVersion string `access:"write_restrictable"` + AndroidMinVersion string `access:"write_restrictable"` + DesktopLatestVersion string `access:"write_restrictable"` + DesktopMinVersion string `access:"write_restrictable"` + IosLatestVersion string `access:"write_restrictable"` + IosMinVersion string `access:"write_restrictable"` } type LdapSettings struct { // Basic - Enable *bool - EnableSync *bool - LdapServer *string - LdapPort *int - ConnectionSecurity *string - BaseDN *string - BindUsername *string - BindPassword *string + Enable *bool `access:"authentication"` + EnableSync *bool `access:"authentication"` + LdapServer *string `access:"authentication"` + LdapPort *int `access:"authentication"` + ConnectionSecurity *string `access:"authentication"` + BaseDN *string `access:"authentication"` + BindUsername *string `access:"authentication"` + BindPassword *string `access:"authentication"` // Filtering - UserFilter *string - GroupFilter *string - GuestFilter *string + UserFilter *string `access:"authentication"` + GroupFilter *string `access:"authentication"` + GuestFilter *string `access:"authentication"` EnableAdminFilter *bool AdminFilter *string // Group Mapping - GroupDisplayNameAttribute *string - GroupIdAttribute *string + GroupDisplayNameAttribute *string `access:"authentication"` + GroupIdAttribute *string `access:"authentication"` // User Mapping - FirstNameAttribute *string - LastNameAttribute *string - EmailAttribute *string - UsernameAttribute *string - NicknameAttribute *string - IdAttribute *string - PositionAttribute *string - LoginIdAttribute *string - PictureAttribute *string + FirstNameAttribute *string `access:"authentication"` + LastNameAttribute *string `access:"authentication"` + EmailAttribute *string `access:"authentication"` + UsernameAttribute *string `access:"authentication"` + NicknameAttribute *string `access:"authentication"` + IdAttribute *string `access:"authentication"` + PositionAttribute *string `access:"authentication"` + LoginIdAttribute *string `access:"authentication"` + PictureAttribute *string `access:"authentication"` // 
Synchronization - SyncIntervalMinutes *int + SyncIntervalMinutes *int `access:"authentication"` // Advanced - SkipCertificateVerification *bool - QueryTimeout *int - MaxPageSize *int + SkipCertificateVerification *bool `access:"authentication"` + PublicCertificateFile *string `access:"authentication"` + PrivateKeyFile *string `access:"authentication"` + QueryTimeout *int `access:"authentication"` + MaxPageSize *int `access:"authentication"` // Customization - LoginFieldName *string + LoginFieldName *string `access:"authentication"` - LoginButtonColor *string - LoginButtonBorderColor *string - LoginButtonTextColor *string + LoginButtonColor *string `access:"authentication"` + LoginButtonBorderColor *string `access:"authentication"` + LoginButtonTextColor *string `access:"authentication"` - Trace *bool + Trace *bool `access:"authentication"` } func (s *LdapSettings) SetDefaults() { @@ -1951,6 +2010,14 @@ func (s *LdapSettings) SetDefaults() { s.ConnectionSecurity = NewString("") } + if s.PublicCertificateFile == nil { + s.PublicCertificateFile = NewString("") + } + + if s.PrivateKeyFile == nil { + s.PrivateKeyFile = NewString("") + } + if s.BaseDN == nil { s.BaseDN = NewString("") } @@ -2063,9 +2130,9 @@ func (s *LdapSettings) SetDefaults() { } type ComplianceSettings struct { - Enable *bool - Directory *string - EnableDaily *bool + Enable *bool `access:"compliance"` + Directory *string `access:"compliance"` + EnableDaily *bool `access:"compliance"` } func (s *ComplianceSettings) SetDefaults() { @@ -2083,9 +2150,9 @@ func (s *ComplianceSettings) SetDefaults() { } type LocalizationSettings struct { - DefaultServerLocale *string - DefaultClientLocale *string - AvailableLocales *string + DefaultServerLocale *string `access:"site"` + DefaultClientLocale *string `access:"site"` + AvailableLocales *string `access:"site"` } func (s *LocalizationSettings) SetDefaults() { @@ -2104,48 +2171,48 @@ func (s *LocalizationSettings) SetDefaults() { type SamlSettings struct { // Basic - Enable *bool - EnableSyncWithLdap *bool - EnableSyncWithLdapIncludeAuth *bool + Enable *bool `access:"authentication"` + EnableSyncWithLdap *bool `access:"authentication"` + EnableSyncWithLdapIncludeAuth *bool `access:"authentication"` - Verify *bool - Encrypt *bool - SignRequest *bool + Verify *bool `access:"authentication"` + Encrypt *bool `access:"authentication"` + SignRequest *bool `access:"authentication"` - IdpUrl *string - IdpDescriptorUrl *string - IdpMetadataUrl *string - ServiceProviderIdentifier *string - AssertionConsumerServiceURL *string + IdpUrl *string `access:"authentication"` + IdpDescriptorUrl *string `access:"authentication"` + IdpMetadataUrl *string `access:"authentication"` + ServiceProviderIdentifier *string `access:"authentication"` + AssertionConsumerServiceURL *string `access:"authentication"` - SignatureAlgorithm *string - CanonicalAlgorithm *string + SignatureAlgorithm *string `access:"authentication"` + CanonicalAlgorithm *string `access:"authentication"` - ScopingIDPProviderId *string - ScopingIDPName *string + ScopingIDPProviderId *string `access:"authentication"` + ScopingIDPName *string `access:"authentication"` - IdpCertificateFile *string - PublicCertificateFile *string - PrivateKeyFile *string + IdpCertificateFile *string `access:"authentication"` + PublicCertificateFile *string `access:"authentication"` + PrivateKeyFile *string `access:"authentication"` // User Mapping - IdAttribute *string - GuestAttribute *string + IdAttribute *string `access:"authentication"` + GuestAttribute *string 
`access:"authentication"` EnableAdminAttribute *bool AdminAttribute *string - FirstNameAttribute *string - LastNameAttribute *string - EmailAttribute *string - UsernameAttribute *string - NicknameAttribute *string - LocaleAttribute *string - PositionAttribute *string + FirstNameAttribute *string `access:"authentication"` + LastNameAttribute *string `access:"authentication"` + EmailAttribute *string `access:"authentication"` + UsernameAttribute *string `access:"authentication"` + NicknameAttribute *string `access:"authentication"` + LocaleAttribute *string `access:"authentication"` + PositionAttribute *string `access:"authentication"` - LoginButtonText *string + LoginButtonText *string `access:"authentication"` - LoginButtonColor *string - LoginButtonBorderColor *string - LoginButtonTextColor *string + LoginButtonColor *string `access:"authentication"` + LoginButtonBorderColor *string `access:"authentication"` + LoginButtonTextColor *string `access:"authentication"` } func (s *SamlSettings) SetDefaults() { @@ -2285,9 +2352,9 @@ func (s *SamlSettings) SetDefaults() { } type NativeAppSettings struct { - AppDownloadLink *string `restricted:"true"` - AndroidAppDownloadLink *string `restricted:"true"` - IosAppDownloadLink *string `restricted:"true"` + AppDownloadLink *string `access:"site,write_restrictable"` + AndroidAppDownloadLink *string `access:"site,write_restrictable"` + IosAppDownloadLink *string `access:"site,write_restrictable"` } func (s *NativeAppSettings) SetDefaults() { @@ -2305,27 +2372,27 @@ func (s *NativeAppSettings) SetDefaults() { } type ElasticsearchSettings struct { - ConnectionUrl *string `restricted:"true"` - Username *string `restricted:"true"` - Password *string `restricted:"true"` - EnableIndexing *bool `restricted:"true"` - EnableSearching *bool `restricted:"true"` - EnableAutocomplete *bool `restricted:"true"` - Sniff *bool `restricted:"true"` - PostIndexReplicas *int `restricted:"true"` - PostIndexShards *int `restricted:"true"` - ChannelIndexReplicas *int `restricted:"true"` - ChannelIndexShards *int `restricted:"true"` - UserIndexReplicas *int `restricted:"true"` - UserIndexShards *int `restricted:"true"` - AggregatePostsAfterDays *int `restricted:"true"` - PostsAggregatorJobStartTime *string `restricted:"true"` - IndexPrefix *string `restricted:"true"` - LiveIndexingBatchSize *int `restricted:"true"` - BulkIndexingTimeWindowSeconds *int `restricted:"true"` - RequestTimeoutSeconds *int `restricted:"true"` - SkipTLSVerification *bool `restricted:"true"` - Trace *string `restricted:"true"` + ConnectionUrl *string `access:"environment,write_restrictable"` + Username *string `access:"environment,write_restrictable"` + Password *string `access:"environment,write_restrictable"` + EnableIndexing *bool `access:"environment,write_restrictable"` + EnableSearching *bool `access:"environment,write_restrictable"` + EnableAutocomplete *bool `access:"environment,write_restrictable"` + Sniff *bool `access:"environment,write_restrictable"` + PostIndexReplicas *int `access:"environment,write_restrictable"` + PostIndexShards *int `access:"environment,write_restrictable"` + ChannelIndexReplicas *int `access:"environment,write_restrictable"` + ChannelIndexShards *int `access:"environment,write_restrictable"` + UserIndexReplicas *int `access:"environment,write_restrictable"` + UserIndexShards *int `access:"environment,write_restrictable"` + AggregatePostsAfterDays *int `access:"environment,write_restrictable"` + PostsAggregatorJobStartTime *string 
`access:"environment,write_restrictable"` + IndexPrefix *string `access:"environment,write_restrictable"` + LiveIndexingBatchSize *int `access:"environment,write_restrictable"` + BulkIndexingTimeWindowSeconds *int `access:"environment,write_restrictable"` + RequestTimeoutSeconds *int `access:"environment,write_restrictable"` + SkipTLSVerification *bool `access:"environment,write_restrictable"` + Trace *string `access:"environment,write_restrictable"` } func (s *ElasticsearchSettings) SetDefaults() { @@ -2415,11 +2482,11 @@ func (s *ElasticsearchSettings) SetDefaults() { } type BleveSettings struct { - IndexDir *string - EnableIndexing *bool - EnableSearching *bool - EnableAutocomplete *bool - BulkIndexingTimeWindowSeconds *int + IndexDir *string `access:"experimental"` + EnableIndexing *bool `access:"experimental"` + EnableSearching *bool `access:"experimental"` + EnableAutocomplete *bool `access:"experimental"` + BulkIndexingTimeWindowSeconds *int `access:"experimental"` } func (bs *BleveSettings) SetDefaults() { @@ -2445,11 +2512,11 @@ func (bs *BleveSettings) SetDefaults() { } type DataRetentionSettings struct { - EnableMessageDeletion *bool - EnableFileDeletion *bool - MessageRetentionDays *int - FileRetentionDays *int - DeletionJobStartTime *string + EnableMessageDeletion *bool `access:"compliance"` + EnableFileDeletion *bool `access:"compliance"` + MessageRetentionDays *int `access:"compliance"` + FileRetentionDays *int `access:"compliance"` + DeletionJobStartTime *string `access:"compliance"` } func (s *DataRetentionSettings) SetDefaults() { @@ -2475,8 +2542,8 @@ func (s *DataRetentionSettings) SetDefaults() { } type JobSettings struct { - RunJobs *bool `restricted:"true"` - RunScheduler *bool `restricted:"true"` + RunJobs *bool `access:"write_restrictable"` + RunScheduler *bool `access:"write_restrictable"` } func (s *JobSettings) SetDefaults() { @@ -2494,20 +2561,20 @@ type PluginState struct { } type PluginSettings struct { - Enable *bool - EnableUploads *bool `restricted:"true"` - AllowInsecureDownloadUrl *bool `restricted:"true"` - EnableHealthCheck *bool `restricted:"true"` - Directory *string `restricted:"true"` - ClientDirectory *string `restricted:"true"` - Plugins map[string]map[string]interface{} - PluginStates map[string]*PluginState - EnableMarketplace *bool - EnableRemoteMarketplace *bool - AutomaticPrepackagedPlugins *bool - RequirePluginSignature *bool - MarketplaceUrl *string - SignaturePublicKeyFiles []string + Enable *bool `access:"plugins"` + EnableUploads *bool `access:"plugins,write_restrictable"` + AllowInsecureDownloadUrl *bool `access:"plugins,write_restrictable"` + EnableHealthCheck *bool `access:"plugins,write_restrictable"` + Directory *string `access:"plugins,write_restrictable"` + ClientDirectory *string `access:"plugins,write_restrictable"` + Plugins map[string]map[string]interface{} `access:"plugins"` + PluginStates map[string]*PluginState `access:"plugins"` + EnableMarketplace *bool `access:"plugins"` + EnableRemoteMarketplace *bool `access:"plugins"` + AutomaticPrepackagedPlugins *bool `access:"plugins"` + RequirePluginSignature *bool `access:"plugins"` + MarketplaceUrl *string `access:"plugins"` + SignaturePublicKeyFiles []string `access:"plugins"` } func (s *PluginSettings) SetDefaults(ls LogSettings) { @@ -2574,10 +2641,11 @@ func (s *PluginSettings) SetDefaults(ls LogSettings) { } type GlobalRelayMessageExportSettings struct { - CustomerType *string // must be either A9 or A10, dictates SMTP server url - SmtpUsername *string - SmtpPassword 
*string
- EmailAddress *string // the address to send messages to
+ CustomerType *string `access:"compliance"` // must be either A9 or A10, dictates SMTP server url
+ SmtpUsername *string `access:"compliance"`
+ SmtpPassword *string `access:"compliance"`
+ EmailAddress *string `access:"compliance"` // the address to send messages to
+ SMTPServerTimeout *int `access:"compliance"`
}
func (s *GlobalRelayMessageExportSettings) SetDefaults() {
@@ -2593,14 +2661,18 @@ func (s *GlobalRelayMessageExportSettings) SetDefaults() {
if s.EmailAddress == nil {
s.EmailAddress = NewString("")
}
+ if s.SMTPServerTimeout == nil || *s.SMTPServerTimeout == 0 {
+ s.SMTPServerTimeout = NewInt(1800)
+ }
}
type MessageExportSettings struct {
- EnableExport *bool
- ExportFormat *string
- DailyRunTime *string
- ExportFromTimestamp *int64
- BatchSize *int
+ EnableExport *bool `access:"compliance"`
+ ExportFormat *string `access:"compliance"`
+ DailyRunTime *string `access:"compliance"`
+ ExportFromTimestamp *int64 `access:"compliance"`
+ BatchSize *int `access:"compliance"`
+ DownloadExportResults *bool `access:"compliance"`
// formatter-specific settings - these are only expected to be non-nil if ExportFormat is set to the associated format
GlobalRelaySettings *GlobalRelayMessageExportSettings
@@ -2611,6 +2683,10 @@ func (s *MessageExportSettings) SetDefaults() {
s.EnableExport = NewBool(false)
}
+ if s.DownloadExportResults == nil {
+ s.DownloadExportResults = NewBool(false)
+ }
+
if s.ExportFormat == nil {
s.ExportFormat = NewString(COMPLIANCE_EXPORT_TYPE_ACTIANCE)
}
@@ -2634,8 +2710,8 @@ func (s *MessageExportSettings) SetDefaults() {
}
type DisplaySettings struct {
- CustomUrlSchemes []string
- ExperimentalTimezone *bool
+ CustomUrlSchemes []string `access:"site"`
+ ExperimentalTimezone *bool `access:"experimental"`
}
func (s *DisplaySettings) SetDefaults() {
@@ -2645,15 +2721,15 @@ func (s *DisplaySettings) SetDefaults() {
}
if s.ExperimentalTimezone == nil {
- s.ExperimentalTimezone = NewBool(false)
+ s.ExperimentalTimezone = NewBool(true)
}
}
type GuestAccountsSettings struct {
- Enable *bool
- AllowEmailAccounts *bool
- EnforceMultifactorAuthentication *bool
- RestrictCreationToDomains *string
+ Enable *bool `access:"authentication"`
+ AllowEmailAccounts *bool `access:"authentication"`
+ EnforceMultifactorAuthentication *bool `access:"authentication"`
+ RestrictCreationToDomains *string `access:"authentication"`
}
func (s *GuestAccountsSettings) SetDefaults() {
@@ -2675,10 +2751,10 @@ func (s *GuestAccountsSettings) SetDefaults() {
}
type ImageProxySettings struct {
- Enable *bool
- ImageProxyType *string
- RemoteImageProxyURL *string
- RemoteImageProxyOptions *string
+ Enable *bool `access:"environment"`
+ ImageProxyType *string `access:"environment"`
+ RemoteImageProxyURL *string `access:"environment"`
+ RemoteImageProxyOptions *string `access:"environment"`
}
func (s *ImageProxySettings) SetDefaults(ss ServiceSettings) {
@@ -2717,6 +2793,35 @@ func (s *ImageProxySettings) SetDefaults(ss ServiceSettings) {
type ConfigFunc func() *Config
+const ConfigAccessTagWriteRestrictable = "write_restrictable"
+
+// Config fields support the 'access' tag with the following values corresponding to the suffix of the associated
+// PERMISSION_SYSCONSOLE_*_* permission Id: 'about', 'reporting', 'user_management_users',
+// 'user_management_groups', 'user_management_teams', 'user_management_channels',
+// 'user_management_permissions', 'environment', 'site', 'authentication', 'plugins',
+// 'integrations', 'compliance', 'plugins', and 'experimental'. They grant read and/or write access to the config field
+// to roles without PERMISSION_MANAGE_SYSTEM.
+//
+// By default config values can be written with PERMISSION_MANAGE_SYSTEM, but if ExperimentalSettings.RestrictSystemAdmin is true
+// and the access tag contains the value 'write_restrictable', then even PERMISSION_MANAGE_SYSTEM does not grant write access.
+//
+// PERMISSION_MANAGE_SYSTEM always grants read access.
+//
+// Example:
+// type HairSettings struct {
+// // Colour is writeable with either PERMISSION_SYSCONSOLE_WRITE_REPORTING or PERMISSION_SYSCONSOLE_WRITE_USER_MANAGEMENT_GROUPS.
+// // It is readable by PERMISSION_SYSCONSOLE_READ_REPORTING and PERMISSION_SYSCONSOLE_READ_USER_MANAGEMENT_GROUPS permissions.
+// // PERMISSION_MANAGE_SYSTEM grants read and write access.
+// Colour string `access:"reporting,user_management_groups"`
+//
+//
+// // Length is only readable and writable via PERMISSION_MANAGE_SYSTEM.
+// Length string
+//
+// // Product is only writeable by PERMISSION_MANAGE_SYSTEM if ExperimentalSettings.RestrictSystemAdmin is false.
+// // PERMISSION_MANAGE_SYSTEM can always read the value.
+// Product bool `access:write_restrictable`
+// }
type Config struct {
ServiceSettings ServiceSettings
TeamSettings TeamSettings
@@ -2994,6 +3099,10 @@ func (s *FileSettings) isValid() *AppError {
return NewAppError("Config.IsValid", "model.config.is_valid.file_salt.app_error", nil, "", http.StatusBadRequest)
}
+ if *s.Directory == "" {
+ return NewAppError("Config.IsValid", "model.config.is_valid.directory.app_error", nil, "", http.StatusBadRequest)
+ }
+
return nil
}
@@ -3436,6 +3545,14 @@ func (o *Config) Sanitize() {
*o.GitLabSettings.Secret = FAKE_SETTING
}
+ if o.GoogleSettings.Secret != nil && len(*o.GoogleSettings.Secret) > 0 {
+ *o.GoogleSettings.Secret = FAKE_SETTING
+ }
+
+ if o.Office365Settings.Secret != nil && len(*o.Office365Settings.Secret) > 0 {
+ *o.Office365Settings.Secret = FAKE_SETTING
+ }
+
*o.SqlSettings.DataSource = FAKE_SETTING
*o.SqlSettings.AtRestEncryptKey = FAKE_SETTING
@@ -3448,4 +3565,12 @@ func (o *Config) Sanitize() {
for i := range o.SqlSettings.DataSourceSearchReplicas {
o.SqlSettings.DataSourceSearchReplicas[i] = FAKE_SETTING
}
+
+ if o.MessageExportSettings.GlobalRelaySettings.SmtpPassword != nil && len(*o.MessageExportSettings.GlobalRelaySettings.SmtpPassword) > 0 {
+ *o.MessageExportSettings.GlobalRelaySettings.SmtpPassword = FAKE_SETTING
+ }
+
+ if o.ServiceSettings.GfycatApiSecret != nil && len(*o.ServiceSettings.GfycatApiSecret) > 0 {
+ *o.ServiceSettings.GfycatApiSecret = FAKE_SETTING
+ }
}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go b/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go
index 8a3a5cc0..4b71f5a8 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go
@@ -4,7 +4,6 @@ package model
import (
- "bytes"
"encoding/json"
"image"
"image/gif"
@@ -151,10 +150,10 @@ func NewInfo(name string) *FileInfo {
return info
}
-func GetInfoForBytes(name string, data []byte) (*FileInfo, *AppError) {
+func GetInfoForBytes(name string, data io.ReadSeeker, size int) (*FileInfo, *AppError) {
info := &FileInfo{
Name: name,
- Size: int64(len(data)),
+ Size: int64(size),
}
var err *AppError
@@ -170,16 +169,17 @@ func GetInfoForBytes(name string, data []byte) (*FileInfo, *AppError) {
if info.IsImage() {
// Only set the width and height if it's actually an image
that we can understand - if config, _, err := image.DecodeConfig(bytes.NewReader(data)); err == nil { + if config, _, err := image.DecodeConfig(data); err == nil { info.Width = config.Width info.Height = config.Height if info.MimeType == "image/gif" { // Just show the gif itself instead of a preview image for animated gifs - if gifConfig, err := gif.DecodeAll(bytes.NewReader(data)); err != nil { + data.Seek(0, io.SeekStart) + if gifConfig, err := gif.DecodeAll(data); err != nil { // Still return the rest of the info even though it doesn't appear to be an actual gif info.HasPreviewImage = true - return info, NewAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, "name="+name, http.StatusBadRequest) + return info, NewAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, err.Error(), http.StatusBadRequest) } else { info.HasPreviewImage = len(gifConfig.Image) == 1 } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/group.go b/vendor/github.com/mattermost/mattermost-server/v5/model/group.go index 4de0dcc4..2eda1184 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/group.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/group.go @@ -94,6 +94,11 @@ type PageOpts struct { PerPage int } +type GroupStats struct { + GroupID string `json:"group_id"` + TotalMemberCount int64 `json:"total_member_count"` +} + func (group *Group) Patch(patch *GroupPatch) { if patch.Name != nil { group.Name = patch.Name @@ -208,3 +213,9 @@ func GroupPatchFromJson(data io.Reader) *GroupPatch { json.NewDecoder(data).Decode(&groupPatch) return groupPatch } + +func GroupStatsFromJson(data io.Reader) *GroupStats { + var groupStats *GroupStats + json.NewDecoder(data).Decode(&groupStats) + return groupStats +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/integrity.go b/vendor/github.com/mattermost/mattermost-server/v5/model/integrity.go new file mode 100644 index 00000000..744ad07c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/integrity.go @@ -0,0 +1,58 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
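[Editor's note - illustrative, not part of the vendored patch] The file_info.go hunk earlier in this section changes GetInfoForBytes to take an io.ReadSeeker plus an explicit size instead of a []byte. A usage sketch of how an existing []byte caller could adapt, assuming the import path shown in the diff headers; the file name and data here are made up.

package main

import (
	"bytes"
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	data := []byte("not really an image")
	// bytes.Reader satisfies io.ReadSeeker, so a []byte caller can wrap its
	// slice and pass the length separately to match the new signature.
	info, appErr := model.GetInfoForBytes("notes.txt", bytes.NewReader(data), len(data))
	if appErr != nil {
		fmt.Println("error:", appErr)
		return
	}
	fmt.Println(info.Name, info.Size, info.MimeType)
}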
+ +package model + +import ( + "encoding/json" + "errors" +) + +type OrphanedRecord struct { + ParentId *string `json:"parent_id"` + ChildId *string `json:"child_id"` +} + +type RelationalIntegrityCheckData struct { + ParentName string `json:"parent_name"` + ChildName string `json:"child_name"` + ParentIdAttr string `json:"parent_id_attr"` + ChildIdAttr string `json:"child_id_attr"` + Records []OrphanedRecord `json:"records"` +} + +type IntegrityCheckResult struct { + Data interface{} `json:"data"` + Err error `json:"err"` +} + +func (r *IntegrityCheckResult) UnmarshalJSON(b []byte) error { + var data map[string]interface{} + if err := json.Unmarshal(b, &data); err != nil { + return err + } + if d, ok := data["data"]; ok && d != nil { + var rdata RelationalIntegrityCheckData + m := d.(map[string]interface{}) + rdata.ParentName = m["parent_name"].(string) + rdata.ChildName = m["child_name"].(string) + rdata.ParentIdAttr = m["parent_id_attr"].(string) + rdata.ChildIdAttr = m["child_id_attr"].(string) + for _, recData := range m["records"].([]interface{}) { + var record OrphanedRecord + m := recData.(map[string]interface{}) + if val := m["parent_id"]; val != nil { + record.ParentId = NewString(val.(string)) + } + if val := m["child_id"]; val != nil { + record.ChildId = NewString(val.(string)) + } + rdata.Records = append(rdata.Records, record) + } + r.Data = rdata + } + if err, ok := data["err"]; ok && err != nil { + r.Err = errors.New(data["err"].(string)) + } + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/job.go b/vendor/github.com/mattermost/mattermost-server/v5/model/job.go index e6e1d689..a4bb30a1 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/job.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/job.go @@ -19,6 +19,9 @@ const ( JOB_TYPE_LDAP_SYNC = "ldap_sync" JOB_TYPE_MIGRATIONS = "migrations" JOB_TYPE_PLUGINS = "plugins" + JOB_TYPE_EXPIRY_NOTIFY = "expiry_notify" + JOB_TYPE_PRODUCT_NOTICES = "product_notices" + JOB_TYPE_ACTIVE_USERS = "active_users" JOB_STATUS_PENDING = "pending" JOB_STATUS_IN_PROGRESS = "in_progress" @@ -59,6 +62,9 @@ func (j *Job) IsValid() *AppError { case JOB_TYPE_MESSAGE_EXPORT: case JOB_TYPE_MIGRATIONS: case JOB_TYPE_PLUGINS: + case JOB_TYPE_PRODUCT_NOTICES: + case JOB_TYPE_EXPIRY_NOTIFY: + case JOB_TYPE_ACTIVE_USERS: default: return NewAppError("Job.IsValid", "model.job.is_valid.type.app_error", nil, "id="+j.Id, http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go b/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go index d5f98f1a..4e19c5b1 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go @@ -4,5 +4,7 @@ package model const ( - USER_AUTH_SERVICE_LDAP = "ldap" + USER_AUTH_SERVICE_LDAP = "ldap" + LDAP_PUBIC_CERTIFICATE_NAME = "ldap-public.crt" + LDAP_PRIVATE_KEY_NAME = "ldap-private.key" ) diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/license.go b/vendor/github.com/mattermost/mattermost-server/v5/model/license.go index 0504edc0..3de4aba8 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/license.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/license.go @@ -13,7 +13,7 @@ const ( EXPIRED_LICENSE_ERROR = "api.license.add_license.expired.app_error" INVALID_LICENSE_ERROR = "api.license.add_license.invalid.app_error" LICENSE_GRACE_PERIOD = 1000 * 60 * 60 * 24 * 10 //10 days - 
LICENSE_RENEWAL_LINK = "https://licensing.mattermost.com/renew" + LICENSE_RENEWAL_LINK = "https://mattermost.com/renew/" ) type LicenseRecord struct { @@ -81,6 +81,8 @@ type Features struct { IDLoadedPushNotifications *bool `json:"id_loaded"` LockTeammateNameDisplay *bool `json:"lock_teammate_name_display"` EnterprisePlugins *bool `json:"enterprise_plugins"` + AdvancedLogging *bool `json:"advanced_logging"` + Cloud *bool `json:"cloud"` // after we enabled more features we'll need to control them with this FutureFeatures *bool `json:"future_features"` @@ -108,6 +110,8 @@ func (f *Features) ToMap() map[string]interface{} { "id_loaded": *f.IDLoadedPushNotifications, "lock_teammate_name_display": *f.LockTeammateNameDisplay, "enterprise_plugins": *f.EnterprisePlugins, + "advanced_logging": *f.AdvancedLogging, + "cloud": *f.Cloud, "future": *f.FutureFeatures, } } @@ -212,6 +216,14 @@ func (f *Features) SetDefaults() { if f.EnterprisePlugins == nil { f.EnterprisePlugins = NewBool(*f.FutureFeatures) } + + if f.AdvancedLogging == nil { + f.AdvancedLogging = NewBool(*f.FutureFeatures) + } + + if f.Cloud == nil { + f.Cloud = NewBool(false) + } } func (l *License) IsExpired() bool { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go b/vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go index d20be2cb..6c3e0bd8 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go @@ -171,9 +171,9 @@ func (o *LinkMetadata) DeserializeDataToConcreteType() error { // FloorToNearestHour takes a timestamp (in milliseconds) and returns it rounded to the previous hour in UTC. func FloorToNearestHour(ms int64) int64 { - t := time.Unix(0, ms*int64(1000*1000)) + t := time.Unix(0, ms*int64(1000*1000)).UTC() - return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location()).UnixNano() / int64(time.Millisecond) + return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, time.UTC).UnixNano() / int64(time.Millisecond) } // isRoundedToNearestHour returns true if the given timestamp (in milliseconds) has been rounded to the nearest hour in UTC. 
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go b/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go index 7dd08bef..2e7a0f71 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go @@ -4,6 +4,7 @@ package model const ( + ADVANCED_PERMISSIONS_MIGRATION_KEY = "AdvancedPermissionsMigrationComplete" MIGRATION_KEY_ADVANCED_PERMISSIONS_PHASE_2 = "migration_advanced_permissions_phase_2" MIGRATION_KEY_EMOJI_PERMISSIONS_SPLIT = "emoji_permissions_split" @@ -17,4 +18,7 @@ const ( MIGRATION_KEY_ADD_MANAGE_GUESTS_PERMISSIONS = "add_manage_guests_permissions" MIGRATION_KEY_CHANNEL_MODERATIONS_PERMISSIONS = "channel_moderations_permissions" MIGRATION_KEY_ADD_USE_GROUP_MENTIONS_PERMISSION = "add_use_group_mentions_permission" + MIGRATION_KEY_ADD_SYSTEM_CONSOLE_PERMISSIONS = "add_system_console_permissions" + MIGRATION_KEY_SIDEBAR_CATEGORIES_PHASE_2 = "migration_sidebar_categories_phase_2" + MIGRATION_KEY_ADD_CONVERT_CHANNEL_PERMISSIONS = "add_convert_channel_permissions" ) diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go b/vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go index f4278de0..d6cb2138 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go @@ -112,6 +112,9 @@ func (o *OutgoingWebhookResponse) ToJson() string { func OutgoingWebhookResponseFromJson(data io.Reader) (*OutgoingWebhookResponse, error) { var o *OutgoingWebhookResponse err := json.NewDecoder(data).Decode(&o) + if err == io.EOF { + return nil, nil + } return o, err } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go b/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go index cc3c5a70..cf05c281 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go @@ -4,9 +4,9 @@ package model const ( - PERMISSION_SCOPE_SYSTEM = "system_scope" - PERMISSION_SCOPE_TEAM = "team_scope" - PERMISSION_SCOPE_CHANNEL = "channel_scope" + PermissionScopeSystem = "system_scope" + PermissionScopeTeam = "team_scope" + PermissionScopeChannel = "channel_scope" ) type Permission struct { @@ -25,6 +25,8 @@ var PERMISSION_CREATE_PUBLIC_CHANNEL *Permission var PERMISSION_CREATE_PRIVATE_CHANNEL *Permission var PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS *Permission var PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS *Permission +var PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE *Permission +var PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC *Permission var PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE *Permission var PERMISSION_MANAGE_ROLES *Permission var PERMISSION_MANAGE_TEAM_ROLES *Permission @@ -43,6 +45,8 @@ var PERMISSION_DELETE_PUBLIC_CHANNEL *Permission var PERMISSION_DELETE_PRIVATE_CHANNEL *Permission var PERMISSION_EDIT_OTHER_USERS *Permission var PERMISSION_READ_CHANNEL *Permission +var PERMISSION_READ_PUBLIC_CHANNEL_GROUPS *Permission +var PERMISSION_READ_PRIVATE_CHANNEL_GROUPS *Permission var PERMISSION_READ_PUBLIC_CHANNEL *Permission var PERMISSION_ADD_REACTION *Permission var PERMISSION_REMOVE_REACTION *Permission @@ -76,6 +80,7 @@ var PERMISSION_MANAGE_TEAM *Permission var PERMISSION_IMPORT_TEAM *Permission var PERMISSION_VIEW_TEAM *Permission var PERMISSION_LIST_USERS_WITHOUT_TEAM *Permission 
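[Editor's note - illustrative, not part of the vendored patch] The outgoing_webhook.go hunk above makes OutgoingWebhookResponseFromJson return (nil, nil) when the response body is empty, because json.Decoder reports io.EOF for zero-byte input. A standalone sketch of that decoder behaviour; the response type and fromJSON function are local stand-ins, not the model's.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// response is a local stand-in for model.OutgoingWebhookResponse.
type response struct {
	Text *string `json:"text"`
}

// fromJSON mirrors the patched logic: an empty body means "no response", not an error.
func fromJSON(data io.Reader) (*response, error) {
	var o *response
	err := json.NewDecoder(data).Decode(&o)
	if err == io.EOF {
		return nil, nil
	}
	return o, err
}

func main() {
	r, err := fromJSON(strings.NewReader(""))
	fmt.Println(r, err) // <nil> <nil>

	r, err = fromJSON(strings.NewReader(`{"text":"ok"}`))
	fmt.Println(*r.Text, err) // ok <nil>
}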
+var PERMISSION_READ_JOBS *Permission var PERMISSION_MANAGE_JOBS *Permission var PERMISSION_CREATE_USER_ACCESS_TOKEN *Permission var PERMISSION_READ_USER_ACCESS_TOKEN *Permission @@ -92,537 +97,867 @@ var PERMISSION_PROMOTE_GUEST *Permission var PERMISSION_DEMOTE_TO_GUEST *Permission var PERMISSION_USE_CHANNEL_MENTIONS *Permission var PERMISSION_USE_GROUP_MENTIONS *Permission +var PERMISSION_READ_OTHER_USERS_TEAMS *Permission +var PERMISSION_EDIT_BRAND *Permission + +var PERMISSION_SYSCONSOLE_READ_ABOUT *Permission +var PERMISSION_SYSCONSOLE_WRITE_ABOUT *Permission + +var PERMISSION_SYSCONSOLE_READ_REPORTING *Permission +var PERMISSION_SYSCONSOLE_WRITE_REPORTING *Permission + +var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS *Permission +var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_USERS *Permission + +var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS *Permission +var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS *Permission + +var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS *Permission +var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS *Permission + +var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS *Permission +var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS *Permission + +var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS *Permission +var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_PERMISSIONS *Permission + +var PERMISSION_SYSCONSOLE_READ_ENVIRONMENT *Permission +var PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT *Permission + +var PERMISSION_SYSCONSOLE_READ_SITE *Permission +var PERMISSION_SYSCONSOLE_WRITE_SITE *Permission + +var PERMISSION_SYSCONSOLE_READ_AUTHENTICATION *Permission +var PERMISSION_SYSCONSOLE_WRITE_AUTHENTICATION *Permission + +var PERMISSION_SYSCONSOLE_READ_PLUGINS *Permission +var PERMISSION_SYSCONSOLE_WRITE_PLUGINS *Permission + +var PERMISSION_SYSCONSOLE_READ_INTEGRATIONS *Permission +var PERMISSION_SYSCONSOLE_WRITE_INTEGRATIONS *Permission + +var PERMISSION_SYSCONSOLE_READ_COMPLIANCE *Permission +var PERMISSION_SYSCONSOLE_WRITE_COMPLIANCE *Permission + +var PERMISSION_SYSCONSOLE_READ_EXPERIMENTAL *Permission +var PERMISSION_SYSCONSOLE_WRITE_EXPERIMENTAL *Permission // General permission that encompasses all system admin functions // in the future this could be broken up to allow access to some // admin functions but not others var PERMISSION_MANAGE_SYSTEM *Permission -var ALL_PERMISSIONS []*Permission +var AllPermissions []*Permission +var DeprecatedPermissions []*Permission -var CHANNEL_MODERATED_PERMISSIONS []string -var CHANNEL_MODERATED_PERMISSIONS_MAP map[string]string +var ChannelModeratedPermissions []string +var ChannelModeratedPermissionsMap map[string]string + +var SysconsoleReadPermissions []*Permission +var SysconsoleWritePermissions []*Permission func initializePermissions() { PERMISSION_INVITE_USER = &Permission{ "invite_user", "authentication.permissions.team_invite_user.name", "authentication.permissions.team_invite_user.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_ADD_USER_TO_TEAM = &Permission{ "add_user_to_team", "authentication.permissions.add_user_to_team.name", "authentication.permissions.add_user_to_team.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_USE_SLASH_COMMANDS = &Permission{ "use_slash_commands", "authentication.permissions.team_use_slash_commands.name", "authentication.permissions.team_use_slash_commands.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_MANAGE_SLASH_COMMANDS = &Permission{ "manage_slash_commands", 
"authentication.permissions.manage_slash_commands.name", "authentication.permissions.manage_slash_commands.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS = &Permission{ "manage_others_slash_commands", "authentication.permissions.manage_others_slash_commands.name", "authentication.permissions.manage_others_slash_commands.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_CREATE_PUBLIC_CHANNEL = &Permission{ "create_public_channel", "authentication.permissions.create_public_channel.name", "authentication.permissions.create_public_channel.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_CREATE_PRIVATE_CHANNEL = &Permission{ "create_private_channel", "authentication.permissions.create_private_channel.name", "authentication.permissions.create_private_channel.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS = &Permission{ "manage_public_channel_members", "authentication.permissions.manage_public_channel_members.name", "authentication.permissions.manage_public_channel_members.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS = &Permission{ "manage_private_channel_members", "authentication.permissions.manage_private_channel_members.name", "authentication.permissions.manage_private_channel_members.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, + } + PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE = &Permission{ + "convert_public_channel_to_private", + "authentication.permissions.convert_public_channel_to_private.name", + "authentication.permissions.convert_public_channel_to_private.description", + PermissionScopeChannel, + } + PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC = &Permission{ + "convert_private_channel_to_public", + "authentication.permissions.convert_private_channel_to_public.name", + "authentication.permissions.convert_private_channel_to_public.description", + PermissionScopeChannel, } PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE = &Permission{ "assign_system_admin_role", "authentication.permissions.assign_system_admin_role.name", "authentication.permissions.assign_system_admin_role.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_ROLES = &Permission{ "manage_roles", "authentication.permissions.manage_roles.name", "authentication.permissions.manage_roles.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_TEAM_ROLES = &Permission{ "manage_team_roles", "authentication.permissions.manage_team_roles.name", "authentication.permissions.manage_team_roles.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_CHANNEL_ROLES = &Permission{ "manage_channel_roles", "authentication.permissions.manage_channel_roles.name", "authentication.permissions.manage_channel_roles.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_MANAGE_SYSTEM = &Permission{ "manage_system", "authentication.permissions.manage_system.name", "authentication.permissions.manage_system.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_CREATE_DIRECT_CHANNEL = &Permission{ "create_direct_channel", "authentication.permissions.create_direct_channel.name", "authentication.permissions.create_direct_channel.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_CREATE_GROUP_CHANNEL = &Permission{ "create_group_channel", 
"authentication.permissions.create_group_channel.name", "authentication.permissions.create_group_channel.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES = &Permission{ "manage_public_channel_properties", "authentication.permissions.manage_public_channel_properties.name", "authentication.permissions.manage_public_channel_properties.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES = &Permission{ "manage_private_channel_properties", "authentication.permissions.manage_private_channel_properties.name", "authentication.permissions.manage_private_channel_properties.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_LIST_PUBLIC_TEAMS = &Permission{ "list_public_teams", "authentication.permissions.list_public_teams.name", "authentication.permissions.list_public_teams.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_JOIN_PUBLIC_TEAMS = &Permission{ "join_public_teams", "authentication.permissions.join_public_teams.name", "authentication.permissions.join_public_teams.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_LIST_PRIVATE_TEAMS = &Permission{ "list_private_teams", "authentication.permissions.list_private_teams.name", "authentication.permissions.list_private_teams.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_JOIN_PRIVATE_TEAMS = &Permission{ "join_private_teams", "authentication.permissions.join_private_teams.name", "authentication.permissions.join_private_teams.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_LIST_TEAM_CHANNELS = &Permission{ "list_team_channels", "authentication.permissions.list_team_channels.name", "authentication.permissions.list_team_channels.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_JOIN_PUBLIC_CHANNELS = &Permission{ "join_public_channels", "authentication.permissions.join_public_channels.name", "authentication.permissions.join_public_channels.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_DELETE_PUBLIC_CHANNEL = &Permission{ "delete_public_channel", "authentication.permissions.delete_public_channel.name", "authentication.permissions.delete_public_channel.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_DELETE_PRIVATE_CHANNEL = &Permission{ "delete_private_channel", "authentication.permissions.delete_private_channel.name", "authentication.permissions.delete_private_channel.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_EDIT_OTHER_USERS = &Permission{ "edit_other_users", "authentication.permissions.edit_other_users.name", "authentication.permissions.edit_other_users.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_READ_CHANNEL = &Permission{ "read_channel", "authentication.permissions.read_channel.name", "authentication.permissions.read_channel.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, + } + PERMISSION_READ_PUBLIC_CHANNEL_GROUPS = &Permission{ + "read_public_channel_groups", + "authentication.permissions.read_public_channel_groups.name", + "authentication.permissions.read_public_channel_groups.description", + PermissionScopeChannel, + } + PERMISSION_READ_PRIVATE_CHANNEL_GROUPS = &Permission{ + "read_private_channel_groups", + "authentication.permissions.read_private_channel_groups.name", + 
"authentication.permissions.read_private_channel_groups.description", + PermissionScopeChannel, } PERMISSION_READ_PUBLIC_CHANNEL = &Permission{ "read_public_channel", "authentication.permissions.read_public_channel.name", "authentication.permissions.read_public_channel.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_ADD_REACTION = &Permission{ "add_reaction", "authentication.permissions.add_reaction.name", "authentication.permissions.add_reaction.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_REMOVE_REACTION = &Permission{ "remove_reaction", "authentication.permissions.remove_reaction.name", "authentication.permissions.remove_reaction.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_REMOVE_OTHERS_REACTIONS = &Permission{ "remove_others_reactions", "authentication.permissions.remove_others_reactions.name", "authentication.permissions.remove_others_reactions.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } // DEPRECATED PERMISSION_PERMANENT_DELETE_USER = &Permission{ "permanent_delete_user", "authentication.permissions.permanent_delete_user.name", "authentication.permissions.permanent_delete_user.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_UPLOAD_FILE = &Permission{ "upload_file", "authentication.permissions.upload_file.name", "authentication.permissions.upload_file.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_GET_PUBLIC_LINK = &Permission{ "get_public_link", "authentication.permissions.get_public_link.name", "authentication.permissions.get_public_link.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } // DEPRECATED PERMISSION_MANAGE_WEBHOOKS = &Permission{ "manage_webhooks", "authentication.permissions.manage_webhooks.name", "authentication.permissions.manage_webhooks.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } // DEPRECATED PERMISSION_MANAGE_OTHERS_WEBHOOKS = &Permission{ "manage_others_webhooks", "authentication.permissions.manage_others_webhooks.name", "authentication.permissions.manage_others_webhooks.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_INCOMING_WEBHOOKS = &Permission{ "manage_incoming_webhooks", "authentication.permissions.manage_incoming_webhooks.name", "authentication.permissions.manage_incoming_webhooks.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_OUTGOING_WEBHOOKS = &Permission{ "manage_outgoing_webhooks", "authentication.permissions.manage_outgoing_webhooks.name", "authentication.permissions.manage_outgoing_webhooks.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS = &Permission{ "manage_others_incoming_webhooks", "authentication.permissions.manage_others_incoming_webhooks.name", "authentication.permissions.manage_others_incoming_webhooks.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS = &Permission{ "manage_others_outgoing_webhooks", "authentication.permissions.manage_others_outgoing_webhooks.name", "authentication.permissions.manage_others_outgoing_webhooks.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_OAUTH = &Permission{ "manage_oauth", "authentication.permissions.manage_oauth.name", "authentication.permissions.manage_oauth.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH = 
&Permission{ "manage_system_wide_oauth", "authentication.permissions.manage_system_wide_oauth.name", "authentication.permissions.manage_system_wide_oauth.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } // DEPRECATED PERMISSION_MANAGE_EMOJIS = &Permission{ "manage_emojis", "authentication.permissions.manage_emojis.name", "authentication.permissions.manage_emojis.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } // DEPRECATED PERMISSION_MANAGE_OTHERS_EMOJIS = &Permission{ "manage_others_emojis", "authentication.permissions.manage_others_emojis.name", "authentication.permissions.manage_others_emojis.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_CREATE_EMOJIS = &Permission{ "create_emojis", "authentication.permissions.create_emojis.name", "authentication.permissions.create_emojis.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_DELETE_EMOJIS = &Permission{ "delete_emojis", "authentication.permissions.delete_emojis.name", "authentication.permissions.delete_emojis.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_DELETE_OTHERS_EMOJIS = &Permission{ "delete_others_emojis", "authentication.permissions.delete_others_emojis.name", "authentication.permissions.delete_others_emojis.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_CREATE_POST = &Permission{ "create_post", "authentication.permissions.create_post.name", "authentication.permissions.create_post.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_CREATE_POST_PUBLIC = &Permission{ "create_post_public", "authentication.permissions.create_post_public.name", "authentication.permissions.create_post_public.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_CREATE_POST_EPHEMERAL = &Permission{ "create_post_ephemeral", "authentication.permissions.create_post_ephemeral.name", "authentication.permissions.create_post_ephemeral.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_EDIT_POST = &Permission{ "edit_post", "authentication.permissions.edit_post.name", "authentication.permissions.edit_post.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_EDIT_OTHERS_POSTS = &Permission{ "edit_others_posts", "authentication.permissions.edit_others_posts.name", "authentication.permissions.edit_others_posts.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_DELETE_POST = &Permission{ "delete_post", "authentication.permissions.delete_post.name", "authentication.permissions.delete_post.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_DELETE_OTHERS_POSTS = &Permission{ "delete_others_posts", "authentication.permissions.delete_others_posts.name", "authentication.permissions.delete_others_posts.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_REMOVE_USER_FROM_TEAM = &Permission{ "remove_user_from_team", "authentication.permissions.remove_user_from_team.name", "authentication.permissions.remove_user_from_team.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_CREATE_TEAM = &Permission{ "create_team", "authentication.permissions.create_team.name", "authentication.permissions.create_team.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_TEAM = &Permission{ "manage_team", "authentication.permissions.manage_team.name", 
"authentication.permissions.manage_team.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_IMPORT_TEAM = &Permission{ "import_team", "authentication.permissions.import_team.name", "authentication.permissions.import_team.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_VIEW_TEAM = &Permission{ "view_team", "authentication.permissions.view_team.name", "authentication.permissions.view_team.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_LIST_USERS_WITHOUT_TEAM = &Permission{ "list_users_without_team", "authentication.permissions.list_users_without_team.name", "authentication.permissions.list_users_without_team.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_CREATE_USER_ACCESS_TOKEN = &Permission{ "create_user_access_token", "authentication.permissions.create_user_access_token.name", "authentication.permissions.create_user_access_token.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_READ_USER_ACCESS_TOKEN = &Permission{ "read_user_access_token", "authentication.permissions.read_user_access_token.name", "authentication.permissions.read_user_access_token.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_REVOKE_USER_ACCESS_TOKEN = &Permission{ "revoke_user_access_token", "authentication.permissions.revoke_user_access_token.name", "authentication.permissions.revoke_user_access_token.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_CREATE_BOT = &Permission{ "create_bot", "authentication.permissions.create_bot.name", "authentication.permissions.create_bot.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_ASSIGN_BOT = &Permission{ "assign_bot", "authentication.permissions.assign_bot.name", "authentication.permissions.assign_bot.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_READ_BOTS = &Permission{ "read_bots", "authentication.permissions.read_bots.name", "authentication.permissions.read_bots.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_READ_OTHERS_BOTS = &Permission{ "read_others_bots", "authentication.permissions.read_others_bots.name", "authentication.permissions.read_others_bots.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_BOTS = &Permission{ "manage_bots", "authentication.permissions.manage_bots.name", "authentication.permissions.manage_bots.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_OTHERS_BOTS = &Permission{ "manage_others_bots", "authentication.permissions.manage_others_bots.name", "authentication.permissions.manage_others_bots.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, + } + PERMISSION_READ_JOBS = &Permission{ + "read_jobs", + "authentication.permisssions.read_jobs.name", + "authentication.permisssions.read_jobs.description", + PermissionScopeSystem, } PERMISSION_MANAGE_JOBS = &Permission{ "manage_jobs", "authentication.permisssions.manage_jobs.name", "authentication.permisssions.manage_jobs.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_VIEW_MEMBERS = &Permission{ "view_members", "authentication.permisssions.view_members.name", "authentication.permisssions.view_members.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_INVITE_GUEST = &Permission{ "invite_guest", "authentication.permissions.invite_guest.name", 
"authentication.permissions.invite_guest.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_PROMOTE_GUEST = &Permission{ "promote_guest", "authentication.permissions.promote_guest.name", "authentication.permissions.promote_guest.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } - PERMISSION_DEMOTE_TO_GUEST = &Permission{ "demote_to_guest", "authentication.permissions.demote_to_guest.name", "authentication.permissions.demote_to_guest.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } - PERMISSION_USE_CHANNEL_MENTIONS = &Permission{ "use_channel_mentions", "authentication.permissions.use_channel_mentions.name", "authentication.permissions.use_channel_mentions.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } - PERMISSION_USE_GROUP_MENTIONS = &Permission{ "use_group_mentions", "authentication.permissions.use_group_mentions.name", "authentication.permissions.use_group_mentions.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, + } + PERMISSION_READ_OTHER_USERS_TEAMS = &Permission{ + "read_other_users_teams", + "authentication.permissions.read_other_users_teams.name", + "authentication.permissions.read_other_users_teams.description", + PermissionScopeSystem, + } + PERMISSION_EDIT_BRAND = &Permission{ + "edit_brand", + "authentication.permissions.edit_brand.name", + "authentication.permissions.edit_brand.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_ABOUT = &Permission{ + "sysconsole_read_about", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_ABOUT = &Permission{ + "sysconsole_write_about", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_REPORTING = &Permission{ + "sysconsole_read_reporting", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_REPORTING = &Permission{ + "sysconsole_write_reporting", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS = &Permission{ + "sysconsole_read_user_management_users", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_USERS = &Permission{ + "sysconsole_write_user_management_users", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS = &Permission{ + "sysconsole_read_user_management_groups", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS = &Permission{ + "sysconsole_write_user_management_groups", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS = &Permission{ + "sysconsole_read_user_management_teams", + 
"authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS = &Permission{ + "sysconsole_write_user_management_teams", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS = &Permission{ + "sysconsole_read_user_management_channels", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS = &Permission{ + "sysconsole_write_user_management_channels", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS = &Permission{ + "sysconsole_read_user_management_permissions", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_PERMISSIONS = &Permission{ + "sysconsole_write_user_management_permissions", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_ENVIRONMENT = &Permission{ + "sysconsole_read_environment", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT = &Permission{ + "sysconsole_write_environment", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_SITE = &Permission{ + "sysconsole_read_site", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_SITE = &Permission{ + "sysconsole_write_site", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_AUTHENTICATION = &Permission{ + "sysconsole_read_authentication", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_AUTHENTICATION = &Permission{ + "sysconsole_write_authentication", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_PLUGINS = &Permission{ + "sysconsole_read_plugins", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_PLUGINS = &Permission{ + "sysconsole_write_plugins", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_INTEGRATIONS = &Permission{ + "sysconsole_read_integrations", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + 
PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_INTEGRATIONS = &Permission{ + "sysconsole_write_integrations", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_COMPLIANCE = &Permission{ + "sysconsole_read_compliance", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_COMPLIANCE = &Permission{ + "sysconsole_write_compliance", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_PLUGINS = &Permission{ + "sysconsole_read_plugins", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_PLUGINS = &Permission{ + "sysconsole_write_plugins", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_EXPERIMENTAL = &Permission{ + "sysconsole_read_experimental", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_EXPERIMENTAL = &Permission{ + "sysconsole_write_experimental", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, } - ALL_PERMISSIONS = []*Permission{ - PERMISSION_INVITE_USER, - PERMISSION_ADD_USER_TO_TEAM, - PERMISSION_USE_SLASH_COMMANDS, - PERMISSION_MANAGE_SLASH_COMMANDS, - PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS, - PERMISSION_CREATE_PUBLIC_CHANNEL, - PERMISSION_CREATE_PRIVATE_CHANNEL, - PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS, - PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS, + SysconsoleReadPermissions = []*Permission{ + PERMISSION_SYSCONSOLE_READ_ABOUT, + PERMISSION_SYSCONSOLE_READ_REPORTING, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS, + PERMISSION_SYSCONSOLE_READ_ENVIRONMENT, + PERMISSION_SYSCONSOLE_READ_SITE, + PERMISSION_SYSCONSOLE_READ_AUTHENTICATION, + PERMISSION_SYSCONSOLE_READ_PLUGINS, + PERMISSION_SYSCONSOLE_READ_INTEGRATIONS, + PERMISSION_SYSCONSOLE_READ_COMPLIANCE, + PERMISSION_SYSCONSOLE_READ_EXPERIMENTAL, + } + + SysconsoleWritePermissions = []*Permission{ + PERMISSION_SYSCONSOLE_WRITE_ABOUT, + PERMISSION_SYSCONSOLE_WRITE_REPORTING, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_USERS, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_PERMISSIONS, + PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT, + PERMISSION_SYSCONSOLE_WRITE_SITE, + PERMISSION_SYSCONSOLE_WRITE_AUTHENTICATION, + PERMISSION_SYSCONSOLE_WRITE_PLUGINS, + PERMISSION_SYSCONSOLE_WRITE_INTEGRATIONS, + PERMISSION_SYSCONSOLE_WRITE_COMPLIANCE, + PERMISSION_SYSCONSOLE_WRITE_EXPERIMENTAL, + } + + SystemScopedPermissionsMinusSysconsole := []*Permission{ PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE, PERMISSION_MANAGE_ROLES, - 
PERMISSION_MANAGE_TEAM_ROLES, - PERMISSION_MANAGE_CHANNEL_ROLES, + PERMISSION_MANAGE_SYSTEM, PERMISSION_CREATE_DIRECT_CHANNEL, PERMISSION_CREATE_GROUP_CHANNEL, - PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES, - PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES, PERMISSION_LIST_PUBLIC_TEAMS, PERMISSION_JOIN_PUBLIC_TEAMS, PERMISSION_LIST_PRIVATE_TEAMS, PERMISSION_JOIN_PRIVATE_TEAMS, + PERMISSION_EDIT_OTHER_USERS, + PERMISSION_READ_OTHER_USERS_TEAMS, + PERMISSION_GET_PUBLIC_LINK, + PERMISSION_MANAGE_OAUTH, + PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH, + PERMISSION_CREATE_TEAM, + PERMISSION_LIST_USERS_WITHOUT_TEAM, + PERMISSION_CREATE_USER_ACCESS_TOKEN, + PERMISSION_READ_USER_ACCESS_TOKEN, + PERMISSION_REVOKE_USER_ACCESS_TOKEN, + PERMISSION_CREATE_BOT, + PERMISSION_ASSIGN_BOT, + PERMISSION_READ_BOTS, + PERMISSION_READ_OTHERS_BOTS, + PERMISSION_MANAGE_BOTS, + PERMISSION_MANAGE_OTHERS_BOTS, + PERMISSION_READ_JOBS, + PERMISSION_MANAGE_JOBS, + PERMISSION_PROMOTE_GUEST, + PERMISSION_DEMOTE_TO_GUEST, + PERMISSION_EDIT_BRAND, + } + + TeamScopedPermissions := []*Permission{ + PERMISSION_INVITE_USER, + PERMISSION_ADD_USER_TO_TEAM, + PERMISSION_MANAGE_SLASH_COMMANDS, + PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS, + PERMISSION_CREATE_PUBLIC_CHANNEL, + PERMISSION_CREATE_PRIVATE_CHANNEL, + PERMISSION_MANAGE_TEAM_ROLES, PERMISSION_LIST_TEAM_CHANNELS, PERMISSION_JOIN_PUBLIC_CHANNELS, - PERMISSION_DELETE_PUBLIC_CHANNEL, - PERMISSION_DELETE_PRIVATE_CHANNEL, - PERMISSION_EDIT_OTHER_USERS, - PERMISSION_READ_CHANNEL, PERMISSION_READ_PUBLIC_CHANNEL, - PERMISSION_ADD_REACTION, - PERMISSION_REMOVE_REACTION, - PERMISSION_REMOVE_OTHERS_REACTIONS, - PERMISSION_PERMANENT_DELETE_USER, - PERMISSION_UPLOAD_FILE, - PERMISSION_GET_PUBLIC_LINK, - PERMISSION_MANAGE_WEBHOOKS, - PERMISSION_MANAGE_OTHERS_WEBHOOKS, PERMISSION_MANAGE_INCOMING_WEBHOOKS, PERMISSION_MANAGE_OUTGOING_WEBHOOKS, PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS, PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS, - PERMISSION_MANAGE_OAUTH, - PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH, - PERMISSION_MANAGE_EMOJIS, - PERMISSION_MANAGE_OTHERS_EMOJIS, PERMISSION_CREATE_EMOJIS, PERMISSION_DELETE_EMOJIS, PERMISSION_DELETE_OTHERS_EMOJIS, + PERMISSION_REMOVE_USER_FROM_TEAM, + PERMISSION_MANAGE_TEAM, + PERMISSION_IMPORT_TEAM, + PERMISSION_VIEW_TEAM, + PERMISSION_VIEW_MEMBERS, + PERMISSION_INVITE_GUEST, + } + + ChannelScopedPermissions := []*Permission{ + PERMISSION_USE_SLASH_COMMANDS, + PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS, + PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS, + PERMISSION_MANAGE_CHANNEL_ROLES, + PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES, + PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES, + PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE, + PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC, + PERMISSION_DELETE_PUBLIC_CHANNEL, + PERMISSION_DELETE_PRIVATE_CHANNEL, + PERMISSION_READ_CHANNEL, + PERMISSION_READ_PUBLIC_CHANNEL_GROUPS, + PERMISSION_READ_PRIVATE_CHANNEL_GROUPS, + PERMISSION_ADD_REACTION, + PERMISSION_REMOVE_REACTION, + PERMISSION_REMOVE_OTHERS_REACTIONS, + PERMISSION_UPLOAD_FILE, PERMISSION_CREATE_POST, PERMISSION_CREATE_POST_PUBLIC, PERMISSION_CREATE_POST_EPHEMERAL, @@ -630,44 +965,39 @@ func initializePermissions() { PERMISSION_EDIT_OTHERS_POSTS, PERMISSION_DELETE_POST, PERMISSION_DELETE_OTHERS_POSTS, - PERMISSION_REMOVE_USER_FROM_TEAM, - PERMISSION_CREATE_TEAM, - PERMISSION_MANAGE_TEAM, - PERMISSION_IMPORT_TEAM, - PERMISSION_VIEW_TEAM, - PERMISSION_LIST_USERS_WITHOUT_TEAM, - PERMISSION_MANAGE_JOBS, - PERMISSION_CREATE_USER_ACCESS_TOKEN, - PERMISSION_READ_USER_ACCESS_TOKEN, - 
PERMISSION_REVOKE_USER_ACCESS_TOKEN, - PERMISSION_CREATE_BOT, - PERMISSION_READ_BOTS, - PERMISSION_READ_OTHERS_BOTS, - PERMISSION_MANAGE_BOTS, - PERMISSION_MANAGE_OTHERS_BOTS, - PERMISSION_MANAGE_SYSTEM, - PERMISSION_VIEW_MEMBERS, - PERMISSION_INVITE_GUEST, - PERMISSION_PROMOTE_GUEST, - PERMISSION_DEMOTE_TO_GUEST, PERMISSION_USE_CHANNEL_MENTIONS, PERMISSION_USE_GROUP_MENTIONS, } - CHANNEL_MODERATED_PERMISSIONS = []string{ + DeprecatedPermissions = []*Permission{ + PERMISSION_PERMANENT_DELETE_USER, + PERMISSION_MANAGE_WEBHOOKS, + PERMISSION_MANAGE_OTHERS_WEBHOOKS, + PERMISSION_MANAGE_EMOJIS, + PERMISSION_MANAGE_OTHERS_EMOJIS, + } + + AllPermissions = []*Permission{} + AllPermissions = append(AllPermissions, SystemScopedPermissionsMinusSysconsole...) + AllPermissions = append(AllPermissions, TeamScopedPermissions...) + AllPermissions = append(AllPermissions, ChannelScopedPermissions...) + AllPermissions = append(AllPermissions, SysconsoleReadPermissions...) + AllPermissions = append(AllPermissions, SysconsoleWritePermissions...) + + ChannelModeratedPermissions = []string{ PERMISSION_CREATE_POST.Id, "create_reactions", "manage_members", PERMISSION_USE_CHANNEL_MENTIONS.Id, } - CHANNEL_MODERATED_PERMISSIONS_MAP = map[string]string{ - PERMISSION_CREATE_POST.Id: CHANNEL_MODERATED_PERMISSIONS[0], - PERMISSION_ADD_REACTION.Id: CHANNEL_MODERATED_PERMISSIONS[1], - PERMISSION_REMOVE_REACTION.Id: CHANNEL_MODERATED_PERMISSIONS[1], - PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id: CHANNEL_MODERATED_PERMISSIONS[2], - PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id: CHANNEL_MODERATED_PERMISSIONS[2], - PERMISSION_USE_CHANNEL_MENTIONS.Id: CHANNEL_MODERATED_PERMISSIONS[3], + ChannelModeratedPermissionsMap = map[string]string{ + PERMISSION_CREATE_POST.Id: ChannelModeratedPermissions[0], + PERMISSION_ADD_REACTION.Id: ChannelModeratedPermissions[1], + PERMISSION_REMOVE_REACTION.Id: ChannelModeratedPermissions[1], + PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id: ChannelModeratedPermissions[2], + PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id: ChannelModeratedPermissions[2], + PERMISSION_USE_CHANNEL_MENTIONS.Id: ChannelModeratedPermissions[3], } } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/post.go b/vendor/github.com/mattermost/mattermost-server/v5/model/post.go index 817ca08a..6e29ba3e 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/post.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/post.go @@ -64,6 +64,7 @@ const ( POST_PROPS_MENTION_HIGHLIGHT_DISABLED = "mentionHighlightDisabled" POST_PROPS_GROUP_HIGHLIGHT_DISABLED = "disable_group_highlight" + POST_SYSTEM_WARN_METRIC_STATUS = "warn_metric_status" ) var AT_MENTION_PATTEN = regexp.MustCompile(`\B@`) @@ -312,7 +313,8 @@ func (o *Post) IsValid(maxPostSize int) *AppError { POST_CHANNEL_RESTORED, POST_CHANGE_CHANNEL_PRIVACY, POST_ME, - POST_ADD_BOT_TEAMS_CHANNELS: + POST_ADD_BOT_TEAMS_CHANNELS, + POST_SYSTEM_WARN_METRIC_STATUS: default: if !strings.HasPrefix(o.Type, POST_CUSTOM_TYPE_PREFIX) { return NewAppError("Post.IsValid", "model.post.is_valid.type.app_error", nil, "id="+o.Type, http.StatusBadRequest) @@ -495,15 +497,14 @@ func (o *SearchParameter) SearchParameterToJson() string { return string(b) } -func SearchParameterFromJson(data io.Reader) *SearchParameter { +func SearchParameterFromJson(data io.Reader) (*SearchParameter, error) { decoder := json.NewDecoder(data) var searchParam SearchParameter - err := decoder.Decode(&searchParam) - if err != nil { - return nil + if err := 
decoder.Decode(&searchParam); err != nil { + return nil, err } - return &searchParam + return &searchParam, nil } func (o *Post) ChannelMentions() []string { @@ -521,6 +522,9 @@ func (o *Post) DisableMentionHighlights() string { // DisableMentionHighlights disables mention highlighting for a post patch if required. func (o *PostPatch) DisableMentionHighlights() { + if o.Message == nil { + return + } if _, hasMentions := findAtChannelMention(*o.Message); hasMentions { if o.Props == nil { o.Props = &StringInterface{} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/preference.go b/vendor/github.com/mattermost/mattermost-server/v5/model/preference.go index 346f88f8..e752bb54 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/preference.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/preference.go @@ -14,6 +14,7 @@ import ( const ( PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW = "direct_channel_show" + PREFERENCE_CATEGORY_GROUP_CHANNEL_SHOW = "group_channel_show" PREFERENCE_CATEGORY_TUTORIAL_STEPS = "tutorial_step" PREFERENCE_CATEGORY_ADVANCED_SETTINGS = "advanced_settings" PREFERENCE_CATEGORY_FLAGGED_POST = "flagged_post" diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/product_notices.go b/vendor/github.com/mattermost/mattermost-server/v5/model/product_notices.go new file mode 100644 index 00000000..6aa88fc7 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/product_notices.go @@ -0,0 +1,213 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "github.com/pkg/errors" + "io" +) + +type ProductNotices []ProductNotice + +func (r *ProductNotices) Marshal() ([]byte, error) { + return json.Marshal(r) +} + +func UnmarshalProductNotices(data []byte) (ProductNotices, error) { + var r ProductNotices + err := json.Unmarshal(data, &r) + return r, err +} + +// List of product notices. Order is important and is used to resolve priorities. +// Each notice will only be show if conditions are met. +type ProductNotice struct { + Conditions Conditions `json:"conditions"` + ID string `json:"id"` // Unique identifier for this notice. Can be a running number. Used for storing 'viewed'; state on the server. + LocalizedMessages map[string]NoticeMessageInternal `json:"localizedMessages"` // Notice message data, organized by locale.; Example:; "localizedMessages": {; "en": { "title": "English", description: "English description"},; "frFR": { "title": "Frances", description: "French description"}; } + Repeatable *bool `json:"repeatable,omitempty"` // Configurable flag if the notice should reappear after it’s seen and dismissed +} + +func (n *ProductNotice) SysAdminOnly() bool { + return n.Conditions.Audience != nil && *n.Conditions.Audience == NoticeAudience_Sysadmin +} + +func (n *ProductNotice) TeamAdminOnly() bool { + return n.Conditions.Audience != nil && *n.Conditions.Audience == NoticeAudience_TeamAdmin +} + +type Conditions struct { + Audience *NoticeAudience `json:"audience,omitempty"` + ClientType *NoticeClientType `json:"clientType,omitempty"` // Only show the notice on specific clients. 
Defaults to 'all' + DesktopVersion []string `json:"desktopVersion,omitempty"` // What desktop client versions does this notice apply to.; Format: semver ranges (https://devhints.io/semver); Example: [">=1.2.3 < ~2.4.x"]; Example: ["= 2020-03-01T00:00:00Z" - show after specified date; "< 2020-03-01T00:00:00Z" - show before the specified date; "> 2020-03-01T00:00:00Z <= 2020-04-01T00:00:00Z" - show only between the specified dates + InstanceType *NoticeInstanceType `json:"instanceType,omitempty"` + MobileVersion []string `json:"mobileVersion,omitempty"` // What mobile client versions does this notice apply to.; Format: semver ranges (https://devhints.io/semver); Example: [">=1.2.3 < ~2.4.x"]; Example: ["=1.2.3 < ~2.4.x"]; Example: [" ROLE_NAME_MAX_LENGTH { return false @@ -493,6 +645,8 @@ func MakeDefaultRoles() map[string]*Role { PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS.Id, PERMISSION_MANAGE_INCOMING_WEBHOOKS.Id, PERMISSION_MANAGE_OUTGOING_WEBHOOKS.Id, + PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE.Id, + PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC.Id, }, SchemeManaged: true, BuiltIn: true, @@ -562,6 +716,38 @@ func MakeDefaultRoles() map[string]*Role { BuiltIn: true, } + roles[SYSTEM_USER_MANAGER_ROLE_ID] = &Role{ + Name: "system_user_manager", + DisplayName: "authentication.roles.system_user_manager.name", + Description: "authentication.roles.system_user_manager.description", + Permissions: SystemUserManagerDefaultPermissions, + SchemeManaged: false, + BuiltIn: true, + } + + roles[SYSTEM_READ_ONLY_ADMIN_ROLE_ID] = &Role{ + Name: "system_read_only_admin", + DisplayName: "authentication.roles.system_read_only_admin.name", + Description: "authentication.roles.system_read_only_admin.description", + Permissions: SystemReadOnlyAdminDefaultPermissions, + SchemeManaged: false, + BuiltIn: true, + } + + roles[SYSTEM_MANAGER_ROLE_ID] = &Role{ + Name: "system_manager", + DisplayName: "authentication.roles.system_manager.name", + Description: "authentication.roles.system_manager.description", + Permissions: SystemManagerDefaultPermissions, + SchemeManaged: false, + BuiltIn: true, + } + + allPermissionIDs := []string{} + for _, permission := range AllPermissions { + allPermissionIDs = append(allPermissionIDs, permission.Id) + } + roles[SYSTEM_ADMIN_ROLE_ID] = &Role{ Name: "system_admin", DisplayName: "authentication.roles.global_admin.name", @@ -569,64 +755,21 @@ func MakeDefaultRoles() map[string]*Role { // System admins can do anything channel and team admins can do // plus everything members of teams and channels can do to all teams // and channels on the system - Permissions: append( - append( - append( - append( - []string{ - PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE.Id, - PERMISSION_MANAGE_SYSTEM.Id, - PERMISSION_MANAGE_ROLES.Id, - PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES.Id, - PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id, - PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id, - PERMISSION_DELETE_PUBLIC_CHANNEL.Id, - PERMISSION_CREATE_PUBLIC_CHANNEL.Id, - PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES.Id, - PERMISSION_DELETE_PRIVATE_CHANNEL.Id, - PERMISSION_CREATE_PRIVATE_CHANNEL.Id, - PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH.Id, - PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS.Id, - PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS.Id, - PERMISSION_EDIT_OTHER_USERS.Id, - PERMISSION_EDIT_OTHERS_POSTS.Id, - PERMISSION_MANAGE_OAUTH.Id, - PERMISSION_INVITE_USER.Id, - PERMISSION_INVITE_GUEST.Id, - PERMISSION_PROMOTE_GUEST.Id, - PERMISSION_DEMOTE_TO_GUEST.Id, - PERMISSION_DELETE_POST.Id, - PERMISSION_DELETE_OTHERS_POSTS.Id, - 
PERMISSION_CREATE_TEAM.Id, - PERMISSION_ADD_USER_TO_TEAM.Id, - PERMISSION_LIST_USERS_WITHOUT_TEAM.Id, - PERMISSION_MANAGE_JOBS.Id, - PERMISSION_CREATE_POST_PUBLIC.Id, - PERMISSION_CREATE_POST_EPHEMERAL.Id, - PERMISSION_CREATE_USER_ACCESS_TOKEN.Id, - PERMISSION_READ_USER_ACCESS_TOKEN.Id, - PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id, - PERMISSION_CREATE_BOT.Id, - PERMISSION_READ_BOTS.Id, - PERMISSION_READ_OTHERS_BOTS.Id, - PERMISSION_MANAGE_BOTS.Id, - PERMISSION_MANAGE_OTHERS_BOTS.Id, - PERMISSION_REMOVE_OTHERS_REACTIONS.Id, - PERMISSION_LIST_PRIVATE_TEAMS.Id, - PERMISSION_JOIN_PRIVATE_TEAMS.Id, - PERMISSION_VIEW_MEMBERS.Id, - }, - roles[TEAM_USER_ROLE_ID].Permissions..., - ), - roles[CHANNEL_USER_ROLE_ID].Permissions..., - ), - roles[TEAM_ADMIN_ROLE_ID].Permissions..., - ), - roles[CHANNEL_ADMIN_ROLE_ID].Permissions..., - ), + Permissions: allPermissionIDs, SchemeManaged: true, BuiltIn: true, } return roles } + +func addAncillaryPermissions(permissions []string) []string { + for _, permission := range permissions { + if ancillaryPermissions, ok := SysconsoleAncillaryPermissions[permission]; ok { + for _, ancillaryPermission := range ancillaryPermissions { + permissions = append(permissions, ancillaryPermission.Id) + } + } + } + return permissions +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go b/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go index 59ac2acc..feaf325a 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go @@ -15,6 +15,7 @@ const ( USER_AUTH_SERVICE_SAML_TEXT = "SAML" USER_AUTH_SERVICE_IS_SAML = "isSaml" USER_AUTH_SERVICE_IS_MOBILE = "isMobile" + USER_AUTH_SERVICE_IS_OAUTH = "isOAuthUser" ) type SamlAuthRequest struct { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go b/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go index e6dce73c..d34c8865 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go @@ -4,6 +4,7 @@ package model import ( + "net/http" "regexp" "strings" "time" @@ -367,3 +368,13 @@ func ParseSearchParams(text string, timeZoneOffset int) []*SearchParams { return paramsList } + +func IsSearchParamsListValid(paramsList []*SearchParams) *AppError { + // All SearchParams should have same IncludeDeletedChannels value. + for _, params := range paramsList { + if params.IncludeDeletedChannels != paramsList[0].IncludeDeletedChannels { + return NewAppError("IsSearchParamsListValid", "model.search_params_list.is_valid.include_deleted_channels.app_error", nil, "", http.StatusInternalServerError) + } + } + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/serialized_gen.go b/vendor/github.com/mattermost/mattermost-server/v5/model/serialized_gen.go new file mode 100644 index 00000000..c64d88de --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/serialized_gen.go @@ -0,0 +1,1622 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *Session) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0001 uint32 + zb0001, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 13 { + err = msgp.ArrayError{Wanted: 13, Got: zb0001} + return + } + z.Id, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + z.Token, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Token") + return + } + z.CreateAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + z.ExpiresAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "ExpiresAt") + return + } + z.LastActivityAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + z.UserId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + z.DeviceId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "DeviceId") + return + } + z.Roles, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.IsOAuth, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "IsOAuth") + return + } + z.ExpiredNotify, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "ExpiredNotify") + return + } + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + if z.Props == nil { + z.Props = make(StringMap, zb0002) + } else if len(z.Props) > 0 { + for key := range z.Props { + delete(z.Props, key) + } + } + for zb0002 > 0 { + zb0002-- + var za0001 string + var za0002 string + za0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + za0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + z.Props[za0001] = za0002 + } + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "TeamMembers") + return + } + if cap(z.TeamMembers) >= int(zb0003) { + z.TeamMembers = (z.TeamMembers)[:zb0003] + } else { + z.TeamMembers = make([]*TeamMember, zb0003) + } + for za0003 := range z.TeamMembers { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + z.TeamMembers[za0003] = nil + } else { + if z.TeamMembers[za0003] == nil { + z.TeamMembers[za0003] = new(TeamMember) + } + err = z.TeamMembers[za0003].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + } + } + z.Local, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Local") + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Session) EncodeMsg(en *msgp.Writer) (err error) { + // array header, size 13 + err = en.Append(0x9d) + if err != nil { + return + } + err = en.WriteString(z.Id) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + err = en.WriteString(z.Token) + if err != nil { + err = msgp.WrapError(err, "Token") + return + } + err = en.WriteInt64(z.CreateAt) + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + err = en.WriteInt64(z.ExpiresAt) + if err != nil { + err = msgp.WrapError(err, "ExpiresAt") + return + } + err = en.WriteInt64(z.LastActivityAt) + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + err = en.WriteString(z.UserId) + if err != nil { + err = 
msgp.WrapError(err, "UserId") + return + } + err = en.WriteString(z.DeviceId) + if err != nil { + err = msgp.WrapError(err, "DeviceId") + return + } + err = en.WriteString(z.Roles) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + err = en.WriteBool(z.IsOAuth) + if err != nil { + err = msgp.WrapError(err, "IsOAuth") + return + } + err = en.WriteBool(z.ExpiredNotify) + if err != nil { + err = msgp.WrapError(err, "ExpiredNotify") + return + } + err = en.WriteMapHeader(uint32(len(z.Props))) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + for za0001, za0002 := range z.Props { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + } + err = en.WriteArrayHeader(uint32(len(z.TeamMembers))) + if err != nil { + err = msgp.WrapError(err, "TeamMembers") + return + } + for za0003 := range z.TeamMembers { + if z.TeamMembers[za0003] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.TeamMembers[za0003].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + } + } + err = en.WriteBool(z.Local) + if err != nil { + err = msgp.WrapError(err, "Local") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Session) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // array header, size 13 + o = append(o, 0x9d) + o = msgp.AppendString(o, z.Id) + o = msgp.AppendString(o, z.Token) + o = msgp.AppendInt64(o, z.CreateAt) + o = msgp.AppendInt64(o, z.ExpiresAt) + o = msgp.AppendInt64(o, z.LastActivityAt) + o = msgp.AppendString(o, z.UserId) + o = msgp.AppendString(o, z.DeviceId) + o = msgp.AppendString(o, z.Roles) + o = msgp.AppendBool(o, z.IsOAuth) + o = msgp.AppendBool(o, z.ExpiredNotify) + o = msgp.AppendMapHeader(o, uint32(len(z.Props))) + for za0001, za0002 := range z.Props { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + o = msgp.AppendArrayHeader(o, uint32(len(z.TeamMembers))) + for za0003 := range z.TeamMembers { + if z.TeamMembers[za0003] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.TeamMembers[za0003].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + } + } + o = msgp.AppendBool(o, z.Local) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Session) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0001 uint32 + zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 13 { + err = msgp.ArrayError{Wanted: 13, Got: zb0001} + return + } + z.Id, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + z.Token, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Token") + return + } + z.CreateAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + z.ExpiresAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ExpiresAt") + return + } + z.LastActivityAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + z.UserId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + z.DeviceId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + 
err = msgp.WrapError(err, "DeviceId") + return + } + z.Roles, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.IsOAuth, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "IsOAuth") + return + } + z.ExpiredNotify, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ExpiredNotify") + return + } + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + if z.Props == nil { + z.Props = make(StringMap, zb0002) + } else if len(z.Props) > 0 { + for key := range z.Props { + delete(z.Props, key) + } + } + for zb0002 > 0 { + var za0001 string + var za0002 string + zb0002-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + z.Props[za0001] = za0002 + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TeamMembers") + return + } + if cap(z.TeamMembers) >= int(zb0003) { + z.TeamMembers = (z.TeamMembers)[:zb0003] + } else { + z.TeamMembers = make([]*TeamMember, zb0003) + } + for za0003 := range z.TeamMembers { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.TeamMembers[za0003] = nil + } else { + if z.TeamMembers[za0003] == nil { + z.TeamMembers[za0003] = new(TeamMember) + } + bts, err = z.TeamMembers[za0003].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + } + } + z.Local, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Local") + return + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Session) Msgsize() (s int) { + s = 1 + msgp.StringPrefixSize + len(z.Id) + msgp.StringPrefixSize + len(z.Token) + msgp.Int64Size + msgp.Int64Size + msgp.Int64Size + msgp.StringPrefixSize + len(z.UserId) + msgp.StringPrefixSize + len(z.DeviceId) + msgp.StringPrefixSize + len(z.Roles) + msgp.BoolSize + msgp.BoolSize + msgp.MapHeaderSize + if z.Props != nil { + for za0001, za0002 := range z.Props { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += msgp.ArrayHeaderSize + for za0003 := range z.TeamMembers { + if z.TeamMembers[za0003] == nil { + s += msgp.NilSize + } else { + s += z.TeamMembers[za0003].Msgsize() + } + } + s += msgp.BoolSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *StringMap) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if (*z) == nil { + (*z) = make(StringMap, zb0003) + } else if len((*z)) > 0 { + for key := range *z { + delete((*z), key) + } + } + for zb0003 > 0 { + zb0003-- + var zb0001 string + var zb0002 string + zb0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err) + return + } + zb0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + (*z)[zb0001] = zb0002 + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z StringMap) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteMapHeader(uint32(len(z))) + if err != nil { + err = msgp.WrapError(err) + return + } + for 
zb0004, zb0005 := range z { + err = en.WriteString(zb0004) + if err != nil { + err = msgp.WrapError(err) + return + } + err = en.WriteString(zb0005) + if err != nil { + err = msgp.WrapError(err, zb0004) + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z StringMap) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendMapHeader(o, uint32(len(z))) + for zb0004, zb0005 := range z { + o = msgp.AppendString(o, zb0004) + o = msgp.AppendString(o, zb0005) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *StringMap) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if (*z) == nil { + (*z) = make(StringMap, zb0003) + } else if len((*z)) > 0 { + for key := range *z { + delete((*z), key) + } + } + for zb0003 > 0 { + var zb0001 string + var zb0002 string + zb0003-- + zb0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + zb0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + (*z)[zb0001] = zb0002 + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z StringMap) Msgsize() (s int) { + s = msgp.MapHeaderSize + if z != nil { + for zb0004, zb0005 := range z { + _ = zb0005 + s += msgp.StringPrefixSize + len(zb0004) + msgp.StringPrefixSize + len(zb0005) + } + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *TeamMember) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "TeamId": + z.TeamId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "TeamId") + return + } + case "UserId": + z.UserId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + case "Roles": + z.Roles, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + case "DeleteAt": + z.DeleteAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + case "SchemeGuest": + z.SchemeGuest, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "SchemeGuest") + return + } + case "SchemeUser": + z.SchemeUser, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "SchemeUser") + return + } + case "SchemeAdmin": + z.SchemeAdmin, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "SchemeAdmin") + return + } + case "ExplicitRoles": + z.ExplicitRoles, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ExplicitRoles") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *TeamMember) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 8 + // write "TeamId" + err = en.Append(0x88, 0xa6, 0x54, 0x65, 0x61, 0x6d, 0x49, 0x64) + if err != nil { + return + } + err = en.WriteString(z.TeamId) + if err != nil { + err = msgp.WrapError(err, "TeamId") + return + } + // write "UserId" + err = en.Append(0xa6, 0x55, 0x73, 0x65, 0x72, 0x49, 
0x64) + if err != nil { + return + } + err = en.WriteString(z.UserId) + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + // write "Roles" + err = en.Append(0xa5, 0x52, 0x6f, 0x6c, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteString(z.Roles) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + // write "DeleteAt" + err = en.Append(0xa8, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x74) + if err != nil { + return + } + err = en.WriteInt64(z.DeleteAt) + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + // write "SchemeGuest" + err = en.Append(0xab, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x47, 0x75, 0x65, 0x73, 0x74) + if err != nil { + return + } + err = en.WriteBool(z.SchemeGuest) + if err != nil { + err = msgp.WrapError(err, "SchemeGuest") + return + } + // write "SchemeUser" + err = en.Append(0xaa, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72) + if err != nil { + return + } + err = en.WriteBool(z.SchemeUser) + if err != nil { + err = msgp.WrapError(err, "SchemeUser") + return + } + // write "SchemeAdmin" + err = en.Append(0xab, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e) + if err != nil { + return + } + err = en.WriteBool(z.SchemeAdmin) + if err != nil { + err = msgp.WrapError(err, "SchemeAdmin") + return + } + // write "ExplicitRoles" + err = en.Append(0xad, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x52, 0x6f, 0x6c, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteString(z.ExplicitRoles) + if err != nil { + err = msgp.WrapError(err, "ExplicitRoles") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *TeamMember) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 8 + // string "TeamId" + o = append(o, 0x88, 0xa6, 0x54, 0x65, 0x61, 0x6d, 0x49, 0x64) + o = msgp.AppendString(o, z.TeamId) + // string "UserId" + o = append(o, 0xa6, 0x55, 0x73, 0x65, 0x72, 0x49, 0x64) + o = msgp.AppendString(o, z.UserId) + // string "Roles" + o = append(o, 0xa5, 0x52, 0x6f, 0x6c, 0x65, 0x73) + o = msgp.AppendString(o, z.Roles) + // string "DeleteAt" + o = append(o, 0xa8, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x74) + o = msgp.AppendInt64(o, z.DeleteAt) + // string "SchemeGuest" + o = append(o, 0xab, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x47, 0x75, 0x65, 0x73, 0x74) + o = msgp.AppendBool(o, z.SchemeGuest) + // string "SchemeUser" + o = append(o, 0xaa, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72) + o = msgp.AppendBool(o, z.SchemeUser) + // string "SchemeAdmin" + o = append(o, 0xab, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e) + o = msgp.AppendBool(o, z.SchemeAdmin) + // string "ExplicitRoles" + o = append(o, 0xad, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x52, 0x6f, 0x6c, 0x65, 0x73) + o = msgp.AppendString(o, z.ExplicitRoles) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TeamMember) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "TeamId": + z.TeamId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TeamId") + return + } + case "UserId": + z.UserId, bts, err = msgp.ReadStringBytes(bts) + 
if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + case "Roles": + z.Roles, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + case "DeleteAt": + z.DeleteAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + case "SchemeGuest": + z.SchemeGuest, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SchemeGuest") + return + } + case "SchemeUser": + z.SchemeUser, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SchemeUser") + return + } + case "SchemeAdmin": + z.SchemeAdmin, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SchemeAdmin") + return + } + case "ExplicitRoles": + z.ExplicitRoles, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ExplicitRoles") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *TeamMember) Msgsize() (s int) { + s = 1 + 7 + msgp.StringPrefixSize + len(z.TeamId) + 7 + msgp.StringPrefixSize + len(z.UserId) + 6 + msgp.StringPrefixSize + len(z.Roles) + 9 + msgp.Int64Size + 12 + msgp.BoolSize + 11 + msgp.BoolSize + 12 + msgp.BoolSize + 14 + msgp.StringPrefixSize + len(z.ExplicitRoles) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *User) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0001 uint32 + zb0001, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 31 { + err = msgp.ArrayError{Wanted: 31, Got: zb0001} + return + } + z.Id, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + z.CreateAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + z.UpdateAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "UpdateAt") + return + } + z.DeleteAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + z.Username, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Username") + return + } + z.Password, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Password") + return + } + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "AuthData") + return + } + z.AuthData = nil + } else { + if z.AuthData == nil { + z.AuthData = new(string) + } + *z.AuthData, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AuthData") + return + } + } + z.AuthService, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AuthService") + return + } + z.Email, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Email") + return + } + z.EmailVerified, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "EmailVerified") + return + } + z.Nickname, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Nickname") + return + } + z.FirstName, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "FirstName") + return + } + z.LastName, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "LastName") + return + } + z.Position, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Position") + return + } + z.Roles, err = dc.ReadString() + if err != nil { + err = 
msgp.WrapError(err, "Roles") + return + } + z.AllowMarketing, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "AllowMarketing") + return + } + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + if z.Props == nil { + z.Props = make(StringMap, zb0002) + } else if len(z.Props) > 0 { + for key := range z.Props { + delete(z.Props, key) + } + } + for zb0002 > 0 { + zb0002-- + var za0001 string + var za0002 string + za0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + za0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + z.Props[za0001] = za0002 + } + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + if z.NotifyProps == nil { + z.NotifyProps = make(StringMap, zb0003) + } else if len(z.NotifyProps) > 0 { + for key := range z.NotifyProps { + delete(z.NotifyProps, key) + } + } + for zb0003 > 0 { + zb0003-- + var za0003 string + var za0004 string + za0003, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + za0004, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "NotifyProps", za0003) + return + } + z.NotifyProps[za0003] = za0004 + } + z.LastPasswordUpdate, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "LastPasswordUpdate") + return + } + z.LastPictureUpdate, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "LastPictureUpdate") + return + } + z.FailedAttempts, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "FailedAttempts") + return + } + z.Locale, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Locale") + return + } + var zb0004 uint32 + zb0004, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + if z.Timezone == nil { + z.Timezone = make(StringMap, zb0004) + } else if len(z.Timezone) > 0 { + for key := range z.Timezone { + delete(z.Timezone, key) + } + } + for zb0004 > 0 { + zb0004-- + var za0005 string + var za0006 string + za0005, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + za0006, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Timezone", za0005) + return + } + z.Timezone[za0005] = za0006 + } + z.MfaActive, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "MfaActive") + return + } + z.MfaSecret, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "MfaSecret") + return + } + z.LastActivityAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + z.IsBot, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "IsBot") + return + } + z.BotDescription, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "BotDescription") + return + } + z.BotLastIconUpdate, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "BotLastIconUpdate") + return + } + z.TermsOfServiceId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceId") + return + } + z.TermsOfServiceCreateAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceCreateAt") + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *User) EncodeMsg(en *msgp.Writer) (err error) { + // array header, size 31 + err = en.Append(0xdc, 0x0, 
0x1f) + if err != nil { + return + } + err = en.WriteString(z.Id) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + err = en.WriteInt64(z.CreateAt) + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + err = en.WriteInt64(z.UpdateAt) + if err != nil { + err = msgp.WrapError(err, "UpdateAt") + return + } + err = en.WriteInt64(z.DeleteAt) + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + err = en.WriteString(z.Username) + if err != nil { + err = msgp.WrapError(err, "Username") + return + } + err = en.WriteString(z.Password) + if err != nil { + err = msgp.WrapError(err, "Password") + return + } + if z.AuthData == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = en.WriteString(*z.AuthData) + if err != nil { + err = msgp.WrapError(err, "AuthData") + return + } + } + err = en.WriteString(z.AuthService) + if err != nil { + err = msgp.WrapError(err, "AuthService") + return + } + err = en.WriteString(z.Email) + if err != nil { + err = msgp.WrapError(err, "Email") + return + } + err = en.WriteBool(z.EmailVerified) + if err != nil { + err = msgp.WrapError(err, "EmailVerified") + return + } + err = en.WriteString(z.Nickname) + if err != nil { + err = msgp.WrapError(err, "Nickname") + return + } + err = en.WriteString(z.FirstName) + if err != nil { + err = msgp.WrapError(err, "FirstName") + return + } + err = en.WriteString(z.LastName) + if err != nil { + err = msgp.WrapError(err, "LastName") + return + } + err = en.WriteString(z.Position) + if err != nil { + err = msgp.WrapError(err, "Position") + return + } + err = en.WriteString(z.Roles) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + err = en.WriteBool(z.AllowMarketing) + if err != nil { + err = msgp.WrapError(err, "AllowMarketing") + return + } + err = en.WriteMapHeader(uint32(len(z.Props))) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + for za0001, za0002 := range z.Props { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + } + err = en.WriteMapHeader(uint32(len(z.NotifyProps))) + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + for za0003, za0004 := range z.NotifyProps { + err = en.WriteString(za0003) + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + err = en.WriteString(za0004) + if err != nil { + err = msgp.WrapError(err, "NotifyProps", za0003) + return + } + } + err = en.WriteInt64(z.LastPasswordUpdate) + if err != nil { + err = msgp.WrapError(err, "LastPasswordUpdate") + return + } + err = en.WriteInt64(z.LastPictureUpdate) + if err != nil { + err = msgp.WrapError(err, "LastPictureUpdate") + return + } + err = en.WriteInt(z.FailedAttempts) + if err != nil { + err = msgp.WrapError(err, "FailedAttempts") + return + } + err = en.WriteString(z.Locale) + if err != nil { + err = msgp.WrapError(err, "Locale") + return + } + err = en.WriteMapHeader(uint32(len(z.Timezone))) + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + for za0005, za0006 := range z.Timezone { + err = en.WriteString(za0005) + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + err = en.WriteString(za0006) + if err != nil { + err = msgp.WrapError(err, "Timezone", za0005) + return + } + } + err = en.WriteBool(z.MfaActive) + if err != nil { + err = msgp.WrapError(err, "MfaActive") + return + } + err = 
en.WriteString(z.MfaSecret) + if err != nil { + err = msgp.WrapError(err, "MfaSecret") + return + } + err = en.WriteInt64(z.LastActivityAt) + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + err = en.WriteBool(z.IsBot) + if err != nil { + err = msgp.WrapError(err, "IsBot") + return + } + err = en.WriteString(z.BotDescription) + if err != nil { + err = msgp.WrapError(err, "BotDescription") + return + } + err = en.WriteInt64(z.BotLastIconUpdate) + if err != nil { + err = msgp.WrapError(err, "BotLastIconUpdate") + return + } + err = en.WriteString(z.TermsOfServiceId) + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceId") + return + } + err = en.WriteInt64(z.TermsOfServiceCreateAt) + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceCreateAt") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *User) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // array header, size 31 + o = append(o, 0xdc, 0x0, 0x1f) + o = msgp.AppendString(o, z.Id) + o = msgp.AppendInt64(o, z.CreateAt) + o = msgp.AppendInt64(o, z.UpdateAt) + o = msgp.AppendInt64(o, z.DeleteAt) + o = msgp.AppendString(o, z.Username) + o = msgp.AppendString(o, z.Password) + if z.AuthData == nil { + o = msgp.AppendNil(o) + } else { + o = msgp.AppendString(o, *z.AuthData) + } + o = msgp.AppendString(o, z.AuthService) + o = msgp.AppendString(o, z.Email) + o = msgp.AppendBool(o, z.EmailVerified) + o = msgp.AppendString(o, z.Nickname) + o = msgp.AppendString(o, z.FirstName) + o = msgp.AppendString(o, z.LastName) + o = msgp.AppendString(o, z.Position) + o = msgp.AppendString(o, z.Roles) + o = msgp.AppendBool(o, z.AllowMarketing) + o = msgp.AppendMapHeader(o, uint32(len(z.Props))) + for za0001, za0002 := range z.Props { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + o = msgp.AppendMapHeader(o, uint32(len(z.NotifyProps))) + for za0003, za0004 := range z.NotifyProps { + o = msgp.AppendString(o, za0003) + o = msgp.AppendString(o, za0004) + } + o = msgp.AppendInt64(o, z.LastPasswordUpdate) + o = msgp.AppendInt64(o, z.LastPictureUpdate) + o = msgp.AppendInt(o, z.FailedAttempts) + o = msgp.AppendString(o, z.Locale) + o = msgp.AppendMapHeader(o, uint32(len(z.Timezone))) + for za0005, za0006 := range z.Timezone { + o = msgp.AppendString(o, za0005) + o = msgp.AppendString(o, za0006) + } + o = msgp.AppendBool(o, z.MfaActive) + o = msgp.AppendString(o, z.MfaSecret) + o = msgp.AppendInt64(o, z.LastActivityAt) + o = msgp.AppendBool(o, z.IsBot) + o = msgp.AppendString(o, z.BotDescription) + o = msgp.AppendInt64(o, z.BotLastIconUpdate) + o = msgp.AppendString(o, z.TermsOfServiceId) + o = msgp.AppendInt64(o, z.TermsOfServiceCreateAt) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *User) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0001 uint32 + zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 31 { + err = msgp.ArrayError{Wanted: 31, Got: zb0001} + return + } + z.Id, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + z.CreateAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + z.UpdateAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "UpdateAt") + return + } + z.DeleteAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } 
+ z.Username, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Username") + return + } + z.Password, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Password") + return + } + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.AuthData = nil + } else { + if z.AuthData == nil { + z.AuthData = new(string) + } + *z.AuthData, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AuthData") + return + } + } + z.AuthService, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AuthService") + return + } + z.Email, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Email") + return + } + z.EmailVerified, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "EmailVerified") + return + } + z.Nickname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Nickname") + return + } + z.FirstName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "FirstName") + return + } + z.LastName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastName") + return + } + z.Position, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Position") + return + } + z.Roles, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.AllowMarketing, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllowMarketing") + return + } + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + if z.Props == nil { + z.Props = make(StringMap, zb0002) + } else if len(z.Props) > 0 { + for key := range z.Props { + delete(z.Props, key) + } + } + for zb0002 > 0 { + var za0001 string + var za0002 string + zb0002-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + z.Props[za0001] = za0002 + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + if z.NotifyProps == nil { + z.NotifyProps = make(StringMap, zb0003) + } else if len(z.NotifyProps) > 0 { + for key := range z.NotifyProps { + delete(z.NotifyProps, key) + } + } + for zb0003 > 0 { + var za0003 string + var za0004 string + zb0003-- + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + za0004, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NotifyProps", za0003) + return + } + z.NotifyProps[za0003] = za0004 + } + z.LastPasswordUpdate, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastPasswordUpdate") + return + } + z.LastPictureUpdate, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastPictureUpdate") + return + } + z.FailedAttempts, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "FailedAttempts") + return + } + z.Locale, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Locale") + return + } + var zb0004 uint32 + zb0004, bts, err = 
msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + if z.Timezone == nil { + z.Timezone = make(StringMap, zb0004) + } else if len(z.Timezone) > 0 { + for key := range z.Timezone { + delete(z.Timezone, key) + } + } + for zb0004 > 0 { + var za0005 string + var za0006 string + zb0004-- + za0005, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + za0006, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Timezone", za0005) + return + } + z.Timezone[za0005] = za0006 + } + z.MfaActive, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MfaActive") + return + } + z.MfaSecret, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MfaSecret") + return + } + z.LastActivityAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + z.IsBot, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "IsBot") + return + } + z.BotDescription, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "BotDescription") + return + } + z.BotLastIconUpdate, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "BotLastIconUpdate") + return + } + z.TermsOfServiceId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceId") + return + } + z.TermsOfServiceCreateAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceCreateAt") + return + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *User) Msgsize() (s int) { + s = 3 + msgp.StringPrefixSize + len(z.Id) + msgp.Int64Size + msgp.Int64Size + msgp.Int64Size + msgp.StringPrefixSize + len(z.Username) + msgp.StringPrefixSize + len(z.Password) + if z.AuthData == nil { + s += msgp.NilSize + } else { + s += msgp.StringPrefixSize + len(*z.AuthData) + } + s += msgp.StringPrefixSize + len(z.AuthService) + msgp.StringPrefixSize + len(z.Email) + msgp.BoolSize + msgp.StringPrefixSize + len(z.Nickname) + msgp.StringPrefixSize + len(z.FirstName) + msgp.StringPrefixSize + len(z.LastName) + msgp.StringPrefixSize + len(z.Position) + msgp.StringPrefixSize + len(z.Roles) + msgp.BoolSize + msgp.MapHeaderSize + if z.Props != nil { + for za0001, za0002 := range z.Props { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += msgp.MapHeaderSize + if z.NotifyProps != nil { + for za0003, za0004 := range z.NotifyProps { + _ = za0004 + s += msgp.StringPrefixSize + len(za0003) + msgp.StringPrefixSize + len(za0004) + } + } + s += msgp.Int64Size + msgp.Int64Size + msgp.IntSize + msgp.StringPrefixSize + len(z.Locale) + msgp.MapHeaderSize + if z.Timezone != nil { + for za0005, za0006 := range z.Timezone { + _ = za0006 + s += msgp.StringPrefixSize + len(za0005) + msgp.StringPrefixSize + len(za0006) + } + } + s += msgp.BoolSize + msgp.StringPrefixSize + len(z.MfaSecret) + msgp.Int64Size + msgp.BoolSize + msgp.StringPrefixSize + len(z.BotDescription) + msgp.Int64Size + msgp.StringPrefixSize + len(z.TermsOfServiceId) + msgp.Int64Size + return +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/session.go b/vendor/github.com/mattermost/mattermost-server/v5/model/session.go index b5567a65..976e1229 100644 --- 
a/vendor/github.com/mattermost/mattermost-server/v5/model/session.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/session.go @@ -30,6 +30,11 @@ const ( SESSION_USER_ACCESS_TOKEN_EXPIRY = 100 * 365 // 100 years ) +//msgp:tuple Session + +// Session contains the user session details. +// This struct's serializer methods are auto-generated. If a new field is added/removed, +// please run make gen-serialized. type Session struct { Id string `json:"id"` Token string `json:"token"` @@ -40,6 +45,7 @@ type Session struct { DeviceId string `json:"device_id"` Roles string `json:"roles"` IsOAuth bool `json:"is_oauth"` + ExpiredNotify bool `json:"expired_notify"` Props StringMap `json:"props"` TeamMembers []*TeamMember `json:"team_members" db:"-"` Local bool `json:"local" db:"-"` @@ -114,6 +120,9 @@ func (me *Session) IsExpired() bool { return false } +// Deprecated: SetExpireInDays is deprecated and should not be used. +// Use (*App).SetSessionExpireInDays instead which handles the +// cases where the new ExpiresAt is not relative to CreateAt. func (me *Session) SetExpireInDays(days int) { if me.CreateAt == 0 { me.ExpiresAt = GetMillis() + (1000 * 60 * 60 * 24 * int64(days)) @@ -171,8 +180,21 @@ func (me *Session) IsSaml() bool { return isSaml } +func (me *Session) IsOAuthUser() bool { + val, ok := me.Props[USER_AUTH_SERVICE_IS_OAUTH] + if !ok { + return false + } + isOAuthUser, err := strconv.ParseBool(val) + if err != nil { + mlog.Error("Error parsing boolean property from Session", mlog.Err(err)) + return false + } + return isOAuthUser +} + func (me *Session) IsSSOLogin() bool { - return me.IsOAuth || me.IsSaml() + return me.IsOAuthUser() || me.IsSaml() } func (me *Session) GetUserRoles() []string { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/status.go b/vendor/github.com/mattermost/mattermost-server/v5/model/status.go index 741fa1ed..1f32422a 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/status.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/status.go @@ -35,7 +35,8 @@ func (o *Status) ToJson() string { } func (o *Status) ToClusterJson() string { - b, _ := json.Marshal(o) + oCopy := *o + b, _ := json.Marshal(oCopy) return string(b) } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/system.go b/vendor/github.com/mattermost/mattermost-server/v5/model/system.go index 4c3132e2..f826276f 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/system.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/system.go @@ -10,15 +10,37 @@ import ( ) const ( - SYSTEM_DIAGNOSTIC_ID = "DiagnosticId" - SYSTEM_RAN_UNIT_TESTS = "RanUnitTests" - SYSTEM_LAST_SECURITY_TIME = "LastSecurityTime" - SYSTEM_ACTIVE_LICENSE_ID = "ActiveLicenseId" - SYSTEM_LAST_COMPLIANCE_TIME = "LastComplianceTime" - SYSTEM_ASYMMETRIC_SIGNING_KEY = "AsymmetricSigningKey" - SYSTEM_POST_ACTION_COOKIE_SECRET = "PostActionCookieSecret" - SYSTEM_INSTALLATION_DATE_KEY = "InstallationDate" - SYSTEM_FIRST_SERVER_RUN_TIMESTAMP_KEY = "FirstServerRunTimestamp" + SYSTEM_TELEMETRY_ID = "DiagnosticId" + SYSTEM_RAN_UNIT_TESTS = "RanUnitTests" + SYSTEM_LAST_SECURITY_TIME = "LastSecurityTime" + SYSTEM_ACTIVE_LICENSE_ID = "ActiveLicenseId" + SYSTEM_LAST_COMPLIANCE_TIME = "LastComplianceTime" + SYSTEM_ASYMMETRIC_SIGNING_KEY = "AsymmetricSigningKey" + SYSTEM_POST_ACTION_COOKIE_SECRET = "PostActionCookieSecret" + SYSTEM_INSTALLATION_DATE_KEY = "InstallationDate" + SYSTEM_FIRST_SERVER_RUN_TIMESTAMP_KEY = "FirstServerRunTimestamp" + 
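+ // Note: the SYSTEM_WARN_METRIC_* keys that follow are the identifiers used as entries in the WarnMetricsTable defined further down in this file.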
SYSTEM_CLUSTER_ENCRYPTION_KEY = "ClusterEncryptionKey" + SYSTEM_UPGRADED_FROM_TE_ID = "UpgradedFromTE" + SYSTEM_WARN_METRIC_NUMBER_OF_TEAMS_5 = "warn_metric_number_of_teams_5" + SYSTEM_WARN_METRIC_NUMBER_OF_CHANNELS_50 = "warn_metric_number_of_channels_50" + SYSTEM_WARN_METRIC_MFA = "warn_metric_mfa" + SYSTEM_WARN_METRIC_EMAIL_DOMAIN = "warn_metric_email_domain" + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_100 = "warn_metric_number_of_active_users_100" + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_200 = "warn_metric_number_of_active_users_200" + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_300 = "warn_metric_number_of_active_users_300" + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500 = "warn_metric_number_of_active_users_500" + SYSTEM_WARN_METRIC_NUMBER_OF_POSTS_2M = "warn_metric_number_of_posts_2M" + SYSTEM_WARN_METRIC_LAST_RUN_TIMESTAMP_KEY = "LastWarnMetricRunTimestamp" +) + +const ( + WARN_METRIC_STATUS_LIMIT_REACHED = "true" + WARN_METRIC_STATUS_RUNONCE = "runonce" + WARN_METRIC_STATUS_ACK = "ack" + WARN_METRIC_STATUS_STORE_PREFIX = "warn_metric_" + WARN_METRIC_JOB_INTERVAL = 24 * 7 + WARN_METRIC_NUMBER_OF_ACTIVE_USERS_25 = 25 + WARN_METRIC_JOB_WAIT_TIME = 1000 * 3600 * 24 * 7 // 7 days ) type System struct { @@ -69,3 +91,114 @@ func ServerBusyStateFromJson(r io.Reader) *ServerBusyState { json.NewDecoder(r).Decode(&sbs) return sbs } + +var WarnMetricsTable = map[string]WarnMetric{ + SYSTEM_WARN_METRIC_MFA: { + Id: SYSTEM_WARN_METRIC_MFA, + Limit: -1, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_EMAIL_DOMAIN: { + Id: SYSTEM_WARN_METRIC_EMAIL_DOMAIN, + Limit: -1, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_TEAMS_5: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_TEAMS_5, + Limit: 5, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_CHANNELS_50: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_CHANNELS_50, + Limit: 50, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_100: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_100, + Limit: 100, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_200: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_200, + Limit: 200, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_300: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_300, + Limit: 300, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500, + Limit: 500, + IsBotOnly: false, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_POSTS_2M: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_POSTS_2M, + Limit: 2000000, + IsBotOnly: false, + IsRunOnce: true, + }, +} + +type WarnMetric struct { + Id string + Limit int64 + IsBotOnly bool + IsRunOnce bool +} + +type WarnMetricDisplayTexts struct { + BotTitle string + BotMessageBody string + BotSuccessMessage string + EmailBody string +} +type WarnMetricStatus struct { + Id string `json:"id"` + Limit int64 `json:"limit"` + Acked bool `json:"acked"` + StoreStatus string `json:"store_status,omitempty"` +} + +func (wms *WarnMetricStatus) ToJson() string { + b, _ := json.Marshal(wms) + return string(b) +} + +func WarnMetricStatusFromJson(data io.Reader) *WarnMetricStatus { + var o WarnMetricStatus + if err := json.NewDecoder(data).Decode(&o); err != nil { + return nil + } else { + return &o + } +} + +func MapWarnMetricStatusToJson(o map[string]*WarnMetricStatus) string { + b, _ := json.Marshal(o) + return string(b) +} + +type 
SendWarnMetricAck struct { + ForceAck bool `json:"forceAck"` +} + +func (swma *SendWarnMetricAck) ToJson() string { + b, _ := json.Marshal(swma) + return string(b) +} + +func SendWarnMetricAckFromJson(r io.Reader) *SendWarnMetricAck { + var swma *SendWarnMetricAck + json.NewDecoder(r).Decode(&swma) + return swma +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go index b8b1fe30..f9de5801 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go @@ -9,9 +9,12 @@ import ( ) type TeamSearch struct { - Term string `json:"term"` - Page *int `json:"page,omitempty"` - PerPage *int `json:"per_page,omitempty"` + Term string `json:"term"` + Page *int `json:"page,omitempty"` + PerPage *int `json:"per_page,omitempty"` + AllowOpenInvite *bool `json:"allow_open_invite,omitempty"` + GroupConstrained *bool `json:"group_constrained,omitempty"` + IncludeGroupConstrained *bool `json:"include_group_constrained,omitempty"` } func (t *TeamSearch) IsPaginated() bool { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go b/vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go new file mode 100644 index 00000000..e2e9d3bf --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go @@ -0,0 +1,25 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type TypingRequest struct { + ChannelId string `json:"channel_id"` + ParentId string `json:"parent_id"` +} + +func (o *TypingRequest) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func TypingRequestFromJson(data io.Reader) *TypingRequest { + var o *TypingRequest + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/upload_session.go b/vendor/github.com/mattermost/mattermost-server/v5/model/upload_session.go new file mode 100644 index 00000000..663ee0b1 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/upload_session.go @@ -0,0 +1,141 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "fmt" + "io" + "net/http" +) + +// UploadType defines the type of an upload. +type UploadType string + +const ( + UploadTypeAttachment UploadType = "attachment" + UploadTypeImport UploadType = "import" +) + +// UploadSession contains information used to keep track of a file upload. +type UploadSession struct { + // The unique identifier for the session. + Id string `json:"id"` + // The type of the upload. + Type UploadType `json:"type"` + // The timestamp of creation. + CreateAt int64 `json:"create_at"` + // The id of the user performing the upload. + UserId string `json:"user_id"` + // The id of the channel to upload to. + ChannelId string `json:"channel_id"` + // The name of the file to upload. + Filename string `json:"filename"` + // The path where the file is stored. + Path string `json:"-"` + // The size of the file to upload. + FileSize int64 `json:"file_size"` + // The amount of received data in bytes. If equal to FileSize it means the + // upload has finished. 
+ FileOffset int64 `json:"file_offset"` +} + +// ToJson serializes the UploadSession into JSON and returns it as string. +func (us *UploadSession) ToJson() string { + b, _ := json.Marshal(us) + return string(b) +} + +// UploadSessionsToJson serializes a list of UploadSession into JSON and +// returns it as string. +func UploadSessionsToJson(uss []*UploadSession) string { + b, _ := json.Marshal(uss) + return string(b) +} + +// UploadSessionsFromJson deserializes a list of UploadSession from JSON data. +func UploadSessionsFromJson(data io.Reader) []*UploadSession { + decoder := json.NewDecoder(data) + var uss []*UploadSession + if err := decoder.Decode(&uss); err != nil { + return nil + } + return uss +} + +// UploadSessionFromJson deserializes the UploadSession from JSON data. +func UploadSessionFromJson(data io.Reader) *UploadSession { + decoder := json.NewDecoder(data) + var us UploadSession + if err := decoder.Decode(&us); err != nil { + return nil + } + return &us +} + +// PreSave is a utility function used to fill required information. +func (us *UploadSession) PreSave() { + if us.Id == "" { + us.Id = NewId() + } + + if us.CreateAt == 0 { + us.CreateAt = GetMillis() + } +} + +// IsValid validates an UploadType. It returns an error in case of +// failure. +func (t UploadType) IsValid() error { + switch t { + case UploadTypeAttachment: + return nil + case UploadTypeImport: + return nil + default: + } + return fmt.Errorf("invalid UploadType %s", t) +} + +// IsValid validates an UploadSession. It returns an error in case of +// failure. +func (us *UploadSession) IsValid() *AppError { + if !IsValidId(us.Id) { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if err := us.Type.IsValid(); err != nil { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.type.app_error", nil, err.Error(), http.StatusBadRequest) + } + + if !IsValidId(us.UserId) { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.user_id.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.Type == UploadTypeAttachment && !IsValidId(us.ChannelId) { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.channel_id.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.CreateAt == 0 { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.create_at.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.Filename == "" { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.filename.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.FileSize <= 0 { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.file_size.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.FileOffset < 0 || us.FileOffset > us.FileSize { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.file_offset.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.Path == "" { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.path.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user.go index 168605ad..4e4d067c 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/user.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user.go @@ -59,6 
+59,11 @@ const ( USER_LOCALE_MAX_LENGTH = 5 ) +//msgp:tuple User + +// User contains the details about the user. +// This struct's serializer methods are auto-generated. If a new field is added/removed, +// please run make gen-serialized. type User struct { Id string `json:"id"` CreateAt int64 `json:"create_at,omitempty"` @@ -124,6 +129,7 @@ type UserForIndexing struct { Nickname string `json:"nickname"` FirstName string `json:"first_name"` LastName string `json:"last_name"` + Roles string `json:"roles"` CreateAt int64 `json:"create_at"` DeleteAt int64 `json:"delete_at"` TeamsIds []string `json:"team_id"` diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go index 3c20b23a..ee474883 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go @@ -13,6 +13,14 @@ type UserCountOptions struct { ExcludeRegularUsers bool // Only include users on a specific team. "" for any team. TeamId string + // Only include users on a specific channel. "" for any channel. + ChannelId string // Restrict to search in a list of teams and channels ViewRestrictions *ViewUsersRestrictions + // Only include users matching any of the given system wide roles. + Roles []string + // Only include users matching any of the given channel roles, must be used with ChannelId. + ChannelRoles []string + // Only include users matching any of the given team roles, must be used with TeamId. + TeamRoles []string } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go index f865d53c..2748d735 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go @@ -12,6 +12,8 @@ type UserGetOptions struct { InChannelId string // Filters the users not in the channel NotInChannelId string + // Filters the users in the group + InGroupId string // Filters the users group constrained GroupConstrained bool // Filters the users without a team @@ -22,6 +24,12 @@ type UserGetOptions struct { Active bool // Filters for the given role Role string + // Filters for users matching any of the given system wide roles + Roles []string + // Filters for users matching any of the given channel roles, must be used with InChannelId + ChannelRoles []string + // Filters for users matching any of the given team roles, must be used with InTeamId + TeamRoles []string // Sorting option Sort string // Restrict to search in a list of teams and channels diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go index fa9fa8a2..0a721eac 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go @@ -13,16 +13,20 @@ const USER_SEARCH_DEFAULT_LIMIT = 100 // UserSearch captures the parameters provided by a client for initiating a user search. 
type UserSearch struct { - Term string `json:"term"` - TeamId string `json:"team_id"` - NotInTeamId string `json:"not_in_team_id"` - InChannelId string `json:"in_channel_id"` - NotInChannelId string `json:"not_in_channel_id"` - GroupConstrained bool `json:"group_constrained"` - AllowInactive bool `json:"allow_inactive"` - WithoutTeam bool `json:"without_team"` - Limit int `json:"limit"` - Role string `json:"role"` + Term string `json:"term"` + TeamId string `json:"team_id"` + NotInTeamId string `json:"not_in_team_id"` + InChannelId string `json:"in_channel_id"` + NotInChannelId string `json:"not_in_channel_id"` + InGroupId string `json:"in_group_id"` + GroupConstrained bool `json:"group_constrained"` + AllowInactive bool `json:"allow_inactive"` + WithoutTeam bool `json:"without_team"` + Limit int `json:"limit"` + Role string `json:"role"` + Roles []string `json:"roles"` + ChannelRoles []string `json:"channel_roles"` + TeamRoles []string `json:"team_roles"` } // ToJson convert a User to a json string @@ -60,6 +64,12 @@ type UserSearchOptions struct { Limit int // Filters for the given role Role string + // Filters for users that have any of the given system roles + Roles []string + // Filters for users that have the given channel roles to be used when searching in a channel + ChannelRoles []string + // Filters for users that have the given team roles to be used when searching in a team + TeamRoles []string // Restrict to search in a list of teams and channels ViewRestrictions *ViewUsersRestrictions // List of allowed channels diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/utils.go b/vendor/github.com/mattermost/mattermost-server/v5/model/utils.go index e75fb022..2ab71090 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/utils.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/utils.go @@ -659,12 +659,12 @@ func AsStringBoolMap(list []string) map[string]bool { // SanitizeUnicode will remove undesirable Unicode characters from a string. func SanitizeUnicode(s string) string { - return strings.Map(filterBlacklist, s) + return strings.Map(filterBlocklist, s) } -// filterBlacklist returns `r` if it is not in the blacklist, otherwise drop (-1). -// Blacklist is taken from https://www.w3.org/TR/unicode-xml/#Charlist -func filterBlacklist(r rune) rune { +// filterBlocklist returns `r` if it is not in the blocklist, otherwise drop (-1). +// Blocklist is taken from https://www.w3.org/TR/unicode-xml/#Charlist +func filterBlocklist(r rune) rune { const drop = -1 switch r { case '\u0340', '\u0341': // clones of grave and acute; deprecated in Unicode diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/version.go b/vendor/github.com/mattermost/mattermost-server/v5/model/version.go index 3cefff8c..11e0427a 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/version.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/version.go @@ -13,6 +13,9 @@ import ( // It should be maintained in chronological order with most current // release at the front of the list. 
var versions = []string{ + "5.28.0", + "5.27.0", + "5.26.0", "5.25.0", "5.24.0", "5.23.0", diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go b/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go index b3e4b186..281b50cf 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go @@ -62,6 +62,12 @@ const ( WEBSOCKET_EVENT_RECEIVED_GROUP_NOT_ASSOCIATED_TO_TEAM = "received_group_not_associated_to_team" WEBSOCKET_EVENT_RECEIVED_GROUP_ASSOCIATED_TO_CHANNEL = "received_group_associated_to_channel" WEBSOCKET_EVENT_RECEIVED_GROUP_NOT_ASSOCIATED_TO_CHANNEL = "received_group_not_associated_to_channel" + WEBSOCKET_EVENT_SIDEBAR_CATEGORY_CREATED = "sidebar_category_created" + WEBSOCKET_EVENT_SIDEBAR_CATEGORY_UPDATED = "sidebar_category_updated" + WEBSOCKET_EVENT_SIDEBAR_CATEGORY_DELETED = "sidebar_category_deleted" + WEBSOCKET_EVENT_SIDEBAR_CATEGORY_ORDER_UPDATED = "sidebar_category_order_updated" + WEBSOCKET_WARN_METRIC_STATUS_RECEIVED = "warn_metric_status_received" + WEBSOCKET_WARN_METRIC_STATUS_REMOVED = "warn_metric_status_removed" ) type WebSocketMessage interface { diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 60816288..20eea2b7 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,3 +1,20 @@ +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + ## 1.2.3 * Fix duplicate entries in Keys list with pointer values. [GH-185] diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index e0535104..f41bcc58 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -100,6 +100,47 @@ // "address": "123 Maple St.", // } // +// Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. +// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source { +// Age int `mapstructure:",omitempty"` +// } +// +// Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" 
+// } +// // Other Configuration // // mapstructure is highly configurable. See the DecoderConfig struct @@ -422,7 +463,34 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e // value to "data" of that type. func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { if val.IsValid() && val.Elem().IsValid() { - return d.decode(name, data, val.Elem()) + elem := val.Elem() + + // If we can't address this element, then its not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil } dataVal := reflect.ValueOf(data) @@ -799,30 +867,31 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } tagValue := f.Tag.Get(d.config.TagName) - tagParts := strings.Split(tagValue, ",") - - // Determine the name of the key in the map keyName := f.Name - if tagParts[0] != "" { - if tagParts[0] == "-" { - continue - } - keyName = tagParts[0] - } // If Squash is set in the config, we squash the field down. - squash := d.config.Squash && v.Kind() == reflect.Struct - // If "squash" is specified in the tag, we squash the field down. - if !squash { - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + // Determine the name of the key in the map + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue + } + + // If "squash" is specified in the tag, we squash the field down. + squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1 if squash && v.Kind() != reflect.Struct { return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) } + keyName = tagValue[:index] + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue } switch v.Kind() { @@ -837,11 +906,22 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re mType := reflect.MapOf(vKeyType, vElemType) vMap := reflect.MakeMap(mType) - err := d.decode(keyName, x.Interface(), vMap) + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) if err != nil { return err } + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. 
+ vMap = reflect.Indirect(addrVal) + if squash { for _, k := range vMap.MapKeys() { valMap.SetMapIndex(k, vMap.MapIndex(k)) @@ -1085,13 +1165,23 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Not the most efficient way to do this but we can optimize later if // we want to. To convert from struct to struct we go to map first // as an intermediary. - m := make(map[string]interface{}) - mval := reflect.Indirect(reflect.ValueOf(&m)) - if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil { + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { return err } - result := d.decodeStructFromMap(name, mval, val) + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) return result default: @@ -1145,7 +1235,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e fieldKind := fieldType.Type.Kind() // If "squash" is specified in the tag, we squash the field down. - squash := d.config.Squash && fieldKind == reflect.Struct + squash := d.config.Squash && fieldKind == reflect.Struct && fieldType.Anonymous remain := false // We always parse the tags cause we're looking for other tags too @@ -1173,9 +1263,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } // Build our field - fieldCurrent := field{fieldType, structVal.Field(i)} if remain { - remainField = &fieldCurrent + remainField = &field{fieldType, structVal.Field(i)} } else { // Normal struct field, store it away fields = append(fields, field{fieldType, structVal.Field(i)}) @@ -1294,6 +1383,24 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e return nil } +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + func getKind(val reflect.Value) reflect.Kind { kind := val.Kind() diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go index 5c0960d8..7286824d 100644 --- a/vendor/github.com/pborman/uuid/time.go +++ b/vendor/github.com/pborman/uuid/time.go @@ -29,7 +29,7 @@ func GetTime() (Time, uint16, error) { return guuid.GetTime() } // for func ClockSequence() int { return guuid.ClockSequence() } -// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to // -1 causes a new sequence to be generated. 
func SetClockSequence(seq int) { guuid.SetClockSequence(seq) } diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go index b459d46d..767dd0c3 100644 --- a/vendor/github.com/pborman/uuid/version4.go +++ b/vendor/github.com/pborman/uuid/version4.go @@ -6,7 +6,7 @@ package uuid import guuid "github.com/google/uuid" -// Random returns a Random (Version 4) UUID or panics. +// NewRandom returns a Random (Version 4) UUID or panics. // // The strength of the UUIDs is based on the strength of the crypto/rand // package. diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md index 4ef303af..6831deb5 100644 --- a/vendor/github.com/pelletier/go-toml/README.md +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -3,7 +3,7 @@ Go library for the [TOML](https://github.com/mojombo/toml) format. This library supports TOML version -[v0.5.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md) +[v1.0.0-rc.1](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v1.0.0-rc.1.md) [![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml) [![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) @@ -18,7 +18,7 @@ Go-toml provides the following features for using data parsed from TOML document * Load TOML documents from files and string data * Easily navigate TOML structure using Tree -* Mashaling and unmarshaling to and from data structures +* Marshaling and unmarshaling to and from data structures * Line & column position data for all parsed elements * [Query support similar to JSON-Path](query/) * Syntax errors contain line and column numbers @@ -74,7 +74,7 @@ Or use a query: q, _ := query.Compile("$..[user,password]") results := q.Execute(config) for ii, item := range results.Values() { - fmt.Println("Query result %d: %v", ii, item) + fmt.Printf("Query result %d: %v\n", ii, item) } ``` @@ -87,7 +87,7 @@ The documentation and additional examples are available at Go-toml provides two handy command line tools: -* `tomll`: Reads TOML files and lint them. +* `tomll`: Reads TOML files and lints them. ``` go install github.com/pelletier/go-toml/cmd/tomll @@ -99,9 +99,9 @@ Go-toml provides two handy command line tools: go install github.com/pelletier/go-toml/cmd/tomljson tomljson --help ``` - + * `jsontoml`: Reads a JSON file and outputs a TOML representation. 
- + ``` go install github.com/pelletier/go-toml/cmd/jsontoml jsontoml --help diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml index 12950a16..780d9c68 100644 --- a/vendor/github.com/pelletier/go-toml/example-crlf.toml +++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml @@ -27,3 +27,4 @@ enabled = true [clients] data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml index 3d902f28..f45bf88b 100644 --- a/vendor/github.com/pelletier/go-toml/example.toml +++ b/vendor/github.com/pelletier/go-toml/example.toml @@ -27,3 +27,4 @@ enabled = true [clients] data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/go.mod b/vendor/github.com/pelletier/go-toml/go.mod index 07a258be..c7faa6b3 100644 --- a/vendor/github.com/pelletier/go-toml/go.mod +++ b/vendor/github.com/pelletier/go-toml/go.mod @@ -5,5 +5,5 @@ go 1.12 require ( github.com/BurntSushi/toml v0.3.1 github.com/davecgh/go-spew v1.1.1 - gopkg.in/yaml.v2 v2.2.8 + gopkg.in/yaml.v2 v2.3.0 ) diff --git a/vendor/github.com/pelletier/go-toml/go.sum b/vendor/github.com/pelletier/go-toml/go.sum index b30bc766..6f356470 100644 --- a/vendor/github.com/pelletier/go-toml/go.sum +++ b/vendor/github.com/pelletier/go-toml/go.sum @@ -15,3 +15,5 @@ gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go index e923bc4f..e091500b 100644 --- a/vendor/github.com/pelletier/go-toml/keysparsing.go +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -5,7 +5,6 @@ package toml import ( "errors" "fmt" - "unicode" ) // Convert the bare key group string to an array. 
@@ -109,5 +108,5 @@ func parseKey(key string) ([]string, error) { } func isValidBareChar(r rune) bool { - return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r) + return isAlphanumeric(r) || r == '-' || isDigit(r) } diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go index 88fd91ee..425e847a 100644 --- a/vendor/github.com/pelletier/go-toml/lexer.go +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -26,7 +26,7 @@ type tomlLexer struct { currentTokenStart int currentTokenStop int tokens []token - depth int + brackets []rune line int col int endbufferLine int @@ -123,6 +123,8 @@ func (l *tomlLexer) lexVoid() tomlLexStateFn { for { next := l.peek() switch next { + case '}': // after '{' + return l.lexRightCurlyBrace case '[': return l.lexTableKey case '#': @@ -140,10 +142,6 @@ func (l *tomlLexer) lexVoid() tomlLexStateFn { l.skip() } - if l.depth > 0 { - return l.lexRvalue - } - if isKeyStartChar(next) { return l.lexKey } @@ -167,10 +165,8 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { case '=': return l.lexEqual case '[': - l.depth++ return l.lexLeftBracket case ']': - l.depth-- return l.lexRightBracket case '{': return l.lexLeftCurlyBrace @@ -188,12 +184,10 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { fallthrough case '\n': l.skip() - if l.depth == 0 { - return l.lexVoid + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { + return l.lexRvalue } - return l.lexRvalue - case '_': - return l.errorf("cannot start number with underscore") + return l.lexVoid } if l.follow("true") { @@ -236,10 +230,6 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { return l.lexNumber } - if isAlphanumeric(next) { - return l.lexKey - } - return l.errorf("no value can start with %c", next) } @@ -250,12 +240,17 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { l.next() l.emit(tokenLeftCurlyBrace) + l.brackets = append(l.brackets, '{') return l.lexVoid } func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { l.next() l.emit(tokenRightCurlyBrace) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { + return l.errorf("cannot have '}' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] return l.lexRvalue } @@ -302,6 +297,9 @@ func (l *tomlLexer) lexEqual() tomlLexStateFn { func (l *tomlLexer) lexComma() tomlLexStateFn { l.next() l.emit(tokenComma) + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { + return l.lexVoid + } return l.lexRvalue } @@ -332,7 +330,26 @@ func (l *tomlLexer) lexKey() tomlLexStateFn { } else if r == '\n' { return l.errorf("keys cannot contain new lines") } else if isSpace(r) { - break + str := " " + // skip trailing whitespace + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str += string(r) + l.next() + } + // break loop if not a dot + if r != '.' { + break + } + str += "." + // skip trailing whitespace after dot + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str += string(r) + l.next() + } + growingString += str + continue } else if r == '.' 
{ // skip } else if !isValidBareChar(r) { @@ -361,6 +378,7 @@ func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { l.next() l.emit(tokenLeftBracket) + l.brackets = append(l.brackets, '[') return l.lexRvalue } @@ -512,7 +530,7 @@ func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, } else { r := l.peek() - if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) { + if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { return "", fmt.Errorf("unescaped control character %U", r) } l.next() @@ -543,7 +561,6 @@ func (l *tomlLexer) lexString() tomlLexStateFn { } str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) - if err != nil { return l.errorf(err.Error()) } @@ -615,6 +632,10 @@ func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { func (l *tomlLexer) lexRightBracket() tomlLexStateFn { l.next() l.emit(tokenRightBracket) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { + return l.errorf("cannot have ']' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] return l.lexRvalue } diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go index dcddad8d..db5a7b4f 100644 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -2,6 +2,7 @@ package toml import ( "bytes" + "encoding" "errors" "fmt" "io" @@ -22,6 +23,7 @@ const ( type tomlOpts struct { name string + nameFromTag bool comment string commented bool multiline bool @@ -68,6 +70,9 @@ const ( var timeType = reflect.TypeOf(time.Time{}) var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() +var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() +var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() +var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() var localDateType = reflect.TypeOf(LocalDate{}) var localTimeType = reflect.TypeOf(LocalTime{}) var localDateTimeType = reflect.TypeOf(LocalDateTime{}) @@ -88,12 +93,16 @@ func isPrimitive(mtype reflect.Type) bool { case reflect.String: return true case reflect.Struct: - return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType || isCustomMarshaler(mtype) + return isTimeType(mtype) default: return false } } +func isTimeType(mtype reflect.Type) bool { + return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType +} + // Check if the given marshal type maps to a Tree slice or array func isTreeSequence(mtype reflect.Type) bool { switch mtype.Kind() { @@ -106,6 +115,30 @@ func isTreeSequence(mtype reflect.Type) bool { } } +// Check if the given marshal type maps to a slice or array of a custom marshaler type +func isCustomMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isCustomMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a slice or array of a text marshaler type +func isTextMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTextMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTextMarshaler(mtype.Elem()) || 
isTextMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + // Check if the given marshal type maps to a non-Tree slice or array func isOtherSequence(mtype reflect.Type) bool { switch mtype.Kind() { @@ -140,12 +173,42 @@ func callCustomMarshaler(mval reflect.Value) ([]byte, error) { return mval.Interface().(Marshaler).MarshalTOML() } +func isTextMarshaler(mtype reflect.Type) bool { + return mtype.Implements(textMarshalerType) && !isTimeType(mtype) +} + +func callTextMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(encoding.TextMarshaler).MarshalText() +} + +func isCustomUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(unmarshalerType) +} + +func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { + return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) +} + +func isTextUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(textUnmarshalerType) +} + +func callTextUnmarshaler(mval reflect.Value, text []byte) error { + return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) +} + // Marshaler is the interface implemented by types that // can marshal themselves into valid TOML. type Marshaler interface { MarshalTOML() ([]byte, error) } +// Unmarshaler is the interface implemented by types that +// can unmarshal a TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + /* Marshal returns the TOML encoding of v. Behavior is similar to the Go json encoder, except that there is no concept of a Marshaler interface or MarshalTOML @@ -190,20 +253,23 @@ type Encoder struct { w io.Writer encOpts annotation - line int - col int - order marshalOrder + line int + col int + order marshalOrder + promoteAnon bool + indentation string } // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { return &Encoder{ - w: w, - encOpts: encOptsDefaults, - annotation: annotationDefault, - line: 0, - col: 1, - order: OrderAlphabetical, + w: w, + encOpts: encOptsDefaults, + annotation: annotationDefault, + line: 0, + col: 1, + order: OrderAlphabetical, + indentation: " ", } } @@ -255,6 +321,12 @@ func (e *Encoder) Order(ord marshalOrder) *Encoder { return e } +// Indentation allows to change indentation when marshalling. +func (e *Encoder) Indentation(indent string) *Encoder { + e.indentation = indent + return e +} + // SetTagName allows changing default tag "toml" func (e *Encoder) SetTagName(v string) *Encoder { e.tag = v @@ -279,8 +351,31 @@ func (e *Encoder) SetTagMultiline(v string) *Encoder { return e } +// PromoteAnonymous allows to change how anonymous struct fields are marshaled. +// Usually, they are marshaled as if the inner exported fields were fields in +// the outer struct. However, if an anonymous struct field is given a name in +// its TOML tag, it is treated like a regular struct field with that name. +// rather than being anonymous. +// +// In case anonymous promotion is enabled, all anonymous structs are promoted +// and treated like regular struct fields. 
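Editorial note: to make the new encoder options concrete, here is a hedged sketch of how `Indentation` and `PromoteAnonymous` chain with the existing `Order` option. The config types are invented and the exact rendered layout is not asserted:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pelletier/go-toml"
)

type Credentials struct {
	User string `toml:"user"`
}

type Config struct {
	Credentials        // anonymous; flattened into the parent unless promoted
	Host        string `toml:"host"`
}

func main() {
	cfg := Config{Credentials: Credentials{User: "admin"}, Host: "example.org"}

	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf).
		Indentation("\t").         // indent sub-tables with tabs instead of the default two spaces
		Order(toml.OrderPreserve). // keep struct declaration order
		PromoteAnonymous(true)     // give the embedded Credentials its own table
	if err := enc.Encode(cfg); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}
```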
+func (e *Encoder) PromoteAnonymous(promote bool) *Encoder { + e.promoteAnon = promote + return e +} + func (e *Encoder) marshal(v interface{}) ([]byte, error) { + // Check if indentation is valid + for _, char := range e.indentation { + if !isSpace(char) { + return []byte{}, fmt.Errorf("invalid indentation: must only contains space or tab characters") + } + } + mtype := reflect.TypeOf(v) + if mtype == nil { + return []byte{}, errors.New("nil cannot be marshaled to TOML") + } switch mtype.Kind() { case reflect.Struct, reflect.Map: @@ -288,6 +383,9 @@ func (e *Encoder) marshal(v interface{}) ([]byte, error) { if mtype.Elem().Kind() != reflect.Struct { return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") } + if reflect.ValueOf(v).IsNil() { + return []byte{}, errors.New("nil pointer cannot be marshaled to TOML") + } default: return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") } @@ -296,13 +394,16 @@ func (e *Encoder) marshal(v interface{}) ([]byte, error) { if isCustomMarshaler(mtype) { return callCustomMarshaler(sval) } + if isTextMarshaler(mtype) { + return callTextMarshaler(sval) + } t, err := e.valueToTree(mtype, sval) if err != nil { return []byte{}, err } var buf bytes.Buffer - _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, false) + _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, false) return buf.Bytes(), err } @@ -332,12 +433,15 @@ func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, er if err != nil { return nil, err } - - tval.SetWithOptions(opts.name, SetOptions{ - Comment: opts.comment, - Commented: opts.commented, - Multiline: opts.multiline, - }, val) + if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon { + e.appendTree(tval, tree) + } else { + tval.SetPathWithOptions([]string{opts.name}, SetOptions{ + Comment: opts.comment, + Commented: opts.commented, + Multiline: opts.multiline, + }, val) + } } } } @@ -371,13 +475,13 @@ func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, er return nil, err } if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.arraysOneElementPerLine) + keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) if err != nil { return nil, err } tval.SetPath([]string{keyStr}, val) } else { - tval.Set(key.String(), val) + tval.SetPath([]string{key.String()}, val) } } } @@ -399,9 +503,6 @@ func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*T // Convert given marshal slice to slice of toml values func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - if mtype.Elem().Kind() == reflect.Interface { - return nil, fmt.Errorf("marshal can't handle []interface{}") - } tval := make([]interface{}, mval.Len(), mval.Len()) for i := 0; i < mval.Len(); i++ { val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) @@ -417,7 +518,14 @@ func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (int func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { e.line++ if mtype.Kind() == reflect.Ptr { - return e.valueToToml(mtype.Elem(), mval.Elem()) + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + return callTextMarshaler(mval) + default: + return e.valueToToml(mtype.Elem(), mval.Elem()) + } } if 
mtype.Kind() == reflect.Interface { return e.valueToToml(mval.Elem().Type(), mval.Elem()) @@ -425,12 +533,14 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface switch { case isCustomMarshaler(mtype): return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + return callTextMarshaler(mval) case isTree(mtype): return e.valueToTree(mtype, mval) + case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): + return e.valueToOtherSlice(mtype, mval) case isTreeSequence(mtype): return e.valueToTreeSlice(mtype, mval) - case isOtherSequence(mtype): - return e.valueToOtherSlice(mtype, mval) default: switch mtype.Kind() { case reflect.Bool: @@ -454,6 +564,19 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface } } +func (e *Encoder) appendTree(t, o *Tree) error { + for key, value := range o.values { + if _, ok := t.values[key]; ok { + continue + } + if tomlValue, ok := value.(*tomlValue); ok { + tomlValue.position.Col = t.position.Col + } + t.values[key] = value + } + return nil +} + // Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. // Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for // sub-structs, and only definite types can be unmarshaled. @@ -506,6 +629,8 @@ type Decoder struct { tval *Tree encOpts tagName string + strict bool + visitor visitorState } // NewDecoder returns a new decoder that reads from r. @@ -536,8 +661,18 @@ func (d *Decoder) SetTagName(v string) *Decoder { return d } +// Strict allows changing to strict decoding. Any fields that are found in the +// input data and do not have a corresponding struct member cause an error. +func (d *Decoder) Strict(strict bool) *Decoder { + d.strict = strict + return d +} + func (d *Decoder) unmarshal(v interface{}) error { mtype := reflect.TypeOf(v) + if mtype == nil { + return errors.New("nil cannot be unmarshaled from TOML") + } if mtype.Kind() != reflect.Ptr { return errors.New("only a pointer to struct or map can be unmarshaled from TOML") } @@ -550,12 +685,23 @@ func (d *Decoder) unmarshal(v interface{}) error { return errors.New("only a pointer to struct or map can be unmarshaled from TOML") } + if reflect.ValueOf(v).IsNil() { + return errors.New("nil pointer cannot be unmarshaled from TOML") + } + vv := reflect.ValueOf(v).Elem() + if d.strict { + d.visitor = newVisitorState(d.tval) + } + sval, err := d.valueFromTree(elem, d.tval, &vv) if err != nil { return err } + if err := d.visitor.validate(); err != nil { + return err + } reflect.ValueOf(v).Elem().Set(sval) return nil } @@ -566,6 +712,17 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.V if mtype.Kind() == reflect.Ptr { return d.unwrapPointer(mtype, tval, mval1) } + + // Check if pointer to value implements the Unmarshaler interface. 
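Editorial note: the `Strict` mode added above turns keys without a matching destination field into a decode error instead of silently dropping them. A small sketch with invented names; the error text follows the `undecoded keys` format introduced later in this file:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/pelletier/go-toml"
)

type ServerConfig struct {
	Host string `toml:"host"`
	Port int    `toml:"port"`
}

func main() {
	doc := `
host = "example.org"
port = 8080
debug = true   # typo: no matching struct field
`
	var cfg ServerConfig
	err := toml.NewDecoder(strings.NewReader(doc)).Strict(true).Decode(&cfg)
	// With Strict(true) the typo should surface as an error such as
	// `undecoded keys: ["debug"]` rather than being ignored.
	fmt.Println(err)
}
```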
+ if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { + d.visitor.visitAll() + + if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) + } + return mvalPtr.Elem(), nil + } + var mval reflect.Value switch mtype.Kind() { case reflect.Struct: @@ -597,18 +754,21 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.V found := false if tval != nil { for _, key := range keysToTry { - exists := tval.Has(key) + exists := tval.HasPath([]string{key}) if !exists { continue } - val := tval.Get(key) + + d.visitor.push(key) + val := tval.GetPath([]string{key}) fval := mval.Field(i) mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) if err != nil { - return mval, formatError(err, tval.GetPosition(key)) + return mval, formatError(err, tval.GetPositionPath([]string{key})) } mval.Field(i).Set(mvalf) found = true + d.visitor.pop() break } } @@ -618,32 +778,42 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.V var val interface{} var err error switch mvalf.Kind() { - case reflect.Bool: - val, err = strconv.ParseBool(opts.defaultValue) - if err != nil { - return mval.Field(i), err - } - case reflect.Int: - val, err = strconv.Atoi(opts.defaultValue) - if err != nil { - return mval.Field(i), err - } case reflect.String: val = opts.defaultValue + case reflect.Bool: + val, err = strconv.ParseBool(opts.defaultValue) + case reflect.Uint: + val, err = strconv.ParseUint(opts.defaultValue, 10, 0) + case reflect.Uint8: + val, err = strconv.ParseUint(opts.defaultValue, 10, 8) + case reflect.Uint16: + val, err = strconv.ParseUint(opts.defaultValue, 10, 16) + case reflect.Uint32: + val, err = strconv.ParseUint(opts.defaultValue, 10, 32) + case reflect.Uint64: + val, err = strconv.ParseUint(opts.defaultValue, 10, 64) + case reflect.Int: + val, err = strconv.ParseInt(opts.defaultValue, 10, 0) + case reflect.Int8: + val, err = strconv.ParseInt(opts.defaultValue, 10, 8) + case reflect.Int16: + val, err = strconv.ParseInt(opts.defaultValue, 10, 16) + case reflect.Int32: + val, err = strconv.ParseInt(opts.defaultValue, 10, 32) case reflect.Int64: val, err = strconv.ParseInt(opts.defaultValue, 10, 64) - if err != nil { - return mval.Field(i), err - } + case reflect.Float32: + val, err = strconv.ParseFloat(opts.defaultValue, 32) case reflect.Float64: val, err = strconv.ParseFloat(opts.defaultValue, 64) - if err != nil { - return mval.Field(i), err - } default: - return mval.Field(i), fmt.Errorf("unsuported field type for default option") + return mvalf, fmt.Errorf("unsupported field type for default option") } - mval.Field(i).Set(reflect.ValueOf(val)) + + if err != nil { + return mvalf, err + } + mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) } // save the old behavior above and try to check structs @@ -652,7 +822,8 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.V if !mtypef.Anonymous { tmpTval = nil } - v, err := d.valueFromTree(mtypef.Type, tmpTval, nil) + fval := mval.Field(i) + v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) if err != nil { return v, err } @@ -663,13 +834,15 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.V case reflect.Map: mval = reflect.MakeMap(mtype) for _, key := range tval.Keys() { + d.visitor.push(key) // TODO: path splits key val := tval.GetPath([]string{key}) mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) if err != nil { - return mval, 
formatError(err, tval.GetPosition(key)) + return mval, formatError(err, tval.GetPositionPath([]string{key})) } mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) + d.visitor.pop() } } return mval, nil @@ -677,22 +850,52 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.V // Convert toml value to marshal struct/map slice, using marshal type func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { - mval := reflect.MakeSlice(mtype, len(tval), len(tval)) + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + for i := 0; i < len(tval); i++ { + d.visitor.push(strconv.Itoa(i)) val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) if err != nil { return mval, err } mval.Index(i).Set(val) + d.visitor.pop() + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + + for i := 0; i < len(tval); i++ { + val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) } return mval, nil } // Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { - mval := reflect.MakeSlice(mtype, len(tval), len(tval)) - for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) +func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val := reflect.ValueOf(tval) + length := val.Len() + + mval, err := makeSliceOrArray(mtype, length) + if err != nil { + return mval, err + } + + for i := 0; i < length; i++ { + val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) if err != nil { return mval, err } @@ -701,6 +904,21 @@ func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (r return mval, nil } +// Create a new slice or a new array with specified length +func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { + var mval reflect.Value + switch mtype.Kind() { + case reflect.Slice: + mval = reflect.MakeSlice(mtype, tLength, tLength) + case reflect.Array: + mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() + if tLength > mtype.Len() { + return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) + } + } + return mval, nil +} + // Convert toml value to marshal value, using marshal type. When mval1 is non-nil // and the given type is a struct value, merge fields into it. func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { @@ -742,6 +960,7 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref } return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) case []interface{}: + d.visitor.visit() if isOtherSequence(mtype) { return d.valueFromOtherSlice(mtype, t) } @@ -755,6 +974,15 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref } return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) default: + d.visitor.visit() + // Check if pointer to value implements the encoding.TextUnmarshaler. 
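Editorial note: the `encoding.TextUnmarshaler` hook added here lets scalar TOML values decode into custom types. A hedged illustration with an invented `LogLevel` type; only the hook itself is taken from the hunk above:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/pelletier/go-toml"
)

// LogLevel implements encoding.TextUnmarshaler, so a plain TOML string
// can be decoded into it via the new UnmarshalText path.
type LogLevel int

func (l *LogLevel) UnmarshalText(text []byte) error {
	switch strings.ToLower(string(text)) {
	case "debug":
		*l = 0
	case "info":
		*l = 1
	default:
		return fmt.Errorf("unknown level %q", text)
	}
	return nil
}

type Settings struct {
	Level LogLevel `toml:"level"`
}

func main() {
	var s Settings
	if err := toml.Unmarshal([]byte(`level = "info"`), &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Level) // should print 1
}
```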
+ if mvalPtr := reflect.New(mtype); isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { + if err := d.unmarshalText(tval, mvalPtr); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) + } + return mvalPtr.Elem(), nil + } + switch mtype.Kind() { case reflect.Bool, reflect.Struct: val := reflect.ValueOf(tval) @@ -805,34 +1033,34 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref } return reflect.ValueOf(d), nil } - if !val.Type().ConvertibleTo(mtype) { + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(mtype).Int()) { + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } return val.Convert(mtype), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } if val.Convert(reflect.TypeOf(int(1))).Int() < 0 { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Convert(mtype).Uint())) { + if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } return val.Convert(mtype), nil case reflect.Float32, reflect.Float64: val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(mtype).Float()) { + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } @@ -844,6 +1072,11 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref ival := mval1.Elem() return d.valueFromToml(mval1.Elem().Type(), t, &ival) } + case reflect.Slice, reflect.Array: + if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { + return d.valueFromOtherSliceI(mtype, t) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) default: return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) } @@ -867,6 +1100,12 @@ func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *ref return mval, nil } +func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { + var buf bytes.Buffer + fmt.Fprint(&buf, tval) + return callTextUnmarshaler(mval, buf.Bytes()) +} + func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { tag := vf.Tag.Get(an.tag) parse := strings.Split(tag, ",") @@ -879,6 +1118,7 @@ func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { defaultValue := vf.Tag.Get(tagDefault) result := tomlOpts{ name: 
vf.Name, + nameFromTag: false, comment: comment, commented: commented, multiline: multiline, @@ -891,6 +1131,7 @@ func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { result.include = false } else { result.name = strings.Trim(parse[0], " ") + result.nameFromTag = true } } if vf.PkgPath != "" { @@ -907,11 +1148,7 @@ func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { func isZero(val reflect.Value) bool { switch val.Type().Kind() { - case reflect.Map: - fallthrough - case reflect.Array: - fallthrough - case reflect.Slice: + case reflect.Slice, reflect.Array, reflect.Map: return val.Len() == 0 default: return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) @@ -924,3 +1161,80 @@ func formatError(err error, pos Position) error { } return fmt.Errorf("%s: %s", pos, err) } + +// visitorState keeps track of which keys were unmarshaled. +type visitorState struct { + tree *Tree + path []string + keys map[string]struct{} + active bool +} + +func newVisitorState(tree *Tree) visitorState { + path, result := []string{}, map[string]struct{}{} + insertKeys(path, result, tree) + return visitorState{ + tree: tree, + path: path[:0], + keys: result, + active: true, + } +} + +func (s *visitorState) push(key string) { + if s.active { + s.path = append(s.path, key) + } +} + +func (s *visitorState) pop() { + if s.active { + s.path = s.path[:len(s.path)-1] + } +} + +func (s *visitorState) visit() { + if s.active { + delete(s.keys, strings.Join(s.path, ".")) + } +} + +func (s *visitorState) visitAll() { + if s.active { + for k := range s.keys { + if strings.HasPrefix(k, strings.Join(s.path, ".")) { + delete(s.keys, k) + } + } + } +} + +func (s *visitorState) validate() error { + if !s.active { + return nil + } + undecoded := make([]string, 0, len(s.keys)) + for key := range s.keys { + undecoded = append(undecoded, key) + } + sort.Strings(undecoded) + if len(undecoded) > 0 { + return fmt.Errorf("undecoded keys: %q", undecoded) + } + return nil +} + +func insertKeys(path []string, m map[string]struct{}, tree *Tree) { + for k, v := range tree.values { + switch node := v.(type) { + case []*Tree: + for i, item := range node { + insertKeys(append(path, k, strconv.Itoa(i)), m, item) + } + case *Tree: + insertKeys(append(path, k), m, node) + case *tomlValue: + m[strings.Join(append(path, k), ".")] = struct{}{} + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go index 1b344fee..7bf40bbd 100644 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -158,6 +158,11 @@ func (p *tomlParser) parseGroup() tomlParserStateFn { if err := p.tree.createSubTree(keys, startToken.Position); err != nil { p.raiseError(key, "%s", err) } + destTree := p.tree.GetPath(keys) + if target, ok := destTree.(*Tree); ok && target != nil && target.inline { + p.raiseError(key, "could not re-define exist inline table or its sub-table : %s", + strings.Join(keys, ".")) + } p.assume(tokenRightBracket) p.currentTable = keys return p.parseStart @@ -201,6 +206,11 @@ func (p *tomlParser) parseAssign() tomlParserStateFn { strings.Join(tableKey, ".")) } + if targetNode.inline { + p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", + strings.Join(tableKey, ".")) + } + // assign value to the found table keyVal := parsedKey[len(parsedKey)-1] localKey := []string{keyVal} @@ -411,12 +421,13 @@ Loop: if tokenIsComma(previous) { p.raiseError(previous, 
"trailing comma at the end of inline table") } + tree.inline = true return tree } func (p *tomlParser) parseArray() interface{} { var array []interface{} - arrayType := reflect.TypeOf(nil) + arrayType := reflect.TypeOf(newTree()) for { follow := p.peek() if follow == nil || follow.typ == tokenEOF { @@ -427,11 +438,8 @@ func (p *tomlParser) parseArray() interface{} { break } val := p.parseRvalue() - if arrayType == nil { - arrayType = reflect.TypeOf(val) - } if reflect.TypeOf(val) != arrayType { - p.raiseError(follow, "mixed types in array") + arrayType = nil } array = append(array, val) follow = p.peek() @@ -445,6 +453,12 @@ func (p *tomlParser) parseArray() interface{} { p.getToken() } } + + // if the array is a mixed-type array or its length is 0, + // don't convert it to a table array + if len(array) <= 0 { + arrayType = nil + } // An array of Trees is actually an array of inline // tables, which is a shorthand for a table array. If the // array was not converted from []interface{} to []*Tree, diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go index 36a3fc88..6af4ec46 100644 --- a/vendor/github.com/pelletier/go-toml/token.go +++ b/vendor/github.com/pelletier/go-toml/token.go @@ -1,9 +1,6 @@ package toml -import ( - "fmt" - "unicode" -) +import "fmt" // Define tokens type tokenType int @@ -112,7 +109,7 @@ func isSpace(r rune) bool { } func isAlphanumeric(r rune) bool { - return unicode.IsLetter(r) || r == '_' + return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' } func isKeyChar(r rune) bool { @@ -127,7 +124,7 @@ func isKeyStartChar(r rune) bool { } func isDigit(r rune) bool { - return unicode.IsNumber(r) + return '0' <= r && r <= '9' } func isHexDigit(r rune) bool { diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go index f4d56870..d323c39b 100644 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -23,6 +23,7 @@ type Tree struct { values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree comment string commented bool + inline bool position Position } @@ -311,6 +312,7 @@ func (t *Tree) createSubTree(keys []string, pos Position) error { if !exists { tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) tree.position = pos + tree.inline = subtree.inline subtree.values[intermediateKey] = tree nextTree = tree } diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go index 16c19867..2d6487ed 100644 --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -30,9 +30,15 @@ type sortNode struct { // are preserved. Quotation marks and backslashes are also not escaped. 
func encodeMultilineTomlString(value string, commented string) string { var b bytes.Buffer + adjacentQuoteCount := 0 b.WriteString(commented) - for _, rr := range value { + for i, rr := range value { + if rr != '"' { + adjacentQuoteCount = 0 + } else { + adjacentQuoteCount++ + } switch rr { case '\b': b.WriteString(`\b`) @@ -45,7 +51,12 @@ func encodeMultilineTomlString(value string, commented string) string { case '\r': b.WriteString("\r") case '"': - b.WriteString(`"`) + if adjacentQuoteCount >= 3 || i == len(value)-1 { + adjacentQuoteCount = 0 + b.WriteString(`\"`) + } else { + b.WriteString(`"`) + } case '\\': b.WriteString(`\`) default: @@ -92,7 +103,30 @@ func encodeTomlString(value string) string { return b.String() } -func tomlValueStringRepresentation(v interface{}, commented string, indent string, arraysOneElementPerLine bool) (string, error) { +func tomlTreeStringRepresentation(t *Tree, ord marshalOrder) (string, error) { + var orderedVals []sortNode + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + var values []string + for _, node := range orderedVals { + k := node.key + v := t.values[k] + + repr, err := tomlValueStringRepresentation(v, "", "", ord, false) + if err != nil { + return "", err + } + values = append(values, quoteKeyIfNeeded(k)+" = "+repr) + } + return "{ " + strings.Join(values, ", ") + " }", nil +} + +func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord marshalOrder, arraysOneElementPerLine bool) (string, error) { // this interface check is added to dereference the change made in the writeTo function. // That change was made to allow this function to see formatting options. tv, ok := v.(*tomlValue) @@ -129,7 +163,7 @@ func tomlValueStringRepresentation(v interface{}, commented string, indent strin return "\"" + encodeTomlString(value) + "\"", nil case []byte: b, _ := v.([]byte) - return tomlValueStringRepresentation(string(b), commented, indent, arraysOneElementPerLine) + return tomlValueStringRepresentation(string(b), commented, indent, ord, arraysOneElementPerLine) case bool: if value { return "true", nil @@ -143,6 +177,8 @@ func tomlValueStringRepresentation(v interface{}, commented string, indent strin return value.String(), nil case LocalTime: return value.String(), nil + case *Tree: + return tomlTreeStringRepresentation(value, ord) case nil: return "", nil } @@ -153,7 +189,7 @@ func tomlValueStringRepresentation(v interface{}, commented string, indent strin var values []string for i := 0; i < rv.Len(); i++ { item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, commented, indent, arraysOneElementPerLine) + itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) if err != nil { return "", err } @@ -176,7 +212,7 @@ func tomlValueStringRepresentation(v interface{}, commented string, indent strin return stringBuffer.String(), nil } - return "[" + strings.Join(values, ",") + "]", nil + return "[" + strings.Join(values, ", ") + "]", nil } return "", fmt.Errorf("unsupported value type %T: %v", v, v) } @@ -271,10 +307,10 @@ func sortAlphabetical(t *Tree) (vals []sortNode) { } func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, false) + return t.writeToOrdered(w, indent, keyspace, bytesCount, 
arraysOneElementPerLine, OrderAlphabetical, " ", false) } -func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder, parentCommented bool) (int64, error) { +func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder, indentString string, parentCommented bool) (int64, error) { var orderedVals []sortNode switch ord { @@ -290,7 +326,7 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i k := node.key v := t.values[k] - combinedKey := k + combinedKey := quoteKeyIfNeeded(k) if keyspace != "" { combinedKey = keyspace + "." + combinedKey } @@ -324,7 +360,7 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i if err != nil { return bytesCount, err } - bytesCount, err = node.writeToOrdered(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine, ord, parentCommented || t.commented || tv.commented) + bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || tv.commented) if err != nil { return bytesCount, err } @@ -340,7 +376,7 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i return bytesCount, err } - bytesCount, err = subTree.writeToOrdered(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine, ord, parentCommented || t.commented || subTree.commented) + bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || subTree.commented) if err != nil { return bytesCount, err } @@ -357,7 +393,7 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i if parentCommented || t.commented || v.commented { commented = "# " } - repr, err := tomlValueStringRepresentation(v, commented, indent, arraysOneElementPerLine) + repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) if err != nil { return bytesCount, err } diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/philhofer/fwd/LICENSE.md similarity index 79% rename from vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE rename to vendor/github.com/philhofer/fwd/LICENSE.md index 14127cd8..1ac6a81f 100644 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE +++ b/vendor/github.com/philhofer/fwd/LICENSE.md @@ -1,9 +1,7 @@ -(The MIT License) +Copyright (c) 2014-2015, Philip Hofer -Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is 
furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/README.md b/vendor/github.com/philhofer/fwd/README.md new file mode 100644 index 00000000..38349af3 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/README.md @@ -0,0 +1,315 @@ + +# fwd + import "github.com/philhofer/fwd" + +The `fwd` package provides a buffered reader +and writer. Each has methods that help improve +the encoding/decoding performance of some binary +protocols. + +The `fwd.Writer` and `fwd.Reader` type provide similar +functionality to their counterparts in `bufio`, plus +a few extra utility methods that simplify read-ahead +and write-ahead. I wrote this package to improve serialization +performance for http://github.com/tinylib/msgp, +where it provided about a 2x speedup over `bufio` for certain +workloads. However, care must be taken to understand the semantics of the +extra methods provided by this package, as they allow +the user to access and manipulate the buffer memory +directly. + +The extra methods for `fwd.Reader` are `Peek`, `Skip` +and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, +will re-allocate the read buffer in order to accommodate arbitrarily +large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes +in the stream, and uses the `io.Seeker` interface if the underlying +stream implements it. `(*fwd.Reader).Next` returns a slice pointing +to the next `n` bytes in the read buffer (like `Peek`), but also +increments the read position. This allows users to process streams +in arbitrary block sizes without having to manage appropriately-sized +slices. Additionally, obviating the need to copy the data from the +buffer to another location in memory can improve performance dramatically +in CPU-bound applications. + +`fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which +returns a slice pointing to the next `n` bytes of the writer, and increments +the write position by the length of the returned slice. This allows users +to write directly to the end of the buffer. + + + + +## Constants +``` go +const ( + // DefaultReaderSize is the default size of the read buffer + DefaultReaderSize = 2048 +) +``` +``` go +const ( + // DefaultWriterSize is the + // default write buffer size. 
+ DefaultWriterSize = 2048 +) +``` + + + +## type Reader +``` go +type Reader struct { + // contains filtered or unexported fields +} +``` +Reader is a buffered look-ahead reader + + + + + + + + + +### func NewReader +``` go +func NewReader(r io.Reader) *Reader +``` +NewReader returns a new *Reader that reads from 'r' + + +### func NewReaderSize +``` go +func NewReaderSize(r io.Reader, n int) *Reader +``` +NewReaderSize returns a new *Reader that +reads from 'r' and has a buffer size 'n' + + + + +### func (\*Reader) BufferSize +``` go +func (r *Reader) BufferSize() int +``` +BufferSize returns the total size of the buffer + + + +### func (\*Reader) Buffered +``` go +func (r *Reader) Buffered() int +``` +Buffered returns the number of bytes currently in the buffer + + + +### func (\*Reader) Next +``` go +func (r *Reader) Next(n int) ([]byte, error) +``` +Next returns the next 'n' bytes in the stream. +Unlike Peek, Next advances the reader position. +The returned bytes point to the same +data as the buffer, so the slice is +only valid until the next reader method call. +An EOF is considered an unexpected error. +If an the returned slice is less than the +length asked for, an error will be returned, +and the reader position will not be incremented. + + + +### func (\*Reader) Peek +``` go +func (r *Reader) Peek(n int) ([]byte, error) +``` +Peek returns the next 'n' buffered bytes, +reading from the underlying reader if necessary. +It will only return a slice shorter than 'n' bytes +if it also returns an error. Peek does not advance +the reader. EOF errors are *not* returned as +io.ErrUnexpectedEOF. + + + +### func (\*Reader) Read +``` go +func (r *Reader) Read(b []byte) (int, error) +``` +Read implements `io.Reader` + + + +### func (\*Reader) ReadByte +``` go +func (r *Reader) ReadByte() (byte, error) +``` +ReadByte implements `io.ByteReader` + + + +### func (\*Reader) ReadFull +``` go +func (r *Reader) ReadFull(b []byte) (int, error) +``` +ReadFull attempts to read len(b) bytes into +'b'. It returns the number of bytes read into +'b', and an error if it does not return len(b). +EOF is considered an unexpected error. + + + +### func (\*Reader) Reset +``` go +func (r *Reader) Reset(rd io.Reader) +``` +Reset resets the underlying reader +and the read buffer. + + + +### func (\*Reader) Skip +``` go +func (r *Reader) Skip(n int) (int, error) +``` +Skip moves the reader forward 'n' bytes. +Returns the number of bytes skipped and any +errors encountered. It is analogous to Seek(n, 1). +If the underlying reader implements io.Seeker, then +that method will be used to skip forward. + +If the reader encounters +an EOF before skipping 'n' bytes, it +returns io.ErrUnexpectedEOF. If the +underlying reader implements io.Seeker, then +those rules apply instead. (Many implementations +will not return `io.EOF` until the next call +to Read.) + + + +### func (\*Reader) WriteTo +``` go +func (r *Reader) WriteTo(w io.Writer) (int64, error) +``` +WriteTo implements `io.WriterTo` + + + +## type Writer +``` go +type Writer struct { + // contains filtered or unexported fields +} +``` +Writer is a buffered writer + + + + + + + + + +### func NewWriter +``` go +func NewWriter(w io.Writer) *Writer +``` +NewWriter returns a new writer +that writes to 'w' and has a buffer +that is `DefaultWriterSize` bytes. + + +### func NewWriterSize +``` go +func NewWriterSize(w io.Writer, size int) *Writer +``` +NewWriterSize returns a new writer +that writes to 'w' and has a buffer +that is 'size' bytes. 
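Editorial note: to ground the method list above, here is a hedged sketch of the read-ahead/write-ahead pattern `fwd` is built for. The one-byte length-prefix framing is invented for illustration; only `Peek`, `Skip`, `Next`, and `Flush` are taken from the documentation above:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/philhofer/fwd"
)

func main() {
	src := bytes.NewReader([]byte{0x03, 'f', 'o', 'o', 'x', 'y', 'z'})
	r := fwd.NewReader(src)

	// Peek at the length prefix without consuming it.
	hdr, err := r.Peek(1)
	if err != nil {
		panic(err)
	}
	n := int(hdr[0])

	// Skip the prefix, then take the payload straight out of the read buffer.
	if _, err := r.Skip(1); err != nil {
		panic(err)
	}
	payload, err := r.Next(n) // valid only until the next reader call
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", payload) // foo

	// On the write side, Next hands out buffer space to fill in place.
	var out bytes.Buffer
	w := fwd.NewWriter(&out)
	buf, err := w.Next(1 + n)
	if err != nil {
		panic(err)
	}
	buf[0] = byte(n)
	copy(buf[1:], payload)
	if err := w.Flush(); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", out.Bytes()) // 03 66 6f 6f
}
```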
+ + + + +### func (\*Writer) BufferSize +``` go +func (w *Writer) BufferSize() int +``` +BufferSize returns the maximum size of the buffer. + + + +### func (\*Writer) Buffered +``` go +func (w *Writer) Buffered() int +``` +Buffered returns the number of buffered bytes +in the reader. + + + +### func (\*Writer) Flush +``` go +func (w *Writer) Flush() error +``` +Flush flushes any buffered bytes +to the underlying writer. + + + +### func (\*Writer) Next +``` go +func (w *Writer) Next(n int) ([]byte, error) +``` +Next returns the next 'n' free bytes +in the write buffer, flushing the writer +as necessary. Next will return `io.ErrShortBuffer` +if 'n' is greater than the size of the write buffer. +Calls to 'next' increment the write position by +the size of the returned buffer. + + + +### func (\*Writer) ReadFrom +``` go +func (w *Writer) ReadFrom(r io.Reader) (int64, error) +``` +ReadFrom implements `io.ReaderFrom` + + + +### func (\*Writer) Write +``` go +func (w *Writer) Write(p []byte) (int, error) +``` +Write implements `io.Writer` + + + +### func (\*Writer) WriteByte +``` go +func (w *Writer) WriteByte(b byte) error +``` +WriteByte implements `io.ByteWriter` + + + +### func (\*Writer) WriteString +``` go +func (w *Writer) WriteString(s string) (int, error) +``` +WriteString is analogous to Write, but it takes a string. + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/reader.go b/vendor/github.com/philhofer/fwd/reader.go new file mode 100644 index 00000000..75be62ab --- /dev/null +++ b/vendor/github.com/philhofer/fwd/reader.go @@ -0,0 +1,383 @@ +// The `fwd` package provides a buffered reader +// and writer. Each has methods that help improve +// the encoding/decoding performance of some binary +// protocols. +// +// The `fwd.Writer` and `fwd.Reader` type provide similar +// functionality to their counterparts in `bufio`, plus +// a few extra utility methods that simplify read-ahead +// and write-ahead. I wrote this package to improve serialization +// performance for http://github.com/tinylib/msgp, +// where it provided about a 2x speedup over `bufio` for certain +// workloads. However, care must be taken to understand the semantics of the +// extra methods provided by this package, as they allow +// the user to access and manipulate the buffer memory +// directly. +// +// The extra methods for `fwd.Reader` are `Peek`, `Skip` +// and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, +// will re-allocate the read buffer in order to accommodate arbitrarily +// large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes +// in the stream, and uses the `io.Seeker` interface if the underlying +// stream implements it. `(*fwd.Reader).Next` returns a slice pointing +// to the next `n` bytes in the read buffer (like `Peek`), but also +// increments the read position. This allows users to process streams +// in arbitrary block sizes without having to manage appropriately-sized +// slices. Additionally, obviating the need to copy the data from the +// buffer to another location in memory can improve performance dramatically +// in CPU-bound applications. +// +// `fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which +// returns a slice pointing to the next `n` bytes of the writer, and increments +// the write position by the length of the returned slice. This allows users +// to write directly to the end of the buffer. 
+// +package fwd + +import "io" + +const ( + // DefaultReaderSize is the default size of the read buffer + DefaultReaderSize = 2048 + + // minimum read buffer; straight from bufio + minReaderSize = 16 +) + +// NewReader returns a new *Reader that reads from 'r' +func NewReader(r io.Reader) *Reader { + return NewReaderSize(r, DefaultReaderSize) +} + +// NewReaderSize returns a new *Reader that +// reads from 'r' and has a buffer size 'n' +func NewReaderSize(r io.Reader, n int) *Reader { + rd := &Reader{ + r: r, + data: make([]byte, 0, max(minReaderSize, n)), + } + if s, ok := r.(io.Seeker); ok { + rd.rs = s + } + return rd +} + +// Reader is a buffered look-ahead reader +type Reader struct { + r io.Reader // underlying reader + + // data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space + data []byte // data + n int // read offset + state error // last read error + + // if the reader past to NewReader was + // also an io.Seeker, this is non-nil + rs io.Seeker +} + +// Reset resets the underlying reader +// and the read buffer. +func (r *Reader) Reset(rd io.Reader) { + r.r = rd + r.data = r.data[0:0] + r.n = 0 + r.state = nil + if s, ok := rd.(io.Seeker); ok { + r.rs = s + } else { + r.rs = nil + } +} + +// more() does one read on the underlying reader +func (r *Reader) more() { + // move data backwards so that + // the read offset is 0; this way + // we can supply the maximum number of + // bytes to the reader + if r.n != 0 { + if r.n < len(r.data) { + r.data = r.data[:copy(r.data[0:], r.data[r.n:])] + } else { + r.data = r.data[:0] + } + r.n = 0 + } + var a int + a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)]) + if a == 0 && r.state == nil { + r.state = io.ErrNoProgress + return + } else if a > 0 && r.state == io.EOF { + // discard the io.EOF if we read more than 0 bytes. + // the next call to Read should return io.EOF again. + r.state = nil + } + r.data = r.data[:len(r.data)+a] +} + +// pop error +func (r *Reader) err() (e error) { + e, r.state = r.state, nil + return +} + +// pop error; EOF -> io.ErrUnexpectedEOF +func (r *Reader) noEOF() (e error) { + e, r.state = r.state, nil + if e == io.EOF { + e = io.ErrUnexpectedEOF + } + return +} + +// buffered bytes +func (r *Reader) buffered() int { return len(r.data) - r.n } + +// Buffered returns the number of bytes currently in the buffer +func (r *Reader) Buffered() int { return len(r.data) - r.n } + +// BufferSize returns the total size of the buffer +func (r *Reader) BufferSize() int { return cap(r.data) } + +// Peek returns the next 'n' buffered bytes, +// reading from the underlying reader if necessary. +// It will only return a slice shorter than 'n' bytes +// if it also returns an error. Peek does not advance +// the reader. EOF errors are *not* returned as +// io.ErrUnexpectedEOF. +func (r *Reader) Peek(n int) ([]byte, error) { + // in the degenerate case, + // we may need to realloc + // (the caller asked for more + // bytes than the size of the buffer) + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // keep filling until + // we hit an error or + // read enough bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + // we must have hit an error + if r.buffered() < n { + return r.data[r.n:], r.err() + } + + return r.data[r.n : r.n+n], nil +} + +// Skip moves the reader forward 'n' bytes. +// Returns the number of bytes skipped and any +// errors encountered. It is analogous to Seek(n, 1). 
+// If the underlying reader implements io.Seeker, then +// that method will be used to skip forward. +// +// If the reader encounters +// an EOF before skipping 'n' bytes, it +// returns io.ErrUnexpectedEOF. If the +// underlying reader implements io.Seeker, then +// those rules apply instead. (Many implementations +// will not return `io.EOF` until the next call +// to Read.) +func (r *Reader) Skip(n int) (int, error) { + + // fast path + if r.buffered() >= n { + r.n += n + return n, nil + } + + // use seeker implementation + // if we can + if r.rs != nil { + return r.skipSeek(n) + } + + // loop on filling + // and then erasing + o := n + for r.buffered() < n && r.state == nil { + r.more() + // we can skip forward + // up to r.buffered() bytes + step := min(r.buffered(), n) + r.n += step + n -= step + } + // at this point, n should be + // 0 if everything went smoothly + return o - n, r.noEOF() +} + +// Next returns the next 'n' bytes in the stream. +// Unlike Peek, Next advances the reader position. +// The returned bytes point to the same +// data as the buffer, so the slice is +// only valid until the next reader method call. +// An EOF is considered an unexpected error. +// If an the returned slice is less than the +// length asked for, an error will be returned, +// and the reader position will not be incremented. +func (r *Reader) Next(n int) ([]byte, error) { + + // in case the buffer is too small + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // fill at least 'n' bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + if r.buffered() < n { + return r.data[r.n:], r.noEOF() + } + out := r.data[r.n : r.n+n] + r.n += n + return out, nil +} + +// skipSeek uses the io.Seeker to seek forward. +// only call this function when n > r.buffered() +func (r *Reader) skipSeek(n int) (int, error) { + o := r.buffered() + // first, clear buffer + n -= o + r.n = 0 + r.data = r.data[:0] + + // then seek forward remaning bytes + i, err := r.rs.Seek(int64(n), 1) + return int(i) + o, err +} + +// Read implements `io.Reader` +func (r *Reader) Read(b []byte) (int, error) { + // if we have data in the buffer, just + // return that. + if r.buffered() != 0 { + x := copy(b, r.data[r.n:]) + r.n += x + return x, nil + } + var n int + // we have no buffered data; determine + // whether or not to buffer or call + // the underlying reader directly + if len(b) >= cap(r.data) { + n, r.state = r.r.Read(b) + } else { + r.more() + n = copy(b, r.data) + r.n = n + } + if n == 0 { + return 0, r.err() + } + return n, nil +} + +// ReadFull attempts to read len(b) bytes into +// 'b'. It returns the number of bytes read into +// 'b', and an error if it does not return len(b). +// EOF is considered an unexpected error. +func (r *Reader) ReadFull(b []byte) (int, error) { + var n int // read into b + var nn int // scratch + l := len(b) + // either read buffered data, + // or read directly for the underlying + // buffer, or fetch more buffered data. 
+ for n < l && r.state == nil { + if r.buffered() != 0 { + nn = copy(b[n:], r.data[r.n:]) + n += nn + r.n += nn + } else if l-n > cap(r.data) { + nn, r.state = r.r.Read(b[n:]) + n += nn + } else { + r.more() + } + } + if n < l { + return n, r.noEOF() + } + return n, nil +} + +// ReadByte implements `io.ByteReader` +func (r *Reader) ReadByte() (byte, error) { + for r.buffered() < 1 && r.state == nil { + r.more() + } + if r.buffered() < 1 { + return 0, r.err() + } + b := r.data[r.n] + r.n++ + return b, nil +} + +// WriteTo implements `io.WriterTo` +func (r *Reader) WriteTo(w io.Writer) (int64, error) { + var ( + i int64 + ii int + err error + ) + // first, clear buffer + if r.buffered() > 0 { + ii, err = w.Write(r.data[r.n:]) + i += int64(ii) + if err != nil { + return i, err + } + r.data = r.data[0:0] + r.n = 0 + } + for r.state == nil { + // here we just do + // 1:1 reads and writes + r.more() + if r.buffered() > 0 { + ii, err = w.Write(r.data) + i += int64(ii) + if err != nil { + return i, err + } + r.data = r.data[0:0] + r.n = 0 + } + } + if r.state != io.EOF { + return i, r.err() + } + return i, nil +} + +func min(a int, b int) int { + if a < b { + return a + } + return b +} + +func max(a int, b int) int { + if a < b { + return b + } + return a +} diff --git a/vendor/github.com/philhofer/fwd/writer.go b/vendor/github.com/philhofer/fwd/writer.go new file mode 100644 index 00000000..2dc392a9 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer.go @@ -0,0 +1,224 @@ +package fwd + +import "io" + +const ( + // DefaultWriterSize is the + // default write buffer size. + DefaultWriterSize = 2048 + + minWriterSize = minReaderSize +) + +// Writer is a buffered writer +type Writer struct { + w io.Writer // writer + buf []byte // 0:len(buf) is bufered data +} + +// NewWriter returns a new writer +// that writes to 'w' and has a buffer +// that is `DefaultWriterSize` bytes. +func NewWriter(w io.Writer) *Writer { + if wr, ok := w.(*Writer); ok { + return wr + } + return &Writer{ + w: w, + buf: make([]byte, 0, DefaultWriterSize), + } +} + +// NewWriterSize returns a new writer +// that writes to 'w' and has a buffer +// that is 'size' bytes. +func NewWriterSize(w io.Writer, size int) *Writer { + if wr, ok := w.(*Writer); ok && cap(wr.buf) >= size { + return wr + } + return &Writer{ + w: w, + buf: make([]byte, 0, max(size, minWriterSize)), + } +} + +// Buffered returns the number of buffered bytes +// in the reader. +func (w *Writer) Buffered() int { return len(w.buf) } + +// BufferSize returns the maximum size of the buffer. +func (w *Writer) BufferSize() int { return cap(w.buf) } + +// Flush flushes any buffered bytes +// to the underlying writer. +func (w *Writer) Flush() error { + l := len(w.buf) + if l > 0 { + n, err := w.w.Write(w.buf) + + // if we didn't write the whole + // thing, copy the unwritten + // bytes to the beginnning of the + // buffer. 
+ if n < l && n > 0 { + w.pushback(n) + if err == nil { + err = io.ErrShortWrite + } + } + if err != nil { + return err + } + w.buf = w.buf[:0] + return nil + } + return nil +} + +// Write implements `io.Writer` +func (w *Writer) Write(p []byte) (int, error) { + c, l, ln := cap(w.buf), len(w.buf), len(p) + avail := c - l + + // requires flush + if avail < ln { + if err := w.Flush(); err != nil { + return 0, err + } + l = len(w.buf) + } + // too big to fit in buffer; + // write directly to w.w + if c < ln { + return w.w.Write(p) + } + + // grow buf slice; copy; return + w.buf = w.buf[:l+ln] + return copy(w.buf[l:], p), nil +} + +// WriteString is analogous to Write, but it takes a string. +func (w *Writer) WriteString(s string) (int, error) { + c, l, ln := cap(w.buf), len(w.buf), len(s) + avail := c - l + + // requires flush + if avail < ln { + if err := w.Flush(); err != nil { + return 0, err + } + l = len(w.buf) + } + // too big to fit in buffer; + // write directly to w.w + // + // yes, this is unsafe. *but* + // io.Writer is not allowed + // to mutate its input or + // maintain a reference to it, + // per the spec in package io. + // + // plus, if the string is really + // too big to fit in the buffer, then + // creating a copy to write it is + // expensive (and, strictly speaking, + // unnecessary) + if c < ln { + return w.w.Write(unsafestr(s)) + } + + // grow buf slice; copy; return + w.buf = w.buf[:l+ln] + return copy(w.buf[l:], s), nil +} + +// WriteByte implements `io.ByteWriter` +func (w *Writer) WriteByte(b byte) error { + if len(w.buf) == cap(w.buf) { + if err := w.Flush(); err != nil { + return err + } + } + w.buf = append(w.buf, b) + return nil +} + +// Next returns the next 'n' free bytes +// in the write buffer, flushing the writer +// as necessary. Next will return `io.ErrShortBuffer` +// if 'n' is greater than the size of the write buffer. +// Calls to 'next' increment the write position by +// the size of the returned buffer. +func (w *Writer) Next(n int) ([]byte, error) { + c, l := cap(w.buf), len(w.buf) + if n > c { + return nil, io.ErrShortBuffer + } + avail := c - l + if avail < n { + if err := w.Flush(); err != nil { + return nil, err + } + l = len(w.buf) + } + w.buf = w.buf[:l+n] + return w.buf[l:], nil +} + +// take the bytes from w.buf[n:len(w.buf)] +// and put them at the beginning of w.buf, +// and resize to the length of the copied segment. +func (w *Writer) pushback(n int) { + w.buf = w.buf[:copy(w.buf, w.buf[n:])] +} + +// ReadFrom implements `io.ReaderFrom` +func (w *Writer) ReadFrom(r io.Reader) (int64, error) { + // anticipatory flush + if err := w.Flush(); err != nil { + return 0, err + } + + w.buf = w.buf[0:cap(w.buf)] // expand buffer + + var nn int64 // written + var err error // error + var x int // read + + // 1:1 reads and writes + for err == nil { + x, err = r.Read(w.buf) + if x > 0 { + n, werr := w.w.Write(w.buf[:x]) + nn += int64(n) + + if err != nil { + if n < x && n > 0 { + w.pushback(n - x) + } + return nn, werr + } + if n < x { + w.pushback(n - x) + return nn, io.ErrShortWrite + } + } else if err == nil { + err = io.ErrNoProgress + break + } + } + if err != io.EOF { + return nn, err + } + + // we only clear here + // because we are sure + // the writes have + // succeeded. otherwise, + // we retain the data in case + // future writes succeed. 
+ w.buf = w.buf[0:0] + + return nn, nil +} diff --git a/vendor/github.com/philhofer/fwd/writer_appengine.go b/vendor/github.com/philhofer/fwd/writer_appengine.go new file mode 100644 index 00000000..e367f393 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_appengine.go @@ -0,0 +1,5 @@ +// +build appengine + +package fwd + +func unsafestr(s string) []byte { return []byte(s) } diff --git a/vendor/github.com/philhofer/fwd/writer_unsafe.go b/vendor/github.com/philhofer/fwd/writer_unsafe.go new file mode 100644 index 00000000..a0bf453b --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_unsafe.go @@ -0,0 +1,18 @@ +// +build !appengine + +package fwd + +import ( + "reflect" + "unsafe" +) + +// unsafe cast string as []byte +func unsafestr(b string) []byte { + l := len(b) + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Len: l, + Cap: l, + Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data, + })) +} diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore index 6b7d7d1e..1fb13abe 100644 --- a/vendor/github.com/sirupsen/logrus/.gitignore +++ b/vendor/github.com/sirupsen/logrus/.gitignore @@ -1,2 +1,4 @@ logrus vendor + +.idea/ diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go new file mode 100644 index 00000000..4545dec0 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -0,0 +1,52 @@ +package logrus + +import ( + "bytes" + "sync" +) + +var ( + bufferPool BufferPool +) + +type BufferPool interface { + Put(*bytes.Buffer) + Get() *bytes.Buffer +} + +type defaultPool struct { + pool *sync.Pool +} + +func (p *defaultPool) Put(buf *bytes.Buffer) { + p.pool.Put(buf) +} + +func (p *defaultPool) Get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func getBuffer() *bytes.Buffer { + return bufferPool.Get() +} + +func putBuffer(buf *bytes.Buffer) { + buf.Reset() + bufferPool.Put(buf) +} + +// SetBufferPool allows to replace the default logrus buffer pool +// to better meets the specific needs of an application. +func SetBufferPool(bp BufferPool) { + bufferPool = bp +} + +func init() { + SetBufferPool(&defaultPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + }) +} diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index f6e062a3..5a5cbfe7 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -13,7 +13,6 @@ import ( ) var ( - bufferPool *sync.Pool // qualified package name, cached at first use logrusPackage string @@ -31,12 +30,6 @@ const ( ) func init() { - bufferPool = &sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, - } - // start at the bottom of the stack before the package-name cache is primed minimumCallerDepth = 1 } @@ -243,9 +236,12 @@ func (entry Entry) log(level Level, msg string) { entry.fireHooks() - buffer = bufferPool.Get().(*bytes.Buffer) + buffer = getBuffer() + defer func() { + entry.Buffer = nil + putBuffer(buffer) + }() buffer.Reset() - defer bufferPool.Put(buffer) entry.Buffer = buffer entry.write() diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go index 42b04f6c..017c30ce 100644 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -134,6 +134,51 @@ func Fatal(args ...interface{}) { std.Fatal(args...) 
} +// TraceFn logs a message from a func at level Trace on the standard logger. +func TraceFn(fn LogFunction) { + std.TraceFn(fn) +} + +// DebugFn logs a message from a func at level Debug on the standard logger. +func DebugFn(fn LogFunction) { + std.DebugFn(fn) +} + +// PrintFn logs a message from a func at level Info on the standard logger. +func PrintFn(fn LogFunction) { + std.PrintFn(fn) +} + +// InfoFn logs a message from a func at level Info on the standard logger. +func InfoFn(fn LogFunction) { + std.InfoFn(fn) +} + +// WarnFn logs a message from a func at level Warn on the standard logger. +func WarnFn(fn LogFunction) { + std.WarnFn(fn) +} + +// WarningFn logs a message from a func at level Warn on the standard logger. +func WarningFn(fn LogFunction) { + std.WarningFn(fn) +} + +// ErrorFn logs a message from a func at level Error on the standard logger. +func ErrorFn(fn LogFunction) { + std.ErrorFn(fn) +} + +// PanicFn logs a message from a func at level Panic on the standard logger. +func PanicFn(fn LogFunction) { + std.PanicFn(fn) +} + +// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. +func FatalFn(fn LogFunction) { + std.FatalFn(fn) +} + // Tracef logs a message at level Trace on the standard logger. func Tracef(format string, args ...interface{}) { std.Tracef(format, args...) diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod index d4132967..b3919d5e 100644 --- a/vendor/github.com/sirupsen/logrus/go.mod +++ b/vendor/github.com/sirupsen/logrus/go.mod @@ -2,10 +2,9 @@ module github.com/sirupsen/logrus require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/konsorten/go-windows-terminal-sequences v1.0.3 github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/testify v1.2.2 - golang.org/x/sys v0.0.0-20190422165155-953cdadca894 + golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 ) go 1.13 diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum index 49c690f2..1edc143b 100644 --- a/vendor/github.com/sirupsen/logrus/go.sum +++ b/vendor/github.com/sirupsen/logrus/go.sum @@ -1,12 +1,10 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index 6fdda748..dbf627c9 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -9,6 +9,11 @@ import ( "time" ) +// LogFunction For big messages, it can be more efficient to pass a function +// and only call it if the log level is actually enables rather than +// generating the log message and then checking if the level is enabled +type LogFunction func()[]interface{} + type Logger struct { // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a // file, or leave it default which is `os.Stderr`. You can also set this to @@ -70,7 +75,7 @@ func (mw *MutexWrap) Disable() { // // var log = &logrus.Logger{ // Out: os.Stderr, -// Formatter: new(logrus.JSONFormatter), +// Formatter: new(logrus.TextFormatter), // Hooks: make(logrus.LevelHooks), // Level: logrus.DebugLevel, // } @@ -195,6 +200,14 @@ func (logger *Logger) Log(level Level, args ...interface{}) { } } +func (logger *Logger) LogFn(level Level, fn LogFunction) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, fn()...) + logger.releaseEntry(entry) + } +} + func (logger *Logger) Trace(args ...interface{}) { logger.Log(TraceLevel, args...) } @@ -234,6 +247,45 @@ func (logger *Logger) Panic(args ...interface{}) { logger.Log(PanicLevel, args...) } +func (logger *Logger) TraceFn(fn LogFunction) { + logger.LogFn(TraceLevel, fn) +} + +func (logger *Logger) DebugFn(fn LogFunction) { + logger.LogFn(DebugLevel, fn) +} + +func (logger *Logger) InfoFn(fn LogFunction) { + logger.LogFn(InfoLevel, fn) +} + +func (logger *Logger) PrintFn(fn LogFunction) { + entry := logger.newEntry() + entry.Print(fn()...) 
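The `LogFunction` type added in this hunk exists so that expensive log arguments are only built when the level is actually enabled. A minimal sketch of that deferred-evaluation pattern, where `expensiveDump` is a hypothetical stand-in for costly work:

```go
package main

import "github.com/sirupsen/logrus"

// expensiveDump is a hypothetical helper standing in for work you only
// want to do when the message will actually be emitted.
func expensiveDump() string {
	return "huge diagnostic payload"
}

func main() {
	logrus.SetLevel(logrus.InfoLevel)

	// At InfoLevel the closure is never invoked, so expensiveDump is skipped.
	logrus.DebugFn(func() []interface{} {
		return []interface{}{"state: ", expensiveDump()}
	})
}
```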
+ logger.releaseEntry(entry) +} + +func (logger *Logger) WarnFn(fn LogFunction) { + logger.LogFn(WarnLevel, fn) +} + +func (logger *Logger) WarningFn(fn LogFunction) { + logger.WarnFn(fn) +} + +func (logger *Logger) ErrorFn(fn LogFunction) { + logger.LogFn(ErrorLevel, fn) +} + +func (logger *Logger) FatalFn(fn LogFunction) { + logger.LogFn(FatalLevel, fn) + logger.Exit(1) +} + +func (logger *Logger) PanicFn(fn LogFunction) { + logger.LogFn(PanicLevel, fn) +} + func (logger *Logger) Logln(level Level, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go index 572889db..2879eb50 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -5,30 +5,23 @@ package logrus import ( "io" "os" - "syscall" - sequences "github.com/konsorten/go-windows-terminal-sequences" + "golang.org/x/sys/windows" ) -func initTerminal(w io.Writer) { - switch v := w.(type) { - case *os.File: - sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true) - } -} - func checkIfTerminal(w io.Writer) bool { - var ret bool switch v := w.(type) { case *os.File: + handle := windows.Handle(v.Fd()) var mode uint32 - err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode) - ret = (err == nil) - default: - ret = false + if err := windows.GetConsoleMode(handle, &mode); err != nil { + return false + } + mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + if err := windows.SetConsoleMode(handle, mode); err != nil { + return false + } + return true } - if ret { - initTerminal(w) - } - return ret + return false } diff --git a/vendor/github.com/slack-go/slack/apps.go b/vendor/github.com/slack-go/slack/apps.go new file mode 100644 index 00000000..cb26ad8e --- /dev/null +++ b/vendor/github.com/slack-go/slack/apps.go @@ -0,0 +1,43 @@ +package slack + +import ( + "context" + "encoding/json" +) + +type listEventAuthorizationsResponse struct { + SlackResponse + Authorizations []EventAuthorization `json:"authorizations"` +} + +type EventAuthorization struct { + EnterpriseID string `json:"enterprise_id"` + TeamID string `json:"team_id"` + UserID string `json:"user_id"` + IsBot bool `json:"is_bot"` + IsEnterpriseInstall bool `json:"is_enterprise_install"` +} + +func (api *Client) ListEventAuthorizations(eventContext string) ([]EventAuthorization, error) { + return api.ListEventAuthorizationsContext(context.Background(), eventContext) +} + +// ListEventAuthorizationsContext lists authed users and teams for the given event_context. You must provide an app-level token to the client using OptionAppLevelToken. 
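A minimal sketch of the call path this new endpoint expects, with placeholder tokens and a placeholder `event_context` value (both would come from your Slack app configuration and the incoming event envelope):

```go
package main

import (
	"fmt"
	"log"

	"github.com/slack-go/slack"
)

func main() {
	api := slack.New(
		"xoxb-bot-token-placeholder",
		slack.OptionAppLevelToken("xapp-app-token-placeholder"),
	)

	// event_context arrives on the event payload; this value is a placeholder.
	auths, err := api.ListEventAuthorizations("1-app_mention-T1234-A1234")
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range auths {
		fmt.Println(a.TeamID, a.UserID, a.IsBot)
	}
}
```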
More info: https://api.slack.com/methods/apps.event.authorizations.list +func (api *Client) ListEventAuthorizationsContext(ctx context.Context, eventContext string) ([]EventAuthorization, error) { + resp := &listEventAuthorizationsResponse{} + + request, _ := json.Marshal(map[string]string{ + "event_context": eventContext, + }) + + err := postJSON(ctx, api.httpclient, api.endpoint+"apps.event.authorizations.list", api.appLevelToken, request, &resp, api) + + if err != nil { + return nil, err + } + if !resp.Ok { + return nil, resp.Err() + } + + return resp.Authorizations, nil +} diff --git a/vendor/github.com/slack-go/slack/dialog.go b/vendor/github.com/slack-go/slack/dialog.go index 376cd9e6..f94113f4 100644 --- a/vendor/github.com/slack-go/slack/dialog.go +++ b/vendor/github.com/slack-go/slack/dialog.go @@ -54,7 +54,9 @@ type DialogCallback InteractionCallback // DialogSubmissionCallback is sent from Slack when a user submits a form from within a dialog type DialogSubmissionCallback struct { - State string `json:"state,omitempty"` + // NOTE: State is only used with the dialog_submission type. + // You should use InteractionCallback.BlockActionsState for block_actions type. + State string `json:"-"` Submission map[string]string `json:"submission"` } diff --git a/vendor/github.com/slack-go/slack/interactions.go b/vendor/github.com/slack-go/slack/interactions.go index c7f59217..2515e290 100644 --- a/vendor/github.com/slack-go/slack/interactions.go +++ b/vendor/github.com/slack-go/slack/interactions.go @@ -56,6 +56,65 @@ type InteractionCallback struct { DialogSubmissionCallback ViewSubmissionCallback ViewClosedCallback + + // FIXME(kanata2): just workaround for backward-compatibility. + // See also https://github.com/slack-go/slack/issues/816 + RawState json.RawMessage `json:"state,omitempty"` + + // BlockActionState stands for the `state` field in block_actions type. + // NOTE: InteractionCallback.State has a role for the state of dialog_submission type, + // so we cannot use this field for backward-compatibility for now. 
+ BlockActionState *BlockActionStates `json:"-"` +} + +type BlockActionStates struct { + Values map[string]map[string]BlockAction `json:"values"` +} + +func (ic *InteractionCallback) MarshalJSON() ([]byte, error) { + type alias InteractionCallback + tmp := alias(*ic) + if tmp.Type == InteractionTypeBlockActions { + if tmp.BlockActionState == nil { + tmp.RawState = []byte(`{}`) + } else { + state, err := json.Marshal(tmp.BlockActionState.Values) + if err != nil { + return nil, err + } + tmp.RawState = []byte(`{"values":` + string(state) + `}`) + } + } else if ic.Type == InteractionTypeDialogSubmission { + tmp.RawState = []byte(tmp.State) + } + // Use pointer for go1.7 + return json.Marshal(&tmp) +} + +func (ic *InteractionCallback) UnmarshalJSON(b []byte) error { + type alias InteractionCallback + tmp := struct { + Type InteractionType `json:"type"` + *alias + }{ + alias: (*alias)(ic), + } + if err := json.Unmarshal(b, &tmp); err != nil { + return err + } + *ic = InteractionCallback(*tmp.alias) + ic.Type = tmp.Type + if ic.Type == InteractionTypeBlockActions { + if len(ic.RawState) > 0 { + err := json.Unmarshal(ic.RawState, &ic.BlockActionState) + if err != nil { + return err + } + } + } else if ic.Type == InteractionTypeDialogSubmission { + ic.State = string(ic.RawState) + } + return nil } type Container struct { diff --git a/vendor/github.com/slack-go/slack/reminders.go b/vendor/github.com/slack-go/slack/reminders.go index 9b905387..de1170a6 100644 --- a/vendor/github.com/slack-go/slack/reminders.go +++ b/vendor/github.com/slack-go/slack/reminders.go @@ -21,6 +21,11 @@ type reminderResp struct { Reminder Reminder `json:"reminder"` } +type remindersResp struct { + SlackResponse + Reminders []Reminder `json:"reminders"` +} + func (api *Client) doReminder(ctx context.Context, path string, values url.Values) (*Reminder, error) { response := &reminderResp{} if err := api.postMethod(ctx, path, values, response); err != nil { @@ -29,6 +34,31 @@ func (api *Client) doReminder(ctx context.Context, path string, values url.Value return &response.Reminder, response.Err() } +func (api *Client) doReminders(ctx context.Context, path string, values url.Values) ([]*Reminder, error) { + response := &remindersResp{} + if err := api.postMethod(ctx, path, values, response); err != nil { + return nil, err + } + + // create an array of pointers to reminders + var reminders = make([]*Reminder, 0, len(response.Reminders)) + for _, reminder := range response.Reminders { + reminders = append(reminders, &reminder) + } + + return reminders, response.Err() +} + +// ListReminders lists all the reminders created by or for the authenticated user +// +// See https://api.slack.com/methods/reminders.list +func (api *Client) ListReminders() ([]*Reminder, error) { + values := url.Values{ + "token": {api.token}, + } + return api.doReminders(context.Background(), "reminders.list", values) +} + // AddChannelReminder adds a reminder for a channel. 
// // See https://api.slack.com/methods/reminders.add (NOTE: the ability to set diff --git a/vendor/github.com/slack-go/slack/slack.go b/vendor/github.com/slack-go/slack/slack.go index 0972d52e..14367365 100644 --- a/vendor/github.com/slack-go/slack/slack.go +++ b/vendor/github.com/slack-go/slack/slack.go @@ -57,11 +57,12 @@ type authTestResponseFull struct { type ParamOption func(*url.Values) type Client struct { - token string - endpoint string - debug bool - log ilogger - httpclient httpClient + token string + appLevelToken string + endpoint string + debug bool + log ilogger + httpclient httpClient } // Option defines an option for a Client @@ -93,6 +94,11 @@ func OptionAPIURL(u string) func(*Client) { return func(c *Client) { c.endpoint = u } } +// OptionAppLevelToken sets an app-level token for the client. +func OptionAppLevelToken(token string) func(*Client) { + return func(c *Client) { c.appLevelToken = token } +} + // New builds a slack client from the provided token and options. func New(token string, options ...Option) *Client { s := &Client{ diff --git a/vendor/github.com/slack-go/slack/slash.go b/vendor/github.com/slack-go/slack/slash.go index f62065a2..b2c50947 100644 --- a/vendor/github.com/slack-go/slack/slash.go +++ b/vendor/github.com/slack-go/slack/slash.go @@ -19,6 +19,7 @@ type SlashCommand struct { Text string `json:"text"` ResponseURL string `json:"response_url"` TriggerID string `json:"trigger_id"` + APIAppID string `json:"api_app_id"` } // SlashCommandParse will parse the request of the slash command @@ -39,6 +40,7 @@ func SlashCommandParse(r *http.Request) (s SlashCommand, err error) { s.Text = r.PostForm.Get("text") s.ResponseURL = r.PostForm.Get("response_url") s.TriggerID = r.PostForm.Get("trigger_id") + s.APIAppID = r.PostForm.Get("api_app_id") return s, nil } diff --git a/vendor/github.com/spf13/afero/.gitignore b/vendor/github.com/spf13/afero/.gitignore new file mode 100644 index 00000000..9c1d9861 --- /dev/null +++ b/vendor/github.com/spf13/afero/.gitignore @@ -0,0 +1,2 @@ +sftpfs/file1 +sftpfs/test/ diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml index 0637db72..fdaa9998 100644 --- a/vendor/github.com/spf13/afero/.travis.yml +++ b/vendor/github.com/spf13/afero/.travis.yml @@ -1,21 +1,22 @@ -sudo: false -language: go - -go: - - 1.9 - - "1.10" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build - - go test -race -v ./... - +sudo: false +language: go + +go: + - "1.13" + - "1.14" + - tip + +os: + - linux + - osx + +matrix: + allow_failures: + - go: tip + fast_finish: true + +script: + - go build -v ./... + - go test -count=1 -cover -race -v ./... + - go vet ./... + - FILES=$(gofmt -s -l . zipfs sftpfs mem); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index 0c9b04b5..16b06f2b 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -6,7 +6,7 @@ A FileSystem Abstraction System for Go # Overview -Afero is an filesystem framework providing a simple, uniform and universal API +Afero is a filesystem framework providing a simple, uniform and universal API interacting with any filesystem, as an abstraction layer providing interfaces, types and methods. Afero has an exceptionally clean interface and simple design without needless constructors or initialization methods. 
@@ -18,7 +18,7 @@ and benefit of the os and ioutil packages. Afero provides significant improvements over using the os package alone, most notably the ability to create mock and testing filesystems without relying on the disk. -It is suitable for use in a any situation where you would consider using the OS +It is suitable for use in any situation where you would consider using the OS package as it provides an additional abstraction that makes it easy to use a memory backed file system during testing. It also adds support for the http filesystem for full interoperability. @@ -41,8 +41,8 @@ Afero is easy to use and easier to adopt. A few different ways you could use Afero: -* Use the interfaces alone to define you own file system. -* Wrap for the OS packages. +* Use the interfaces alone to define your own file system. +* Wrapper for the OS packages. * Define different filesystems for different parts of your application. * Use Afero for mock filesystems while testing @@ -380,7 +380,6 @@ The following is a short list of possible backends we hope someone will implement: * SSH -* ZIP * TAR * S3 @@ -406,28 +405,7 @@ Googles very well. ## Release Notes -* **0.10.0** 2015.12.10 - * Full compatibility with Windows - * Introduction of afero utilities - * Test suite rewritten to work cross platform - * Normalize paths for MemMapFs - * Adding Sync to the file interface - * **Breaking Change** Walk and ReadDir have changed parameter order - * Moving types used by MemMapFs to a subpackage - * General bugfixes and improvements -* **0.9.0** 2015.11.05 - * New Walk function similar to filepath.Walk - * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC - * MemMapFs.Remove now really deletes the file - * InMemoryFile.Readdir and Readdirnames work correctly - * InMemoryFile functions lock it for concurrent access - * Test suite improvements -* **0.8.0** 2014.10.28 - * First public version - * Interfaces feel ready for people to build using - * Interfaces satisfy all known uses - * MemMapFs passes the majority of the OS test suite - * OsFs passes the majority of the OS test suite +See the [Releases Page](https://github.com/spf13/afero/releases). ## Contributing diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml index a633ad50..5d2f34bf 100644 --- a/vendor/github.com/spf13/afero/appveyor.yml +++ b/vendor/github.com/spf13/afero/appveyor.yml @@ -10,6 +10,6 @@ build_script: go get -v github.com/spf13/afero/... - go build github.com/spf13/afero + go build -v github.com/spf13/afero/... test_script: -- cmd: go test -race -v github.com/spf13/afero/... +- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/... 
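The README changes above keep pointing at the same core use case, swapping a memory-backed filesystem in for the OS during tests. A minimal sketch of that, assuming afero's `ReadFile`/`WriteFile` helpers and an illustrative path:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	// Nothing below touches the disk; MemMapFs keeps everything in memory,
	// which is what makes it convenient in tests.
	fs := afero.NewMemMapFs()

	if err := afero.WriteFile(fs, "/config/app.yaml", []byte("debug: true\n"), 0644); err != nil {
		panic(err)
	}

	data, err := afero.ReadFile(fs, "/config/app.yaml")
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data))
}
```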
diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go index 616ff8ff..3a14b833 100644 --- a/vendor/github.com/spf13/afero/basepath.go +++ b/vendor/github.com/spf13/afero/basepath.go @@ -177,4 +177,30 @@ func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { return fi, false, err } +func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error { + oldname, err := b.RealPath(oldname) + if err != nil { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} + } + newname, err = b.RealPath(newname) + if err != nil { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} + } + if linker, ok := b.source.(Linker); ok { + return linker.SymlinkIfPossible(oldname, newname) + } + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} +} + +func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) { + name, err := b.RealPath(name) + if err != nil { + return "", &os.PathError{Op: "readlink", Path: name, Err: err} + } + if reader, ok := b.source.(LinkReader); ok { + return reader.ReadlinkIfPossible(name) + } + return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} +} + // vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go index 5728243d..18b45824 100644 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build darwin openbsd freebsd netbsd dragonfly +// +build aix darwin openbsd freebsd netbsd dragonfly package afero diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go index 968fc278..2b850e4d 100644 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -15,6 +15,7 @@ // +build !freebsd // +build !dragonfly // +build !netbsd +// +build !aix package afero diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go index e8108a85..96b77012 100644 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -117,6 +117,26 @@ func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) return fi, false, err } +func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error { + if slayer, ok := u.layer.(Linker); ok { + return slayer.SymlinkIfPossible(oldname, newname) + } + + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} +} + +func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) { + if rlayer, ok := u.layer.(LinkReader); ok { + return rlayer.ReadlinkIfPossible(name) + } + + if rbase, ok := u.base.(LinkReader); ok { + return rbase.ReadlinkIfPossible(name) + } + + return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} +} + func (u *CopyOnWriteFs) isNotExist(err error) bool { if e, ok := err.(*os.PathError); ok { err = e.Err diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod index 08685509..abe4fe1c 100644 --- a/vendor/github.com/spf13/afero/go.mod +++ b/vendor/github.com/spf13/afero/go.mod @@ -1,3 +1,9 @@ module github.com/spf13/afero -require golang.org/x/text v0.3.0 +require ( + github.com/pkg/sftp v1.10.1 + golang.org/x/crypto 
v0.0.0-20190820162420-60c769a6c586 + golang.org/x/text v0.3.3 +) + +go 1.13 diff --git a/vendor/github.com/spf13/afero/go.sum b/vendor/github.com/spf13/afero/go.sum index 6bad37b2..89d9bfbc 100644 --- a/vendor/github.com/spf13/afero/go.sum +++ b/vendor/github.com/spf13/afero/go.sum @@ -1,2 +1,29 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go index 5c3a3d8f..a403133e 100644 --- a/vendor/github.com/spf13/afero/ioutil.go +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -22,6 +22,7 @@ import ( "path/filepath" "sort" "strconv" + "strings" "sync" "time" ) @@ -147,7 +148,7 @@ func reseed() uint32 { return uint32(time.Now().UnixNano() + int64(os.Getpid())) } -func nextSuffix() string { +func nextRandom() string { randmu.Lock() r := rand if r == 0 { @@ -159,27 +160,36 @@ func nextSuffix() string { return strconv.Itoa(int(1e9 + r%1e9))[1:] } -// TempFile creates a new 
temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *File. +// TempFile creates a new temporary file in the directory dir, +// opens the file for reading and writing, and returns the resulting *os.File. +// The filename is generated by taking pattern and adding a random +// string to the end. If pattern includes a "*", the random string +// replaces the last "*". // If dir is the empty string, TempFile uses the default directory // for temporary files (see os.TempDir). // Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility // to remove the file when no longer needed. -func (a Afero) TempFile(dir, prefix string) (f File, err error) { - return TempFile(a.Fs, dir, prefix) +func (a Afero) TempFile(dir, pattern string) (f File, err error) { + return TempFile(a.Fs, dir, pattern) } -func TempFile(fs Fs, dir, prefix string) (f File, err error) { +func TempFile(fs Fs, dir, pattern string) (f File, err error) { if dir == "" { dir = os.TempDir() } + var prefix, suffix string + if pos := strings.LastIndex(pattern, "*"); pos != -1 { + prefix, suffix = pattern[:pos], pattern[pos+1:] + } else { + prefix = pattern + } + nconflict := 0 for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) + name := filepath.Join(dir, prefix+nextRandom()+suffix) f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) if os.IsExist(err) { if nconflict++; nconflict > 10 { @@ -211,7 +221,7 @@ func TempDir(fs Fs, dir, prefix string) (name string, err error) { nconflict := 0 for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextSuffix()) + try := filepath.Join(dir, prefix+nextRandom()) err = fs.Mkdir(try, 0700) if os.IsExist(err) { if nconflict++; nconflict > 10 { diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go index c18a87fb..7db4b7de 100644 --- a/vendor/github.com/spf13/afero/match.go +++ b/vendor/github.com/spf13/afero/match.go @@ -106,5 +106,5 @@ func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { // recognized by Match. func hasMeta(path string) bool { // TODO(niemeyer): Should other magic characters be added here? 
- return strings.IndexAny(path, "*?[") >= 0 + return strings.ContainsAny(path, "*?[") } diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go index 7af2fb56..699f1fb0 100644 --- a/vendor/github.com/spf13/afero/mem/file.go +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -193,8 +193,11 @@ func (f *File) Read(b []byte) (n int, err error) { } func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + prev := atomic.LoadInt64(&f.at) atomic.StoreInt64(&f.at, off) - return f.Read(b) + n, err = f.Read(b) + atomic.StoreInt64(&f.at, prev) + return } func (f *File) Truncate(size int64) error { @@ -233,6 +236,9 @@ func (f *File) Seek(offset int64, whence int) (int64, error) { } func (f *File) Write(b []byte) (n int, err error) { + if f.closed == true { + return 0, ErrFileClosed + } if f.readOnly { return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} } diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go index 09498e70..bbcc2381 100644 --- a/vendor/github.com/spf13/afero/memmap.go +++ b/vendor/github.com/spf13/afero/memmap.go @@ -25,6 +25,8 @@ import ( "github.com/spf13/afero/mem" ) +const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod() + type MemMapFs struct { mu sync.RWMutex data map[string]*mem.FileData @@ -40,7 +42,9 @@ func (m *MemMapFs) getData() map[string]*mem.FileData { m.data = make(map[string]*mem.FileData) // Root should always exist, right? // TODO: what about windows? - m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) + root := mem.CreateDir(FilePathSeparator) + mem.SetMode(root, os.ModeDir|0755) + m.data[FilePathSeparator] = root }) return m.data } @@ -52,7 +56,7 @@ func (m *MemMapFs) Create(name string) (File, error) { m.mu.Lock() file := mem.CreateFile(name) m.getData()[name] = file - m.registerWithParent(file) + m.registerWithParent(file, 0) m.mu.Unlock() return mem.NewFileHandle(file), nil } @@ -83,14 +87,14 @@ func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { return pfile } -func (m *MemMapFs) registerWithParent(f *mem.FileData) { +func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { if f == nil { return } parent := m.findParent(f) if parent == nil { pdir := filepath.Dir(filepath.Clean(f.Name())) - err := m.lockfreeMkdir(pdir, 0777) + err := m.lockfreeMkdir(pdir, perm) if err != nil { //log.Println("Mkdir error:", err) return @@ -119,13 +123,15 @@ func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { } } else { item := mem.CreateDir(name) + mem.SetMode(item, os.ModeDir|perm) m.getData()[name] = item - m.registerWithParent(item) + m.registerWithParent(item, perm) } return nil } func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { + perm &= chmodBits name = normalizePath(name) m.mu.RLock() @@ -137,13 +143,12 @@ func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { m.mu.Lock() item := mem.CreateDir(name) + mem.SetMode(item, os.ModeDir|perm) m.getData()[name] = item - m.registerWithParent(item) + m.registerWithParent(item, perm) m.mu.Unlock() - m.Chmod(name, perm|os.ModeDir) - - return nil + return m.setFileMode(name, perm|os.ModeDir) } func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { @@ -210,8 +215,12 @@ func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { } func (m *MemMapFs) OpenFile(name string, flag int, perm 
os.FileMode) (File, error) { + perm &= chmodBits chmod := false file, err := m.openWrite(name) + if err == nil && (flag&os.O_EXCL > 0) { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists} + } if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { file, err = m.Create(name) chmod = true @@ -237,7 +246,7 @@ func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, erro } } if chmod { - m.Chmod(name, perm) + return file, m.setFileMode(name, perm) } return file, nil } @@ -269,7 +278,7 @@ func (m *MemMapFs) RemoveAll(path string) error { m.mu.RLock() defer m.mu.RUnlock() - for p, _ := range m.getData() { + for p := range m.getData() { if strings.HasPrefix(p, path) { m.mu.RUnlock() m.mu.Lock() @@ -299,7 +308,7 @@ func (m *MemMapFs) Rename(oldname, newname string) error { delete(m.getData(), oldname) mem.ChangeFileName(fileData, newname) m.getData()[newname] = fileData - m.registerWithParent(fileData) + m.registerWithParent(fileData, 0) m.mu.Unlock() m.mu.RLock() } else { @@ -318,6 +327,21 @@ func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { } func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + mode &= chmodBits + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits + + mode = prevOtherBits | mode + return m.setFileMode(name, mode) +} + +func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error { name = normalizePath(name) m.mu.RLock() diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go index 13cc1b84..4761db5d 100644 --- a/vendor/github.com/spf13/afero/os.go +++ b/vendor/github.com/spf13/afero/os.go @@ -99,3 +99,11 @@ func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { fi, err := os.Lstat(name) return fi, true, err } + +func (OsFs) SymlinkIfPossible(oldname, newname string) error { + return os.Symlink(oldname, newname) +} + +func (OsFs) ReadlinkIfPossible(name string) (string, error) { + return os.Readlink(name) +} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go index c6376ec3..f94b181b 100644 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ b/vendor/github.com/spf13/afero/readonlyfs.go @@ -44,6 +44,18 @@ func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { return fi, false, err } +func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} +} + +func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) { + if srdr, ok := r.source.(LinkReader); ok { + return srdr.ReadlinkIfPossible(name) + } + + return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} +} + func (r *ReadOnlyFs) Rename(o, n string) error { return syscall.EPERM } diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go index 9d92dbc0..c8fc0086 100644 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -126,6 +126,9 @@ func (r *RegexpFs) Open(name string) (File, error) { } } f, err := r.source.Open(name) + if err != nil { + return nil, err + } return &RegexpFile{f: f, re: r.re}, nil } diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go new file mode 100644 index 00000000..d1c6ea53 --- /dev/null +++ 
b/vendor/github.com/spf13/afero/symlink.go @@ -0,0 +1,55 @@ +// Copyright © 2018 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "errors" +) + +// Symlinker is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +// It indicates support for 3 symlink related interfaces that implement the +// behaviors of the os methods: +// - Lstat +// - Symlink, and +// - Readlink +type Symlinker interface { + Lstater + Linker + LinkReader +} + +// Linker is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem, +// or the filesystem otherwise supports Symlink's. +type Linker interface { + SymlinkIfPossible(oldname, newname string) error +} + +// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system +// does not support Symlink's either directly or through its delegated filesystem. +// As expressed by support for the Linker interface. +var ErrNoSymlink = errors.New("symlink not supported") + +// LinkReader is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +type LinkReader interface { + ReadlinkIfPossible(name string) (string, error) +} + +// ErrNoReadlink is the error that will be wrapped in an os.Path if a file system +// does not support the readlink operation either directly or through its delegated filesystem. +// As expressed by support for the LinkReader interface. +var ErrNoReadlink = errors.New("readlink not supported") diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/tinylib/msgp/LICENSE similarity index 50% rename from vendor/github.com/konsorten/go-windows-terminal-sequences/README.md rename to vendor/github.com/tinylib/msgp/LICENSE index 09a4a35c..14d60424 100644 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md +++ b/vendor/github.com/tinylib/msgp/LICENSE @@ -1,42 +1,8 @@ -# Windows Terminal Sequences +Copyright (c) 2014 Philip Hofer +Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated -This library allow for enabling Windows terminal color support for Go. - -See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details. - -## Usage - -```go -import ( - "syscall" - - sequences "github.com/konsorten/go-windows-terminal-sequences" -) - -func main() { - sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true) -} - -``` - -## Authors - -The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de). 
- -We thank all the authors who provided code to this library: - -* Felix Kollmann -* Nicolas Perraut -* @dirty49374 - -## License - -(The MIT License) - -Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go new file mode 100644 index 00000000..6c6bb37a --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go @@ -0,0 +1,24 @@ +// +build linux,!appengine + +package msgp + +import ( + "os" + "syscall" +) + +func adviseRead(mem []byte) { + syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED) +} + +func adviseWrite(mem []byte) { + syscall.Madvise(mem, syscall.MADV_SEQUENTIAL) +} + +func fallocate(f *os.File, sz int64) error { + err := syscall.Fallocate(int(f.Fd()), 0, 0, sz) + if err == syscall.ENOTSUP { + return f.Truncate(sz) + } + return err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go new file mode 100644 index 00000000..da65ea54 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/advise_other.go @@ -0,0 +1,17 @@ +// +build !linux appengine + +package msgp + +import ( + "os" +) + +// TODO: darwin, BSD support + +func adviseRead(mem []byte) {} + +func adviseWrite(mem []byte) {} + +func fallocate(f *os.File, sz int64) error { + return f.Truncate(sz) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go new file mode 100644 index 00000000..a0434c7e --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/circular.go @@ -0,0 +1,39 @@ +package msgp + +type timer interface { + StartTimer() + StopTimer() +} + +// EndlessReader is an io.Reader +// that loops over the same data +// endlessly. It is used for benchmarking. +type EndlessReader struct { + tb timer + data []byte + offset int +} + +// NewEndlessReader returns a new endless reader +func NewEndlessReader(b []byte, tb timer) *EndlessReader { + return &EndlessReader{tb: tb, data: b, offset: 0} +} + +// Read implements io.Reader. In practice, it +// always returns (len(p), nil), although it +// fills the supplied slice while the benchmark +// timer is stopped. +func (c *EndlessReader) Read(p []byte) (int, error) { + c.tb.StopTimer() + var n int + l := len(p) + m := len(c.data) + for n < l { + nn := copy(p[n:], c.data[c.offset:]) + n += nn + c.offset += nn + c.offset %= m + } + c.tb.StartTimer() + return n, nil +} diff --git a/vendor/github.com/tinylib/msgp/msgp/defs.go b/vendor/github.com/tinylib/msgp/msgp/defs.go new file mode 100644 index 00000000..c634eef1 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/defs.go @@ -0,0 +1,142 @@ +// This package is the support library for the msgp code generator (http://github.com/tinylib/msgp). +// +// This package defines the utilites used by the msgp code generator for encoding and decoding MessagePack +// from []byte and io.Reader/io.Writer types. Much of this package is devoted to helping the msgp code +// generator implement the Marshaler/Unmarshaler and Encodable/Decodable interfaces. +// +// This package defines four "families" of functions: +// - AppendXxxx() appends an object to a []byte in MessagePack encoding. +// - ReadXxxxBytes() reads an object from a []byte and returns the remaining bytes. +// - (*Writer).WriteXxxx() writes an object to the buffered *Writer type. +// - (*Reader).ReadXxxx() reads an object from a buffered *Reader type. 
+// +// Once a type has satisfied the `Encodable` and `Decodable` interfaces, +// it can be written and read from arbitrary `io.Writer`s and `io.Reader`s using +// msgp.Encode(io.Writer, msgp.Encodable) +// and +// msgp.Decode(io.Reader, msgp.Decodable) +// +// There are also methods for converting MessagePack to JSON without +// an explicit de-serialization step. +// +// For additional tips, tricks, and gotchas, please visit +// the wiki at http://github.com/tinylib/msgp +package msgp + +const last4 = 0x0f +const first4 = 0xf0 +const last5 = 0x1f +const first3 = 0xe0 +const last7 = 0x7f + +func isfixint(b byte) bool { + return b>>7 == 0 +} + +func isnfixint(b byte) bool { + return b&first3 == mnfixint +} + +func isfixmap(b byte) bool { + return b&first4 == mfixmap +} + +func isfixarray(b byte) bool { + return b&first4 == mfixarray +} + +func isfixstr(b byte) bool { + return b&first3 == mfixstr +} + +func wfixint(u uint8) byte { + return u & last7 +} + +func rfixint(b byte) uint8 { + return b +} + +func wnfixint(i int8) byte { + return byte(i) | mnfixint +} + +func rnfixint(b byte) int8 { + return int8(b) +} + +func rfixmap(b byte) uint8 { + return b & last4 +} + +func wfixmap(u uint8) byte { + return mfixmap | (u & last4) +} + +func rfixstr(b byte) uint8 { + return b & last5 +} + +func wfixstr(u uint8) byte { + return (u & last5) | mfixstr +} + +func rfixarray(b byte) uint8 { + return (b & last4) +} + +func wfixarray(u uint8) byte { + return (u & last4) | mfixarray +} + +// These are all the byte +// prefixes defined by the +// msgpack standard +const ( + // 0XXXXXXX + mfixint uint8 = 0x00 + + // 111XXXXX + mnfixint uint8 = 0xe0 + + // 1000XXXX + mfixmap uint8 = 0x80 + + // 1001XXXX + mfixarray uint8 = 0x90 + + // 101XXXXX + mfixstr uint8 = 0xa0 + + mnil uint8 = 0xc0 + mfalse uint8 = 0xc2 + mtrue uint8 = 0xc3 + mbin8 uint8 = 0xc4 + mbin16 uint8 = 0xc5 + mbin32 uint8 = 0xc6 + mext8 uint8 = 0xc7 + mext16 uint8 = 0xc8 + mext32 uint8 = 0xc9 + mfloat32 uint8 = 0xca + mfloat64 uint8 = 0xcb + muint8 uint8 = 0xcc + muint16 uint8 = 0xcd + muint32 uint8 = 0xce + muint64 uint8 = 0xcf + mint8 uint8 = 0xd0 + mint16 uint8 = 0xd1 + mint32 uint8 = 0xd2 + mint64 uint8 = 0xd3 + mfixext1 uint8 = 0xd4 + mfixext2 uint8 = 0xd5 + mfixext4 uint8 = 0xd6 + mfixext8 uint8 = 0xd7 + mfixext16 uint8 = 0xd8 + mstr8 uint8 = 0xd9 + mstr16 uint8 = 0xda + mstr32 uint8 = 0xdb + marray16 uint8 = 0xdc + marray32 uint8 = 0xdd + mmap16 uint8 = 0xde + mmap32 uint8 = 0xdf +) diff --git a/vendor/github.com/tinylib/msgp/msgp/edit.go b/vendor/github.com/tinylib/msgp/msgp/edit.go new file mode 100644 index 00000000..b473a6f6 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/edit.go @@ -0,0 +1,242 @@ +package msgp + +import ( + "math" +) + +// Locate returns a []byte pointing to the field +// in a messagepack map with the provided key. (The returned []byte +// points to a sub-slice of 'raw'; Locate does no allocations.) If the +// key doesn't exist in the map, a zero-length []byte will be returned. +func Locate(key string, raw []byte) []byte { + s, n := locate(raw, key) + return raw[s:n] +} + +// Replace takes a key ("key") in a messagepack map ("raw") +// and replaces its value with the one provided and returns +// the new []byte. The returned []byte may point to the same +// memory as "raw". Replace makes no effort to evaluate the validity +// of the contents of 'val'. It may use up to the full capacity of 'raw.' +// Replace returns 'nil' if the field doesn't exist or if the object in 'raw' +// is not a map. 
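+//
+// A minimal sketch of Locate and Replace on a one-field map (an illustrative
+// addition, not upstream documentation):
+//
+//	raw := msgp.AppendMapHeader(nil, 1)
+//	raw = msgp.AppendString(raw, "n")
+//	raw = msgp.AppendInt64(raw, 1)
+//	val := msgp.Locate("n", raw)                           // sub-slice of raw holding the value
+//	raw = msgp.Replace("n", raw, msgp.AppendInt64(nil, 2)) // overwrite it, in place when possible
+//	_ = val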
+func Replace(key string, raw []byte, val []byte) []byte { + start, end := locate(raw, key) + if start == end { + return nil + } + return replace(raw, start, end, val, true) +} + +// CopyReplace works similarly to Replace except that the returned +// byte slice does not point to the same memory as 'raw'. CopyReplace +// returns 'nil' if the field doesn't exist or 'raw' isn't a map. +func CopyReplace(key string, raw []byte, val []byte) []byte { + start, end := locate(raw, key) + if start == end { + return nil + } + return replace(raw, start, end, val, false) +} + +// Remove removes a key-value pair from 'raw'. It returns +// 'raw' unchanged if the key didn't exist. +func Remove(key string, raw []byte) []byte { + start, end := locateKV(raw, key) + if start == end { + return raw + } + raw = raw[:start+copy(raw[start:], raw[end:])] + return resizeMap(raw, -1) +} + +// HasKey returns whether the map in 'raw' has +// a field with key 'key' +func HasKey(key string, raw []byte) bool { + sz, bts, err := ReadMapHeaderBytes(raw) + if err != nil { + return false + } + var field []byte + for i := uint32(0); i < sz; i++ { + field, bts, err = ReadStringZC(bts) + if err != nil { + return false + } + if UnsafeString(field) == key { + return true + } + } + return false +} + +func replace(raw []byte, start int, end int, val []byte, inplace bool) []byte { + ll := end - start // length of segment to replace + lv := len(val) + + if inplace { + extra := lv - ll + + // fastest case: we're doing + // a 1:1 replacement + if extra == 0 { + copy(raw[start:], val) + return raw + + } else if extra < 0 { + // 'val' smaller than replaced value + // copy in place and shift back + + x := copy(raw[start:], val) + y := copy(raw[start+x:], raw[end:]) + return raw[:start+x+y] + + } else if extra < cap(raw)-len(raw) { + // 'val' less than (cap-len) extra bytes + // copy in place and shift forward + raw = raw[0 : len(raw)+extra] + // shift end forward + copy(raw[end+extra:], raw[end:]) + copy(raw[start:], val) + return raw + } + } + + // we have to allocate new space + out := make([]byte, len(raw)+len(val)-ll) + x := copy(out, raw[:start]) + y := copy(out[x:], val) + copy(out[x+y:], raw[end:]) + return out +} + +// locate does a naive O(n) search for the map key; returns start, end +// (returns 0,0 on error) +func locate(raw []byte, key string) (start int, end int) { + var ( + sz uint32 + bts []byte + field []byte + err error + ) + sz, bts, err = ReadMapHeaderBytes(raw) + if err != nil { + return + } + + // loop and locate field + for i := uint32(0); i < sz; i++ { + field, bts, err = ReadStringZC(bts) + if err != nil { + return 0, 0 + } + if UnsafeString(field) == key { + // start location + l := len(raw) + start = l - len(bts) + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + end = l - len(bts) + return + } + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + } + return 0, 0 +} + +// locate key AND value +func locateKV(raw []byte, key string) (start int, end int) { + var ( + sz uint32 + bts []byte + field []byte + err error + ) + sz, bts, err = ReadMapHeaderBytes(raw) + if err != nil { + return 0, 0 + } + + for i := uint32(0); i < sz; i++ { + tmp := len(bts) + field, bts, err = ReadStringZC(bts) + if err != nil { + return 0, 0 + } + if UnsafeString(field) == key { + start = len(raw) - tmp + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + end = len(raw) - len(bts) + return + } + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + } + return 0, 0 +} + +// delta is delta on map size +func 
resizeMap(raw []byte, delta int64) []byte { + var sz int64 + switch raw[0] { + case mmap16: + sz = int64(big.Uint16(raw[1:])) + if sz+delta <= math.MaxUint16 { + big.PutUint16(raw[1:], uint16(sz+delta)) + return raw + } + if cap(raw)-len(raw) >= 2 { + raw = raw[0 : len(raw)+2] + copy(raw[5:], raw[3:]) + raw[0] = mmap32 + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[3:]...) + + case mmap32: + sz = int64(big.Uint32(raw[1:])) + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + + default: + sz = int64(rfixmap(raw[0])) + if sz+delta < 16 { + raw[0] = wfixmap(uint8(sz + delta)) + return raw + } else if sz+delta <= math.MaxUint16 { + if cap(raw)-len(raw) >= 2 { + raw = raw[0 : len(raw)+2] + copy(raw[3:], raw[1:]) + raw[0] = mmap16 + big.PutUint16(raw[1:], uint16(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[1:]...) + } + if cap(raw)-len(raw) >= 4 { + raw = raw[0 : len(raw)+4] + copy(raw[5:], raw[1:]) + raw[0] = mmap32 + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[1:]...) + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize.go b/vendor/github.com/tinylib/msgp/msgp/elsize.go new file mode 100644 index 00000000..95762e7e --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/elsize.go @@ -0,0 +1,99 @@ +package msgp + +// size of every object on the wire, +// plus type information. gives us +// constant-time type information +// for traversing composite objects. +// +var sizes = [256]bytespec{ + mnil: {size: 1, extra: constsize, typ: NilType}, + mfalse: {size: 1, extra: constsize, typ: BoolType}, + mtrue: {size: 1, extra: constsize, typ: BoolType}, + mbin8: {size: 2, extra: extra8, typ: BinType}, + mbin16: {size: 3, extra: extra16, typ: BinType}, + mbin32: {size: 5, extra: extra32, typ: BinType}, + mext8: {size: 3, extra: extra8, typ: ExtensionType}, + mext16: {size: 4, extra: extra16, typ: ExtensionType}, + mext32: {size: 6, extra: extra32, typ: ExtensionType}, + mfloat32: {size: 5, extra: constsize, typ: Float32Type}, + mfloat64: {size: 9, extra: constsize, typ: Float64Type}, + muint8: {size: 2, extra: constsize, typ: UintType}, + muint16: {size: 3, extra: constsize, typ: UintType}, + muint32: {size: 5, extra: constsize, typ: UintType}, + muint64: {size: 9, extra: constsize, typ: UintType}, + mint8: {size: 2, extra: constsize, typ: IntType}, + mint16: {size: 3, extra: constsize, typ: IntType}, + mint32: {size: 5, extra: constsize, typ: IntType}, + mint64: {size: 9, extra: constsize, typ: IntType}, + mfixext1: {size: 3, extra: constsize, typ: ExtensionType}, + mfixext2: {size: 4, extra: constsize, typ: ExtensionType}, + mfixext4: {size: 6, extra: constsize, typ: ExtensionType}, + mfixext8: {size: 10, extra: constsize, typ: ExtensionType}, + mfixext16: {size: 18, extra: constsize, typ: ExtensionType}, + mstr8: {size: 2, extra: extra8, typ: StrType}, + mstr16: {size: 3, extra: extra16, typ: StrType}, + mstr32: {size: 5, extra: extra32, typ: StrType}, + marray16: {size: 3, extra: array16v, typ: ArrayType}, + marray32: {size: 5, extra: array32v, typ: ArrayType}, + mmap16: {size: 3, extra: map16v, typ: MapType}, + mmap32: {size: 5, extra: map32v, typ: MapType}, +} + +func init() { + // set up fixed fields + + // fixint + for i := mfixint; i < 0x80; i++ { + sizes[i] = bytespec{size: 
1, extra: constsize, typ: IntType} + } + + // nfixint + for i := uint16(mnfixint); i < 0x100; i++ { + sizes[uint8(i)] = bytespec{size: 1, extra: constsize, typ: IntType} + } + + // fixstr gets constsize, + // since the prefix yields the size + for i := mfixstr; i < 0xc0; i++ { + sizes[i] = bytespec{size: 1 + rfixstr(i), extra: constsize, typ: StrType} + } + + // fixmap + for i := mfixmap; i < 0x90; i++ { + sizes[i] = bytespec{size: 1, extra: varmode(2 * rfixmap(i)), typ: MapType} + } + + // fixarray + for i := mfixarray; i < 0xa0; i++ { + sizes[i] = bytespec{size: 1, extra: varmode(rfixarray(i)), typ: ArrayType} + } +} + +// a valid bytespsec has +// non-zero 'size' and +// non-zero 'typ' +type bytespec struct { + size uint8 // prefix size information + extra varmode // extra size information + typ Type // type + _ byte // makes bytespec 4 bytes (yes, this matters) +} + +// size mode +// if positive, # elements for composites +type varmode int8 + +const ( + constsize varmode = 0 // constant size (size bytes + uint8(varmode) objects) + extra8 = -1 // has uint8(p[1]) extra bytes + extra16 = -2 // has be16(p[1:]) extra bytes + extra32 = -3 // has be32(p[1:]) extra bytes + map16v = -4 // use map16 + map32v = -5 // use map32 + array16v = -6 // use array16 + array32v = -7 // use array32 +) + +func getType(v byte) Type { + return sizes[v].typ +} diff --git a/vendor/github.com/tinylib/msgp/msgp/errors.go b/vendor/github.com/tinylib/msgp/msgp/errors.go new file mode 100644 index 00000000..cc78a980 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/errors.go @@ -0,0 +1,314 @@ +package msgp + +import ( + "fmt" + "reflect" +) + +const resumableDefault = false + +var ( + // ErrShortBytes is returned when the + // slice being decoded is too short to + // contain the contents of the message + ErrShortBytes error = errShort{} + + // this error is only returned + // if we reach code that should + // be unreachable + fatal error = errFatal{} +) + +// Error is the interface satisfied +// by all of the errors that originate +// from this package. +type Error interface { + error + + // Resumable returns whether + // or not the error means that + // the stream of data is malformed + // and the information is unrecoverable. + Resumable() bool +} + +// contextError allows msgp Error instances to be enhanced with additional +// context about their origin. +type contextError interface { + Error + + // withContext must not modify the error instance - it must clone and + // return a new error with the context added. + withContext(ctx string) error +} + +// Cause returns the underlying cause of an error that has been wrapped +// with additional context. +func Cause(e error) error { + out := e + if e, ok := e.(errWrapped); ok && e.cause != nil { + out = e.cause + } + return out +} + +// Resumable returns whether or not the error means that the stream of data is +// malformed and the information is unrecoverable. +func Resumable(e error) bool { + if e, ok := e.(Error); ok { + return e.Resumable() + } + return resumableDefault +} + +// WrapError wraps an error with additional context that allows the part of the +// serialized type that caused the problem to be identified. Underlying errors +// can be retrieved using Cause() +// +// The input error is not modified - a new error should be returned. +// +// ErrShortBytes is not wrapped with any context due to backward compatibility +// issues with the public API. 
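+//
+// For example (an illustrative sketch, not upstream text):
+//
+//	err := msgp.WrapError(errors.New("boom"), "Struct", "Field", 3)
+//	// err.Error() reads "boom at Struct/Field/3"
+//	_ = msgp.Cause(err) // returns the original error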
+// +func WrapError(err error, ctx ...interface{}) error { + switch e := err.(type) { + case errShort: + return e + case contextError: + return e.withContext(ctxString(ctx)) + default: + return errWrapped{cause: err, ctx: ctxString(ctx)} + } +} + +// ctxString converts the incoming interface{} slice into a single string. +func ctxString(ctx []interface{}) string { + out := "" + for idx, cv := range ctx { + if idx > 0 { + out += "/" + } + out += fmt.Sprintf("%v", cv) + } + return out +} + +func addCtx(ctx, add string) string { + if ctx != "" { + return add + "/" + ctx + } else { + return add + } +} + +// errWrapped allows arbitrary errors passed to WrapError to be enhanced with +// context and unwrapped with Cause() +type errWrapped struct { + cause error + ctx string +} + +func (e errWrapped) Error() string { + if e.ctx != "" { + return fmt.Sprintf("%s at %s", e.cause, e.ctx) + } else { + return e.cause.Error() + } +} + +func (e errWrapped) Resumable() bool { + if e, ok := e.cause.(Error); ok { + return e.Resumable() + } + return resumableDefault +} + +type errShort struct{} + +func (e errShort) Error() string { return "msgp: too few bytes left to read object" } +func (e errShort) Resumable() bool { return false } + +type errFatal struct { + ctx string +} + +func (f errFatal) Error() string { + out := "msgp: fatal decoding error (unreachable code)" + if f.ctx != "" { + out += " at " + f.ctx + } + return out +} + +func (f errFatal) Resumable() bool { return false } + +func (f errFatal) withContext(ctx string) error { f.ctx = addCtx(f.ctx, ctx); return f } + +// ArrayError is an error returned +// when decoding a fix-sized array +// of the wrong size +type ArrayError struct { + Wanted uint32 + Got uint32 + ctx string +} + +// Error implements the error interface +func (a ArrayError) Error() string { + out := fmt.Sprintf("msgp: wanted array of size %d; got %d", a.Wanted, a.Got) + if a.ctx != "" { + out += " at " + a.ctx + } + return out +} + +// Resumable is always 'true' for ArrayErrors +func (a ArrayError) Resumable() bool { return true } + +func (a ArrayError) withContext(ctx string) error { a.ctx = addCtx(a.ctx, ctx); return a } + +// IntOverflow is returned when a call +// would downcast an integer to a type +// with too few bits to hold its value. 
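+//
+// For instance (an illustrative sketch; it assumes the package's ReadInt8Bytes
+// helper and the usual smallest-width integer encoding):
+//
+//	buf := msgp.AppendInt64(nil, 300)    // too large for an int8
+//	_, _, err := msgp.ReadInt8Bytes(buf) // err should be IntOverflow{Value: 300, FailedBitsize: 8}
+//	if e, ok := err.(msgp.IntOverflow); ok {
+//		_ = e.Resumable() // true: the stream itself is still well-formed
+//	}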
+type IntOverflow struct { + Value int64 // the value of the integer + FailedBitsize int // the bit size that the int64 could not fit into + ctx string +} + +// Error implements the error interface +func (i IntOverflow) Error() string { + str := fmt.Sprintf("msgp: %d overflows int%d", i.Value, i.FailedBitsize) + if i.ctx != "" { + str += " at " + i.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (i IntOverflow) Resumable() bool { return true } + +func (i IntOverflow) withContext(ctx string) error { i.ctx = addCtx(i.ctx, ctx); return i } + +// UintOverflow is returned when a call +// would downcast an unsigned integer to a type +// with too few bits to hold its value +type UintOverflow struct { + Value uint64 // value of the uint + FailedBitsize int // the bit size that couldn't fit the value + ctx string +} + +// Error implements the error interface +func (u UintOverflow) Error() string { + str := fmt.Sprintf("msgp: %d overflows uint%d", u.Value, u.FailedBitsize) + if u.ctx != "" { + str += " at " + u.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (u UintOverflow) Resumable() bool { return true } + +func (u UintOverflow) withContext(ctx string) error { u.ctx = addCtx(u.ctx, ctx); return u } + +// UintBelowZero is returned when a call +// would cast a signed integer below zero +// to an unsigned integer. +type UintBelowZero struct { + Value int64 // value of the incoming int + ctx string +} + +// Error implements the error interface +func (u UintBelowZero) Error() string { + str := fmt.Sprintf("msgp: attempted to cast int %d to unsigned", u.Value) + if u.ctx != "" { + str += " at " + u.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (u UintBelowZero) Resumable() bool { return true } + +func (u UintBelowZero) withContext(ctx string) error { + u.ctx = ctx + return u +} + +// A TypeError is returned when a particular +// decoding method is unsuitable for decoding +// a particular MessagePack value. +type TypeError struct { + Method Type // Type expected by method + Encoded Type // Type actually encoded + + ctx string +} + +// Error implements the error interface +func (t TypeError) Error() string { + out := fmt.Sprintf("msgp: attempted to decode type %q with method for %q", t.Encoded, t.Method) + if t.ctx != "" { + out += " at " + t.ctx + } + return out +} + +// Resumable returns 'true' for TypeErrors +func (t TypeError) Resumable() bool { return true } + +func (t TypeError) withContext(ctx string) error { t.ctx = addCtx(t.ctx, ctx); return t } + +// returns either InvalidPrefixError or +// TypeError depending on whether or not +// the prefix is recognized +func badPrefix(want Type, lead byte) error { + t := sizes[lead].typ + if t == InvalidType { + return InvalidPrefixError(lead) + } + return TypeError{Method: want, Encoded: t} +} + +// InvalidPrefixError is returned when a bad encoding +// uses a prefix that is not recognized in the MessagePack standard. +// This kind of error is unrecoverable. +type InvalidPrefixError byte + +// Error implements the error interface +func (i InvalidPrefixError) Error() string { + return fmt.Sprintf("msgp: unrecognized type prefix 0x%x", byte(i)) +} + +// Resumable returns 'false' for InvalidPrefixErrors +func (i InvalidPrefixError) Resumable() bool { return false } + +// ErrUnsupportedType is returned +// when a bad argument is supplied +// to a function that takes `interface{}`. 
+type ErrUnsupportedType struct { + T reflect.Type + + ctx string +} + +// Error implements error +func (e *ErrUnsupportedType) Error() string { + out := fmt.Sprintf("msgp: type %q not supported", e.T) + if e.ctx != "" { + out += " at " + e.ctx + } + return out +} + +// Resumable returns 'true' for ErrUnsupportedType +func (e *ErrUnsupportedType) Resumable() bool { return true } + +func (e *ErrUnsupportedType) withContext(ctx string) error { + o := *e + o.ctx = addCtx(o.ctx, ctx) + return &o +} diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go new file mode 100644 index 00000000..b2e11085 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/extension.go @@ -0,0 +1,549 @@ +package msgp + +import ( + "fmt" + "math" +) + +const ( + // Complex64Extension is the extension number used for complex64 + Complex64Extension = 3 + + // Complex128Extension is the extension number used for complex128 + Complex128Extension = 4 + + // TimeExtension is the extension number used for time.Time + TimeExtension = 5 +) + +// our extensions live here +var extensionReg = make(map[int8]func() Extension) + +// RegisterExtension registers extensions so that they +// can be initialized and returned by methods that +// decode `interface{}` values. This should only +// be called during initialization. f() should return +// a newly-initialized zero value of the extension. Keep in +// mind that extensions 3, 4, and 5 are reserved for +// complex64, complex128, and time.Time, respectively, +// and that MessagePack reserves extension types from -127 to -1. +// +// For example, if you wanted to register a user-defined struct: +// +// msgp.RegisterExtension(10, func() msgp.Extension { &MyExtension{} }) +// +// RegisterExtension will panic if you call it multiple times +// with the same 'typ' argument, or if you use a reserved +// type (3, 4, or 5). +func RegisterExtension(typ int8, f func() Extension) { + switch typ { + case Complex64Extension, Complex128Extension, TimeExtension: + panic(fmt.Sprint("msgp: forbidden extension type:", typ)) + } + if _, ok := extensionReg[typ]; ok { + panic(fmt.Sprint("msgp: RegisterExtension() called with typ", typ, "more than once")) + } + extensionReg[typ] = f +} + +// ExtensionTypeError is an error type returned +// when there is a mis-match between an extension type +// and the type encoded on the wire +type ExtensionTypeError struct { + Got int8 + Want int8 +} + +// Error implements the error interface +func (e ExtensionTypeError) Error() string { + return fmt.Sprintf("msgp: error decoding extension: wanted type %d; got type %d", e.Want, e.Got) +} + +// Resumable returns 'true' for ExtensionTypeErrors +func (e ExtensionTypeError) Resumable() bool { return true } + +func errExt(got int8, wanted int8) error { + return ExtensionTypeError{Got: got, Want: wanted} +} + +// Extension is the interface fulfilled +// by types that want to define their +// own binary encoding. +type Extension interface { + // ExtensionType should return + // a int8 that identifies the concrete + // type of the extension. (Types <0 are + // officially reserved by the MessagePack + // specifications.) 
+ ExtensionType() int8 + + // Len should return the length + // of the data to be encoded + Len() int + + // MarshalBinaryTo should copy + // the data into the supplied slice, + // assuming that the slice has length Len() + MarshalBinaryTo([]byte) error + + UnmarshalBinary([]byte) error +} + +// RawExtension implements the Extension interface +type RawExtension struct { + Data []byte + Type int8 +} + +// ExtensionType implements Extension.ExtensionType, and returns r.Type +func (r *RawExtension) ExtensionType() int8 { return r.Type } + +// Len implements Extension.Len, and returns len(r.Data) +func (r *RawExtension) Len() int { return len(r.Data) } + +// MarshalBinaryTo implements Extension.MarshalBinaryTo, +// and returns a copy of r.Data +func (r *RawExtension) MarshalBinaryTo(d []byte) error { + copy(d, r.Data) + return nil +} + +// UnmarshalBinary implements Extension.UnmarshalBinary, +// and sets r.Data to the contents of the provided slice +func (r *RawExtension) UnmarshalBinary(b []byte) error { + if cap(r.Data) >= len(b) { + r.Data = r.Data[0:len(b)] + } else { + r.Data = make([]byte, len(b)) + } + copy(r.Data, b) + return nil +} + +// WriteExtension writes an extension type to the writer +func (mw *Writer) WriteExtension(e Extension) error { + l := e.Len() + var err error + switch l { + case 0: + o, err := mw.require(3) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 0 + mw.buf[o+2] = byte(e.ExtensionType()) + case 1: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext1 + mw.buf[o+1] = byte(e.ExtensionType()) + case 2: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext2 + mw.buf[o+1] = byte(e.ExtensionType()) + case 4: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext4 + mw.buf[o+1] = byte(e.ExtensionType()) + case 8: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = byte(e.ExtensionType()) + case 16: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext16 + mw.buf[o+1] = byte(e.ExtensionType()) + default: + switch { + case l < math.MaxUint8: + o, err := mw.require(3) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = byte(uint8(l)) + mw.buf[o+2] = byte(e.ExtensionType()) + case l < math.MaxUint16: + o, err := mw.require(4) + if err != nil { + return err + } + mw.buf[o] = mext16 + big.PutUint16(mw.buf[o+1:], uint16(l)) + mw.buf[o+3] = byte(e.ExtensionType()) + default: + o, err := mw.require(6) + if err != nil { + return err + } + mw.buf[o] = mext32 + big.PutUint32(mw.buf[o+1:], uint32(l)) + mw.buf[o+5] = byte(e.ExtensionType()) + } + } + // we can only write directly to the + // buffer if we're sure that it + // fits the object + if l <= mw.bufsize() { + o, err := mw.require(l) + if err != nil { + return err + } + return e.MarshalBinaryTo(mw.buf[o:]) + } + // here we create a new buffer + // just large enough for the body + // and save it as the write buffer + err = mw.flush() + if err != nil { + return err + } + buf := make([]byte, l) + err = e.MarshalBinaryTo(buf) + if err != nil { + return err + } + mw.buf = buf + mw.wloc = l + return nil +} + +// peek at the extension type, assuming the next +// kind to be read is Extension +func (m *Reader) peekExtensionType() (int8, error) { + p, err := m.R.Peek(2) + if err != nil { + return 0, err + } + spec := sizes[p[0]] + if spec.typ != ExtensionType { + return 0, badPrefix(ExtensionType, p[0]) + } + if spec.extra == 
constsize { + return int8(p[1]), nil + } + size := spec.size + p, err = m.R.Peek(int(size)) + if err != nil { + return 0, err + } + return int8(p[size-1]), nil +} + +// peekExtension peeks at the extension encoding type +// (must guarantee at least 1 byte in 'b') +func peekExtension(b []byte) (int8, error) { + spec := sizes[b[0]] + size := spec.size + if spec.typ != ExtensionType { + return 0, badPrefix(ExtensionType, b[0]) + } + if len(b) < int(size) { + return 0, ErrShortBytes + } + // for fixed extensions, + // the type information is in + // the second byte + if spec.extra == constsize { + return int8(b[1]), nil + } + // otherwise, it's in the last + // part of the prefix + return int8(b[size-1]), nil +} + +// ReadExtension reads the next object from the reader +// as an extension. ReadExtension will fail if the next +// object in the stream is not an extension, or if +// e.Type() is not the same as the wire type. +func (m *Reader) ReadExtension(e Extension) (err error) { + var p []byte + p, err = m.R.Peek(2) + if err != nil { + return + } + lead := p[0] + var read int + var off int + switch lead { + case mfixext1: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(3) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(3) + } + return + + case mfixext2: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(4) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(4) + } + return + + case mfixext4: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(6) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(6) + } + return + + case mfixext8: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(10) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(10) + } + return + + case mfixext16: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(18) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(18) + } + return + + case mext8: + p, err = m.R.Peek(3) + if err != nil { + return + } + if int8(p[2]) != e.ExtensionType() { + err = errExt(int8(p[2]), e.ExtensionType()) + return + } + read = int(uint8(p[1])) + off = 3 + + case mext16: + p, err = m.R.Peek(4) + if err != nil { + return + } + if int8(p[3]) != e.ExtensionType() { + err = errExt(int8(p[3]), e.ExtensionType()) + return + } + read = int(big.Uint16(p[1:])) + off = 4 + + case mext32: + p, err = m.R.Peek(6) + if err != nil { + return + } + if int8(p[5]) != e.ExtensionType() { + err = errExt(int8(p[5]), e.ExtensionType()) + return + } + read = int(big.Uint32(p[1:])) + off = 6 + + default: + err = badPrefix(ExtensionType, lead) + return + } + + p, err = m.R.Peek(read + off) + if err != nil { + return + } + err = e.UnmarshalBinary(p[off:]) + if err == nil { + _, err = m.R.Skip(read + off) + } + return +} + +// AppendExtension appends a MessagePack extension to the provided slice +func AppendExtension(b []byte, e Extension) ([]byte, error) { + l := e.Len() + var o []byte + var n int + switch l { + case 0: + o, n = ensure(b, 3) + o[n] = mext8 + o[n+1] = 0 + o[n+2] = 
byte(e.ExtensionType()) + return o[:n+3], nil + case 1: + o, n = ensure(b, 3) + o[n] = mfixext1 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 2: + o, n = ensure(b, 4) + o[n] = mfixext2 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 4: + o, n = ensure(b, 6) + o[n] = mfixext4 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 8: + o, n = ensure(b, 10) + o[n] = mfixext8 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 16: + o, n = ensure(b, 18) + o[n] = mfixext16 + o[n+1] = byte(e.ExtensionType()) + n += 2 + default: + switch { + case l < math.MaxUint8: + o, n = ensure(b, l+3) + o[n] = mext8 + o[n+1] = byte(uint8(l)) + o[n+2] = byte(e.ExtensionType()) + n += 3 + case l < math.MaxUint16: + o, n = ensure(b, l+4) + o[n] = mext16 + big.PutUint16(o[n+1:], uint16(l)) + o[n+3] = byte(e.ExtensionType()) + n += 4 + default: + o, n = ensure(b, l+6) + o[n] = mext32 + big.PutUint32(o[n+1:], uint32(l)) + o[n+5] = byte(e.ExtensionType()) + n += 6 + } + } + return o, e.MarshalBinaryTo(o[n:]) +} + +// ReadExtensionBytes reads an extension from 'b' into 'e' +// and returns any remaining bytes. +// Possible errors: +// - ErrShortBytes ('b' not long enough) +// - ExtensionTypeError{} (wire type not the same as e.Type()) +// - TypeError{} (next object not an extension) +// - InvalidPrefixError +// - An umarshal error returned from e.UnmarshalBinary +func ReadExtensionBytes(b []byte, e Extension) ([]byte, error) { + l := len(b) + if l < 3 { + return b, ErrShortBytes + } + lead := b[0] + var ( + sz int // size of 'data' + off int // offset of 'data' + typ int8 + ) + switch lead { + case mfixext1: + typ = int8(b[1]) + sz = 1 + off = 2 + case mfixext2: + typ = int8(b[1]) + sz = 2 + off = 2 + case mfixext4: + typ = int8(b[1]) + sz = 4 + off = 2 + case mfixext8: + typ = int8(b[1]) + sz = 8 + off = 2 + case mfixext16: + typ = int8(b[1]) + sz = 16 + off = 2 + case mext8: + sz = int(uint8(b[1])) + typ = int8(b[2]) + off = 3 + if sz == 0 { + return b[3:], e.UnmarshalBinary(b[3:3]) + } + case mext16: + if l < 4 { + return b, ErrShortBytes + } + sz = int(big.Uint16(b[1:])) + typ = int8(b[3]) + off = 4 + case mext32: + if l < 6 { + return b, ErrShortBytes + } + sz = int(big.Uint32(b[1:])) + typ = int8(b[5]) + off = 6 + default: + return b, badPrefix(ExtensionType, lead) + } + + if typ != e.ExtensionType() { + return b, errExt(typ, e.ExtensionType()) + } + + // the data of the extension starts + // at 'off' and is 'sz' bytes long + if len(b[off:]) < sz { + return b, ErrShortBytes + } + tot := off + sz + return b[tot:], e.UnmarshalBinary(b[off:tot]) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go new file mode 100644 index 00000000..8e7370eb --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/file.go @@ -0,0 +1,92 @@ +// +build linux darwin dragonfly freebsd netbsd openbsd +// +build !appengine + +package msgp + +import ( + "os" + "syscall" +) + +// ReadFile reads a file into 'dst' using +// a read-only memory mapping. Consequently, +// the file must be mmap-able, and the +// Unmarshaler should never write to +// the source memory. (Methods generated +// by the msgp tool obey that constraint, but +// user-defined implementations may not.) +// +// Reading and writing through file mappings +// is only efficient for large files; small +// files are best read and written using +// the ordinary streaming interfaces. 
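+//
+// Sketch of a round trip (editor-added; "cache.msgp" and MyBigStruct are
+// placeholders for a real path and a msgp-generated type):
+//
+//	f, _ := os.OpenFile("cache.msgp", os.O_RDWR|os.O_CREATE, 0644)
+//	_ = msgp.WriteFile(&big, f) // big is a MyBigStruct; mmap-backed, fallocate(2) on Linux
+//	_ = msgp.ReadFile(&out, f)  // out is a MyBigStruct; UnmarshalMsg must not write to the mapping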
+// +func ReadFile(dst Unmarshaler, file *os.File) error { + stat, err := file.Stat() + if err != nil { + return err + } + data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED) + if err != nil { + return err + } + adviseRead(data) + _, err = dst.UnmarshalMsg(data) + uerr := syscall.Munmap(data) + if err == nil { + err = uerr + } + return err +} + +// MarshalSizer is the combination +// of the Marshaler and Sizer +// interfaces. +type MarshalSizer interface { + Marshaler + Sizer +} + +// WriteFile writes a file from 'src' using +// memory mapping. It overwrites the entire +// contents of the previous file. +// The mapping size is calculated +// using the `Msgsize()` method +// of 'src', so it must produce a result +// equal to or greater than the actual encoded +// size of the object. Otherwise, +// a fault (SIGBUS) will occur. +// +// Reading and writing through file mappings +// is only efficient for large files; small +// files are best read and written using +// the ordinary streaming interfaces. +// +// NOTE: The performance of this call +// is highly OS- and filesystem-dependent. +// Users should take care to test that this +// performs as expected in a production environment. +// (Linux users should run a kernel and filesystem +// that support fallocate(2) for the best results.) +func WriteFile(src MarshalSizer, file *os.File) error { + sz := src.Msgsize() + err := fallocate(file, int64(sz)) + if err != nil { + return err + } + data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) + if err != nil { + return err + } + adviseWrite(data) + chunk := data[:0] + chunk, err = src.MarshalMsg(chunk) + if err != nil { + return err + } + uerr := syscall.Munmap(data) + if uerr != nil { + return uerr + } + return file.Truncate(int64(len(chunk))) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go new file mode 100644 index 00000000..6e654dbd --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go @@ -0,0 +1,47 @@ +// +build windows appengine + +package msgp + +import ( + "io/ioutil" + "os" +) + +// MarshalSizer is the combination +// of the Marshaler and Sizer +// interfaces. +type MarshalSizer interface { + Marshaler + Sizer +} + +func ReadFile(dst Unmarshaler, file *os.File) error { + if u, ok := dst.(Decodable); ok { + return u.DecodeMsg(NewReader(file)) + } + + data, err := ioutil.ReadAll(file) + if err != nil { + return err + } + _, err = dst.UnmarshalMsg(data) + return err +} + +func WriteFile(src MarshalSizer, file *os.File) error { + if e, ok := src.(Encodable); ok { + w := NewWriter(file) + err := e.EncodeMsg(w) + if err == nil { + err = w.Flush() + } + return err + } + + raw, err := src.MarshalMsg(nil) + if err != nil { + return err + } + _, err = file.Write(raw) + return err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/integers.go b/vendor/github.com/tinylib/msgp/msgp/integers.go new file mode 100644 index 00000000..f817d775 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/integers.go @@ -0,0 +1,174 @@ +package msgp + +/* ---------------------------------- + integer encoding utilities + (inline-able) + + TODO(tinylib): there are faster, + albeit non-portable solutions + to the code below. implement + byteswap? 
+ ---------------------------------- */ + +func putMint64(b []byte, i int64) { + b[0] = mint64 + b[1] = byte(i >> 56) + b[2] = byte(i >> 48) + b[3] = byte(i >> 40) + b[4] = byte(i >> 32) + b[5] = byte(i >> 24) + b[6] = byte(i >> 16) + b[7] = byte(i >> 8) + b[8] = byte(i) +} + +func getMint64(b []byte) int64 { + return (int64(b[1]) << 56) | (int64(b[2]) << 48) | + (int64(b[3]) << 40) | (int64(b[4]) << 32) | + (int64(b[5]) << 24) | (int64(b[6]) << 16) | + (int64(b[7]) << 8) | (int64(b[8])) +} + +func putMint32(b []byte, i int32) { + b[0] = mint32 + b[1] = byte(i >> 24) + b[2] = byte(i >> 16) + b[3] = byte(i >> 8) + b[4] = byte(i) +} + +func getMint32(b []byte) int32 { + return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4])) +} + +func putMint16(b []byte, i int16) { + b[0] = mint16 + b[1] = byte(i >> 8) + b[2] = byte(i) +} + +func getMint16(b []byte) (i int16) { + return (int16(b[1]) << 8) | int16(b[2]) +} + +func putMint8(b []byte, i int8) { + b[0] = mint8 + b[1] = byte(i) +} + +func getMint8(b []byte) (i int8) { + return int8(b[1]) +} + +func putMuint64(b []byte, u uint64) { + b[0] = muint64 + b[1] = byte(u >> 56) + b[2] = byte(u >> 48) + b[3] = byte(u >> 40) + b[4] = byte(u >> 32) + b[5] = byte(u >> 24) + b[6] = byte(u >> 16) + b[7] = byte(u >> 8) + b[8] = byte(u) +} + +func getMuint64(b []byte) uint64 { + return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) | + (uint64(b[3]) << 40) | (uint64(b[4]) << 32) | + (uint64(b[5]) << 24) | (uint64(b[6]) << 16) | + (uint64(b[7]) << 8) | (uint64(b[8])) +} + +func putMuint32(b []byte, u uint32) { + b[0] = muint32 + b[1] = byte(u >> 24) + b[2] = byte(u >> 16) + b[3] = byte(u >> 8) + b[4] = byte(u) +} + +func getMuint32(b []byte) uint32 { + return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4])) +} + +func putMuint16(b []byte, u uint16) { + b[0] = muint16 + b[1] = byte(u >> 8) + b[2] = byte(u) +} + +func getMuint16(b []byte) uint16 { + return (uint16(b[1]) << 8) | uint16(b[2]) +} + +func putMuint8(b []byte, u uint8) { + b[0] = muint8 + b[1] = byte(u) +} + +func getMuint8(b []byte) uint8 { + return uint8(b[1]) +} + +func getUnix(b []byte) (sec int64, nsec int32) { + sec = (int64(b[0]) << 56) | (int64(b[1]) << 48) | + (int64(b[2]) << 40) | (int64(b[3]) << 32) | + (int64(b[4]) << 24) | (int64(b[5]) << 16) | + (int64(b[6]) << 8) | (int64(b[7])) + + nsec = (int32(b[8]) << 24) | (int32(b[9]) << 16) | (int32(b[10]) << 8) | (int32(b[11])) + return +} + +func putUnix(b []byte, sec int64, nsec int32) { + b[0] = byte(sec >> 56) + b[1] = byte(sec >> 48) + b[2] = byte(sec >> 40) + b[3] = byte(sec >> 32) + b[4] = byte(sec >> 24) + b[5] = byte(sec >> 16) + b[6] = byte(sec >> 8) + b[7] = byte(sec) + b[8] = byte(nsec >> 24) + b[9] = byte(nsec >> 16) + b[10] = byte(nsec >> 8) + b[11] = byte(nsec) +} + +/* ----------------------------- + prefix utilities + ----------------------------- */ + +// write prefix and uint8 +func prefixu8(b []byte, pre byte, sz uint8) { + b[0] = pre + b[1] = byte(sz) +} + +// write prefix and big-endian uint16 +func prefixu16(b []byte, pre byte, sz uint16) { + b[0] = pre + b[1] = byte(sz >> 8) + b[2] = byte(sz) +} + +// write prefix and big-endian uint32 +func prefixu32(b []byte, pre byte, sz uint32) { + b[0] = pre + b[1] = byte(sz >> 24) + b[2] = byte(sz >> 16) + b[3] = byte(sz >> 8) + b[4] = byte(sz) +} + +func prefixu64(b []byte, pre byte, sz uint64) { + b[0] = pre + b[1] = byte(sz >> 56) + b[2] = byte(sz >> 48) + b[3] = byte(sz >> 40) + b[4] = byte(sz >> 32) + b[5] = byte(sz >> 
24) + b[6] = byte(sz >> 16) + b[7] = byte(sz >> 8) + b[8] = byte(sz) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go new file mode 100644 index 00000000..77601e52 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json.go @@ -0,0 +1,568 @@ +package msgp + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "io" + "strconv" + "unicode/utf8" +) + +var ( + null = []byte("null") + hex = []byte("0123456789abcdef") +) + +var defuns [_maxtype]func(jsWriter, *Reader) (int, error) + +// note: there is an initialization loop if +// this isn't set up during init() +func init() { + // since none of these functions are inline-able, + // there is not much of a penalty to the indirect + // call. however, this is best expressed as a jump-table... + defuns = [_maxtype]func(jsWriter, *Reader) (int, error){ + StrType: rwString, + BinType: rwBytes, + MapType: rwMap, + ArrayType: rwArray, + Float64Type: rwFloat64, + Float32Type: rwFloat32, + BoolType: rwBool, + IntType: rwInt, + UintType: rwUint, + NilType: rwNil, + ExtensionType: rwExtension, + Complex64Type: rwExtension, + Complex128Type: rwExtension, + TimeType: rwTime, + } +} + +// this is the interface +// used to write json +type jsWriter interface { + io.Writer + io.ByteWriter + WriteString(string) (int, error) +} + +// CopyToJSON reads MessagePack from 'src' and copies it +// as JSON to 'dst' until EOF. +func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) { + r := NewReader(src) + n, err = r.WriteToJSON(dst) + freeR(r) + return +} + +// WriteToJSON translates MessagePack from 'r' and writes it as +// JSON to 'w' until the underlying reader returns io.EOF. It returns +// the number of bytes written, and an error if it stopped before EOF. 
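+//
+// CopyToJSON wraps this method; a minimal use looks like (illustrative only):
+//
+//	var jsonBuf bytes.Buffer
+//	_, err := msgp.CopyToJSON(&jsonBuf, bytes.NewReader(msgpackData))
+//	// jsonBuf now holds the JSON translation of msgpackData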
+func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) { + var j jsWriter + var bf *bufio.Writer + if jsw, ok := w.(jsWriter); ok { + j = jsw + } else { + bf = bufio.NewWriter(w) + j = bf + } + var nn int + for err == nil { + nn, err = rwNext(j, r) + n += int64(nn) + } + if err != io.EOF { + if bf != nil { + bf.Flush() + } + return + } + err = nil + if bf != nil { + err = bf.Flush() + } + return +} + +func rwNext(w jsWriter, src *Reader) (int, error) { + t, err := src.NextType() + if err != nil { + return 0, err + } + return defuns[t](w, src) +} + +func rwMap(dst jsWriter, src *Reader) (n int, err error) { + var comma bool + var sz uint32 + var field []byte + + sz, err = src.ReadMapHeader() + if err != nil { + return + } + + if sz == 0 { + return dst.WriteString("{}") + } + + err = dst.WriteByte('{') + if err != nil { + return + } + n++ + var nn int + for i := uint32(0); i < sz; i++ { + if comma { + err = dst.WriteByte(',') + if err != nil { + return + } + n++ + } + + field, err = src.ReadMapKeyPtr() + if err != nil { + return + } + nn, err = rwquoted(dst, field) + n += nn + if err != nil { + return + } + + err = dst.WriteByte(':') + if err != nil { + return + } + n++ + nn, err = rwNext(dst, src) + n += nn + if err != nil { + return + } + if !comma { + comma = true + } + } + + err = dst.WriteByte('}') + if err != nil { + return + } + n++ + return +} + +func rwArray(dst jsWriter, src *Reader) (n int, err error) { + err = dst.WriteByte('[') + if err != nil { + return + } + var sz uint32 + var nn int + sz, err = src.ReadArrayHeader() + if err != nil { + return + } + comma := false + for i := uint32(0); i < sz; i++ { + if comma { + err = dst.WriteByte(',') + if err != nil { + return + } + n++ + } + nn, err = rwNext(dst, src) + n += nn + if err != nil { + return + } + comma = true + } + + err = dst.WriteByte(']') + if err != nil { + return + } + n++ + return +} + +func rwNil(dst jsWriter, src *Reader) (int, error) { + err := src.ReadNil() + if err != nil { + return 0, err + } + return dst.Write(null) +} + +func rwFloat32(dst jsWriter, src *Reader) (int, error) { + f, err := src.ReadFloat32() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendFloat(src.scratch[:0], float64(f), 'f', -1, 64) + return dst.Write(src.scratch) +} + +func rwFloat64(dst jsWriter, src *Reader) (int, error) { + f, err := src.ReadFloat64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendFloat(src.scratch[:0], f, 'f', -1, 32) + return dst.Write(src.scratch) +} + +func rwInt(dst jsWriter, src *Reader) (int, error) { + i, err := src.ReadInt64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendInt(src.scratch[:0], i, 10) + return dst.Write(src.scratch) +} + +func rwUint(dst jsWriter, src *Reader) (int, error) { + u, err := src.ReadUint64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendUint(src.scratch[:0], u, 10) + return dst.Write(src.scratch) +} + +func rwBool(dst jsWriter, src *Reader) (int, error) { + b, err := src.ReadBool() + if err != nil { + return 0, err + } + if b { + return dst.WriteString("true") + } + return dst.WriteString("false") +} + +func rwTime(dst jsWriter, src *Reader) (int, error) { + t, err := src.ReadTime() + if err != nil { + return 0, err + } + bts, err := t.MarshalJSON() + if err != nil { + return 0, err + } + return dst.Write(bts) +} + +func rwExtension(dst jsWriter, src *Reader) (n int, err error) { + et, err := src.peekExtensionType() + if err != nil { + return 0, err + } + + // registered extensions can 
override + // the JSON encoding + if j, ok := extensionReg[et]; ok { + var bts []byte + e := j() + err = src.ReadExtension(e) + if err != nil { + return + } + bts, err = json.Marshal(e) + if err != nil { + return + } + return dst.Write(bts) + } + + e := RawExtension{} + e.Type = et + err = src.ReadExtension(&e) + if err != nil { + return + } + + var nn int + err = dst.WriteByte('{') + if err != nil { + return + } + n++ + + nn, err = dst.WriteString(`"type:"`) + n += nn + if err != nil { + return + } + + src.scratch = strconv.AppendInt(src.scratch[0:0], int64(e.Type), 10) + nn, err = dst.Write(src.scratch) + n += nn + if err != nil { + return + } + + nn, err = dst.WriteString(`,"data":"`) + n += nn + if err != nil { + return + } + + enc := base64.NewEncoder(base64.StdEncoding, dst) + + nn, err = enc.Write(e.Data) + n += nn + if err != nil { + return + } + err = enc.Close() + if err != nil { + return + } + nn, err = dst.WriteString(`"}`) + n += nn + return +} + +func rwString(dst jsWriter, src *Reader) (n int, err error) { + var p []byte + p, err = src.R.Peek(1) + if err != nil { + return + } + lead := p[0] + var read int + + if isfixstr(lead) { + read = int(rfixstr(lead)) + src.R.Skip(1) + goto write + } + + switch lead { + case mstr8: + p, err = src.R.Next(2) + if err != nil { + return + } + read = int(uint8(p[1])) + case mstr16: + p, err = src.R.Next(3) + if err != nil { + return + } + read = int(big.Uint16(p[1:])) + case mstr32: + p, err = src.R.Next(5) + if err != nil { + return + } + read = int(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +write: + p, err = src.R.Next(read) + if err != nil { + return + } + n, err = rwquoted(dst, p) + return +} + +func rwBytes(dst jsWriter, src *Reader) (n int, err error) { + var nn int + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + src.scratch, err = src.ReadBytes(src.scratch[:0]) + if err != nil { + return + } + enc := base64.NewEncoder(base64.StdEncoding, dst) + nn, err = enc.Write(src.scratch) + n += nn + if err != nil { + return + } + err = enc.Close() + if err != nil { + return + } + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + return +} + +// Below (c) The Go Authors, 2009-2014 +// Subject to the BSD-style license found at http://golang.org +// +// see: encoding/json/encode.go:(*encodeState).stringbytes() +func rwquoted(dst jsWriter, s []byte) (n int, err error) { + var nn int + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + switch b { + case '\\', '"': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte(b) + if err != nil { + return + } + n++ + case '\n': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('n') + if err != nil { + return + } + n++ + case '\r': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('r') + if err != nil { + return + } + n++ + case '\t': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('t') + if err != nil { + return + } + n++ + default: + // This encodes bytes < 0x20 except for \t, \n and \r. 
+ // It also escapes <, >, and & + // because they can lead to security holes when + // user-controlled strings are rendered into JSON + // and served to some browsers. + nn, err = dst.WriteString(`\u00`) + n += nn + if err != nil { + return + } + err = dst.WriteByte(hex[b>>4]) + if err != nil { + return + } + n++ + err = dst.WriteByte(hex[b&0xF]) + if err != nil { + return + } + n++ + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + nn, err = dst.WriteString(`\ufffd`) + n += nn + if err != nil { + return + } + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + nn, err = dst.WriteString(`\u202`) + n += nn + if err != nil { + return + } + err = dst.WriteByte(hex[c&0xF]) + if err != nil { + return + } + n++ + i += size + start = i + continue + } + i += size + } + if start < len(s) { + nn, err = dst.Write(s[start:]) + n += nn + if err != nil { + return + } + } + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + return +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go new file mode 100644 index 00000000..438caf53 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go @@ -0,0 +1,363 @@ +package msgp + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "io" + "strconv" + "time" +) + +var unfuns [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error) + +func init() { + + // NOTE(pmh): this is best expressed as a jump table, + // but gc doesn't do that yet. revisit post-go1.5. + unfuns = [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error){ + StrType: rwStringBytes, + BinType: rwBytesBytes, + MapType: rwMapBytes, + ArrayType: rwArrayBytes, + Float64Type: rwFloat64Bytes, + Float32Type: rwFloat32Bytes, + BoolType: rwBoolBytes, + IntType: rwIntBytes, + UintType: rwUintBytes, + NilType: rwNullBytes, + ExtensionType: rwExtensionBytes, + Complex64Type: rwExtensionBytes, + Complex128Type: rwExtensionBytes, + TimeType: rwTimeBytes, + } +} + +// UnmarshalAsJSON takes raw messagepack and writes +// it as JSON to 'w'. If an error is returned, the +// bytes not translated will also be returned. If +// no errors are encountered, the length of the returned +// slice will be zero. 
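+//
+// A minimal sketch (editor-added, not upstream text):
+//
+//	var out bytes.Buffer
+//	leftover, err := msgp.UnmarshalAsJSON(&out, raw)
+//	// on success, len(leftover) == 0 and out holds the JSON translation of raw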
+func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) { + var ( + scratch []byte + cast bool + dst jsWriter + err error + ) + if jsw, ok := w.(jsWriter); ok { + dst = jsw + cast = true + } else { + dst = bufio.NewWriterSize(w, 512) + } + for len(msg) > 0 && err == nil { + msg, scratch, err = writeNext(dst, msg, scratch) + } + if !cast && err == nil { + err = dst.(*bufio.Writer).Flush() + } + return msg, err +} + +func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + if len(msg) < 1 { + return msg, scratch, ErrShortBytes + } + t := getType(msg[0]) + if t == InvalidType { + return msg, scratch, InvalidPrefixError(msg[0]) + } + if t == ExtensionType { + et, err := peekExtension(msg) + if err != nil { + return nil, scratch, err + } + if et == TimeExtension { + t = TimeType + } + } + return unfuns[t](w, msg, scratch) +} + +func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + sz, msg, err := ReadArrayHeaderBytes(msg) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('[') + if err != nil { + return msg, scratch, err + } + for i := uint32(0); i < sz; i++ { + if i != 0 { + err = w.WriteByte(',') + if err != nil { + return msg, scratch, err + } + } + msg, scratch, err = writeNext(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + } + err = w.WriteByte(']') + return msg, scratch, err +} + +func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + sz, msg, err := ReadMapHeaderBytes(msg) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('{') + if err != nil { + return msg, scratch, err + } + for i := uint32(0); i < sz; i++ { + if i != 0 { + err = w.WriteByte(',') + if err != nil { + return msg, scratch, err + } + } + msg, scratch, err = rwMapKeyBytes(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte(':') + if err != nil { + return msg, scratch, err + } + msg, scratch, err = writeNext(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + } + err = w.WriteByte('}') + return msg, scratch, err +} + +func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + msg, scratch, err := rwStringBytes(w, msg, scratch) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return rwBytesBytes(w, msg, scratch) + } + } + return msg, scratch, err +} + +func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + str, msg, err := ReadStringZC(msg) + if err != nil { + return msg, scratch, err + } + _, err = rwquoted(w, str) + return msg, scratch, err +} + +func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + bts, msg, err := ReadBytesZC(msg) + if err != nil { + return msg, scratch, err + } + l := base64.StdEncoding.EncodedLen(len(bts)) + if cap(scratch) >= l { + scratch = scratch[0:l] + } else { + scratch = make([]byte, l) + } + base64.StdEncoding.Encode(scratch, bts) + err = w.WriteByte('"') + if err != nil { + return msg, scratch, err + } + _, err = w.Write(scratch) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('"') + return msg, scratch, err +} + +func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + msg, err := ReadNilBytes(msg) + if err != nil { + return msg, scratch, err + } + _, err = w.Write(null) + return msg, scratch, err +} + +func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + b, msg, err := 
ReadBoolBytes(msg) + if err != nil { + return msg, scratch, err + } + if b { + _, err = w.WriteString("true") + return msg, scratch, err + } + _, err = w.WriteString("false") + return msg, scratch, err +} + +func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + i, msg, err := ReadInt64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendInt(scratch[0:0], i, 10) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + u, msg, err := ReadUint64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendUint(scratch[0:0], u, 10) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloatBytes(w jsWriter, msg []byte, f64 bool, scratch []byte) ([]byte, []byte, error) { + var f float64 + var err error + var sz int + if f64 { + sz = 64 + f, msg, err = ReadFloat64Bytes(msg) + } else { + sz = 32 + var v float32 + v, msg, err = ReadFloat32Bytes(msg) + f = float64(v) + } + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch, f, 'f', -1, sz) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var f float32 + var err error + f, msg, err = ReadFloat32Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch[:0], float64(f), 'f', -1, 32) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var f float64 + var err error + f, msg, err = ReadFloat64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, 64) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var t time.Time + var err error + t, msg, err = ReadTimeBytes(msg) + if err != nil { + return msg, scratch, err + } + bts, err := t.MarshalJSON() + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err +} + +func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var err error + var et int8 + et, err = peekExtension(msg) + if err != nil { + return msg, scratch, err + } + + // if it's time.Time + if et == TimeExtension { + var tm time.Time + tm, msg, err = ReadTimeBytes(msg) + if err != nil { + return msg, scratch, err + } + bts, err := tm.MarshalJSON() + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err + } + + // if the extension is registered, + // use its canonical JSON form + if f, ok := extensionReg[et]; ok { + e := f() + msg, err = ReadExtensionBytes(msg, e) + if err != nil { + return msg, scratch, err + } + bts, err := json.Marshal(e) + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err + } + + // otherwise, write `{"type": , "data": ""}` + r := RawExtension{} + r.Type = et + msg, err = ReadExtensionBytes(msg, &r) + if err != nil { + return msg, scratch, err + } + scratch, err = writeExt(w, r, scratch) + return msg, scratch, err +} + +func writeExt(w jsWriter, r RawExtension, scratch []byte) ([]byte, error) { + _, err := w.WriteString(`{"type":`) + if err != nil { + return scratch, err + } + scratch = strconv.AppendInt(scratch[0:0], int64(r.Type), 10) + _, err = 
w.Write(scratch) + if err != nil { + return scratch, err + } + _, err = w.WriteString(`,"data":"`) + if err != nil { + return scratch, err + } + l := base64.StdEncoding.EncodedLen(len(r.Data)) + if cap(scratch) >= l { + scratch = scratch[0:l] + } else { + scratch = make([]byte, l) + } + base64.StdEncoding.Encode(scratch, r.Data) + _, err = w.Write(scratch) + if err != nil { + return scratch, err + } + _, err = w.WriteString(`"}`) + return scratch, err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go new file mode 100644 index 00000000..ad07ef99 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/number.go @@ -0,0 +1,267 @@ +package msgp + +import ( + "math" + "strconv" +) + +// The portable parts of the Number implementation + +// Number can be +// an int64, uint64, float32, +// or float64 internally. +// It can decode itself +// from any of the native +// messagepack number types. +// The zero-value of Number +// is Int(0). Using the equality +// operator with Number compares +// both the type and the value +// of the number. +type Number struct { + // internally, this + // is just a tagged union. + // the raw bits of the number + // are stored the same way regardless. + bits uint64 + typ Type +} + +// AsInt sets the number to an int64. +func (n *Number) AsInt(i int64) { + + // we always store int(0) + // as {0, InvalidType} in + // order to preserve + // the behavior of the == operator + if i == 0 { + n.typ = InvalidType + n.bits = 0 + return + } + + n.typ = IntType + n.bits = uint64(i) +} + +// AsUint sets the number to a uint64. +func (n *Number) AsUint(u uint64) { + n.typ = UintType + n.bits = u +} + +// AsFloat32 sets the value of the number +// to a float32. +func (n *Number) AsFloat32(f float32) { + n.typ = Float32Type + n.bits = uint64(math.Float32bits(f)) +} + +// AsFloat64 sets the value of the +// number to a float64. +func (n *Number) AsFloat64(f float64) { + n.typ = Float64Type + n.bits = math.Float64bits(f) +} + +// Int casts the number as an int64, and +// returns whether or not that was the +// underlying type. +func (n *Number) Int() (int64, bool) { + return int64(n.bits), n.typ == IntType || n.typ == InvalidType +} + +// Uint casts the number as a uint64, and returns +// whether or not that was the underlying type. +func (n *Number) Uint() (uint64, bool) { + return n.bits, n.typ == UintType +} + +// Float casts the number to a float64, and +// returns whether or not that was the underlying +// type (either a float64 or a float32). +func (n *Number) Float() (float64, bool) { + switch n.typ { + case Float32Type: + return float64(math.Float32frombits(uint32(n.bits))), true + case Float64Type: + return math.Float64frombits(n.bits), true + default: + return 0.0, false + } +} + +// Type will return one of: +// Float64Type, Float32Type, UintType, or IntType. 
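Illustrative sketch (not part of the vendored patch): how Number's tagged-union accessors behave. It only uses the accessors defined above and assumes the package is imported from its usual path.

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	var n msgp.Number // the zero value reports as Int(0)
	if i, ok := n.Int(); ok {
		fmt.Println("zero value:", i) // 0
	}

	n.AsFloat64(2.5)
	f, ok := n.Float() // true when the underlying type is float32 or float64
	fmt.Println(f, ok) // 2.5 true

	_, ok = n.Int() // false: the underlying type is now Float64Type
	fmt.Println(ok)
}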
+func (n *Number) Type() Type { + if n.typ == InvalidType { + return IntType + } + return n.typ +} + +// DecodeMsg implements msgp.Decodable +func (n *Number) DecodeMsg(r *Reader) error { + typ, err := r.NextType() + if err != nil { + return err + } + switch typ { + case Float32Type: + f, err := r.ReadFloat32() + if err != nil { + return err + } + n.AsFloat32(f) + return nil + case Float64Type: + f, err := r.ReadFloat64() + if err != nil { + return err + } + n.AsFloat64(f) + return nil + case IntType: + i, err := r.ReadInt64() + if err != nil { + return err + } + n.AsInt(i) + return nil + case UintType: + u, err := r.ReadUint64() + if err != nil { + return err + } + n.AsUint(u) + return nil + default: + return TypeError{Encoded: typ, Method: IntType} + } +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (n *Number) UnmarshalMsg(b []byte) ([]byte, error) { + typ := NextType(b) + switch typ { + case IntType: + i, o, err := ReadInt64Bytes(b) + if err != nil { + return b, err + } + n.AsInt(i) + return o, nil + case UintType: + u, o, err := ReadUint64Bytes(b) + if err != nil { + return b, err + } + n.AsUint(u) + return o, nil + case Float64Type: + f, o, err := ReadFloat64Bytes(b) + if err != nil { + return b, err + } + n.AsFloat64(f) + return o, nil + case Float32Type: + f, o, err := ReadFloat32Bytes(b) + if err != nil { + return b, err + } + n.AsFloat32(f) + return o, nil + default: + return b, TypeError{Method: IntType, Encoded: typ} + } +} + +// MarshalMsg implements msgp.Marshaler +func (n *Number) MarshalMsg(b []byte) ([]byte, error) { + switch n.typ { + case IntType: + return AppendInt64(b, int64(n.bits)), nil + case UintType: + return AppendUint64(b, uint64(n.bits)), nil + case Float64Type: + return AppendFloat64(b, math.Float64frombits(n.bits)), nil + case Float32Type: + return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil + default: + return AppendInt64(b, 0), nil + } +} + +// EncodeMsg implements msgp.Encodable +func (n *Number) EncodeMsg(w *Writer) error { + switch n.typ { + case IntType: + return w.WriteInt64(int64(n.bits)) + case UintType: + return w.WriteUint64(n.bits) + case Float64Type: + return w.WriteFloat64(math.Float64frombits(n.bits)) + case Float32Type: + return w.WriteFloat32(math.Float32frombits(uint32(n.bits))) + default: + return w.WriteInt64(0) + } +} + +// Msgsize implements msgp.Sizer +func (n *Number) Msgsize() int { + switch n.typ { + case Float32Type: + return Float32Size + case Float64Type: + return Float64Size + case IntType: + return Int64Size + case UintType: + return Uint64Size + default: + return 1 // fixint(0) + } +} + +// MarshalJSON implements json.Marshaler +func (n *Number) MarshalJSON() ([]byte, error) { + t := n.Type() + if t == InvalidType { + return []byte{'0'}, nil + } + out := make([]byte, 0, 32) + switch t { + case Float32Type, Float64Type: + f, _ := n.Float() + return strconv.AppendFloat(out, f, 'f', -1, 64), nil + case IntType: + i, _ := n.Int() + return strconv.AppendInt(out, i, 10), nil + case UintType: + u, _ := n.Uint() + return strconv.AppendUint(out, u, 10), nil + default: + panic("(*Number).typ is invalid") + } +} + +// String implements fmt.Stringer +func (n *Number) String() string { + switch n.typ { + case InvalidType: + return "0" + case Float32Type, Float64Type: + f, _ := n.Float() + return strconv.FormatFloat(f, 'f', -1, 64) + case IntType: + i, _ := n.Int() + return strconv.FormatInt(i, 10) + case UintType: + u, _ := n.Uint() + return strconv.FormatUint(u, 10) + default: + panic("(*Number).typ is invalid") + } 
+} diff --git a/vendor/github.com/tinylib/msgp/msgp/purego.go b/vendor/github.com/tinylib/msgp/msgp/purego.go new file mode 100644 index 00000000..c828f7ec --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/purego.go @@ -0,0 +1,15 @@ +// +build purego appengine + +package msgp + +// let's just assume appengine +// uses 64-bit hardware... +const smallint = false + +func UnsafeString(b []byte) string { + return string(b) +} + +func UnsafeBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go new file mode 100644 index 00000000..aa668c57 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read.go @@ -0,0 +1,1358 @@ +package msgp + +import ( + "io" + "math" + "sync" + "time" + + "github.com/philhofer/fwd" +) + +// where we keep old *Readers +var readerPool = sync.Pool{New: func() interface{} { return &Reader{} }} + +// Type is a MessagePack wire type, +// including this package's built-in +// extension types. +type Type byte + +// MessagePack Types +// +// The zero value of Type +// is InvalidType. +const ( + InvalidType Type = iota + + // MessagePack built-in types + + StrType + BinType + MapType + ArrayType + Float64Type + Float32Type + BoolType + IntType + UintType + NilType + ExtensionType + + // pseudo-types provided + // by extensions + + Complex64Type + Complex128Type + TimeType + + _maxtype +) + +// String implements fmt.Stringer +func (t Type) String() string { + switch t { + case StrType: + return "str" + case BinType: + return "bin" + case MapType: + return "map" + case ArrayType: + return "array" + case Float64Type: + return "float64" + case Float32Type: + return "float32" + case BoolType: + return "bool" + case UintType: + return "uint" + case IntType: + return "int" + case ExtensionType: + return "ext" + case NilType: + return "nil" + default: + return "" + } +} + +func freeR(m *Reader) { + readerPool.Put(m) +} + +// Unmarshaler is the interface fulfilled +// by objects that know how to unmarshal +// themselves from MessagePack. +// UnmarshalMsg unmarshals the object +// from binary, returing any leftover +// bytes and any errors encountered. +type Unmarshaler interface { + UnmarshalMsg([]byte) ([]byte, error) +} + +// Decodable is the interface fulfilled +// by objects that know how to read +// themselves from a *Reader. +type Decodable interface { + DecodeMsg(*Reader) error +} + +// Decode decodes 'd' from 'r'. +func Decode(r io.Reader, d Decodable) error { + rd := NewReader(r) + err := d.DecodeMsg(rd) + freeR(rd) + return err +} + +// NewReader returns a *Reader that +// reads from the provided reader. The +// reader will be buffered. +func NewReader(r io.Reader) *Reader { + p := readerPool.Get().(*Reader) + if p.R == nil { + p.R = fwd.NewReader(r) + } else { + p.R.Reset(r) + } + return p +} + +// NewReaderSize returns a *Reader with a buffer of the given size. +// (This is vastly preferable to passing the decoder a reader that is already buffered.) +func NewReaderSize(r io.Reader, sz int) *Reader { + return &Reader{R: fwd.NewReaderSize(r, sz)} +} + +// Reader wraps an io.Reader and provides +// methods to read MessagePack-encoded values +// from it. Readers are buffered. +type Reader struct { + // R is the buffered reader + // that the Reader uses + // to decode MessagePack. + // The Reader itself + // is stateless; all the + // buffering is done + // within R. 
+ R *fwd.Reader + scratch []byte +} + +// Read implements `io.Reader` +func (m *Reader) Read(p []byte) (int, error) { + return m.R.Read(p) +} + +// CopyNext reads the next object from m without decoding it and writes it to w. +// It avoids unnecessary copies internally. +func (m *Reader) CopyNext(w io.Writer) (int64, error) { + sz, o, err := getNextSize(m.R) + if err != nil { + return 0, err + } + + var n int64 + // Opportunistic optimization: if we can fit the whole thing in the m.R + // buffer, then just get a pointer to that, and pass it to w.Write, + // avoiding an allocation. + if int(sz) <= m.R.BufferSize() { + var nn int + var buf []byte + buf, err = m.R.Next(int(sz)) + if err != nil { + if err == io.ErrUnexpectedEOF { + err = ErrShortBytes + } + return 0, err + } + nn, err = w.Write(buf) + n += int64(nn) + } else { + // Fall back to io.CopyN. + // May avoid allocating if w is a ReaderFrom (e.g. bytes.Buffer) + n, err = io.CopyN(w, m.R, int64(sz)) + if err == io.ErrUnexpectedEOF { + err = ErrShortBytes + } + } + if err != nil { + return n, err + } else if n < int64(sz) { + return n, io.ErrShortWrite + } + + // for maps and slices, read elements + for x := uintptr(0); x < o; x++ { + var n2 int64 + n2, err = m.CopyNext(w) + if err != nil { + return n, err + } + n += n2 + } + return n, nil +} + +// ReadFull implements `io.ReadFull` +func (m *Reader) ReadFull(p []byte) (int, error) { + return m.R.ReadFull(p) +} + +// Reset resets the underlying reader. +func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) } + +// Buffered returns the number of bytes currently in the read buffer. +func (m *Reader) Buffered() int { return m.R.Buffered() } + +// BufferSize returns the capacity of the read buffer. +func (m *Reader) BufferSize() int { return m.R.BufferSize() } + +// NextType returns the next object type to be decoded. +func (m *Reader) NextType() (Type, error) { + p, err := m.R.Peek(1) + if err != nil { + return InvalidType, err + } + t := getType(p[0]) + if t == InvalidType { + return t, InvalidPrefixError(p[0]) + } + if t == ExtensionType { + v, err := m.peekExtensionType() + if err != nil { + return InvalidType, err + } + switch v { + case Complex64Extension: + return Complex64Type, nil + case Complex128Extension: + return Complex128Type, nil + case TimeExtension: + return TimeType, nil + } + } + return t, nil +} + +// IsNil returns whether or not +// the next byte is a null messagepack byte +func (m *Reader) IsNil() bool { + p, err := m.R.Peek(1) + return err == nil && p[0] == mnil +} + +// getNextSize returns the size of the next object on the wire. +// returns (obj size, obj elements, error) +// only maps and arrays have non-zero obj elements +// for maps and arrays, obj size does not include elements +// +// use uintptr b/c it's guaranteed to be large enough +// to hold whatever we can fit in memory. 
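A minimal sketch (not part of the patch) of obtaining a *Reader and peeking at the next object without consuming it. The 0xc0 byte is the MessagePack 'nil' marker from the wire-format spec, hand-rolled here for brevity.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// A single MessagePack 'nil' object (0xc0, per the MessagePack spec).
	r := msgp.NewReader(bytes.NewReader([]byte{0xc0}))

	t, err := r.NextType() // peeks at the prefix; does not consume it
	fmt.Println(t, err)    // nil <nil>
	fmt.Println(r.IsNil()) // true
}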
+func getNextSize(r *fwd.Reader) (uintptr, uintptr, error) { + b, err := r.Peek(1) + if err != nil { + return 0, 0, err + } + lead := b[0] + spec := &sizes[lead] + size, mode := spec.size, spec.extra + if size == 0 { + return 0, 0, InvalidPrefixError(lead) + } + if mode >= 0 { + return uintptr(size), uintptr(mode), nil + } + b, err = r.Peek(int(size)) + if err != nil { + return 0, 0, err + } + switch mode { + case extra8: + return uintptr(size) + uintptr(b[1]), 0, nil + case extra16: + return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil + case extra32: + return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil + case map16v: + return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil + case map32v: + return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil + case array16v: + return uintptr(size), uintptr(big.Uint16(b[1:])), nil + case array32v: + return uintptr(size), uintptr(big.Uint32(b[1:])), nil + default: + return 0, 0, fatal + } +} + +// Skip skips over the next object, regardless of +// its type. If it is an array or map, the whole array +// or map will be skipped. +func (m *Reader) Skip() error { + var ( + v uintptr // bytes + o uintptr // objects + err error + p []byte + ) + + // we can use the faster + // method if we have enough + // buffered data + if m.R.Buffered() >= 5 { + p, err = m.R.Peek(5) + if err != nil { + return err + } + v, o, err = getSize(p) + if err != nil { + return err + } + } else { + v, o, err = getNextSize(m.R) + if err != nil { + return err + } + } + + // 'v' is always non-zero + // if err == nil + _, err = m.R.Skip(int(v)) + if err != nil { + return err + } + + // for maps and slices, skip elements + for x := uintptr(0); x < o; x++ { + err = m.Skip() + if err != nil { + return err + } + } + return nil +} + +// ReadMapHeader reads the next object +// as a map header and returns the size +// of the map and the number of bytes written. +// It will return a TypeError{} if the next +// object is not a map. +func (m *Reader) ReadMapHeader() (sz uint32, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixmap(lead) { + sz = uint32(rfixmap(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case mmap16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mmap32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + default: + err = badPrefix(MapType, lead) + return + } +} + +// ReadMapKey reads either a 'str' or 'bin' field from +// the reader and returns the value as a []byte. It uses +// scratch for storage if it is large enough. +func (m *Reader) ReadMapKey(scratch []byte) ([]byte, error) { + out, err := m.ReadStringAsBytes(scratch) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return m.ReadBytes(scratch) + } + return nil, err + } + return out, nil +} + +// MapKeyPtr returns a []byte pointing to the contents +// of a valid map key. The key cannot be empty, and it +// must be shorter than the total buffer size of the +// *Reader. Additionally, the returned slice is only +// valid until the next *Reader method call. Users +// should exercise extreme care when using this +// method; writing into the returned slice may +// corrupt future reads. 
+func (m *Reader) ReadMapKeyPtr() ([]byte, error) { + p, err := m.R.Peek(1) + if err != nil { + return nil, err + } + lead := p[0] + var read int + if isfixstr(lead) { + read = int(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + switch lead { + case mstr8, mbin8: + p, err = m.R.Next(2) + if err != nil { + return nil, err + } + read = int(p[1]) + case mstr16, mbin16: + p, err = m.R.Next(3) + if err != nil { + return nil, err + } + read = int(big.Uint16(p[1:])) + case mstr32, mbin32: + p, err = m.R.Next(5) + if err != nil { + return nil, err + } + read = int(big.Uint32(p[1:])) + default: + return nil, badPrefix(StrType, lead) + } +fill: + if read == 0 { + return nil, ErrShortBytes + } + return m.R.Next(read) +} + +// ReadArrayHeader reads the next object as an +// array header and returns the size of the array +// and the number of bytes read. +func (m *Reader) ReadArrayHeader() (sz uint32, err error) { + var lead byte + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixarray(lead) { + sz = uint32(rfixarray(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case marray16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + + case marray32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + + default: + err = badPrefix(ArrayType, lead) + return + } +} + +// ReadNil reads a 'nil' MessagePack byte from the reader +func (m *Reader) ReadNil() error { + p, err := m.R.Peek(1) + if err != nil { + return err + } + if p[0] != mnil { + return badPrefix(NilType, p[0]) + } + _, err = m.R.Skip(1) + return err +} + +// ReadFloat64 reads a float64 from the reader. +// (If the value on the wire is encoded as a float32, +// it will be up-cast to a float64.) 
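A sketch (not part of the patch) of walking a map with ReadMapHeader, ReadMapKey, and Skip. The input bytes are hand-rolled from the MessagePack spec (fixmap, fixstr, and fixint markers) rather than taken from the diff.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// {"a": 1} encoded by hand: fixmap(1), fixstr(1) "a", fixint 1.
	raw := []byte{0x81, 0xa1, 'a', 0x01}
	r := msgp.NewReader(bytes.NewReader(raw))

	sz, err := r.ReadMapHeader()
	if err != nil {
		panic(err)
	}
	for i := uint32(0); i < sz; i++ {
		key, err := r.ReadMapKey(nil) // a reusable scratch slice can be passed instead of nil
		if err != nil {
			panic(err)
		}
		fmt.Printf("key %q\n", key)
		if err := r.Skip(); err != nil { // skip the value without decoding it
			panic(err)
		}
	}
}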
+func (m *Reader) ReadFloat64() (f float64, err error) { + var p []byte + p, err = m.R.Peek(9) + if err != nil { + // we'll allow a coversion from float32 to float64, + // since we don't lose any precision + if err == io.EOF && len(p) > 0 && p[0] == mfloat32 { + ef, err := m.ReadFloat32() + return float64(ef), err + } + return + } + if p[0] != mfloat64 { + // see above + if p[0] == mfloat32 { + ef, err := m.ReadFloat32() + return float64(ef), err + } + err = badPrefix(Float64Type, p[0]) + return + } + f = math.Float64frombits(getMuint64(p)) + _, err = m.R.Skip(9) + return +} + +// ReadFloat32 reads a float32 from the reader +func (m *Reader) ReadFloat32() (f float32, err error) { + var p []byte + p, err = m.R.Peek(5) + if err != nil { + return + } + if p[0] != mfloat32 { + err = badPrefix(Float32Type, p[0]) + return + } + f = math.Float32frombits(getMuint32(p)) + _, err = m.R.Skip(5) + return +} + +// ReadBool reads a bool from the reader +func (m *Reader) ReadBool() (b bool, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + switch p[0] { + case mtrue: + b = true + case mfalse: + default: + err = badPrefix(BoolType, p[0]) + return + } + _, err = m.R.Skip(1) + return +} + +// ReadInt64 reads an int64 from the reader +func (m *Reader) ReadInt64() (i int64, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + + if isfixint(lead) { + i = int64(rfixint(lead)) + _, err = m.R.Skip(1) + return + } else if isnfixint(lead) { + i = int64(rnfixint(lead)) + _, err = m.R.Skip(1) + return + } + + switch lead { + case mint8: + p, err = m.R.Next(2) + if err != nil { + return + } + i = int64(getMint8(p)) + return + + case muint8: + p, err = m.R.Next(2) + if err != nil { + return + } + i = int64(getMuint8(p)) + return + + case mint16: + p, err = m.R.Next(3) + if err != nil { + return + } + i = int64(getMint16(p)) + return + + case muint16: + p, err = m.R.Next(3) + if err != nil { + return + } + i = int64(getMuint16(p)) + return + + case mint32: + p, err = m.R.Next(5) + if err != nil { + return + } + i = int64(getMint32(p)) + return + + case muint32: + p, err = m.R.Next(5) + if err != nil { + return + } + i = int64(getMuint32(p)) + return + + case mint64: + p, err = m.R.Next(9) + if err != nil { + return + } + i = getMint64(p) + return + + case muint64: + p, err = m.R.Next(9) + if err != nil { + return + } + u := getMuint64(p) + if u > math.MaxInt64 { + err = UintOverflow{Value: u, FailedBitsize: 64} + return + } + i = int64(u) + return + + default: + err = badPrefix(IntType, lead) + return + } +} + +// ReadInt32 reads an int32 from the reader +func (m *Reader) ReadInt32() (i int32, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt32 || in < math.MinInt32 { + err = IntOverflow{Value: in, FailedBitsize: 32} + return + } + i = int32(in) + return +} + +// ReadInt16 reads an int16 from the reader +func (m *Reader) ReadInt16() (i int16, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt16 || in < math.MinInt16 { + err = IntOverflow{Value: in, FailedBitsize: 16} + return + } + i = int16(in) + return +} + +// ReadInt8 reads an int8 from the reader +func (m *Reader) ReadInt8() (i int8, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt8 || in < math.MinInt8 { + err = IntOverflow{Value: in, FailedBitsize: 8} + return + } + i = int8(in) + return +} + +// ReadInt reads an int from the reader +func (m *Reader) ReadInt() (i int, err error) { + if 
smallint { + var in int32 + in, err = m.ReadInt32() + i = int(in) + return + } + var in int64 + in, err = m.ReadInt64() + i = int(in) + return +} + +// ReadUint64 reads a uint64 from the reader +func (m *Reader) ReadUint64() (u uint64, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixint(lead) { + u = uint64(rfixint(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case mint8: + p, err = m.R.Next(2) + if err != nil { + return + } + v := int64(getMint8(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint8: + p, err = m.R.Next(2) + if err != nil { + return + } + u = uint64(getMuint8(p)) + return + + case mint16: + p, err = m.R.Next(3) + if err != nil { + return + } + v := int64(getMint16(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint16: + p, err = m.R.Next(3) + if err != nil { + return + } + u = uint64(getMuint16(p)) + return + + case mint32: + p, err = m.R.Next(5) + if err != nil { + return + } + v := int64(getMint32(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint32: + p, err = m.R.Next(5) + if err != nil { + return + } + u = uint64(getMuint32(p)) + return + + case mint64: + p, err = m.R.Next(9) + if err != nil { + return + } + v := int64(getMint64(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint64: + p, err = m.R.Next(9) + if err != nil { + return + } + u = getMuint64(p) + return + + default: + if isnfixint(lead) { + err = UintBelowZero{Value: int64(rnfixint(lead))} + } else { + err = badPrefix(UintType, lead) + } + return + + } +} + +// ReadUint32 reads a uint32 from the reader +func (m *Reader) ReadUint32() (u uint32, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint32 { + err = UintOverflow{Value: in, FailedBitsize: 32} + return + } + u = uint32(in) + return +} + +// ReadUint16 reads a uint16 from the reader +func (m *Reader) ReadUint16() (u uint16, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint16 { + err = UintOverflow{Value: in, FailedBitsize: 16} + return + } + u = uint16(in) + return +} + +// ReadUint8 reads a uint8 from the reader +func (m *Reader) ReadUint8() (u uint8, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint8 { + err = UintOverflow{Value: in, FailedBitsize: 8} + return + } + u = uint8(in) + return +} + +// ReadUint reads a uint from the reader +func (m *Reader) ReadUint() (u uint, err error) { + if smallint { + var un uint32 + un, err = m.ReadUint32() + u = uint(un) + return + } + var un uint64 + un, err = m.ReadUint64() + u = uint(un) + return +} + +// ReadByte is analogous to ReadUint8. +// +// NOTE: this is *not* an implementation +// of io.ByteReader. +func (m *Reader) ReadByte() (b byte, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint8 { + err = UintOverflow{Value: in, FailedBitsize: 8} + return + } + b = byte(in) + return +} + +// ReadBytes reads a MessagePack 'bin' object +// from the reader and returns its value. It may +// use 'scratch' for storage if it is non-nil. 
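A sketch (not part of the patch) showing the sized integer readers and their overflow behavior. The 0xcc marker is the MessagePack uint8 prefix, assumed from the spec.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// uint8 255 on the wire: the 0xcc marker followed by the value byte.
	r := msgp.NewReader(bytes.NewReader([]byte{0xcc, 0xff}))
	u, err := r.ReadUint8()
	fmt.Println(u, err) // 255 <nil>

	// The same bytes do not fit in an int8, so the sized reader reports an overflow error.
	r = msgp.NewReader(bytes.NewReader([]byte{0xcc, 0xff}))
	_, err = r.ReadInt8()
	fmt.Println(err)
}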
+func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(2) + if err != nil { + return + } + lead = p[0] + var read int64 + switch lead { + case mbin8: + read = int64(p[1]) + m.R.Skip(2) + case mbin16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mbin32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(BinType, lead) + return + } + if int64(cap(scratch)) < read { + b = make([]byte, read) + } else { + b = scratch[0:read] + } + _, err = m.R.ReadFull(b) + return +} + +// ReadBytesHeader reads the size header +// of a MessagePack 'bin' object. The user +// is responsible for dealing with the next +// 'sz' bytes from the reader in an application-specific +// way. +func (m *Reader) ReadBytesHeader() (sz uint32, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + switch p[0] { + case mbin8: + p, err = m.R.Next(2) + if err != nil { + return + } + sz = uint32(p[1]) + return + case mbin16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mbin32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = uint32(big.Uint32(p[1:])) + return + default: + err = badPrefix(BinType, p[0]) + return + } +} + +// ReadExactBytes reads a MessagePack 'bin'-encoded +// object off of the wire into the provided slice. An +// ArrayError will be returned if the object is not +// exactly the length of the input slice. +func (m *Reader) ReadExactBytes(into []byte) error { + p, err := m.R.Peek(2) + if err != nil { + return err + } + lead := p[0] + var read int64 // bytes to read + var skip int // prefix size to skip + switch lead { + case mbin8: + read = int64(p[1]) + skip = 2 + case mbin16: + p, err = m.R.Peek(3) + if err != nil { + return err + } + read = int64(big.Uint16(p[1:])) + skip = 3 + case mbin32: + p, err = m.R.Peek(5) + if err != nil { + return err + } + read = int64(big.Uint32(p[1:])) + skip = 5 + default: + return badPrefix(BinType, lead) + } + if read != int64(len(into)) { + return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)} + } + m.R.Skip(skip) + _, err = m.R.ReadFull(into) + return err +} + +// ReadStringAsBytes reads a MessagePack 'str' (utf-8) string +// and returns its value as bytes. It may use 'scratch' for storage +// if it is non-nil. +func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + var read int64 + + if isfixstr(lead) { + read = int64(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + read = int64(uint8(p[1])) + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +fill: + if int64(cap(scratch)) < read { + b = make([]byte, read) + } else { + b = scratch[0:read] + } + _, err = m.R.ReadFull(b) + return +} + +// ReadStringHeader reads a string header +// off of the wire. The user is then responsible +// for dealing with the next 'sz' bytes from +// the reader in an application-specific manner. 
+func (m *Reader) ReadStringHeader() (sz uint32, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead := p[0] + if isfixstr(lead) { + sz = uint32(rfixstr(lead)) + m.R.Skip(1) + return + } + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + sz = uint32(p[1]) + return + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + default: + err = badPrefix(StrType, lead) + return + } +} + +// ReadString reads a utf-8 string from the reader +func (m *Reader) ReadString() (s string, err error) { + var p []byte + var lead byte + var read int64 + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + + if isfixstr(lead) { + read = int64(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + read = int64(uint8(p[1])) + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +fill: + if read == 0 { + s, err = "", nil + return + } + // reading into the memory + // that will become the string + // itself has vastly superior + // worst-case performance, because + // the reader buffer doesn't have + // to be large enough to hold the string. + // the idea here is to make it more + // difficult for someone malicious + // to cause the system to run out of + // memory by sending very large strings. + // + // NOTE: this works because the argument + // passed to (*fwd.Reader).ReadFull escapes + // to the heap; its argument may, in turn, + // be passed to the underlying reader, and + // thus escape analysis *must* conclude that + // 'out' escapes. + out := make([]byte, read) + _, err = m.R.ReadFull(out) + if err != nil { + return + } + s = UnsafeString(out) + return +} + +// ReadComplex64 reads a complex64 from the reader +func (m *Reader) ReadComplex64() (f complex64, err error) { + var p []byte + p, err = m.R.Peek(10) + if err != nil { + return + } + if p[0] != mfixext8 { + err = badPrefix(Complex64Type, p[0]) + return + } + if int8(p[1]) != Complex64Extension { + err = errExt(int8(p[1]), Complex64Extension) + return + } + f = complex(math.Float32frombits(big.Uint32(p[2:])), + math.Float32frombits(big.Uint32(p[6:]))) + _, err = m.R.Skip(10) + return +} + +// ReadComplex128 reads a complex128 from the reader +func (m *Reader) ReadComplex128() (f complex128, err error) { + var p []byte + p, err = m.R.Peek(18) + if err != nil { + return + } + if p[0] != mfixext16 { + err = badPrefix(Complex128Type, p[0]) + return + } + if int8(p[1]) != Complex128Extension { + err = errExt(int8(p[1]), Complex128Extension) + return + } + f = complex(math.Float64frombits(big.Uint64(p[2:])), + math.Float64frombits(big.Uint64(p[10:]))) + _, err = m.R.Skip(18) + return +} + +// ReadMapStrIntf reads a MessagePack map into a map[string]interface{}. +// (You must pass a non-nil map into the function.) 
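A sketch (not part of the patch) of reading a string either as a Go string or into a caller-provided scratch slice. The fixstr prefix (0xa0 plus the length) is hand-rolled from the MessagePack spec.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// "hi" as a fixstr: marker 0xa0 | length 2, then the raw bytes.
	raw := []byte{0xa2, 'h', 'i'}

	r := msgp.NewReader(bytes.NewReader(raw))
	s, err := r.ReadString()
	fmt.Println(s, err) // hi <nil>

	// The bytes variant can reuse a caller-provided scratch slice to avoid allocating.
	r = msgp.NewReader(bytes.NewReader(raw))
	scratch := make([]byte, 0, 64)
	b, err := r.ReadStringAsBytes(scratch)
	fmt.Println(string(b), err) // hi <nil>
}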
+func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) { + var sz uint32 + sz, err = m.ReadMapHeader() + if err != nil { + return + } + for key := range mp { + delete(mp, key) + } + for i := uint32(0); i < sz; i++ { + var key string + var val interface{} + key, err = m.ReadString() + if err != nil { + return + } + val, err = m.ReadIntf() + if err != nil { + return + } + mp[key] = val + } + return +} + +// ReadTime reads a time.Time object from the reader. +// The returned time's location will be set to time.Local. +func (m *Reader) ReadTime() (t time.Time, err error) { + var p []byte + p, err = m.R.Peek(15) + if err != nil { + return + } + if p[0] != mext8 || p[1] != 12 { + err = badPrefix(TimeType, p[0]) + return + } + if int8(p[2]) != TimeExtension { + err = errExt(int8(p[2]), TimeExtension) + return + } + sec, nsec := getUnix(p[3:]) + t = time.Unix(sec, int64(nsec)).Local() + _, err = m.R.Skip(15) + return +} + +// ReadIntf reads out the next object as a raw interface{}. +// Arrays are decoded as []interface{}, and maps are decoded +// as map[string]interface{}. Integers are decoded as int64 +// and unsigned integers are decoded as uint64. +func (m *Reader) ReadIntf() (i interface{}, err error) { + var t Type + t, err = m.NextType() + if err != nil { + return + } + switch t { + case BoolType: + i, err = m.ReadBool() + return + + case IntType: + i, err = m.ReadInt64() + return + + case UintType: + i, err = m.ReadUint64() + return + + case BinType: + i, err = m.ReadBytes(nil) + return + + case StrType: + i, err = m.ReadString() + return + + case Complex64Type: + i, err = m.ReadComplex64() + return + + case Complex128Type: + i, err = m.ReadComplex128() + return + + case TimeType: + i, err = m.ReadTime() + return + + case ExtensionType: + var t int8 + t, err = m.peekExtensionType() + if err != nil { + return + } + f, ok := extensionReg[t] + if ok { + e := f() + err = m.ReadExtension(e) + i = e + return + } + var e RawExtension + e.Type = t + err = m.ReadExtension(&e) + i = &e + return + + case MapType: + mp := make(map[string]interface{}) + err = m.ReadMapStrIntf(mp) + i = mp + return + + case NilType: + err = m.ReadNil() + i = nil + return + + case Float32Type: + i, err = m.ReadFloat32() + return + + case Float64Type: + i, err = m.ReadFloat64() + return + + case ArrayType: + var sz uint32 + sz, err = m.ReadArrayHeader() + + if err != nil { + return + } + out := make([]interface{}, int(sz)) + for j := range out { + out[j], err = m.ReadIntf() + if err != nil { + return + } + } + i = out + return + + default: + return nil, fatal // unreachable + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go new file mode 100644 index 00000000..e4199757 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go @@ -0,0 +1,1197 @@ +package msgp + +import ( + "bytes" + "encoding/binary" + "math" + "time" +) + +var big = binary.BigEndian + +// NextType returns the type of the next +// object in the slice. If the length +// of the input is zero, it returns +// InvalidType. 
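A sketch (not part of the patch) of generic decoding with ReadIntf: maps come back as map[string]interface{} and integers as int64, as described above. The input bytes are hand-rolled from the MessagePack spec.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// {"a": 1}: fixmap(1), fixstr "a", fixint 1.
	raw := []byte{0x81, 0xa1, 'a', 0x01}

	r := msgp.NewReader(bytes.NewReader(raw))
	v, err := r.ReadIntf()
	if err != nil {
		panic(err)
	}
	m := v.(map[string]interface{})
	fmt.Printf("%T %v\n", m["a"], m["a"]) // int64 1
}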
+func NextType(b []byte) Type { + if len(b) == 0 { + return InvalidType + } + spec := sizes[b[0]] + t := spec.typ + if t == ExtensionType && len(b) > int(spec.size) { + var tp int8 + if spec.extra == constsize { + tp = int8(b[1]) + } else { + tp = int8(b[spec.size-1]) + } + switch tp { + case TimeExtension: + return TimeType + case Complex128Extension: + return Complex128Type + case Complex64Extension: + return Complex64Type + default: + return ExtensionType + } + } + return t +} + +// IsNil returns true if len(b)>0 and +// the leading byte is a 'nil' MessagePack +// byte; false otherwise +func IsNil(b []byte) bool { + if len(b) != 0 && b[0] == mnil { + return true + } + return false +} + +// Raw is raw MessagePack. +// Raw allows you to read and write +// data without interpreting its contents. +type Raw []byte + +// MarshalMsg implements msgp.Marshaler. +// It appends the raw contents of 'raw' +// to the provided byte slice. If 'raw' +// is 0 bytes, 'nil' will be appended instead. +func (r Raw) MarshalMsg(b []byte) ([]byte, error) { + i := len(r) + if i == 0 { + return AppendNil(b), nil + } + o, l := ensure(b, i) + copy(o[l:], []byte(r)) + return o, nil +} + +// UnmarshalMsg implements msgp.Unmarshaler. +// It sets the contents of *Raw to be the next +// object in the provided byte slice. +func (r *Raw) UnmarshalMsg(b []byte) ([]byte, error) { + l := len(b) + out, err := Skip(b) + if err != nil { + return b, err + } + rlen := l - len(out) + if IsNil(b[:rlen]) { + rlen = 0 + } + if cap(*r) < rlen { + *r = make(Raw, rlen) + } else { + *r = (*r)[0:rlen] + } + copy(*r, b[:rlen]) + return out, nil +} + +// EncodeMsg implements msgp.Encodable. +// It writes the raw bytes to the writer. +// If r is empty, it writes 'nil' instead. +func (r Raw) EncodeMsg(w *Writer) error { + if len(r) == 0 { + return w.WriteNil() + } + _, err := w.Write([]byte(r)) + return err +} + +// DecodeMsg implements msgp.Decodable. +// It sets the value of *Raw to be the +// next object on the wire. +func (r *Raw) DecodeMsg(f *Reader) error { + *r = (*r)[:0] + err := appendNext(f, (*[]byte)(r)) + if IsNil(*r) { + *r = (*r)[:0] + } + return err +} + +// Msgsize implements msgp.Sizer +func (r Raw) Msgsize() int { + l := len(r) + if l == 0 { + return 1 // for 'nil' + } + return l +} + +func appendNext(f *Reader, d *[]byte) error { + amt, o, err := getNextSize(f.R) + if err != nil { + return err + } + var i int + *d, i = ensure(*d, int(amt)) + _, err = f.R.ReadFull((*d)[i:]) + if err != nil { + return err + } + for o > 0 { + err = appendNext(f, d) + if err != nil { + return err + } + o-- + } + return nil +} + +// MarshalJSON implements json.Marshaler +func (r *Raw) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + _, err := UnmarshalAsJSON(&buf, []byte(*r)) + return buf.Bytes(), err +} + +// ReadMapHeaderBytes reads a map header size +// from 'b' and returns the remaining bytes. 
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a map) +func ReadMapHeaderBytes(b []byte) (sz uint32, o []byte, err error) { + l := len(b) + if l < 1 { + err = ErrShortBytes + return + } + + lead := b[0] + if isfixmap(lead) { + sz = uint32(rfixmap(lead)) + o = b[1:] + return + } + + switch lead { + case mmap16: + if l < 3 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b[1:])) + o = b[3:] + return + + case mmap32: + if l < 5 { + err = ErrShortBytes + return + } + sz = big.Uint32(b[1:]) + o = b[5:] + return + + default: + err = badPrefix(MapType, lead) + return + } +} + +// ReadMapKeyZC attempts to read a map key +// from 'b' and returns the key bytes and the remaining bytes +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a str or bin) +func ReadMapKeyZC(b []byte) ([]byte, []byte, error) { + o, x, err := ReadStringZC(b) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return ReadBytesZC(b) + } + return nil, b, err + } + return o, x, nil +} + +// ReadArrayHeaderBytes attempts to read +// the array header size off of 'b' and return +// the size and remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not an array) +func ReadArrayHeaderBytes(b []byte) (sz uint32, o []byte, err error) { + if len(b) < 1 { + return 0, nil, ErrShortBytes + } + lead := b[0] + if isfixarray(lead) { + sz = uint32(rfixarray(lead)) + o = b[1:] + return + } + + switch lead { + case marray16: + if len(b) < 3 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b[1:])) + o = b[3:] + return + + case marray32: + if len(b) < 5 { + err = ErrShortBytes + return + } + sz = big.Uint32(b[1:]) + o = b[5:] + return + + default: + err = badPrefix(ArrayType, lead) + return + } +} + +// ReadNilBytes tries to read a "nil" byte +// off of 'b' and return the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a 'nil') +// - InvalidPrefixError +func ReadNilBytes(b []byte) ([]byte, error) { + if len(b) < 1 { + return nil, ErrShortBytes + } + if b[0] != mnil { + return b, badPrefix(NilType, b[0]) + } + return b[1:], nil +} + +// ReadFloat64Bytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a float64) +func ReadFloat64Bytes(b []byte) (f float64, o []byte, err error) { + if len(b) < 9 { + if len(b) >= 5 && b[0] == mfloat32 { + var tf float32 + tf, o, err = ReadFloat32Bytes(b) + f = float64(tf) + return + } + err = ErrShortBytes + return + } + + if b[0] != mfloat64 { + if b[0] == mfloat32 { + var tf float32 + tf, o, err = ReadFloat32Bytes(b) + f = float64(tf) + return + } + err = badPrefix(Float64Type, b[0]) + return + } + + f = math.Float64frombits(getMuint64(b)) + o = b[9:] + return +} + +// ReadFloat32Bytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a float32) +func ReadFloat32Bytes(b []byte) (f float32, o []byte, err error) { + if len(b) < 5 { + err = ErrShortBytes + return + } + + if b[0] != mfloat32 { + err = TypeError{Method: Float32Type, Encoded: getType(b[0])} + return + } + + f = math.Float32frombits(getMuint32(b)) + o = b[5:] + return +} + +// ReadBoolBytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. 
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a bool) +func ReadBoolBytes(b []byte) (bool, []byte, error) { + if len(b) < 1 { + return false, b, ErrShortBytes + } + switch b[0] { + case mtrue: + return true, b[1:], nil + case mfalse: + return false, b[1:], nil + default: + return false, b, badPrefix(BoolType, b[0]) + } +} + +// ReadInt64Bytes tries to read an int64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError (not a int) +func ReadInt64Bytes(b []byte) (i int64, o []byte, err error) { + l := len(b) + if l < 1 { + return 0, nil, ErrShortBytes + } + + lead := b[0] + if isfixint(lead) { + i = int64(rfixint(lead)) + o = b[1:] + return + } + if isnfixint(lead) { + i = int64(rnfixint(lead)) + o = b[1:] + return + } + + switch lead { + case mint8: + if l < 2 { + err = ErrShortBytes + return + } + i = int64(getMint8(b)) + o = b[2:] + return + + case muint8: + if l < 2 { + err = ErrShortBytes + return + } + i = int64(getMuint8(b)) + o = b[2:] + return + + case mint16: + if l < 3 { + err = ErrShortBytes + return + } + i = int64(getMint16(b)) + o = b[3:] + return + + case muint16: + if l < 3 { + err = ErrShortBytes + return + } + i = int64(getMuint16(b)) + o = b[3:] + return + + case mint32: + if l < 5 { + err = ErrShortBytes + return + } + i = int64(getMint32(b)) + o = b[5:] + return + + case muint32: + if l < 5 { + err = ErrShortBytes + return + } + i = int64(getMuint32(b)) + o = b[5:] + return + + case mint64: + if l < 9 { + err = ErrShortBytes + return + } + i = int64(getMint64(b)) + o = b[9:] + return + + case muint64: + if l < 9 { + err = ErrShortBytes + return + } + u := getMuint64(b) + if u > math.MaxInt64 { + err = UintOverflow{Value: u, FailedBitsize: 64} + return + } + i = int64(u) + o = b[9:] + return + + default: + err = badPrefix(IntType, lead) + return + } +} + +// ReadInt32Bytes tries to read an int32 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int32) +func ReadInt32Bytes(b []byte) (int32, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt32 || i < math.MinInt32 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 32} + } + return int32(i), o, err +} + +// ReadInt16Bytes tries to read an int16 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int16) +func ReadInt16Bytes(b []byte) (int16, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt16 || i < math.MinInt16 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 16} + } + return int16(i), o, err +} + +// ReadInt8Bytes tries to read an int16 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int8) +func ReadInt8Bytes(b []byte) (int8, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt8 || i < math.MinInt8 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 8} + } + return int8(i), o, err +} + +// ReadIntBytes tries to read an int +// from 'b' and return the value and the remaining bytes. 
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int; 32-bit platforms only) +func ReadIntBytes(b []byte) (int, []byte, error) { + if smallint { + i, b, err := ReadInt32Bytes(b) + return int(i), b, err + } + i, b, err := ReadInt64Bytes(b) + return int(i), b, err +} + +// ReadUint64Bytes tries to read a uint64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +func ReadUint64Bytes(b []byte) (u uint64, o []byte, err error) { + l := len(b) + if l < 1 { + return 0, nil, ErrShortBytes + } + + lead := b[0] + if isfixint(lead) { + u = uint64(rfixint(lead)) + o = b[1:] + return + } + + switch lead { + case mint8: + if l < 2 { + err = ErrShortBytes + return + } + v := int64(getMint8(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[2:] + return + + case muint8: + if l < 2 { + err = ErrShortBytes + return + } + u = uint64(getMuint8(b)) + o = b[2:] + return + + case mint16: + if l < 3 { + err = ErrShortBytes + return + } + v := int64(getMint16(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[3:] + return + + case muint16: + if l < 3 { + err = ErrShortBytes + return + } + u = uint64(getMuint16(b)) + o = b[3:] + return + + case mint32: + if l < 5 { + err = ErrShortBytes + return + } + v := int64(getMint32(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[5:] + return + + case muint32: + if l < 5 { + err = ErrShortBytes + return + } + u = uint64(getMuint32(b)) + o = b[5:] + return + + case mint64: + if l < 9 { + err = ErrShortBytes + return + } + v := int64(getMint64(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[9:] + return + + case muint64: + if l < 9 { + err = ErrShortBytes + return + } + u = getMuint64(b) + o = b[9:] + return + + default: + if isnfixint(lead) { + err = UintBelowZero{Value: int64(rnfixint(lead))} + } else { + err = badPrefix(UintType, lead) + } + return + } +} + +// ReadUint32Bytes tries to read a uint32 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint32) +func ReadUint32Bytes(b []byte) (uint32, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint32 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 32} + } + return uint32(v), o, err +} + +// ReadUint16Bytes tries to read a uint16 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint16) +func ReadUint16Bytes(b []byte) (uint16, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint16 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 16} + } + return uint16(v), o, err +} + +// ReadUint8Bytes tries to read a uint8 +// from 'b' and return the value and the remaining bytes. 
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint8) +func ReadUint8Bytes(b []byte) (uint8, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint8 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 8} + } + return uint8(v), o, err +} + +// ReadUintBytes tries to read a uint +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint; 32-bit platforms only) +func ReadUintBytes(b []byte) (uint, []byte, error) { + if smallint { + u, b, err := ReadUint32Bytes(b) + return uint(u), b, err + } + u, b, err := ReadUint64Bytes(b) + return uint(u), b, err +} + +// ReadByteBytes is analogous to ReadUint8Bytes +func ReadByteBytes(b []byte) (byte, []byte, error) { + return ReadUint8Bytes(b) +} + +// ReadBytesBytes reads a 'bin' object +// from 'b' and returns its vaue and +// the remaining bytes in 'b'. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a 'bin' object) +func ReadBytesBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { + return readBytesBytes(b, scratch, false) +} + +func readBytesBytes(b []byte, scratch []byte, zc bool) (v []byte, o []byte, err error) { + l := len(b) + if l < 1 { + return nil, nil, ErrShortBytes + } + + lead := b[0] + var read int + switch lead { + case mbin8: + if l < 2 { + err = ErrShortBytes + return + } + + read = int(b[1]) + b = b[2:] + + case mbin16: + if l < 3 { + err = ErrShortBytes + return + } + read = int(big.Uint16(b[1:])) + b = b[3:] + + case mbin32: + if l < 5 { + err = ErrShortBytes + return + } + read = int(big.Uint32(b[1:])) + b = b[5:] + + default: + err = badPrefix(BinType, lead) + return + } + + if len(b) < read { + err = ErrShortBytes + return + } + + // zero-copy + if zc { + v = b[0:read] + o = b[read:] + return + } + + if cap(scratch) >= read { + v = scratch[0:read] + } else { + v = make([]byte, read) + } + + o = b[copy(v, b):] + return +} + +// ReadBytesZC extracts the messagepack-encoded +// binary field without copying. The returned []byte +// points to the same memory as the input slice. +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (object not 'bin') +func ReadBytesZC(b []byte) (v []byte, o []byte, err error) { + return readBytesBytes(b, nil, true) +} + +func ReadExactBytes(b []byte, into []byte) (o []byte, err error) { + l := len(b) + if l < 1 { + err = ErrShortBytes + return + } + + lead := b[0] + var read uint32 + var skip int + switch lead { + case mbin8: + if l < 2 { + err = ErrShortBytes + return + } + + read = uint32(b[1]) + skip = 2 + + case mbin16: + if l < 3 { + err = ErrShortBytes + return + } + read = uint32(big.Uint16(b[1:])) + skip = 3 + + case mbin32: + if l < 5 { + err = ErrShortBytes + return + } + read = uint32(big.Uint32(b[1:])) + skip = 5 + + default: + err = badPrefix(BinType, lead) + return + } + + if read != uint32(len(into)) { + err = ArrayError{Wanted: uint32(len(into)), Got: read} + return + } + + o = b[skip+copy(into, b[skip:]):] + return +} + +// ReadStringZC reads a messagepack string field +// without copying. The returned []byte points +// to the same memory as the input slice. 
+// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (object not 'str') +func ReadStringZC(b []byte) (v []byte, o []byte, err error) { + l := len(b) + if l < 1 { + return nil, nil, ErrShortBytes + } + + lead := b[0] + var read int + + if isfixstr(lead) { + read = int(rfixstr(lead)) + b = b[1:] + } else { + switch lead { + case mstr8: + if l < 2 { + err = ErrShortBytes + return + } + read = int(b[1]) + b = b[2:] + + case mstr16: + if l < 3 { + err = ErrShortBytes + return + } + read = int(big.Uint16(b[1:])) + b = b[3:] + + case mstr32: + if l < 5 { + err = ErrShortBytes + return + } + read = int(big.Uint32(b[1:])) + b = b[5:] + + default: + err = TypeError{Method: StrType, Encoded: getType(lead)} + return + } + } + + if len(b) < read { + err = ErrShortBytes + return + } + + v = b[0:read] + o = b[read:] + return +} + +// ReadStringBytes reads a 'str' object +// from 'b' and returns its value and the +// remaining bytes in 'b'. +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (not 'str' type) +// - InvalidPrefixError +func ReadStringBytes(b []byte) (string, []byte, error) { + v, o, err := ReadStringZC(b) + return string(v), o, err +} + +// ReadStringAsBytes reads a 'str' object +// into a slice of bytes. 'v' is the value of +// the 'str' object, which may reside in memory +// pointed to by 'scratch.' 'o' is the remaining bytes +// in 'b.'' +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (not 'str' type) +// - InvalidPrefixError (unknown type marker) +func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { + var tmp []byte + tmp, o, err = ReadStringZC(b) + v = append(scratch[:0], tmp...) + return +} + +// ReadComplex128Bytes reads a complex128 +// extension object from 'b' and returns the +// remaining bytes. +// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex128) +// - InvalidPrefixError +// - ExtensionTypeError{} (object an extension of the correct size, but not a complex128) +func ReadComplex128Bytes(b []byte) (c complex128, o []byte, err error) { + if len(b) < 18 { + err = ErrShortBytes + return + } + if b[0] != mfixext16 { + err = badPrefix(Complex128Type, b[0]) + return + } + if int8(b[1]) != Complex128Extension { + err = errExt(int8(b[1]), Complex128Extension) + return + } + c = complex(math.Float64frombits(big.Uint64(b[2:])), + math.Float64frombits(big.Uint64(b[10:]))) + o = b[18:] + return +} + +// ReadComplex64Bytes reads a complex64 +// extension object from 'b' and returns the +// remaining bytes. +// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex64) +// - ExtensionTypeError{} (object an extension of the correct size, but not a complex64) +func ReadComplex64Bytes(b []byte) (c complex64, o []byte, err error) { + if len(b) < 10 { + err = ErrShortBytes + return + } + if b[0] != mfixext8 { + err = badPrefix(Complex64Type, b[0]) + return + } + if b[1] != Complex64Extension { + err = errExt(int8(b[1]), Complex64Extension) + return + } + c = complex(math.Float32frombits(big.Uint32(b[2:])), + math.Float32frombits(big.Uint32(b[6:]))) + o = b[10:] + return +} + +// ReadTimeBytes reads a time.Time +// extension object from 'b' and returns the +// remaining bytes. 
+// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex64) +// - ExtensionTypeError{} (object an extension of the correct size, but not a time.Time) +func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) { + if len(b) < 15 { + err = ErrShortBytes + return + } + if b[0] != mext8 || b[1] != 12 { + err = badPrefix(TimeType, b[0]) + return + } + if int8(b[2]) != TimeExtension { + err = errExt(int8(b[2]), TimeExtension) + return + } + sec, nsec := getUnix(b[3:]) + t = time.Unix(sec, int64(nsec)).Local() + o = b[15:] + return +} + +// ReadMapStrIntfBytes reads a map[string]interface{} +// out of 'b' and returns the map and remaining bytes. +// If 'old' is non-nil, the values will be read into that map. +func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) { + var sz uint32 + o = b + sz, o, err = ReadMapHeaderBytes(o) + + if err != nil { + return + } + + if old != nil { + for key := range old { + delete(old, key) + } + v = old + } else { + v = make(map[string]interface{}, int(sz)) + } + + for z := uint32(0); z < sz; z++ { + if len(o) < 1 { + err = ErrShortBytes + return + } + var key []byte + key, o, err = ReadMapKeyZC(o) + if err != nil { + return + } + var val interface{} + val, o, err = ReadIntfBytes(o) + if err != nil { + return + } + v[string(key)] = val + } + return +} + +// ReadIntfBytes attempts to read +// the next object out of 'b' as a raw interface{} and +// return the remaining bytes. +func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) { + if len(b) < 1 { + err = ErrShortBytes + return + } + + k := NextType(b) + + switch k { + case MapType: + i, o, err = ReadMapStrIntfBytes(b, nil) + return + + case ArrayType: + var sz uint32 + sz, o, err = ReadArrayHeaderBytes(b) + if err != nil { + return + } + j := make([]interface{}, int(sz)) + i = j + for d := range j { + j[d], o, err = ReadIntfBytes(o) + if err != nil { + return + } + } + return + + case Float32Type: + i, o, err = ReadFloat32Bytes(b) + return + + case Float64Type: + i, o, err = ReadFloat64Bytes(b) + return + + case IntType: + i, o, err = ReadInt64Bytes(b) + return + + case UintType: + i, o, err = ReadUint64Bytes(b) + return + + case BoolType: + i, o, err = ReadBoolBytes(b) + return + + case TimeType: + i, o, err = ReadTimeBytes(b) + return + + case Complex64Type: + i, o, err = ReadComplex64Bytes(b) + return + + case Complex128Type: + i, o, err = ReadComplex128Bytes(b) + return + + case ExtensionType: + var t int8 + t, err = peekExtension(b) + if err != nil { + return + } + // use a user-defined extension, + // if it's been registered + f, ok := extensionReg[t] + if ok { + e := f() + o, err = ReadExtensionBytes(b, e) + i = e + return + } + // last resort is a raw extension + e := RawExtension{} + e.Type = int8(t) + o, err = ReadExtensionBytes(b, &e) + i = &e + return + + case NilType: + o, err = ReadNilBytes(b) + return + + case BinType: + i, o, err = ReadBytesBytes(b, nil) + return + + case StrType: + i, o, err = ReadStringBytes(b) + return + + default: + err = InvalidPrefixError(b[0]) + return + } +} + +// Skip skips the next object in 'b' and +// returns the remaining bytes. If the object +// is a map or array, all of its elements +// will be skipped. 
+// Possible Errors: +// - ErrShortBytes (not enough bytes in b) +// - InvalidPrefixError (bad encoding) +func Skip(b []byte) ([]byte, error) { + sz, asz, err := getSize(b) + if err != nil { + return b, err + } + if uintptr(len(b)) < sz { + return b, ErrShortBytes + } + b = b[sz:] + for asz > 0 { + b, err = Skip(b) + if err != nil { + return b, err + } + asz-- + } + return b, nil +} + +// returns (skip N bytes, skip M objects, error) +func getSize(b []byte) (uintptr, uintptr, error) { + l := len(b) + if l == 0 { + return 0, 0, ErrShortBytes + } + lead := b[0] + spec := &sizes[lead] // get type information + size, mode := spec.size, spec.extra + if size == 0 { + return 0, 0, InvalidPrefixError(lead) + } + if mode >= 0 { // fixed composites + return uintptr(size), uintptr(mode), nil + } + if l < int(size) { + return 0, 0, ErrShortBytes + } + switch mode { + case extra8: + return uintptr(size) + uintptr(b[1]), 0, nil + case extra16: + return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil + case extra32: + return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil + case map16v: + return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil + case map32v: + return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil + case array16v: + return uintptr(size), uintptr(big.Uint16(b[1:])), nil + case array32v: + return uintptr(size), uintptr(big.Uint32(b[1:])), nil + default: + return 0, 0, fatal + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/size.go b/vendor/github.com/tinylib/msgp/msgp/size.go new file mode 100644 index 00000000..ce2f8b16 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/size.go @@ -0,0 +1,38 @@ +package msgp + +// The sizes provided +// are the worst-case +// encoded sizes for +// each type. For variable- +// length types ([]byte, string), +// the total encoded size is +// the prefix size plus the +// length of the object. +const ( + Int64Size = 9 + IntSize = Int64Size + UintSize = Int64Size + Int8Size = 2 + Int16Size = 3 + Int32Size = 5 + Uint8Size = 2 + ByteSize = Uint8Size + Uint16Size = 3 + Uint32Size = 5 + Uint64Size = Int64Size + Float64Size = 9 + Float32Size = 5 + Complex64Size = 10 + Complex128Size = 18 + + TimeSize = 15 + BoolSize = 1 + NilSize = 1 + + MapHeaderSize = 5 + ArrayHeaderSize = 5 + + BytesPrefixSize = 5 + StringPrefixSize = 5 + ExtensionPrefixSize = 6 +) diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go new file mode 100644 index 00000000..3978b6ff --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go @@ -0,0 +1,41 @@ +// +build !purego,!appengine + +package msgp + +import ( + "reflect" + "unsafe" +) + +// NOTE: +// all of the definition in this file +// should be repeated in appengine.go, +// but without using unsafe + +const ( + // spec says int and uint are always + // the same size, but that int/uint + // size may not be machine word size + smallint = unsafe.Sizeof(int(0)) == 4 +) + +// UnsafeString returns the byte slice as a volatile string +// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. +// THIS IS EVIL CODE. +// YOU HAVE BEEN WARNED. +func UnsafeString(b []byte) string { + sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: sh.Data, Len: sh.Len})) +} + +// UnsafeBytes returns the string as a byte slice +// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. +// THIS IS EVIL CODE. +// YOU HAVE BEEN WARNED. 
+func UnsafeBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Len: len(s), + Cap: len(s), + Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data, + })) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go new file mode 100644 index 00000000..fb1947c5 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write.go @@ -0,0 +1,845 @@ +package msgp + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "sync" + "time" +) + +// Sizer is an interface implemented +// by types that can estimate their +// size when MessagePack encoded. +// This interface is optional, but +// encoding/marshaling implementations +// may use this as a way to pre-allocate +// memory for serialization. +type Sizer interface { + Msgsize() int +} + +var ( + // Nowhere is an io.Writer to nowhere + Nowhere io.Writer = nwhere{} + + btsType = reflect.TypeOf(([]byte)(nil)) + writerPool = sync.Pool{ + New: func() interface{} { + return &Writer{buf: make([]byte, 2048)} + }, + } +) + +func popWriter(w io.Writer) *Writer { + wr := writerPool.Get().(*Writer) + wr.Reset(w) + return wr +} + +func pushWriter(wr *Writer) { + wr.w = nil + wr.wloc = 0 + writerPool.Put(wr) +} + +// freeW frees a writer for use +// by other processes. It is not necessary +// to call freeW on a writer. However, maintaining +// a reference to a *Writer after calling freeW on +// it will cause undefined behavior. +func freeW(w *Writer) { pushWriter(w) } + +// Require ensures that cap(old)-len(old) >= extra. +func Require(old []byte, extra int) []byte { + l := len(old) + c := cap(old) + r := l + extra + if c >= r { + return old + } else if l == 0 { + return make([]byte, 0, extra) + } + // the new size is the greater + // of double the old capacity + // and the sum of the old length + // and the number of new bytes + // necessary. + c <<= 1 + if c < r { + c = r + } + n := make([]byte, l, c) + copy(n, old) + return n +} + +// nowhere writer +type nwhere struct{} + +func (n nwhere) Write(p []byte) (int, error) { return len(p), nil } + +// Marshaler is the interface implemented +// by types that know how to marshal themselves +// as MessagePack. MarshalMsg appends the marshalled +// form of the object to the provided +// byte slice, returning the extended +// slice and any errors encountered. +type Marshaler interface { + MarshalMsg([]byte) ([]byte, error) +} + +// Encodable is the interface implemented +// by types that know how to write themselves +// as MessagePack using a *msgp.Writer. +type Encodable interface { + EncodeMsg(*Writer) error +} + +// Writer is a buffered writer +// that can be used to write +// MessagePack objects to an io.Writer. +// You must call *Writer.Flush() in order +// to flush all of the buffered data +// to the underlying writer. +type Writer struct { + w io.Writer + buf []byte + wloc int +} + +// NewWriter returns a new *Writer. +func NewWriter(w io.Writer) *Writer { + if wr, ok := w.(*Writer); ok { + return wr + } + return popWriter(w) +} + +// NewWriterSize returns a writer with a custom buffer size. +func NewWriterSize(w io.Writer, sz int) *Writer { + // we must be able to require() 18 + // contiguous bytes, so that is the + // practical minimum buffer size + if sz < 18 { + sz = 18 + } + + return &Writer{ + w: w, + buf: make([]byte, sz), + } +} + +// Encode encodes an Encodable to an io.Writer. 
+func Encode(w io.Writer, e Encodable) error { + wr := NewWriter(w) + err := e.EncodeMsg(wr) + if err == nil { + err = wr.Flush() + } + freeW(wr) + return err +} + +func (mw *Writer) flush() error { + if mw.wloc == 0 { + return nil + } + n, err := mw.w.Write(mw.buf[:mw.wloc]) + if err != nil { + if n > 0 { + mw.wloc = copy(mw.buf, mw.buf[n:mw.wloc]) + } + return err + } + mw.wloc = 0 + return nil +} + +// Flush flushes all of the buffered +// data to the underlying writer. +func (mw *Writer) Flush() error { return mw.flush() } + +// Buffered returns the number bytes in the write buffer +func (mw *Writer) Buffered() int { return len(mw.buf) - mw.wloc } + +func (mw *Writer) avail() int { return len(mw.buf) - mw.wloc } + +func (mw *Writer) bufsize() int { return len(mw.buf) } + +// NOTE: this should only be called with +// a number that is guaranteed to be less than +// len(mw.buf). typically, it is called with a constant. +// +// NOTE: this is a hot code path +func (mw *Writer) require(n int) (int, error) { + c := len(mw.buf) + wl := mw.wloc + if c-wl < n { + if err := mw.flush(); err != nil { + return 0, err + } + wl = mw.wloc + } + mw.wloc += n + return wl, nil +} + +func (mw *Writer) Append(b ...byte) error { + if mw.avail() < len(b) { + err := mw.flush() + if err != nil { + return err + } + } + mw.wloc += copy(mw.buf[mw.wloc:], b) + return nil +} + +// push one byte onto the buffer +// +// NOTE: this is a hot code path +func (mw *Writer) push(b byte) error { + if mw.wloc == len(mw.buf) { + if err := mw.flush(); err != nil { + return err + } + } + mw.buf[mw.wloc] = b + mw.wloc++ + return nil +} + +func (mw *Writer) prefix8(b byte, u uint8) error { + const need = 2 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu8(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix16(b byte, u uint16) error { + const need = 3 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu16(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix32(b byte, u uint32) error { + const need = 5 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu32(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix64(b byte, u uint64) error { + const need = 9 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu64(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +// Write implements io.Writer, and writes +// data directly to the buffer. 
+func (mw *Writer) Write(p []byte) (int, error) { + l := len(p) + if mw.avail() < l { + if err := mw.flush(); err != nil { + return 0, err + } + if l > len(mw.buf) { + return mw.w.Write(p) + } + } + mw.wloc += copy(mw.buf[mw.wloc:], p) + return l, nil +} + +// implements io.WriteString +func (mw *Writer) writeString(s string) error { + l := len(s) + if mw.avail() < l { + if err := mw.flush(); err != nil { + return err + } + if l > len(mw.buf) { + _, err := io.WriteString(mw.w, s) + return err + } + } + mw.wloc += copy(mw.buf[mw.wloc:], s) + return nil +} + +// Reset changes the underlying writer used by the Writer +func (mw *Writer) Reset(w io.Writer) { + mw.buf = mw.buf[:cap(mw.buf)] + mw.w = w + mw.wloc = 0 +} + +// WriteMapHeader writes a map header of the given +// size to the writer +func (mw *Writer) WriteMapHeader(sz uint32) error { + switch { + case sz <= 15: + return mw.push(wfixmap(uint8(sz))) + case sz <= math.MaxUint16: + return mw.prefix16(mmap16, uint16(sz)) + default: + return mw.prefix32(mmap32, sz) + } +} + +// WriteArrayHeader writes an array header of the +// given size to the writer +func (mw *Writer) WriteArrayHeader(sz uint32) error { + switch { + case sz <= 15: + return mw.push(wfixarray(uint8(sz))) + case sz <= math.MaxUint16: + return mw.prefix16(marray16, uint16(sz)) + default: + return mw.prefix32(marray32, sz) + } +} + +// WriteNil writes a nil byte to the buffer +func (mw *Writer) WriteNil() error { + return mw.push(mnil) +} + +// WriteFloat64 writes a float64 to the writer +func (mw *Writer) WriteFloat64(f float64) error { + return mw.prefix64(mfloat64, math.Float64bits(f)) +} + +// WriteFloat32 writes a float32 to the writer +func (mw *Writer) WriteFloat32(f float32) error { + return mw.prefix32(mfloat32, math.Float32bits(f)) +} + +// WriteInt64 writes an int64 to the writer +func (mw *Writer) WriteInt64(i int64) error { + if i >= 0 { + switch { + case i <= math.MaxInt8: + return mw.push(wfixint(uint8(i))) + case i <= math.MaxInt16: + return mw.prefix16(mint16, uint16(i)) + case i <= math.MaxInt32: + return mw.prefix32(mint32, uint32(i)) + default: + return mw.prefix64(mint64, uint64(i)) + } + } + switch { + case i >= -32: + return mw.push(wnfixint(int8(i))) + case i >= math.MinInt8: + return mw.prefix8(mint8, uint8(i)) + case i >= math.MinInt16: + return mw.prefix16(mint16, uint16(i)) + case i >= math.MinInt32: + return mw.prefix32(mint32, uint32(i)) + default: + return mw.prefix64(mint64, uint64(i)) + } +} + +// WriteInt8 writes an int8 to the writer +func (mw *Writer) WriteInt8(i int8) error { return mw.WriteInt64(int64(i)) } + +// WriteInt16 writes an int16 to the writer +func (mw *Writer) WriteInt16(i int16) error { return mw.WriteInt64(int64(i)) } + +// WriteInt32 writes an int32 to the writer +func (mw *Writer) WriteInt32(i int32) error { return mw.WriteInt64(int64(i)) } + +// WriteInt writes an int to the writer +func (mw *Writer) WriteInt(i int) error { return mw.WriteInt64(int64(i)) } + +// WriteUint64 writes a uint64 to the writer +func (mw *Writer) WriteUint64(u uint64) error { + switch { + case u <= (1<<7)-1: + return mw.push(wfixint(uint8(u))) + case u <= math.MaxUint8: + return mw.prefix8(muint8, uint8(u)) + case u <= math.MaxUint16: + return mw.prefix16(muint16, uint16(u)) + case u <= math.MaxUint32: + return mw.prefix32(muint32, uint32(u)) + default: + return mw.prefix64(muint64, u) + } +} + +// WriteByte is analogous to WriteUint8 +func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) } + +// WriteUint8 writes a uint8 to 
the writer +func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint16 writes a uint16 to the writer +func (mw *Writer) WriteUint16(u uint16) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint32 writes a uint32 to the writer +func (mw *Writer) WriteUint32(u uint32) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint writes a uint to the writer +func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) } + +// WriteBytes writes binary as 'bin' to the writer +func (mw *Writer) WriteBytes(b []byte) error { + sz := uint32(len(b)) + var err error + switch { + case sz <= math.MaxUint8: + err = mw.prefix8(mbin8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mbin16, uint16(sz)) + default: + err = mw.prefix32(mbin32, sz) + } + if err != nil { + return err + } + _, err = mw.Write(b) + return err +} + +// WriteBytesHeader writes just the size header +// of a MessagePack 'bin' object. The user is responsible +// for then writing 'sz' more bytes into the stream. +func (mw *Writer) WriteBytesHeader(sz uint32) error { + switch { + case sz <= math.MaxUint8: + return mw.prefix8(mbin8, uint8(sz)) + case sz <= math.MaxUint16: + return mw.prefix16(mbin16, uint16(sz)) + default: + return mw.prefix32(mbin32, sz) + } +} + +// WriteBool writes a bool to the writer +func (mw *Writer) WriteBool(b bool) error { + if b { + return mw.push(mtrue) + } + return mw.push(mfalse) +} + +// WriteString writes a messagepack string to the writer. +// (This is NOT an implementation of io.StringWriter) +func (mw *Writer) WriteString(s string) error { + sz := uint32(len(s)) + var err error + switch { + case sz <= 31: + err = mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + err = mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mstr16, uint16(sz)) + default: + err = mw.prefix32(mstr32, sz) + } + if err != nil { + return err + } + return mw.writeString(s) +} + +// WriteStringHeader writes just the string size +// header of a MessagePack 'str' object. The user +// is responsible for writing 'sz' more valid UTF-8 +// bytes to the stream. +func (mw *Writer) WriteStringHeader(sz uint32) error { + switch { + case sz <= 31: + return mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + return mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + return mw.prefix16(mstr16, uint16(sz)) + default: + return mw.prefix32(mstr32, sz) + } +} + +// WriteStringFromBytes writes a 'str' object +// from a []byte. 
+func (mw *Writer) WriteStringFromBytes(str []byte) error { + sz := uint32(len(str)) + var err error + switch { + case sz <= 31: + err = mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + err = mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mstr16, uint16(sz)) + default: + err = mw.prefix32(mstr32, sz) + } + if err != nil { + return err + } + _, err = mw.Write(str) + return err +} + +// WriteComplex64 writes a complex64 to the writer +func (mw *Writer) WriteComplex64(f complex64) error { + o, err := mw.require(10) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = Complex64Extension + big.PutUint32(mw.buf[o+2:], math.Float32bits(real(f))) + big.PutUint32(mw.buf[o+6:], math.Float32bits(imag(f))) + return nil +} + +// WriteComplex128 writes a complex128 to the writer +func (mw *Writer) WriteComplex128(f complex128) error { + o, err := mw.require(18) + if err != nil { + return err + } + mw.buf[o] = mfixext16 + mw.buf[o+1] = Complex128Extension + big.PutUint64(mw.buf[o+2:], math.Float64bits(real(f))) + big.PutUint64(mw.buf[o+10:], math.Float64bits(imag(f))) + return nil +} + +// WriteMapStrStr writes a map[string]string to the writer +func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) { + err = mw.WriteMapHeader(uint32(len(mp))) + if err != nil { + return + } + for key, val := range mp { + err = mw.WriteString(key) + if err != nil { + return + } + err = mw.WriteString(val) + if err != nil { + return + } + } + return nil +} + +// WriteMapStrIntf writes a map[string]interface to the writer +func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) { + err = mw.WriteMapHeader(uint32(len(mp))) + if err != nil { + return + } + for key, val := range mp { + err = mw.WriteString(key) + if err != nil { + return + } + err = mw.WriteIntf(val) + if err != nil { + return + } + } + return +} + +// WriteTime writes a time.Time object to the wire. +// +// Time is encoded as Unix time, which means that +// location (time zone) data is removed from the object. +// The encoded object itself is 12 bytes: 8 bytes for +// a big-endian 64-bit integer denoting seconds +// elapsed since "zero" Unix time, followed by 4 bytes +// for a big-endian 32-bit signed integer denoting +// the nanosecond offset of the time. This encoding +// is intended to ease portability across languages. +// (Note that this is *not* the standard time.Time +// binary encoding, because its implementation relies +// heavily on the internal representation used by the +// time package.) +func (mw *Writer) WriteTime(t time.Time) error { + t = t.UTC() + o, err := mw.require(15) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 12 + mw.buf[o+2] = TimeExtension + putUnix(mw.buf[o+3:], t.Unix(), int32(t.Nanosecond())) + return nil +} + +// WriteIntf writes the concrete type of 'v'. 
+// WriteIntf will error if 'v' is not one of the following: +// - A bool, float, string, []byte, int, uint, or complex +// - A map of supported types (with string keys) +// - An array or slice of supported types +// - A pointer to a supported type +// - A type that satisfies the msgp.Encodable interface +// - A type that satisfies the msgp.Extension interface +func (mw *Writer) WriteIntf(v interface{}) error { + if v == nil { + return mw.WriteNil() + } + switch v := v.(type) { + + // preferred interfaces + + case Encodable: + return v.EncodeMsg(mw) + case Extension: + return mw.WriteExtension(v) + + // concrete types + + case bool: + return mw.WriteBool(v) + case float32: + return mw.WriteFloat32(v) + case float64: + return mw.WriteFloat64(v) + case complex64: + return mw.WriteComplex64(v) + case complex128: + return mw.WriteComplex128(v) + case uint8: + return mw.WriteUint8(v) + case uint16: + return mw.WriteUint16(v) + case uint32: + return mw.WriteUint32(v) + case uint64: + return mw.WriteUint64(v) + case uint: + return mw.WriteUint(v) + case int8: + return mw.WriteInt8(v) + case int16: + return mw.WriteInt16(v) + case int32: + return mw.WriteInt32(v) + case int64: + return mw.WriteInt64(v) + case int: + return mw.WriteInt(v) + case string: + return mw.WriteString(v) + case []byte: + return mw.WriteBytes(v) + case map[string]string: + return mw.WriteMapStrStr(v) + case map[string]interface{}: + return mw.WriteMapStrIntf(v) + case time.Time: + return mw.WriteTime(v) + } + + val := reflect.ValueOf(v) + if !isSupported(val.Kind()) || !val.IsValid() { + return fmt.Errorf("msgp: type %s not supported", val) + } + + switch val.Kind() { + case reflect.Ptr: + if val.IsNil() { + return mw.WriteNil() + } + return mw.WriteIntf(val.Elem().Interface()) + case reflect.Slice: + return mw.writeSlice(val) + case reflect.Map: + return mw.writeMap(val) + } + return &ErrUnsupportedType{T: val.Type()} +} + +func (mw *Writer) writeMap(v reflect.Value) (err error) { + if v.Type().Key().Kind() != reflect.String { + return errors.New("msgp: map keys must be strings") + } + ks := v.MapKeys() + err = mw.WriteMapHeader(uint32(len(ks))) + if err != nil { + return + } + for _, key := range ks { + val := v.MapIndex(key) + err = mw.WriteString(key.String()) + if err != nil { + return + } + err = mw.WriteIntf(val.Interface()) + if err != nil { + return + } + } + return +} + +func (mw *Writer) writeSlice(v reflect.Value) (err error) { + // is []byte + if v.Type().ConvertibleTo(btsType) { + return mw.WriteBytes(v.Bytes()) + } + + sz := uint32(v.Len()) + err = mw.WriteArrayHeader(sz) + if err != nil { + return + } + for i := uint32(0); i < sz; i++ { + err = mw.WriteIntf(v.Index(int(i)).Interface()) + if err != nil { + return + } + } + return +} + +func (mw *Writer) writeStruct(v reflect.Value) error { + if enc, ok := v.Interface().(Encodable); ok { + return enc.EncodeMsg(mw) + } + return fmt.Errorf("msgp: unsupported type: %s", v.Type()) +} + +func (mw *Writer) writeVal(v reflect.Value) error { + if !isSupported(v.Kind()) { + return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) + } + + // shortcut for nil values + if v.IsNil() { + return mw.WriteNil() + } + switch v.Kind() { + case reflect.Bool: + return mw.WriteBool(v.Bool()) + + case reflect.Float32, reflect.Float64: + return mw.WriteFloat64(v.Float()) + + case reflect.Complex64, reflect.Complex128: + return mw.WriteComplex128(v.Complex()) + + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8: + return mw.WriteInt64(v.Int()) + 
+ case reflect.Interface, reflect.Ptr: + if v.IsNil() { + mw.WriteNil() + } + return mw.writeVal(v.Elem()) + + case reflect.Map: + return mw.writeMap(v) + + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8: + return mw.WriteUint64(v.Uint()) + + case reflect.String: + return mw.WriteString(v.String()) + + case reflect.Slice, reflect.Array: + return mw.writeSlice(v) + + case reflect.Struct: + return mw.writeStruct(v) + + } + return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) +} + +// is the reflect.Kind encodable? +func isSupported(k reflect.Kind) bool { + switch k { + case reflect.Func, reflect.Chan, reflect.Invalid, reflect.UnsafePointer: + return false + default: + return true + } +} + +// GuessSize guesses the size of the underlying +// value of 'i'. If the underlying value is not +// a simple builtin (or []byte), GuessSize defaults +// to 512. +func GuessSize(i interface{}) int { + if i == nil { + return NilSize + } + + switch i := i.(type) { + case Sizer: + return i.Msgsize() + case Extension: + return ExtensionPrefixSize + i.Len() + case float64: + return Float64Size + case float32: + return Float32Size + case uint8, uint16, uint32, uint64, uint: + return UintSize + case int8, int16, int32, int64, int: + return IntSize + case []byte: + return BytesPrefixSize + len(i) + case string: + return StringPrefixSize + len(i) + case complex64: + return Complex64Size + case complex128: + return Complex128Size + case bool: + return BoolSize + case map[string]interface{}: + s := MapHeaderSize + for key, val := range i { + s += StringPrefixSize + len(key) + GuessSize(val) + } + return s + case map[string]string: + s := MapHeaderSize + for key, val := range i { + s += 2*StringPrefixSize + len(key) + len(val) + } + return s + default: + return 512 + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go new file mode 100644 index 00000000..eaa03c46 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go @@ -0,0 +1,411 @@ +package msgp + +import ( + "math" + "reflect" + "time" +) + +// ensure 'sz' extra bytes in 'b' btw len(b) and cap(b) +func ensure(b []byte, sz int) ([]byte, int) { + l := len(b) + c := cap(b) + if c-l < sz { + o := make([]byte, (2*c)+sz) // exponential growth + n := copy(o, b) + return o[:n+sz], n + } + return b[:l+sz], l +} + +// AppendMapHeader appends a map header with the +// given size to the slice +func AppendMapHeader(b []byte, sz uint32) []byte { + switch { + case sz <= 15: + return append(b, wfixmap(uint8(sz))) + + case sz <= math.MaxUint16: + o, n := ensure(b, 3) + prefixu16(o[n:], mmap16, uint16(sz)) + return o + + default: + o, n := ensure(b, 5) + prefixu32(o[n:], mmap32, sz) + return o + } +} + +// AppendArrayHeader appends an array header with +// the given size to the slice +func AppendArrayHeader(b []byte, sz uint32) []byte { + switch { + case sz <= 15: + return append(b, wfixarray(uint8(sz))) + + case sz <= math.MaxUint16: + o, n := ensure(b, 3) + prefixu16(o[n:], marray16, uint16(sz)) + return o + + default: + o, n := ensure(b, 5) + prefixu32(o[n:], marray32, sz) + return o + } +} + +// AppendNil appends a 'nil' byte to the slice +func AppendNil(b []byte) []byte { return append(b, mnil) } + +// AppendFloat64 appends a float64 to the slice +func AppendFloat64(b []byte, f float64) []byte { + o, n := ensure(b, Float64Size) + prefixu64(o[n:], mfloat64, math.Float64bits(f)) + return o +} + +// AppendFloat32 appends a float32 to the slice 
+func AppendFloat32(b []byte, f float32) []byte { + o, n := ensure(b, Float32Size) + prefixu32(o[n:], mfloat32, math.Float32bits(f)) + return o +} + +// AppendInt64 appends an int64 to the slice +func AppendInt64(b []byte, i int64) []byte { + if i >= 0 { + switch { + case i <= math.MaxInt8: + return append(b, wfixint(uint8(i))) + case i <= math.MaxInt16: + o, n := ensure(b, 3) + putMint16(o[n:], int16(i)) + return o + case i <= math.MaxInt32: + o, n := ensure(b, 5) + putMint32(o[n:], int32(i)) + return o + default: + o, n := ensure(b, 9) + putMint64(o[n:], i) + return o + } + } + switch { + case i >= -32: + return append(b, wnfixint(int8(i))) + case i >= math.MinInt8: + o, n := ensure(b, 2) + putMint8(o[n:], int8(i)) + return o + case i >= math.MinInt16: + o, n := ensure(b, 3) + putMint16(o[n:], int16(i)) + return o + case i >= math.MinInt32: + o, n := ensure(b, 5) + putMint32(o[n:], int32(i)) + return o + default: + o, n := ensure(b, 9) + putMint64(o[n:], i) + return o + } +} + +// AppendInt appends an int to the slice +func AppendInt(b []byte, i int) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt8 appends an int8 to the slice +func AppendInt8(b []byte, i int8) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt16 appends an int16 to the slice +func AppendInt16(b []byte, i int16) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt32 appends an int32 to the slice +func AppendInt32(b []byte, i int32) []byte { return AppendInt64(b, int64(i)) } + +// AppendUint64 appends a uint64 to the slice +func AppendUint64(b []byte, u uint64) []byte { + switch { + case u <= (1<<7)-1: + return append(b, wfixint(uint8(u))) + + case u <= math.MaxUint8: + o, n := ensure(b, 2) + putMuint8(o[n:], uint8(u)) + return o + + case u <= math.MaxUint16: + o, n := ensure(b, 3) + putMuint16(o[n:], uint16(u)) + return o + + case u <= math.MaxUint32: + o, n := ensure(b, 5) + putMuint32(o[n:], uint32(u)) + return o + + default: + o, n := ensure(b, 9) + putMuint64(o[n:], u) + return o + + } +} + +// AppendUint appends a uint to the slice +func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) } + +// AppendUint8 appends a uint8 to the slice +func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) } + +// AppendByte is analogous to AppendUint8 +func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) } + +// AppendUint16 appends a uint16 to the slice +func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) } + +// AppendUint32 appends a uint32 to the slice +func AppendUint32(b []byte, u uint32) []byte { return AppendUint64(b, uint64(u)) } + +// AppendBytes appends bytes to the slice as MessagePack 'bin' data +func AppendBytes(b []byte, bts []byte) []byte { + sz := len(bts) + var o []byte + var n int + switch { + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mbin8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mbin16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mbin32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], bts)] +} + +// AppendBool appends a bool to the slice +func AppendBool(b []byte, t bool) []byte { + if t { + return append(b, mtrue) + } + return append(b, mfalse) +} + +// AppendString appends a string as a MessagePack 'str' to the slice +func AppendString(b []byte, s string) []byte { + sz := len(s) + var n int + var o []byte + switch { + case sz <= 31: + o, n = ensure(b, 1+sz) + o[n] = 
wfixstr(uint8(sz)) + n++ + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mstr8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mstr16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mstr32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], s)] +} + +// AppendStringFromBytes appends a []byte +// as a MessagePack 'str' to the slice 'b.' +func AppendStringFromBytes(b []byte, str []byte) []byte { + sz := len(str) + var n int + var o []byte + switch { + case sz <= 31: + o, n = ensure(b, 1+sz) + o[n] = wfixstr(uint8(sz)) + n++ + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mstr8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mstr16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mstr32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], str)] +} + +// AppendComplex64 appends a complex64 to the slice as a MessagePack extension +func AppendComplex64(b []byte, c complex64) []byte { + o, n := ensure(b, Complex64Size) + o[n] = mfixext8 + o[n+1] = Complex64Extension + big.PutUint32(o[n+2:], math.Float32bits(real(c))) + big.PutUint32(o[n+6:], math.Float32bits(imag(c))) + return o +} + +// AppendComplex128 appends a complex128 to the slice as a MessagePack extension +func AppendComplex128(b []byte, c complex128) []byte { + o, n := ensure(b, Complex128Size) + o[n] = mfixext16 + o[n+1] = Complex128Extension + big.PutUint64(o[n+2:], math.Float64bits(real(c))) + big.PutUint64(o[n+10:], math.Float64bits(imag(c))) + return o +} + +// AppendTime appends a time.Time to the slice as a MessagePack extension +func AppendTime(b []byte, t time.Time) []byte { + o, n := ensure(b, TimeSize) + t = t.UTC() + o[n] = mext8 + o[n+1] = 12 + o[n+2] = TimeExtension + putUnix(o[n+3:], t.Unix(), int32(t.Nanosecond())) + return o +} + +// AppendMapStrStr appends a map[string]string to the slice +// as a MessagePack map with 'str'-type keys and values +func AppendMapStrStr(b []byte, m map[string]string) []byte { + sz := uint32(len(m)) + b = AppendMapHeader(b, sz) + for key, val := range m { + b = AppendString(b, key) + b = AppendString(b, val) + } + return b +} + +// AppendMapStrIntf appends a map[string]interface{} to the slice +// as a MessagePack map with 'str'-type keys. +func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) { + sz := uint32(len(m)) + b = AppendMapHeader(b, sz) + var err error + for key, val := range m { + b = AppendString(b, key) + b, err = AppendIntf(b, val) + if err != nil { + return b, err + } + } + return b, nil +} + +// AppendIntf appends the concrete type of 'i' to the +// provided []byte. 
'i' must be one of the following: +// - 'nil' +// - A bool, float, string, []byte, int, uint, or complex +// - A map[string]interface{} or map[string]string +// - A []T, where T is another supported type +// - A *T, where T is another supported type +// - A type that satisfieds the msgp.Marshaler interface +// - A type that satisfies the msgp.Extension interface +func AppendIntf(b []byte, i interface{}) ([]byte, error) { + if i == nil { + return AppendNil(b), nil + } + + // all the concrete types + // for which we have methods + switch i := i.(type) { + case Marshaler: + return i.MarshalMsg(b) + case Extension: + return AppendExtension(b, i) + case bool: + return AppendBool(b, i), nil + case float32: + return AppendFloat32(b, i), nil + case float64: + return AppendFloat64(b, i), nil + case complex64: + return AppendComplex64(b, i), nil + case complex128: + return AppendComplex128(b, i), nil + case string: + return AppendString(b, i), nil + case []byte: + return AppendBytes(b, i), nil + case int8: + return AppendInt8(b, i), nil + case int16: + return AppendInt16(b, i), nil + case int32: + return AppendInt32(b, i), nil + case int64: + return AppendInt64(b, i), nil + case int: + return AppendInt64(b, int64(i)), nil + case uint: + return AppendUint64(b, uint64(i)), nil + case uint8: + return AppendUint8(b, i), nil + case uint16: + return AppendUint16(b, i), nil + case uint32: + return AppendUint32(b, i), nil + case uint64: + return AppendUint64(b, i), nil + case time.Time: + return AppendTime(b, i), nil + case map[string]interface{}: + return AppendMapStrIntf(b, i) + case map[string]string: + return AppendMapStrStr(b, i), nil + case []interface{}: + b = AppendArrayHeader(b, uint32(len(i))) + var err error + for _, k := range i { + b, err = AppendIntf(b, k) + if err != nil { + return b, err + } + } + return b, nil + } + + var err error + v := reflect.ValueOf(i) + switch v.Kind() { + case reflect.Array, reflect.Slice: + l := v.Len() + b = AppendArrayHeader(b, uint32(l)) + for i := 0; i < l; i++ { + b, err = AppendIntf(b, v.Index(i).Interface()) + if err != nil { + return b, err + } + } + return b, nil + case reflect.Ptr: + if v.IsNil() { + return AppendNil(b), err + } + b, err = AppendIntf(b, v.Elem().Interface()) + return b, err + default: + return b, &ErrUnsupportedType{T: v.Type()} + } +} diff --git a/vendor/github.com/wiggin77/cfg/.gitignore b/vendor/github.com/wiggin77/cfg/.gitignore new file mode 100644 index 00000000..f1c181ec --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/wiggin77/cfg/.travis.yml b/vendor/github.com/wiggin77/cfg/.travis.yml new file mode 100644 index 00000000..9899b387 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/.travis.yml @@ -0,0 +1,5 @@ +language: go +sudo: false +before_script: + - go vet ./... 
+ \ No newline at end of file diff --git a/vendor/github.com/wiggin77/cfg/LICENSE b/vendor/github.com/wiggin77/cfg/LICENSE new file mode 100644 index 00000000..2b0bf7ef --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 wiggin77 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/wiggin77/cfg/README.md b/vendor/github.com/wiggin77/cfg/README.md new file mode 100644 index 00000000..583a82cb --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/README.md @@ -0,0 +1,43 @@ +# cfg + +[![GoDoc](https://godoc.org/github.com/wiggin77/cfg?status.svg)](https://godoc.org/github.com/wiggin77/cfg) +[![Build Status](https://travis-ci.org/wiggin77/cfg.svg?branch=master)](https://travis-ci.org/wiggin77/cfg) + +Go package for app configuration. Supports chained configuration sources for multiple levels of defaults. +Includes APIs for loading Linux style configuration files (name/value pairs) or INI files, map based properties, +or easily create new configuration sources (e.g. load from database). + +Supports monitoring configuration sources for changes, hot loading properties, and notifying listeners of changes. + +## Usage + +```Go +config := &cfg.Config{} +defer config.Shutdown() // stops monitoring + +// load file via filespec string, os.File +src, err := Config.NewSrcFileFromFilespec("./myfile.conf") +if err != nil { + return err +} +// add src to top of chain, meaning first searched +cfg.PrependSource(src) + +// fetch prop 'retries', default to 3 if not found +val := config.Int("retries", 3) +``` + +See [example](./example_test.go) for more complete example, including listening for configuration changes. + +Config API parses the following data types: + +| type | method | example property values | +| ------- | ------ | -------- | +| string | Config.String | test, "" | +| int | Config.Int | -1, 77, 0 | +| int64 | Config.Int64 | -9223372036854775, 372036854775808 | +| float64 | Config.Float64 | -77.3456, 95642331.1 | +| bool | Config.Bool | T,t,true,True,1,0,False,false,f,F | +| time.Duration | Config.Duration | "10ms", "2 hours", "5 min" * | + +\* Units of measure supported: ms, sec, min, hour, day, week, year. 
diff --git a/vendor/github.com/wiggin77/cfg/config.go b/vendor/github.com/wiggin77/cfg/config.go new file mode 100644 index 00000000..0e958102 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/config.go @@ -0,0 +1,366 @@ +package cfg + +import ( + "errors" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/wiggin77/cfg/timeconv" +) + +// ErrNotFound returned when an operation is attempted on a +// resource that doesn't exist, such as fetching a non-existing +// property name. +var ErrNotFound = errors.New("not found") + +type sourceEntry struct { + src Source + props map[string]string +} + +// Config provides methods for retrieving property values from one or more +// configuration sources. +type Config struct { + mutexSrc sync.RWMutex + mutexListeners sync.RWMutex + srcs []*sourceEntry + chgListeners []ChangedListener + shutdown chan interface{} + wantPanicOnError bool +} + +// PrependSource inserts one or more `Sources` at the beginning of +// the list of sources such that the first source will be the +// source checked first when resolving a property value. +func (config *Config) PrependSource(srcs ...Source) { + arr := config.wrapSources(srcs...) + + config.mutexSrc.Lock() + if config.shutdown == nil { + config.shutdown = make(chan interface{}) + } + config.srcs = append(arr, config.srcs...) + config.mutexSrc.Unlock() + + for _, se := range arr { + if _, ok := se.src.(SourceMonitored); ok { + config.monitor(se) + } + } +} + +// AppendSource appends one or more `Sources` at the end of +// the list of sources such that the last source will be the +// source checked last when resolving a property value. +func (config *Config) AppendSource(srcs ...Source) { + arr := config.wrapSources(srcs...) + + config.mutexSrc.Lock() + if config.shutdown == nil { + config.shutdown = make(chan interface{}) + } + config.srcs = append(config.srcs, arr...) + config.mutexSrc.Unlock() + + for _, se := range arr { + if _, ok := se.src.(SourceMonitored); ok { + config.monitor(se) + } + } +} + +// wrapSources wraps one or more Source's and returns +// them as an array of `sourceEntry`. +func (config *Config) wrapSources(srcs ...Source) []*sourceEntry { + arr := make([]*sourceEntry, 0, len(srcs)) + for _, src := range srcs { + se := &sourceEntry{src: src} + config.reloadProps(se) + arr = append(arr, se) + } + return arr +} + +// SetWantPanicOnError sets the flag determining if Config +// should panic when `GetProps` or `GetLastModified` errors +// for a `Source`. +func (config *Config) SetWantPanicOnError(b bool) { + config.mutexSrc.Lock() + config.wantPanicOnError = b + config.mutexSrc.Unlock() +} + +// ShouldPanicOnError gets the flag determining if Config +// should panic when `GetProps` or `GetLastModified` errors +// for a `Source`. +func (config *Config) ShouldPanicOnError() (b bool) { + config.mutexSrc.RLock() + b = config.wantPanicOnError + config.mutexSrc.RUnlock() + return b +} + +// getProp returns the value of a named property. +// Each `Source` is checked, in the order created by adding via +// `AppendSource` and `PrependSource`, until a value for the +// property is found. +func (config *Config) getProp(name string) (val string, ok bool) { + config.mutexSrc.RLock() + defer config.mutexSrc.RUnlock() + + var s string + for _, se := range config.srcs { + if se.props != nil { + if s, ok = se.props[name]; ok { + val = strings.TrimSpace(s) + return + } + } + } + return +} + +// String returns the value of the named prop as a string. 
+// If the property is not found then the supplied default `def` +// and `ErrNotFound` are returned. +func (config *Config) String(name string, def string) (val string, err error) { + if v, ok := config.getProp(name); ok { + val = v + err = nil + return + } + + err = ErrNotFound + val = def + return +} + +// Int returns the value of the named prop as an `int`. +// If the property is not found then the supplied default `def` +// and `ErrNotFound` are returned. +// +// See config.String +func (config *Config) Int(name string, def int) (val int, err error) { + var s string + if s, err = config.String(name, ""); err == nil { + var i int64 + if i, err = strconv.ParseInt(s, 10, 32); err == nil { + val = int(i) + } + } + if err != nil { + val = def + } + return +} + +// Int64 returns the value of the named prop as an `int64`. +// If the property is not found then the supplied default `def` +// and `ErrNotFound` are returned. +// +// See config.String +func (config *Config) Int64(name string, def int64) (val int64, err error) { + var s string + if s, err = config.String(name, ""); err == nil { + val, err = strconv.ParseInt(s, 10, 64) + } + if err != nil { + val = def + } + return +} + +// Float64 returns the value of the named prop as a `float64`. +// If the property is not found then the supplied default `def` +// and `ErrNotFound` are returned. +// +// See config.String +func (config *Config) Float64(name string, def float64) (val float64, err error) { + var s string + if s, err = config.String(name, ""); err == nil { + val, err = strconv.ParseFloat(s, 64) + } + if err != nil { + val = def + } + return +} + +// Bool returns the value of the named prop as a `bool`. +// If the property is not found then the supplied default `def` +// and `ErrNotFound` are returned. +// +// Supports (t, true, 1, y, yes) for true, and (f, false, 0, n, no) for false, +// all case-insensitive. +// +// See config.String +func (config *Config) Bool(name string, def bool) (val bool, err error) { + var s string + if s, err = config.String(name, ""); err == nil { + switch strings.ToLower(s) { + case "t", "true", "1", "y", "yes": + val = true + case "f", "false", "0", "n", "no": + val = false + default: + err = errors.New("invalid syntax") + } + } + if err != nil { + val = def + } + return +} + +// Duration returns the value of the named prop as a `time.Duration`, representing +// a span of time. +// +// Units of measure are supported: ms, sec, min, hour, day, week, year. +// See config.UnitsToMillis for a complete list of units supported. +// +// If the property is not found then the supplied default `def` +// and `ErrNotFound` are returned. +// +// See config.String +func (config *Config) Duration(name string, def time.Duration) (val time.Duration, err error) { + var s string + if s, err = config.String(name, ""); err == nil { + var ms int64 + ms, err = timeconv.ParseMilliseconds(s) + val = time.Duration(ms) * time.Millisecond + } + if err != nil { + val = def + } + return +} + +// AddChangedListener adds a listener that will receive notifications +// whenever one or more property values change within the config. +func (config *Config) AddChangedListener(l ChangedListener) { + config.mutexListeners.Lock() + defer config.mutexListeners.Unlock() + + config.chgListeners = append(config.chgListeners, l) +} + +// RemoveChangedListener removes all instances of a ChangedListener. +// Returns `ErrNotFound` if the listener was not present. 
+func (config *Config) RemoveChangedListener(l ChangedListener) error { + config.mutexListeners.Lock() + defer config.mutexListeners.Unlock() + + dest := make([]ChangedListener, 0, len(config.chgListeners)) + err := ErrNotFound + + // Remove all instances of the listener by + // copying list while filtering. + for _, s := range config.chgListeners { + if s != l { + dest = append(dest, s) + } else { + err = nil + } + } + config.chgListeners = dest + return err +} + +// Shutdown can be called to stop monitoring of all config sources. +func (config *Config) Shutdown() { + config.mutexSrc.RLock() + defer config.mutexSrc.RUnlock() + if config.shutdown != nil { + close(config.shutdown) + } +} + +// onSourceChanged is called whenever one or more properties of a +// config source has changed. +func (config *Config) onSourceChanged(src SourceMonitored) { + defer func() { + if p := recover(); p != nil { + fmt.Println(p) + } + }() + config.mutexListeners.RLock() + defer config.mutexListeners.RUnlock() + for _, l := range config.chgListeners { + l.ConfigChanged(config, src) + } +} + +// monitor periodically checks a config source for changes. +func (config *Config) monitor(se *sourceEntry) { + go func(se *sourceEntry, shutdown <-chan interface{}) { + var src SourceMonitored + var ok bool + if src, ok = se.src.(SourceMonitored); !ok { + return + } + paused := false + last := time.Time{} + freq := src.GetMonitorFreq() + if freq <= 0 { + paused = true + freq = 10 + last, _ = src.GetLastModified() + } + timer := time.NewTimer(freq) + for { + select { + case <-timer.C: + if !paused { + if latest, err := src.GetLastModified(); err != nil { + if config.ShouldPanicOnError() { + panic(fmt.Sprintf("error <%v> getting last modified for %v", err, src)) + } + } else { + if last.Before(latest) { + last = latest + config.reloadProps(se) + // TODO: calc diff and provide detailed changes + config.onSourceChanged(src) + } + } + } + freq = src.GetMonitorFreq() + if freq <= 0 { + paused = true + freq = 10 + } else { + paused = false + } + timer.Reset(freq) + case <-shutdown: + // stop the timer and exit + if !timer.Stop() { + <-timer.C + } + return + } + } + }(se, config.shutdown) +} + +// reloadProps causes a Source to reload its properties. 
+func (config *Config) reloadProps(se *sourceEntry) { + config.mutexSrc.Lock() + defer config.mutexSrc.Unlock() + + m, err := se.src.GetProps() + if err != nil { + if config.wantPanicOnError { + panic(fmt.Sprintf("GetProps error for %v", se.src)) + } + return + } + + se.props = make(map[string]string) + for k, v := range m { + se.props[k] = v + } +} diff --git a/vendor/github.com/wiggin77/cfg/go.mod b/vendor/github.com/wiggin77/cfg/go.mod new file mode 100644 index 00000000..2e5a038e --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/go.mod @@ -0,0 +1,5 @@ +module github.com/wiggin77/cfg + +go 1.12 + +require github.com/wiggin77/merror v1.0.2 diff --git a/vendor/github.com/wiggin77/cfg/go.sum b/vendor/github.com/wiggin77/cfg/go.sum new file mode 100644 index 00000000..30fd3b58 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/go.sum @@ -0,0 +1,2 @@ +github.com/wiggin77/merror v1.0.2 h1:V0nH9eFp64ASyaXC+pB5WpvBoCg7NUwvaCSKdzlcHqw= +github.com/wiggin77/merror v1.0.2/go.mod h1:uQTcIU0Z6jRK4OwqganPYerzQxSFJ4GSHM3aurxxQpg= diff --git a/vendor/github.com/wiggin77/cfg/ini/ini.go b/vendor/github.com/wiggin77/cfg/ini/ini.go new file mode 100644 index 00000000..d28d7444 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/ini/ini.go @@ -0,0 +1,167 @@ +package ini + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "sync" + "time" +) + +// Ini provides parsing and querying of INI format or simple name/value pairs +// such as a simple config file. +// A name/value pair format is just an INI with no sections, and properties can +// be queried using an empty section name. +type Ini struct { + mutex sync.RWMutex + m map[string]*Section + lm time.Time +} + +// LoadFromFilespec loads an INI file from string containing path and filename. +func (ini *Ini) LoadFromFilespec(filespec string) error { + f, err := os.Open(filespec) + if err != nil { + return err + } + return ini.LoadFromFile(f) +} + +// LoadFromFile loads an INI file from `os.File`. +func (ini *Ini) LoadFromFile(file *os.File) error { + + fi, err := file.Stat() + if err != nil { + return err + } + lm := fi.ModTime() + + if err := ini.LoadFromReader(file); err != nil { + return err + } + ini.lm = lm + return nil +} + +// LoadFromReader loads an INI file from an `io.Reader`. +func (ini *Ini) LoadFromReader(reader io.Reader) error { + data, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + return ini.LoadFromString(string(data)) +} + +// LoadFromString parses an INI from a string . +func (ini *Ini) LoadFromString(s string) error { + m, err := getSections(s) + if err != nil { + return err + } + ini.mutex.Lock() + ini.m = m + ini.lm = time.Now() + ini.mutex.Unlock() + return nil +} + +// GetLastModified returns the last modified timestamp of the +// INI contents. +func (ini *Ini) GetLastModified() time.Time { + return ini.lm +} + +// GetSectionNames returns the names of all sections in this INI. +// Note, the returned section names are a snapshot in time, meaning +// other goroutines may change the contents of this INI as soon as +// the method returns. +func (ini *Ini) GetSectionNames() []string { + ini.mutex.RLock() + defer ini.mutex.RUnlock() + + arr := make([]string, 0, len(ini.m)) + for key := range ini.m { + arr = append(arr, key) + } + return arr +} + +// GetKeys returns the names of all keys in the specified section. +// Note, the returned key names are a snapshot in time, meaning other +// goroutines may change the contents of this INI as soon as the +// method returns. 
+func (ini *Ini) GetKeys(sectionName string) ([]string, error) { + sec, err := ini.getSection(sectionName) + if err != nil { + return nil, err + } + return sec.getKeys(), nil +} + +// getSection returns the named section. +func (ini *Ini) getSection(sectionName string) (*Section, error) { + ini.mutex.RLock() + defer ini.mutex.RUnlock() + + sec, ok := ini.m[sectionName] + if !ok { + return nil, fmt.Errorf("section '%s' not found", sectionName) + } + return sec, nil +} + +// GetFlattenedKeys returns all section names plus keys as one +// flattened array. +func (ini *Ini) GetFlattenedKeys() []string { + ini.mutex.RLock() + defer ini.mutex.RUnlock() + + arr := make([]string, 0, len(ini.m)*2) + for _, section := range ini.m { + keys := section.getKeys() + for _, key := range keys { + name := section.GetName() + if name != "" { + key = name + "." + key + } + arr = append(arr, key) + } + } + return arr +} + +// GetProp returns the value of the specified key in the named section. +func (ini *Ini) GetProp(section string, key string) (val string, ok bool) { + sec, err := ini.getSection(section) + if err != nil { + return val, false + } + return sec.GetProp(key) +} + +// ToMap returns a flattened map of the section name plus keys mapped +// to values. +func (ini *Ini) ToMap() map[string]string { + m := make(map[string]string) + + ini.mutex.RLock() + defer ini.mutex.RUnlock() + + for _, section := range ini.m { + for _, key := range section.getKeys() { + val, ok := section.GetProp(key) + if ok { + name := section.GetName() + var mapkey string + if name != "" { + mapkey = name + "." + key + } else { + mapkey = key + } + m[mapkey] = val + } + } + } + return m +} diff --git a/vendor/github.com/wiggin77/cfg/ini/parser.go b/vendor/github.com/wiggin77/cfg/ini/parser.go new file mode 100644 index 00000000..28916409 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/ini/parser.go @@ -0,0 +1,142 @@ +package ini + +import ( + "fmt" + "strings" + + "github.com/wiggin77/merror" +) + +// LF is linefeed +const LF byte = 0x0A + +// CR is carriage return +const CR byte = 0x0D + +// getSections parses an INI formatted string, or string containing just name/value pairs, +// returns map of `Section`'s. +// +// Any name/value pairs appearing before a section name are added to the section named +// with an empty string (""). Also true for Linux-style config files where all props +// are outside a named section. +// +// Any errors encountered are aggregated and returned, along with the partially parsed +// sections. +func getSections(str string) (map[string]*Section, error) { + merr := merror.New() + mapSections := make(map[string]*Section) + lines := buildLineArray(str) + section := newSection("") + + for _, line := range lines { + name, ok := parseSection(line) + if ok { + // A section name encountered. Stop processing the current one. + // Don't add the current section to the map if the section name is blank + // and the prop map is empty. + nameCurr := section.GetName() + if nameCurr != "" || section.hasKeys() { + mapSections[nameCurr] = section + } + // Start processing a new section. + section = newSection(name) + } else { + // Parse the property and add to the current section, or ignore if comment. + if k, v, comment, err := parseProp(line); !comment && err == nil { + section.setProp(k, v) + } else if err != nil { + merr.Append(err) // aggregate errors + } + } + + } + // If the current section is not empty, add it. 
+ if section.hasKeys() { + mapSections[section.GetName()] = section + } + return mapSections, merr.ErrorOrNil() +} + +// buildLineArray parses the given string buffer and creates a list of strings, +// one for each line in the string buffer. +// +// A line is considered to be terminated by any one of a line feed ('\n'), +// a carriage return ('\r'), or a carriage return followed immediately by a +// linefeed. +// +// Lines prefixed with ';' or '#' are considered comments and skipped. +func buildLineArray(str string) []string { + arr := make([]string, 0, 10) + str = str + "\n" + + iLen := len(str) + iPos, iBegin := 0, 0 + var ch byte + + for iPos < iLen { + ch = str[iPos] + if ch == LF || ch == CR { + sub := str[iBegin:iPos] + sub = strings.TrimSpace(sub) + if sub != "" && !strings.HasPrefix(sub, ";") && !strings.HasPrefix(sub, "#") { + arr = append(arr, sub) + } + iPos++ + if ch == CR && iPos < iLen && str[iPos] == LF { + iPos++ + } + iBegin = iPos + } else { + iPos++ + } + } + return arr +} + +// parseSection parses the specified string for a section name enclosed in square brackets. +// Returns the section name found, or `ok=false` if `str` is not a section header. +func parseSection(str string) (name string, ok bool) { + str = strings.TrimSpace(str) + if !strings.HasPrefix(str, "[") { + return "", false + } + iCloser := strings.Index(str, "]") + if iCloser == -1 { + return "", false + } + return strings.TrimSpace(str[1:iCloser]), true +} + +// parseProp parses the specified string and extracts a key/value pair. +// +// If the string is a comment (prefixed with ';' or '#') then `comment=true` +// and key will be empty. +func parseProp(str string) (key string, val string, comment bool, err error) { + iLen := len(str) + iEqPos := strings.Index(str, "=") + if iEqPos == -1 { + return "", "", false, fmt.Errorf("not a key/value pair:'%s'", str) + } + + key = str[0:iEqPos] + key = strings.TrimSpace(key) + if iEqPos+1 < iLen { + val = str[iEqPos+1:] + val = strings.TrimSpace(val) + } + + // Check that the key has at least 1 char. + if key == "" { + return "", "", false, fmt.Errorf("key is empty for '%s'", str) + } + + // Check if this line is a comment that just happens + // to have an equals sign in it. Not an error, but not a + // useable line either. + if strings.HasPrefix(key, ";") || strings.HasPrefix(key, "#") { + key = "" + val = "" + comment = true + } + return key, val, comment, err +} diff --git a/vendor/github.com/wiggin77/cfg/ini/section.go b/vendor/github.com/wiggin77/cfg/ini/section.go new file mode 100644 index 00000000..18c4c254 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/ini/section.go @@ -0,0 +1,109 @@ +package ini + +import ( + "fmt" + "strings" + "sync" +) + +// Section represents a section in an INI file. The section has a name, which is +// enclosed in square brackets in the file. The section also has an array of +// key/value pairs. +type Section struct { + name string + props map[string]string + mtx sync.RWMutex +} + +func newSection(name string) *Section { + sec := &Section{} + sec.name = name + sec.props = make(map[string]string) + return sec +} + +// addLines addes an array of strings containing name/value pairs +// of the format `key=value`. +//func addLines(lines []string) { +// TODO +//} + +// GetName returns the name of the section. +func (sec *Section) GetName() (name string) { + sec.mtx.RLock() + name = sec.name + sec.mtx.RUnlock() + return +} + +// GetProp returns the value associated with the given key, or +// `ok=false` if key does not exist. 
+func (sec *Section) GetProp(key string) (val string, ok bool) { + sec.mtx.RLock() + val, ok = sec.props[key] + sec.mtx.RUnlock() + return +} + +// SetProp sets the value associated with the given key. +func (sec *Section) setProp(key string, val string) { + sec.mtx.Lock() + sec.props[key] = val + sec.mtx.Unlock() +} + +// hasKeys returns true if there are one or more properties in +// this section. +func (sec *Section) hasKeys() (b bool) { + sec.mtx.RLock() + b = len(sec.props) > 0 + sec.mtx.RUnlock() + return +} + +// getKeys returns an array containing all keys in this section. +func (sec *Section) getKeys() []string { + sec.mtx.RLock() + defer sec.mtx.RUnlock() + + arr := make([]string, len(sec.props)) + idx := 0 + for k := range sec.props { + arr[idx] = k + idx++ + } + return arr +} + +// combine the given section with this one. +func (sec *Section) combine(sec2 *Section) { + sec.mtx.Lock() + sec2.mtx.RLock() + defer sec.mtx.Unlock() + defer sec2.mtx.RUnlock() + + for k, v := range sec2.props { + sec.props[k] = v + } +} + +// String returns a string representation of this section. +func (sec *Section) String() string { + return fmt.Sprintf("[%s]\n%s", sec.GetName(), sec.StringPropsOnly()) +} + +// StringPropsOnly returns a string representation of this section +// without the section header. +func (sec *Section) StringPropsOnly() string { + sec.mtx.RLock() + defer sec.mtx.RUnlock() + sb := &strings.Builder{} + + for k, v := range sec.props { + sb.WriteString(k) + sb.WriteString("=") + sb.WriteString(v) + sb.WriteString("\n") + } + return sb.String() +} diff --git a/vendor/github.com/wiggin77/cfg/listener.go b/vendor/github.com/wiggin77/cfg/listener.go new file mode 100644 index 00000000..12ea4e45 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/listener.go @@ -0,0 +1,11 @@ +package cfg + +// ChangedListener interface is for receiving notifications +// when one or more properties within monitored config sources +// (SourceMonitored) have changed values. +type ChangedListener interface { + + // Changed is called when one or more properties in a `SourceMonitored` has a + // changed value. + ConfigChanged(cfg *Config, src SourceMonitored) +} diff --git a/vendor/github.com/wiggin77/cfg/nocopy.go b/vendor/github.com/wiggin77/cfg/nocopy.go new file mode 100644 index 00000000..f2450c0b --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/nocopy.go @@ -0,0 +1,11 @@ +package cfg + +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} diff --git a/vendor/github.com/wiggin77/cfg/source.go b/vendor/github.com/wiggin77/cfg/source.go new file mode 100644 index 00000000..09083e97 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/source.go @@ -0,0 +1,58 @@ +package cfg + +import ( + "sync" + "time" +) + +// Source is the interface required for any source of name/value pairs. +type Source interface { + + // GetProps fetches all the properties from a source and returns + // them as a map. + GetProps() (map[string]string, error) +} + +// SourceMonitored is the interface required for any config source that is +// monitored for changes. +type SourceMonitored interface { + Source + + // GetLastModified returns the time of the latest modification to any + // property value within the source. 
If a source does not support + // modifying properties at runtime then the zero value for `Time` + // should be returned to ensure reload events are not generated. + GetLastModified() (time.Time, error) + + // GetMonitorFreq returns the frequency as a `time.Duration` between + // checks for changes to this config source. + // + // Returning zero (or less) will temporarily suspend calls to `GetLastModified` + // and `GetMonitorFreq` will be called every 10 seconds until resumed, after which + // `GetMontitorFreq` will be called at a frequency roughly equal to the `time.Duration` + // returned. + GetMonitorFreq() time.Duration +} + +// AbstractSourceMonitor can be embedded in a custom `Source` to provide the +// basic plumbing for monitor frequency. +type AbstractSourceMonitor struct { + mutex sync.RWMutex + freq time.Duration +} + +// GetMonitorFreq returns the frequency as a `time.Duration` between +// checks for changes to this config source. +func (asm *AbstractSourceMonitor) GetMonitorFreq() (freq time.Duration) { + asm.mutex.RLock() + freq = asm.freq + asm.mutex.RUnlock() + return +} + +// SetMonitorFreq sets the frequency between checks for changes to this config source. +func (asm *AbstractSourceMonitor) SetMonitorFreq(freq time.Duration) { + asm.mutex.Lock() + asm.freq = freq + asm.mutex.Unlock() +} diff --git a/vendor/github.com/wiggin77/cfg/srcfile.go b/vendor/github.com/wiggin77/cfg/srcfile.go new file mode 100644 index 00000000..f42c69fa --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/srcfile.go @@ -0,0 +1,63 @@ +package cfg + +import ( + "os" + "time" + + "github.com/wiggin77/cfg/ini" +) + +// SrcFile is a configuration `Source` backed by a file containing +// name/value pairs or INI format. +type SrcFile struct { + AbstractSourceMonitor + ini ini.Ini + file *os.File +} + +// NewSrcFileFromFilespec creates a new SrcFile with the specified filespec. +func NewSrcFileFromFilespec(filespec string) (*SrcFile, error) { + file, err := os.Open(filespec) + if err != nil { + return nil, err + } + return NewSrcFile(file) +} + +// NewSrcFile creates a new SrcFile with the specified os.File. +func NewSrcFile(file *os.File) (*SrcFile, error) { + sf := &SrcFile{} + sf.freq = time.Minute + sf.file = file + if err := sf.ini.LoadFromFile(file); err != nil { + return nil, err + } + return sf, nil +} + +// GetProps fetches all the properties from a source and returns +// them as a map. +func (sf *SrcFile) GetProps() (map[string]string, error) { + lm, err := sf.GetLastModified() + if err != nil { + return nil, err + } + + // Check if we need to reload. + if sf.ini.GetLastModified() != lm { + if err := sf.ini.LoadFromFile(sf.file); err != nil { + return nil, err + } + } + return sf.ini.ToMap(), nil +} + +// GetLastModified returns the time of the latest modification to any +// property value within the source. +func (sf *SrcFile) GetLastModified() (time.Time, error) { + fi, err := sf.file.Stat() + if err != nil { + return time.Now(), err + } + return fi.ModTime(), nil +} diff --git a/vendor/github.com/wiggin77/cfg/srcmap.go b/vendor/github.com/wiggin77/cfg/srcmap.go new file mode 100644 index 00000000..321db27a --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/srcmap.go @@ -0,0 +1,78 @@ +package cfg + +import ( + "time" +) + +// SrcMap is a configuration `Source` backed by a simple map. +type SrcMap struct { + AbstractSourceMonitor + m map[string]string + lm time.Time +} + +// NewSrcMap creates an empty `SrcMap`. 
+func NewSrcMap() *SrcMap { + sm := &SrcMap{} + sm.m = make(map[string]string) + sm.lm = time.Now() + sm.freq = time.Minute + return sm +} + +// NewSrcMapFromMap creates a `SrcMap` containing a copy of the +// specified map. +func NewSrcMapFromMap(mapIn map[string]string) *SrcMap { + sm := NewSrcMap() + sm.PutAll(mapIn) + return sm +} + +// Put inserts or updates a value in the `SrcMap`. +func (sm *SrcMap) Put(key string, val string) { + sm.mutex.Lock() + sm.m[key] = val + sm.lm = time.Now() + sm.mutex.Unlock() +} + +// PutAll inserts a copy of `mapIn` into the `SrcMap` +func (sm *SrcMap) PutAll(mapIn map[string]string) { + sm.mutex.Lock() + defer sm.mutex.Unlock() + + for k, v := range mapIn { + sm.m[k] = v + } + sm.lm = time.Now() +} + +// GetProps fetches all the properties from a source and returns +// them as a map. +func (sm *SrcMap) GetProps() (m map[string]string, err error) { + sm.mutex.RLock() + m = sm.m + sm.mutex.RUnlock() + return +} + +// GetLastModified returns the time of the latest modification to any +// property value within the source. If a source does not support +// modifying properties at runtime then the zero value for `Time` +// should be returned to ensure reload events are not generated. +func (sm *SrcMap) GetLastModified() (last time.Time, err error) { + sm.mutex.RLock() + last = sm.lm + sm.mutex.RUnlock() + return +} + +// GetMonitorFreq returns the frequency as a `time.Duration` between +// checks for changes to this config source. Defaults to 1 minute +// unless changed with `SetMonitorFreq`. +func (sm *SrcMap) GetMonitorFreq() (freq time.Duration) { + sm.mutex.RLock() + freq = sm.freq + sm.mutex.RUnlock() + return +} diff --git a/vendor/github.com/wiggin77/cfg/timeconv/parse.go b/vendor/github.com/wiggin77/cfg/timeconv/parse.go new file mode 100644 index 00000000..218ef43a --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/timeconv/parse.go @@ -0,0 +1,108 @@ +package timeconv + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" +) + +// MillisPerSecond is the number of milliseconds per second. +const MillisPerSecond int64 = 1000 + +// MillisPerMinute is the number of milliseconds per minute. +const MillisPerMinute int64 = MillisPerSecond * 60 + +// MillisPerHour is the number of milliseconds per hour. +const MillisPerHour int64 = MillisPerMinute * 60 + +// MillisPerDay is the number of milliseconds per day. +const MillisPerDay int64 = MillisPerHour * 24 + +// MillisPerWeek is the number of milliseconds per week. +const MillisPerWeek int64 = MillisPerDay * 7 + +// MillisPerYear is the approximate number of milliseconds per year. +const MillisPerYear int64 = MillisPerDay*365 + int64((float64(MillisPerDay) * 0.25)) + +// ParseMilliseconds parses a string containing a number plus +// a unit of measure for time and returns the number of milliseconds +// it represents. +// +// Example: +// * "1 second" returns 1000 +// * "1 minute" returns 60000 +// * "1 hour" returns 3600000 +// +// See config.UnitsToMillis for a list of supported units of measure.
+func ParseMilliseconds(str string) (int64, error) { + s := strings.TrimSpace(str) + reg := regexp.MustCompile("([0-9\\.\\-+]*)(.*)") + matches := reg.FindStringSubmatch(s) + if matches == nil || len(matches) < 1 || matches[1] == "" { + return 0, fmt.Errorf("invalid syntax - '%s'", s) + } + digits := matches[1] + units := "ms" + if len(matches) > 1 && matches[2] != "" { + units = matches[2] + } + + fDigits, err := strconv.ParseFloat(digits, 64) + if err != nil { + return 0, err + } + + msPerUnit, err := UnitsToMillis(units) + if err != nil { + return 0, err + } + + // Check for overflow. + fms := float64(msPerUnit) * fDigits + if fms > math.MaxInt64 || fms < math.MinInt64 { + return 0, fmt.Errorf("out of range - '%s' overflows", s) + } + ms := int64(fms) + return ms, nil +} + +// UnitsToMillis returns the number of milliseconds represented by the specified unit of measure. +// +// Example: +// * "second" returns 1000
+// * "minute" returns 60000
+// * "hour" returns 3600000
+// +// Supported units of measure: +// * "milliseconds", "millis", "ms", "millisecond" +// * "seconds", "sec", "s", "second" +// * "minutes", "mins", "min", "m", "minute" +// * "hours", "h", "hour" +// * "days", "d", "day" +// * "weeks", "w", "week" +// * "years", "y", "year" +func UnitsToMillis(units string) (ms int64, err error) { + u := strings.TrimSpace(units) + u = strings.ToLower(u) + switch u { + case "milliseconds", "millisecond", "millis", "ms": + ms = 1 + case "seconds", "second", "sec", "s": + ms = MillisPerSecond + case "minutes", "minute", "mins", "min", "m": + ms = MillisPerMinute + case "hours", "hour", "h": + ms = MillisPerHour + case "days", "day", "d": + ms = MillisPerDay + case "weeks", "week", "w": + ms = MillisPerWeek + case "years", "year", "y": + ms = MillisPerYear + default: + err = fmt.Errorf("invalid syntax - '%s' not a supported unit of measure", u) + } + return +} diff --git a/vendor/github.com/wiggin77/merror/.gitignore b/vendor/github.com/wiggin77/merror/.gitignore new file mode 100644 index 00000000..f1c181ec --- /dev/null +++ b/vendor/github.com/wiggin77/merror/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/wiggin77/merror/LICENSE b/vendor/github.com/wiggin77/merror/LICENSE new file mode 100644 index 00000000..2b0bf7ef --- /dev/null +++ b/vendor/github.com/wiggin77/merror/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 wiggin77 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/wiggin77/merror/README.md b/vendor/github.com/wiggin77/merror/README.md new file mode 100644 index 00000000..8a31687f --- /dev/null +++ b/vendor/github.com/wiggin77/merror/README.md @@ -0,0 +1,2 @@ +# merror +Multiple Error aggregator for Golang. diff --git a/vendor/github.com/wiggin77/merror/format.go b/vendor/github.com/wiggin77/merror/format.go new file mode 100644 index 00000000..8ba9aa82 --- /dev/null +++ b/vendor/github.com/wiggin77/merror/format.go @@ -0,0 +1,43 @@ +package merror + +import ( + "fmt" + "strings" +) + +// FormatterFunc is a function that converts a merror +// to a string. +type FormatterFunc func(merr *MError) string + +// GlobalFormatter is the global merror formatter. +// Set this to a custom formatter if desired. 
+var GlobalFormatter = defaultFormatter + +// defaultFormatter +func defaultFormatter(merr *MError) string { + count := 0 + overflow := 0 + + var format func(sb *strings.Builder, merr *MError, indent string) + format = func(sb *strings.Builder, merr *MError, indent string) { + count += merr.Len() + overflow += merr.Overflow() + + fmt.Fprintf(sb, "%sMError:\n", indent) + for _, err := range merr.Errors() { + if e, ok := err.(*MError); ok { + format(sb, e, indent+" ") + } else { + fmt.Fprintf(sb, "%s%s\n", indent, err.Error()) + } + } + } + + sb := &strings.Builder{} + format(sb, merr, "") + fmt.Fprintf(sb, "%d errors total.\n", count) + if merr.overflow > 0 { + fmt.Fprintf(sb, "%d errors truncated.\n", overflow) + } + return sb.String() +} diff --git a/vendor/github.com/wiggin77/merror/go.mod b/vendor/github.com/wiggin77/merror/go.mod new file mode 100644 index 00000000..44982f78 --- /dev/null +++ b/vendor/github.com/wiggin77/merror/go.mod @@ -0,0 +1 @@ +module github.com/wiggin77/merror diff --git a/vendor/github.com/wiggin77/merror/merror.go b/vendor/github.com/wiggin77/merror/merror.go new file mode 100644 index 00000000..01f19913 --- /dev/null +++ b/vendor/github.com/wiggin77/merror/merror.go @@ -0,0 +1,87 @@ +package merror + +// MError represents zero or more errors that can be +// accumulated via the `Append` method. +type MError struct { + cap int + errors []error + overflow int + formatter FormatterFunc +} + +// New returns a new instance of `MError` with no limit on the +// number of errors that can be appended. +func New() *MError { + me := &MError{} + me.errors = make([]error, 0, 10) + return me +} + +// NewWithCap returns a new instance of `MError` with a maximum +// capacity of `cap` errors. If exceeded only the overflow counter +// will be incremented. +// +// A `cap` of zero of less means no cap and max size of a slice +// on the current platform is the upper bound. +func NewWithCap(cap int) *MError { + me := New() + me.cap = cap + return me +} + +// Append adds an error to the aggregated error list. +func (me *MError) Append(err error) { + if err == nil { + return + } + if me.cap > 0 && len(me.errors) >= me.cap { + me.overflow++ + } else { + me.errors = append(me.errors, err) + } +} + +// Errors returns an array of the `error` instances that have been +// appended to this `MError`. +func (me *MError) Errors() []error { + return me.errors +} + +// Len returns the number of errors that have been appended. +func (me *MError) Len() int { + return len(me.errors) +} + +// Overflow returns the number of errors that have been truncated +// because maximum capacity was exceeded. +func (me *MError) Overflow() int { + return me.overflow +} + +// SetFormatter sets the `FormatterFunc` to be used when `Error` is +// called. The previous `FormatterFunc` is returned. +func (me *MError) SetFormatter(f FormatterFunc) (old FormatterFunc) { + old = me.formatter + me.formatter = f + return +} + +// ErrorOrNil returns nil if this `MError` contains no errors, +// otherwise this `MError` is returned. +func (me *MError) ErrorOrNil() error { + if me == nil || len(me.errors) == 0 { + return nil + } + return me +} + +// Error returns a string representation of this MError. +// The output format depends on the `Formatter` set for this +// merror instance, or the global formatter if none set. 
+func (me *MError) Error() string { + f := me.formatter + if f == nil { + f = GlobalFormatter + } + return f(me) +} diff --git a/vendor/github.com/wiggin77/srslog/.gitignore b/vendor/github.com/wiggin77/srslog/.gitignore new file mode 100644 index 00000000..ebf0f2e4 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/.gitignore @@ -0,0 +1 @@ +.cover diff --git a/vendor/github.com/wiggin77/srslog/.travis.yml b/vendor/github.com/wiggin77/srslog/.travis.yml new file mode 100644 index 00000000..921150e9 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/.travis.yml @@ -0,0 +1,15 @@ +sudo: required +dist: trusty +group: edge +language: go +go: +- 1.5 +before_install: + - pip install --user codecov +script: +- | + go get ./... + go test -v -coverprofile=coverage.txt -covermode=atomic + go vet +after_success: + - codecov diff --git a/vendor/github.com/wiggin77/srslog/CODE_OF_CONDUCT.md b/vendor/github.com/wiggin77/srslog/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..18ac49fc --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/CODE_OF_CONDUCT.md @@ -0,0 +1,50 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of +fostering an open and welcoming community, we pledge to respect all people who +contribute through reporting issues, posting feature requests, updating +documentation, submitting pull requests or patches, and other activities. + +We are committed to making participation in this project a harassment-free +experience for everyone, regardless of level of experience, gender, gender +identity and expression, sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, such as physical or electronic + addresses, without explicit permission +* Other unethical or unprofessional conduct + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +By adopting this Code of Conduct, project maintainers commit themselves to +fairly and consistently applying these principles to every aspect of managing +this project. Project maintainers who do not follow or enforce the Code of +Conduct may be permanently removed from the project team. + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting a project maintainer at [sirsean@gmail.com]. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. Maintainers are +obligated to maintain confidentiality with regard to the reporter of an +incident. 
+ + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.3.0, available at +[http://contributor-covenant.org/version/1/3/0/][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/3/0/ diff --git a/vendor/github.com/wiggin77/srslog/LICENSE b/vendor/github.com/wiggin77/srslog/LICENSE new file mode 100644 index 00000000..9269338f --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015 Rackspace. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/wiggin77/srslog/README.md b/vendor/github.com/wiggin77/srslog/README.md new file mode 100644 index 00000000..dcacc348 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/README.md @@ -0,0 +1,147 @@ +[![Build Status](https://travis-ci.org/RackSec/srslog.svg?branch=master)](https://travis-ci.org/RackSec/srslog) + +# srslog + +Go has a `syslog` package in the standard library, but it has the following +shortcomings: + +1. It doesn't have TLS support +2. [According to bradfitz on the Go team, it is no longer being maintained.](https://github.com/golang/go/issues/13449#issuecomment-161204716) + +I agree that it doesn't need to be in the standard library. So, I've +followed Brad's suggestion and have made a separate project to handle syslog. + +This code was taken directly from the Go project as a base to start from. + +However, this _does_ have TLS support. + +# Usage + +Basic usage retains the same interface as the original `syslog` package. We +only added to the interface where required to support new functionality. 
+ +Switch from the standard library: + +``` +import( + //"log/syslog" + syslog "github.com/RackSec/srslog" +) +``` + +You can still use it for local syslog: + +``` +w, err := syslog.Dial("", "", syslog.LOG_ERR, "testtag") +``` + +Or to unencrypted UDP: + +``` +w, err := syslog.Dial("udp", "192.168.0.50:514", syslog.LOG_ERR, "testtag") +``` + +Or to unencrypted TCP: + +``` +w, err := syslog.Dial("tcp", "192.168.0.51:514", syslog.LOG_ERR, "testtag") +``` + +But now you can also send messages via TLS-encrypted TCP: + +``` +w, err := syslog.DialWithTLSCertPath("tcp+tls", "192.168.0.52:514", syslog.LOG_ERR, "testtag", "/path/to/servercert.pem") +``` + +And if you need more control over your TLS configuration: + +``` +pool := x509.NewCertPool() +serverCert, err := ioutil.ReadFile("/path/to/servercert.pem") +if err != nil { + return nil, err +} +pool.AppendCertsFromPEM(serverCert) +config := tls.Config{ + RootCAs: pool, +} + +w, err := DialWithTLSConfig(network, raddr, priority, tag, &config) +``` + +(Note that in both TLS cases, this uses a self-signed certificate, where the +remote syslog server has the keypair and the client has only the public key.) + +And then to write log messages, continue like so: + +``` +if err != nil { + log.Fatal("failed to connect to syslog:", err) +} +defer w.Close() + +w.Alert("this is an alert") +w.Crit("this is critical") +w.Err("this is an error") +w.Warning("this is a warning") +w.Notice("this is a notice") +w.Info("this is info") +w.Debug("this is debug") +w.Write([]byte("these are some bytes")) +``` + +If you need further control over connection attempts, you can use the DialWithCustomDialer +function. To continue with the DialWithTLSConfig example: + +``` +netDialer := &net.Dialer{Timeout: time.Second*5} // easy timeouts +realNetwork := "tcp" // real network, other vars your dial func can close over +dial := func(network, addr string) (net.Conn, error) { + // cannot use "network" here as it'll simply be "custom" which will fail + return tls.DialWithDialer(netDialer, realNetwork, addr, &config) +} + +w, err := DialWithCustomDialer("custom", "192.168.0.52:514", syslog.LOG_ERR, "testtag", dial) +``` + +Your custom dial func can set timeouts, proxy connections, and do whatever else it needs before returning a net.Conn. + +# Generating TLS Certificates + +We've provided a script that you can use to generate a self-signed keypair: + +``` +pip install cryptography +python script/gen-certs.py +``` + +That outputs the public key and private key to standard out. Put those into +`.pem` files. (And don't put them into any source control. The certificate in +the `test` directory is used by the unit tests, and please do not actually use +it anywhere else.) + +# Running Tests + +Run the tests as usual: + +``` +go test +``` + +But we've also provided a test coverage script that will show you which +lines of code are not covered: + +``` +script/coverage --html +``` + +That will open a new browser tab showing coverage information. + +# License + +This project uses the New BSD License, the same as the Go project itself. + +# Code of Conduct + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms.
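[Editor's note] The README snippets above combine into a small end-to-end program. The sketch below is only an illustration of the same API against the vendored `github.com/wiggin77/srslog` fork pulled in by this patch; the UDP address, tag, and facility are placeholders, and a reachable syslog daemon is assumed.

```go
package main

import (
	"log"

	syslog "github.com/wiggin77/srslog"
)

func main() {
	// Dial an unencrypted UDP syslog endpoint; empty network and address
	// would instead connect to the local syslog daemon.
	w, err := syslog.Dial("udp", "192.168.0.50:514", syslog.LOG_LOCAL0|syslog.LOG_ERR, "testtag")
	if err != nil {
		log.Fatal("failed to connect to syslog:", err)
	}
	defer w.Close()

	// Severity helpers override the default priority passed to Dial.
	w.Info("service started")
	w.Warning("disk space below 20%")
	w.Err("failed to open data file")

	// Plain writes use the priority that was given to Dial.
	if _, err := w.Write([]byte("these are some bytes")); err != nil {
		log.Println("syslog write failed:", err)
	}
}
```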
diff --git a/vendor/github.com/wiggin77/srslog/constants.go b/vendor/github.com/wiggin77/srslog/constants.go new file mode 100644 index 00000000..600801ee --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/constants.go @@ -0,0 +1,68 @@ +package srslog + +import ( + "errors" +) + +// Priority is a combination of the syslog facility and +// severity. For example, LOG_ALERT | LOG_FTP sends an alert severity +// message from the FTP facility. The default severity is LOG_EMERG; +// the default facility is LOG_KERN. +type Priority int + +const severityMask = 0x07 +const facilityMask = 0xf8 + +const ( + // Severity. + + // From /usr/include/sys/syslog.h. + // These are the same on Linux, BSD, and OS X. + LOG_EMERG Priority = iota + LOG_ALERT + LOG_CRIT + LOG_ERR + LOG_WARNING + LOG_NOTICE + LOG_INFO + LOG_DEBUG +) + +const ( + // Facility. + + // From /usr/include/sys/syslog.h. + // These are the same up to LOG_FTP on Linux, BSD, and OS X. + LOG_KERN Priority = iota << 3 + LOG_USER + LOG_MAIL + LOG_DAEMON + LOG_AUTH + LOG_SYSLOG + LOG_LPR + LOG_NEWS + LOG_UUCP + LOG_CRON + LOG_AUTHPRIV + LOG_FTP + _ // unused + _ // unused + _ // unused + _ // unused + LOG_LOCAL0 + LOG_LOCAL1 + LOG_LOCAL2 + LOG_LOCAL3 + LOG_LOCAL4 + LOG_LOCAL5 + LOG_LOCAL6 + LOG_LOCAL7 +) + +func validatePriority(p Priority) error { + if p < 0 || p > LOG_LOCAL7|LOG_DEBUG { + return errors.New("log/syslog: invalid priority") + } else { + return nil + } +} diff --git a/vendor/github.com/wiggin77/srslog/dialer.go b/vendor/github.com/wiggin77/srslog/dialer.go new file mode 100644 index 00000000..1ecf29b2 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/dialer.go @@ -0,0 +1,104 @@ +package srslog + +import ( + "crypto/tls" + "net" +) + +// dialerFunctionWrapper is a simple object that consists of a dialer function +// and its name. This is primarily for testing, so we can make sure that the +// getDialer method returns the correct dialer function. However, if you ever +// find that you need to check which dialer function you have, this would also +// be useful for you without having to use reflection. +type dialerFunctionWrapper struct { + Name string + Dialer func() (serverConn, string, error) +} + +// Call the wrapped dialer function and return its return values. +func (df dialerFunctionWrapper) Call() (serverConn, string, error) { + return df.Dialer() +} + +// getDialer returns a "dialer" function that can be called to connect to a +// syslog server. +// +// Each dialer function is responsible for dialing the remote host and returns +// a serverConn, the hostname (or a default if the Writer has not specified a +// hostname), and an error in case dialing fails. +// +// The reason for separate dialers is that different network types may need +// to dial their connection differently, yet still provide a net.Conn interface +// that you can use once they have dialed. Rather than an increasingly long +// conditional, we have a map of network -> dialer function (with a sane default +// value), and adding a new network type is as easy as writing the dialer +// function and adding it to the map. 
+func (w *Writer) getDialer() dialerFunctionWrapper { + dialers := map[string]dialerFunctionWrapper{ + "": dialerFunctionWrapper{"unixDialer", w.unixDialer}, + "tcp+tls": dialerFunctionWrapper{"tlsDialer", w.tlsDialer}, + "custom": dialerFunctionWrapper{"customDialer", w.customDialer}, + } + dialer, ok := dialers[w.network] + if !ok { + dialer = dialerFunctionWrapper{"basicDialer", w.basicDialer} + } + return dialer +} + +// unixDialer uses the unixSyslog method to open a connection to the syslog +// daemon running on the local machine. +func (w *Writer) unixDialer() (serverConn, string, error) { + sc, err := unixSyslog() + hostname := w.hostname + if hostname == "" { + hostname = "localhost" + } + return sc, hostname, err +} + +// tlsDialer connects to TLS over TCP, and is used for the "tcp+tls" network +// type. +func (w *Writer) tlsDialer() (serverConn, string, error) { + c, err := tls.Dial("tcp", w.raddr, w.tlsConfig) + var sc serverConn + hostname := w.hostname + if err == nil { + sc = newNetConn(c) + if hostname == "" { + hostname = c.LocalAddr().String() + } + } + return sc, hostname, err +} + +// basicDialer is the most common dialer for syslog, and supports both TCP and +// UDP connections. +func (w *Writer) basicDialer() (serverConn, string, error) { + c, err := net.Dial(w.network, w.raddr) + var sc serverConn + hostname := w.hostname + if err == nil { + sc = newNetConn(c) + if hostname == "" { + hostname = c.LocalAddr().String() + } + } + return sc, hostname, err +} + +// customDialer uses the custom dialer when the Writer was created +// giving developers total control over how connections are made and returned. +// Note it does not check if cdialer is nil, as it should only be referenced from getDialer. +func (w *Writer) customDialer() (serverConn, string, error) { + c, err := w.customDial(w.network, w.raddr) + var sc serverConn + hostname := w.hostname + if err == nil { + sc = newNetConn(c) + if hostname == "" { + hostname = c.LocalAddr().String() + } + } + return sc, hostname, err +} diff --git a/vendor/github.com/wiggin77/srslog/formatter.go b/vendor/github.com/wiggin77/srslog/formatter.go new file mode 100644 index 00000000..e306fd67 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/formatter.go @@ -0,0 +1,58 @@ +package srslog + +import ( + "fmt" + "os" + "time" +) + +const appNameMaxLength = 48 // limit to 48 chars as per RFC5424 + +// Formatter is a type of function that takes the consituent parts of a +// syslog message and returns a formatted string. A different Formatter is +// defined for each different syslog protocol we support. +type Formatter func(p Priority, hostname, tag, content string) string + +// DefaultFormatter is the original format supported by the Go syslog package, +// and is a non-compliant amalgamation of 3164 and 5424 that is intended to +// maximize compatibility. +func DefaultFormatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s", + p, timestamp, hostname, tag, os.Getpid(), content) + return msg +} + +// UnixFormatter omits the hostname, because it is only used locally. +func UnixFormatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.Stamp) + msg := fmt.Sprintf("<%d>%s %s[%d]: %s", + p, timestamp, tag, os.Getpid(), content) + return msg +} + +// RFC3164Formatter provides an RFC 3164 compliant message. 
+func RFC3164Formatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.Stamp) + msg := fmt.Sprintf("<%d>%s %s %s[%d]: %s", + p, timestamp, hostname, tag, os.Getpid(), content) + return msg +} + +// if string's length is greater than max, then use the last part +func truncateStartStr(s string, max int) string { + if (len(s) > max) { + return s[len(s) - max:] + } + return s +} + +// RFC5424Formatter provides an RFC 5424 compliant message. +func RFC5424Formatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + pid := os.Getpid() + appName := truncateStartStr(os.Args[0], appNameMaxLength) + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s", + p, 1, timestamp, hostname, appName, pid, tag, content) + return msg +} diff --git a/vendor/github.com/wiggin77/srslog/framer.go b/vendor/github.com/wiggin77/srslog/framer.go new file mode 100644 index 00000000..ab46f0de --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/framer.go @@ -0,0 +1,24 @@ +package srslog + +import ( + "fmt" +) + +// Framer is a type of function that takes an input string (typically an +// already-formatted syslog message) and applies "message framing" to it. We +// have different framers because different versions of the syslog protocol +// and its transport requirements define different framing behavior. +type Framer func(in string) string + +// DefaultFramer does nothing, since there is no framing to apply. This is +// the original behavior of the Go syslog package, and is also typically used +// for UDP syslog. +func DefaultFramer(in string) string { + return in +} + +// RFC5425MessageLengthFramer prepends the message length to the front of the +// provided message, as defined in RFC 5425. +func RFC5425MessageLengthFramer(in string) string { + return fmt.Sprintf("%d %s", len(in), in) +} diff --git a/vendor/github.com/wiggin77/srslog/go.mod b/vendor/github.com/wiggin77/srslog/go.mod new file mode 100644 index 00000000..393b0761 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/go.mod @@ -0,0 +1,3 @@ +module github.com/wiggin77/srslog + +go 1.14 diff --git a/vendor/github.com/wiggin77/srslog/logger.go b/vendor/github.com/wiggin77/srslog/logger.go new file mode 100644 index 00000000..3a738565 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/logger.go @@ -0,0 +1,13 @@ +package srslog + +import ( + "io/ioutil" + "log" +) + +var Logger log.Logger + +func init() { + Logger = log.Logger{} + Logger.SetOutput(ioutil.Discard) +} diff --git a/vendor/github.com/wiggin77/srslog/net_conn.go b/vendor/github.com/wiggin77/srslog/net_conn.go new file mode 100644 index 00000000..f3cfeb60 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/net_conn.go @@ -0,0 +1,76 @@ +package srslog + +import ( + "io" + "net" + "time" +) + +// netConn has an internal net.Conn and adheres to the serverConn interface, +// allowing us to send syslog messages over the network. +type netConn struct { + conn net.Conn + done chan interface{} +} + +// newNetConn creates a netConn instance that is monitored for unexpected socket closure. +func newNetConn(conn net.Conn) *netConn { + nc := &netConn{conn: conn, done: make(chan interface{})} + go monitor(nc.conn, nc.done) + return nc +} + +// writeString formats syslog messages using time.RFC3339 and includes the +// hostname, and sends the message to the connection. 
+func (n *netConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { + if framer == nil { + framer = DefaultFramer + } + if formatter == nil { + formatter = DefaultFormatter + } + formattedMessage := framer(formatter(p, hostname, tag, msg)) + _, err := n.conn.Write([]byte(formattedMessage)) + return err +} + +// close the network connection +func (n *netConn) close() error { + // signal monitor goroutine to exit + close(n.done) + // wake up monitor blocked on read (close usually is enough) + _ = n.conn.SetReadDeadline(time.Now()) + // close the connection + return n.conn.Close() +} + +// monitor continuously tries to read from the connection to detect socket close. +// This is needed because syslog server uses a write only socket and Linux systems +// take a long time to detect a loss of connectivity on a socket when only writing; +// the writes simply fail without an error returned. +func monitor(conn net.Conn, done chan interface{}) { + defer Logger.Println("monitor exit") + + buf := make([]byte, 1) + for { + Logger.Println("monitor loop") + + select { + case <-done: + return + case <-time.After(1 * time.Second): + } + + err := conn.SetReadDeadline(time.Now().Add(time.Second * 30)) + if err != nil { + continue + } + + _, err = conn.Read(buf) + Logger.Println("monitor -- ", err) + if err == io.EOF { + Logger.Println("monitor close conn") + conn.Close() + } + } +} diff --git a/vendor/github.com/wiggin77/srslog/srslog.go b/vendor/github.com/wiggin77/srslog/srslog.go new file mode 100644 index 00000000..b47ad72d --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/srslog.go @@ -0,0 +1,125 @@ +package srslog + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "io/ioutil" + "log" + "net" + "os" +) + +// This interface allows us to work with both local and network connections, +// and enables Solaris support (see syslog_unix.go). +type serverConn interface { + writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, s string) error + close() error +} + +// DialFunc is the function signature to be used for a custom dialer callback +// with DialWithCustomDialer +type DialFunc func(string, string) (net.Conn, error) + +// New establishes a new connection to the system log daemon. Each +// write to the returned Writer sends a log message with the given +// priority and prefix. +func New(priority Priority, tag string) (w *Writer, err error) { + return Dial("", "", priority, tag) +} + +// Dial establishes a connection to a log daemon by connecting to +// address raddr on the specified network. Each write to the returned +// Writer sends a log message with the given facility, severity and +// tag. +// If network is empty, Dial will connect to the local syslog server. +func Dial(network, raddr string, priority Priority, tag string) (*Writer, error) { + return DialWithTLSConfig(network, raddr, priority, tag, nil) +} + +// ErrNilDialFunc is returned from DialWithCustomDialer when a nil DialFunc is passed, +// avoiding a nil pointer deference panic. +var ErrNilDialFunc = errors.New("srslog: nil DialFunc passed to DialWithCustomDialer") + +// DialWithCustomDialer establishes a connection by calling customDial. +// Each write to the returned Writer sends a log message with the given facility, severity and tag. +// Network must be "custom" in order for this package to use customDial. +// While network and raddr will be passed to customDial, it is allowed for customDial to ignore them. 
+// If customDial is nil, this function returns ErrNilDialFunc. +func DialWithCustomDialer(network, raddr string, priority Priority, tag string, customDial DialFunc) (*Writer, error) { + if customDial == nil { + return nil, ErrNilDialFunc + } + return dialAllParameters(network, raddr, priority, tag, nil, customDial) +} + +// DialWithTLSCertPath establishes a secure connection to a log daemon by connecting to +// address raddr on the specified network. It uses certPath to load TLS certificates and configure +// the secure connection. +func DialWithTLSCertPath(network, raddr string, priority Priority, tag, certPath string) (*Writer, error) { + serverCert, err := ioutil.ReadFile(certPath) + if err != nil { + return nil, err + } + + return DialWithTLSCert(network, raddr, priority, tag, serverCert) +} + +// DialWithTLSCert establishes a secure connection to a log daemon by connecting to +// address raddr on the specified network. It uses serverCert to load a TLS certificate +// and configure the secure connection. +func DialWithTLSCert(network, raddr string, priority Priority, tag string, serverCert []byte) (*Writer, error) { + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(serverCert) + config := tls.Config{ + RootCAs: pool, + } + + return DialWithTLSConfig(network, raddr, priority, tag, &config) +} + +// DialWithTLSConfig establishes a secure connection to a log daemon by connecting to +// address raddr on the specified network. It uses tlsConfig to configure the secure connection. +func DialWithTLSConfig(network, raddr string, priority Priority, tag string, tlsConfig *tls.Config) (*Writer, error) { + return dialAllParameters(network, raddr, priority, tag, tlsConfig, nil) +} + +// implementation of the various functions above +func dialAllParameters(network, raddr string, priority Priority, tag string, tlsConfig *tls.Config, customDial DialFunc) (*Writer, error) { + if err := validatePriority(priority); err != nil { + return nil, err + } + + if tag == "" { + tag = os.Args[0] + } + hostname, _ := os.Hostname() + + w := &Writer{ + priority: priority, + tag: tag, + hostname: hostname, + network: network, + raddr: raddr, + tlsConfig: tlsConfig, + customDial: customDial, + } + + _, err := w.connect() + if err != nil { + return nil, err + } + return w, err +} + +// NewLogger creates a log.Logger whose output is written to +// the system log service with the specified priority. The logFlag +// argument is the flag set passed through to log.New to create +// the Logger. +func NewLogger(p Priority, logFlag int) (*log.Logger, error) { + s, err := New(p, "") + if err != nil { + return nil, err + } + return log.New(s, "", logFlag), nil +} diff --git a/vendor/github.com/wiggin77/srslog/srslog_unix.go b/vendor/github.com/wiggin77/srslog/srslog_unix.go new file mode 100644 index 00000000..a04d9396 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/srslog_unix.go @@ -0,0 +1,54 @@ +package srslog + +import ( + "errors" + "io" + "net" +) + +// unixSyslog opens a connection to the syslog daemon running on the +// local machine using a Unix domain socket. This function exists because of +// Solaris support as implemented by gccgo. On Solaris you cannot +// simply open a TCP connection to the syslog daemon. The gccgo +// sources have a syslog_solaris.go file that implements unixSyslog to +// return a type that satisfies the serverConn interface and simply calls the C +// library syslog function.
+func unixSyslog() (conn serverConn, err error) { + logTypes := []string{"unixgram", "unix"} + logPaths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"} + for _, network := range logTypes { + for _, path := range logPaths { + conn, err := net.Dial(network, path) + if err != nil { + continue + } else { + return &localConn{conn: conn}, nil + } + } + } + return nil, errors.New("Unix syslog delivery error") +} + +// localConn adheres to the serverConn interface, allowing us to send syslog +// messages to the local syslog daemon over a Unix domain socket. +type localConn struct { + conn io.WriteCloser +} + +// writeString formats syslog messages using time.Stamp instead of time.RFC3339, +// and omits the hostname (because it is expected to be used locally). +func (n *localConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { + if framer == nil { + framer = DefaultFramer + } + if formatter == nil { + formatter = UnixFormatter + } + _, err := n.conn.Write([]byte(framer(formatter(p, hostname, tag, msg)))) + return err +} + +// close the (local) network connection +func (n *localConn) close() error { + return n.conn.Close() +} diff --git a/vendor/github.com/wiggin77/srslog/writer.go b/vendor/github.com/wiggin77/srslog/writer.go new file mode 100644 index 00000000..86bccba1 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/writer.go @@ -0,0 +1,201 @@ +package srslog + +import ( + "crypto/tls" + "strings" + "sync" +) + +// A Writer is a connection to a syslog server. +type Writer struct { + priority Priority + tag string + hostname string + network string + raddr string + tlsConfig *tls.Config + framer Framer + formatter Formatter + + //non-nil if custom dialer set, used in getDialer + customDial DialFunc + + mu sync.RWMutex // guards conn + conn serverConn +} + +// getConn provides access to the internal conn, protected by a mutex. The +// conn is threadsafe, so it can be used while unlocked, but we want to avoid +// race conditions on grabbing a reference to it. +func (w *Writer) getConn() serverConn { + w.mu.RLock() + conn := w.conn + w.mu.RUnlock() + return conn +} + +// setConn updates the internal conn, protected by a mutex. +func (w *Writer) setConn(c serverConn) { + w.mu.Lock() + w.conn = c + w.mu.Unlock() +} + +// connect makes a connection to the syslog server. +func (w *Writer) connect() (serverConn, error) { + conn := w.getConn() + if conn != nil { + // ignore err from close, it makes sense to continue anyway + conn.close() + w.setConn(nil) + } + + var hostname string + var err error + dialer := w.getDialer() + conn, hostname, err = dialer.Call() + if err == nil { + w.setConn(conn) + w.hostname = hostname + + return conn, nil + } else { + return nil, err + } +} + +// SetFormatter changes the formatter function for subsequent messages. +func (w *Writer) SetFormatter(f Formatter) { + w.formatter = f +} + +// SetFramer changes the framer function for subsequent messages. +func (w *Writer) SetFramer(f Framer) { + w.framer = f +} + +// SetHostname changes the hostname for syslog messages if needed. +func (w *Writer) SetHostname(hostname string) { + w.hostname = hostname +} + +// Write sends a log message to the syslog daemon using the default priority +// passed into `srslog.New` or the `srslog.Dial*` functions. +func (w *Writer) Write(b []byte) (int, error) { + return w.writeAndRetry(w.priority, string(b)) +} + +// WriteWithPriority sends a log message with a custom priority. 
+func (w *Writer) WriteWithPriority(p Priority, b []byte) (int, error) { + return w.writeAndRetryWithPriority(p, string(b)) +} + +// Close closes a connection to the syslog daemon. +func (w *Writer) Close() error { + conn := w.getConn() + if conn != nil { + err := conn.close() + w.setConn(nil) + return err + } + return nil +} + +// Emerg logs a message with severity LOG_EMERG; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Emerg(m string) (err error) { + _, err = w.writeAndRetry(LOG_EMERG, m) + return err +} + +// Alert logs a message with severity LOG_ALERT; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Alert(m string) (err error) { + _, err = w.writeAndRetry(LOG_ALERT, m) + return err +} + +// Crit logs a message with severity LOG_CRIT; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Crit(m string) (err error) { + _, err = w.writeAndRetry(LOG_CRIT, m) + return err +} + +// Err logs a message with severity LOG_ERR; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Err(m string) (err error) { + _, err = w.writeAndRetry(LOG_ERR, m) + return err +} + +// Warning logs a message with severity LOG_WARNING; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Warning(m string) (err error) { + _, err = w.writeAndRetry(LOG_WARNING, m) + return err +} + +// Notice logs a message with severity LOG_NOTICE; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Notice(m string) (err error) { + _, err = w.writeAndRetry(LOG_NOTICE, m) + return err +} + +// Info logs a message with severity LOG_INFO; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Info(m string) (err error) { + _, err = w.writeAndRetry(LOG_INFO, m) + return err +} + +// Debug logs a message with severity LOG_DEBUG; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Debug(m string) (err error) { + _, err = w.writeAndRetry(LOG_DEBUG, m) + return err +} + +// writeAndRetry takes a severity and the string to write. Any facility passed to +// it as part of the severity Priority will be ignored. +func (w *Writer) writeAndRetry(severity Priority, s string) (int, error) { + pr := (w.priority & facilityMask) | (severity & severityMask) + + return w.writeAndRetryWithPriority(pr, s) +} + +// writeAndRetryWithPriority differs from writeAndRetry in that it allows setting +// of both the facility and the severity. +func (w *Writer) writeAndRetryWithPriority(p Priority, s string) (int, error) { + conn := w.getConn() + if conn != nil { + if n, err := w.write(conn, p, s); err == nil { + return n, err + } + } + + var err error + if conn, err = w.connect(); err != nil { + return 0, err + } + return w.write(conn, p, s) +} + +// write generates and writes a syslog formatted string. It formats the +// message based on the current Formatter and Framer. 
+func (w *Writer) write(conn serverConn, p Priority, msg string) (int, error) { + // ensure it ends in a \n + if !strings.HasSuffix(msg, "\n") { + msg += "\n" + } + + err := conn.writeString(w.framer, w.formatter, p, w.hostname, w.tag, msg) + if err != nil { + return 0, err + } + // Note: return the length of the input, not the number of + // bytes printed by Fprintf, because this must behave like + // an io.Writer. + return len(msg), nil +} diff --git a/vendor/golang.org/x/image/webp/decode.go b/vendor/golang.org/x/image/webp/decode.go index f77a4ebf..d6eefd59 100644 --- a/vendor/golang.org/x/image/webp/decode.go +++ b/vendor/golang.org/x/image/webp/decode.go @@ -126,22 +126,23 @@ func decode(r io.Reader, configOnly bool) (image.Image, image.Config, error) { alphaBit = 1 << 4 iccProfileBit = 1 << 5 ) - if buf[0] != alphaBit { - return nil, image.Config{}, errors.New("webp: non-Alpha VP8X is not implemented") - } + wantAlpha = (buf[0] & alphaBit) != 0 widthMinusOne = uint32(buf[4]) | uint32(buf[5])<<8 | uint32(buf[6])<<16 heightMinusOne = uint32(buf[7]) | uint32(buf[8])<<8 | uint32(buf[9])<<16 if configOnly { + if wantAlpha { + return nil, image.Config{ + ColorModel: color.NYCbCrAModel, + Width: int(widthMinusOne) + 1, + Height: int(heightMinusOne) + 1, + }, nil + } return nil, image.Config{ - ColorModel: color.NYCbCrAModel, + ColorModel: color.YCbCrModel, Width: int(widthMinusOne) + 1, Height: int(heightMinusOne) + 1, }, nil } - wantAlpha = true - - default: - return nil, image.Config{}, errInvalidFormat } } } diff --git a/vendor/gopkg.in/ini.v1/.travis.yml b/vendor/gopkg.in/ini.v1/.travis.yml deleted file mode 100644 index 4db2e661..00000000 --- a/vendor/gopkg.in/ini.v1/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go -os: linux -dist: xenial -go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x - - 1.14.x -install: skip -script: - - go get golang.org/x/tools/cmd/cover - - go get github.com/smartystreets/goconvey - - mkdir -p $HOME/gopath/src/gopkg.in - - ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1 - - cd $HOME/gopath/src/gopkg.in/ini.v1 - - go test -v -cover -race diff --git a/vendor/gopkg.in/ini.v1/README.md b/vendor/gopkg.in/ini.v1/README.md index 783eb06a..5d65658b 100644 --- a/vendor/gopkg.in/ini.v1/README.md +++ b/vendor/gopkg.in/ini.v1/README.md @@ -1,6 +1,9 @@ # INI -[![Build Status](https://img.shields.io/travis/go-ini/ini/master.svg?style=for-the-badge&logo=travis)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) +[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/go-ini/ini/Go?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=workflow%3AGo) +[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) +[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) +[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) ![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) @@ -33,6 +36,7 @@ Please add `-u` flag to update in the future. 
- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) - [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- 中国大陆镜像:https://ini.unknwon.cn ## License diff --git a/vendor/gopkg.in/ini.v1/codecov.yml b/vendor/gopkg.in/ini.v1/codecov.yml new file mode 100644 index 00000000..fc947f23 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/codecov.yml @@ -0,0 +1,9 @@ +coverage: + range: "60...95" + status: + project: + default: + threshold: 1% + +comment: + layout: 'diff, files' diff --git a/vendor/gopkg.in/ini.v1/data_source.go b/vendor/gopkg.in/ini.v1/data_source.go index bbedf361..c3a541f1 100644 --- a/vendor/gopkg.in/ini.v1/data_source.go +++ b/vendor/gopkg.in/ini.v1/data_source.go @@ -66,10 +66,10 @@ func parseDataSource(source interface{}) (dataSource, error) { return sourceFile{s}, nil case []byte: return &sourceData{s}, nil - case io.Reader: - return &sourceReadCloser{ioutil.NopCloser(s)}, nil case io.ReadCloser: return &sourceReadCloser{s}, nil + case io.Reader: + return &sourceReadCloser{ioutil.NopCloser(s)}, nil default: return nil, fmt.Errorf("error parsing data source: unknown type %q", s) } diff --git a/vendor/gopkg.in/ini.v1/file.go b/vendor/gopkg.in/ini.v1/file.go index f95606f9..2fcd8de6 100644 --- a/vendor/gopkg.in/ini.v1/file.go +++ b/vendor/gopkg.in/ini.v1/file.go @@ -55,6 +55,9 @@ func newFile(dataSources []dataSource, opts LoadOptions) *File { if len(opts.KeyValueDelimiterOnWrite) == 0 { opts.KeyValueDelimiterOnWrite = "=" } + if len(opts.ChildSectionDelimiter) == 0 { + opts.ChildSectionDelimiter = "." + } return &File{ BlockMode: true, @@ -82,7 +85,7 @@ func (f *File) NewSection(name string) (*Section, error) { return nil, errors.New("empty section name") } - if f.options.Insensitive && name != DefaultSection { + if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { name = strings.ToLower(name) } @@ -144,7 +147,7 @@ func (f *File) SectionsByName(name string) ([]*Section, error) { if len(name) == 0 { name = DefaultSection } - if f.options.Insensitive { + if f.options.Insensitive || f.options.InsensitiveSections { name = strings.ToLower(name) } @@ -236,7 +239,7 @@ func (f *File) DeleteSectionWithIndex(name string, index int) error { if len(name) == 0 { name = DefaultSection } - if f.options.Insensitive { + if f.options.Insensitive || f.options.InsensitiveSections { name = strings.ToLower(name) } @@ -347,7 +350,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { } } - if i > 0 || DefaultHeader { + if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { return nil, err } diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go index 9f28cb31..80ebf3ad 100644 --- a/vendor/gopkg.in/ini.v1/ini.go +++ b/vendor/gopkg.in/ini.v1/ini.go @@ -18,8 +18,10 @@ package ini import ( + "os" "regexp" "runtime" + "strings" ) const ( @@ -55,8 +57,10 @@ var ( DefaultFormatRight = "" ) +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + func init() { - if runtime.GOOS == "windows" { + if runtime.GOOS == "windows" && !inTest { LineBreak = "\r\n" } } @@ -67,6 +71,10 @@ type LoadOptions struct { Loose bool // Insensitive indicates whether the parser forces all section and key names to lowercase. Insensitive bool + // InsensitiveSections indicates whether the parser forces all section to lowercase. 
+ InsensitiveSections bool + // InsensitiveKeys indicates whether the parser forces all key names to lowercase. + InsensitiveKeys bool // IgnoreContinuation indicates whether to ignore continuation lines while parsing. IgnoreContinuation bool // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. @@ -105,6 +113,8 @@ type LoadOptions struct { KeyValueDelimiters string // KeyValueDelimiters is the delimiter that are used to separate key and value output. By default, it is "=". KeyValueDelimiterOnWrite string + // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". + ChildSectionDelimiter string // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). PreserveSurroundedQuote bool // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/gopkg.in/ini.v1/key.go index 3c197410..8baafd9e 100644 --- a/vendor/gopkg.in/ini.v1/key.go +++ b/vendor/gopkg.in/ini.v1/key.go @@ -686,99 +686,127 @@ func (k *Key) StrictTimes(delim string) ([]time.Time, error) { // parseBools transforms strings to bools. func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) { vals := make([]bool, 0, len(strs)) - for _, str := range strs { + parser := func(str string) (interface{}, error) { val, err := parseBool(str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(bool)) } } - return vals, nil + return vals, err } // parseFloat64s transforms strings to float64s. func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { vals := make([]float64, 0, len(strs)) - for _, str := range strs { + parser := func(str string) (interface{}, error) { val, err := strconv.ParseFloat(str, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(float64)) } } - return vals, nil + return vals, err } // parseInts transforms strings to ints. func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { vals := make([]int, 0, len(strs)) - for _, str := range strs { - valInt64, err := strconv.ParseInt(str, 0, 64) - val := int(valInt64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, int(val.(int64))) } } - return vals, nil + return vals, err } // parseInt64s transforms strings to int64s. 
func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { vals := make([]int64, 0, len(strs)) - for _, str := range strs { + parser := func(str string) (interface{}, error) { val, err := strconv.ParseInt(str, 0, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(int64)) } } - return vals, nil + return vals, err } // parseUints transforms strings to uints. func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { vals := make([]uint, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 0, 0) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, uint(val)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, uint(val.(uint64))) } } - return vals, nil + return vals, err } // parseUint64s transforms strings to uint64s. func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { vals := make([]uint64, 0, len(strs)) - for _, str := range strs { + parser := func(str string) (interface{}, error) { val, err := strconv.ParseUint(str, 0, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(uint64)) } } - return vals, nil + return vals, err } + +type Parser func(str string) (interface{}, error) + + // parseTimesFormat transforms strings to times in given format. 
func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { vals := make([]time.Time, 0, len(strs)) - for _, str := range strs { + parser := func(str string) (interface{}, error) { val, err := time.Parse(format, str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(time.Time)) + } + } + return vals, err +} + + +// doParse transforms strings to different types +func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { + vals := make([]interface{}, 0, len(strs)) + for _, str := range strs { + val, err := parser(str) if err != nil && returnOnInvalid { return nil, err } diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go index f023db59..65147166 100644 --- a/vendor/gopkg.in/ini.v1/parser.go +++ b/vendor/gopkg.in/ini.v1/parser.go @@ -84,7 +84,10 @@ func (p *parser) BOM() error { case mask[0] == 254 && mask[1] == 255: fallthrough case mask[0] == 255 && mask[1] == 254: - p.buf.Read(mask) + _, err = p.buf.Read(mask) + if err != nil { + return err + } case mask[0] == 239 && mask[1] == 187: mask, err := p.buf.Peek(3) if err != nil && err != io.EOF { @@ -93,7 +96,10 @@ func (p *parser) BOM() error { return nil } if mask[2] == 191 { - p.buf.Read(mask) + _, err = p.buf.Read(mask) + if err != nil { + return err + } } } return nil @@ -135,7 +141,7 @@ func readKeyName(delimiters string, in []byte) (string, int, error) { } // Get out key name - endIdx := -1 + var endIdx int if len(keyQuote) > 0 { startIdx := len(keyQuote) // FIXME: fail case -> """"""name"""=value @@ -371,7 +377,7 @@ func (f *File) parse(reader io.Reader) (err error) { // Ignore error because default section name is never empty string. 
name := DefaultSection - if f.options.Insensitive { + if f.options.Insensitive || f.options.InsensitiveSections { name = strings.ToLower(DefaultSection) } section, _ := f.NewSection(name) @@ -413,7 +419,10 @@ func (f *File) parse(reader io.Reader) (err error) { if f.options.AllowNestedValues && isLastValueEmpty && len(line) > 0 { if line[0] == ' ' || line[0] == '\t' { - lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) + err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) + if err != nil { + return err + } continue } } @@ -460,7 +469,7 @@ func (f *File) parse(reader io.Reader) (err error) { inUnparseableSection = false for i := range f.options.UnparseableSections { if f.options.UnparseableSections[i] == name || - (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { + ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { inUnparseableSection = true continue } diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/gopkg.in/ini.v1/section.go index 6ba5ac29..afaa97c9 100644 --- a/vendor/gopkg.in/ini.v1/section.go +++ b/vendor/gopkg.in/ini.v1/section.go @@ -66,7 +66,7 @@ func (s *Section) SetBody(body string) { func (s *Section) NewKey(name, val string) (*Key, error) { if len(name) == 0 { return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive { + } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { name = strings.ToLower(name) } @@ -109,7 +109,7 @@ func (s *Section) GetKey(name string) (*Key, error) { if s.f.BlockMode { s.f.lock.RLock() } - if s.f.options.Insensitive { + if s.f.options.Insensitive || s.f.options.InsensitiveKeys { name = strings.ToLower(name) } key := s.keys[name] @@ -121,7 +121,7 @@ func (s *Section) GetKey(name string) (*Key, error) { // Check if it is a child-section. sname := s.name for { - if i := strings.LastIndex(sname, "."); i > -1 { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { sname = sname[:i] sec, err := s.f.GetSection(sname) if err != nil { @@ -188,7 +188,7 @@ func (s *Section) ParentKeys() []*Key { var parentKeys []*Key sname := s.name for { - if i := strings.LastIndex(sname, "."); i > -1 { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { sname = sname[:i] sec, err := s.f.GetSection(sname) if err != nil { @@ -245,7 +245,7 @@ func (s *Section) DeleteKey(name string) { // For example, "[parent.child1]" and "[parent.child12]" are child sections // of section "[parent]". func (s *Section) ChildSections() []*Section { - prefix := s.name + "." + prefix := s.name + s.f.options.ChildSectionDelimiter children := make([]*Section, 0, 3) for _, name := range s.f.sectionList { if strings.HasPrefix(name, prefix) { diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go index 6b958496..ae5ef4a8 100644 --- a/vendor/gopkg.in/ini.v1/struct.go +++ b/vendor/gopkg.in/ini.v1/struct.go @@ -278,7 +278,9 @@ func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bo return rawName, omitEmpty, allowShadow, allowNonUnique } -func (s *Section) mapToField(val reflect.Value, isStrict bool) error { +// mapToField maps the given value to the matching field of the given section. +// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. 
+func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int) error { if val.Kind() == reflect.Ptr { val = val.Elem() } @@ -307,13 +309,16 @@ func (s *Section) mapToField(val reflect.Value, isStrict bool) error { } if isAnonymous || isStruct || isStructPtr { - if sec, err := s.f.GetSection(fieldName); err == nil { + if secs, err := s.f.SectionsByName(fieldName); err == nil { + if len(secs) <= sectionIndex { + return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) + } // Only set the field to non-nil struct value if we have a section for it. // Otherwise, we end up with a non-nil struct ptr even though there is no data. if isStructPtr && field.IsNil() { field.Set(reflect.New(tpField.Type.Elem())) } - if err = sec.mapToField(field, isStrict); err != nil { + if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex); err != nil { return fmt.Errorf("map to field %q: %v", fieldName, err) } continue @@ -350,9 +355,9 @@ func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) ( } typ := val.Type().Elem() - for _, sec := range secs { + for i, sec := range secs { elem := reflect.New(typ) - if err = sec.mapToField(elem, isStrict); err != nil { + if err = sec.mapToField(elem, isStrict, i); err != nil { return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) } @@ -382,7 +387,7 @@ func (s *Section) mapTo(v interface{}, isStrict bool) error { return nil } - return s.mapToField(val, isStrict) + return s.mapToField(val, isStrict, 0) } // MapTo maps section to given struct. @@ -474,7 +479,7 @@ func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, all _ = keyWithShadows.AddShadow(val) } } - key = keyWithShadows + *key = *keyWithShadows return nil } @@ -564,6 +569,10 @@ func (s *Section) reflectFrom(val reflect.Value) error { typ := val.Type() for i := 0; i < typ.NumField(); i++ { + if !val.Field(i).CanInterface() { + continue + } + field := val.Field(i) tpField := typ.Field(i) @@ -695,7 +704,6 @@ func (s *Section) ReflectFrom(v interface{}) error { } if typ.Kind() == reflect.Ptr { - typ = typ.Elem() val = val.Elem() } else { return errors.New("not a pointer to a struct") diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go index 1f7e87e6..d2c2308f 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -86,6 +86,7 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, } } diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml index 04d4dae0..a130fe88 100644 --- a/vendor/gopkg.in/yaml.v3/.travis.yml +++ b/vendor/gopkg.in/yaml.v3/.travis.yml @@ -11,6 +11,7 @@ go: - "1.11.x" - "1.12.x" - "1.13.x" + - "1.14.x" - "tip" go_import_path: gopkg.in/yaml.v3 diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go index 65846e67..ae7d049f 100644 --- a/vendor/gopkg.in/yaml.v3/apic.go +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -108,6 +108,7 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, } } diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go 
index be63169b..21c0dacf 100644 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -35,6 +35,7 @@ type parser struct { doc *Node anchors map[string]*Node doneInit bool + textless bool } func newParser(b []byte) *parser { @@ -108,14 +109,18 @@ func (p *parser) peek() yaml_event_type_t { func (p *parser) fail() { var where string var line int - if p.parser.problem_mark.line != 0 { + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line // Scanner errors don't iterate line before returning error if p.parser.error == yaml_SCANNER_ERROR { line++ } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line } if line != 0 { where = "line " + strconv.Itoa(line) + ": " @@ -169,17 +174,20 @@ func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { } else if kind == ScalarNode { tag, _ = resolve("", value) } - return &Node{ - Kind: kind, - Tag: tag, - Value: value, - Style: style, - Line: p.event.start_mark.line + 1, - Column: p.event.start_mark.column + 1, - HeadComment: string(p.event.head_comment), - LineComment: string(p.event.line_comment), - FootComment: string(p.event.foot_comment), + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n } func (p *parser) parseChild(parent *Node) *Node { @@ -391,7 +399,7 @@ func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good // // If n holds a null value, prepare returns before doing anything. func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.ShortTag() == nullTag { + if n.ShortTag() == nullTag || n.Kind == 0 && n.IsZero() { return out, false, false } again := true @@ -497,8 +505,13 @@ func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { good = d.mapping(n, out) case SequenceNode: good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough default: - panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind))) + failf("cannot decode node with unknown kind %d", n.Kind) } return good } @@ -533,6 +546,17 @@ func resetMap(out reflect.Value) { } } +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + func (d *decoder) scalar(n *Node, out reflect.Value) bool { var tag string var resolved interface{} @@ -550,14 +574,7 @@ func (d *decoder) scalar(n *Node, out reflect.Value) bool { } } if resolved == nil { - if out.CanAddr() { - switch out.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - out.Set(reflect.Zero(out.Type())) - return true - } - } - return false + return d.null(out) } if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { // We've resolved to exactly the type we want, so use that. 
diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go index ab2a0661..c29217ef 100644 --- a/vendor/gopkg.in/yaml.v3/emitterc.go +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -235,10 +235,13 @@ func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool emitter.indent = 0 } } else if !indentless { - emitter.indent += emitter.best_indent - // [Go] If inside a block sequence item, discount the space taken by the indicator. - if emitter.best_indent > 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { - emitter.indent -= 2 + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) } } return true @@ -725,16 +728,9 @@ func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_e // Expect a block item node. func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { - // [Go] The original logic here would not indent the sequence when inside a mapping. - // In Go we always indent it, but take the sequence indicator out of the indentation. - indentless := emitter.best_indent == 2 && emitter.mapping_context && (emitter.column == 0 || !emitter.indention) - original := emitter.indent - if !yaml_emitter_increase_indent(emitter, false, indentless) { + if !yaml_emitter_increase_indent(emitter, false, false) { return false } - if emitter.indent > original+2 { - emitter.indent -= 2 - } } if event.typ == yaml_SEQUENCE_END_EVENT { emitter.indent = emitter.indents[len(emitter.indents)-1] @@ -785,6 +781,13 @@ func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_ev if !yaml_emitter_write_indent(emitter) { return false } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. + emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } if yaml_emitter_check_simple_key(emitter) { emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, true) @@ -810,6 +813,29 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ return false } } + if len(emitter.key_line_comment) > 0 { + // [Go] A line comment was previously provided for the key. Handle it before + // the value so the inline comments are placed correctly. + if yaml_emitter_silent_nil_event(emitter, event) && len(emitter.line_comment) == 0 { + // Nothing other than the line comment will be written on the line. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } else { + // An actual value is coming, so emit the comment line. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + // Indent in unless it's a block that will reindent anyway. 
+ if event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || (event.typ != yaml_MAPPING_START_EVENT && event.typ != yaml_SEQUENCE_START_EVENT) { + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + } emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { return false @@ -823,6 +849,10 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ return true } +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + // Expect a node. func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, root bool, sequence bool, mapping bool, simple_key bool) bool { diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go index 1f37271c..45e8d1e1 100644 --- a/vendor/gopkg.in/yaml.v3/encode.go +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -119,6 +119,9 @@ func (e *encoder) marshal(tag string, in reflect.Value) { case *Node: e.nodev(in) return + case Node: + e.nodev(in.Addr()) + return case time.Time: e.timev(tag, in) return @@ -422,18 +425,23 @@ func (e *encoder) nodev(in reflect.Value) { } func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. + if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + // If the tag was not explicitly requested, and dropping it won't change the // implicit tag of the value, don't include it in the presentation. var tag = node.Tag var stag = shortTag(tag) - var rtag string var forceQuoting bool if tag != "" && node.Style&TaggedStyle == 0 { if node.Kind == ScalarNode { if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { tag = "" } else { - rtag, _ = resolve("", node.Value) + rtag, _ := resolve("", node.Value) if rtag == stag { tag = "" } else if stag == strTag { @@ -442,6 +450,7 @@ func (e *encoder) node(node *Node, tail string) { } } } else { + var rtag string switch node.Kind { case MappingNode: rtag = mapTag @@ -471,7 +480,7 @@ func (e *encoder) node(node *Node, tail string) { if node.Style&FlowStyle != 0 { style = yaml_FLOW_SEQUENCE_STYLE } - e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)) + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) e.event.head_comment = []byte(node.HeadComment) e.emit() for _, node := range node.Content { @@ -487,7 +496,7 @@ func (e *encoder) node(node *Node, tail string) { if node.Style&FlowStyle != 0 { style = yaml_FLOW_MAPPING_STYLE } - yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style) + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) e.event.tail_comment = []byte(tail) e.event.head_comment = []byte(node.HeadComment) e.emit() @@ -528,11 +537,11 @@ func (e *encoder) node(node *Node, tail string) { case ScalarNode: value := node.Value if !utf8.ValidString(value) { - if tag == binaryTag { + if stag == binaryTag { failf("explicitly tagged !!binary data must be base64-encoded") } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + if stag != "" { + failf("cannot marshal invalid UTF-8 data 
as %s", stag) } // It can't be encoded directly as YAML so use a binary tag // and encode it as base64. @@ -557,5 +566,7 @@ func (e *encoder) node(node *Node, tail string) { } e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) } } diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go index aea9050b..ac66fccc 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -648,6 +648,10 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } return true } if len(anchor) > 0 || len(tag) > 0 { @@ -694,25 +698,13 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark - prior_head := len(parser.head_comment) + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false } - if prior_head > 0 && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - // [Go] It's a sequence under a sequence entry, so the former head comment - // is for the list itself, not the first list item under it. - parser.stem_comment = parser.head_comment[:prior_head] - if len(parser.head_comment) == prior_head { - parser.head_comment = nil - } else { - // Copy suffix to prevent very strange bugs if someone ever appends - // further bytes to the prefix in the stem_comment slice above. - parser.head_comment = append([]byte(nil), parser.head_comment[prior_head+1:]...) - } - - } if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) return yaml_parser_parse_node(parser, event, true, false) @@ -754,7 +746,9 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false @@ -780,6 +774,32 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y return true } +// Split stem comment from head comment. +// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) 
+ } +} + // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // ******************* diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go index 57e954ca..d9a539c3 100644 --- a/vendor/gopkg.in/yaml.v3/scannerc.go +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -749,6 +749,11 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { if !ok { return } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } if !yaml_parser_scan_line_comment(parser, comment_mark) { ok = false return @@ -2856,13 +2861,12 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t return false } skip_line(parser) - } else { - if parser.mark.index >= seen { - if len(text) == 0 { - start_mark = parser.mark - } - text = append(text, parser.buffer[parser.buffer_pos]) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark } + text = read(parser, text) + } else { skip(parser) } } @@ -2999,10 +3003,9 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo return false } skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) } else { - if parser.mark.index >= seen { - text = append(text, parser.buffer[parser.buffer_pos]) - } skip(parser) } } diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go index b5d35a50..56e8a849 100644 --- a/vendor/gopkg.in/yaml.v3/yaml.go +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -89,7 +89,7 @@ func Unmarshal(in []byte, out interface{}) (err error) { return unmarshal(in, out, false) } -// A Decorder reads and decodes YAML values from an input stream. +// A Decoder reads and decodes YAML values from an input stream. type Decoder struct { parser *parser knownFields bool @@ -194,7 +194,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // Zero valued structs will be omitted if all their public // fields are zero, unless they implement an IsZero // method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. +// case the field will be excluded if IsZero returns true. // // flow Marshal using a flow style (useful for structs, // sequences and maps). @@ -252,6 +252,24 @@ func (e *Encoder) Encode(v interface{}) (err error) { return nil } +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. +func (n *Node) Encode(v interface{}) (err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(v)) + e.finish() + p := newParser(e.out) + p.textless = true + defer p.destroy() + doc := p.parse() + *n = *doc.Content[0] + return nil +} + // SetIndent changes the used indentation used when encoding. func (e *Encoder) SetIndent(spaces int) { if spaces < 0 { @@ -328,6 +346,12 @@ const ( // and maps, Node is an intermediate representation that allows detailed // control over the content being decoded or encoded. // +// It's worth noting that although Node offers access into details such as +// line numbers, colums, and comments, the content when re-encoded will not +// have its original textual representation preserved. 
An effort is made to +// render the data plesantly, and to preserve comments near the data they +// describe, though. +// // Values that make use of the Node type interact with the yaml package in the // same way any other type would do, by encoding and decoding yaml data // directly or indirectly into them. @@ -391,6 +415,13 @@ type Node struct { Column int } +// IsZero returns whether the node has all of its fields unset. +func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 +} + + // LongTag returns the long form of the tag that indicates the data type for // the node. If the Tag field isn't explicitly defined, one will be computed // based on the node properties. diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go index 2719cfbb..7c6d0077 100644 --- a/vendor/gopkg.in/yaml.v3/yamlh.go +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -787,6 +787,8 @@ type yaml_emitter_t struct { foot_comment []byte tail_comment []byte + key_line_comment []byte + // Dumper stuff opened bool // If the stream was already opened? diff --git a/vendor/modules.txt b/vendor/modules.txt index e95b3de7..fb15eaa8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -41,7 +41,7 @@ github.com/dyatlov/go-opengraph/opengraph github.com/francoispqt/gojay # github.com/fsnotify/fsnotify v1.4.9 github.com/fsnotify/fsnotify -# github.com/go-asn1-ber/asn1-ber v1.4.1 +# github.com/go-asn1-ber/asn1-ber v1.5.1 github.com/go-asn1-ber/asn1-ber # github.com/go-telegram-bot-api/telegram-bot-api v1.0.1-0.20200524105306-7434b0456e81 github.com/go-telegram-bot-api/telegram-bot-api @@ -65,6 +65,10 @@ github.com/gopackage/ddp github.com/gorilla/schema # github.com/gorilla/websocket v1.4.2 github.com/gorilla/websocket +# github.com/hashicorp/errwrap v1.0.0 +github.com/hashicorp/errwrap +# github.com/hashicorp/go-multierror v1.1.0 +github.com/hashicorp/go-multierror # github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru @@ -87,8 +91,6 @@ github.com/keybase/go-keybase-chat-bot/kbchat/types/chat1 github.com/keybase/go-keybase-chat-bot/kbchat/types/gregor1 github.com/keybase/go-keybase-chat-bot/kbchat/types/keybase1 github.com/keybase/go-keybase-chat-bot/kbchat/types/stellar1 -# github.com/konsorten/go-windows-terminal-sequences v1.0.3 -github.com/konsorten/go-windows-terminal-sequences # github.com/labstack/echo/v4 v4.1.17 github.com/labstack/echo/v4 github.com/labstack/echo/v4/middleware @@ -124,7 +126,11 @@ github.com/mattermost/go-i18n/i18n/language github.com/mattermost/go-i18n/i18n/translation # github.com/mattermost/ldap v0.0.0-20191128190019-9f62ba4b8d4d github.com/mattermost/ldap -# github.com/mattermost/mattermost-server/v5 v5.25.2 +# github.com/mattermost/logr v1.0.13 +github.com/mattermost/logr +github.com/mattermost/logr/format +github.com/mattermost/logr/target +# github.com/mattermost/mattermost-server/v5 v5.28.1 github.com/mattermost/mattermost-server/v5/mlog github.com/mattermost/mattermost-server/v5/model github.com/mattermost/mattermost-server/v5/services/timezones @@ -142,7 +148,7 @@ github.com/mattn/godown github.com/mgutz/ansi # github.com/missdeer/golib v1.0.3 github.com/missdeer/golib/ic -# github.com/mitchellh/mapstructure v1.2.3 +# github.com/mitchellh/mapstructure v1.3.3 github.com/mitchellh/mapstructure # 
github.com/monaco-io/request v1.0.4 github.com/monaco-io/request @@ -155,10 +161,12 @@ github.com/nelsonken/gomf # github.com/paulrosania/go-charset v0.0.0-20190326053356-55c9d7a5834c github.com/paulrosania/go-charset/charset github.com/paulrosania/go-charset/data -# github.com/pborman/uuid v1.2.0 +# github.com/pborman/uuid v1.2.1 github.com/pborman/uuid -# github.com/pelletier/go-toml v1.7.0 +# github.com/pelletier/go-toml v1.8.0 github.com/pelletier/go-toml +# github.com/philhofer/fwd v1.0.0 +github.com/philhofer/fwd # github.com/pkg/errors v0.9.1 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 @@ -179,18 +187,18 @@ github.com/shazow/rateio github.com/shazow/ssh-chat/internal/sanitize github.com/shazow/ssh-chat/sshd github.com/shazow/ssh-chat/sshd/terminal -# github.com/sirupsen/logrus v1.6.0 +# github.com/sirupsen/logrus v1.7.0 github.com/sirupsen/logrus # github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9 github.com/skip2/go-qrcode github.com/skip2/go-qrcode/bitset github.com/skip2/go-qrcode/reedsolomon -# github.com/slack-go/slack v0.7.0 +# github.com/slack-go/slack v0.7.2 github.com/slack-go/slack github.com/slack-go/slack/internal/errorsx github.com/slack-go/slack/internal/timex github.com/slack-go/slack/slackutilsx -# github.com/spf13/afero v1.2.2 +# github.com/spf13/afero v1.3.4 github.com/spf13/afero github.com/spf13/afero/mem # github.com/spf13/cast v1.3.1 @@ -209,12 +217,22 @@ github.com/stretchr/testify/suite github.com/subosito/gotenv # github.com/technoweenie/multipartstreamer v1.0.1 github.com/technoweenie/multipartstreamer +# github.com/tinylib/msgp v1.1.2 +github.com/tinylib/msgp/msgp # github.com/valyala/bytebufferpool v1.0.0 github.com/valyala/bytebufferpool # github.com/valyala/fasttemplate v1.2.1 github.com/valyala/fasttemplate # github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50 github.com/vincent-petithory/dataurl +# github.com/wiggin77/cfg v1.0.2 +github.com/wiggin77/cfg +github.com/wiggin77/cfg/ini +github.com/wiggin77/cfg/timeconv +# github.com/wiggin77/merror v1.0.2 +github.com/wiggin77/merror +# github.com/wiggin77/srslog v1.0.1 +github.com/wiggin77/srslog # github.com/writeas/go-strip-markdown v2.0.1+incompatible github.com/writeas/go-strip-markdown # github.com/yaegashi/msgraph.go v0.1.4 @@ -251,7 +269,7 @@ golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/terminal -# golang.org/x/image v0.0.0-20200801110659-972c09e46d76 +# golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 golang.org/x/image/riff golang.org/x/image/vp8 golang.org/x/image/vp8l @@ -334,15 +352,15 @@ google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb -# gopkg.in/ini.v1 v1.55.0 +# gopkg.in/ini.v1 v1.60.0 gopkg.in/ini.v1 # gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/natefinch/lumberjack.v2 # gopkg.in/olahol/melody.v1 v1.0.0-20170518105555-d52139073376 gopkg.in/olahol/melody.v1 -# gopkg.in/yaml.v2 v2.2.8 +# gopkg.in/yaml.v2 v2.3.0 gopkg.in/yaml.v2 -# gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c +# gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 gopkg.in/yaml.v3 # layeh.com/gumble v0.0.0-20200818122324-146f9205029b layeh.com/gumble/gumble