mirror of https://github.com/cwinfo/matterbridge.git synced 2024-11-21 21:40:27 +00:00

Bump github.com/mattermost/mattermost-server/v6 from 6.1.0 to 6.3.0 (#1686)

Bumps [github.com/mattermost/mattermost-server/v6](https://github.com/mattermost/mattermost-server) from 6.1.0 to 6.3.0.
- [Release notes](https://github.com/mattermost/mattermost-server/releases)
- [Changelog](https://github.com/mattermost/mattermost-server/blob/master/CHANGELOG.md)
- [Commits](https://github.com/mattermost/mattermost-server/compare/v6.1.0...v6.3.0)

---
updated-dependencies:
- dependency-name: github.com/mattermost/mattermost-server/v6
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
dependabot[bot] authored 2022-01-18 20:24:14 +01:00, committed via GitHub (GPG Key ID: 4AEE18F83AFDEB23)
parent fecca57507 · commit aad60c882e
250 changed files with 43143 additions and 11479 deletions

go.mod · 17 changes

@@ -30,7 +30,7 @@ require (
 	github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba
 	github.com/matterbridge/matterclient v0.0.0-20211107234719-faca3cd42315
 	github.com/mattermost/mattermost-server/v5 v5.39.3
-	github.com/mattermost/mattermost-server/v6 v6.1.0
+	github.com/mattermost/mattermost-server/v6 v6.3.0
 	github.com/mattn/godown v0.0.1
 	github.com/missdeer/golib v1.0.4
 	github.com/nelsonken/gomf v0.0.0-20180504123937-a9dd2f9deae9
@@ -73,6 +73,7 @@ require (
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kettek/apng v0.0.0-20191108220231-414630eed80f // indirect
+	github.com/klauspost/compress v1.13.6 // indirect
 	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
 	github.com/labstack/gommon v0.3.0 // indirect
 	github.com/magiconair/properties v1.8.5 // indirect
@@ -80,12 +81,12 @@ require (
 	github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d // indirect
 	github.com/mattermost/logr v1.0.13 // indirect
 	github.com/mattermost/logr/v2 v2.0.15 // indirect
-	github.com/mattn/go-colorable v0.1.11 // indirect
+	github.com/mattn/go-colorable v0.1.12 // indirect
 	github.com/mattn/go-isatty v0.0.14 // indirect
 	github.com/mattn/go-runewidth v0.0.13 // indirect
 	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
-	github.com/minio/minio-go/v7 v7.0.14 // indirect
+	github.com/minio/minio-go/v7 v7.0.16 // indirect
 	github.com/minio/sha256-simd v1.0.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.4.2 // indirect
@@ -113,21 +114,23 @@ require (
 	github.com/tinylib/msgp v1.1.6 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
 	github.com/valyala/fasttemplate v1.2.1 // indirect
+	github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
+	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
 	github.com/wiggin77/cfg v1.0.2 // indirect
 	github.com/wiggin77/merror v1.0.3 // indirect
 	github.com/wiggin77/srslog v1.0.1 // indirect
 	go.uber.org/atomic v1.9.0 // indirect
 	go.uber.org/multierr v1.7.0 // indirect
 	go.uber.org/zap v1.17.0 // indirect
-	golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
-	golang.org/x/net v0.0.0-20211006190231-62292e806868 // indirect
-	golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac // indirect
+	golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 // indirect
+	golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9 // indirect
+	golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
 	golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/protobuf v1.27.1 // indirect
-	gopkg.in/ini.v1 v1.63.2 // indirect
+	gopkg.in/ini.v1 v1.64.0 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect

go.sum · 612 changes

File diff suppressed because it is too large.

vendor/github.com/klauspost/compress/LICENSE · 304 additions (generated, vendored, new file)

@@ -0,0 +1,304 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2019 Klaus Post. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------------------
Files: gzhttp/*
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016-2017 The New York Times Company
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------
Files: s2/cmd/internal/readahead/*
The MIT License (MIT)
Copyright (c) 2015 Klaus Post
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---------------------
Files: snappy/*
Files: internal/snapref/*
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----------------
Files: s2/cmd/internal/filepathx/*
Copyright 2016 The filepathx Authors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/klauspost/compress/s2/.gitignore · 15 additions (generated, vendored, new file)

@@ -0,0 +1,15 @@
testdata/bench
# These explicitly listed benchmark data files are for an obsolete version of
# snappy_test.go.
testdata/alice29.txt
testdata/asyoulik.txt
testdata/fireworks.jpeg
testdata/geo.protodata
testdata/html
testdata/html_x_4
testdata/kppkn.gtb
testdata/lcet10.txt
testdata/paper-100k.pdf
testdata/plrabn12.txt
testdata/urls.10K

vendor/github.com/klauspost/compress/s2/LICENSE · 28 additions (generated, vendored, new file)

@@ -0,0 +1,28 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Copyright (c) 2019 Klaus Post. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/klauspost/compress/s2/README.md · 717 additions (generated, vendored, new file)

@@ -0,0 +1,717 @@
# S2 Compression
S2 is an extension of [Snappy](https://github.com/google/snappy).
S2 is aimed at high throughput, which is why it features concurrent compression for bigger payloads.
Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy.
This means that S2 can seamlessly replace Snappy without converting compressed content.
S2 can produce Snappy compatible output, faster and better than Snappy.
If you want the full benefit of the changes, you should use s2 without Snappy compatibility.
S2 is designed to have high throughput on content that cannot be compressed.
This is important, so you don't have to worry about spending CPU cycles on already compressed data.
## Benefits over Snappy
* Better compression
* Adjustable compression (3 levels)
* Concurrent stream compression
* Faster decompression, even for Snappy compatible content
* Ability to quickly skip forward in compressed stream
* Compatible with reading Snappy compressed content
* Smaller block size overhead on incompressible blocks
* Block concatenation
* Uncompressed stream mode
* Automatic stream size padding
* Snappy compatible block compression
## Drawbacks over Snappy
* Not optimized for 32 bit systems.
* Streams use slightly more memory due to larger blocks and concurrency (configurable).
# Usage
Installation: `go get -u github.com/klauspost/compress/s2`
Full package documentation:
[![godoc][1]][2]
[1]: https://godoc.org/github.com/klauspost/compress?status.svg
[2]: https://godoc.org/github.com/klauspost/compress/s2
## Compression
```Go
func EncodeStream(src io.Reader, dst io.Writer) error {
	enc := s2.NewWriter(dst)
	_, err := io.Copy(enc, src)
	if err != nil {
		enc.Close()
		return err
	}
	// Blocks until compression is done.
	return enc.Close()
}
```
You should always call `enc.Close()`, otherwise you will leak resources and your encode will be incomplete.
For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method.
The Writer in S2 is always buffered, therefore `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2.
It is possible to flush any buffered data using the `Flush()` method.
This will block until all data sent to the encoder has been written to the output.
S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader.
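Reusing a `Writer` across several streams might look like the sketch below (assuming `s2` is `github.com/klauspost/compress/s2`; the helper name is illustrative):
```Go
func compressFiles(dsts []io.Writer, srcs []io.Reader) error {
	enc := s2.NewWriter(nil) // the target is set per stream via Reset
	for i, src := range srcs {
		enc.Reset(dsts[i]) // reuse internal buffers for the next stream
		if _, err := io.Copy(enc, src); err != nil {
			enc.Close()
			return err
		}
		if err := enc.Close(); err != nil { // blocks until compression is done
			return err
		}
	}
	return nil
}
```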
As a final method to compress data, if you have a single block of data you would like to have encoded as a stream,
a slightly more efficient method is to use the `EncodeBuffer` method.
This will take ownership of the buffer until the stream is closed.
```Go
func EncodeStream(src []byte, dst io.Writer) error {
	enc := s2.NewWriter(dst)
	// The encoder owns the buffer until Flush or Close is called.
	err := enc.EncodeBuffer(src)
	if err != nil {
		enc.Close()
		return err
	}
	// Blocks until compression is done.
	return enc.Close()
}
```
Each call to `EncodeBuffer` will result in discrete blocks being created without buffering,
so it should only be used a single time per stream.
If you need to write several blocks, you should use the regular io.Writer interface.
## Decompression
```Go
func DecodeStream(src io.Reader, dst io.Writer) error {
	dec := s2.NewReader(src)
	_, err := io.Copy(dst, dec)
	return err
}
```
Similar to the Writer, a Reader can be reused using the `Reset` method.
For the best possible throughput, there is an `EncodeBuffer(buf []byte)` function available.
However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed.
For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`.
Do however note that these functions (similar to Snappy) do not provide validation of data,
so data corruption may be undetected. Stream encoding provides CRC checks of data.
It is possible to efficiently skip forward in a compressed stream using the `Skip()` method.
For big skips the decompressor is able to skip blocks without decompressing them.
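As a sketch, skipping decompressed output could look like this (assuming `s2` is `github.com/klauspost/compress/s2`; the 1MB offset and function name are illustrative):
```Go
func readTail(compressed io.Reader, dst io.Writer) error {
	dec := s2.NewReader(compressed)
	// Skip the first 1MB of decompressed output; whole blocks are
	// skipped without being decompressed when possible.
	if err := dec.Skip(1 << 20); err != nil {
		return err
	}
	_, err := io.Copy(dst, dec)
	return err
}
```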
## Single Blocks
Similar to Snappy, S2 offers single block compression.
Blocks do not offer the same flexibility and safety as streams,
but may be preferable for very small payloads, less than 100K.
Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result.
It is possible to provide a destination buffer.
If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used.
If not, a new one will be allocated.
Alternatively `EncodeBetter`/`EncodeBest` can also be used for better, but slightly slower compression.
Similarly to decompress a block you can use `dst, err := s2.Decode(nil, src)`.
Again an optional destination buffer can be supplied.
The `s2.DecodedLen(src)` can be used to get the minimum capacity needed.
If that is not satisfied a new buffer will be allocated.
Block functions always operate on a single goroutine since they should only be used for small payloads.
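A minimal sketch of a block round trip with preallocated buffers (the function name is illustrative):
```Go
func roundTrip(src []byte) ([]byte, error) {
	// A destination with capacity MaxEncodedLen(len(src)) is used as-is.
	compressed := s2.Encode(make([]byte, s2.MaxEncodedLen(len(src))), src)

	// DecodedLen reports the minimum capacity needed to decode.
	n, err := s2.DecodedLen(compressed)
	if err != nil {
		return nil, err
	}
	return s2.Decode(make([]byte, n), compressed)
}
```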
# Commandline tools
Some very simple commandline tools are provided; `s2c` for compression and `s2d` for decompression.
Binaries can be downloaded on the [Releases Page](https://github.com/klauspost/compress/releases).
Installing them requires Go to be installed. To install them, use:
`go install github.com/klauspost/compress/s2/cmd/s2c && go install github.com/klauspost/compress/s2/cmd/s2d`
To build binaries to the current folder use:
`go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d`
## s2c
```
Usage: s2c [options] file1 file2
Compresses all files supplied as input separately.
Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'.
By default output files will be overwritten.
Use - as the only file name to read from stdin and write to stdout.
Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
File names beginning with 'http://' and 'https://' will be downloaded and compressed.
Only http response code 200 is accepted.
Options:
-bench int
Run benchmark n times. No output will be written
-blocksize string
Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M")
-c Write all output to stdout. Multiple input files will be concatenated
-cpu int
Compress using this amount of threads (default 32)
-faster
Compress faster, but with a minor compression loss
-help
Display help
-o string
Write output to another file. Single input file only
-pad string
Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1")
-q Don't write any output to terminal, except errors
-rm
Delete source file(s) after successful compression
-safe
Do not overwrite output files
-slower
Compress more, but a lot slower
-snappy
Generate Snappy compatible output stream
-verify
Verify written files
```
## s2d
```
Usage: s2d [options] file1 file2
Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'.
Output file names have the extension removed. By default output files will be overwritten.
Use - as the only file name to read from stdin and write to stdout.
Wildcards are accepted: testdir/*.txt will decompress all files in testdir ending with .txt
Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
File names beginning with 'http://' and 'https://' will be downloaded and decompressed.
Extensions on downloaded files are ignored. Only http response code 200 is accepted.
Options:
-bench int
Run benchmark n times. No output will be written
-c Write all output to stdout. Multiple input files will be concatenated
-help
Display help
-o string
Write output to another file. Single input file only
-q Don't write any output to terminal, except errors
-rm
Delete source file(s) after successful decompression
-safe
Do not overwrite output files
-verify
Verify files, but do not write output
```
## s2sx: self-extracting archives
s2sx allows creating self-extracting archives with no dependencies.
By default, executables are created for the same platforms as the host os,
but this can be overridden with `-os` and `-arch` parameters.
Extracted files have 0666 permissions, except when the untar option is used.
```
Usage: s2sx [options] file1 file2
Compresses all files supplied as input separately.
If files have '.s2' extension they are assumed to be compressed already.
Output files are written as 'filename.s2sx' and with '.exe' for windows targets.
If output is big, an additional file with ".more" is written. This must be included as well.
By default output files will be overwritten.
Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
Options:
-arch string
Destination architecture (default "amd64")
-c Write all output to stdout. Multiple input files will be concatenated
-cpu int
Compress using this amount of threads (default 32)
-help
Display help
-max string
Maximum executable size. Rest will be written to another file. (default "1G")
-os string
Destination operating system (default "windows")
-q Don't write any output to terminal, except errors
-rm
Delete source file(s) after successful compression
-safe
Do not overwrite output files
-untar
Untar on destination
```
Available platforms are:
* darwin-amd64
* darwin-arm64
* linux-amd64
* linux-arm
* linux-arm64
* linux-mips64
* linux-ppc64le
* windows-386
* windows-amd64
By default, there is a size limit of 1GB for the output executable.
When this is exceeded the remaining file content is written to a file called
output+`.more`. This file must be included and placed alongside the executable
for a successful extraction.
This file *must* have the same name as the executable, so if the executable is renamed,
so must the `.more` file.
This functionality is disabled with stdin/stdout.
### Self-extracting TAR files
If you wrap a TAR file you can specify `-untar` to make it untar on the destination host.
Files are extracted to the current folder with the path specified in the tar file.
Note that tar files are not validated before they are wrapped.
For security reasons files that move below the root folder are not allowed.
# Performance
This section will focus on comparisons to Snappy.
This package is solely aimed at replacing Snappy as a high speed compression package.
If you are mainly looking for better compression [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd)
gives better compression, but typically at speeds slightly below "better" mode in this package.
Compression is increased compared to Snappy, mostly around 5-20% and the throughput is typically 25-40% increased (single threaded) compared to the Snappy Go implementation.
Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput.
A "better" compression mode is also available. This allows to trade a bit of speed for a minor compression gain.
The content compressed in this mode is fully compatible with the standard decoder.
Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
| File | S2 speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
|-----------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------|
| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 12.70x | 10556 MB/s | 7.35% | 4.15x | 3455 MB/s | 12.79% |
| (1 CPU) | 1.14x | 948 MB/s | - | 0.42x | 349 MB/s | - |
| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 17.13x | 14484 MB/s | 31.60% | 10.09x | 8533 MB/s | 37.71% |
| (1 CPU) | 1.33x | 1127 MB/s | - | 0.70x | 589 MB/s | - |
| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12000 MB/s | -5.79% | 6.59x | 5223 MB/s | 5.80% |
| (1 CPU) | 1.11x | 877 MB/s | - | 0.47x | 370 MB/s | - |
| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 14.62x | 12116 MB/s | 15.90% | 5.35x | 4430 MB/s | 16.08% |
| (1 CPU) | 1.38x | 1146 MB/s | - | 0.38x | 312 MB/s | - |
| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 8.83x | 17579 MB/s | 43.86% | 6.54x | 13011 MB/s | 47.23% |
| (1 CPU) | 1.14x | 2259 MB/s | - | 0.74x | 1475 MB/s | - |
| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 16.72x | 14019 MB/s | 24.02% | 10.11x | 8477 MB/s | 30.48% |
| (1 CPU) | 1.24x | 1043 MB/s | - | 0.70x | 586 MB/s | - |
| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9254 MB/s | 1.84% | 6.75x | 4686 MB/s | 6.72% |
| (1 CPU) | 0.97x | 672 MB/s | - | 0.53x | 366 MB/s | - |
| sharnd.out.2gb | 2.11x | 12639 MB/s | 0.01% | 1.98x | 11833 MB/s | 0.01% |
| (1 CPU) | 0.93x | 5594 MB/s | - | 1.34x | 8030 MB/s | - |
| [enwik9](http://mattmahoney.net/dc/textdata.html) | 19.34x | 8220 MB/s | 3.98% | 7.87x | 3345 MB/s | 15.82% |
| (1 CPU) | 1.06x | 452 MB/s | - | 0.50x | 213 MB/s | - |
| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 10.48x | 6124 MB/s | 5.67% | 3.76x | 2197 MB/s | 12.60% |
| (1 CPU) | 0.97x | 568 MB/s | - | 0.46x | 271 MB/s | - |
| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 21.07x | 9020 MB/s | 6.36% | 6.91x | 2959 MB/s | 16.95% |
| (1 CPU) | 1.07x | 460 MB/s | - | 0.51x | 220 MB/s | - |
### Legend
* `S2 speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
* `S2 throughput`: Throughput of S2 in MB/s.
* `S2 % smaller`: How much smaller the S2 output is than the Snappy output, in percent.
* `S2 "better"`: Speed of S2's "better" compression mode compared to Snappy.
* `"better" throughput`: Throughput of S2's "better" mode in MB/s.
* `"better" % smaller`: How much smaller the S2 "better" output is than the Snappy output, in percent.
There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
Machine generated data gets by far the biggest compression boost, with size being reduced by up to 45% of the Snappy output size.
The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup.
This is likely dominated by synchronization overhead, which is confirmed by the fact that single threaded performance is higher (see above).
## Decompression
S2 attempts to create content that is also fast to decompress, except in "better" mode where the smallest representation is used.
S2 vs Snappy **decompression** speed. Both operating on single core:
| File | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy |
|-----------------------------------------------------------------------------------------------------|---------------|------------|-------------------|------------|
| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 2117 MB/s | 1.14x | 1738 MB/s | 0.94x |
| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 2401 MB/s | 1.25x | 2307 MB/s | 1.20x |
| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 2075 MB/s | 0.98x | 1764 MB/s | 0.83x |
| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 2967 MB/s | 1.05x | 2885 MB/s | 1.02x |
| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 4141 MB/s | 1.07x | 4184 MB/s | 1.08x |
| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 2264 MB/s | 1.12x | 2185 MB/s | 1.08x |
| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 1525 MB/s | 1.03x | 1347 MB/s | 0.91x |
| sharnd.out.2gb | 3813 MB/s | 0.79x | 3900 MB/s | 0.81x |
| [enwik9](http://mattmahoney.net/dc/textdata.html) | 1246 MB/s | 1.29x | 967 MB/s | 1.00x |
| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 1433 MB/s | 1.12x | 1203 MB/s | 0.94x |
| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 1284 MB/s | 1.32x | 1010 MB/s | 1.04x |
### Legend
* `S2 Throughput`: Decompression speed of S2 encoded content.
* `Better Throughput`: Decompression speed of S2 "better" encoded content.
* `vs Snappy`: Decompression speed relative to Snappy for the same content.
While the decompression code hasn't changed, there is a significant improvement in decompression speed.
S2 prefers longer matches and will typically only find matches that are 6 bytes or longer.
While this reduces compression a bit, it improves decompression speed.
The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy.
Decompression is also very fast without assembly. Single-goroutine decompression speed, no assembly:
| File | Speedup vs. Snappy | Throughput |
|--------------------------------|--------------------|---------------|
| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s |
| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s |
| enwik9.s2 | 1.67x | 681.53 MB/s |
| adresser.json.s2 | 3.41x | 4230.53 MB/s |
| silesia.tar.s2 | 1.52x | 811.58 MB/s |
Even though S2 typically compresses better than Snappy, decompression speed is always better.
## Block compression
When compressing blocks, no concurrent compression is performed, just as with Snappy.
This is because blocks are for smaller payloads and generally will not benefit from concurrent compression.
An important change is that incompressible blocks will be at most 10 bytes bigger than the input.
In rare, worst-case scenarios Snappy blocks could be significantly bigger than the input.
### Mixed content blocks
The most reliable benchmark is a wide dataset.
For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
| * | Input | Output | Reduction | MB/s |
|-------------------|------------|------------|-----------|--------|
| S2 | 4014735833 | 1059723369 | 73.60% | **934.34** |
| S2 Better | 4014735833 | 969670507 | 75.85% | 532.70 |
| S2 Best | 4014735833 | 906625668 | **77.85%** | 46.84 |
| Snappy | 4014735833 | 1128706759 | 71.89% | 762.59 |
| S2, Snappy Output | 4014735833 | 1093821420 | 72.75% | 908.60 |
| LZ4 | 4014735833 | 1079259294 | 73.12% | 526.94 |
S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best".
"Better" mode provides the same compression speed as LZ4 with better compression ratio.
When producing Snappy compatible output it still delivers better throughput (150MB/s more) and better compression.
As can be seen from the other benchmarks decompression should also be easier on the S2 generated output.
Though they cannot be compared directly due to different decompression speeds, here are the speed/size comparisons for
other Go compressors:
| * | Input | Output | Reduction | MB/s |
|-------------------|------------|------------|-----------|--------|
| Zstd Fastest (Go) | 4014735833 | 794608518 | 80.21% | 236.04 |
| Zstd Best (Go) | 4014735833 | 704603356 | 82.45% | 35.63 |
| Deflate (Go) l1 | 4014735833 | 871294239 | 78.30% | 214.04 |
| Deflate (Go) l9 | 4014735833 | 730389060 | 81.81% | 41.17 |
### Standard block compression
Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
So individual benchmarks should only be seen as a guideline and the overall picture is more important.
These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark see the mixed content above.
Block compression. Parallel benchmark running on 16 cores, 16 goroutines.
AMD64 assembly is used for both S2 and Snappy.
| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec |
|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
| html | 22843 | 21111 | 16246 MB/s | 17438 MB/s | 40972 MB/s | 49263 MB/s |
| urls.10K | 335492 | 287326 | 7943 MB/s | 9693 MB/s | 22523 MB/s | 26484 MB/s |
| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 273889 MB/s | 718321 MB/s | 827552 MB/s |
| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 17773 MB/s | 33691 MB/s | 52421 MB/s |
| paper-100k.pdf | 85304 | 84459 | 167546 MB/s | 101263 MB/s | 326905 MB/s | 291944 MB/s |
| html_x_4 | 92234 | 21113 | 15194 MB/s | 50670 MB/s | 30843 MB/s | 32217 MB/s |
| alice29.txt | 88034 | 85975 | 5936 MB/s | 6139 MB/s | 12882 MB/s | 20044 MB/s |
| asyoulik.txt | 77503 | 79650 | 5517 MB/s | 6366 MB/s | 12735 MB/s | 22806 MB/s |
| lcet10.txt | 234661 | 220670 | 6235 MB/s | 6067 MB/s | 14519 MB/s | 18697 MB/s |
| plrabn12.txt | 319267 | 317985 | 5159 MB/s | 5726 MB/s | 11923 MB/s | 19901 MB/s |
| geo.protodata | 23335 | 18690 | 21220 MB/s | 26529 MB/s | 56271 MB/s | 62540 MB/s |
| kppkn.gtb | 69526 | 65312 | 9732 MB/s | 8559 MB/s | 18491 MB/s | 18969 MB/s |
| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 15489 MB/s | 31883 MB/s | 38874 MB/s |
| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13000 MB/s | 48056 MB/s | 52341 MB/s |
| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12806 MB/s | 32378 MB/s | 46322 MB/s |
| alice29.txt (20000B) | 12686 | 13574 | 7733 MB/s | 11210 MB/s | 30566 MB/s | 58969 MB/s |
| Relative Perf | Snappy size | S2 size improved | S2 Speed | S2 Dec Speed |
|-----------------------|-------------|------------------|----------|--------------|
| html | 22.31% | 7.58% | 1.07x | 1.20x |
| urls.10K | 47.78% | 14.36% | 1.22x | 1.18x |
| fireworks.jpeg | 99.95% | -0.05% | 0.78x | 1.15x |
| fireworks.jpeg (200B) | 73.00% | -6.16% | 2.00x | 1.56x |
| paper-100k.pdf | 83.30% | 0.99% | 0.60x | 0.89x |
| html_x_4 | 22.52% | 77.11% | 3.33x | 1.04x |
| alice29.txt | 57.88% | 2.34% | 1.03x | 1.56x |
| asyoulik.txt | 61.91% | -2.77% | 1.15x | 1.79x |
| lcet10.txt | 54.99% | 5.96% | 0.97x | 1.29x |
| plrabn12.txt | 66.26% | 0.40% | 1.11x | 1.67x |
| geo.protodata | 19.68% | 19.91% | 1.25x | 1.11x |
| kppkn.gtb | 37.72% | 6.06% | 0.88x | 1.03x |
| alice29.txt (128B) | 62.50% | -2.50% | 2.31x | 1.22x |
| alice29.txt (1000B) | 77.40% | 0.00% | 1.07x | 1.09x |
| alice29.txt (10000B) | 66.48% | -4.29% | 1.27x | 1.43x |
| alice29.txt (20000B) | 63.43% | -7.00% | 1.45x | 1.93x |
Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
Decompression speed is better than Snappy, except in one case.
Since payloads are very small the variance in terms of size is rather big, so they should only be seen as a general guideline.
Size is on average around Snappy, but varies on content type.
In cases where compression is worse, it usually is compensated by a speed boost.
### Better compression
Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
So individual benchmarks should only be seen as a guideline and the overall picture is more important.
| Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec |
|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
| html | 22843 | 19833 | 16246 MB/s | 7731 MB/s | 40972 MB/s | 40292 MB/s |
| urls.10K | 335492 | 253529 | 7943 MB/s | 3980 MB/s | 22523 MB/s | 20981 MB/s |
| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 9760 MB/s | 718321 MB/s | 823698 MB/s |
| fireworks.jpeg (200B) | 146 | 142 | 8869 MB/s | 594 MB/s | 33691 MB/s | 30101 MB/s |
| paper-100k.pdf | 85304 | 82915 | 167546 MB/s | 7470 MB/s | 326905 MB/s | 198869 MB/s |
| html_x_4 | 92234 | 19841 | 15194 MB/s | 23403 MB/s | 30843 MB/s | 30937 MB/s |
| alice29.txt | 88034 | 73218 | 5936 MB/s | 2945 MB/s | 12882 MB/s | 16611 MB/s |
| asyoulik.txt | 77503 | 66844 | 5517 MB/s | 2739 MB/s | 12735 MB/s | 14975 MB/s |
| lcet10.txt | 234661 | 190589 | 6235 MB/s | 3099 MB/s | 14519 MB/s | 16634 MB/s |
| plrabn12.txt | 319267 | 270828 | 5159 MB/s | 2600 MB/s | 11923 MB/s | 13382 MB/s |
| geo.protodata | 23335 | 18278 | 21220 MB/s | 11208 MB/s | 56271 MB/s | 57961 MB/s |
| kppkn.gtb | 69526 | 61851 | 9732 MB/s | 4556 MB/s | 18491 MB/s | 16524 MB/s |
| alice29.txt (128B) | 80 | 81 | 6691 MB/s | 529 MB/s | 31883 MB/s | 34225 MB/s |
| alice29.txt (1000B) | 774 | 748 | 12204 MB/s | 1943 MB/s | 48056 MB/s | 42068 MB/s |
| alice29.txt (10000B) | 6648 | 6234 | 10044 MB/s | 2949 MB/s | 32378 MB/s | 28813 MB/s |
| alice29.txt (20000B) | 12686 | 11584 | 7733 MB/s | 2822 MB/s | 30566 MB/s | 27315 MB/s |
| Relative Perf | Snappy size | Better size | Better Speed | Better dec |
|-----------------------|-------------|-------------|--------------|------------|
| html | 22.31% | 13.18% | 0.48x | 0.98x |
| urls.10K | 47.78% | 24.43% | 0.50x | 0.93x |
| fireworks.jpeg | 99.95% | -0.05% | 0.03x | 1.15x |
| fireworks.jpeg (200B) | 73.00% | 2.74% | 0.07x | 0.89x |
| paper-100k.pdf | 83.30% | 2.80% | 0.07x | 0.61x |
| html_x_4 | 22.52% | 78.49% | 0.04x | 1.00x |
| alice29.txt | 57.88% | 16.83% | 1.54x | 1.29x |
| asyoulik.txt | 61.91% | 13.75% | 0.50x | 1.18x |
| lcet10.txt | 54.99% | 18.78% | 0.50x | 1.15x |
| plrabn12.txt | 66.26% | 15.17% | 0.50x | 1.12x |
| geo.protodata | 19.68% | 21.67% | 0.50x | 1.03x |
| kppkn.gtb | 37.72% | 11.04% | 0.53x | 0.89x |
| alice29.txt (128B) | 62.50% | -1.25% | 0.47x | 1.07x |
| alice29.txt (1000B) | 77.40% | 3.36% | 0.08x | 0.88x |
| alice29.txt (10000B) | 66.48% | 6.23% | 0.16x | 0.89x |
| alice29.txt (20000B) | 63.43% | 8.69% | 0.29x | 0.89x |
Except for the mostly incompressible JPEG image, compression is better and usually in the
double digits in terms of percentage reduction over Snappy.
The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder
to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down.
This mode aims to provide better compression at the expense of performance and achieves that
without a huge performance penalty, except on very small blocks.
Decompression speed suffers a little compared to the regular S2 mode,
but still manages to be close to Snappy in spite of increased compression.
# Best compression mode
S2 offers a "best" compression mode.
This will compress as much as possible with little regard to CPU usage.
Mainly for offline compression, but where decompression speed should still
be high and compatible with other S2 compressed data.
Some examples compared on 16 core CPU, amd64 assembly used:
```
* enwik10
Default... 10000000000 -> 4761467548 [47.61%]; 1.098s, 8685.6MB/s
Better... 10000000000 -> 4219438251 [42.19%]; 1.925s, 4954.2MB/s
Best... 10000000000 -> 3627364337 [36.27%]; 43.051s, 221.5MB/s
* github-june-2days-2019.json
Default... 6273951764 -> 1043196283 [16.63%]; 431ms, 13882.3MB/s
Better... 6273951764 -> 949146808 [15.13%]; 547ms, 10938.4MB/s
Best... 6273951764 -> 832855506 [13.27%]; 9.455s, 632.8MB/s
* nyc-taxi-data-10M.csv
Default... 3325605752 -> 1095998837 [32.96%]; 324ms, 9788.7MB/s
Better... 3325605752 -> 954776589 [28.71%]; 491ms, 6459.4MB/s
Best... 3325605752 -> 779098746 [23.43%]; 8.29s, 382.6MB/s
* 10gb.tar
Default... 10065157632 -> 5916578242 [58.78%]; 1.028s, 9337.4MB/s
Better... 10065157632 -> 5649207485 [56.13%]; 1.597s, 6010.6MB/s
Best... 10065157632 -> 5208719802 [51.75%]; 32.78s, 292.8MB/s
* consensus.db.10gb
Default... 10737418240 -> 4562648848 [42.49%]; 882ms, 11610.0MB/s
Better... 10737418240 -> 4542428129 [42.30%]; 1.533s, 6679.7MB/s
Best... 10737418240 -> 4244773384 [39.53%]; 42.96s, 238.4MB/s
```
Decompression speed should be around the same as using the 'better' compression mode.
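As a sketch, "best" mode can be selected per block via `EncodeBest` (mentioned above) or per stream via a writer option; the `WriterBestCompression` option name here is an assumption about this package's API:
```Go
// Block form: compress src as much as possible.
dst := s2.EncodeBest(nil, src)

// Stream form: a writer that uses best compression (assumed option name).
enc := s2.NewWriter(w, s2.WriterBestCompression())
```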
# Snappy Compatibility
S2 now offers full compatibility with Snappy.
This means that the efficient encoders of S2 can be used to generate fully Snappy compatible output.
There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by
simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`.
This uses "better" mode for all operations.
If you would like more control, you can use the s2 package as described below:
## Blocks
Snappy compatible blocks can be generated with the S2 encoder.
Compression and speed are typically a bit better, and `MaxEncodedLen` is also smaller, giving lower memory usage. Replace:
| Snappy | S2 replacement |
|----------------------------|-------------------------|
| snappy.Encode(...) | s2.EncodeSnappy(...) |
| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output.
`s2.ConcatBlocks` is compatible with snappy blocks.
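A minimal sketch of producing a Snappy compatible block (the helper name is illustrative):
```Go
func toSnappyBlock(src []byte) []byte {
	dst := make([]byte, s2.MaxEncodedLen(len(src)))
	// EncodeSnappyBetter/EncodeSnappyBest trade speed for smaller output.
	return s2.EncodeSnappy(dst, src)
}
```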
Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
| Encoder | Size | MB/s | Reduction |
|-----------------------|------------|--------|-----------|
| snappy.Encode | 1128706759 | 725.59 | 71.89% |
| s2.EncodeSnappy | 1093823291 | 899.16 | 72.75% |
| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% |
| s2.EncodeSnappyBest | 944507998 | 66.00 | 76.47% |
## Streams
For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`.
All other options are available, but note that the block size limit is different for Snappy.
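A sketch of a Snappy compatible stream writer (the helper name is illustrative):
```Go
func writeSnappyStream(dst io.Writer, src io.Reader) error {
	// WriterSnappyCompat limits block size so any Snappy decoder can read it.
	enc := s2.NewWriter(dst, s2.WriterSnappyCompat())
	if _, err := io.Copy(enc, src); err != nil {
		enc.Close()
		return err
	}
	return enc.Close()
}
```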
Comparison of different streams, AMD Ryzen 3950x, 16 cores. Size and throughput:
| File | snappy.NewWriter | S2 Snappy | S2 Snappy, Better | S2 Snappy, Best |
|-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------|
| nyc-taxi-data-10M.csv | 1316042016 - 517.54MB/s | 1307003093 - 8406.29MB/s | 1174534014 - 4984.35MB/s | 1115904679 - 177.81MB/s |
| enwik10 | 5088294643 - 433.45MB/s | 5175840939 - 8454.52MB/s | 4560784526 - 4403.10MB/s | 4340299103 - 159.71MB/s |
| 10gb.tar | 6056946612 - 703.25MB/s | 6208571995 - 9035.75MB/s | 5741646126 - 2402.08MB/s | 5548973895 - 171.17MB/s |
| github-june-2days-2019.json | 1525176492 - 908.11MB/s | 1476519054 - 12625.93MB/s | 1400547532 - 6163.61MB/s | 1321887137 - 200.71MB/s |
| consensus.db.10gb | 5412897703 - 1054.38MB/s | 5354073487 - 12634.82MB/s | 5335069899 - 2472.23MB/s | 5201000954 - 166.32MB/s |
## Decompression
All decompression functions map directly to equivalent s2 functions.
| Snappy | S2 replacement |
|------------------------|--------------------|
| snappy.Decode(...) | s2.Decode(...) |
| snappy.DecodedLen(...) | s2.DecodedLen(...) |
| snappy.NewReader(...) | s2.NewReader(...) |
Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip)
are also available for Snappy streams.
If you know you are only decompressing snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize)
on your Reader will reduce memory consumption.
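For example (a sketch; `r` is your compressed input):
```Go
// Cap block size at 64KB when the input is known to be Snappy framed,
// reducing the Reader's internal buffer allocations.
dec := s2.NewReader(r, s2.ReaderMaxBlockSize(64<<10))
```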
# Concatenating blocks and streams.
Concatenating streams will concatenate the output of both without recompressing them.
While this is inefficient in terms of compression it might be usable in certain scenarios.
The 10 byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement.
Blocks can be concatenated using the `ConcatBlocks` function.
Snappy blocks/streams can safely be concatenated with S2 blocks and streams.
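A sketch (assuming `blockA` and `blockB` hold previously encoded blocks):
```Go
// Join two compressed blocks; the result decodes to the concatenation
// of the two original inputs.
joined, err := s2.ConcatBlocks(nil, blockA, blockB)
```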
# Format Extensions
* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
* [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB).
* Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset.
Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0.
The length is found by reading the 3-bit length field in the tag and decoding it using this table:
| Length | Actual Length |
|--------|----------------------|
| 0 | 4 |
| 1 | 5 |
| 2 | 6 |
| 3 | 7 |
| 4 | 8 |
| 5 | 8 + read 1 byte |
| 6 | 260 + read 2 bytes |
| 7 | 65540 + read 3 bytes |
This allows any repeat offset + length to be represented by 2 to 5 bytes.
Lengths are stored as little endian values.
The first copy of a block cannot be a repeat offset and the offset is not carried across blocks in streams.
Default streaming block size is 1MB.
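As an illustrative sketch (not part of the s2 API), the actual length can be derived from the 3-bit length code and the little-endian bytes that follow, per the table above:
```Go
// repeatLength returns the copy length and how many extra length bytes
// were consumed, given the 3-bit code from the tag and the bytes after it.
func repeatLength(code uint8, extra []byte) (length, extraBytes int) {
	switch {
	case code <= 4:
		return int(code) + 4, 0
	case code == 5:
		return 8 + int(extra[0]), 1
	case code == 6:
		return 260 + int(extra[0]) + int(extra[1])<<8, 2
	default: // code == 7
		return 65540 + int(extra[0]) + int(extra[1])<<8 + int(extra[2])<<16, 3
	}
}
```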
# LICENSE
This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.

vendor/github.com/klauspost/compress/s2/decode.go · 565 additions (generated, vendored, new file)

@@ -0,0 +1,565 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2

import (
	"encoding/binary"
	"errors"
	"io"
)
var (
	// ErrCorrupt reports that the input is invalid.
	ErrCorrupt = errors.New("s2: corrupt input")
	// ErrCRC reports that the input failed CRC validation (streams only)
	ErrCRC = errors.New("s2: corrupt input, crc mismatch")
	// ErrTooLarge reports that the uncompressed length is too large.
	ErrTooLarge = errors.New("s2: decoded block is too large")
	// ErrUnsupported reports that the input isn't supported.
	ErrUnsupported = errors.New("s2: unsupported input")
)
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
	v, _, err := decodedLen(src)
	return v, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
	v, n := binary.Uvarint(src)
	if n <= 0 || v > 0xffffffff {
		return 0, 0, ErrCorrupt
	}
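	// wordSize is 64 on 64-bit platforms and 32 on 32-bit platforms,
	// so the check below rejects lengths that cannot fit in an int.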
	const wordSize = 32 << (^uint(0) >> 32 & 1)
	if wordSize == 32 && v > 0x7fffffff {
		return 0, 0, ErrTooLarge
	}
	return int(v), n, nil
}
const (
decodeErrCodeCorrupt = 1
)
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if dLen <= cap(dst) {
dst = dst[:dLen]
} else {
dst = make([]byte, dLen)
}
if s2Decode(dst, src[s:]) != 0 {
return nil, ErrCorrupt
}
return dst, nil
}
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes.
func NewReader(r io.Reader, opts ...ReaderOption) *Reader {
nr := Reader{
r: r,
maxBlock: maxBlockSize,
}
for _, opt := range opts {
if err := opt(&nr); err != nil {
nr.err = err
return &nr
}
}
nr.maxBufSize = MaxEncodedLen(nr.maxBlock) + checksumSize
if nr.lazyBuf > 0 {
nr.buf = make([]byte, MaxEncodedLen(nr.lazyBuf)+checksumSize)
} else {
nr.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize)
}
nr.paramsOK = true
return &nr
}
// ReaderOption is an option for creating a decoder.
type ReaderOption func(*Reader) error
// ReaderMaxBlockSize allows controlling allocations if the stream
// has been compressed with a smaller WriterBlockSize, or with the default 1MB.
// Blocks must be this size or smaller to decompress,
// otherwise the decoder will return ErrUnsupported.
//
// For streams compressed with Snappy this can safely be set to 64KB (64 << 10).
//
// Default is the maximum limit of 4MB.
func ReaderMaxBlockSize(blockSize int) ReaderOption {
return func(r *Reader) error {
if blockSize > maxBlockSize || blockSize <= 0 {
return errors.New("s2: block size too large. Must be <= 4MB and > 0")
}
if r.lazyBuf == 0 && blockSize < defaultBlockSize {
r.lazyBuf = blockSize
}
r.maxBlock = blockSize
return nil
}
}
// ReaderAllocBlock allows controlling upfront stream allocations
// and not allocating for frames bigger than this initially.
// If frames bigger than this are seen, a bigger buffer will be allocated.
//
// Default is 1MB, which is default output size.
func ReaderAllocBlock(blockSize int) ReaderOption {
return func(r *Reader) error {
if blockSize > maxBlockSize || blockSize < 1024 {
return errors.New("s2: invalid ReaderAllocBlock. Must be <= 4MB and >= 1024")
}
r.lazyBuf = blockSize
return nil
}
}
// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
err error
decoded []byte
buf []byte
// decoded[i:j] contains decoded bytes that have not yet been passed on.
i, j int
// maximum block size allowed.
maxBlock int
// maximum expected buffer size.
maxBufSize int
// alloc a buffer this size if > 0.
lazyBuf int
readHeader bool
paramsOK bool
snappyFrame bool
}
// ensureBufferSize will ensure that the buffer can take at least n bytes.
// If false is returned, the required size exceeds the maximum allowed.
func (r *Reader) ensureBufferSize(n int) bool {
if len(r.buf) >= n {
return true
}
if n > r.maxBufSize {
r.err = ErrCorrupt
return false
}
// Realloc buffer.
r.buf = make([]byte, n)
return true
}
// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
if !r.paramsOK {
return
}
r.r = reader
r.err = nil
r.i = 0
r.j = 0
r.readHeader = false
}
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
r.err = ErrCorrupt
}
return false
}
return true
}
// skipN will skip n bytes.
// If the supplied reader supports seeking that is used.
// tmp is used as a temporary buffer for reading.
// The supplied slice does not need to be the size of the read.
func (r *Reader) skipN(tmp []byte, n int, allowEOF bool) (ok bool) {
if rs, ok := r.r.(io.ReadSeeker); ok {
_, err := rs.Seek(int64(n), io.SeekCurrent)
if err == nil {
return true
}
if err == io.ErrUnexpectedEOF || (err == io.EOF && !allowEOF) {
r.err = ErrCorrupt
return false
}
}
for n > 0 {
if n < len(tmp) {
tmp = tmp[:n]
}
if _, r.err = io.ReadFull(r.r, tmp); r.err != nil {
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
r.err = ErrCorrupt
}
return false
}
n -= len(tmp)
}
return true
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
for {
if r.i < r.j {
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
if !r.readFull(r.buf[:4], true) {
return 0, r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return 0, r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
// The chunk types are specified at
// https://github.com/google/snappy/blob/master/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
if !r.ensureBufferSize(chunkLen) {
if r.err == nil {
r.err = ErrUnsupported
}
return 0, r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return 0, r.err
}
if r.snappyFrame && n > maxSnappyBlockSize {
r.err = ErrCorrupt
return 0, r.err
}
if n > len(r.decoded) {
if n > r.maxBlock {
r.err = ErrCorrupt
return 0, r.err
}
r.decoded = make([]byte, n)
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCRC
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
if !r.ensureBufferSize(chunkLen) {
if r.err == nil {
r.err = ErrUnsupported
}
return 0, r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if r.snappyFrame && n > maxSnappyBlockSize {
r.err = ErrCorrupt
return 0, r.err
}
if n > len(r.decoded) {
if n > r.maxBlock {
r.err = ErrCorrupt
return 0, r.err
}
r.decoded = make([]byte, n)
}
if !r.readFull(r.decoded[:n], false) {
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCRC
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.buf[:len(magicBody)], false) {
return 0, r.err
}
if string(r.buf[:len(magicBody)]) != magicBody {
if string(r.buf[:len(magicBody)]) != magicBodySnappy {
r.err = ErrCorrupt
return 0, r.err
} else {
r.snappyFrame = true
}
} else {
r.snappyFrame = false
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return 0, r.err
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if chunkLen > maxBlockSize {
r.err = ErrUnsupported
return 0, r.err
}
if !r.skipN(r.buf, chunkLen, false) {
return 0, r.err
}
}
}
// Skip will skip n bytes forward in the decompressed output.
// For larger skips this consumes less CPU and is faster than reading output and discarding it.
// CRC is not checked on skipped blocks.
// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped.
// If a decoding error is encountered subsequent calls to Read will also fail.
func (r *Reader) Skip(n int64) error {
if n < 0 {
return errors.New("attempted negative skip")
}
if r.err != nil {
return r.err
}
for n > 0 {
if r.i < r.j {
// Skip in buffer.
// decoded[i:j] contains decoded bytes that have not yet been passed on.
left := int64(r.j - r.i)
if left >= n {
r.i += int(n)
return nil
}
n -= int64(r.j - r.i)
r.i, r.j = 0, 0
}
// Buffer empty; read blocks until we have content.
if !r.readFull(r.buf[:4], true) {
if r.err == io.EOF {
r.err = io.ErrUnexpectedEOF
}
return r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
// The chunk types are specified at
// https://github.com/google/snappy/blob/master/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return r.err
}
if !r.ensureBufferSize(chunkLen) {
if r.err == nil {
r.err = ErrUnsupported
}
return r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf, false) {
return r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
dLen, err := DecodedLen(buf)
if err != nil {
r.err = err
return r.err
}
if dLen > r.maxBlock {
r.err = ErrCorrupt
return r.err
}
// Check if destination is within this block
if int64(dLen) > n {
if len(r.decoded) < dLen {
r.decoded = make([]byte, dLen)
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return r.err
}
if crc(r.decoded[:dLen]) != checksum {
r.err = ErrCRC
return r.err
}
} else {
// Skip block completely
n -= int64(dLen)
dLen = 0
}
r.i, r.j = 0, dLen
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return r.err
}
if !r.ensureBufferSize(chunkLen) {
if r.err == nil {
r.err = ErrUnsupported
}
return r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf, false) {
return r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n2 := chunkLen - checksumSize
if n2 > len(r.decoded) {
if n2 > r.maxBlock {
r.err = ErrCorrupt
return r.err
}
r.decoded = make([]byte, n2)
}
if !r.readFull(r.decoded[:n2], false) {
return r.err
}
if int64(n2) < n {
if crc(r.decoded[:n2]) != checksum {
r.err = ErrCorrupt
return r.err
}
}
r.i, r.j = 0, n2
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return r.err
}
if !r.readFull(r.buf[:len(magicBody)], false) {
return r.err
}
if string(r.buf[:len(magicBody)]) != magicBody {
if string(r.buf[:len(magicBody)]) != magicBodySnappy {
r.err = ErrCorrupt
return r.err
}
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return r.err
}
if chunkLen > maxBlockSize {
r.err = ErrUnsupported
return r.err
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.skipN(r.buf, chunkLen, false) {
return r.err
}
}
return nil
}
// ReadByte satisfies the io.ByteReader interface.
func (r *Reader) ReadByte() (byte, error) {
if r.err != nil {
return 0, r.err
}
if r.i < r.j {
c := r.decoded[r.i]
r.i++
return c, nil
}
var tmp [1]byte
for i := 0; i < 10; i++ {
n, err := r.Read(tmp[:])
if err != nil {
return 0, err
}
if n == 1 {
return tmp[0], nil
}
}
return 0, io.ErrNoProgress
}

568
vendor/github.com/klauspost/compress/s2/decode_amd64.s generated vendored Normal file

@ -0,0 +1,568 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#define R_TMP0 AX
#define R_TMP1 BX
#define R_LEN CX
#define R_OFF DX
#define R_SRC SI
#define R_DST DI
#define R_DBASE R8
#define R_DLEN R9
#define R_DEND R10
#define R_SBASE R11
#define R_SLEN R12
#define R_SEND R13
#define R_TMP2 R14
#define R_TMP3 R15
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - R_TMP0 scratch
// - R_TMP1 scratch
// - R_LEN length or x (shared)
// - R_OFF offset
// - R_SRC &src[s]
// - R_DST &dst[d]
// + R_DBASE dst_base
// + R_DLEN dst_len
// + R_DEND dst_base + dst_len
// + R_SBASE src_base
// + R_SLEN src_len
// + R_SEND src_base + src_len
// - R_TMP2 used by doCopy
// - R_TMP3 used by doCopy
//
// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST.
// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC.
TEXT ·s2Decode(SB), NOSPLIT, $48-56
// Initialize R_SRC, R_DST and R_DBASE-R_SEND.
MOVQ dst_base+0(FP), R_DBASE
MOVQ dst_len+8(FP), R_DLEN
MOVQ R_DBASE, R_DST
MOVQ R_DBASE, R_DEND
ADDQ R_DLEN, R_DEND
MOVQ src_base+24(FP), R_SBASE
MOVQ src_len+32(FP), R_SLEN
MOVQ R_SBASE, R_SRC
MOVQ R_SBASE, R_SEND
ADDQ R_SLEN, R_SEND
XORQ R_OFF, R_OFF
loop:
// for s < len(src)
CMPQ R_SRC, R_SEND
JEQ end
// R_LEN = uint32(src[s])
//
// switch src[s] & 0x03
MOVBLZX (R_SRC), R_LEN
MOVL R_LEN, R_TMP1
ANDL $3, R_TMP1
CMPL R_TMP1, $1
JAE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
SHRL $2, R_LEN
CMPL R_LEN, $60
JAE tagLit60Plus
// case x < 60:
// s++
INCQ R_SRC
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that R_LEN == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// R_LEN can hold 64 bits, so the increment cannot overflow.
INCQ R_LEN
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// R_TMP0 = len(dst) - d
// R_TMP1 = len(src) - s
MOVQ R_DEND, R_TMP0
SUBQ R_DST, R_TMP0
MOVQ R_SEND, R_TMP1
SUBQ R_SRC, R_TMP1
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMPQ R_LEN, $16
JGT callMemmove
CMPQ R_TMP0, $16
JLT callMemmove
CMPQ R_TMP1, $16
JLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(R_SRC), X0
MOVOU X0, 0(R_DST)
// d += length
// s += length
ADDQ R_LEN, R_DST
ADDQ R_LEN, R_SRC
JMP loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMPQ R_LEN, R_TMP0
JGT errCorrupt
CMPQ R_LEN, R_TMP1
JGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVQ R_DST, 0(SP)
MOVQ R_SRC, 8(SP)
MOVQ R_LEN, 16(SP)
MOVQ R_DST, 24(SP)
MOVQ R_SRC, 32(SP)
MOVQ R_LEN, 40(SP)
MOVQ R_OFF, 48(SP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R_DBASE-R_SEND.
MOVQ 24(SP), R_DST
MOVQ 32(SP), R_SRC
MOVQ 40(SP), R_LEN
MOVQ 48(SP), R_OFF
MOVQ dst_base+0(FP), R_DBASE
MOVQ dst_len+8(FP), R_DLEN
MOVQ R_DBASE, R_DEND
ADDQ R_DLEN, R_DEND
MOVQ src_base+24(FP), R_SBASE
MOVQ src_len+32(FP), R_SLEN
MOVQ R_SBASE, R_SEND
ADDQ R_SLEN, R_SEND
// d += length
// s += length
ADDQ R_LEN, R_DST
ADDQ R_LEN, R_SRC
JMP loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADDQ R_LEN, R_SRC
SUBQ $58, R_SRC
CMPQ R_SRC, R_SEND
JA errCorrupt
// case x == 60:
CMPL R_LEN, $61
JEQ tagLit61
JA tagLit62Plus
// x = uint32(src[s-1])
MOVBLZX -1(R_SRC), R_LEN
JMP doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVWLZX -2(R_SRC), R_LEN
JMP doLit
tagLit62Plus:
CMPL R_LEN, $62
JA tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
// We read one byte, safe to read one back, since we are just reading tag.
// x = binary.LittleEndian.Uint32(src[s-1:]) >> 8
MOVL -4(R_SRC), R_LEN
SHRL $8, R_LEN
JMP doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVL -4(R_SRC), R_LEN
JMP doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy4:
// case tagCopy4:
// s += 5
ADDQ $5, R_SRC
// if uint(s) > uint(len(src)) { etc }
CMPQ R_SRC, R_SEND
JA errCorrupt
// length = 1 + int(src[s-5])>>2
SHRQ $2, R_LEN
INCQ R_LEN
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
MOVLQZX -4(R_SRC), R_OFF
JMP doCopy
tagCopy2:
// case tagCopy2:
// s += 3
ADDQ $3, R_SRC
// if uint(s) > uint(len(src)) { etc }
CMPQ R_SRC, R_SEND
JA errCorrupt
// length = 1 + int(src[s-3])>>2
SHRQ $2, R_LEN
INCQ R_LEN
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
MOVWQZX -2(R_SRC), R_OFF
JMP doCopy
tagCopy:
// We have a copy tag. We assume that:
// - R_TMP1 == src[s] & 0x03
// - R_LEN == src[s]
CMPQ R_TMP1, $2
JEQ tagCopy2
JA tagCopy4
// case tagCopy1:
// s += 2
ADDQ $2, R_SRC
// if uint(s) > uint(len(src)) { etc }
CMPQ R_SRC, R_SEND
JA errCorrupt
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
// length = 4 + int(src[s-2])>>2&0x7
MOVBQZX -1(R_SRC), R_TMP1
MOVQ R_LEN, R_TMP0
SHRQ $2, R_LEN
ANDQ $0xe0, R_TMP0
ANDQ $7, R_LEN
SHLQ $3, R_TMP0
ADDQ $4, R_LEN
ORQ R_TMP1, R_TMP0
// check if repeat code, ZF set by ORQ.
JZ repeatCode
// This is a regular copy, transfer our temporary value to R_OFF (offset)
MOVQ R_TMP0, R_OFF
JMP doCopy
// This is a repeat code.
repeatCode:
// If length < 9, reuse last offset, with the length already calculated.
CMPQ R_LEN, $9
JL doCopyRepeat
// Read additional bytes for length.
JE repeatLen1
// Rare, so the extra branch shouldn't hurt too much.
CMPQ R_LEN, $10
JE repeatLen2
JMP repeatLen3
// Read repeat lengths.
repeatLen1:
// s ++
ADDQ $1, R_SRC
// if uint(s) > uint(len(src)) { etc }
CMPQ R_SRC, R_SEND
JA errCorrupt
// length = src[s-1] + 8
MOVBQZX -1(R_SRC), R_LEN
ADDL $8, R_LEN
JMP doCopyRepeat
repeatLen2:
// s +=2
ADDQ $2, R_SRC
// if uint(s) > uint(len(src)) { etc }
CMPQ R_SRC, R_SEND
JA errCorrupt
// length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + (1 << 8)
MOVWQZX -2(R_SRC), R_LEN
ADDL $260, R_LEN
JMP doCopyRepeat
repeatLen3:
// s +=3
ADDQ $3, R_SRC
// if uint(s) > uint(len(src)) { etc }
CMPQ R_SRC, R_SEND
JA errCorrupt
// length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + (1 << 16)
// Read one byte further back (just part of the tag, shifted out)
MOVL -4(R_SRC), R_LEN
SHRL $8, R_LEN
ADDL $65540, R_LEN
JMP doCopyRepeat
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - R_LEN == length && R_LEN > 0
// - R_OFF == offset
// if d < offset { etc }
MOVQ R_DST, R_TMP1
SUBQ R_DBASE, R_TMP1
CMPQ R_TMP1, R_OFF
JLT errCorrupt
// Repeat values can skip the test above, since any offset > 0 will be in dst.
doCopyRepeat:
// if offset <= 0 { etc }
CMPQ R_OFF, $0
JLE errCorrupt
// if length > len(dst)-d { etc }
MOVQ R_DEND, R_TMP1
SUBQ R_DST, R_TMP1
CMPQ R_LEN, R_TMP1
JGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R_TMP2 = len(dst)-d
// - R_TMP3 = &dst[d-offset]
MOVQ R_DEND, R_TMP2
SUBQ R_DST, R_TMP2
MOVQ R_DST, R_TMP3
SUBQ R_OFF, R_TMP3
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMPQ R_LEN, $16
JGT slowForwardCopy
CMPQ R_OFF, $8
JLT slowForwardCopy
CMPQ R_TMP2, $16
JLT slowForwardCopy
MOVQ 0(R_TMP3), R_TMP0
MOVQ R_TMP0, 0(R_DST)
MOVQ 8(R_TMP3), R_TMP1
MOVQ R_TMP1, 8(R_DST)
ADDQ R_LEN, R_DST
JMP loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUBQ $10, R_TMP2
CMPQ R_LEN, R_TMP2
JGT verySlowForwardCopy
// We want to keep the offset, so we use R_TMP2 from here.
MOVQ R_OFF, R_TMP2
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together means that d-offset, and therefore
// // R_TMP3, is unchanged.
// }
CMPQ R_TMP2, $8
JGE fixUpSlowForwardCopy
MOVQ (R_TMP3), R_TMP1
MOVQ R_TMP1, (R_DST)
SUBQ R_TMP2, R_LEN
ADDQ R_TMP2, R_DST
ADDQ R_TMP2, R_TMP2
JMP makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by R_DST being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVQ R_DST, R_TMP0
ADDQ R_LEN, R_DST
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
CMPQ R_LEN, $0
JLE loop
MOVQ (R_TMP3), R_TMP1
MOVQ R_TMP1, (R_TMP0)
ADDQ $8, R_TMP3
ADDQ $8, R_TMP0
SUBQ $8, R_LEN
JMP finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R_TMP3), R_TMP1
MOVB R_TMP1, (R_DST)
INCQ R_TMP3
INCQ R_DST
DECQ R_LEN
JNZ verySlowForwardCopy
JMP loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMPQ R_DST, R_DEND
JNE errCorrupt
// return 0
MOVQ $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVQ $1, ret+48(FP)
RET

574
vendor/github.com/klauspost/compress/s2/decode_arm64.s generated vendored Normal file

@ -0,0 +1,574 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#define R_TMP0 R2
#define R_TMP1 R3
#define R_LEN R4
#define R_OFF R5
#define R_SRC R6
#define R_DST R7
#define R_DBASE R8
#define R_DLEN R9
#define R_DEND R10
#define R_SBASE R11
#define R_SLEN R12
#define R_SEND R13
#define R_TMP2 R14
#define R_TMP3 R15
// TEST_SRC will check if R_SRC is <= R_SEND
#define TEST_SRC() \
CMP R_SEND, R_SRC \
BGT errCorrupt
// MOVD R_SRC, R_TMP1
// SUB R_SBASE, R_TMP1, R_TMP1
// CMP R_SLEN, R_TMP1
// BGT errCorrupt
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - R_TMP0 scratch
// - R_TMP1 scratch
// - R_LEN length or x
// - R_OFF offset
// - R_SRC &src[s]
// - R_DST &dst[d]
// + R_DBASE dst_base
// + R_DLEN dst_len
// + R_DEND dst_base + dst_len
// + R_SBASE src_base
// + R_SLEN src_len
// + R_SEND src_base + src_len
// - R_TMP2 used by doCopy
// - R_TMP3 used by doCopy
//
// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST.
// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC.
TEXT ·s2Decode(SB), NOSPLIT, $56-64
// Initialize R_SRC, R_DST and R_DBASE-R_SEND.
MOVD dst_base+0(FP), R_DBASE
MOVD dst_len+8(FP), R_DLEN
MOVD R_DBASE, R_DST
MOVD R_DBASE, R_DEND
ADD R_DLEN, R_DEND, R_DEND
MOVD src_base+24(FP), R_SBASE
MOVD src_len+32(FP), R_SLEN
MOVD R_SBASE, R_SRC
MOVD R_SBASE, R_SEND
ADD R_SLEN, R_SEND, R_SEND
MOVD $0, R_OFF
loop:
// for s < len(src)
CMP R_SEND, R_SRC
BEQ end
// R_LEN = uint32(src[s])
//
// switch src[s] & 0x03
MOVBU (R_SRC), R_LEN
MOVW R_LEN, R_TMP1
ANDW $3, R_TMP1
MOVW $1, R1
CMPW R1, R_TMP1
BGE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
MOVW $60, R1
LSRW $2, R_LEN, R_LEN
CMPW R_LEN, R1
BLS tagLit60Plus
// case x < 60:
// s++
ADD $1, R_SRC, R_SRC
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that R_LEN == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// R_LEN can hold 64 bits, so the increment cannot overflow.
ADD $1, R_LEN, R_LEN
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// R_TMP0 = len(dst) - d
// R_TMP1 = len(src) - s
MOVD R_DEND, R_TMP0
SUB R_DST, R_TMP0, R_TMP0
MOVD R_SEND, R_TMP1
SUB R_SRC, R_TMP1, R_TMP1
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMP $16, R_LEN
BGT callMemmove
CMP $16, R_TMP0
BLT callMemmove
CMP $16, R_TMP1
BLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
LDP 0(R_SRC), (R_TMP2, R_TMP3)
STP (R_TMP2, R_TMP3), 0(R_DST)
// d += length
// s += length
ADD R_LEN, R_DST, R_DST
ADD R_LEN, R_SRC, R_SRC
B loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMP R_TMP0, R_LEN
BGT errCorrupt
CMP R_TMP1, R_LEN
BGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVD R_DST, 8(RSP)
MOVD R_SRC, 16(RSP)
MOVD R_LEN, 24(RSP)
MOVD R_DST, 32(RSP)
MOVD R_SRC, 40(RSP)
MOVD R_LEN, 48(RSP)
MOVD R_OFF, 56(RSP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R_DBASE-R_SEND.
MOVD 32(RSP), R_DST
MOVD 40(RSP), R_SRC
MOVD 48(RSP), R_LEN
MOVD 56(RSP), R_OFF
MOVD dst_base+0(FP), R_DBASE
MOVD dst_len+8(FP), R_DLEN
MOVD R_DBASE, R_DEND
ADD R_DLEN, R_DEND, R_DEND
MOVD src_base+24(FP), R_SBASE
MOVD src_len+32(FP), R_SLEN
MOVD R_SBASE, R_SEND
ADD R_SLEN, R_SEND, R_SEND
// d += length
// s += length
ADD R_LEN, R_DST, R_DST
ADD R_LEN, R_SRC, R_SRC
B loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADD R_LEN, R_SRC, R_SRC
SUB $58, R_SRC, R_SRC
TEST_SRC()
// case x == 60:
MOVW $61, R1
CMPW R1, R_LEN
BEQ tagLit61
BGT tagLit62Plus
// x = uint32(src[s-1])
MOVBU -1(R_SRC), R_LEN
B doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVHU -2(R_SRC), R_LEN
B doLit
tagLit62Plus:
CMPW $62, R_LEN
BHI tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVHU -3(R_SRC), R_LEN
MOVBU -1(R_SRC), R_TMP1
ORR R_TMP1<<16, R_LEN
B doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVWU -4(R_SRC), R_LEN
B doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy4:
// case tagCopy4:
// s += 5
ADD $5, R_SRC, R_SRC
// if uint(s) > uint(len(src)) { etc }
MOVD R_SRC, R_TMP1
SUB R_SBASE, R_TMP1, R_TMP1
CMP R_SLEN, R_TMP1
BGT errCorrupt
// length = 1 + int(src[s-5])>>2
MOVD $1, R1
ADD R_LEN>>2, R1, R_LEN
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
MOVWU -4(R_SRC), R_OFF
B doCopy
tagCopy2:
// case tagCopy2:
// s += 3
ADD $3, R_SRC, R_SRC
// if uint(s) > uint(len(src)) { etc }
TEST_SRC()
// length = 1 + int(src[s-3])>>2
MOVD $1, R1
ADD R_LEN>>2, R1, R_LEN
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
MOVHU -2(R_SRC), R_OFF
B doCopy
tagCopy:
// We have a copy tag. We assume that:
// - R_TMP1 == src[s] & 0x03
// - R_LEN == src[s]
CMP $2, R_TMP1
BEQ tagCopy2
BGT tagCopy4
// case tagCopy1:
// s += 2
ADD $2, R_SRC, R_SRC
// if uint(s) > uint(len(src)) { etc }
TEST_SRC()
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
// Calculate offset in R_TMP0 in case it is a repeat.
MOVD R_LEN, R_TMP0
AND $0xe0, R_TMP0
MOVBU -1(R_SRC), R_TMP1
ORR R_TMP0<<3, R_TMP1, R_TMP0
// length = 4 + int(src[s-2])>>2&0x7
MOVD $7, R1
AND R_LEN>>2, R1, R_LEN
ADD $4, R_LEN, R_LEN
// check if repeat code with offset 0.
CMP $0, R_TMP0
BEQ repeatCode
// This is a regular copy, transfer our temporary value to R_OFF (offset)
MOVD R_TMP0, R_OFF
B doCopy
// This is a repeat code.
repeatCode:
// If length < 9, reuse last offset, with the length already calculated.
CMP $9, R_LEN
BLT doCopyRepeat
BEQ repeatLen1
CMP $10, R_LEN
BEQ repeatLen2
repeatLen3:
// s +=3
ADD $3, R_SRC, R_SRC
// if uint(s) > uint(len(src)) { etc }
TEST_SRC()
// length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540
MOVBU -1(R_SRC), R_TMP0
MOVHU -3(R_SRC), R_LEN
ORR R_TMP0<<16, R_LEN, R_LEN
ADD $65540, R_LEN, R_LEN
B doCopyRepeat
repeatLen2:
// s +=2
ADD $2, R_SRC, R_SRC
// if uint(s) > uint(len(src)) { etc }
TEST_SRC()
// length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260
MOVHU -2(R_SRC), R_LEN
ADD $260, R_LEN, R_LEN
B doCopyRepeat
repeatLen1:
// s +=1
ADD $1, R_SRC, R_SRC
// if uint(s) > uint(len(src)) { etc }
TEST_SRC()
// length = src[s-1] + 8
MOVBU -1(R_SRC), R_LEN
ADD $8, R_LEN, R_LEN
B doCopyRepeat
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - R_LEN == length && R_LEN > 0
// - R_OFF == offset
// if d < offset { etc }
MOVD R_DST, R_TMP1
SUB R_DBASE, R_TMP1, R_TMP1
CMP R_OFF, R_TMP1
BLT errCorrupt
// Repeat values can skip the test above, since any offset > 0 will be in dst.
doCopyRepeat:
// if offset <= 0 { etc }
CMP $0, R_OFF
BLE errCorrupt
// if length > len(dst)-d { etc }
MOVD R_DEND, R_TMP1
SUB R_DST, R_TMP1, R_TMP1
CMP R_TMP1, R_LEN
BGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R_TMP2 = len(dst)-d
// - R_TMP3 = &dst[d-offset]
MOVD R_DEND, R_TMP2
SUB R_DST, R_TMP2, R_TMP2
MOVD R_DST, R_TMP3
SUB R_OFF, R_TMP3, R_TMP3
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMP $16, R_LEN
BGT slowForwardCopy
CMP $8, R_OFF
BLT slowForwardCopy
CMP $16, R_TMP2
BLT slowForwardCopy
MOVD 0(R_TMP3), R_TMP0
MOVD R_TMP0, 0(R_DST)
MOVD 8(R_TMP3), R_TMP1
MOVD R_TMP1, 8(R_DST)
ADD R_LEN, R_DST, R_DST
B loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUB $10, R_TMP2, R_TMP2
CMP R_TMP2, R_LEN
BGT verySlowForwardCopy
// We want to keep the offset, so we use R_TMP2 from here.
MOVD R_OFF, R_TMP2
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together means that d-offset, and therefore
// // R_TMP3, is unchanged.
// }
CMP $8, R_TMP2
BGE fixUpSlowForwardCopy
MOVD (R_TMP3), R_TMP1
MOVD R_TMP1, (R_DST)
SUB R_TMP2, R_LEN, R_LEN
ADD R_TMP2, R_DST, R_DST
ADD R_TMP2, R_TMP2, R_TMP2
B makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by R_DST being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVD R_DST, R_TMP0
ADD R_LEN, R_DST, R_DST
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
MOVD $0, R1
CMP R1, R_LEN
BLE loop
MOVD (R_TMP3), R_TMP1
MOVD R_TMP1, (R_TMP0)
ADD $8, R_TMP3, R_TMP3
ADD $8, R_TMP0, R_TMP0
SUB $8, R_LEN, R_LEN
B finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R_TMP3), R_TMP1
MOVB R_TMP1, (R_DST)
ADD $1, R_TMP3, R_TMP3
ADD $1, R_DST, R_DST
SUB $1, R_LEN, R_LEN
CBNZ R_LEN, verySlowForwardCopy
B loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMP R_DEND, R_DST
BNE errCorrupt
// return 0
MOVD $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVD $1, R_TMP0
MOVD R_TMP0, ret+48(FP)
RET

17
vendor/github.com/klauspost/compress/s2/decode_asm.go generated vendored Normal file

@ -0,0 +1,17 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (amd64 || arm64) && !appengine && gc && !noasm
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !noasm
package s2
// decode has the same semantics as in decode_other.go.
//
//go:noescape
func s2Decode(dst, src []byte) int

267
vendor/github.com/klauspost/compress/s2/decode_other.go generated vendored Normal file

@ -0,0 +1,267 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (!amd64 && !arm64) || appengine || !gc || noasm
// +build !amd64,!arm64 appengine !gc noasm
package s2
import (
"fmt"
"strconv"
)
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func s2Decode(dst, src []byte) int {
const debug = false
if debug {
fmt.Println("Starting decode, dst len:", len(dst))
}
var d, s, length int
offset := 0
// As long as we can read at least 5 bytes...
for s < len(src)-5 {
switch src[s] & 0x03 {
case tagLiteral:
x := uint32(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
x = uint32(src[s-1])
case x == 61:
s += 3
x = uint32(src[s-2]) | uint32(src[s-1])<<8
case x == 62:
s += 4
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
case x == 63:
s += 5
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
}
length = int(x) + 1
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
return decodeErrCodeCorrupt
}
if debug {
fmt.Println("literals, length:", length, "d-after:", d+length)
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
length = int(src[s-2]) >> 2 & 0x7
toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
if toffset == 0 {
if debug {
fmt.Print("(repeat) ")
}
// keep last offset
switch length {
case 5:
s += 1
length = int(uint32(src[s-1])) + 4
case 6:
s += 2
length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
case 7:
s += 3
length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
default: // 0-> 4
}
} else {
offset = toffset
}
length += 4
case tagCopy2:
s += 3
length = 1 + int(src[s-3])>>2
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
case tagCopy4:
s += 5
length = 1 + int(src[s-5])>>2
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
}
if offset <= 0 || d < offset || length > len(dst)-d {
return decodeErrCodeCorrupt
}
if debug {
fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
}
// Copy from an earlier sub-slice of dst to a later sub-slice.
// If no overlap, use the built-in copy:
if offset > length {
copy(dst[d:d+length], dst[d-offset:])
d += length
continue
}
// Unlike the built-in copy function, this byte-by-byte copy always runs
// forwards, even if the slices overlap. Conceptually, this is:
//
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
//
// We align the slices into a and b and show the compiler they are the same size.
// This allows the loop to run without bounds checks.
a := dst[d : d+length]
b := dst[d-offset:]
b = b[:len(a)]
for i := range a {
a[i] = b[i]
}
d += length
}
// Remaining with extra checks...
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint32(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-1])
case x == 61:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-2]) | uint32(src[s-1])<<8
case x == 62:
s += 4
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
case x == 63:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
}
length = int(x) + 1
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
return decodeErrCodeCorrupt
}
if debug {
fmt.Println("literals, length:", length, "d-after:", d+length)
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = int(src[s-2]) >> 2 & 0x7
toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
if toffset == 0 {
if debug {
fmt.Print("(repeat) ")
}
// keep last offset
switch length {
case 5:
s += 1
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = int(uint32(src[s-1])) + 4
case 6:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
case 7:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
default: // 0-> 4
}
} else {
offset = toffset
}
length += 4
case tagCopy2:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
case tagCopy4:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-5])>>2
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
}
if offset <= 0 || d < offset || length > len(dst)-d {
return decodeErrCodeCorrupt
}
if debug {
fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
}
// Copy from an earlier sub-slice of dst to a later sub-slice.
// If no overlap, use the built-in copy:
if offset > length {
copy(dst[d:d+length], dst[d-offset:])
d += length
continue
}
// Unlike the built-in copy function, this byte-by-byte copy always runs
// forwards, even if the slices overlap. Conceptually, this is:
//
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
//
// We align the slices into a and b and show the compiler they are the same size.
// This allows the loop to run without bounds checks.
a := dst[d : d+length]
b := dst[d-offset:]
b = b[:len(a)]
for i := range a {
a[i] = b[i]
}
d += length
}
if d != len(dst) {
return decodeErrCodeCorrupt
}
return 0
}

1172
vendor/github.com/klauspost/compress/s2/encode.go generated vendored Normal file

File diff suppressed because it is too large

456
vendor/github.com/klauspost/compress/s2/encode_all.go generated vendored Normal file

@ -0,0 +1,456 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"bytes"
"encoding/binary"
"math/bits"
)
func load32(b []byte, i int) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}
func load64(b []byte, i int) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash6(u uint64, h uint8) uint32 {
const prime6bytes = 227718039650203
return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
}
func encodeGo(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
if len(src) == 0 {
return dst[:d]
}
if len(src) < minNonLiteralBlockSize {
d += emitLiteral(dst[d:], src)
return dst[:d]
}
n := encodeBlockGo(dst[d:], src)
if n > 0 {
d += n
return dst[:d]
}
// Not compressible
d += emitLiteral(dst[d:], src)
return dst[:d]
}
// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockGo(dst, src []byte) (d int) {
// Initialize the hash table.
const (
tableBits = 14
maxTableSize = 1 << tableBits
debug = false
)
var table [maxTableSize]uint32
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
// Bail if we can't compress to at least this.
dstLimit := len(src) - len(src)>>5 - 5
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
cv := load64(src, s)
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
repeat := 1
for {
candidate := 0
for {
// Next src position to check
nextS := s + (s-nextEmit)>>6 + 4
if nextS > sLimit {
goto emitRemainder
}
hash0 := hash6(cv, tableBits)
hash1 := hash6(cv>>8, tableBits)
candidate = int(table[hash0])
candidate2 := int(table[hash1])
table[hash0] = uint32(s)
table[hash1] = uint32(s + 1)
hash2 := hash6(cv>>16, tableBits)
// Check repeat at offset checkRep.
const checkRep = 1
if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
base := s + checkRep
// Extend back
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
i--
base--
}
d += emitLiteral(dst[d:], src[nextEmit:base])
// Extend forward
candidate := s - repeat + 4 + checkRep
s += 4 + checkRep
for s <= sLimit {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
if debug {
// Validate match.
if s <= candidate {
panic("s <= candidate")
}
a := src[base:s]
b := src[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}
if nextEmit > 0 {
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
d += emitRepeat(dst[d:], repeat, s-base)
} else {
// First match, cannot be repeat.
d += emitCopy(dst[d:], repeat, s-base)
}
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
if uint32(cv) == load32(src, candidate) {
break
}
candidate = int(table[hash2])
if uint32(cv>>8) == load32(src, candidate2) {
table[hash2] = uint32(s + 2)
candidate = candidate2
s++
break
}
table[hash2] = uint32(s + 2)
if uint32(cv>>16) == load32(src, candidate) {
s += 2
break
}
cv = load64(src, nextS)
s = nextS
}
// Extend backwards.
// The top bytes will be rechecked to get the full match.
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
candidate--
s--
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteral(dst[d:], src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
repeat = base - candidate
// Extend the 4-byte match as long as possible.
s += 4
candidate += 4
for s <= len(src)-8 {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
d += emitCopy(dst[d:], repeat, s-base)
if debug {
// Validate match.
if s <= candidate {
panic("s <= candidate")
}
a := src[base:s]
b := src[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
if d > dstLimit {
// Do we have space for more, if not bail.
return 0
}
// Check for an immediate match, otherwise start search at s+1
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>16, tableBits)
candidate = int(table[currHash])
table[m2Hash] = uint32(s - 2)
table[currHash] = uint32(s)
if debug && s == candidate {
panic("s == candidate")
}
if uint32(x>>16) != load32(src, candidate) {
cv = load64(src, s+1)
s++
break
}
}
}
emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}
func encodeBlockSnappyGo(dst, src []byte) (d int) {
// Initialize the hash table.
const (
tableBits = 14
maxTableSize = 1 << tableBits
)
var table [maxTableSize]uint32
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
// Bail if we can't compress to at least this.
dstLimit := len(src) - len(src)>>5 - 5
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
cv := load64(src, s)
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
repeat := 1
for {
candidate := 0
for {
// Next src position to check
nextS := s + (s-nextEmit)>>6 + 4
if nextS > sLimit {
goto emitRemainder
}
hash0 := hash6(cv, tableBits)
hash1 := hash6(cv>>8, tableBits)
candidate = int(table[hash0])
candidate2 := int(table[hash1])
table[hash0] = uint32(s)
table[hash1] = uint32(s + 1)
hash2 := hash6(cv>>16, tableBits)
// Check repeat at offset checkRep.
const checkRep = 1
if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
base := s + checkRep
// Extend back
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
i--
base--
}
d += emitLiteral(dst[d:], src[nextEmit:base])
// Extend forward
candidate := s - repeat + 4 + checkRep
s += 4 + checkRep
for s <= sLimit {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
d += emitCopyNoRepeat(dst[d:], repeat, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
if uint32(cv) == load32(src, candidate) {
break
}
candidate = int(table[hash2])
if uint32(cv>>8) == load32(src, candidate2) {
table[hash2] = uint32(s + 2)
candidate = candidate2
s++
break
}
table[hash2] = uint32(s + 2)
if uint32(cv>>16) == load32(src, candidate) {
s += 2
break
}
cv = load64(src, nextS)
s = nextS
}
// Extend backwards
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
candidate--
s--
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteral(dst[d:], src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
repeat = base - candidate
// Extend the 4-byte match as long as possible.
s += 4
candidate += 4
for s <= len(src)-8 {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
d += emitCopyNoRepeat(dst[d:], repeat, s-base)
if false {
// Validate match.
a := src[base:s]
b := src[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
if d > dstLimit {
// Bail if we do not have space for more output.
return 0
}
// Check for an immediate match; otherwise start the search at s+1.
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>16, tableBits)
candidate = int(table[currHash])
table[m2Hash] = uint32(s - 2)
table[currHash] = uint32(s)
if uint32(x>>16) != load32(src, candidate) {
cv = load64(src, s+1)
s++
break
}
}
}
emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}
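The Go encoders above key their table lookups on hash6, which this hunk does not show; a minimal sketch of what it computes, assuming it follows the same multiplicative-hash pattern as the hash4/hash5/hash7/hash8 helpers vendored below (the prime constant here is illustrative, not necessarily the vendored value):

package main

import "fmt"

// hash6Sketch stands in for the unexported hash6: it hashes the lowest 6
// bytes of u into h bits with a multiplicative hash. The prime is illustrative.
func hash6Sketch(u uint64, h uint8) uint32 {
	const prime6bytes = 227718039650203
	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
}

func main() {
	// With tableBits = 14, as in encodeBlockSnappyGo, indices fit the table.
	fmt.Println(hash6Sketch(0x0123456789abcdef, 14) < 1<<14) // true
}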

142
vendor/github.com/klauspost/compress/s2/encode_amd64.go generated vendored Normal file
View File

@ -0,0 +1,142 @@
//go:build !appengine && !noasm && gc
// +build !appengine,!noasm,gc
package s2
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
const (
// Use 12 bit table when less than...
limit12B = 16 << 10
// Use 10 bit table when less than...
limit10B = 4 << 10
// Use 8 bit table when less than...
limit8B = 512
)
if len(src) >= 4<<20 {
return encodeBlockAsm(dst, src)
}
if len(src) >= limit12B {
return encodeBlockAsm4MB(dst, src)
}
if len(src) >= limit10B {
return encodeBlockAsm12B(dst, src)
}
if len(src) >= limit8B {
return encodeBlockAsm10B(dst, src)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
return encodeBlockAsm8B(dst, src)
}
// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetter(dst, src []byte) (d int) {
const (
// Use 12 bit table when less than...
limit12B = 16 << 10
// Use 10 bit table when less than...
limit10B = 4 << 10
// Use 8 bit table when less than...
limit8B = 512
)
if len(src) > 4<<20 {
return encodeBetterBlockAsm(dst, src)
}
if len(src) >= limit12B {
return encodeBetterBlockAsm4MB(dst, src)
}
if len(src) >= limit10B {
return encodeBetterBlockAsm12B(dst, src)
}
if len(src) >= limit8B {
return encodeBetterBlockAsm10B(dst, src)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
return encodeBetterBlockAsm8B(dst, src)
}
// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockSnappy(dst, src []byte) (d int) {
const (
// Use 12 bit table when less than...
limit12B = 16 << 10
// Use 10 bit table when less than...
limit10B = 4 << 10
// Use 8 bit table when less than...
limit8B = 512
)
if len(src) >= 64<<10 {
return encodeSnappyBlockAsm(dst, src)
}
if len(src) >= limit12B {
return encodeSnappyBlockAsm64K(dst, src)
}
if len(src) >= limit10B {
return encodeSnappyBlockAsm12B(dst, src)
}
if len(src) >= limit8B {
return encodeSnappyBlockAsm10B(dst, src)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
return encodeSnappyBlockAsm8B(dst, src)
}
// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
const (
// Use 12 bit table when less than...
limit12B = 16 << 10
// Use 10 bit table when less than...
limit10B = 4 << 10
// Use 8 bit table when less than...
limit8B = 512
)
if len(src) >= 64<<10 {
return encodeSnappyBetterBlockAsm(dst, src)
}
if len(src) >= limit12B {
return encodeSnappyBetterBlockAsm64K(dst, src)
}
if len(src) >= limit10B {
return encodeSnappyBetterBlockAsm12B(dst, src)
}
if len(src) >= limit8B {
return encodeSnappyBetterBlockAsm10B(dst, src)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
return encodeSnappyBetterBlockAsm8B(dst, src)
}
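These size-tiered dispatchers sit behind the package's public block API; a minimal usage sketch of s2.Encode and s2.Decode, which route through the encodeBlockAsm* variants on amd64:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := bytes.Repeat([]byte("matterbridge "), 1024)
	enc := s2.Encode(nil, src) // dispatched by input size on amd64
	dec, err := s2.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("in=%d out=%d roundtrip=%v\n", len(src), len(enc), bytes.Equal(src, dec))
}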

604
vendor/github.com/klauspost/compress/s2/encode_best.go generated vendored Normal file
View File

@ -0,0 +1,604 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"fmt"
"math/bits"
)
// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBest(dst, src []byte) (d int) {
// Initialize the hash tables.
const (
// Long hash matches.
lTableBits = 19
maxLTableSize = 1 << lTableBits
// Short hash matches.
sTableBits = 16
maxSTableSize = 1 << sTableBits
inputMargin = 8 + 2
)
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
if len(src) < minNonLiteralBlockSize {
return 0
}
var lTable [maxLTableSize]uint64
var sTable [maxSTableSize]uint64
// Bail if we can't compress to at least this.
dstLimit := len(src) - 5
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
cv := load64(src, s)
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
repeat := 1
const lowbitMask = 0xffffffff
getCur := func(x uint64) int {
return int(x & lowbitMask)
}
getPrev := func(x uint64) int {
return int(x >> 32)
}
const maxSkip = 64
for {
type match struct {
offset int
s int
length int
score int
rep bool
}
var best match
for {
// Next src position to check
nextS := (s-nextEmit)>>8 + 1
if nextS > maxSkip {
nextS = s + maxSkip
} else {
nextS += s
}
if nextS > sLimit {
goto emitRemainder
}
hashL := hash8(cv, lTableBits)
hashS := hash4(cv, sTableBits)
candidateL := lTable[hashL]
candidateS := sTable[hashS]
score := func(m match) int {
// Matches that start further forward are penalized, since the skipped bytes must be emitted as literals.
score := m.length - m.s
if nextEmit == m.s {
// If we do not have to emit literals, we save 1 byte
score++
}
offset := m.s - m.offset
if m.rep {
return score - emitRepeatSize(offset, m.length)
}
return score - emitCopySize(offset, m.length)
}
matchAt := func(offset, s int, first uint32, rep bool) match {
if best.length != 0 && best.s-best.offset == s-offset {
// Don't retest if we have the same offset.
return match{offset: offset, s: s}
}
if load32(src, offset) != first {
return match{offset: offset, s: s}
}
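// Note: m.length temporarily holds the end position of the match;
// the offset is subtracted below once extension is done.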
m := match{offset: offset, s: s, length: 4 + offset, rep: rep}
s += 4
for s <= sLimit {
if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
m.length += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
m.length += 8
}
m.length -= offset
m.score = score(m)
if m.score <= -m.s {
// Eliminate if no savings, we might find a better one.
m.length = 0
}
return m
}
bestOf := func(a, b match) match {
if b.length == 0 {
return a
}
if a.length == 0 {
return b
}
as := a.score + b.s
bs := b.score + a.s
if as >= bs {
return a
}
return b
}
best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false))
best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false))
{
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
if best.length > 0 {
// s+1
nextShort := sTable[hash4(cv>>8, sTableBits)]
s := s + 1
cv := load64(src, s)
nextLong := lTable[hash8(cv, lTableBits)]
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
// Repeat at + 2
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
// s+2
if true {
nextShort = sTable[hash4(cv>>8, sTableBits)]
s++
cv = load64(src, s)
nextLong = lTable[hash8(cv, lTableBits)]
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
}
// Search for a match at the end of the best match, and see if that is better.
if sAt := best.s + best.length; sAt < sLimit {
sBack := best.s
backL := best.length
// Load initial values
cv = load64(src, sBack)
// Search for mismatch
next := lTable[hash8(load64(src, sAt), lTableBits)]
//next := sTable[hash4(load64(src, sAt), sTableBits)]
if checkAt := getCur(next) - backL; checkAt > 0 {
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
}
if checkAt := getPrev(next) - backL; checkAt > 0 {
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
}
}
}
}
// Update table
lTable[hashL] = uint64(s) | candidateL<<32
sTable[hashS] = uint64(s) | candidateS<<32
if best.length > 0 {
break
}
cv = load64(src, nextS)
s = nextS
}
// Extend backwards, not needed for repeats...
s = best.s
if !best.rep {
for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
best.offset--
best.length++
s--
}
}
if false && best.offset >= s {
panic(fmt.Errorf("t %d >= s %d", best.offset, s))
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
base := s
offset := s - best.offset
s += best.length
if offset > 65535 && s-base <= 5 && !best.rep {
// Bail if the match is equal to or worse than the plain encoding.
s = best.s + 1
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
d += emitLiteral(dst[d:], src[nextEmit:base])
if best.rep {
if nextEmit > 0 {
// Same as `add := emitCopy(dst[d:], repeat, s-base)`, but skips storing the offset.
d += emitRepeat(dst[d:], offset, best.length)
} else {
// First match, cannot be repeat.
d += emitCopy(dst[d:], offset, best.length)
}
} else {
d += emitCopy(dst[d:], offset, best.length)
}
repeat = offset
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
if d > dstLimit {
// Bail if we do not have space for more output.
return 0
}
// Fill tables...
for i := best.s + 1; i < s; i++ {
cv0 := load64(src, i)
long0 := hash8(cv0, lTableBits)
short0 := hash4(cv0, sTableBits)
lTable[long0] = uint64(i) | lTable[long0]<<32
sTable[short0] = uint64(i) | sTable[short0]<<32
}
cv = load64(src, s)
}
emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}
// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBestSnappy(dst, src []byte) (d int) {
// Initialize the hash tables.
const (
// Long hash matches.
lTableBits = 19
maxLTableSize = 1 << lTableBits
// Short hash matches.
sTableBits = 16
maxSTableSize = 1 << sTableBits
inputMargin = 8 + 2
)
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
if len(src) < minNonLiteralBlockSize {
return 0
}
var lTable [maxLTableSize]uint64
var sTable [maxSTableSize]uint64
// Bail if we can't compress to at least this.
dstLimit := len(src) - 5
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
cv := load64(src, s)
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
repeat := 1
const lowbitMask = 0xffffffff
getCur := func(x uint64) int {
return int(x & lowbitMask)
}
getPrev := func(x uint64) int {
return int(x >> 32)
}
const maxSkip = 64
for {
type match struct {
offset int
s int
length int
score int
}
var best match
for {
// Next src position to check
nextS := (s-nextEmit)>>8 + 1
if nextS > maxSkip {
nextS = s + maxSkip
} else {
nextS += s
}
if nextS > sLimit {
goto emitRemainder
}
hashL := hash8(cv, lTableBits)
hashS := hash4(cv, sTableBits)
candidateL := lTable[hashL]
candidateS := sTable[hashS]
score := func(m match) int {
// Matches that start further forward are penalized, since the skipped bytes must be emitted as literals.
score := m.length - m.s
if nextEmit == m.s {
// If we do not have to emit literals, we save 1 byte
score++
}
offset := m.s - m.offset
return score - emitCopySize(offset, m.length)
}
matchAt := func(offset, s int, first uint32) match {
if best.length != 0 && best.s-best.offset == s-offset {
// Don't retest if we have the same offset.
return match{offset: offset, s: s}
}
if load32(src, offset) != first {
return match{offset: offset, s: s}
}
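// Note: m.length temporarily holds the end position of the match;
// the offset is subtracted below once extension is done.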
m := match{offset: offset, s: s, length: 4 + offset}
s += 4
for s <= sLimit {
if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
m.length += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
m.length += 8
}
m.length -= offset
m.score = score(m)
if m.score <= -m.s {
// Eliminate if no savings, we might find a better one.
m.length = 0
}
return m
}
bestOf := func(a, b match) match {
if b.length == 0 {
return a
}
if a.length == 0 {
return b
}
as := a.score + b.s
bs := b.score + a.s
if as >= bs {
return a
}
return b
}
best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv)))
best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv)))
best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv)))
{
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))
if best.length > 0 {
// s+1
nextShort := sTable[hash4(cv>>8, sTableBits)]
s := s + 1
cv := load64(src, s)
nextLong := lTable[hash8(cv, lTableBits)]
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
// Repeat at + 2
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))
// s+2
if true {
nextShort = sTable[hash4(cv>>8, sTableBits)]
s++
cv = load64(src, s)
nextLong = lTable[hash8(cv, lTableBits)]
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
}
// Search for a match at the end of the best match, and see if that is better.
if sAt := best.s + best.length; sAt < sLimit {
sBack := best.s
backL := best.length
// Load initial values
cv = load64(src, sBack)
// Search for mismatch
next := lTable[hash8(load64(src, sAt), lTableBits)]
//next := sTable[hash4(load64(src, sAt), sTableBits)]
if checkAt := getCur(next) - backL; checkAt > 0 {
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
}
if checkAt := getPrev(next) - backL; checkAt > 0 {
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
}
}
}
}
// Update table
lTable[hashL] = uint64(s) | candidateL<<32
sTable[hashS] = uint64(s) | candidateS<<32
if best.length > 0 {
break
}
cv = load64(src, nextS)
s = nextS
}
// Extend backwards, not needed for repeats...
s = best.s
if true {
for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
best.offset--
best.length++
s--
}
}
if false && best.offset >= s {
panic(fmt.Errorf("t %d >= s %d", best.offset, s))
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
base := s
offset := s - best.offset
s += best.length
if offset > 65535 && s-base <= 5 {
// Bail if the match is equal to or worse than the plain encoding.
s = best.s + 1
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
d += emitLiteral(dst[d:], src[nextEmit:base])
d += emitCopyNoRepeat(dst[d:], offset, best.length)
repeat = offset
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
if d > dstLimit {
// Bail if we do not have space for more output.
return 0
}
// Fill tables...
for i := best.s + 1; i < s; i++ {
cv0 := load64(src, i)
long0 := hash8(cv0, lTableBits)
short0 := hash4(cv0, sTableBits)
lTable[long0] = uint64(i) | lTable[long0]<<32
sTable[short0] = uint64(i) | sTable[short0]<<32
}
cv = load64(src, s)
}
emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}
// emitCopySize returns the number of bytes needed to encode a copy with the given offset and length.
//
// It assumes that:
// 1 <= offset && offset <= math.MaxUint32
// 4 <= length && length <= 1 << 24
func emitCopySize(offset, length int) int {
if offset >= 65536 {
i := 0
if length > 64 {
length -= 64
if length >= 4 {
// Emit remaining as repeats
return 5 + emitRepeatSize(offset, length)
}
i = 5
}
if length == 0 {
return i
}
return i + 5
}
// Offset no more than 2 bytes.
if length > 64 {
// Emit remaining as repeats, at least 4 bytes remain.
return 3 + emitRepeatSize(offset, length-60)
}
if length >= 12 || offset >= 2048 {
return 3
}
// Emit the remaining copy, encoded as 2 bytes.
return 2
}
// emitRepeatSize returns the number of bytes required to encode a repeat.
// Length must be at least 4 and < 1<<24
func emitRepeatSize(offset, length int) int {
// Repeat offset, make length cheaper
if length <= 4+4 || (length < 8+4 && offset < 2048) {
return 2
}
if length < (1<<8)+4+4 {
return 3
}
if length < (1<<16)+(1<<8)+4 {
return 4
}
const maxRepeat = (1 << 24) - 1
length -= (1 << 16) - 4
left := 0
if length > maxRepeat {
left = length - maxRepeat + 4
length = maxRepeat - 4
}
if left > 0 {
return 5 + emitRepeatSize(offset, left)
}
return 5
}
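The two size helpers above make the encoder's cost model concrete. A worked example, written as a hypothetical package-internal test (emitCopySize and emitRepeatSize are unexported, so this only compiles inside package s2): for offset 70000 and length 100, a 5-byte long copy covers the first 64 bytes and a 3-byte repeat covers the remaining 36, for 8 bytes total.

package s2

import "testing"

// TestEmitSizeSketch is an illustrative sketch, not part of the vendored code.
func TestEmitSizeSketch(t *testing.T) {
	// offset >= 65536 and length > 64: 5-byte copy plus a repeat for the rest.
	if got := emitCopySize(70000, 100); got != 8 {
		t.Fatalf("emitCopySize(70000, 100) = %d, want 8", got)
	}
	// A repeat of 8 bytes fits the 2-byte form (length <= 4+4).
	if got := emitRepeatSize(100, 8); got != 2 {
		t.Fatalf("emitRepeatSize(100, 8) = %d, want 2", got)
	}
}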

View File

@ -0,0 +1,431 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"math/bits"
)
// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4(u uint64, h uint8) uint32 {
const prime4bytes = 2654435761
return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
}
// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash5(u uint64, h uint8) uint32 {
const prime5bytes = 889523592379
return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63))
}
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
const prime7bytes = 58295818150454627
return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
}
// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
const prime8bytes = 0xcf1bbcdcb7a56463
return uint32((u * prime8bytes) >> ((64 - h) & 63))
}
// encodeBlockBetterGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterGo(dst, src []byte) (d int) {
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
if len(src) < minNonLiteralBlockSize {
return 0
}
// Initialize the hash tables.
const (
// Long hash matches.
lTableBits = 16
maxLTableSize = 1 << lTableBits
// Short hash matches.
sTableBits = 14
maxSTableSize = 1 << sTableBits
)
var lTable [maxLTableSize]uint32
var sTable [maxSTableSize]uint32
// Bail if we can't compress to at least this.
dstLimit := len(src) - len(src)>>5 - 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
cv := load64(src, s)
// We initialize repeat to 0, so we never match on the first attempt
repeat := 0
for {
candidateL := 0
nextS := 0
for {
// Next src position to check
nextS = s + (s-nextEmit)>>7 + 1
if nextS > sLimit {
goto emitRemainder
}
hashL := hash7(cv, lTableBits)
hashS := hash4(cv, sTableBits)
candidateL = int(lTable[hashL])
candidateS := int(sTable[hashS])
lTable[hashL] = uint32(s)
sTable[hashS] = uint32(s)
// Check repeat at offset checkRep.
const checkRep = 1
if false && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
base := s + checkRep
// Extend back
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
i--
base--
}
d += emitLiteral(dst[d:], src[nextEmit:base])
// Extend forward
candidate := s - repeat + 4 + checkRep
s += 4 + checkRep
for s < len(src) {
if len(src)-s < 8 {
if src[s] == src[candidate] {
s++
candidate++
continue
}
break
}
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
if nextEmit > 0 {
// Same as `add := emitCopy(dst[d:], repeat, s-base)`, but skips storing the offset.
d += emitRepeat(dst[d:], repeat, s-base)
} else {
// First match, cannot be repeat.
d += emitCopy(dst[d:], repeat, s-base)
}
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
if uint32(cv) == load32(src, candidateL) {
break
}
// Check our short candidate
if uint32(cv) == load32(src, candidateS) {
// Try a long candidate at s+1
hashL = hash7(cv>>8, lTableBits)
candidateL = int(lTable[hashL])
lTable[hashL] = uint32(s + 1)
if uint32(cv>>8) == load32(src, candidateL) {
s++
break
}
// Use our short candidate.
candidateL = candidateS
break
}
cv = load64(src, nextS)
s = nextS
}
// Extend backwards
for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
candidateL--
s--
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
base := s
offset := base - candidateL
// Extend the 4-byte match as long as possible.
s += 4
candidateL += 4
for s < len(src) {
if len(src)-s < 8 {
if src[s] == src[candidateL] {
s++
candidateL++
continue
}
break
}
if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidateL += 8
}
if offset > 65535 && s-base <= 5 && repeat != offset {
// Bail if the match is equal to or worse than the plain encoding.
s = nextS + 1
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
d += emitLiteral(dst[d:], src[nextEmit:base])
if repeat == offset {
d += emitRepeat(dst[d:], offset, s-base)
} else {
d += emitCopy(dst[d:], offset, s-base)
repeat = offset
}
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
if d > dstLimit {
// Bail if we do not have space for more output.
return 0
}
// Index match start+1 (long) and start+2 (short)
index0 := base + 1
// Index match end-2 (long) and end-1 (short)
index1 := s - 2
cv0 := load64(src, index0)
cv1 := load64(src, index1)
cv = load64(src, s)
lTable[hash7(cv0, lTableBits)] = uint32(index0)
lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1)
lTable[hash7(cv1, lTableBits)] = uint32(index1)
lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1)
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2)
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
}
emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}
// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
if len(src) < minNonLiteralBlockSize {
return 0
}
// Initialize the hash tables.
const (
// Long hash matches.
lTableBits = 16
maxLTableSize = 1 << lTableBits
// Short hash matches.
sTableBits = 14
maxSTableSize = 1 << sTableBits
)
var lTable [maxLTableSize]uint32
var sTable [maxSTableSize]uint32
// Bail if we can't compress to at least this.
dstLimit := len(src) - len(src)>>5 - 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
cv := load64(src, s)
// We initialize repeat to 0, so we never match on the first attempt
repeat := 0
const maxSkip = 100
for {
candidateL := 0
nextS := 0
for {
// Next src position to check
nextS = (s-nextEmit)>>7 + 1
if nextS > maxSkip {
nextS = s + maxSkip
} else {
nextS += s
}
if nextS > sLimit {
goto emitRemainder
}
hashL := hash7(cv, lTableBits)
hashS := hash4(cv, sTableBits)
candidateL = int(lTable[hashL])
candidateS := int(sTable[hashS])
lTable[hashL] = uint32(s)
sTable[hashS] = uint32(s)
if uint32(cv) == load32(src, candidateL) {
break
}
// Check our short candidate
if uint32(cv) == load32(src, candidateS) {
// Try a long candidate at s+1
hashL = hash7(cv>>8, lTableBits)
candidateL = int(lTable[hashL])
lTable[hashL] = uint32(s + 1)
if uint32(cv>>8) == load32(src, candidateL) {
s++
break
}
// Use our short candidate.
candidateL = candidateS
break
}
cv = load64(src, nextS)
s = nextS
}
// Extend backwards
for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
candidateL--
s--
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
base := s
offset := base - candidateL
// Extend the 4-byte match as long as possible.
s += 4
candidateL += 4
for s < len(src) {
if len(src)-s < 8 {
if src[s] == src[candidateL] {
s++
candidateL++
continue
}
break
}
if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidateL += 8
}
if offset > 65535 && s-base <= 5 && repeat != offset {
// Bail if the match is equal to or worse than the plain encoding.
s = nextS + 1
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
d += emitLiteral(dst[d:], src[nextEmit:base])
d += emitCopyNoRepeat(dst[d:], offset, s-base)
repeat = offset
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
if d > dstLimit {
// Bail if we do not have space for more output.
return 0
}
// Index match start+1 (long) and start+2 (short)
index0 := base + 1
// Index match end-2 (long) and end-1 (short)
index1 := s - 2
cv0 := load64(src, index0)
cv1 := load64(src, index1)
cv = load64(src, s)
lTable[hash7(cv0, lTableBits)] = uint32(index0)
lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1)
lTable[hash7(cv1, lTableBits)] = uint32(index1)
lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1)
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2)
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
}
emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}
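encodeBlockBetterGo and its Snappy-output twin back the package's "better" paths on targets without the assembly; a minimal usage sketch comparing the default and better modes on repetitive input:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := bytes.Repeat([]byte("0123456789abcdef"), 4096)
	fast := s2.Encode(nil, src)
	better := s2.EncodeBetter(nil, src) // dual-table search: slower, usually denser
	fmt.Printf("fast=%d better=%d\n", len(fast), len(better))
}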

298
vendor/github.com/klauspost/compress/s2/encode_go.go generated vendored Normal file
View File

@ -0,0 +1,298 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
package s2
import (
"math/bits"
)
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src))
func encodeBlock(dst, src []byte) (d int) {
if len(src) < minNonLiteralBlockSize {
return 0
}
return encodeBlockGo(dst, src)
}
// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src))
func encodeBlockBetter(dst, src []byte) (d int) {
return encodeBlockBetterGo(dst, src)
}
// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src))
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
return encodeBlockBetterSnappyGo(dst, src)
}
// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src))
func encodeBlockSnappy(dst, src []byte) (d int) {
if len(src) < minNonLiteralBlockSize {
return 0
}
return encodeBlockSnappyGo(dst, src)
}
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 0 <= len(lit) && len(lit) <= math.MaxUint32
func emitLiteral(dst, lit []byte) int {
if len(lit) == 0 {
return 0
}
const num = 63<<2 | tagLiteral
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[1] = uint8(n)
dst[0] = 60<<2 | tagLiteral
i = 2
case n < 1<<16:
dst[2] = uint8(n >> 8)
dst[1] = uint8(n)
dst[0] = 61<<2 | tagLiteral
i = 3
case n < 1<<24:
dst[3] = uint8(n >> 16)
dst[2] = uint8(n >> 8)
dst[1] = uint8(n)
dst[0] = 62<<2 | tagLiteral
i = 4
default:
dst[4] = uint8(n >> 24)
dst[3] = uint8(n >> 16)
dst[2] = uint8(n >> 8)
dst[1] = uint8(n)
dst[0] = 63<<2 | tagLiteral
i = 5
}
return i + copy(dst[i:], lit)
}
// emitRepeat writes a repeat chunk and returns the number of bytes written.
// Length must be at least 4 and < 1<<24
func emitRepeat(dst []byte, offset, length int) int {
// Repeat offset, make length cheaper
length -= 4
if length <= 4 {
dst[0] = uint8(length)<<2 | tagCopy1
dst[1] = 0
return 2
}
if length < 8 && offset < 2048 {
// Encode WITH offset
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
return 2
}
if length < (1<<8)+4 {
length -= 4
dst[2] = uint8(length)
dst[1] = 0
dst[0] = 5<<2 | tagCopy1
return 3
}
if length < (1<<16)+(1<<8) {
length -= 1 << 8
dst[3] = uint8(length >> 8)
dst[2] = uint8(length >> 0)
dst[1] = 0
dst[0] = 6<<2 | tagCopy1
return 4
}
const maxRepeat = (1 << 24) - 1
length -= 1 << 16
left := 0
if length > maxRepeat {
left = length - maxRepeat + 4
length = maxRepeat - 4
}
dst[4] = uint8(length >> 16)
dst[3] = uint8(length >> 8)
dst[2] = uint8(length >> 0)
dst[1] = 0
dst[0] = 7<<2 | tagCopy1
if left > 0 {
return 5 + emitRepeat(dst[5:], offset, left)
}
return 5
}
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= math.MaxUint32
// 4 <= length && length <= 1 << 24
func emitCopy(dst []byte, offset, length int) int {
if offset >= 65536 {
i := 0
if length > 64 {
// Emit a length 64 copy, encoded as 5 bytes.
dst[4] = uint8(offset >> 24)
dst[3] = uint8(offset >> 16)
dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset)
dst[0] = 63<<2 | tagCopy4
length -= 64
if length >= 4 {
// Emit remaining as repeats
return 5 + emitRepeat(dst[5:], offset, length)
}
i = 5
}
if length == 0 {
return i
}
// Emit a copy, offset encoded as 4 bytes.
dst[i+0] = uint8(length-1)<<2 | tagCopy4
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
dst[i+3] = uint8(offset >> 16)
dst[i+4] = uint8(offset >> 24)
return i + 5
}
// Offset no more than 2 bytes.
if length > 64 {
// Emit a length 60 copy, encoded as 3 bytes.
// Emit remaining as repeat value (minimum 4 bytes).
dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset)
dst[0] = 59<<2 | tagCopy2
length -= 60
// Emit remaining as repeats, at least 4 bytes remain.
return 3 + emitRepeat(dst[3:], offset, length)
}
if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes.
dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset)
dst[0] = uint8(length-1)<<2 | tagCopy2
return 3
}
// Emit the remaining copy, encoded as 2 bytes.
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
return 2
}
// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= math.MaxUint32
// 4 <= length && length <= 1 << 24
func emitCopyNoRepeat(dst []byte, offset, length int) int {
if offset >= 65536 {
i := 0
if length > 64 {
// Emit a length 64 copy, encoded as 5 bytes.
dst[4] = uint8(offset >> 24)
dst[3] = uint8(offset >> 16)
dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset)
dst[0] = 63<<2 | tagCopy4
length -= 64
if length >= 4 {
// Emit the remainder as an additional copy.
return 5 + emitCopyNoRepeat(dst[5:], offset, length)
}
i = 5
}
if length == 0 {
return i
}
// Emit a copy, offset encoded as 4 bytes.
dst[i+0] = uint8(length-1)<<2 | tagCopy4
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
dst[i+3] = uint8(offset >> 16)
dst[i+4] = uint8(offset >> 24)
return i + 5
}
// Offset no more than 2 bytes.
if length > 64 {
// Emit a length 60 copy, encoded as 3 bytes.
// Emit the remainder as further copies (minimum 4 bytes).
dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset)
dst[0] = 59<<2 | tagCopy2
length -= 60
// Emit the remainder as copies; at least 4 bytes remain.
return 3 + emitCopyNoRepeat(dst[3:], offset, length)
}
if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes.
dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset)
dst[0] = uint8(length-1)<<2 | tagCopy2
return 3
}
// Emit the remaining copy, encoded as 2 bytes.
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
return 2
}
// matchLen returns how many bytes match in a and b
//
// It assumes that:
// len(a) <= len(b)
//
func matchLen(a []byte, b []byte) int {
b = b[:len(a)]
var checked int
if len(a) > 4 {
// Try 4 bytes first
if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
return bits.TrailingZeros32(diff) >> 3
}
// Switch to 8 byte matching.
checked = 4
a = a[4:]
b = b[4:]
for len(a) >= 8 {
b = b[:len(a)]
if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
return checked + (bits.TrailingZeros64(diff) >> 3)
}
checked += 8
a = a[8:]
b = b[8:]
}
}
b = b[:len(a)]
for i := range a {
if a[i] != b[i] {
return int(i) + checked
}
}
return len(a) + checked
}
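The 2-byte tagCopy1 fast path at the bottom of emitCopy packs offset bits 8-10, the biased length, and the tag into one byte. A hypothetical package-internal test tracing it: for offset 10 and length 8, dst[0] = 0<<5 | (8-4)<<2 | tagCopy1 = 0x11 and dst[1] = 10.

package s2

import "testing"

// TestEmitCopySketch is an illustrative sketch, not part of the vendored code.
func TestEmitCopySketch(t *testing.T) {
	var dst [8]byte
	n := emitCopy(dst[:], 10, 8) // offset < 2048 and length < 12: 2-byte form
	if n != 2 || dst[0] != (8-4)<<2|tagCopy1 || dst[1] != 10 {
		t.Fatalf("n=%d dst=% x", n, dst[:2])
	}
}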

View File

@ -0,0 +1,189 @@
// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
//go:build !appengine && !noasm && gc
// +build !appengine,!noasm,gc
package s2
// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBlockAsm(dst []byte, src []byte) int
// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4194304 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBlockAsm4MB(dst []byte, src []byte) int
// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBlockAsm12B(dst []byte, src []byte) int
// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBlockAsm10B(dst []byte, src []byte) int
// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBlockAsm8B(dst []byte, src []byte) int
// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBetterBlockAsm(dst []byte, src []byte) int
// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4194304 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBetterBlockAsm12B(dst []byte, src []byte) int
// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBetterBlockAsm10B(dst []byte, src []byte) int
// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBetterBlockAsm8B(dst []byte, src []byte) int
// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBlockAsm(dst []byte, src []byte) int
// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 65535 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 65535 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes with margin of 0 bytes
// 0 <= len(lit) && len(lit) <= math.MaxUint32
//
//go:noescape
func emitLiteral(dst []byte, lit []byte) int
// emitRepeat writes a repeat chunk and returns the number of bytes written.
// Length must be at least 4 and < 1<<32
//
//go:noescape
func emitRepeat(dst []byte, offset int, length int) int
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= math.MaxUint32
// 4 <= length && length <= 1 << 24
//
//go:noescape
func emitCopy(dst []byte, offset int, length int) int
// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= math.MaxUint32
// 4 <= length && length <= 1 << 24
//
//go:noescape
func emitCopyNoRepeat(dst []byte, offset int, length int) int
// matchLen returns how many bytes match in a and b
//
// It assumes that:
// len(a) <= len(b)
//
//go:noescape
func matchLen(a []byte, b []byte) int
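All of these stubs assume the caller has already sized dst; the public API enforces that bound via MaxEncodedLen. A minimal sketch showing a caller preallocating the worst case once (s2.MaxEncodedLen and s2.Encode are the package's public entry points; Encode should reuse dst when its capacity is sufficient):

package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := []byte("the quick brown fox jumps over the lazy dog")
	// Worst-case buffer, matching the bound the asm stubs assume for dst.
	dst := make([]byte, 0, s2.MaxEncodedLen(len(src)))
	enc := s2.Encode(dst, src)
	fmt.Printf("cap=%d used=%d\n", cap(enc), len(enc))
}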

File diff suppressed because it is too large

139
vendor/github.com/klauspost/compress/s2/s2.go generated vendored Normal file
View File

@ -0,0 +1,139 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package s2 implements the S2 compression format.
//
// S2 is an extension of Snappy. Similar to Snappy, S2 is aimed at high throughput,
// which is why it features concurrent compression for bigger payloads.
//
// Decoding is compatible with Snappy compressed content,
// but content compressed with S2 cannot be decompressed by Snappy.
//
// For more information on Snappy/S2 differences, see the README at: https://github.com/klauspost/compress/tree/master/s2
//
// There are actually two S2 formats: block and stream. They are related,
// but different: trying to decompress block-compressed data as an S2 stream
// will fail, and vice versa. The block format is the Decode and Encode
// functions and the stream format is the Reader and Writer types.
//
// A "better" compression option is available. This will trade some compression
// speed
//
// The block format, the more common case, is used when the complete size (the
// number of bytes) of the original data is known upfront, at the time
// compression starts. The stream format, also known as the framing format, is
// for when that isn't always true.
//
// Blocks do not offer much data protection, so it is up to you to
// add data validation of decompressed blocks.
//
// Streams perform CRC validation of the decompressed data.
// Stream compression will also be performed on multiple CPU cores concurrently,
// significantly improving throughput.
package s2
import (
"bytes"
"hash/crc32"
)
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.
For literal tags:
- If m < 60, the next 1 + m bytes are literal bytes.
- Otherwise, let n be the little-endian unsigned integer denoted by the next
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
of the offset. The next byte is bits 0-7 of the offset.
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
The length is 1 + m. The offset is the little-endian unsigned integer
denoted by the next 2 bytes.
- For l == 3, the offset ranges in [0, 1<<32) and the length in
[1, 65). The length is 1 + m. The offset is the little-endian unsigned
integer denoted by the next 4 bytes.
*/
const (
tagLiteral = 0x00
tagCopy1 = 0x01
tagCopy2 = 0x02
tagCopy4 = 0x03
)
const (
checksumSize = 4
chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody
magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy
magicBodySnappy = "sNaPpY"
magicBody = "S2sTwO"
// maxBlockSize is the maximum size of the input to encodeBlock.
//
// For the framing format (Writer type instead of Encode function),
// this is the maximum uncompressed size of a block.
maxBlockSize = 4 << 20
// minBlockSize is the minimum block size that can be set when creating a writer.
minBlockSize = 4 << 10
// Default block size
defaultBlockSize = 1 << 20
// maxSnappyBlockSize is the maximum snappy block size.
maxSnappyBlockSize = 1 << 16
obufHeaderLen = checksumSize + chunkHeaderSize
)
const (
chunkTypeCompressedData = 0x00
chunkTypeUncompressedData = 0x01
chunkTypePadding = 0xfe
chunkTypeStreamIdentifier = 0xff
)
var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
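// The masked value is (c>>15 | c<<17) + 0xa282ead8; in Go, | and + share
// precedence and associate left, so the expression below parses that way.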
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return c>>15 | c<<17 + 0xa282ead8
}
// literalExtraSize returns the extra size of encoding n literals.
// n should be >= 0 and <= math.MaxUint32.
func literalExtraSize(n int64) int64 {
if n == 0 {
return 0
}
switch {
case n < 60:
return 1
case n < 1<<8:
return 2
case n < 1<<16:
return 3
case n < 1<<24:
return 4
default:
return 5
}
}
type byter interface {
Bytes() []byte
}
var _ byter = &bytes.Buffer{}
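The chunk layout documented above can be traced by hand. Eight unmatchable input bytes should encode as the varint length 0x08, a one-byte literal header (7<<2 | tagLiteral = 0x1c), and the raw payload; a small sketch checking that expectation with the public API:

package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	enc := s2.Encode(nil, []byte("abcdefgh"))
	// Expect: 08 1c 61 62 63 64 65 66 67 68
	// (varint decoded length, literal header for 1+7 bytes, then the bytes).
	fmt.Printf("% x\n", enc)
}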

View File

@ -60,8 +60,19 @@ type ChannelMember struct {
ExplicitRoles string `json:"explicit_roles"`
}
// ChannelMemberWithTeamData contains a ChannelMember together with extra team information.
type ChannelMemberWithTeamData struct {
ChannelMember
TeamDisplayName string `json:"team_display_name"`
TeamName string `json:"team_name"`
TeamUpdateAt int64 `json:"team_update_at"`
}
type ChannelMembers []ChannelMember
type ChannelMembersWithTeamData []ChannelMemberWithTeamData
type ChannelMemberForExport struct {
ChannelMember
ChannelName string
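Because ChannelMemberWithTeamData embeds ChannelMember, the team columns marshal flat alongside the member fields; a minimal sketch (field values are placeholders):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	m := model.ChannelMemberWithTeamData{
		ChannelMember:   model.ChannelMember{ChannelId: "chan1", UserId: "user1"},
		TeamDisplayName: "Core Team",
		TeamName:        "core",
	}
	b, _ := json.Marshal(m)
	fmt.Println(string(b)) // team_display_name etc. sit beside channel_id/user_id
}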

View File

@ -3100,6 +3100,24 @@ func (c *Client4) GetChannelsForTeamAndUserWithLastDeleteAt(teamId, userId strin
return ch, BuildResponse(r), nil
}
// GetChannelsForUserWithLastDeleteAt returns a list of channels for a user, additionally filtered by lastDeleteAt.
func (c *Client4) GetChannelsForUserWithLastDeleteAt(userID string, lastDeleteAt int) ([]*Channel, *Response, error) {
route := c.userRoute(userID) + "/channels"
route += fmt.Sprintf("?last_delete_at=%d", lastDeleteAt)
r, err := c.DoAPIGet(route, "")
if err != nil {
return nil, BuildResponse(r), err
}
defer closeBody(r)
var ch []*Channel
err = json.NewDecoder(r.Body).Decode(&ch)
if err != nil {
return nil, BuildResponse(r), NewAppError("GetChannelsForUserWithLastDeleteAt", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
}
return ch, BuildResponse(r), nil
}
// SearchChannels returns the channels on a team matching the provided search term.
func (c *Client4) SearchChannels(teamId string, search *ChannelSearch) ([]*Channel, *Response, error) {
searchJSON, jsonErr := json.Marshal(search)
@ -3160,6 +3178,29 @@ func (c *Client4) SearchAllChannels(search *ChannelSearch) (ChannelListWithTeamD
return ch, BuildResponse(r), nil
}
// SearchAllChannelsForUser searches all the channels for a regular user.
func (c *Client4) SearchAllChannelsForUser(term string) (ChannelListWithTeamData, *Response, error) {
search := &ChannelSearch{
Term: term,
}
searchJSON, jsonErr := json.Marshal(search)
if jsonErr != nil {
return nil, nil, NewAppError("SearchAllChannelsForUser", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
}
r, err := c.DoAPIPost(c.channelsRoute()+"/search?system_console=false", string(searchJSON))
if err != nil {
return nil, BuildResponse(r), err
}
defer closeBody(r)
var ch ChannelListWithTeamData
err = json.NewDecoder(r.Body).Decode(&ch)
if err != nil {
return nil, BuildResponse(r), NewAppError("SearchAllChannelsForUser", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
}
return ch, BuildResponse(r), nil
}
// SearchAllChannelsPaged searches all the channels and returns the results paged with the total count.
func (c *Client4) SearchAllChannelsPaged(search *ChannelSearch) (*ChannelsWithCount, *Response, error) {
searchJSON, jsonErr := json.Marshal(search)
@ -3304,7 +3345,7 @@ func (c *Client4) GetChannelByNameForTeamNameIncludeDeleted(channelName, teamNam
return ch, BuildResponse(r), nil
}
// GetChannelMembers gets a page of channel members.
// GetChannelMembers gets a page of channel members specific to a channel.
func (c *Client4) GetChannelMembers(channelId string, page, perPage int, etag string) (ChannelMembers, *Response, error) {
query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
r, err := c.DoAPIGet(c.channelMembersRoute(channelId)+query, etag)
@ -3321,6 +3362,23 @@ func (c *Client4) GetChannelMembers(channelId string, page, perPage int, etag st
return ch, BuildResponse(r), nil
}
// GetChannelMembersWithTeamData gets a page of all channel members for a user.
func (c *Client4) GetChannelMembersWithTeamData(userID string, page, perPage int) (ChannelMembersWithTeamData, *Response, error) {
query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
r, err := c.DoAPIGet(c.userRoute(userID)+"/channel_members"+query, "")
if err != nil {
return nil, BuildResponse(r), err
}
defer closeBody(r)
var ch ChannelMembersWithTeamData
err = json.NewDecoder(r.Body).Decode(&ch)
if err != nil {
return nil, BuildResponse(r), NewAppError("GetChannelMembersWithTeamData", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
}
return ch, BuildResponse(r), nil
}
// GetChannelMembersByIds gets the channel members in a channel for a list of user ids.
func (c *Client4) GetChannelMembersByIds(channelId string, userIds []string) (ChannelMembers, *Response, error) {
r, err := c.DoAPIPost(c.channelMembersRoute(channelId)+"/ids", ArrayToJSON(userIds))
@ -3710,6 +3768,27 @@ func (c *Client4) GetPostsForChannel(channelId string, page, perPage int, etag s
return &list, BuildResponse(r), nil
}
// GetPostsByIds gets a list of posts by taking an array of post ids
func (c *Client4) GetPostsByIds(postIds []string) ([]*Post, *Response, error) {
js, jsonErr := json.Marshal(postIds)
if jsonErr != nil {
return nil, nil, NewAppError("SearchFilesWithParams", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
}
r, err := c.DoAPIPost(c.postsRoute()+"/ids", string(js))
if err != nil {
return nil, BuildResponse(r), err
}
defer closeBody(r)
var list []*Post
if r.StatusCode == http.StatusNotModified {
return list, BuildResponse(r), nil
}
if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
return nil, nil, NewAppError("GetPostsByIds", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
}
return list, BuildResponse(r), nil
}
// GetFlaggedPostsForUser returns flagged posts of a user based on user id string.
func (c *Client4) GetFlaggedPostsForUser(userId string, page int, perPage int) (*PostList, *Response, error) {
query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
@ -6096,6 +6175,44 @@ func (c *Client4) UpdateUserStatus(userId string, userStatus *Status) (*Status,
return &s, BuildResponse(r), nil
}
// UpdateUserCustomStatus sets a user's custom status based on the provided user id string.
func (c *Client4) UpdateUserCustomStatus(userId string, userCustomStatus *CustomStatus) (*CustomStatus, *Response, error) {
buf, err := json.Marshal(userCustomStatus)
if err != nil {
return nil, nil, NewAppError("UpdateUserCustomStatus", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
}
r, err := c.DoAPIPutBytes(c.userStatusRoute(userId)+"/custom", buf)
if err != nil {
return nil, BuildResponse(r), err
}
defer closeBody(r)
var s CustomStatus
if jsonErr := json.NewDecoder(r.Body).Decode(&s); jsonErr != nil {
return nil, nil, NewAppError("UpdateUserCustomStatus", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
}
return &s, BuildResponse(r), nil
}
// RemoveUserCustomStatus removes a user's custom status based on the provided user id string.
func (c *Client4) RemoveUserCustomStatus(userId string) (*Response, error) {
r, err := c.DoAPIDelete(c.userStatusRoute(userId) + "/custom")
if err != nil {
return BuildResponse(r), err
}
defer closeBody(r)
return BuildResponse(r), nil
}
// RemoveRecentUserCustomStatus removes a user's recent custom status based on the provided user id string.
func (c *Client4) RemoveRecentUserCustomStatus(userId string) (*Response, error) {
r, err := c.DoAPIDelete(c.userStatusRoute(userId) + "/custom/recent")
if err != nil {
return BuildResponse(r), err
}
defer closeBody(r)
return BuildResponse(r), nil
}
// Emoji Section
// CreateEmoji will save an emoji to the server if the current user has permission
@ -6425,6 +6542,20 @@ func (c *Client4) DownloadJob(jobId string) ([]byte, *Response, error) {
// Roles Section
// GetAllRoles returns a list of all the roles.
func (c *Client4) GetAllRoles() ([]*Role, *Response, error) {
r, err := c.DoAPIGet(c.rolesRoute(), "")
if err != nil {
return nil, BuildResponse(r), err
}
defer closeBody(r)
var list []*Role
if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
return nil, nil, NewAppError("GetAllRoles", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
}
return list, BuildResponse(r), nil
}
// GetRole gets a single role by ID.
func (c *Client4) GetRole(id string) (*Role, *Response, error) {
r, err := c.DoAPIGet(c.rolesRoute()+fmt.Sprintf("/%v", id), "")
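The new Client4 helpers follow the package's usual pattern: build the route, issue the request, decode the JSON body, and wrap failures in AppError. A minimal sketch calling one of them (server URL, token, and user id are placeholders):

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	client := model.NewAPIv4Client("https://mattermost.example.com") // placeholder URL
	client.SetToken("session-token")                                 // placeholder token
	members, _, err := client.GetChannelMembersWithTeamData("me", 0, 60)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	for _, m := range members {
		fmt.Println(m.TeamName, m.ChannelId)
	}
}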

View File

@ -106,6 +106,7 @@ const (
ServiceSettingsDefaultListenAndAddress = ":8065"
ServiceSettingsDefaultGfycatAPIKey = "2_KtH_W5"
ServiceSettingsDefaultGfycatAPISecret = "3wLVZPiswc3DnaiaFoLkDvB4X0IV6CpMkj4tf2inJRsBY6-FnkT08zGmppWFgeof"
ServiceSettingsDefaultDeveloperFlags = ""
TeamSettingsDefaultSiteName = "Mattermost"
TeamSettingsDefaultMaxUsersPerTeam = 50
@ -302,6 +303,7 @@ type ServiceSettings struct {
RestrictLinkPreviews *string `access:"site_posts"`
EnableTesting *bool `access:"environment_developer,write_restrictable,cloud_restrictable"`
EnableDeveloper *bool `access:"environment_developer,write_restrictable,cloud_restrictable"`
DeveloperFlags *string `access:"environment_developer"`
EnableOpenTracing *bool `access:"write_restrictable,cloud_restrictable"`
EnableSecurityFixAlert *bool `access:"environment_smtp,write_restrictable,cloud_restrictable"`
EnableInsecureOutgoingConnections *bool `access:"environment_web_server,write_restrictable,cloud_restrictable"`
@ -363,7 +365,6 @@ type ServiceSettings struct {
ThreadAutoFollow *bool `access:"experimental_features"`
CollapsedThreads *string `access:"experimental_features"`
ManagedResourcePaths *string `access:"environment_web_server,write_restrictable,cloud_restrictable"`
EnableReliableWebSockets *bool `access:"experimental_features"` // telemetry: none
}
func (s *ServiceSettings) SetDefaults(isUpdate bool) {
@ -416,6 +417,10 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) {
s.EnableDeveloper = NewBool(false)
}
if s.DeveloperFlags == nil {
s.DeveloperFlags = NewString("")
}
if s.EnableOpenTracing == nil {
s.EnableOpenTracing = NewBool(false)
}
@ -776,10 +781,6 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) {
if s.ManagedResourcePaths == nil {
s.ManagedResourcePaths = NewString("")
}
if s.EnableReliableWebSockets == nil {
s.EnableReliableWebSockets = NewBool(true)
}
}
type ClusterSettings struct {
@ -1978,8 +1979,6 @@ func (s *TeamSettings) SetDefaults() {
type ClientRequirements struct {
AndroidLatestVersion string `access:"write_restrictable,cloud_restrictable"`
AndroidMinVersion string `access:"write_restrictable,cloud_restrictable"`
DesktopLatestVersion string `access:"write_restrictable,cloud_restrictable"`
DesktopMinVersion string `access:"write_restrictable,cloud_restrictable"`
IosLatestVersion string `access:"write_restrictable,cloud_restrictable"`
IosMinVersion string `access:"write_restrictable,cloud_restrictable"`
}
@ -2615,8 +2614,8 @@ func (s *DataRetentionSettings) SetDefaults() {
}
type JobSettings struct {
RunJobs *bool `access:"write_restrictable,cloud_restrictable"`
RunScheduler *bool `access:"write_restrictable,cloud_restrictable"`
RunJobs *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none
RunScheduler *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none
CleanupJobsThresholdDays *int `access:"write_restrictable,cloud_restrictable"`
}
@ -3025,7 +3024,7 @@ type Config struct {
BleveSettings BleveSettings
DataRetentionSettings DataRetentionSettings
MessageExportSettings MessageExportSettings
JobSettings JobSettings // telemetry: none
JobSettings JobSettings
PluginSettings PluginSettings
DisplaySettings DisplaySettings
GuestAccountsSettings GuestAccountsSettings

View File

@ -25,9 +25,12 @@ type FeatureFlags struct {
// Enable the remote cluster service for shared channels.
EnableRemoteClusterService bool
// AppsEnabled toggle the Apps framework functionalities both in server and client side
// AppsEnabled toggles the Apps framework functionalities both in server and client side
AppsEnabled bool
// AppBarEnabled toggles the App Bar component on client side
AppBarEnabled bool
// Feature flags to control plugin versions
PluginPlaybooks string `plugin_id:"playbooks"`
PluginApps string `plugin_id:"com.mattermost.apps"`
@ -41,14 +44,9 @@ type FeatureFlags struct {
// Enable different team menu button treatments, possible values = ("none", "by_team_name", "inverted_sidebar_bg_color")
AddChannelButton string
// Enable different treatments for first time users, possible values = ("none", "tour_point", "around_input")
PrewrittenMessages string
// Enable different treatments for first time users, possible values = ("none", "tips_and_next_steps")
DownloadAppsCTA string
// Determines whether newly created users default to noisy notifications, e.g. desktop notifications for all activity
NewAccountNoisy bool
// Enable Boards Unfurl Preview
BoardsUnfurl bool
@ -57,6 +55,21 @@ type FeatureFlags struct {
// Start A/B tour tips automatically, possible values = ("none", "auto")
AutoTour string
// A dash separated list for feature flags to turn on for Boards
BoardsFeatureFlags string
// A/B test for the add members to channel button, possible values = ("top", "bottom")
AddMembersToChannel string
// Enable Create First Channel
GuidedChannelCreation bool
// Determines after how many hours to send a second invitation to someone who didn't join after the initial invite, possible values = ("48", "72")
ResendInviteEmailInterval string
// A/B test for whether radio buttons or a toggle button is more effective in the in-screen invite-to-team modal, possible values = ("none", "toggle")
InviteToTeam string
}
func (f *FeatureFlags) SetDefaults() {
@ -66,17 +79,21 @@ func (f *FeatureFlags) SetDefaults() {
f.CollapsedThreads = true
f.EnableRemoteClusterService = false
f.AppsEnabled = false
f.AppBarEnabled = false
f.PluginApps = ""
f.PluginFocalboard = ""
f.PermalinkPreviews = true
f.GlobalHeader = true
f.AddChannelButton = "by_team_name"
f.PrewrittenMessages = "tour_point"
f.DownloadAppsCTA = "tips_and_next_steps"
f.NewAccountNoisy = false
f.BoardsUnfurl = true
f.CallsMobile = false
f.AutoTour = "none"
f.BoardsFeatureFlags = ""
f.AddMembersToChannel = "top"
f.GuidedChannelCreation = false
f.ResendInviteEmailInterval = ""
f.InviteToTeam = "none"
}
func (f *FeatureFlags) Plugins() map[string]string {

View File

@ -58,15 +58,15 @@ var AllJobTypes = [...]string{
}
type Job struct {
Id string `json:"id"`
Type string `json:"type"`
Priority int64 `json:"priority"`
CreateAt int64 `json:"create_at"`
StartAt int64 `json:"start_at"`
LastActivityAt int64 `json:"last_activity_at"`
Status string `json:"status"`
Progress int64 `json:"progress"`
Data map[string]string `json:"data"`
Id string `json:"id"`
Type string `json:"type"`
Priority int64 `json:"priority"`
CreateAt int64 `json:"create_at"`
StartAt int64 `json:"start_at"`
LastActivityAt int64 `json:"last_activity_at"`
Status string `json:"status"`
Progress int64 `json:"progress"`
Data StringMap `json:"data"`
}
func (j *Job) IsValid() *AppError {

View File

@ -52,6 +52,7 @@ type License struct {
SkuName string `json:"sku_name"`
SkuShortName string `json:"sku_short_name"`
IsTrial bool `json:"is_trial"`
IsGovSku bool `json:"is_gov_sku"`
}
type Customer struct {

View File

@ -206,7 +206,8 @@ type ManifestServer struct {
Executable string `json:"executable" yaml:"executable"`
}
// ManifestExecutables is a legacy structure capturing a subset of the known platform executables.
// Deprecated: ManifestExecutables is a legacy structure capturing a subset of the known platform executables.
// It will be removed in v7.0: https://mattermost.atlassian.net/browse/MM-40531
type ManifestExecutables struct {
// LinuxAmd64 is the path to your executable binary for the corresponding platform
LinuxAmd64 string `json:"linux-amd64,omitempty" yaml:"linux-amd64,omitempty"`

View File

@ -35,4 +35,5 @@ const (
MigrationKeyAddTestEmailAncillaryPermission = "test_email_ancillary_permission"
MigrationKeyAddAboutSubsectionPermissions = "about_subsection_permissions"
MigrationKeyAddIntegrationsSubsectionPermissions = "integrations_subsection_permissions"
MigrationKeyAddPlaybooksPermissions = "playbooks_permissions"
)

View File

@ -4,9 +4,11 @@
package model
const (
PermissionScopeSystem = "system_scope"
PermissionScopeTeam = "team_scope"
PermissionScopeChannel = "channel_scope"
PermissionScopeSystem = "system_scope"
PermissionScopeTeam = "team_scope"
PermissionScopeChannel = "channel_scope"
PermissionScopePlaybook = "playbook_scope"
PermissionScopeRun = "run_scope"
)
type Permission struct {
@ -331,6 +333,23 @@ var PermissionSysconsoleWriteExperimentalFeatureFlags *Permission
var PermissionSysconsoleReadExperimentalBleve *Permission
var PermissionSysconsoleWriteExperimentalBleve *Permission
var PermissionPublicPlaybookCreate *Permission
var PermissionPublicPlaybookManageProperties *Permission
var PermissionPublicPlaybookManageMembers *Permission
var PermissionPublicPlaybookView *Permission
var PermissionPublicPlaybookMakePrivate *Permission
var PermissionPrivatePlaybookCreate *Permission
var PermissionPrivatePlaybookManageProperties *Permission
var PermissionPrivatePlaybookManageMembers *Permission
var PermissionPrivatePlaybookView *Permission
var PermissionPrivatePlaybookMakePublic *Permission
var PermissionRunCreate *Permission
var PermissionRunManageProperties *Permission
var PermissionRunManageMembers *Permission
var PermissionRunView *Permission
// General permission that encompasses all system admin functions.
// In the future this could be broken up to allow access to some
// admin functions but not others.
@ -1895,6 +1914,105 @@ func initializePermissions() {
PermissionScopeSystem,
}
// Playbooks
PermissionPublicPlaybookCreate = &Permission{
"playbook_public_create",
"",
"",
PermissionScopeTeam,
}
PermissionPublicPlaybookManageProperties = &Permission{
"playbook_public_manage_properties",
"",
"",
PermissionScopePlaybook,
}
PermissionPublicPlaybookManageMembers = &Permission{
"playbook_public_manage_members",
"",
"",
PermissionScopePlaybook,
}
PermissionPublicPlaybookView = &Permission{
"playbook_public_view",
"",
"",
PermissionScopePlaybook,
}
PermissionPublicPlaybookMakePrivate = &Permission{
"playbook_public_make_private",
"",
"",
PermissionScopePlaybook,
}
PermissionPrivatePlaybookCreate = &Permission{
"playbook_private_create",
"",
"",
PermissionScopeTeam,
}
PermissionPrivatePlaybookManageProperties = &Permission{
"playbook_private_manage_properties",
"",
"",
PermissionScopePlaybook,
}
PermissionPrivatePlaybookManageMembers = &Permission{
"playbook_private_manage_members",
"",
"",
PermissionScopePlaybook,
}
PermissionPrivatePlaybookView = &Permission{
"playbook_private_view",
"",
"",
PermissionScopePlaybook,
}
PermissionPrivatePlaybookMakePublic = &Permission{
"playbook_private_make_public",
"",
"",
PermissionScopePlaybook,
}
PermissionRunCreate = &Permission{
"run_create",
"",
"",
PermissionScopePlaybook,
}
PermissionRunManageProperties = &Permission{
"run_manage_properties",
"",
"",
PermissionScopeRun,
}
PermissionRunManageMembers = &Permission{
"run_manage_members",
"",
"",
PermissionScopeRun,
}
PermissionRunView = &Permission{
"run_view",
"",
"",
PermissionScopeRun,
}
SysconsoleReadPermissions = []*Permission{
PermissionSysconsoleReadAboutEditionAndLicense,
PermissionSysconsoleReadBilling,
@ -2108,6 +2226,8 @@ func initializePermissions() {
PermissionViewTeam,
PermissionViewMembers,
PermissionInviteGuest,
PermissionPublicPlaybookCreate,
PermissionPrivatePlaybookCreate,
}
ChannelScopedPermissions := []*Permission{
@ -2163,12 +2283,32 @@ func initializePermissions() {
PermissionSysconsoleWriteCompliance,
}
PlaybookScopedPermissions := []*Permission{
PermissionPublicPlaybookManageProperties,
PermissionPublicPlaybookManageMembers,
PermissionPublicPlaybookView,
PermissionPublicPlaybookMakePrivate,
PermissionPrivatePlaybookManageProperties,
PermissionPrivatePlaybookManageMembers,
PermissionPrivatePlaybookView,
PermissionPrivatePlaybookMakePublic,
PermissionRunCreate,
}
RunScopedPermissions := []*Permission{
PermissionRunManageProperties,
PermissionRunManageMembers,
PermissionRunView,
}
AllPermissions = []*Permission{}
AllPermissions = append(AllPermissions, SystemScopedPermissionsMinusSysconsole...)
AllPermissions = append(AllPermissions, TeamScopedPermissions...)
AllPermissions = append(AllPermissions, ChannelScopedPermissions...)
AllPermissions = append(AllPermissions, SysconsoleReadPermissions...)
AllPermissions = append(AllPermissions, SysconsoleWritePermissions...)
AllPermissions = append(AllPermissions, PlaybookScopedPermissions...)
AllPermissions = append(AllPermissions, RunScopedPermissions...)
ChannelModeratedPermissions = []string{
PermissionCreatePost.Id,

View File

@ -10,7 +10,7 @@ import (
const (
KeyValuePluginIdMaxRunes = 190
KeyValueKeyMaxRunes = 50
KeyValueKeyMaxRunes = 150
)
type PluginKeyValue struct {

View File

@ -64,6 +64,7 @@ type PushNotification struct {
OverrideIconURL string `json:"override_icon_url,omitempty"`
FromWebhook string `json:"from_webhook,omitempty"`
Version string `json:"version,omitempty"`
IsCRTEnabled bool `json:"is_crt_enabled"`
IsIdLoaded bool `json:"is_id_loaded"`
}

View File

@ -41,6 +41,11 @@ func init() {
ChannelGuestRoleId,
ChannelUserRoleId,
ChannelAdminRoleId,
PlaybookAdminRoleId,
PlaybookMemberRoleId,
RunAdminRoleId,
RunMemberRoleId,
}, NewSystemRoleIDs...)
// When updating the values here, the values in mattermost-redux must also be updated.
@ -362,6 +367,11 @@ const (
ChannelUserRoleId = "channel_user"
ChannelAdminRoleId = "channel_admin"
PlaybookAdminRoleId = "playbook_admin"
PlaybookMemberRoleId = "playbook_member"
RunAdminRoleId = "run_admin"
RunMemberRoleId = "run_member"
RoleNameMaxLength = 64
RoleDisplayNameMaxLength = 128
RoleDescriptionMaxLength = 1024
@ -807,6 +817,61 @@ func MakeDefaultRoles() map[string]*Role {
BuiltIn: true,
}
roles[PlaybookAdminRoleId] = &Role{
Name: PlaybookAdminRoleId,
DisplayName: "authentication.roles.playbook_admin.name",
Description: "authentication.roles.playbook_admin.description",
Permissions: []string{
PermissionPublicPlaybookManageMembers.Id,
PermissionPublicPlaybookManageProperties.Id,
PermissionPrivatePlaybookManageMembers.Id,
PermissionPrivatePlaybookManageProperties.Id,
PermissionPublicPlaybookMakePrivate.Id,
},
SchemeManaged: true,
BuiltIn: true,
}
roles[PlaybookMemberRoleId] = &Role{
Name: PlaybookMemberRoleId,
DisplayName: "authentication.roles.playbook_member.name",
Description: "authentication.roles.playbook_member.description",
Permissions: []string{
PermissionPublicPlaybookView.Id,
PermissionPublicPlaybookManageMembers.Id,
PermissionPublicPlaybookManageProperties.Id,
PermissionPrivatePlaybookView.Id,
PermissionPrivatePlaybookManageMembers.Id,
PermissionPrivatePlaybookManageProperties.Id,
PermissionRunCreate.Id,
},
SchemeManaged: true,
BuiltIn: true,
}
roles[RunAdminRoleId] = &Role{
Name: RunAdminRoleId,
DisplayName: "authentication.roles.run_admin.name",
Description: "authentication.roles.run_admin.description",
Permissions: []string{
PermissionRunManageMembers.Id,
PermissionRunManageProperties.Id,
},
SchemeManaged: true,
BuiltIn: true,
}
roles[RunMemberRoleId] = &Role{
Name: RunMemberRoleId,
DisplayName: "authentication.roles.run_member.name",
Description: "authentication.roles.run_member.description",
Permissions: []string{
PermissionRunView.Id,
},
SchemeManaged: true,
BuiltIn: true,
}
roles[SystemGuestRoleId] = &Role{
Name: "system_guest",
DisplayName: "authentication.roles.global_guest.name",

View File

@ -14,23 +14,29 @@ const (
SchemeDescriptionMaxLength = 1024
SchemeScopeTeam = "team"
SchemeScopeChannel = "channel"
SchemeScopePlaybook = "playbook"
SchemeScopeRun = "run"
)
type Scheme struct {
Id string `json:"id"`
Name string `json:"name"`
DisplayName string `json:"display_name"`
Description string `json:"description"`
CreateAt int64 `json:"create_at"`
UpdateAt int64 `json:"update_at"`
DeleteAt int64 `json:"delete_at"`
Scope string `json:"scope"`
DefaultTeamAdminRole string `json:"default_team_admin_role"`
DefaultTeamUserRole string `json:"default_team_user_role"`
DefaultChannelAdminRole string `json:"default_channel_admin_role"`
DefaultChannelUserRole string `json:"default_channel_user_role"`
DefaultTeamGuestRole string `json:"default_team_guest_role"`
DefaultChannelGuestRole string `json:"default_channel_guest_role"`
Id string `json:"id"`
Name string `json:"name"`
DisplayName string `json:"display_name"`
Description string `json:"description"`
CreateAt int64 `json:"create_at"`
UpdateAt int64 `json:"update_at"`
DeleteAt int64 `json:"delete_at"`
Scope string `json:"scope"`
DefaultTeamAdminRole string `json:"default_team_admin_role"`
DefaultTeamUserRole string `json:"default_team_user_role"`
DefaultChannelAdminRole string `json:"default_channel_admin_role"`
DefaultChannelUserRole string `json:"default_channel_user_role"`
DefaultTeamGuestRole string `json:"default_team_guest_role"`
DefaultChannelGuestRole string `json:"default_channel_guest_role"`
DefaultPlaybookAdminRole string `json:"default_playbook_admin_role"`
DefaultPlaybookMemberRole string `json:"default_playbook_member_role"`
DefaultRunAdminRole string `json:"default_run_admin_role"`
DefaultRunMemberRole string `json:"default_run_member_role"`
}
type SchemePatch struct {
@ -45,31 +51,39 @@ type SchemeIDPatch struct {
// SchemeConveyor is used for importing and exporting a Scheme and its associated Roles.
type SchemeConveyor struct {
Name string `json:"name"`
DisplayName string `json:"display_name"`
Description string `json:"description"`
Scope string `json:"scope"`
TeamAdmin string `json:"default_team_admin_role"`
TeamUser string `json:"default_team_user_role"`
TeamGuest string `json:"default_team_guest_role"`
ChannelAdmin string `json:"default_channel_admin_role"`
ChannelUser string `json:"default_channel_user_role"`
ChannelGuest string `json:"default_channel_guest_role"`
Roles []*Role `json:"roles"`
Name string `json:"name"`
DisplayName string `json:"display_name"`
Description string `json:"description"`
Scope string `json:"scope"`
TeamAdmin string `json:"default_team_admin_role"`
TeamUser string `json:"default_team_user_role"`
TeamGuest string `json:"default_team_guest_role"`
ChannelAdmin string `json:"default_channel_admin_role"`
ChannelUser string `json:"default_channel_user_role"`
ChannelGuest string `json:"default_channel_guest_role"`
PlaybookAdmin string `json:"default_playbook_admin_role"`
PlaybookMember string `json:"default_playbook_member_role"`
RunAdmin string `json:"default_run_admin_role"`
RunMember string `json:"default_run_member_role"`
Roles []*Role `json:"roles"`
}
func (sc *SchemeConveyor) Scheme() *Scheme {
return &Scheme{
DisplayName: sc.DisplayName,
Name: sc.Name,
Description: sc.Description,
Scope: sc.Scope,
DefaultTeamAdminRole: sc.TeamAdmin,
DefaultTeamUserRole: sc.TeamUser,
DefaultTeamGuestRole: sc.TeamGuest,
DefaultChannelAdminRole: sc.ChannelAdmin,
DefaultChannelUserRole: sc.ChannelUser,
DefaultChannelGuestRole: sc.ChannelGuest,
DisplayName: sc.DisplayName,
Name: sc.Name,
Description: sc.Description,
Scope: sc.Scope,
DefaultTeamAdminRole: sc.TeamAdmin,
DefaultTeamUserRole: sc.TeamUser,
DefaultTeamGuestRole: sc.TeamGuest,
DefaultChannelAdminRole: sc.ChannelAdmin,
DefaultChannelUserRole: sc.ChannelUser,
DefaultChannelGuestRole: sc.ChannelGuest,
DefaultPlaybookAdminRole: sc.PlaybookAdmin,
DefaultPlaybookMemberRole: sc.PlaybookMember,
DefaultRunAdminRole: sc.RunAdmin,
DefaultRunMemberRole: sc.RunMember,
}
}
@ -101,7 +115,7 @@ func (scheme *Scheme) IsValidForCreate() bool {
}
switch scheme.Scope {
case SchemeScopeTeam, SchemeScopeChannel:
case SchemeScopeTeam, SchemeScopeChannel, SchemeScopePlaybook, SchemeScopeRun:
default:
return false
}
@ -130,6 +144,22 @@ func (scheme *Scheme) IsValidForCreate() bool {
if !IsValidRoleName(scheme.DefaultTeamGuestRole) {
return false
}
if !IsValidRoleName(scheme.DefaultPlaybookAdminRole) {
return false
}
if !IsValidRoleName(scheme.DefaultPlaybookMemberRole) {
return false
}
if !IsValidRoleName(scheme.DefaultRunAdminRole) {
return false
}
if !IsValidRoleName(scheme.DefaultRunMemberRole) {
return false
}
}
if scheme.Scope == SchemeScopeChannel {

View File

@ -69,6 +69,10 @@ type TeamMembersGetOptions struct {
ViewRestrictions *ViewUsersRestrictions
}
type TeamInviteReminderData struct {
Interval string
}
func EmailInviteWithErrorToEmails(o []*EmailInviteWithError) []string {
var ret []string
for _, o := range o {

View File

@ -621,6 +621,15 @@ func (u *User) SetCustomStatus(cs *CustomStatus) error {
return nil
}
func (u *User) GetCustomStatus() *CustomStatus {
var o *CustomStatus
data := u.Props[UserPropsKeyCustomStatus]
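// The unmarshal error is deliberately ignored: a missing or malformed prop simply leaves o nil.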
_ = json.Unmarshal([]byte(data), &o)
return o
}
func (u *User) ClearCustomStatus() {
u.MakeNonNil()
u.Props[UserPropsKeyCustomStatus] = ""

View File

@ -98,6 +98,43 @@ func (sa *StringArray) Scan(value interface{}) error {
return errors.New("received value is neither a byte slice nor string")
}
// Scan converts a database column value to StringMap
func (m *StringMap) Scan(value interface{}) error {
if value == nil {
return nil
}
buf, ok := value.([]byte)
if ok {
return json.Unmarshal(buf, m)
}
str, ok := value.(string)
if ok {
return json.Unmarshal([]byte(str), m)
}
return errors.New("received value is neither a byte slice nor string")
}
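// Scan converts a database column value to StringInterface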
func (si *StringInterface) Scan(value interface{}) error {
if value == nil {
return nil
}
buf, ok := value.([]byte)
if ok {
return json.Unmarshal(buf, si)
}
str, ok := value.(string)
if ok {
return json.Unmarshal([]byte(str), si)
}
return errors.New("received value is neither a byte slice nor string")
}
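These Scanner implementations are what let Job.Data (changed above from map[string]string to StringMap) be read straight out of a JSON column. A sketch under assumed names; the Jobs table, column list, and Postgres-style placeholder are illustrative, not the server's actual store code:

package example

import (
	"database/sql"

	"github.com/mattermost/mattermost-server/v6/model"
)

// loadJob assumes a Jobs table whose Data column stores JSON text.
func loadJob(db *sql.DB, jobId string) (*model.Job, error) {
	var job model.Job
	row := db.QueryRow(`SELECT Id, Type, Data FROM Jobs WHERE Id = $1`, jobId)
	// *StringMap implements sql.Scanner, so the JSON column decodes
	// directly into job.Data with no intermediate []byte round trip.
	if err := row.Scan(&job.Id, &job.Type, &job.Data); err != nil {
		return nil, err
	}
	return &job, nil
}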
var translateFunc i18n.TranslateFunc
var translateFuncOnce sync.Once

View File

@ -13,6 +13,8 @@ import (
// It should be maintained in chronological order with most current
// release at the front of the list.
var versions = []string{
"6.3.0",
"6.2.0",
"6.1.0",
"6.0.0",
"5.39.0",

View File

@ -6,6 +6,7 @@ package model
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"sync/atomic"
"time"
@ -13,6 +14,7 @@ import (
"github.com/mattermost/mattermost-server/v6/shared/mlog"
"github.com/gorilla/websocket"
"github.com/vmihailenco/msgpack/v5"
)
const (
@ -25,6 +27,7 @@ type msgType int
const (
msgTypeJSON msgType = iota + 1
msgTypePong
msgTypeBinary
)
type writeMessage struct {
@ -65,10 +68,26 @@ func NewWebSocketClient(url, authToken string) (*WebSocketClient, error) {
return NewWebSocketClientWithDialer(websocket.DefaultDialer, url, authToken)
}
func NewReliableWebSocketClientWithDialer(dialer *websocket.Dialer, url, authToken, connID string, seqNo int, withAuthHeader bool) (*WebSocketClient, error) {
connectURL := url + APIURLSuffix + "/websocket" + fmt.Sprintf("?connection_id=%s&sequence_number=%d", connID, seqNo)
var header http.Header
if withAuthHeader {
header = http.Header{
"Authorization": []string{"Bearer " + authToken},
}
}
return makeClient(dialer, url, connectURL, authToken, header)
}
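The new constructor lets a client resume a dropped session by replaying the connection id and last-seen sequence number as query parameters. A sketch, assuming the server supports reliable websockets; the URL, token, connection id, and sequence number are all placeholders:

package main

import (
	"log"

	"github.com/gorilla/websocket"
	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	client, err := model.NewReliableWebSocketClientWithDialer(
		websocket.DefaultDialer,
		"wss://mattermost.example.com", // placeholder server URL
		"placeholder-token",
		"placeholder-connection-id", // id issued on the previous connection
		7,                           // last sequence number seen before the drop
		true,                        // also send the token as an Authorization header
	)
	if err != nil {
		log.Fatal(err)
	}
	client.Listen()
}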
// NewWebSocketClientWithDialer constructs a new WebSocket client with convenience
// methods for talking to the server using a custom dialer.
func NewWebSocketClientWithDialer(dialer *websocket.Dialer, url, authToken string) (*WebSocketClient, error) {
conn, _, err := dialer.Dial(url+APIURLSuffix+"/websocket", nil)
return makeClient(dialer, url, url+APIURLSuffix+"/websocket", authToken, nil)
}
func makeClient(dialer *websocket.Dialer, url, connectURL, authToken string, header http.Header) (*WebSocketClient, error) {
conn, _, err := dialer.Dial(connectURL, header)
if err != nil {
return nil, NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError)
}
@ -76,7 +95,7 @@ func NewWebSocketClientWithDialer(dialer *websocket.Dialer, url, authToken strin
client := &WebSocketClient{
URL: url,
APIURL: url + APIURLSuffix,
ConnectURL: url + APIURLSuffix + "/websocket",
ConnectURL: connectURL,
Conn: conn,
AuthToken: authToken,
Sequence: 1,
@ -165,6 +184,10 @@ func (wsc *WebSocketClient) writer() {
switch msg.msgType {
case msgTypeJSON:
wsc.Conn.WriteJSON(msg.data)
case msgTypeBinary:
if data, ok := msg.data.([]byte); ok {
wsc.Conn.WriteMessage(websocket.BinaryMessage, data)
}
case msgTypePong:
wsc.Conn.WriteMessage(websocket.PongMessage, []byte{})
}
@ -258,6 +281,26 @@ func (wsc *WebSocketClient) SendMessage(action string, data map[string]interface
}
}
func (wsc *WebSocketClient) SendBinaryMessage(action string, data map[string]interface{}) error {
req := &WebSocketRequest{}
req.Seq = wsc.Sequence
req.Action = action
req.Data = data
binaryData, err := msgpack.Marshal(req)
if err != nil {
return fmt.Errorf("failed to marshal request to msgpack: %w", err)
}
wsc.Sequence++
wsc.writeChan <- writeMessage{
msgType: msgTypeBinary,
data: binaryData,
}
return nil
}
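SendBinaryMessage mirrors SendMessage but msgpack-encodes the WebSocketRequest and ships it as one binary frame, trading JSON readability for a smaller hot-path payload. A usage sketch, assuming wsc is an already-connected client; the channel id is a placeholder:

package example

import "github.com/mattermost/mattermost-server/v6/model"

// sendTypingBinary pushes a user_typing event over the binary path.
func sendTypingBinary(wsc *model.WebSocketClient) error {
	data := map[string]interface{}{
		"channel_id": "placeholder-channel-id",
		"parent_id":  "",
	}
	return wsc.SendBinaryMessage("user_typing", data)
}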
// UserTyping will push a user_typing event out to all connected users
// who are in the specified channel
func (wsc *WebSocketClient) UserTyping(channelId, parentId string) {

View File

@ -90,12 +90,58 @@ type WebsocketBroadcast struct {
ContainsSensitiveData bool `json:"-"`
}
func (wb *WebsocketBroadcast) copy() *WebsocketBroadcast {
if wb == nil {
return nil
}
var c WebsocketBroadcast
if wb.OmitUsers != nil {
c.OmitUsers = make(map[string]bool, len(wb.OmitUsers))
for k, v := range wb.OmitUsers {
c.OmitUsers[k] = v
}
}
c.UserId = wb.UserId
c.ChannelId = wb.ChannelId
c.TeamId = wb.TeamId
c.ContainsSanitizedData = wb.ContainsSanitizedData
c.ContainsSensitiveData = wb.ContainsSensitiveData
return &c
}
type precomputedWebSocketEventJSON struct {
Event json.RawMessage
Data json.RawMessage
Broadcast json.RawMessage
}
func (p *precomputedWebSocketEventJSON) copy() *precomputedWebSocketEventJSON {
if p == nil {
return nil
}
var c precomputedWebSocketEventJSON
if p.Event != nil {
c.Event = make([]byte, len(p.Event))
copy(c.Event, p.Event)
}
if p.Data != nil {
c.Data = make([]byte, len(p.Data))
copy(c.Data, p.Data)
}
if p.Broadcast != nil {
c.Broadcast = make([]byte, len(p.Broadcast))
copy(c.Broadcast, p.Broadcast)
}
return &c
}
// webSocketEventJSON mirrors WebSocketEvent to make some of its unexported fields serializable
type webSocketEventJSON struct {
Event string `json:"event"`
@ -154,6 +200,25 @@ func (ev *WebSocketEvent) Copy() *WebSocketEvent {
return copy
}
func (ev *WebSocketEvent) DeepCopy() *WebSocketEvent {
var dataCopy map[string]interface{}
if ev.data != nil {
dataCopy = make(map[string]interface{}, len(ev.data))
for k, v := range ev.data {
dataCopy[k] = v
}
}
copy := &WebSocketEvent{
event: ev.event,
data: dataCopy,
broadcast: ev.broadcast.copy(),
sequence: ev.sequence,
precomputedJSON: ev.precomputedJSON.copy(),
}
return copy
}
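Where Copy shares the underlying data map and broadcast with the original event, DeepCopy duplicates both, so a caller can add or replace top-level keys on the clone without racing readers of the original; note the map values themselves are still shared. A sketch, with a placeholder key:

package example

import "github.com/mattermost/mattermost-server/v6/model"

// tweakEvent returns a clone that can safely grow new top-level keys.
func tweakEvent(ev *model.WebSocketEvent) *model.WebSocketEvent {
	clone := ev.DeepCopy()
	clone.GetData()["placeholder_key"] = "value" // does not touch ev's map
	return clone
}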
func (ev *WebSocketEvent) GetData() map[string]interface{} {
return ev.data
}

View File

@ -4,31 +4,31 @@
package model
import (
"encoding/json"
"github.com/mattermost/mattermost-server/v6/shared/i18n"
"github.com/vmihailenco/msgpack/v5"
)
// WebSocketRequest represents a request made to the server through a websocket.
type WebSocketRequest struct {
// Client-provided fields
Seq int64 `json:"seq"` // A counter which is incremented for every request made.
Action string `json:"action"` // The action to perform for a request. For example: get_statuses, user_typing.
Data map[string]interface{} `json:"data"` // The metadata for an action.
Seq int64 `json:"seq" msgpack:"seq"` // A counter which is incremented for every request made.
Action string `json:"action" msgpack:"action"` // The action to perform for a request. For example: get_statuses, user_typing.
Data map[string]interface{} `json:"data" msgpack:"data"` // The metadata for an action.
// Server-provided fields
Session Session `json:"-"`
T i18n.TranslateFunc `json:"-"`
Locale string `json:"-"`
Session Session `json:"-" msgpack:"-"`
T i18n.TranslateFunc `json:"-" msgpack:"-"`
Locale string `json:"-" msgpack:"-"`
}
func (o *WebSocketRequest) Clone() (*WebSocketRequest, error) {
buf, err := json.Marshal(o)
buf, err := msgpack.Marshal(o)
if err != nil {
return nil, err
}
var ret WebSocketRequest
err = json.Unmarshal(buf, &ret)
err = msgpack.Unmarshal(buf, &ret)
if err != nil {
return nil, err
}

View File

@ -255,18 +255,25 @@ func (b *S3FileBackend) CopyFile(oldPath, newPath string) error {
oldPath = filepath.Join(b.pathPrefix, oldPath)
newPath = filepath.Join(b.pathPrefix, newPath)
srcOpts := s3.CopySrcOptions{
Bucket: b.bucket,
Object: oldPath,
Encryption: encrypt.NewSSE(),
Bucket: b.bucket,
Object: oldPath,
}
if b.encrypt {
srcOpts.Encryption = encrypt.NewSSE()
}
dstOpts := s3.CopyDestOptions{
Bucket: b.bucket,
Object: newPath,
Encryption: encrypt.NewSSE(),
Bucket: b.bucket,
Object: newPath,
}
if b.encrypt {
dstOpts.Encryption = encrypt.NewSSE()
}
if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
return errors.Wrapf(err, "unable to copy file from %s to %s", oldPath, newPath)
}
return nil
}
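The same guard appears in MoveFile below: SSE options are attached only when the backend is configured for encryption, since unconditionally sending SSE headers breaks copies against unencrypted buckets. The pattern in isolation, as a sketch against a generic minio client (bucket and object names are placeholders):

package example

import (
	"context"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/encrypt"
)

// copyMaybeEncrypted performs a server-side copy, adding SSE-S3 headers
// only when the target bucket actually uses encryption.
func copyMaybeEncrypted(ctx context.Context, client *minio.Client, bucket, src, dst string, encrypted bool) error {
	srcOpts := minio.CopySrcOptions{Bucket: bucket, Object: src}
	dstOpts := minio.CopyDestOptions{Bucket: bucket, Object: dst}
	if encrypted {
		srcOpts.Encryption = encrypt.NewSSE()
		dstOpts.Encryption = encrypt.NewSSE()
	}
	_, err := client.CopyObject(ctx, dstOpts, srcOpts)
	return err
}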
@ -274,14 +281,19 @@ func (b *S3FileBackend) MoveFile(oldPath, newPath string) error {
oldPath = filepath.Join(b.pathPrefix, oldPath)
newPath = filepath.Join(b.pathPrefix, newPath)
srcOpts := s3.CopySrcOptions{
Bucket: b.bucket,
Object: oldPath,
Encryption: encrypt.NewSSE(),
Bucket: b.bucket,
Object: oldPath,
}
if b.encrypt {
srcOpts.Encryption = encrypt.NewSSE()
}
dstOpts := s3.CopyDestOptions{
Bucket: b.bucket,
Object: newPath,
Encryption: encrypt.NewSSE(),
Bucket: b.bucket,
Object: newPath,
}
if b.encrypt {
dstOpts.Encryption = encrypt.NewSSE()
}
if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {

View File

@ -42,7 +42,6 @@ loop:
continue
}
var buf bytes.Buffer
for {
c, err := er.ReadByte()
if err != nil {
@ -51,7 +50,6 @@ loop:
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
break
}
buf.Write([]byte(string(c)))
}
}

View File

@ -8,9 +8,20 @@ linters:
- typecheck
- goimports
- misspell
- revive
- govet
- golint
- ineffassign
- gosimple
- deadcode
- structcheck
- gocritic
issues:
exclude-use-default: false
exclude:
# TODO: fix these when we get enough time.
- "singleCaseSwitch: should rewrite switch statement to if statement"
- "unlambda: replace"
- "captLocal:"
- "ifElseChain:"
- "elseif:"

View File

@ -9,7 +9,7 @@ checks: lint vet test examples functional-test
lint:
@mkdir -p ${GOPATH}/bin
@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1
@echo "Running $@ check"
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml

View File

@ -28,7 +28,7 @@ import (
)
// SetBucketEncryption sets the default encryption configuration on an existing bucket.
func (c Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error {
func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -70,7 +70,7 @@ func (c Client) SetBucketEncryption(ctx context.Context, bucketName string, conf
}
// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts.
func (c Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error {
func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -99,7 +99,7 @@ func (c Client) RemoveBucketEncryption(ctx context.Context, bucketName string) e
// GetBucketEncryption gets the default encryption configuration
// on an existing bucket with a context to control cancellations and timeouts.
func (c Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) {
func (c *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err

View File
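Starting with the encryption file above and continuing through the bucket API files below, minio-go 7.0.16 moves Client methods from value receivers to pointer receivers. Typical callers are unaffected because minio.New already returns a *minio.Client; a sketch with placeholder endpoint and credentials:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("s3.example.com", &minio.Options{ // placeholder endpoint
		Creds:  credentials.NewStaticV4("PLACEHOLDER-KEY", "PLACEHOLDER-SECRET", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Pointer-receiver methods resolve as before on the returned *Client.
	if err := client.EnableVersioning(context.Background(), "placeholder-bucket"); err != nil {
		log.Fatal(err)
	}
}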

@ -30,7 +30,7 @@ import (
)
// SetBucketLifecycle sets the lifecycle on an existing bucket.
func (c Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error {
func (c *Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -51,7 +51,7 @@ func (c Client) SetBucketLifecycle(ctx context.Context, bucketName string, confi
}
// Saves a new bucket lifecycle.
func (c Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error {
func (c *Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
@ -81,7 +81,7 @@ func (c Client) putBucketLifecycle(ctx context.Context, bucketName string, buf [
}
// Remove lifecycle from a bucket.
func (c Client) removeBucketLifecycle(ctx context.Context, bucketName string) error {
func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) error {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
@ -101,7 +101,7 @@ func (c Client) removeBucketLifecycle(ctx context.Context, bucketName string) er
}
// GetBucketLifecycle fetches the bucket lifecycle configuration
func (c Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) {
func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
@ -120,7 +120,7 @@ func (c Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lif
}
// Request server for current bucket lifecycle.
func (c Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) {
func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)

View File

@ -32,7 +32,7 @@ import (
)
// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts.
func (c Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error {
func (c *Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -73,12 +73,12 @@ func (c Client) SetBucketNotification(ctx context.Context, bucketName string, co
}
// RemoveAllBucketNotification removes the bucket notification, clearing all previously specified config
func (c Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error {
func (c *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error {
return c.SetBucketNotification(ctx, bucketName, notification.Configuration{})
}
// GetBucketNotification returns current bucket notification configuration
func (c Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) {
func (c *Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return notification.Configuration{}, err
@ -87,7 +87,7 @@ func (c Client) GetBucketNotification(ctx context.Context, bucketName string) (b
}
// Request server for notification rules.
func (c Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) {
func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) {
urlValues := make(url.Values)
urlValues.Set("notification", "")
@ -121,12 +121,12 @@ func processBucketNotificationResponse(bucketName string, resp *http.Response) (
}
// ListenNotification listens for all events; this is a MinIO-specific API
func (c Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info {
func (c *Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info {
return c.ListenBucketNotification(ctx, "", prefix, suffix, events)
}
// ListenBucketNotification listens for bucket events; this is a MinIO-specific API
func (c Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info {
func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info {
notificationInfoCh := make(chan notification.Info, 1)
const notificationCapacity = 4 * 1024 * 1024
notificationEventBuffer := make([]byte, notificationCapacity)

View File

@ -27,7 +27,7 @@ import (
)
// SetBucketPolicy sets the access permissions on an existing bucket.
func (c Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error {
func (c *Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -43,7 +43,7 @@ func (c Client) SetBucketPolicy(ctx context.Context, bucketName, policy string)
}
// Saves a new bucket policy.
func (c Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error {
func (c *Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
@ -71,7 +71,7 @@ func (c Client) putBucketPolicy(ctx context.Context, bucketName, policy string)
}
// Removes all policies on a bucket.
func (c Client) removeBucketPolicy(ctx context.Context, bucketName string) error {
func (c *Client) removeBucketPolicy(ctx context.Context, bucketName string) error {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
@ -91,7 +91,7 @@ func (c Client) removeBucketPolicy(ctx context.Context, bucketName string) error
}
// GetBucketPolicy returns the current policy
func (c Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) {
func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
@ -108,7 +108,7 @@ func (c Client) GetBucketPolicy(ctx context.Context, bucketName string) (string,
}
// Request server for current bucket policy.
func (c Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) {
func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)

View File

@ -33,12 +33,12 @@ import (
)
// RemoveBucketReplication removes a replication config on an existing bucket.
func (c Client) RemoveBucketReplication(ctx context.Context, bucketName string) error {
func (c *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error {
return c.removeBucketReplication(ctx, bucketName)
}
// SetBucketReplication sets a replication config on an existing bucket.
func (c Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error {
func (c *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -53,7 +53,7 @@ func (c Client) SetBucketReplication(ctx context.Context, bucketName string, cfg
}
// Saves a new bucket replication.
func (c Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error {
func (c *Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
@ -86,7 +86,7 @@ func (c Client) putBucketReplication(ctx context.Context, bucketName string, cfg
}
// Remove replication from a bucket.
func (c Client) removeBucketReplication(ctx context.Context, bucketName string) error {
func (c *Client) removeBucketReplication(ctx context.Context, bucketName string) error {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
@ -107,7 +107,7 @@ func (c Client) removeBucketReplication(ctx context.Context, bucketName string)
// GetBucketReplication fetches bucket replication configuration. If config is not
// found, returns empty config with nil error.
func (c Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
func (c *Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return cfg, err
@ -124,7 +124,7 @@ func (c Client) GetBucketReplication(ctx context.Context, bucketName string) (cf
}
// Request server for current bucket replication config.
func (c Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
func (c *Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
@ -153,7 +153,7 @@ func (c Client) getBucketReplication(ctx context.Context, bucketName string) (cf
}
// GetBucketReplicationMetrics fetches bucket replication status metrics
func (c Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) {
func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return s, err
@ -199,7 +199,7 @@ func mustGetUUID() string {
// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
// is enabled in the replication config
func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) {
func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) {
rID = mustGetUUID()
_, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID)
if err != nil {
@ -208,16 +208,15 @@ func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, o
return rID, nil
}
// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
// is enabled in the replication config
func (c Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (rinfo replication.ResyncTargetsInfo, err error) {
rID := mustGetUUID()
return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, rID)
// ResetBucketReplicationOnTarget kicks off replication of previously replicated objects if
// ExistingObjectReplication is enabled in the replication config
func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (replication.ResyncTargetsInfo, error) {
return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, mustGetUUID())
}
// resetBucketReplicationOnTarget kicks off replication of previously replicated objects if ExistingObjectReplication
// is enabled in the replication config
func (c Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string, resetID string) (rinfo replication.ResyncTargetsInfo, err error) {
func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string, resetID string) (rinfo replication.ResyncTargetsInfo, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return

View File

@ -32,7 +32,7 @@ import (
// GetBucketTagging fetches tagging configuration for a bucket with a
// context to control cancellations and timeouts.
func (c Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) {
func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
@ -64,7 +64,7 @@ func (c Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.
// SetBucketTagging sets tagging configuration for a bucket
// with a context to control cancellations and timeouts.
func (c Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error {
func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -107,7 +107,7 @@ func (c Client) SetBucketTagging(ctx context.Context, bucketName string, tags *t
// RemoveBucketTagging removes tagging configuration for a
// bucket with a context to control cancellations and timeouts.
func (c Client) RemoveBucketTagging(ctx context.Context, bucketName string) error {
func (c *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err

View File

@ -27,7 +27,7 @@ import (
)
// SetBucketVersioning sets a bucket versioning configuration
func (c Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error {
func (c *Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -67,12 +67,12 @@ func (c Client) SetBucketVersioning(ctx context.Context, bucketName string, conf
}
// EnableVersioning - enable object versioning in given bucket.
func (c Client) EnableVersioning(ctx context.Context, bucketName string) error {
func (c *Client) EnableVersioning(ctx context.Context, bucketName string) error {
return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"})
}
// SuspendVersioning - suspend object versioning in given bucket.
func (c Client) SuspendVersioning(ctx context.Context, bucketName string) error {
func (c *Client) SuspendVersioning(ctx context.Context, bucketName string) error {
return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"})
}
@ -102,7 +102,7 @@ func (b BucketVersioningConfiguration) Suspended() bool {
// GetBucketVersioning gets the versioning configuration on
// an existing bucket with a context to control cancellations and timeouts.
func (c Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) {
func (c *Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return BucketVersioningConfiguration{}, err

View File

@ -201,7 +201,7 @@ func (opts CopySrcOptions) validate() (err error) {
}
// Low-level implementation of the CopyObject API; supports only up to 5GiB worth of copy.
func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) {
// Build headers.
@ -243,8 +243,10 @@ func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuck
customHeader: headers,
}
if dstOpts.Internal.SourceVersionID != "" {
if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil {
return ObjectInfo{}, errInvalidArgument(err.Error())
if dstOpts.Internal.SourceVersionID != nullVersionID {
if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil {
return ObjectInfo{}, errInvalidArgument(err.Error())
}
}
urlValues := make(url.Values)
urlValues.Set("versionId", dstOpts.Internal.SourceVersionID)
@ -282,7 +284,7 @@ func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuck
return objInfo, nil
}
func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) {
headers := make(http.Header)
@ -335,7 +337,7 @@ func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, dest
// uploadPartCopy - helper function to create a part in a multipart
// upload via an upload-part-copy request
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
headers http.Header) (p CompletePart, err error) {
// Build query parameters
@ -375,7 +377,7 @@ func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID str
// and concatenates them into a new object using only server-side copying
// operations. Optionally takes progress reader hook for applications to
// look at current progress.
func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
if len(srcs) < 1 || len(srcs) > maxPartsCount {
return UploadInfo{}, errInvalidArgument("There must be at least one and up to 10000 source objects.")
}
@ -396,7 +398,7 @@ func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...
var err error
for i, src := range srcs {
opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID}
srcObjectInfos[i], err = c.statObject(context.Background(), src.Bucket, src.Object, opts)
srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts)
if err != nil {
return UploadInfo{}, err
}

View File

@ -25,7 +25,7 @@ import (
)
// CopyObject - copy a source object into a new object
func (c Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) {
func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) {
if err := src.validate(); err != nil {
return UploadInfo{}, err
}

View File

@ -18,8 +18,11 @@
package minio
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
)
@ -98,6 +101,19 @@ const (
reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
)
// xmlDecodeAndBody reads the whole body up to 1MB and
// tries to XML decode it into v.
// The body that was read and any error from reading or decoding is returned.
func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
// read the whole body (up to 1MB)
const maxBodyLength = 1 << 20
body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
if err != nil {
return nil, err
}
return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
}
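The io.LimitReader cap is the load-bearing detail here: it bounds how much of an arbitrary error body is held in memory before decoding. The same idea in isolation, as a sketch:

package example

import (
	"io"
	"io/ioutil"
)

// readBounded reads at most 1MiB from r; the LimitReader reports io.EOF
// once the budget is spent, so ReadAll cannot grow without bound.
func readBounded(r io.Reader) ([]byte, error) {
	const maxBodyLength = 1 << 20
	return ioutil.ReadAll(io.LimitReader(r, maxBodyLength))
}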
// httpRespToErrorResponse returns a new encoded ErrorResponse
// structure as error.
func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
@ -111,7 +127,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
Server: resp.Header.Get("Server"),
}
err := xmlDecoder(resp.Body, &errResp)
errBody, err := xmlDecodeAndBody(resp.Body, &errResp)
// XML decoding failed with no body, fall back to HTTP headers.
if err != nil {
switch resp.StatusCode {
@ -156,10 +172,17 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
Key: objectName,
}
default:
msg := resp.Status
if len(errBody) > 0 {
msg = string(errBody)
if len(msg) > 1024 {
msg = msg[:1024] + "..."
}
}
errResp = ErrorResponse{
StatusCode: resp.StatusCode,
Code: resp.Status,
Message: resp.Status,
Message: msg,
BucketName: bucketName,
}
}

View File

@ -52,7 +52,7 @@ type accessControlPolicy struct {
}
// GetObjectACL gets object ACLs
func (c Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) {
func (c *Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) {
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
bucketName: bucketName,
objectName: objectName,
@ -75,7 +75,7 @@ func (c Client) GetObjectACL(ctx context.Context, bucketName, objectName string)
return nil, err
}
objInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{})
objInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{})
if err != nil {
return nil, err
}

View File

@ -28,7 +28,7 @@ import (
// FGetObject - download contents of an object to a local file.
// The options can be used to specify the GET request further.
func (c Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err

View File

@ -30,7 +30,7 @@ import (
)
// GetObject is a wrapper function that accepts a request context
func (c Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
@ -139,7 +139,7 @@ func (c Client) GetObject(ctx context.Context, bucketName, objectName string, op
// Remove the range header if already set, so stat operations get the original file size.
delete(opts.headers, "Range")
objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts))
objectInfo, err = c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts))
if err != nil {
resCh <- getResponse{
Error: err,
@ -162,7 +162,7 @@ func (c Client) GetObject(ctx context.Context, bucketName, objectName string, op
if etag != "" && !snowball {
opts.SetMatchETag(etag)
}
objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts))
objectInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts))
if err != nil {
resCh <- getResponse{
Error: err,
@ -639,7 +639,7 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<-
//
// For more information about the HTTP Range header,
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
func (c *Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
// Validate input arguments.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, ObjectInfo{}, nil, err

View File

@ -25,7 +25,7 @@ import (
"github.com/minio/minio-go/v7/pkg/encrypt"
)
//AdvancedGetOptions for internal use by MinIO server - not intended for client use.
// AdvancedGetOptions for internal use by MinIO server - not intended for client use.
type AdvancedGetOptions struct {
ReplicationDeleteMarker bool
ReplicationProxyRequest string

View File

@ -36,7 +36,7 @@ import (
// fmt.Println(message)
// }
//
func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
// Execute GET on service.
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex})
defer closeResponse(resp)
@ -56,8 +56,8 @@ func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
return listAllMyBucketsResult.Buckets.Bucket, nil
}
/// Bucket List Operations.
func (c Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
// Bucket List Operations.
func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
// Allocate new list objects channel.
objectStatCh := make(chan ObjectInfo, 1)
// Default listing is delimited at "/"
@ -153,7 +153,7 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName string, opts ListO
// ?delimiter - A delimiter is a character you use to group keys.
// ?start-after - Sets a marker to start listing lexically at this key onwards.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) {
func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) {
// Validate bucket name.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListBucketV2Result{}, err
@ -252,7 +252,7 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix
return listBucketResult, nil
}
func (c Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
// Allocate new list objects channel.
objectStatCh := make(chan ObjectInfo, 1)
// Default listing is delimited at "/"
@ -332,7 +332,7 @@ func (c Client) listObjects(ctx context.Context, bucketName string, opts ListObj
return objectStatCh
}
func (c Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
// Allocate new list objects channel.
resultCh := make(chan ObjectInfo, 1)
// Default listing is delimited at "/"
@ -443,7 +443,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName string, opts
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int, headers http.Header) (ListVersionsResult, error) {
func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int, headers http.Header) (ListVersionsResult, error) {
// Validate bucket name.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListVersionsResult{}, err
@ -540,7 +540,7 @@ func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix,
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) {
func (c *Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) {
// Validate bucket name.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListBucketResult{}, err
@ -661,7 +661,7 @@ func (o *ListObjectsOptions) Set(key, value string) {
// fmt.Println(object)
// }
//
func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
if opts.WithVersions {
return c.listObjectVersions(ctx, bucketName, opts)
}
@ -697,12 +697,12 @@ func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObj
// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) {
// fmt.Println(message)
// }
func (c Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
}
// listIncompleteUploads lists all incomplete uploads.
func (c Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
// Allocate channel for multipart uploads.
objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
// Delimiter is set to "/" by default.
@ -788,7 +788,7 @@ func (c Client) listIncompleteUploads(ctx context.Context, bucketName, objectPre
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
func (c Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set uploads.
@ -867,7 +867,7 @@ func (c Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMa
}
// listObjectParts lists all object parts recursively.
func (c Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
// Part number marker for the next batch of request.
var nextPartNumberMarker int
partsInfo = make(map[int]ObjectPart)
@ -896,7 +896,7 @@ func (c Client) listObjectParts(ctx context.Context, bucketName, objectName, upl
}
// findUploadIDs lists all incomplete uploads and finds the uploadIDs of the matching object name.
func (c Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) {
func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) {
var uploadIDs []string
// Make list incomplete uploads recursive.
isRecursive := true
@ -923,7 +923,7 @@ func (c Client) findUploadIDs(ctx context.Context, bucketName, objectName string
// ?part-number-marker - Specifies the part after which listing should
// begin.
// ?max-parts - Maximum parts to be listed per request.
func (c Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
func (c *Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set part number marker.
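
Taken together, the listing APIs in this file are consumed by draining the returned ObjectInfo channel. A minimal sketch, not part of the diff; the bucket name and prefix are placeholders and c is assumed to be an initialized *minio.Client:

package example

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
)

// listAll drains the channel returned by ListObjects. Setting WithVersions
// in the options would route the same call through listObjectVersions.
func listAll(ctx context.Context, c *minio.Client) error {
	opts := minio.ListObjectsOptions{Prefix: "logs/", Recursive: true} // placeholder prefix
	for obj := range c.ListObjects(ctx, "mybucket", opts) {
		if obj.Err != nil {
			return obj.Err
		}
		fmt.Println(obj.Key, obj.Size)
	}
	return nil
}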

View File

@ -81,7 +81,7 @@ func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) {
}
// PutObjectLegalHold : sets object legal hold for a given object and versionID.
func (c Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error {
func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -135,7 +135,7 @@ func (c Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName s
}
// GetObjectLegalHold gets legal-hold status of given object.
func (c Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) {
func (c *Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
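
A hedged usage sketch for the two legal-hold calls above; the versionID, bucket and object names are placeholders supplied by the caller:

package example

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
)

// toggleLegalHold sets a legal hold on one object version and reads it back.
func toggleLegalHold(ctx context.Context, c *minio.Client, versionID string) error {
	st := minio.LegalHoldEnabled
	err := c.PutObjectLegalHold(ctx, "mybucket", "myobject",
		minio.PutObjectLegalHoldOptions{VersionID: versionID, Status: &st})
	if err != nil {
		return err
	}
	status, err := c.GetObjectLegalHold(ctx, "mybucket", "myobject",
		minio.GetObjectLegalHoldOptions{VersionID: versionID})
	if err != nil {
		return err
	}
	fmt.Println("legal hold:", *status)
	return nil
}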

View File

@ -139,7 +139,7 @@ func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit
}
// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil.
func (c Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
func (c *Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -184,7 +184,7 @@ func (c Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string
}
// GetObjectLockConfig gets object lock configuration of given bucket.
func (c Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
func (c *Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", nil, nil, nil, err
@ -230,12 +230,12 @@ func (c Client) GetObjectLockConfig(ctx context.Context, bucketName string) (obj
}
// GetBucketObjectLockConfig gets object lock configuration of given bucket.
func (c Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
func (c *Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
_, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName)
return mode, validity, unit, err
}
// SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil.
func (c Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
func (c *Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit)
}
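
For reference, a minimal sketch of the lock-config API above, assuming the bucket was created with object locking enabled; the 30-day GOVERNANCE values are arbitrary placeholders:

package example

import (
	"context"

	"github.com/minio/minio-go/v7"
)

// lockBucket applies a 30-day GOVERNANCE default retention; mode, validity
// and unit must be either all set or all nil, as the doc comment above notes.
func lockBucket(ctx context.Context, c *minio.Client) error {
	mode := minio.Governance
	validity := uint(30)
	unit := minio.Days
	return c.SetBucketObjectLockConfig(ctx, "mybucket", &mode, &validity, &unit)
}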

View File

@ -63,7 +63,7 @@ type PutObjectRetentionOptions struct {
}
// PutObjectRetention sets object retention for a given object and versionID.
func (c Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error {
func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -126,7 +126,7 @@ func (c Client) PutObjectRetention(ctx context.Context, bucketName, objectName s
}
// GetObjectRetention gets retention of given object.
func (c Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) {
func (c *Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, nil, err
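
And a matching sketch for per-object retention; the one-day window and names are arbitrary placeholders:

package example

import (
	"context"
	"time"

	"github.com/minio/minio-go/v7"
)

// retainUntilTomorrow pins one object version under GOVERNANCE retention.
func retainUntilTomorrow(ctx context.Context, c *minio.Client, versionID string) error {
	mode := minio.Governance
	until := time.Now().Add(24 * time.Hour)
	return c.PutObjectRetention(ctx, "mybucket", "myobject", minio.PutObjectRetentionOptions{
		Mode:            &mode,
		RetainUntilDate: &until,
		VersionID:       versionID,
	})
}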

View File

@ -36,7 +36,7 @@ type PutObjectTaggingOptions struct {
// PutObjectTagging replaces or creates object tag(s) and can target
// a specific object version in a versioned bucket.
func (c Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error {
func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -87,7 +87,7 @@ type GetObjectTaggingOptions struct {
// GetObjectTagging fetches object tag(s) with options to target
// a specific object version in a versioned bucket.
func (c Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) {
func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
@ -125,7 +125,7 @@ type RemoveObjectTaggingOptions struct {
// RemoveObjectTagging removes object tag(s) with options to control a specific object
// version in a versioned bucket
func (c Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error {
func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
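
A short sketch of the tagging round trip; the tag keys and values are placeholders, and tags.NewTags comes from the pkg/tags subpackage this file already uses:

package example

import (
	"context"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/tags"
)

// tagObject attaches two tags to the latest version of an object.
func tagObject(ctx context.Context, c *minio.Client) error {
	t, err := tags.NewTags(map[string]string{"team": "infra", "tier": "hot"}, true)
	if err != nil {
		return err
	}
	return c.PutObjectTagging(ctx, "mybucket", "myobject", t, minio.PutObjectTaggingOptions{})
}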

View File

@ -30,7 +30,7 @@ import (
// presignURL - Returns a presigned URL for an input 'method'.
// Expires maximum is 7 days, i.e. 604800 seconds, and the minimum is 1 second.
func (c Client) presignURL(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
func (c *Client) presignURL(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
// Input validation.
if method == "" {
return nil, errInvalidArgument("method cannot be empty.")
@ -45,11 +45,12 @@ func (c Client) presignURL(ctx context.Context, method string, bucketName string
// Convert expires into seconds.
expireSeconds := int64(expires / time.Second)
reqMetadata := requestMetadata{
presignURL: true,
bucketName: bucketName,
objectName: objectName,
expires: expireSeconds,
queryValues: reqParams,
presignURL: true,
bucketName: bucketName,
objectName: objectName,
expires: expireSeconds,
queryValues: reqParams,
extraPresignHeader: extraHeaders,
}
// Instantiate a new request.
@ -65,43 +66,54 @@ func (c Client) presignURL(ctx context.Context, method string, bucketName string
// data without credentials. URL can have a maximum expiry of
// up to 7 days or a minimum of 1 sec. Additionally, you can override
// a set of response headers using the query parameters.
func (c Client) PresignedGetObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
func (c *Client) PresignedGetObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams)
return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams, nil)
}
// PresignedHeadObject - Returns a presigned URL to access
// object metadata without credentials. URL can have a maximum expiry
// of up to 7 days or a minimum of 1 sec. Additionally, you can override
// a set of response headers using the query parameters.
func (c Client) PresignedHeadObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
func (c *Client) PresignedHeadObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams)
return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams, nil)
}
// PresignedPutObject - Returns a presigned URL to upload an object
// without credentials. URL can have a maximum expiry of up to 7 days
// or a minimum of 1 sec.
func (c Client) PresignedPutObject(ctx context.Context, bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
func (c *Client) PresignedPutObject(ctx context.Context, bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil)
return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil, nil)
}
// Presign - returns a presigned URL for any http method of your choice
// along with custom request params. URL can have a maximum expiry of
// upto 7days or a minimum of 1sec.
func (c Client) Presign(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams)
// PresignHeader - similar to Presign() but allows including HTTP headers that
// will be used to build the signature. The request using the resulting URL will
// need to have the exact same headers to be added for signature validation to
// pass.
//
// FIXME: The extra header parameter should be included in Presign() in the next
// major version bump, and this function should then be deprecated.
func (c *Client) PresignHeader(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders)
}
// Presign - returns a presigned URL for any http method of your choice along
// with custom request params and extra signed headers. URL can have a maximum
// expiry of up to 7 days or a minimum of 1 sec.
func (c *Client) Presign(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, nil)
}
// PresignedPostPolicy - Returns POST urlString, form data to upload an object.
func (c Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
// Validate input arguments.
if p.expiration.IsZero() {
return nil, nil, errors.New("Expiration time must be specified")
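
The new PresignHeader is the notable addition in this file: it folds extra headers into the signature, and the eventual request must send exactly those headers (Signature V2 is rejected, see api.go below). A minimal sketch with placeholder names, assuming an initialized *minio.Client:

package example

import (
	"context"
	"net/http"
	"net/url"
	"time"

	"github.com/minio/minio-go/v7"
)

// presignWithHeader signs a GET URL that is only valid when the caller
// also sends the same header that was folded into the signature.
func presignWithHeader(ctx context.Context, c *minio.Client) (*url.URL, error) {
	extra := http.Header{}
	extra.Set("X-Amz-Meta-Audit", "true") // placeholder header
	return c.PresignHeader(ctx, http.MethodGet, "mybucket", "myobject",
		15*time.Minute, url.Values{}, extra)
}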

View File

@ -26,8 +26,8 @@ import (
"github.com/minio/minio-go/v7/pkg/s3utils"
)
/// Bucket operations
func (c Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
// Bucket operations
func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
// Validate the input arguments.
if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
return err
@ -42,7 +42,7 @@ func (c Client) makeBucket(ctx context.Context, bucketName string, opts MakeBuck
return err
}
func (c Client) doMakeBucket(ctx context.Context, bucketName string, location string, objectLockEnabled bool) (err error) {
func (c *Client) doMakeBucket(ctx context.Context, bucketName string, location string, objectLockEnabled bool) (err error) {
defer func() {
// Save the location into cache on a successful makeBucket response.
if err == nil {
@ -118,6 +118,6 @@ type MakeBucketOptions struct {
//
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
func (c Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
func (c *Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
return c.makeBucket(ctx, bucketName, opts)
}
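
A one-call sketch of MakeBucket as changed above; enabling ObjectLocking here is what the legal-hold and retention sketches earlier depend on (names remain placeholders):

package example

import (
	"context"

	"github.com/minio/minio-go/v7"
)

// makeLockedBucket creates a bucket with object locking enabled.
func makeLockedBucket(ctx context.Context, c *minio.Client) error {
	return c.MakeBucket(ctx, "mybucket", minio.MakeBucketOptions{
		Region:        "us-east-1", // placeholder region
		ObjectLocking: true,
	})
}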

View File

@ -26,6 +26,8 @@ import (
"github.com/minio/minio-go/v7/pkg/s3utils"
)
const nullVersionID = "null"
// Verify if reader is *minio.Object
func isObject(reader io.Reader) (ok bool) {
_, ok = reader.(*Object)
@ -130,7 +132,7 @@ func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCou
// getUploadID - fetch upload id if already present for an object name
// or initiate a new request to fetch a new upload id.
func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
func (c *Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err

View File

@ -27,7 +27,7 @@ import (
)
// FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.
func (c Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) {
func (c *Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return UploadInfo{}, err

View File

@ -37,7 +37,7 @@ import (
"github.com/minio/minio-go/v7/pkg/s3utils"
)
func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
opts PutObjectOptions) (info UploadInfo, err error) {
info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
if err != nil {
@ -56,7 +56,7 @@ func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName s
return info, err
}
func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return UploadInfo{}, err
@ -186,7 +186,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
@ -200,8 +200,10 @@ func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectN
urlValues.Set("uploads", "")
if opts.Internal.SourceVersionID != "" {
if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
return initiateMultipartUploadResult{}, errInvalidArgument(err.Error())
if opts.Internal.SourceVersionID != nullVersionID {
if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
return initiateMultipartUploadResult{}, errInvalidArgument(err.Error())
}
}
urlValues.Set("versionId", opts.Internal.SourceVersionID)
}
@ -237,7 +239,7 @@ func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectN
}
// uploadPart - Uploads a part in a multipart upload.
func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
@ -308,7 +310,7 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID
}
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
complete completeMultipartUpload, opts PutObjectOptions) (UploadInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {

View File

@ -41,7 +41,7 @@ import (
// - *minio.Object
// - Any reader which has a method 'ReadAt()'
//
func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
@ -90,7 +90,7 @@ type uploadPartReq struct {
// temporary files for staging all the data; these temporary files are
// cleaned automatically when the caller, i.e. the http client, closes the
// stream after uploading all the contents successfully.
func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
reader io.ReaderAt, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
@ -240,7 +240,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa
return uploadInfo, nil
}
func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
@ -369,7 +369,7 @@ func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bu
// putObject is a special function used for Google Cloud Storage, since
// Google's multipart API is not S3 compatible.
func (c Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
func (c *Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return UploadInfo{}, err
@ -430,7 +430,7 @@ func (c Client) putObject(ctx context.Context, bucketName, objectName string, re
// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) {
func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return UploadInfo{}, err
@ -452,8 +452,10 @@ func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string,
contentSHA256Hex: sha256Hex,
}
if opts.Internal.SourceVersionID != "" {
if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
return UploadInfo{}, errInvalidArgument(err.Error())
if opts.Internal.SourceVersionID != nullVersionID {
if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
return UploadInfo{}, errInvalidArgument(err.Error())
}
}
urlValues := make(url.Values)
urlValues.Set("versionId", opts.Internal.SourceVersionID)

View File

@ -221,7 +221,7 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
// - For size input as -1 PutObject does a multipart Put operation
// until input stream reaches EOF. Maximum object size that can
// be uploaded through this operation will be 5TiB.
func (c Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
opts PutObjectOptions) (info UploadInfo, err error) {
if objectSize < 0 && opts.DisableMultipart {
return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
@ -235,7 +235,7 @@ func (c Client) PutObject(ctx context.Context, bucketName, objectName string, re
return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
}
func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
@ -269,7 +269,7 @@ func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName stri
return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts)
}
func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return UploadInfo{}, err
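
For orientation, the public entry point above is normally used like this sketch; with objectSize -1 the same call would take the multipart streaming path unless DisableMultipart is set. Names are placeholders, c is an initialized *minio.Client:

package example

import (
	"context"
	"strings"

	"github.com/minio/minio-go/v7"
)

// upload streams a small payload with a known size.
func upload(ctx context.Context, c *minio.Client) (minio.UploadInfo, error) {
	body := strings.NewReader("hello world") // placeholder payload
	return c.PutObject(ctx, "mybucket", "hello.txt", body, body.Size(),
		minio.PutObjectOptions{ContentType: "text/plain"})
}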

View File

@ -0,0 +1,215 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"archive/tar"
"bufio"
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"sync"
"time"
"github.com/klauspost/compress/s2"
)
// SnowballOptions contains options for PutObjectsSnowball calls.
type SnowballOptions struct {
// Opts is options applied to all objects.
Opts PutObjectOptions
// Processing options:
// InMemory specifies that all objects should be collected in memory
// before they are uploaded.
// If false a temporary file will be created.
InMemory bool
// Compress enables content compression before upload.
// Compression will typically reduce memory and network usage.
// Compression can safely be enabled with MinIO hosts.
Compress bool
}
// SnowballObject contains information about a single object to be added to the snowball.
type SnowballObject struct {
// Key is the destination key, including prefix.
Key string
// Size is the content size of this object.
Size int64
// Modtime to apply to the object.
ModTime time.Time
// Content of the object.
// Exactly 'Size' number of bytes must be provided.
Content io.Reader
// Close will be called when an object has finished processing.
// Note that if PutObjectsSnowball returns because of an error,
// objects not consumed from the input will NOT have been closed.
// Leave as nil for no callback.
Close func()
}
type nopReadSeekCloser struct {
io.ReadSeeker
}
func (n nopReadSeekCloser) Close() error {
return nil
}
// This is available as io.ReadSeekCloser from go1.16
type readSeekCloser interface {
io.Reader
io.Closer
io.Seeker
}
// PutObjectsSnowball will put multiple objects with a single put call.
// A (compressed) TAR file will be created which will contain multiple objects.
// The key for each object will be used for the destination in the specified bucket.
// Total size should be < 5TB.
// This function blocks until 'objs' is closed and the content has been uploaded.
func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) {
err = opts.Opts.validate()
if err != nil {
return err
}
var tmpWriter io.Writer
var getTmpReader func() (rc readSeekCloser, sz int64, err error)
if opts.InMemory {
b := bytes.NewBuffer(nil)
tmpWriter = b
getTmpReader = func() (readSeekCloser, int64, error) {
return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil
}
} else {
f, err := ioutil.TempFile("", "s3-putsnowballobjects-*")
if err != nil {
return err
}
name := f.Name()
tmpWriter = f
var once sync.Once
defer once.Do(func() {
f.Close()
})
defer os.Remove(name)
getTmpReader = func() (readSeekCloser, int64, error) {
once.Do(func() {
f.Close()
})
f, err := os.Open(name)
if err != nil {
return nil, 0, err
}
st, err := f.Stat()
if err != nil {
return nil, 0, err
}
return f, st.Size(), nil
}
}
var flush = func() error { return nil }
if !opts.Compress {
if !opts.InMemory {
// Insert buffer for writes.
buf := bufio.NewWriterSize(tmpWriter, 1<<20)
flush = buf.Flush
tmpWriter = buf
}
} else {
s2c := s2.NewWriter(tmpWriter, s2.WriterBetterCompression())
flush = s2c.Close
defer s2c.Close()
tmpWriter = s2c
}
t := tar.NewWriter(tmpWriter)
objectLoop:
for {
select {
case <-ctx.Done():
return ctx.Err()
case obj, ok := <-objs:
if !ok {
break objectLoop
}
closeObj := func() {}
if obj.Close != nil {
closeObj = obj.Close
}
// Trim accidental slash prefix.
obj.Key = strings.TrimPrefix(obj.Key, "/")
header := tar.Header{
Typeflag: tar.TypeReg,
Name: obj.Key,
Size: obj.Size,
ModTime: obj.ModTime,
Format: tar.FormatPAX,
}
if err := t.WriteHeader(&header); err != nil {
closeObj()
return err
}
n, err := io.Copy(t, obj.Content)
if err != nil {
closeObj()
return err
}
if n != obj.Size {
closeObj()
return io.ErrUnexpectedEOF
}
closeObj()
}
}
// Flush tar
err = t.Flush()
if err != nil {
return err
}
// Flush compression
err = flush()
if err != nil {
return err
}
if opts.Opts.UserMetadata == nil {
opts.Opts.UserMetadata = map[string]string{}
}
opts.Opts.UserMetadata["X-Amz-Meta-Snowball-Auto-Extract"] = "true"
opts.Opts.DisableMultipart = true
rc, sz, err := getTmpReader()
if err != nil {
return err
}
defer rc.Close()
rand := c.random.Uint64()
_, err = c.PutObject(ctx, bucketName, fmt.Sprintf("snowball-upload-%x.tar", rand), rc, sz, opts.Opts)
return err
}
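
Since api-putobject-snowball.go is new in this release, a usage sketch may help; it follows the exported types above, with placeholder bucket and key names and an assumed initialized *minio.Client:

package example

import (
	"bytes"
	"context"
	"time"

	"github.com/minio/minio-go/v7"
)

// snowballUpload batches two small objects into one auto-extracted TAR upload.
func snowballUpload(ctx context.Context, c *minio.Client) error {
	objs := make(chan minio.SnowballObject, 2)
	for _, name := range []string{"a.txt", "b.txt"} { // placeholder keys
		data := []byte("payload for " + name)
		objs <- minio.SnowballObject{
			Key:     "batch/" + name,
			Size:    int64(len(data)),
			ModTime: time.Now(),
			Content: bytes.NewReader(data),
		}
	}
	close(objs) // PutObjectsSnowball blocks until the channel is closed.
	return c.PutObjectsSnowball(ctx, "mybucket", minio.SnowballOptions{
		InMemory: true, // stage the TAR in memory instead of a temp file
		Compress: true, // safe with MinIO hosts, per the comment above
	}, objs)
}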

View File

@ -29,9 +29,16 @@ import (
"github.com/minio/minio-go/v7/pkg/s3utils"
)
// BucketOptions special headers to purge buckets, only
//revive:disable
// Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions.
type BucketOptions = RemoveBucketOptions
//revive:enable
// RemoveBucketOptions special headers to purge buckets, only
// useful when endpoint is MinIO
type BucketOptions struct {
type RemoveBucketOptions struct {
ForceDelete bool
}
@ -40,7 +47,7 @@ type BucketOptions struct {
// All objects (including all object versions and delete markers)
// in the bucket will be deleted forcibly if bucket options set
// ForceDelete to 'true'.
func (c Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts BucketOptions) error {
func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts RemoveBucketOptions) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -77,7 +84,7 @@ func (c Client) RemoveBucketWithOptions(ctx context.Context, bucketName string,
//
// All objects (including all object versions and delete markers)
// in the bucket must be deleted before successfully attempting this request.
func (c Client) RemoveBucket(ctx context.Context, bucketName string) error {
func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -120,7 +127,7 @@ type RemoveObjectOptions struct {
}
// RemoveObject removes an object from a bucket.
func (c Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {
func (c *Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -132,7 +139,7 @@ func (c Client) RemoveObject(ctx context.Context, bucketName, objectName string,
return c.removeObject(ctx, bucketName, objectName, opts)
}
func (c Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {
func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {
// Get resources properly escaped and lined up before
// using them in http request.
@ -246,7 +253,7 @@ type RemoveObjectsOptions struct {
// RemoveObjects removes multiple objects from a bucket; it is possible
// to specify object versions, which are received from objectsCh.
// Remove failures are sent back via the error channel.
func (c Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError {
func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError {
errorCh := make(chan RemoveObjectError, 1)
// Validate if bucket name is valid.
@ -291,7 +298,7 @@ func hasInvalidXMLChar(str string) bool {
}
// Generate and call MultiDelete S3 requests based on entries received from objectsCh
func (c Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, errorCh chan<- RemoveObjectError, opts RemoveObjectsOptions) {
func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, errorCh chan<- RemoveObjectError, opts RemoveObjectsOptions) {
maxEntries := 1000
finish := false
urlValues := make(url.Values)
@ -389,7 +396,7 @@ func (c Client) removeObjects(ctx context.Context, bucketName string, objectsCh
}
// RemoveIncompleteUpload aborts a partially uploaded object.
func (c Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -416,7 +423,7 @@ func (c Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectNa
// abortMultipartUpload aborts a multipart upload for the given
// uploadID; all previously uploaded parts are deleted.
func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
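
A sketch tying the removal APIs together, including the renamed RemoveBucketOptions (BucketOptions survives as a deprecated alias); bucket and prefix are placeholders:

package example

import (
	"context"

	"github.com/minio/minio-go/v7"
)

// purge deletes everything under a prefix, then force-deletes the bucket.
func purge(ctx context.Context, c *minio.Client) error {
	objs := c.ListObjects(ctx, "mybucket",
		minio.ListObjectsOptions{Prefix: "tmp/", Recursive: true})
	for rErr := range c.RemoveObjects(ctx, "mybucket", objs, minio.RemoveObjectsOptions{}) {
		if rErr.Err != nil {
			return rErr.Err
		}
	}
	return c.RemoveBucketWithOptions(ctx, "mybucket",
		minio.RemoveBucketOptions{ForceDelete: true})
}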

View File

@ -68,7 +68,7 @@ type MetadataEntry struct {
// S3 holds properties of the copy of the archived object
type S3 struct {
AccessControlList *AccessControlList `xml:"AccessControlList,omiempty"`
AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"`
BucketName string
Prefix string
CannedACL *string `xml:"CannedACL,omitempty"`
@ -110,7 +110,7 @@ func (r *RestoreRequest) SetDays(v int) {
r.Days = &v
}
// SetDays sets the GlacierJobParameters of the restore request
// SetGlacierJobParameters sets the GlacierJobParameters of the restore request
func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) {
r.GlacierJobParameters = &v
}
@ -141,7 +141,7 @@ func (r *RestoreRequest) SetOutputLocation(v OutputLocation) {
}
// RestoreObject is an implementation of https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html AWS S3 API
func (c Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error {
func (c *Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
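
A hedged sketch of RestoreObject; the 3-day window, standard tier and names are arbitrary placeholder choices:

package example

import (
	"context"

	"github.com/minio/minio-go/v7"
)

// thawArchive requests a 3-day standard-tier restore of an archived object.
func thawArchive(ctx context.Context, c *minio.Client) error {
	req := minio.RestoreRequest{}
	req.SetDays(3)
	req.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierStandard})
	return c.RestoreObject(ctx, "mybucket", "archived.bin", "", req)
}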

View File

@ -121,8 +121,8 @@ func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement
return err
}
switch se := t.(type) {
case xml.StartElement:
se, ok := t.(xml.StartElement)
if ok {
tagName := se.Name.Local
switch tagName {
case "Name", "Prefix",

View File

@ -438,7 +438,7 @@ const (
)
// SelectObjectContent is an implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API.
func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) {
func (c *Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
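
SelectObjectContent streams query results back as a reader; a minimal CSV sketch with a placeholder query and object name, assuming an initialized *minio.Client:

package example

import (
	"context"
	"io"
	"os"

	"github.com/minio/minio-go/v7"
)

// selectCSV runs an S3 Select query against a CSV object and streams results.
func selectCSV(ctx context.Context, c *minio.Client) error {
	res, err := c.SelectObjectContent(ctx, "mybucket", "data.csv", minio.SelectObjectOptions{
		Expression:     "SELECT s.* FROM s3object s LIMIT 10", // placeholder query
		ExpressionType: minio.QueryExpressionTypeSQL,
		InputSerialization: minio.SelectObjectInputSerialization{
			CSV: &minio.CSVInputOptions{FileHeaderInfo: minio.CSVFileHeaderInfoUse},
		},
		OutputSerialization: minio.SelectObjectOutputSerialization{
			CSV: &minio.CSVOutputOptions{RecordDelimiter: "\n", FieldDelimiter: ","},
		},
	})
	if err != nil {
		return err
	}
	defer res.Close()
	_, err = io.Copy(os.Stdout, res)
	return err
}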

View File

@ -27,7 +27,7 @@ import (
// BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to
// control cancellations and timeouts.
func (c Client) BucketExists(ctx context.Context, bucketName string) (bool, error) {
func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return false, err
@ -58,19 +58,7 @@ func (c Client) BucketExists(ctx context.Context, bucketName string) (bool, erro
}
// StatObject verifies if object exists and you have permission to access it.
func (c Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
return c.statObject(ctx, bucketName, objectName, opts)
}
// Lower level API for statObject supporting pre-conditions and range headers.
func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
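
With statObject folded into StatObject, pre-conditions such as ETag matching now go through the public API directly (and the Core.StatObject shim below is removed). A small sketch; etag is a caller-supplied placeholder:

package example

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
)

// headObject shows StatObject carrying a pre-condition, something that
// previously required the unexported statObject (or Core.StatObject).
func headObject(ctx context.Context, c *minio.Client, etag string) error {
	opts := minio.StatObjectOptions{}
	opts.SetMatchETag(etag)
	info, err := c.StatObject(ctx, "mybucket", "myobject", opts)
	if err != nil {
		return err
	}
	fmt.Println(info.Size, info.ContentType)
	return nil
}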

View File

@ -46,7 +46,7 @@ import (
// Client implements Amazon S3 compatible methods.
type Client struct {
/// Standard options.
// Standard options.
// Parsed endpoint url provided by the user.
endpointURL *url.URL
@ -92,9 +92,7 @@ type Client struct {
md5Hasher func() md5simd.Hasher
sha256Hasher func() md5simd.Hasher
healthCheckCh chan struct{}
healthCheck int32
lastOnline time.Time
healthStatus int32
}
// Options for New method
@ -113,7 +111,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.14"
libraryVersion = "v7.0.16"
)
// User Agent should always follow the below style.
@ -312,7 +310,7 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
clnt.lookup = opts.BucketLookup
// healthcheck is not initialized
clnt.healthCheck = unknown
clnt.healthStatus = unknown
// Return.
return clnt, nil
@ -404,30 +402,30 @@ const (
// IsOnline returns true if healthcheck enabled and client is online
func (c *Client) IsOnline() bool {
switch atomic.LoadInt32(&c.healthCheck) {
case online, unknown:
return true
}
return false
return !c.IsOffline()
}
// sets online healthStatus to offline
func (c *Client) markOffline() {
atomic.CompareAndSwapInt32(&c.healthStatus, online, offline)
}
// IsOffline returns true if healthcheck enabled and client is offline
func (c *Client) IsOffline() bool {
return !c.IsOnline()
return atomic.LoadInt32(&c.healthStatus) == offline
}
// HealthCheck starts a healthcheck to see if endpoint is up. Returns a context cancellation function
// and an error if the health check is already started
func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) {
if atomic.LoadInt32(&c.healthCheck) == online {
return nil, fmt.Errorf("health check running already")
if atomic.LoadInt32(&c.healthStatus) == online {
return nil, fmt.Errorf("health check is running")
}
if hcDuration < 1*time.Second {
return nil, fmt.Errorf("health check duration should be atleast 1 second")
}
ctx, cancelFn := context.WithCancel(context.Background())
c.healthCheckCh = make(chan struct{})
atomic.StoreInt32(&c.healthCheck, online)
atomic.StoreInt32(&c.healthStatus, online)
probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-")
go func(duration time.Duration) {
timer := time.NewTimer(duration)
@ -435,27 +433,24 @@ func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, erro
for {
select {
case <-ctx.Done():
close(c.healthCheckCh)
atomic.StoreInt32(&c.healthCheck, unknown)
atomic.StoreInt32(&c.healthStatus, unknown)
return
case <-timer.C:
timer.Reset(duration)
// Do health check the first time and ONLY if the connection is marked offline
if c.IsOffline() || c.lastOnline.IsZero() {
_, err := c.getBucketLocation(context.Background(), probeBucketName)
if err != nil && IsNetworkOrHostDown(err, false) {
atomic.StoreInt32(&c.healthCheck, offline)
if c.IsOffline() {
gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second)
_, err := c.getBucketLocation(gctx, probeBucketName)
gcancel()
if IsNetworkOrHostDown(err, false) {
// Still network errors do not need to do anything.
continue
}
switch ToErrorResponse(err).Code {
case "NoSuchBucket", "AccessDenied", "":
c.lastOnline = time.Now()
atomic.StoreInt32(&c.healthCheck, online)
atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
}
}
case <-c.healthCheckCh:
// set offline if client saw a network error
atomic.StoreInt32(&c.healthCheck, offline)
}
}
}(hcDuration)
@ -468,11 +463,12 @@ type requestMetadata struct {
presignURL bool
// User supplied.
bucketName string
objectName string
queryValues url.Values
customHeader http.Header
expires int64
bucketName string
objectName string
queryValues url.Values
customHeader http.Header
extraPresignHeader http.Header
expires int64
// Generated by our internal code.
bucketLocation string
@ -483,7 +479,7 @@ type requestMetadata struct {
}
// dumpHTTP - dump HTTP request and response.
func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// Starts http dump.
_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
if err != nil {
@ -543,8 +539,14 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
}
// do - execute http request.
func (c Client) do(req *http.Request) (*http.Response, error) {
resp, err := c.httpClient.Do(req)
func (c *Client) do(req *http.Request) (resp *http.Response, err error) {
defer func() {
if IsNetworkOrHostDown(err, false) {
c.markOffline()
}
}()
resp, err = c.httpClient.Do(req)
if err != nil {
// Handle this specifically for now until future Golang versions fix this issue properly.
if urlErr, ok := err.(*url.Error); ok {
@ -587,7 +589,11 @@ var successStatus = []int{
// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm.
func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
func (c *Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
if c.IsOffline() {
return nil, errors.New(c.endpointURL.String() + " is offline.")
}
var retryable bool // Indicates if request can be retried.
var bodySeeker io.Seeker // Extracted seeker from io.Reader.
var reqRetry = MaxRetry // Indicates how many times we can retry the request
@ -641,24 +647,11 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque
continue // Retry.
}
if atomic.LoadInt32(&c.healthCheck) != unknown && IsNetworkOrHostDown(err, false) {
select {
case c.healthCheckCh <- struct{}{}:
default:
}
}
return nil, err
}
// Initiate the request.
res, err = c.do(req)
if err != nil {
if atomic.LoadInt32(&c.healthCheck) != unknown && IsNetworkOrHostDown(err, false) {
select {
case c.healthCheckCh <- struct{}{}:
default:
}
}
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return nil, err
}
@ -753,7 +746,7 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque
}
// newRequest - instantiate a new HTTP request for a given method.
func (c Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) {
func (c *Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) {
// If no method is supplied default to 'POST'.
if method == "" {
method = http.MethodPost
@ -821,6 +814,14 @@ func (c Client) newRequest(ctx context.Context, method string, metadata requestM
if signerType.IsAnonymous() {
return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
}
if metadata.extraPresignHeader != nil {
if signerType.IsV2() {
return nil, errInvalidArgument("Extra signed headers for Presign with Signature V2 is not supported.")
}
for k, v := range metadata.extraPresignHeader {
req.Header.Set(k, v[0])
}
}
if signerType.IsV2() {
// Presign URL with signature v2.
req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost)
@ -893,7 +894,7 @@ func (c Client) newRequest(ctx context.Context, method string, metadata requestM
}
// set User agent.
func (c Client) setUserAgent(req *http.Request) {
func (c *Client) setUserAgent(req *http.Request) {
req.Header.Set("User-Agent", libraryUserAgent)
if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
@ -901,7 +902,7 @@ func (c Client) setUserAgent(req *http.Request) {
}
// makeTargetURL make a new target url.
func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) {
func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) {
host := c.endpointURL.Host
// For Amazon S3 endpoint, try to fetch location based endpoint.
if s3utils.IsAmazonEndpoint(*c.endpointURL) {
@ -946,13 +947,13 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isV
if isVirtualHostStyle {
urlStr = scheme + "://" + bucketName + "." + host + "/"
if objectName != "" {
urlStr = urlStr + s3utils.EncodePath(objectName)
urlStr += s3utils.EncodePath(objectName)
}
} else {
// If not fall back to using path style.
urlStr = urlStr + bucketName + "/"
if objectName != "" {
urlStr = urlStr + s3utils.EncodePath(objectName)
urlStr += s3utils.EncodePath(objectName)
}
}
}
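
The reworked health check above is driven entirely by do() marking the client offline on network errors and the probe goroutine flipping it back. A minimal sketch of starting and observing it; the 2-second interval is arbitrary (the minimum is 1 second):

package example

import (
	"fmt"
	"time"

	"github.com/minio/minio-go/v7"
)

// watchEndpoint starts the health checker and reports the client state.
func watchEndpoint(c *minio.Client) error {
	cancel, err := c.HealthCheck(2 * time.Second)
	if err != nil {
		return err // a health check is already running
	}
	defer cancel()
	time.Sleep(5 * time.Second) // let at least one probe fire
	fmt.Println("online:", c.IsOnline())
	return nil
}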

View File

@ -73,7 +73,7 @@ func (r *bucketLocationCache) Delete(bucketName string) {
// GetBucketLocation - get location for the bucket name from the location cache;
// if not cached, fetch it freshly by making a new request.
func (c Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) {
func (c *Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
}
@ -82,7 +82,7 @@ func (c Client) GetBucketLocation(ctx context.Context, bucketName string) (strin
// getBucketLocation - Get location for the bucketName from the location map cache;
// if not cached, fetch it freshly by making a new request.
func (c Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) {
func (c *Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
}
@ -169,7 +169,7 @@ func processBucketLocationResponse(resp *http.Response, bucketName string) (buck
}
// getBucketLocationRequest - Wrapper that creates a new getBucketLocation request.
func (c Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) {
func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) {
// Set location query.
urlValues := make(url.Values)
urlValues.Set("location", "")
@ -188,7 +188,7 @@ func (c Client) getBucketLocationRequest(ctx context.Context, bucketName string)
var urlStr string
//only support Aliyun OSS for virtual hosted path, compatible Amazon & Google Endpoint
// only support Aliyun OSS for virtual hosted path, compatible Amazon & Google Endpoint
if isVirtualHost && s3utils.IsAliyunOSSEndpoint(targetURL) {
urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location"
} else {

View File

@ -17,7 +17,7 @@
package minio
/// Multipart upload defaults.
// Multipart upload defaults.
// absMinPartSize - absolute minimum part size (5 MiB) below which
// a part in a multipart upload may not be uploaded.

View File

@ -125,9 +125,3 @@ func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string)
func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
return c.getObject(ctx, bucketName, objectName, opts)
}
// StatObject is a lower level API implemented to support special
// conditions matching etag, modtime on a request.
func (c Core) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
return c.statObject(ctx, bucketName, objectName, opts)
}

View File

@ -20,6 +20,7 @@
package main
import (
"archive/zip"
"bytes"
"context"
"errors"
@ -32,6 +33,7 @@ import (
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"reflect"
"runtime"
@ -151,6 +153,8 @@ func logError(testName string, function string, args map[string]interface{}, sta
// addition to NotImplemented error returned from server
if isErrNotImplemented(err) {
ignoredLog(testName, function, args, startTime, message).Info()
} else if isRunOnFail() {
failureLog(testName, function, args, startTime, alert, message, err).Error()
} else {
failureLog(testName, function, args, startTime, alert, message, err).Fatal()
}
@ -260,6 +264,10 @@ func isErrNotImplemented(err error) bool {
return minio.ToErrorResponse(err).Code == "NotImplemented"
}
func isRunOnFail() bool {
return os.Getenv("RUN_ON_FAIL") == "1"
}
func init() {
// If server endpoint is not set, all tests default to
// using https://play.min.io
@ -2885,8 +2893,8 @@ func testFPutObject() {
logError(testName, function, args, startTime, "", "StatObject failed", err)
return
}
if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" {
logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar or application/octet-stream, got "+rGTar.ContentType, err)
if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" {
logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-tar or application/octet-stream, got "+rGTar.ContentType, err)
return
}
@ -3174,6 +3182,189 @@ func testPutObjectContext() {
}
// Tests get object with s3zip extensions.
func testGetObjectS3Zip() {
// initialize logging params
startTime := time.Now()
testName := getFuncName()
function := "GetObject(bucketName, objectName)"
args := map[string]interface{}{"x-minio-extract": true}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Instantiate new minio client object.
c, err := minio.New(os.Getenv(serverEndpoint),
&minio.Options{
Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
Secure: mustParseBool(os.Getenv(enableHTTPS)),
})
if err != nil {
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
return
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
if err != nil {
logError(testName, function, args, startTime, "", "MakeBucket failed", err)
return
}
defer func() {
// Delete all objects and buckets
if err = cleanupBucket(bucketName, c); err != nil {
logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
return
}
}()
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + ".zip"
args["objectName"] = objectName
var zipFile bytes.Buffer
zw := zip.NewWriter(&zipFile)
rng := rand.New(rand.NewSource(0xc0cac01a))
const nFiles = 500
for i := 0; i <= nFiles; i++ {
if i == nFiles {
// Make one large, compressible file.
i = 1000000
}
b := make([]byte, i)
if i < nFiles {
rng.Read(b)
}
wc, err := zw.Create(fmt.Sprintf("test/small/file-%d.bin", i))
if err != nil {
logError(testName, function, args, startTime, "", "zw.Create failed", err)
return
}
wc.Write(b)
}
err = zw.Close()
if err != nil {
logError(testName, function, args, startTime, "", "zw.Close failed", err)
return
}
buf := zipFile.Bytes()
// Save the data
_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
// Read the data back
r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "GetObject failed", err)
return
}
st, err := r.Stat()
if err != nil {
logError(testName, function, args, startTime, "", "Stat object failed", err)
return
}
if st.Size != int64(len(buf)) {
logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(len(buf))+", got "+string(st.Size), err)
return
}
r.Close()
zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
if err != nil {
logError(testName, function, args, startTime, "", "zip.NewReader failed", err)
return
}
lOpts := minio.ListObjectsOptions{}
lOpts.Set("x-minio-extract", "true")
lOpts.Prefix = objectName + "/"
lOpts.Recursive = true
list := c.ListObjects(context.Background(), bucketName, lOpts)
var listed = map[string]minio.ObjectInfo{}
for item := range list {
if item.Err != nil {
break
}
listed[item.Key] = item
}
if len(listed) == 0 {
// Assume we are running against non-minio.
args["SKIPPED"] = true
ignoredLog(testName, function, args, startTime, "s3zip does not appear to be present").Info()
return
}
for _, file := range zr.File {
if file.FileInfo().IsDir() {
continue
}
args["zipfile"] = file.Name
zfr, err := file.Open()
if err != nil {
logError(testName, function, args, startTime, "", "file.Open failed", err)
return
}
want, err := ioutil.ReadAll(zfr)
if err != nil {
logError(testName, function, args, startTime, "", "fzip file read failed", err)
return
}
opts := minio.GetObjectOptions{}
opts.Set("x-minio-extract", "true")
key := path.Join(objectName, file.Name)
r, err = c.GetObject(context.Background(), bucketName, key, opts)
if err != nil {
terr := minio.ToErrorResponse(err)
if terr.StatusCode != http.StatusNotFound {
logError(testName, function, args, startTime, "", "GetObject failed", err)
}
return
}
got, err := ioutil.ReadAll(r)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
}
r.Close()
if !bytes.Equal(want, got) {
logError(testName, function, args, startTime, "", "Content mismatch", err)
return
}
oi, ok := listed[key]
if !ok {
logError(testName, function, args, startTime, "", "Object Missing", fmt.Errorf("%s not present in listing", key))
return
}
if int(oi.Size) != len(got) {
logError(testName, function, args, startTime, "", "Object Size Incorrect", fmt.Errorf("listing %d, read %d", oi.Size, len(got)))
return
}
delete(listed, key)
}
delete(args, "zipfile")
if len(listed) > 0 {
logError(testName, function, args, startTime, "", "Extra listed objects", fmt.Errorf("left over: %v", listed))
return
}
successLogger(testName, function, args, startTime).Info()
}
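// A condensed sketch of the s3zip access pattern exercised above (names are
// illustrative): a single archive entry is read by suffixing the object key
// with the in-archive path and setting the x-minio-extract header. Assumes
// the context, path, ioutil, and minio imports already present in this file.
func getZipEntry(ctx context.Context, c *minio.Client, bucket, zipObject, entry string) ([]byte, error) {
	opts := minio.GetObjectOptions{}
	opts.Set("x-minio-extract", "true") // ask a MinIO server to extract from the zip
	r, err := c.GetObject(ctx, bucket, path.Join(zipObject, entry), opts)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}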
// Tests get object ReaderSeeker interface methods.
func testGetObjectReadSeekFunctional() {
// initialize logging params
@ -5902,6 +6093,63 @@ func testFunctional() {
return
}
function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)"
functionAll += ", " + function
presignExtraHeaders := map[string][]string{
"mysecret": {"abcxxx"},
}
args = map[string]interface{}{
"method": "PUT",
"bucketName": bucketName,
"objectName": objectName + "-presign-custom",
"expires": 3600 * time.Second,
"extraHeaders": presignExtraHeaders,
}
presignedURL, err := c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders)
if err != nil {
logError(testName, function, args, startTime, "", "Presigned failed", err)
return
}
// Generate data more than 32K
buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
req, err = http.NewRequest(http.MethodPut, presignedURL.String(), bytes.NewReader(buf))
if err != nil {
logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err)
return
}
req.Header.Add("mysecret", "abcxxx")
resp, err = httpClient.Do(req)
if err != nil {
logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err)
return
}
// Download the uploaded object to verify
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName + "-presign-custom",
}
newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presign-custom", minio.GetObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "GetObject of uploaded custom-presigned object failed", err)
return
}
newReadBytes, err = ioutil.ReadAll(newReader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err)
return
}
newReader.Close()
if !bytes.Equal(newReadBytes, buf) {
logError(testName, function, args, startTime, "", "Bytes mismatch on custom-presigned object upload verification", err)
return
}
function = "RemoveObject(bucketName, objectName)"
functionAll += ", " + function
args = map[string]interface{}{
@ -5938,6 +6186,14 @@ func testFunctional() {
return
}
args["objectName"] = objectName + "-presign-custom"
err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "RemoveObject failed", err)
return
}
function = "RemoveBucket(bucketName)"
functionAll += ", " + function
args = map[string]interface{}{
@ -6476,8 +6732,8 @@ func testFPutObjectV2() {
logError(testName, function, args, startTime, "", "Unexpected size", nil)
return
}
if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" {
logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err)
if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" {
logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-tar , got "+rGTar.ContentType, err)
return
}
@ -10680,27 +10936,44 @@ func testFunctionalV2() {
return
}
function = "GetObject(bucketName, objectName)"
functionAll += ", " + function
// Download the uploaded object to verify
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName + "-presigned",
}
newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "GetObject failed", err)
logError(testName, function, args, startTime, "", "GetObject of uploaded presigned object failed", err)
return
}
newReadBytes, err = ioutil.ReadAll(newReader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err)
return
}
newReader.Close()
if !bytes.Equal(newReadBytes, buf) {
logError(testName, function, args, startTime, "", "Bytes mismatch", err)
logError(testName, function, args, startTime, "", "Bytes mismatch on presigned object upload verification", err)
return
}
function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)"
functionAll += ", " + function
presignExtraHeaders := map[string][]string{
"mysecret": {"abcxxx"},
}
args = map[string]interface{}{
"method": "PUT",
"bucketName": bucketName,
"objectName": objectName + "-presign-custom",
"expires": 3600 * time.Second,
"extraHeaders": presignExtraHeaders,
}
_, err = c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders)
if err == nil {
logError(testName, function, args, startTime, "", "Presigned with extra headers succeeded", err)
return
}
@ -11596,6 +11869,7 @@ func testRemoveObjects() {
_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "Error uploading object", err)
return
}
// Replace with smaller...
@ -11617,7 +11891,8 @@ func testRemoveObjects() {
}
err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
if err != nil {
log.Fatalln(err)
logError(testName, function, args, startTime, "", "Error setting retention", err)
return
}
objectsCh := make(chan minio.ObjectInfo)
@ -11627,7 +11902,8 @@ func testRemoveObjects() {
// List all objects from a bucket-name with a matching prefix.
for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
if object.Err != nil {
log.Fatalln(object.Err)
logError(testName, function, args, startTime, "", "Error listing objects", object.Err)
return
}
objectsCh <- object
}
@ -11650,7 +11926,8 @@ func testRemoveObjects() {
// List all objects from a bucket-name with a matching prefix.
for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
if object.Err != nil {
log.Fatalln(object.Err)
logError(testName, function, args, startTime, "", "Error listing objects", object.Err)
return
}
objectsCh1 <- object
}
@ -11730,6 +12007,7 @@ func main() {
testPutObjectStreaming()
testGetObjectSeekEnd()
testGetObjectClosedTwice()
testGetObjectS3Zip()
testRemoveMultipleObjects()
testFPutObjectMultipart()
testFPutObject()


@ -112,7 +112,7 @@ func (m *IAM) Retrieve() (Value, error) {
return &WebIdentityToken{Token: string(token)}, nil
},
roleARN: os.Getenv("AWS_ROLE_ARN"),
RoleARN: os.Getenv("AWS_ROLE_ARN"),
roleSessionName: os.Getenv("AWS_ROLE_SESSION_NAME"),
}


@ -0,0 +1,192 @@
// MinIO Go Library for Amazon S3 Compatible Cloud Storage
// Copyright 2021 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package credentials
import (
"crypto/tls"
"encoding/xml"
"errors"
"io"
"net"
"net/http"
"net/url"
"strconv"
"time"
)
// CertificateIdentityOption is an optional AssumeRoleWithCertificate
// parameter - e.g. a custom HTTP transport configuration or S3 credential
// lifetime.
type CertificateIdentityOption func(*STSCertificateIdentity)
// CertificateIdentityWithTransport returns a CertificateIdentityOption that
// customizes the STSCertificateIdentity with the given http.RoundTripper.
func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption {
return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t })
}
// CertificateIdentityWithExpiry returns a CertificateIdentityOption that
// customizes the STSCertificateIdentity with the given lifetime.
//
// Fetched S3 credentials will have the given lifetime if the STS server
// allows such credentials.
func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOption {
return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = livetime })
}
// A STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and
// rotates those credentials once they expire.
type STSCertificateIdentity struct {
Expiry
// STSEndpoint is the base URL endpoint of the STS API.
// For example, https://minio.local:9000
STSEndpoint string
// S3CredentialLivetime is the duration for which the temporary S3
// access credentials should remain valid.
//
// It represents the credential lifetime requested by the client.
// The STS server may choose to issue temporary S3 credentials
// with a different, usually shorter, lifetime.
//
// The default lifetime is one hour.
S3CredentialLivetime time.Duration
// Client is the HTTP client used to authenticate and fetch
// S3 credentials.
//
// A custom TLS client configuration can be specified by
// using a custom http.Transport:
// Client: http.Client {
// Transport: &http.Transport{
// TLSClientConfig: &tls.Config{},
// },
// }
Client http.Client
}
var _ Provider = (*STSCertificateIdentity)(nil) // compiler check
// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates
// to the given STS endpoint with the given TLS certificate and retrieves and
// rotates S3 credentials.
func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) {
if endpoint == "" {
return nil, errors.New("STS endpoint cannot be empty")
}
if _, err := url.Parse(endpoint); err != nil {
return nil, err
}
var identity = &STSCertificateIdentity{
STSEndpoint: endpoint,
Client: http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 5 * time.Second,
TLSClientConfig: &tls.Config{
Certificates: []tls.Certificate{certificate},
},
},
},
}
for _, option := range options {
option(identity)
}
return New(identity), nil
}
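// A hedged usage sketch (endpoint and key-pair paths are illustrative):
// wiring the certificate identity provider above into a credentials object.
// Uses only the crypto/tls and time imports already present in this file.
func exampleCertificateIdentity() (*Credentials, error) {
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key") // hypothetical paths
	if err != nil {
		return nil, err
	}
	return NewSTSCertificateIdentity(
		"https://minio.local:9000", // hypothetical STS endpoint
		cert,
		CertificateIdentityWithExpiry(30*time.Minute),
	)
}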
// Retrieve fetches a new set of S3 credentials from the configured
// STS API endpoint.
func (i *STSCertificateIdentity) Retrieve() (Value, error) {
endpointURL, err := url.Parse(i.STSEndpoint)
if err != nil {
return Value{}, err
}
var livetime = i.S3CredentialLivetime
if livetime == 0 {
livetime = 1 * time.Hour
}
queryValues := url.Values{}
queryValues.Set("Action", "AssumeRoleWithCertificate")
queryValues.Set("Version", STSVersion)
endpointURL.RawQuery = queryValues.Encode()
req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil)
if err != nil {
return Value{}, err
}
if req.Form == nil {
	req.Form = url.Values{} // guard against assignment to a nil map
}
req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))
resp, err := i.Client.Do(req)
if err != nil {
return Value{}, err
}
if resp.Body != nil {
defer resp.Body.Close()
}
if resp.StatusCode != http.StatusOK {
return Value{}, errors.New(resp.Status)
}
const MaxSize = 10 * 1 << 20
var body io.Reader = resp.Body
if resp.ContentLength > 0 && resp.ContentLength < MaxSize {
body = io.LimitReader(body, resp.ContentLength)
} else {
body = io.LimitReader(body, MaxSize)
}
var response assumeRoleWithCertificateResponse
if err = xml.NewDecoder(body).Decode(&response); err != nil {
return Value{}, err
}
i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow)
return Value{
AccessKeyID: response.Result.Credentials.AccessKey,
SecretAccessKey: response.Result.Credentials.SecretKey,
SessionToken: response.Result.Credentials.SessionToken,
SignerType: SignatureDefault,
}, nil
}
// Expiration returns the expiration time of the current S3 credentials.
func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration }
type assumeRoleWithCertificateResponse struct {
XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"`
Result struct {
Credentials struct {
AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
} `xml:"Credentials" json:"credentials,omitempty"`
} `xml:"AssumeRoleWithCertificateResult"`
ResponseMetadata struct {
RequestID string `xml:"RequestId,omitempty"`
} `xml:"ResponseMetadata,omitempty"`
}


@ -124,7 +124,7 @@ func stripPassword(err error) error {
// LDAP Identity with a specified session policy. The `policy` parameter must be
// a JSON string specifying the policy document.
//
// DEPRECATED: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) {
return New(&LDAPIdentity{
Client: &http.Client{Transport: http.DefaultTransport},


@ -78,9 +78,9 @@ type STSWebIdentity struct {
// This is a customer provided function and is mandatory.
GetWebIDTokenExpiry func() (*WebIdentityToken, error)
// roleARN is the Amazon Resource Name (ARN) of the role that the caller is
// RoleARN is the Amazon Resource Name (ARN) of the role that the caller is
// assuming.
roleARN string
RoleARN string
// roleSessionName is the identifier for the assumed role session.
roleSessionName string
@ -164,7 +164,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
// Retrieve retrieves credentials from the MinIO service.
// Error will be returned if the request fails.
func (m *STSWebIdentity) Retrieve() (Value, error) {
a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.roleARN, m.roleSessionName, m.GetWebIDTokenExpiry)
a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry)
if err != nil {
return Value{}, err
}


@ -21,9 +21,12 @@ package lifecycle
import (
"encoding/json"
"encoding/xml"
"errors"
"time"
)
var errMissingStorageClass = errors.New("storage-class cannot be empty")
// AbortIncompleteMultipartUpload structure, not supported yet on MinIO
type AbortIncompleteMultipartUpload struct {
XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"`
@ -50,13 +53,14 @@ func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.Sta
// (or suspended) to request server delete noncurrent object versions at a
// specific period in the object's lifetime.
type NoncurrentVersionExpiration struct {
XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"`
NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"`
XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"`
NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"`
MaxNoncurrentVersions int `xml:"MaxNoncurrentVersions,omitempty"`
}
// MarshalXML if non-current days not set to non zero value
func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if n.IsDaysNull() {
if n.isNull() {
return nil
}
type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration
@ -68,13 +72,17 @@ func (n NoncurrentVersionExpiration) IsDaysNull() bool {
return n.NoncurrentDays == ExpirationDays(0)
}
func (n NoncurrentVersionExpiration) isNull() bool {
return n.IsDaysNull() && n.MaxNoncurrentVersions == 0
}
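// Hedged illustration of the new field: a rule can now cap retained
// noncurrent versions without setting NoncurrentDays, and such a rule is
// no longer treated as null when marshalling.
func exampleMaxNoncurrentVersions() bool {
	exp := NoncurrentVersionExpiration{MaxNoncurrentVersions: 5}
	return exp.isNull() // false: MarshalXML will emit the element
}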
// NoncurrentVersionTransition structure, set this action to request server to
// transition noncurrent object versions to different set storage classes
// at a specific period in the object's lifetime.
type NoncurrentVersionTransition struct {
XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"`
StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"`
NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"`
}
// IsDaysNull returns true if days field is null
@ -87,10 +95,30 @@ func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool {
return n.StorageClass == ""
}
func (n NoncurrentVersionTransition) isNull() bool {
return n.StorageClass == ""
}
// UnmarshalJSON implements NoncurrentVersionTransition JSONify
func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error {
type noncurrentVersionTransition NoncurrentVersionTransition
var nt noncurrentVersionTransition
err := json.Unmarshal(b, &nt)
if err != nil {
return err
}
if nt.StorageClass == "" {
return errMissingStorageClass
}
*n = NoncurrentVersionTransition(nt)
return nil
}
// MarshalXML is extended to leave out
// <NoncurrentVersionTransition></NoncurrentVersionTransition> tags
func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if n.IsDaysNull() || n.IsStorageClassEmpty() {
if n.isNull() {
return nil
}
type noncurrentVersionTransitionWrapper NoncurrentVersionTransition
@ -114,25 +142,44 @@ type Transition struct {
XMLName xml.Name `xml:"Transition" json:"-"`
Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
Days ExpirationDays `xml:"Days" json:"Days"`
}
// UnmarshalJSON returns an error if storage-class is empty.
func (t *Transition) UnmarshalJSON(b []byte) error {
type transition Transition
var tr transition
err := json.Unmarshal(b, &tr)
if err != nil {
return err
}
if tr.StorageClass == "" {
return errMissingStorageClass
}
*t = Transition(tr)
return nil
}
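// A minimal sketch of the validation added above: decoding a transition
// with no storage-class now fails with errMissingStorageClass.
func exampleTransitionValidation() error {
	var tr Transition
	return json.Unmarshal([]byte(`{"Days": 30}`), &tr) // returns errMissingStorageClass
}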
// MarshalJSON customizes json encoding by omitting empty values
func (t Transition) MarshalJSON() ([]byte, error) {
if t.IsNull() {
return nil, nil
}
type transition struct {
Date *ExpirationDate `json:"Date,omitempty"`
StorageClass string `json:"StorageClass,omitempty"`
Days *ExpirationDays `json:"Days,omitempty"`
Days *ExpirationDays `json:"Days"`
}
newt := transition{
StorageClass: t.StorageClass,
}
if !t.IsDaysNull() {
newt.Days = &t.Days
}
if !t.IsDateNull() {
newt.Date = &t.Date
} else {
newt.Days = &t.Days
}
return json.Marshal(newt)
}
@ -147,9 +194,9 @@ func (t Transition) IsDateNull() bool {
return t.Date.Time.IsZero()
}
// IsNull returns true if both date and days fields are null
// IsNull returns true if no storage-class is set.
func (t Transition) IsNull() bool {
return t.IsDaysNull() && t.IsDateNull()
return t.StorageClass == ""
}
// MarshalXML if transition is non null
@ -364,10 +411,10 @@ func (r Rule) MarshalJSON() ([]byte, error) {
if !r.Transition.IsNull() {
newr.Transition = &r.Transition
}
if !r.NoncurrentVersionExpiration.IsDaysNull() {
if !r.NoncurrentVersionExpiration.isNull() {
newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration
}
if !r.NoncurrentVersionTransition.IsDaysNull() {
if !r.NoncurrentVersionTransition.isNull() {
newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition
}


@ -103,15 +103,21 @@ func (c *Config) AddRule(opts Options) error {
if err != nil {
return err
}
var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite
if opts.RoleArn != "" {
tokens := strings.Split(opts.RoleArn, ":")
if len(tokens) != 6 {
return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn)
}
if !strings.HasPrefix(opts.RoleArn, "arn:aws:iam") {
switch {
case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0:
c.Role = opts.RoleArn
compatSw = true
case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"):
c.Role = opts.RoleArn
default:
return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn)
}
c.Role = opts.RoleArn
}
var status Status
@ -151,7 +157,11 @@ func (c *Config) AddRule(opts Options) error {
destBucket := opts.DestBucket
// ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 {
return fmt.Errorf("destination bucket needs to be in Arn format")
if len(btokens) == 1 && compatSw {
destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
} else {
return fmt.Errorf("destination bucket needs to be in Arn format")
}
}
dmStatus := Disabled
if opts.ReplicateDeleteMarkers != "" {
@ -228,7 +238,7 @@ func (c *Config) AddRule(opts Options) error {
return err
}
// if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration
if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") {
if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw {
for i := range c.Rules {
c.Rules[i].Destination.Bucket = c.Role
}
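// Hedged sketch of the compatibility path above: when a legacy
// "arn:minio:replication:..." RoleArn is supplied for the first rule, a bare
// destination bucket name is expanded to an S3 ARN instead of being rejected.
// Values are illustrative:
//
//	opts := Options{
//		RoleArn:    "arn:minio:replication:us-east-1:xxxx:dest-bucket",
//		DestBucket: "dest-bucket", // normalized to arn:aws:s3:::dest-bucket
//	}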
@ -254,7 +264,7 @@ func (c *Config) EditRule(opts Options) error {
return fmt.Errorf("rule ID missing")
}
// if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS.
if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") {
if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 {
for i := range c.Rules {
c.Rules[i].Destination.Bucket = c.Role
}
@ -484,10 +494,7 @@ func (r Rule) validateStatus() error {
}
func (r Rule) validateFilter() error {
if err := r.Filter.Validate(); err != nil {
return err
}
return nil
return r.Filter.Validate()
}
// Prefix - a rule can either have prefix under <filter></filter> or under
@ -712,9 +719,12 @@ type Metrics struct {
FailedCount uint64 `json:"failedReplicationCount"`
}
// ResyncTargetsInfo provides replication target information to resync replicated data.
type ResyncTargetsInfo struct {
Targets []ResyncTarget `json:"target,omitempty"`
}
// ResyncTarget provides the replica resources and resetID to initiate resync replication.
type ResyncTarget struct {
Arn string `json:"arn"`
ResetID string `json:"resetid"`


@ -171,6 +171,7 @@ func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
return false
}
return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" ||
endpointURL.Host == "s3-fips.us-gov-west-1.amazonaws.com" ||
endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com"
}
@ -211,7 +212,7 @@ func IsGoogleEndpoint(endpointURL url.URL) bool {
// Expects ascii encoded strings - from output of urlEncodePath
func percentEncodeSlash(s string) string {
return strings.Replace(s, "/", "%2F", -1)
return strings.ReplaceAll(s, "/", "%2F")
}
// QueryEncode - encodes query values in their URL encoded form. In


@ -233,16 +233,7 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
if idx > 0 {
buf.WriteByte(',')
}
if strings.Contains(v, "\n") {
// TODO: "Unfold" long headers that
// span multiple lines (as allowed by
// RFC 2616, section 4.2) by replacing
// the folding white-space (including
// new-line) by a single space.
buf.WriteString(v)
} else {
buf.WriteString(v)
}
buf.WriteString(v)
}
buf.WriteByte('\n')
}


@ -42,22 +42,22 @@ const (
ServiceTypeSTS = "sts"
)
///
/// Excerpts from @lsegal -
/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
///
/// User-Agent:
///
/// This is ignored from signing because signing this causes
/// problems with generating pre-signed URLs (that are executed
/// by other agents) or when customers pass requests through
/// proxies, which may modify the user-agent.
///
///
/// Authorization:
///
/// Is skipped for obvious reasons
///
//
// Excerpts from @lsegal -
// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
//
// User-Agent:
//
// This is ignored from signing because signing this causes
// problems with generating pre-signed URLs (that are executed
// by other agents) or when customers pass requests through
// proxies, which may modify the user-agent.
//
//
// Authorization:
//
// Is skipped for obvious reasons
//
var v4IgnoredHeaders = map[string]bool{
"Authorization": true,
"User-Agent": true,
@ -118,7 +118,9 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin
headers = append(headers, strings.ToLower(k))
vals[strings.ToLower(k)] = vv
}
headers = append(headers, "host")
if !headerExists("host", headers) {
headers = append(headers, "host")
}
sort.Strings(headers)
var buf bytes.Buffer
@ -130,7 +132,7 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin
switch {
case k == "host":
buf.WriteString(getHostAddr(&req))
fallthrough
buf.WriteByte('\n')
default:
for idx, v := range vals[k] {
if idx > 0 {
@ -144,6 +146,15 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin
return buf.String()
}
func headerExists(key string, headers []string) bool {
for _, k := range headers {
if k == key {
return true
}
}
return false
}
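// Hedged illustration of the guard above: if the caller already supplied an
// explicit Host header, "host" is no longer appended a second time, keeping
// the canonical header list free of duplicates.
func exampleHostDedup(headers []string) []string {
	if !headerExists("host", headers) {
		headers = append(headers, "host")
	}
	return headers // exampleHostDedup([]string{"host"}) still holds exactly one "host"
}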
// getSignedHeaders generate all signed request headers.
// i.e lexically sorted, semicolon-separated list of lowercase
// request header names.
@ -155,7 +166,9 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
}
headers = append(headers, strings.ToLower(k))
}
headers = append(headers, "host")
if !headerExists("host", headers) {
headers = append(headers, "host")
}
sort.Strings(headers)
return strings.Join(headers, ";")
}
@ -170,7 +183,7 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
// <SignedHeaders>\n
// <HashedPayload>
func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string {
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
canonicalRequest := strings.Join([]string{
req.Method,
s3utils.EncodePath(req.URL.Path),
@ -186,7 +199,7 @@ func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashe
func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string {
stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
stringToSign = stringToSign + getScope(location, t, serviceType) + "\n"
stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest)))
return stringToSign
}


@ -44,6 +44,10 @@ func sumHMAC(key []byte, data []byte) []byte {
// getHostAddr returns host header if available, otherwise returns host from URL
func getHostAddr(req *http.Request) string {
host := req.Header.Get("host")
if host != "" && req.Host != host {
return host
}
if req.Host != "" {
return req.Host
}
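// Hedged illustration: an explicitly set host header now takes precedence
// over req.Host when the two differ, e.g. when signing for an origin behind
// a proxy. URL and hosts below are illustrative:
//
//	req, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1:9000/bucket", nil)
//	req.Host = "internal.lb.local" // what the request would otherwise use
//	req.Header.Set("host", "minio.example.com")
//	_ = getHostAddr(req) // "minio.example.com": the explicit header wins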


@ -316,8 +316,8 @@ func (p PostPolicy) marshalJSON() []byte {
}
retStr := "{"
retStr = retStr + expirationStr + ","
retStr = retStr + conditionsStr
retStr = retStr + "}"
retStr += conditionsStr
retStr += "}"
return []byte(retStr)
}


@ -20,7 +20,7 @@ package minio
import "time"
// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
func (c *Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
attemptCh := make(chan int)
// normalize jitter to the range [0, 1.0]
@ -39,7 +39,7 @@ func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, ji
if attempt > maxAttempt {
attempt = maxAttempt
}
//sleep = random_between(0, min(cap, base * 2 ** attempt))
// sleep = random_between(0, min(cap, base * 2 ** attempt))
sleep := unit * time.Duration(1<<uint(attempt))
if sleep > cap {
sleep = cap


@ -42,7 +42,7 @@ var DefaultRetryCap = time.Second
// newRetryTimer creates a timer with exponentially increasing
// delays until the maximum retry attempts are reached.
func (c Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int {
func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int {
attemptCh := make(chan int)
// computes the exponential backoff duration according to
@ -56,7 +56,7 @@ func (c Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Durat
jitter = MaxJitter
}
//sleep = random_between(0, min(cap, base * 2 ** attempt))
// sleep = random_between(0, min(cap, base * 2 ** attempt))
sleep := unit * time.Duration(1<<uint(attempt))
if sleep > cap {
sleep = cap
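// A self-contained sketch (not the vendored implementation) of the jittered
// exponential backoff formula referenced in the comment above; assumes the
// math/rand and time imports.
func backoffSketch(attempt int, unit, cap time.Duration, jitter float64) time.Duration {
	sleep := unit * time.Duration(1<<uint(attempt)) // base * 2^attempt
	if sleep > cap {
		sleep = cap // clamp to the configured maximum
	}
	if jitter > 0 {
		sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter) // random_between(0, sleep)
	}
	return sleep
}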


@ -52,7 +52,7 @@ var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`)
func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) {
if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 {
expTime, err := time.Parse(http.TimeFormat, matches[1])
expTime, err := parseRFC7231Time(matches[1])
if err != nil {
return time.Time{}, ""
}
@ -73,7 +73,7 @@ func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err er
return false, time.Time{}, err
}
if matches[3] != "" {
expTime, err = time.Parse(http.TimeFormat, matches[3])
expTime, err = parseRFC7231Time(matches[3])
if err != nil {
return false, time.Time{}, err
}
@ -240,6 +240,27 @@ func extractObjMetadata(header http.Header) http.Header {
return filteredHeader
}
const (
// RFC 7231#section-7.1.1.1 timestamp format, e.g. Tue, 29 Apr 2014 18:30:38 GMT
rfc822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT"
rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
)
func parseTime(t string, formats ...string) (time.Time, error) {
for _, format := range formats {
tt, err := time.Parse(format, t)
if err == nil {
return tt, nil
}
}
return time.Time{}, fmt.Errorf("unable to parse %s in any of the input formats: %s", t, formats)
}
func parseRFC7231Time(lastModified string) (time.Time, error) {
return parseTime(lastModified, rfc822TimeFormat, rfc822TimeFormatSingleDigitDay, rfc822TimeFormatSingleDigitDayTwoDigitYear)
}
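// Hedged usage sketch: the helper accepts the RFC 7231 variants that some
// S3-compatible servers emit, including single-digit days and two-digit
// years. Dates are illustrative.
func exampleParseLastModified() {
	t1, _ := parseRFC7231Time("Tue, 29 Apr 2014 18:30:38 GMT")
	t2, _ := parseRFC7231Time("Tue, 8 Apr 14 18:30:38 GMT") // single-digit day, two-digit year
	_, _ = t1, t2
}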
// ToObjectInfo converts http header values into ObjectInfo type,
// extracts metadata and fills in all the necessary fields in ObjectInfo.
func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectInfo, error) {
@ -267,7 +288,7 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn
}
// Parse Last-Modified has http time format.
date, err := time.Parse(http.TimeFormat, h.Get("Last-Modified"))
mtime, err := parseRFC7231Time(h.Get("Last-Modified"))
if err != nil {
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
@ -289,7 +310,18 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn
expiryStr := h.Get("Expires")
var expiry time.Time
if expiryStr != "" {
expiry, _ = time.Parse(http.TimeFormat, expiryStr)
expiry, err = parseRFC7231Time(expiryStr)
if err != nil {
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Message: fmt.Sprintf("'Expiry' is not in supported format: %v", err),
BucketName: bucketName,
Key: objectName,
RequestID: h.Get("x-amz-request-id"),
HostID: h.Get("x-amz-id-2"),
Region: h.Get("x-amz-bucket-region"),
}
}
}
metadata := extractObjMetadata(h)
@ -337,7 +369,7 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn
ETag: etag,
Key: objectName,
Size: size,
LastModified: date,
LastModified: mtime,
ContentType: contentType,
Expires: expiry,
VersionID: h.Get(amzVersionID),
@ -404,7 +436,7 @@ func redactSignature(origAuth string) string {
return "AWS **REDACTED**:**REDACTED**"
}
/// Signature V4 authorization header.
// Signature V4 authorization header.
// Strip out accessKeyID from:
// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
@ -552,6 +584,11 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
if expectTimeouts && errors.Is(err, context.DeadlineExceeded) {
return false
}
if errors.Is(err, context.DeadlineExceeded) {
return true
}
// We need to figure if the error either a timeout
// or a non-temporary error.
urlErr := &url.Error{}
@ -581,6 +618,10 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
case strings.Contains(err.Error(), "connection timed out"):
// If err is a net.Dial timeout.
return true
case strings.Contains(err.Error(), "connection refused"):
// If err is connection refused
return true
case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"):
// Denial errors
return true

vendor/github.com/vmihailenco/msgpack/v5/.prettierrc (new file, generated, vendored)

@ -0,0 +1,4 @@
semi: false
singleQuote: true
proseWrap: always
printWidth: 100

vendor/github.com/vmihailenco/msgpack/v5/.travis.yml (new file, generated, vendored)

@ -0,0 +1,20 @@
sudo: false
language: go

go:
  - 1.15.x
  - 1.16.x
  - tip

matrix:
  allow_failures:
    - go: tip

env:
  - GO111MODULE=on

go_import_path: github.com/vmihailenco/msgpack

before_install:
  - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.31.0

vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md (new file, generated, vendored)

@ -0,0 +1,51 @@
## [5.3.5](https://github.com/vmihailenco/msgpack/compare/v5.3.4...v5.3.5) (2021-10-22)
## v5
### Added
- `DecodeMap` is split into `DecodeMap`, `DecodeTypedMap`, and `DecodeUntypedMap`.
- New msgpack extensions API.
### Changed
- `Reset*` functions also reset flags.
- `SetMapDecodeFunc` is renamed to `SetMapDecoder`.
- `StructAsArray` is renamed to `UseArrayEncodedStructs`.
- `SortMapKeys` is renamed to `SetSortMapKeys`.
### Removed
- `UseJSONTag` is removed. Use `SetCustomStructTag("json")` instead.
## v4
- Encode, Decode, Marshal, and Unmarshal are changed to accept single argument. EncodeMulti and
DecodeMulti are added as replacement.
- Added EncodeInt8/16/32/64 and EncodeUint8/16/32/64.
- Encoder changed to preserve type of numbers instead of choosing most compact encoding. The old
behavior can be achieved with Encoder.UseCompactEncoding.
## v3.3
- `msgpack:",inline"` tag is restored to force inlining structs.
## v3.2
- Decoding extension types returns pointer to the value instead of the value. Fixes #153
## v3
- gopkg.in is not supported any more. Update import path to github.com/vmihailenco/msgpack.
- Msgpack maps are decoded into map[string]interface{} by default.
- EncodeSliceLen is removed in favor of EncodeArrayLen. DecodeSliceLen is removed in favor of
DecodeArrayLen.
- Embedded structs are automatically inlined where possible.
- Time is encoded using extension as described in https://github.com/msgpack/msgpack/pull/209. Old
format is supported as well.
- EncodeInt8/16/32/64 is replaced with EncodeInt. EncodeUint8/16/32/64 is replaced with EncodeUint.
There should be no performance differences.
- DecodeInterface can now return int8/16/32 and uint8/16/32.
- PeekCode returns codes.Code instead of byte.
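A hedged sketch of basic v5 usage (error handling elided; the import path is
the one this vendored copy ships under):

	import "github.com/vmihailenco/msgpack/v5"

	b, err := msgpack.Marshal(map[string]interface{}{"hello": "world"})
	var out map[string]interface{}
	err = msgpack.Unmarshal(b, &out)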

vendor/github.com/vmihailenco/msgpack/v5/LICENSE (new file, generated, vendored)

@ -0,0 +1,25 @@
Copyright (c) 2013 The github.com/vmihailenco/msgpack Authors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/vmihailenco/msgpack/v5/Makefile (new file, generated, vendored)

@ -0,0 +1,6 @@
test:
	go test ./...
	go test ./... -short -race
	go test ./... -run=NONE -bench=. -benchmem
	env GOOS=linux GOARCH=386 go test ./...
	go vet
