mirror of https://github.com/cwinfo/matterbridge.git synced 2024-11-22 07:00:27 +00:00

Update dependencies (#1851)

Author: Wim, 2022-06-25 00:36:16 +02:00 (committed by GitHub)
Commit: 4649876956 (parent: 5604d140e3)
GPG signature: no known key found in database (Key ID: 4AEE18F83AFDEB23)
87 changed files with 10535 additions and 4392 deletions
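
Vendored sources change together with go.mod and go.sum here, so the bump was presumably produced with the standard Go module tooling. A sketch of the usual sequence (the exact commands are not recorded in the commit):

    go get github.com/d5/tengo/v2@v2.12.0   # one go get per bumped module
    go mod tidy                             # refresh go.sum
    go mod vendor                           # sync vendor/ with go.mod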

go.mod

@@ -6,13 +6,13 @@ require (
 	github.com/Benau/tgsconverter v0.0.0-20210809170556-99f4a4f6337f
 	github.com/Philipp15b/go-steam v1.0.1-0.20200727090957-6ae9b3c0a560
 	github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c
-	github.com/SevereCloud/vksdk/v2 v2.14.0
+	github.com/SevereCloud/vksdk/v2 v2.14.1
 	github.com/bwmarrin/discordgo v0.25.0
-	github.com/d5/tengo/v2 v2.10.1
+	github.com/d5/tengo/v2 v2.12.0
 	github.com/davecgh/go-spew v1.1.1
 	github.com/fsnotify/fsnotify v1.5.4
 	github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1
-	github.com/gomarkdown/markdown v0.0.0-20220603122033-8f3b341fef32
+	github.com/gomarkdown/markdown v0.0.0-20220607163217-45f7c050e2d1
 	github.com/google/gops v0.3.23
 	github.com/gorilla/schema v1.2.0
 	github.com/gorilla/websocket v1.5.0
@@ -40,16 +40,16 @@ require (
 	github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca
 	github.com/shazow/ssh-chat v1.10.1
 	github.com/sirupsen/logrus v1.8.1
-	github.com/slack-go/slack v0.10.3
+	github.com/slack-go/slack v0.11.0
 	github.com/spf13/viper v1.12.0
-	github.com/stretchr/testify v1.7.1
+	github.com/stretchr/testify v1.7.2
 	github.com/vincent-petithory/dataurl v1.0.0
 	github.com/writeas/go-strip-markdown v2.0.1+incompatible
 	github.com/yaegashi/msgraph.go v0.1.4
 	github.com/zfjagann/golang-ring v0.0.0-20220330170733-19bcea1b6289
-	go.mau.fi/whatsmeow v0.0.0-20220601182603-a8d86cf1812c
+	go.mau.fi/whatsmeow v0.0.0-20220624184947-57a69a641154
-	golang.org/x/image v0.0.0-20220601225756-64ec528b34cd
+	golang.org/x/image v0.0.0-20220617043117-41969df76e82
-	golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401
+	golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2
 	golang.org/x/text v0.3.7
 	gomod.garykim.dev/nc-talk v0.3.0
 	google.golang.org/protobuf v1.28.0
@@ -80,7 +80,7 @@ require (
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
 	github.com/kettek/apng v0.0.0-20191108220231-414630eed80f // indirect
-	github.com/klauspost/compress v1.15.1 // indirect
+	github.com/klauspost/compress v1.15.6 // indirect
 	github.com/klauspost/cpuid/v2 v2.0.12 // indirect
 	github.com/labstack/gommon v0.3.1 // indirect
 	github.com/magiconair/properties v1.8.6 // indirect
@@ -145,7 +145,7 @@ require (
 	gopkg.in/ini.v1 v1.66.4 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	gopkg.in/yaml.v3 v3.0.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 	lukechampine.com/uint128 v1.1.1 // indirect
 	modernc.org/cc/v3 v3.36.0 // indirect
 	modernc.org/ccgo/v3 v3.16.6 // indirect

go.sum

@@ -160,8 +160,8 @@ github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c/go.mod h1:DNS
 github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
 github.com/RoaringBitmap/roaring v0.8.0/go.mod h1:jdT9ykXwHFNdJbEtxePexlFYH9LXucApeS0/+/g+p1I=
 github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
-github.com/SevereCloud/vksdk/v2 v2.14.0 h1:1lciJC4FWhSQIjjFb3NGyJI7x9sPKk/P6aAvR0ibh1o=
+github.com/SevereCloud/vksdk/v2 v2.14.1 h1:pToB5uvNn6CUpPAs4jINlv5Z9qArTs+muATDOWNFJo8=
-github.com/SevereCloud/vksdk/v2 v2.14.0/go.mod h1:J/iPooVfldjVADo47G5aNxkvlRWAsZnMHpri8sZmck4=
+github.com/SevereCloud/vksdk/v2 v2.14.1/go.mod h1:OW11r2PqGTGc/oxuodjgeqr2uxutasJGTmhjLMHMImg=
 github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
@@ -460,8 +460,8 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
 github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
-github.com/d5/tengo/v2 v2.10.1 h1:Z7vmTAQfdoExNEB9kxgqxvoBBW9bf+8uYMiDyriX5HM=
+github.com/d5/tengo/v2 v2.12.0 h1:EJLSMheqt1Kv/WjV5D0BvqJ/Qq/J6H3ZBpSZgw6Hn7Y=
-github.com/d5/tengo/v2 v2.10.1/go.mod h1:XRGjEs5I9jYIKTxly6HCF8oiiilk5E/RYXOZ5b0DZC8=
+github.com/d5/tengo/v2 v2.12.0/go.mod h1:XRGjEs5I9jYIKTxly6HCF8oiiilk5E/RYXOZ5b0DZC8=
 github.com/dave/jennifer v1.4.1/go.mod h1:7jEdnm+qBcxl8PC0zyp7vxcpSRnzXSt9r39tpTVGlwA=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -723,8 +723,8 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
 github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/gomarkdown/markdown v0.0.0-20220603122033-8f3b341fef32 h1:QxcGJpbMJw6tHRtrHKJiL11LdX1SXDfV1f4t4mJl3QI=
+github.com/gomarkdown/markdown v0.0.0-20220607163217-45f7c050e2d1 h1:wAupuFkZ/yq219/mSbqDtMfUZQY0gTYEtoz3/LKtppU=
-github.com/gomarkdown/markdown v0.0.0-20220603122033-8f3b341fef32/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA=
+github.com/gomarkdown/markdown v0.0.0-20220607163217-45f7c050e2d1/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA=
 github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
 github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -1032,8 +1032,9 @@ github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
 github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY=
+github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
@@ -1561,8 +1562,8 @@ github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882 h1:A7o8tOE
 github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882/go.mod h1:5IwJoz9Pw7JsrCN4/skkxUtSWT7myuUPLhCgv6Q5vvQ=
 github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9 h1:lpEzuenPuO1XNTeikEmvqYFcU37GVLl8SRNblzyvGBE=
 github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9/go.mod h1:PLPIyL7ikehBD1OAjmKKiOEhbvWyHGaNDjquXMcYABo=
-github.com/slack-go/slack v0.10.3 h1:kKYwlKY73AfSrtAk9UHWCXXfitudkDztNI9GYBviLxw=
+github.com/slack-go/slack v0.11.0 h1:sBBjQz8LY++6eeWhGJNZpRm5jvLRNnWBFZ/cAq58a6k=
-github.com/slack-go/slack v0.10.3/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
+github.com/slack-go/slack v0.11.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8=
 github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
@@ -1629,8 +1630,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI=
 github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs=
@@ -1774,8 +1776,8 @@ go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+
 go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
 go.mau.fi/libsignal v0.0.0-20220425070825-c40c839ee6a0 h1:3IQF2bgAyibdo77hTejwuJe4jlypj9QaE4xCQuxrThM=
 go.mau.fi/libsignal v0.0.0-20220425070825-c40c839ee6a0/go.mod h1:kBOXTvYyDG/q1Ihgvd4J6WenGPh7wtEGvPKF6vmf5ak=
-go.mau.fi/whatsmeow v0.0.0-20220601182603-a8d86cf1812c h1:2pn4sUljgVcFrPl1wyFOA0Qvg8726yzwyC1+qVdPkjM=
+go.mau.fi/whatsmeow v0.0.0-20220624184947-57a69a641154 h1:jUe0Re+w8/YHfxYryxjVkG3PEQDujCzGhbqsk6Qadtg=
-go.mau.fi/whatsmeow v0.0.0-20220601182603-a8d86cf1812c/go.mod h1:iUBgOLNaqShLrR17u0kIiRptIGFH+nbT1tRhaWBEX/c=
+go.mau.fi/whatsmeow v0.0.0-20220624184947-57a69a641154/go.mod h1:iUBgOLNaqShLrR17u0kIiRptIGFH+nbT1tRhaWBEX/c=
 go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
 go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
@@ -1888,8 +1890,8 @@ golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+o
 golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/image v0.0.0-20210622092929-e6eecd499c2c/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
 golang.org/x/image v0.0.0-20220321031419-a8550c1d254a/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
-golang.org/x/image v0.0.0-20220601225756-64ec528b34cd h1:9NbNcTg//wfC5JskFW4Z3sqwVnjmJKHxLAol1bW2qgw=
+golang.org/x/image v0.0.0-20220617043117-41969df76e82 h1:KpZB5pUSBvrHltNEdK/tw0xlPeD13M6M6aGP32gKqiw=
-golang.org/x/image v0.0.0-20220601225756-64ec528b34cd/go.mod h1:doUCurBvlfPMKfmIpRIywoHmhN3VyhnoFDbvIEWF4hY=
+golang.org/x/image v0.0.0-20220617043117-41969df76e82/go.mod h1:doUCurBvlfPMKfmIpRIywoHmhN3VyhnoFDbvIEWF4hY=
 golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -2031,8 +2033,8 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
 golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
 golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 h1:zwrSfklXn0gxyLRX/aR+q6cgHbV/ItVyzbPlbA+dkAw=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 h1:+jnHzr9VPj32ykQVai5DNahi9+NSp7yYuCsl5eAQtL0=
-golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
 golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -2583,8 +2585,9 @@ gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
 gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg=
 gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
 gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=


@@ -57,6 +57,8 @@ linters:
     - grouper
     - decorder
     - containedctx
+    # - execinquery # FIXME: panic in 1.46.0
+    - nosprintfhostport
     # - wrapcheck # TODO: v3 Fix
     # - testpackage # TODO: Fix testpackage
@@ -87,6 +89,7 @@ linters:
     # - varnamelen
     # - errchkjson
     # - maintidx
+    # - nonamedreturns
     # deprecated
     # - maligned


@@ -7,6 +7,6 @@ package vksdk
 // Module constants.
 const (
-	Version = "2.14.0"
+	Version = "2.14.1"
 	API     = "5.131"
 )


@@ -210,7 +210,7 @@ type GroupsGroup struct {
 	MainSection  int                 `json:"main_section,omitempty"`
 	OnlineStatus GroupsOnlineStatus  `json:"online_status,omitempty"` // Status of replies in community messages
 	AgeLimits    int                 `json:"age_limits,omitempty"`    // Information whether age limit
-	BanInfo      GroupsGroupBanInfo  `json:"ban_info,omitempty"`      // User ban info
+	BanInfo      *GroupsGroupBanInfo `json:"ban_info,omitempty"`      // User ban info
 	Addresses    GroupsAddressesInfo `json:"addresses,omitempty"`     // Info about addresses in Groups
 	LiveCovers   GroupsLiveCovers    `json:"live_covers,omitempty"`
 	CropPhoto    UsersCropPhoto      `json:"crop_photo,omitempty"`
@@ -963,7 +963,7 @@ type GroupsOnlineStatus struct {
 // GroupsOwnerXtrBanInfo struct.
 type GroupsOwnerXtrBanInfo struct {
-	BanInfo GroupsBanInfo  `json:"ban_info"`
+	BanInfo *GroupsBanInfo `json:"ban_info"`
 	Group   GroupsGroup    `json:"group"`
 	Profile UsersUser      `json:"profile"`
 	Type    string         `json:"type"`
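
The switch from a struct value to a pointer for ban_info is presumably the usual encoding/json motive: a struct value is never dropped by omitempty and cannot signal "field absent", while a nil pointer can. A minimal standalone sketch (hypothetical types, not the vksdk ones):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type banInfo struct {
    	Comment string `json:"comment"`
    }

    type group struct {
    	BanInfo *banInfo `json:"ban_info,omitempty"`
    }

    func main() {
    	var g group
    	_ = json.Unmarshal([]byte(`{}`), &g) // "ban_info" absent in the payload
    	fmt.Println(g.BanInfo == nil)        // true: absence is observable

    	out, _ := json.Marshal(g)
    	fmt.Println(string(out)) // {} — the nil pointer is omitted; a struct value never would be
    }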


@@ -31,7 +31,7 @@ type VideoVideo struct {
 	CanLike BaseBoolInt `json:"can_like"`
 	// Information whether current user can download the video.
-	CanDownload BaseBoolInt `json:"can_download"`
+	CanDownload int `json:"can_download"`
 	// Information whether current user can repost this video.
 	CanRepost BaseBoolInt `json:"can_repost"`


@@ -1220,14 +1220,14 @@ func (c *Compiler) optimizeFunc(node parser.Node) {
 	iterateInstructions(c.scopes[c.scopeIndex].Instructions,
 		func(pos int, opcode parser.Opcode, operands []int) bool {
 			switch {
-			case dsts[pos]:
-				dstIdx++
-				deadCode = false
 			case opcode == parser.OpReturn:
 				if deadCode {
 					return true
 				}
 				deadCode = true
+			case dsts[pos]:
+				dstIdx++
+				deadCode = false
 			case deadCode:
 				return true
 			}
@@ -1242,6 +1242,7 @@ func (c *Compiler) optimizeFunc(node parser.Node) {
 	var appendReturn bool
 	endPos := len(c.scopes[c.scopeIndex].Instructions)
 	newEndPost := len(newInsts)
+
 	iterateInstructions(newInsts,
 		func(pos int, opcode parser.Opcode, operands []int) bool {
 			switch opcode {


@@ -375,7 +375,12 @@ func (p *Parser) parseOperand() Expr {
 	case token.Ident:
 		return p.parseIdent()
 	case token.Int:
-		v, _ := strconv.ParseInt(p.tokenLit, 10, 64)
+		v, err := strconv.ParseInt(p.tokenLit, 0, 64)
+		if err == strconv.ErrRange {
+			p.error(p.pos, "number out of range")
+		} else if err != nil {
+			p.error(p.pos, "invalid integer")
+		}
 		x := &IntLit{
 			Value:    v,
 			ValuePos: p.pos,
@@ -383,8 +388,14 @@ func (p *Parser) parseOperand() Expr {
 		}
 		p.next()
 		return x
 	case token.Float:
-		v, _ := strconv.ParseFloat(p.tokenLit, 64)
+		v, err := strconv.ParseFloat(p.tokenLit, 64)
+		if err == strconv.ErrRange {
+			p.error(p.pos, "number out of range")
+		} else if err != nil {
+			p.error(p.pos, "invalid float")
+		}
 		x := &FloatLit{
 			Value:    v,
 			ValuePos: p.pos,
@@ -447,10 +458,11 @@ func (p *Parser) parseOperand() Expr {
 		return p.parseErrorExpr()
 	case token.Immutable: // immutable expression
 		return p.parseImmutableExpr()
+	default:
+		p.errorExpected(p.pos, "operand")
 	}
 	pos := p.pos
-	p.errorExpected(pos, "operand")
 	p.advance(stmtStart)
 	return &BadExpr{From: pos, To: p.pos}
 }
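
Two things change in the literal parsing above: errors are now reported instead of swallowed, and ParseInt's base argument goes from 10 to 0, which lets the literal's prefix select the base (matching the scanner rewrite below). One caveat: ParseInt wraps failures in *strconv.NumError, so errors.Is(err, strconv.ErrRange) is the robust form of the range check. A standalone sketch of the stdlib behavior being relied on:

    package main

    import (
    	"errors"
    	"fmt"
    	"strconv"
    )

    func main() {
    	// Base 0: the prefix picks the base, and digit-separator underscores are permitted.
    	for _, lit := range []string{"42", "0x2a", "0o52", "0b101010", "1_000"} {
    		v, err := strconv.ParseInt(lit, 0, 64)
    		fmt.Println(lit, v, err)
    	}

    	// Overflow surfaces as a *strconv.NumError wrapping ErrRange.
    	_, err := strconv.ParseInt("9999999999999999999", 0, 64)
    	fmt.Println(errors.Is(err, strconv.ErrRange)) // true
    }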


@@ -93,9 +93,9 @@ func (s *Scanner) Scan() (
 			token.Export, token.True, token.False, token.Undefined:
 			insertSemi = true
 		}
-	case '0' <= ch && ch <= '9':
+	case ('0' <= ch && ch <= '9') || (ch == '.' && '0' <= s.peek() && s.peek() <= '9'):
 		insertSemi = true
-		tok, literal = s.scanNumber(false)
+		tok, literal = s.scanNumber()
 	default:
 		s.next() // always make progress
@@ -125,17 +125,12 @@ func (s *Scanner) Scan() (
 	case ':':
 		tok = s.switch2(token.Colon, token.Define)
 	case '.':
-		if '0' <= s.ch && s.ch <= '9' {
-			insertSemi = true
-			tok, literal = s.scanNumber(true)
-		} else {
 		tok = token.Period
 		if s.ch == '.' && s.peek() == '.' {
 			s.next()
 			s.next() // consume last '.'
 			tok = token.Ellipsis
 		}
-		}
 	case ',':
 		tok = token.Comma
 	case '?':
@@ -379,86 +374,58 @@ func (s *Scanner) scanIdentifier() string {
 	return string(s.src[offs:s.offset])
 }

-func (s *Scanner) scanMantissa(base int) {
-	for digitVal(s.ch) < base {
+func (s *Scanner) scanDigits(base int) {
+	for s.ch == '_' || digitVal(s.ch) < base {
 		s.next()
 	}
 }

-func (s *Scanner) scanNumber(
-	seenDecimalPoint bool,
-) (tok token.Token, lit string) {
-	// digitVal(s.ch) < 10
+func (s *Scanner) scanNumber() (token.Token, string) {
 	offs := s.offset
-	tok = token.Int
-
-	defer func() {
-		lit = string(s.src[offs:s.offset])
-	}()
-
-	if seenDecimalPoint {
-		offs--
-		tok = token.Float
-		s.scanMantissa(10)
-		goto exponent
-	}
-
-	if s.ch == '0' {
-		// int or float
-		offs := s.offset
-		s.next()
-		if s.ch == 'x' || s.ch == 'X' {
-			// hexadecimal int
-			s.next()
-			s.scanMantissa(16)
-			if s.offset-offs <= 2 {
-				// only scanned "0x" or "0X"
-				s.error(offs, "illegal hexadecimal number")
-			}
-		} else {
-			// octal int or float
-			seenDecimalDigit := false
-			s.scanMantissa(8)
-			if s.ch == '8' || s.ch == '9' {
-				// illegal octal int or float
-				seenDecimalDigit = true
-				s.scanMantissa(10)
-			}
-			if s.ch == '.' || s.ch == 'e' || s.ch == 'E' || s.ch == 'i' {
-				goto fraction
-			}
-			// octal int
-			if seenDecimalDigit {
-				s.error(offs, "illegal octal number")
-			}
-		}
-		return
-	}
-
-	// decimal int or float
-	s.scanMantissa(10)
-
-fraction:
-	if s.ch == '.' {
+	tok := token.Int
+	base := 10
+
+	// Determine base
+	switch {
+	case s.ch == '0' && lower(s.peek()) == 'b':
+		base = 2
+		s.next()
+		s.next()
+	case s.ch == '0' && lower(s.peek()) == 'o':
+		base = 8
+		s.next()
+		s.next()
+	case s.ch == '0' && lower(s.peek()) == 'x':
+		base = 16
+		s.next()
+		s.next()
+	}
+
+	// Scan whole number
+	s.scanDigits(base)
+
+	// Scan fractional part
+	if s.ch == '.' && (base == 10 || base == 16) {
 		tok = token.Float
 		s.next()
-		s.scanMantissa(10)
+		s.scanDigits(base)
 	}

-exponent:
-	if s.ch == 'e' || s.ch == 'E' {
+	// Scan exponent
+	if s.ch == 'e' || s.ch == 'E' || s.ch == 'p' || s.ch == 'P' {
 		tok = token.Float
 		s.next()
 		if s.ch == '-' || s.ch == '+' {
 			s.next()
 		}
-		if digitVal(s.ch) < 10 {
-			s.scanMantissa(10)
-		} else {
-			s.error(offs, "illegal floating-point exponent")
+		offs := s.offset
+		s.scanDigits(10)
+		if offs == s.offset {
+			s.error(offs, "exponent has no digits")
 		}
 	}

-	return
+	return tok, string(s.src[offs:s.offset])
 }
@@ -687,3 +654,7 @@ func digitVal(ch rune) int {
 	}
 	return 16 // larger than any legal digit val
 }
+
+func lower(c byte) byte {
+	return c | ('x' - 'X')
+}

vendor/github.com/gomarkdown/markdown/.gitpod.yml (new file, generated, vendored)

@@ -0,0 +1,9 @@
+# This configuration file was automatically generated by Gitpod.
+# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file)
+# and commit this file to your remote git repository to share the goodness with others.
+
+tasks:
+  - init: go get && go build ./... && go test ./...
+    command: go run


@@ -1675,6 +1675,12 @@ func (p *Parser) paragraph(data []byte) int {
 			return i
 		}

+		// if there's a block quote, paragraph is over
+		if p.quotePrefix(current) > 0 {
+			p.renderParagraph(data[:i])
+			return i
+		}
+
 		// if there's a fenced code block, paragraph is over
 		if p.extensions&FencedCode != 0 {
 			if p.fencedCodeBlock(current, false) > 0 {
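
The new quotePrefix check means a '>' line now terminates a running paragraph instead of being absorbed by lazy continuation. A sketch using the package's public entry point (exact HTML output depends on parser options and version):

    package main

    import (
    	"fmt"

    	"github.com/gomarkdown/markdown"
    )

    func main() {
    	input := []byte("some paragraph text\n> quoted line\n")
    	// With this change, the second line starts a <blockquote> rather than
    	// continuing the <p> above it.
    	fmt.Println(string(markdown.ToHTML(input, nil, nil)))
    }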


@@ -23,3 +23,10 @@ _testmain.go
 *.test
 *.prof
 /s2/cmd/_s2sx/sfx-exe
+
+# Linux perf files
+perf.data
+perf.data.old
+
+# gdb history
+.gdb_history


@@ -17,6 +17,41 @@ This package provides various compression algorithms.

 # changelog

+* May 25, 2022 (v1.15.5)
+	* s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
+	* s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
+	* huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
+	* zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
+	* zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
+	* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
+	* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
+	* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
+	* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+
+* May 11, 2022 (v1.15.4)
+	* huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
+	* inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
+	* zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
+	* zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
+
+* May 5, 2022 (v1.15.3)
+	* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
+	* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
+
+* Apr 26, 2022 (v1.15.2)
+	* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
+	* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
+	* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
+	* Minimum version is Go 1.16, added CI test on 1.18.
+
+* Mar 11, 2022 (v1.15.1)
+	* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+	* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+	* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+	* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+	* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
 * Mar 3, 2022 (v1.15.0)
 	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
 	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
@@ -60,6 +95,9 @@ While the release has been extensively tested, it is recommended to testing when
 	* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
 	* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)

+<details>
+	<summary>See changes to v1.13.x</summary>
+
 * Aug 30, 2021 (v1.13.5)
 	* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
 	* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
@@ -88,6 +126,8 @@ While the release has been extensively tested, it is recommended to testing when
 	* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
 	* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
 	* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+
+</details>

 <details>
 	<summary>See changes to v1.12.x</summary>


@@ -1,5 +0,0 @@
-package huff0
-
-//go:generate go run generate.go
-//go:generate asmfmt -w decompress_amd64.s
-//go:generate asmfmt -w decompress_8b_amd64.s


@@ -165,11 +165,6 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
 	return uint16(b.value >> ((64 - n) & 63))
 }

-// peekTopBits(n) is equivalent to peekBitsFast(64 - n)
-func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
-	return uint16(b.value >> n)
-}
-
 func (b *bitReaderShifted) advance(n uint8) {
 	b.bitsRead += n
 	b.value <<= n & 63
@@ -220,11 +215,6 @@ func (b *bitReaderShifted) fill() {
 	}
 }

-// finished returns true if all bits have been read from the bit stream.
-func (b *bitReaderShifted) finished() bool {
-	return b.off == 0 && b.bitsRead >= 64
-}
-
 func (b *bitReaderShifted) remaining() uint {
 	return b.off*8 + uint(64-b.bitsRead)
 }


@@ -5,8 +5,6 @@

 package huff0

-import "fmt"
-
 // bitWriter will write bits.
 // First bit will be LSB of the first byte of output.
 type bitWriter struct {
@@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{
 	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
 	0xFFFF, 0xFFFF} /* up to 16 bits */

-// addBits16NC will add up to 16 bits.
-// It will not check if there is space for them,
-// so the caller must ensure that it has flushed recently.
-func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
-	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
-	b.nBits += bits
-}
-
 // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
 // It will not check if there is space for them, so the caller must ensure that it has flushed recently.
 func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
 	b.nBits += encA.nBits + encB.nBits
 }

-// addBits16ZeroNC will add up to 16 bits.
-// It will not check if there is space for them,
-// so the caller must ensure that it has flushed recently.
-// This is fastest if bits can be zero.
-func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
-	if bits == 0 {
-		return
-	}
-	value <<= (16 - bits) & 15
-	value >>= (16 - bits) & 15
-	b.bitContainer |= uint64(value) << (b.nBits & 63)
-	b.nBits += bits
-}
-
-// flush will flush all pending full bytes.
-// There will be at least 56 bits available for writing when this has been called.
-// Using flush32 is faster, but leaves less space for writing.
-func (b *bitWriter) flush() {
-	v := b.nBits >> 3
-	switch v {
-	case 0:
-		return
-	case 1:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-		)
-		b.bitContainer >>= 1 << 3
-	case 2:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-		)
-		b.bitContainer >>= 2 << 3
-	case 3:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-		)
-		b.bitContainer >>= 3 << 3
-	case 4:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-		)
-		b.bitContainer >>= 4 << 3
-	case 5:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-			byte(b.bitContainer>>32),
-		)
-		b.bitContainer >>= 5 << 3
-	case 6:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-			byte(b.bitContainer>>32),
-			byte(b.bitContainer>>40),
-		)
-		b.bitContainer >>= 6 << 3
-	case 7:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-			byte(b.bitContainer>>32),
-			byte(b.bitContainer>>40),
-			byte(b.bitContainer>>48),
-		)
-		b.bitContainer >>= 7 << 3
-	case 8:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-			byte(b.bitContainer>>32),
-			byte(b.bitContainer>>40),
-			byte(b.bitContainer>>48),
-			byte(b.bitContainer>>56),
-		)
-		b.bitContainer = 0
-		b.nBits = 0
-		return
-	default:
-		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
-	}
-	b.nBits &= 7
-}
-
 // flush32 will flush out, so there are at least 32 bits available for writing.
 func (b *bitWriter) flush32() {
 	if b.nBits < 32 {
@@ -201,10 +93,3 @@ func (b *bitWriter) close() error {
 	b.flushAlign()
 	return nil
 }
-
-// reset and continue writing by appending to out.
-func (b *bitWriter) reset(out []byte) {
-	b.bitContainer = 0
-	b.nBits = 0
-	b.out = out
-}


@@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) {
 	b.off = 0
 }

-// advance the stream b n bytes.
-func (b *byteReader) advance(n uint) {
-	b.off += int(n)
-}
-
 // Int32 returns a little endian int32 starting at current offset.
 func (b byteReader) Int32() int32 {
 	v3 := int32(b.b[b.off+3])
@@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 {
 	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
 }

-// unread returns the unread portion of the input.
-func (b byteReader) unread() []byte {
-	return b.b[b.off:]
-}
-
 // remain will return the number of bytes remaining.
 func (b byteReader) remain() int {
 	return len(b.b) - b.off


@@ -404,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool {
 	return true
 }

+//lint:ignore U1000 used for debugging
 func (s *Scratch) validateTable(c cTable) bool {
 	if len(c) < int(s.symbolLen) {
 		return false


@@ -11,7 +11,6 @@ import (

 type dTable struct {
 	single []dEntrySingle
-	double []dEntryDouble
 }

 // single-symbols decoding
@@ -19,13 +18,6 @@ type dEntrySingle struct {
 	entry uint16
 }

-// double-symbols decoding
-type dEntryDouble struct {
-	seq   [4]byte
-	nBits uint8
-	len   uint8
-}
-
 // Uses special code for all tables that are < 8 bits.
 const use8BitTables = true
@@ -35,7 +27,7 @@ const use8BitTables = true
 // If no Scratch is provided a new one is allocated.
 // The returned Scratch can be used for encoding or decoding input using this table.
 func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
-	s, err = s.prepare(in)
+	s, err = s.prepare(nil)
 	if err != nil {
 		return s, nil, err
 	}
@@ -236,108 +228,6 @@ func (d *Decoder) buffer() *[4][256]byte {
 	return &[4][256]byte{}
 }

-// Decompress1X will decompress a 1X encoded stream.
-// The cap of the output buffer will be the maximum decompressed size.
-// The length of the supplied input must match the end of a block exactly.
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
-	if len(d.dt.single) == 0 {
-		return nil, errors.New("no table loaded")
-	}
-	if use8BitTables && d.actualTableLog <= 8 {
-		return d.decompress1X8Bit(dst, src)
-	}
-	var br bitReaderShifted
-	err := br.init(src)
-	if err != nil {
-		return dst, err
-	}
-	maxDecodedSize := cap(dst)
-	dst = dst[:0]
-
-	// Avoid bounds check by always having full sized table.
-	const tlSize = 1 << tableLogMax
-	const tlMask = tlSize - 1
-	dt := d.dt.single[:tlSize]
-
-	// Use temp table to avoid bound checks/append penalty.
-	bufs := d.buffer()
-	buf := &bufs[0]
-	var off uint8
-
-	for br.off >= 8 {
-		br.fillFast()
-		v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
-		br.advance(uint8(v.entry))
-		buf[off+0] = uint8(v.entry >> 8)
-
-		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
-		br.advance(uint8(v.entry))
-		buf[off+1] = uint8(v.entry >> 8)
-
-		// Refill
-		br.fillFast()
-
-		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
-		br.advance(uint8(v.entry))
-		buf[off+2] = uint8(v.entry >> 8)
-
-		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
-		br.advance(uint8(v.entry))
-		buf[off+3] = uint8(v.entry >> 8)
-
-		off += 4
-		if off == 0 {
-			if len(dst)+256 > maxDecodedSize {
-				br.close()
-				d.bufs.Put(bufs)
-				return nil, ErrMaxDecodedSizeExceeded
-			}
-			dst = append(dst, buf[:]...)
-		}
-	}
-
-	if len(dst)+int(off) > maxDecodedSize {
-		d.bufs.Put(bufs)
-		br.close()
-		return nil, ErrMaxDecodedSizeExceeded
-	}
-	dst = append(dst, buf[:off]...)
-
-	// br < 8, so uint8 is fine
-	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
-	for bitsLeft > 0 {
-		br.fill()
-		if false && br.bitsRead >= 32 {
-			if br.off >= 4 {
-				v := br.in[br.off-4:]
-				v = v[:4]
-				low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-				br.value = (br.value << 32) | uint64(low)
-				br.bitsRead -= 32
-				br.off -= 4
-			} else {
-				for br.off > 0 {
-					br.value = (br.value << 8) | uint64(br.in[br.off-1])
-					br.bitsRead -= 8
-					br.off--
-				}
-			}
-		}
-		if len(dst) >= maxDecodedSize {
-			d.bufs.Put(bufs)
-			br.close()
-			return nil, ErrMaxDecodedSizeExceeded
-		}
-		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
-		nBits := uint8(v.entry)
-		br.advance(nBits)
-		bitsLeft -= nBits
-		dst = append(dst, uint8(v.entry>>8))
-	}
-	d.bufs.Put(bufs)
-	return dst, br.close()
-}
-
 // decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
 // The cap of the output buffer will be the maximum decompressed size.
 // The length of the supplied input must match the end of a block exactly.
@@ -995,7 +885,6 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 	const shift = 56
 	const tlSize = 1 << 8
-	const tlMask = tlSize - 1
 	single := d.dt.single[:tlSize]

 	// Use temp table to avoid bound checks/append penalty.


@@ -1,488 +0,0 @@ (whole file deleted)
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
// const stream = 0
// br0.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
MOVQ bitReaderShifted_value(br0), br_value
MOVQ bitReaderShifted_off(br0), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill0
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br0), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br0.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 0(buffer)(off*1)
// SECOND PART:
// val2 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 0+2(buffer)(off*1)
// update the bitReader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
MOVQ br_value, bitReaderShifted_value(br0)
MOVQ br_offset, bitReaderShifted_off(br0)
// const stream = 1
// br1.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
MOVQ bitReaderShifted_value(br1), br_value
MOVQ bitReaderShifted_off(br1), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill1
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br1), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br1.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 256(buffer)(off*1)
// SECOND PART:
// val2 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 256+2(buffer)(off*1)
// update the bitReader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
MOVQ br_value, bitReaderShifted_value(br1)
MOVQ br_offset, bitReaderShifted_off(br1)
// const stream = 2
// br2.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
MOVQ bitReaderShifted_value(br2), br_value
MOVQ bitReaderShifted_off(br2), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill2
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br2), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br2.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 512(buffer)(off*1)
// SECOND PART:
// val2 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 512+2(buffer)(off*1)
// update the bitReader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
MOVQ br_value, bitReaderShifted_value(br2)
MOVQ br_offset, bitReaderShifted_off(br2)
// const stream = 3
// br3.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
MOVQ bitReaderShifted_value(br3), br_value
MOVQ bitReaderShifted_off(br3), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill3
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br3), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br3.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 768(buffer)(off*1)
// SECOND PART:
// val2 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val2&mask]
MOVW 0(table)(AX*2), AX // AX - v2
// br3.advance(uint8(v2.entry))
MOVB AH, BL // BL = uint8(v2.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val3&mask]
MOVW 0(table)(AX*2), AX // AX - v3
// br3.advance(uint8(v3.entry))
MOVB AH, BH // BH = uint8(v3.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 768+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
MOVQ br_value, bitReaderShifted_value(br3)
MOVQ br_offset, bitReaderShifted_off(br3)
ADDQ $4, off // off += 4
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

View File

@ -1,197 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// SECOND PART:
// val2 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val2&mask]
MOVW 0(table)(AX*2), AX // AX - v2
// br{{ var "id"}}.advance(uint8(v2.entry))
MOVB AH, BL // BL = uint8(v2.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val3&mask]
MOVW 0(table)(AX*2), AX // AX - v3
// br{{ var "id"}}.advance(uint8(v3.entry))
MOVB AH, BH // BH = uint8(v3.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $4, off // off += 4
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

View File

@ -2,30 +2,43 @@
// +build amd64,!appengine,!noasm,gc // +build amd64,!appengine,!noasm,gc
// This file contains the specialisation of Decoder.Decompress4X // This file contains the specialisation of Decoder.Decompress4X
// that uses an asm implementation of its main loop. // and Decoder.Decompress1X that use an asm implementation of their main loops.
package huff0 package huff0
import ( import (
"errors" "errors"
"fmt" "fmt"
"github.com/klauspost/compress/internal/cpuinfo"
) )
// decompress4x_main_loop_x86 is an x86 assembler implementation // decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8. // of Decompress4X when tablelog > 8.
// go:noescape //go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, func decompress4x_main_loop_amd64(ctx *decompress4xContext)
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
// decompress4x_8b_loop_x86 is an x86 assembler implementation // decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries // of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop. // per loop.
// go:noescape //go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
// fallback8BitSize is the size where using Go version is faster. // fallback8BitSize is the size where using Go version is faster.
const fallback8BitSize = 800 const fallback8BitSize = 800
type decompress4xContext struct {
pbr0 *bitReaderShifted
pbr1 *bitReaderShifted
pbr2 *bitReaderShifted
pbr3 *bitReaderShifted
peekBits uint8
out *byte
dstEvery int
tbl *dEntrySingle
decoded int
limit *byte
}
// Decompress4X will decompress a 4X encoded stream. // Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly. // The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of // The *capacity* of the dst slice must match the destination size of
@ -42,6 +55,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if cap(dst) < fallback8BitSize && use8BitTables { if cap(dst) < fallback8BitSize && use8BitTables {
return d.decompress4X8bit(dst, src) return d.decompress4X8bit(dst, src)
} }
var br [4]bitReaderShifted var br [4]bitReaderShifted
// Decode "jump table" // Decode "jump table"
start := 6 start := 6
@ -71,70 +85,28 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
const tlMask = tlSize - 1 const tlMask = tlSize - 1
single := d.dt.single[:tlSize] single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int var decoded int
const debug = false if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
ctx := decompress4xContext{
// see: bitReaderShifted.peekBitsFast() pbr0: &br[0],
peekBits := uint8((64 - d.actualTableLog) & 63) pbr1: &br[1],
pbr2: &br[2],
// Decode 2 values from each decoder/loop. pbr3: &br[3],
const bufoff = 256 peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
for { out: &out[0],
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { dstEvery: dstEvery,
break tbl: &single[0],
limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
} }
if use8BitTables { if use8BitTables {
off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0]) decompress4x_8b_main_loop_amd64(&ctx)
} else { } else {
off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0]) decompress4x_main_loop_amd64(&ctx)
}
if debug {
fmt.Print("DEBUG: ")
fmt.Printf("off=%d,", off)
for i := 0; i < 4; i++ {
fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
i, br[i].bitsRead, br[i].value, br[i].off)
}
fmt.Println("")
} }
if off != 0 { decoded = ctx.decoded
break out = out[decoded/4:]
}
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
} }
// Decode remaining. // Decode remaining.
@ -150,7 +122,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
for bitsLeft > 0 { for bitsLeft > 0 {
br.fill() br.fill()
if offset >= endsAt { if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4") return nil, errors.New("corruption detected: stream overrun 4")
} }
@ -164,7 +135,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
offset++ offset++
} }
if offset != endsAt { if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
} }
decoded += offset - dstEvery*i decoded += offset - dstEvery*i
@ -173,9 +143,86 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
return nil, err return nil, err
} }
} }
d.bufs.Put(buf)
if dstSize != decoded { if dstSize != decoded {
return nil, errors.New("corruption detected: short output block") return nil, errors.New("corruption detected: short output block")
} }
return dst, nil return dst, nil
} }
// decompress1x_main_loop_amd64 is an x86 assembler implementation
// of Decompress1X when tablelog > 8.
//go:noescape
func decompress1x_main_loop_amd64(ctx *decompress1xContext)
// decompress1x_main_loop_bmi2 is an x86 assembler implementation
// of Decompress1X, using BMI2 instructions, when tablelog > 8.
//go:noescape
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
type decompress1xContext struct {
pbr *bitReaderShifted
peekBits uint8
out *byte
outCap int
tbl *dEntrySingle
decoded int
}
// Error reported by asm implementations
const error_max_decoded_size_exeeded = -1
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
var br bitReaderShifted
err := br.init(src)
if err != nil {
return dst, err
}
maxDecodedSize := cap(dst)
dst = dst[:maxDecodedSize]
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
if maxDecodedSize >= 4 {
ctx := decompress1xContext{
pbr: &br,
out: &dst[0],
outCap: maxDecodedSize,
peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
tbl: &d.dt.single[0],
}
if cpuinfo.HasBMI2() {
decompress1x_main_loop_bmi2(&ctx)
} else {
decompress1x_main_loop_amd64(&ctx)
}
if ctx.decoded == error_max_decoded_size_exeeded {
return nil, ErrMaxDecodedSizeExceeded
}
dst = dst[:ctx.decoded]
}
// br.off < 8, so uint8 is fine
bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
for bitsLeft > 0 {
br.fill()
if len(dst) >= maxDecodedSize {
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
return dst, br.close()
}

File diff suppressed because it is too large.

View File

@ -1,195 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $2, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

View File

@ -191,3 +191,105 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
} }
return dst, nil return dst, nil
} }
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress1X8Bit(dst, src)
}
var br bitReaderShifted
err := br.init(src)
if err != nil {
return dst, err
}
maxDecodedSize := cap(dst)
dst = dst[:0]
// Avoid bounds check by always having full sized table.
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
dt := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
bufs := d.buffer()
buf := &bufs[0]
var off uint8
for br.off >= 8 {
br.fillFast()
v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+0] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+1] = uint8(v.entry >> 8)
// Refill
br.fillFast()
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+2] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+3] = uint8(v.entry >> 8)
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
}
}
if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:off]...)
// br.off < 8, so uint8 is fine
bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
for bitsLeft > 0 {
br.fill()
if false && br.bitsRead >= 32 {
if br.off >= 4 {
v := br.in[br.off-4:]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
br.value = (br.value << 32) | uint64(low)
br.bitsRead -= 32
br.off -= 4
} else {
for br.off > 0 {
br.value = (br.value << 8) | uint64(br.in[br.off-1])
br.bitsRead -= 8
br.off--
}
}
}
if len(dst) >= maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
d.bufs.Put(bufs)
return dst, br.close()
}

View File

@ -0,0 +1,34 @@
// Package cpuinfo gives runtime info about the current CPU.
//
// This is a very limited module meant for use internally
// in this project. For a more versatile solution, check
// https://github.com/klauspost/cpuid.
package cpuinfo
// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
func HasBMI1() bool {
return hasBMI1
}
// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
func HasBMI2() bool {
return hasBMI2
}
// DisableBMI2 will disable BMI2, for testing purposes.
// Call returned function to restore previous state.
func DisableBMI2() func() {
old := hasBMI2
hasBMI2 = false
return func() {
hasBMI2 = old
}
}
// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
func HasBMI() bool {
return HasBMI1() && HasBMI2()
}
var hasBMI1 bool
var hasBMI2 bool
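
For orientation, here is a minimal sketch (not part of this diff) of how packages in this module use the gate; it mirrors the `cpuinfo.HasBMI2()` dispatch added to huff0 above. Note that `internal/cpuinfo` cannot be imported from outside this repository, and `decodeBMI2`/`decodeGeneric` are hypothetical stand-ins:

```go
package example

import "github.com/klauspost/compress/internal/cpuinfo"

func decodeBMI2(src []byte) []byte    { return src } // hypothetical BMI2 fast path
func decodeGeneric(src []byte) []byte { return src } // hypothetical fallback

// decode picks the BMI2 code path when the CPU supports it and
// falls back to the generic implementation otherwise.
func decode(src []byte) []byte {
	if cpuinfo.HasBMI2() {
		return decodeBMI2(src)
	}
	return decodeGeneric(src)
}
```

`DisableBMI2` exists so tests can force the generic path; calling the function it returns restores the previous state.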

View File

@ -0,0 +1,11 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
package cpuinfo
//go:noescape
func x86extensions() (bmi1, bmi2 bool)
func init() {
hasBMI1, hasBMI2 = x86extensions()
}

View File

@ -0,0 +1,36 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
TEXT ·x86extensions(SB), NOSPLIT, $0
// 1. determine max EAX value
XORQ AX, AX
CPUID
CMPQ AX, $7
JB unsupported
// 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
MOVQ $7, AX
MOVQ $0, CX
CPUID
BTQ $3, BX // bit 3 = BMI1
SETCS AL
BTQ $8, BX // bit 8 = BMI2
SETCS AH
MOVB AL, bmi1+0(FP)
MOVB AH, bmi2+1(FP)
RET
unsupported:
XORQ AX, AX
MOVB AL, bmi1+0(FP)
MOVB AL, bmi2+1(FP)
RET

View File

@ -19,6 +19,7 @@ This is important, so you don't have to worry about spending CPU cycles on alrea
* Adjustable compression (3 levels) * Adjustable compression (3 levels)
* Concurrent stream compression * Concurrent stream compression
* Faster decompression, even for Snappy compatible content * Faster decompression, even for Snappy compatible content
* Concurrent Snappy/S2 stream decompression
* Ability to quickly skip forward in compressed stream * Ability to quickly skip forward in compressed stream
* Random seeking with indexes * Random seeking with indexes
* Compatible with reading Snappy compressed content * Compatible with reading Snappy compressed content
@ -415,6 +416,25 @@ Without assembly decompression is also very fast; single goroutine decompression
Even though S2 typically compresses better than Snappy, decompression speed is always better. Even though S2 typically compresses better than Snappy, decompression speed is always better.
### Concurrent Stream Decompression
For full stream decompression S2 offers a [DecodeConcurrent](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.DecodeConcurrent)
that will decode a full stream using multiple goroutines.
Example scaling, AMD Ryzen 3950X, 16 cores, decompression using `s2d -bench=3 <input>`, best of 3:
| Input | `-cpu=1` | `-cpu=2` | `-cpu=4` | `-cpu=8` | `-cpu=16` |
|-------------------------------------------|------------|------------|------------|------------|-------------|
| enwik10.snappy | 1098.6MB/s | 1819.8MB/s | 3625.6MB/s | 6910.6MB/s | 10818.2MB/s |
| enwik10.s2 | 1303.5MB/s | 2606.1MB/s | 4847.9MB/s | 8878.4MB/s | 9592.1MB/s |
| sofia-air-quality-dataset.tar.snappy | 1302.0MB/s | 2165.0MB/s | 4244.5MB/s | 8241.0MB/s | 12920.5MB/s |
| sofia-air-quality-dataset.tar.s2 | 1399.2MB/s | 2463.2MB/s | 5196.5MB/s | 9639.8MB/s | 11439.5MB/s |
| sofia-air-quality-dataset.tar.s2 (no asm) | 837.5MB/s | 1652.6MB/s | 3183.6MB/s | 5945.0MB/s | 9620.7MB/s |
Scaling can be expected to be pretty linear until memory bandwidth is saturated.
For now, DecodeConcurrent can only be used for full streams without seeking or combining with regular reads.
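
As a usage illustration, here is a minimal sketch of concurrent full-stream decompression; the file names are illustrative, and passing a value `<= 0` makes the reader use `runtime.NumCPU()` goroutines, as documented:

```go
package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/s2"
)

func main() {
	in, err := os.Open("input.s2") // illustrative input path
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("output.bin") // illustrative output path
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	r := s2.NewReader(in)
	// concurrent <= 0 lets the reader pick runtime.NumCPU().
	n, err := r.DecodeConcurrent(out, 0)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decompressed %d bytes", n)
}
```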
## Block compression ## Block compression
@ -873,7 +893,7 @@ for each entry {
} }
// Uncompressed uses previous offset and adds EstBlockSize // Uncompressed uses previous offset and adds EstBlockSize
entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff
} }
@ -901,6 +921,14 @@ for each entry {
} }
``` ```
To decode from any given uncompressed offset `(wantOffset)`:
* Iterate entries until `entry[n].UncompressedOffset > wantOffset`.
* Start decoding from `entry[n-1].CompressedOffset`.
* Discard `wantOffset - entry[n-1].UncompressedOffset` bytes from the decoded stream.
See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface.
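
For illustration, a minimal sketch of that lookup with a hypothetical `indexEntry` type (the real implementation is `Index.Find`, which additionally switches to a binary search for large indexes):

```go
package main

import "fmt"

// indexEntry is a hypothetical stand-in for the entries described above.
type indexEntry struct {
	CompressedOffset   int64
	UncompressedOffset int64
}

// find returns the entry to start decoding from and how many decoded
// bytes to discard before reaching wantOffset.
func find(entries []indexEntry, wantOffset int64) (start indexEntry, discard int64) {
	n := 0
	for n < len(entries) && entries[n].UncompressedOffset <= wantOffset {
		n++
	}
	if n == 0 {
		n = 1 // wantOffset precedes the first entry; start from it anyway
	}
	start = entries[n-1]
	discard = wantOffset - start.UncompressedOffset
	return start, discard
}

func main() {
	idx := []indexEntry{{0, 0}, {100, 512}, {250, 1024}}
	s, d := find(idx, 600)
	fmt.Printf("start at compressed offset %d, discard %d decoded bytes\n",
		s.CompressedOffset, d)
}
```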
# Format Extensions # Format Extensions
* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`. * Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.

View File

@ -11,6 +11,8 @@ import (
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"runtime"
"sync"
) )
var ( var (
@ -169,6 +171,14 @@ func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption {
} }
} }
// ReaderIgnoreCRC will make the reader skip CRC calculation and checks.
func ReaderIgnoreCRC() ReaderOption {
return func(r *Reader) error {
r.ignoreCRC = true
return nil
}
}
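
A minimal usage sketch for this option (stdin/stdout are purely illustrative); skipping CRC checks only makes sense when integrity is already guaranteed by an outer layer:

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/s2"
)

func main() {
	// Construct a reader that does not verify block checksums.
	r := s2.NewReader(os.Stdin, s2.ReaderIgnoreCRC())
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}
```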
// Reader is an io.Reader that can read Snappy-compressed bytes. // Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct { type Reader struct {
r io.Reader r io.Reader
@ -191,18 +201,19 @@ type Reader struct {
paramsOK bool paramsOK bool
snappyFrame bool snappyFrame bool
ignoreStreamID bool ignoreStreamID bool
ignoreCRC bool
} }
// ensureBufferSize will ensure that the buffer can take at least n bytes. // ensureBufferSize will ensure that the buffer can take at least n bytes.
// If false is returned the buffer exceeds maximum allowed size. // If false is returned the buffer exceeds maximum allowed size.
func (r *Reader) ensureBufferSize(n int) bool { func (r *Reader) ensureBufferSize(n int) bool {
if len(r.buf) >= n {
return true
}
if n > r.maxBufSize { if n > r.maxBufSize {
r.err = ErrCorrupt r.err = ErrCorrupt
return false return false
} }
if cap(r.buf) >= n {
return true
}
// Realloc buffer. // Realloc buffer.
r.buf = make([]byte, n) r.buf = make([]byte, n)
return true return true
@ -220,6 +231,7 @@ func (r *Reader) Reset(reader io.Reader) {
r.err = nil r.err = nil
r.i = 0 r.i = 0
r.j = 0 r.j = 0
r.blockStart = 0
r.readHeader = r.ignoreStreamID r.readHeader = r.ignoreStreamID
} }
@ -344,7 +356,7 @@ func (r *Reader) Read(p []byte) (int, error) {
r.err = err r.err = err
return 0, r.err return 0, r.err
} }
if crc(r.decoded[:n]) != checksum { if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
r.err = ErrCRC r.err = ErrCRC
return 0, r.err return 0, r.err
} }
@ -385,7 +397,7 @@ func (r *Reader) Read(p []byte) (int, error) {
if !r.readFull(r.decoded[:n], false) { if !r.readFull(r.decoded[:n], false) {
return 0, r.err return 0, r.err
} }
if crc(r.decoded[:n]) != checksum { if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
r.err = ErrCRC r.err = ErrCRC
return 0, r.err return 0, r.err
} }
@ -435,6 +447,259 @@ func (r *Reader) Read(p []byte) (int, error) {
} }
} }
// DecodeConcurrent will decode the full stream to w.
// This function should not be combined with reading, seeking or other operations.
// Up to 'concurrent' goroutines will be used.
// If <= 0, runtime.NumCPU will be used.
// On success the number of bytes decompressed and nil is returned.
// This is mainly intended for bigger streams.
func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, err error) {
if r.i > 0 || r.j > 0 || r.blockStart > 0 {
return 0, errors.New("DecodeConcurrent called after ")
}
if concurrent <= 0 {
concurrent = runtime.NumCPU()
}
// Write to output
var errMu sync.Mutex
var aErr error
setErr := func(e error) (ok bool) {
errMu.Lock()
defer errMu.Unlock()
if e == nil {
return aErr == nil
}
if aErr == nil {
aErr = e
}
return false
}
hasErr := func() (ok bool) {
errMu.Lock()
v := aErr != nil
errMu.Unlock()
return v
}
var aWritten int64
toRead := make(chan []byte, concurrent)
writtenBlocks := make(chan []byte, concurrent)
queue := make(chan chan []byte, concurrent)
reUse := make(chan chan []byte, concurrent)
for i := 0; i < concurrent; i++ {
toRead <- make([]byte, 0, r.maxBufSize)
writtenBlocks <- make([]byte, 0, r.maxBufSize)
reUse <- make(chan []byte, 1)
}
// Writer
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
for toWrite := range queue {
entry := <-toWrite
reUse <- toWrite
if hasErr() {
writtenBlocks <- entry
continue
}
n, err := w.Write(entry)
want := len(entry)
writtenBlocks <- entry
if err != nil {
setErr(err)
continue
}
if n != want {
setErr(io.ErrShortWrite)
continue
}
aWritten += int64(n)
}
}()
// Reader
defer func() {
close(queue)
if r.err != nil {
err = r.err
setErr(r.err)
}
wg.Wait()
if err == nil {
err = aErr
}
written = aWritten
}()
for !hasErr() {
if !r.readFull(r.buf[:4], true) {
if r.err == io.EOF {
r.err = nil
}
return 0, r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return 0, r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
// The chunk types are specified at
// https://github.com/google/snappy/blob/master/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
r.blockStart += int64(r.j)
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
if chunkLen > r.maxBufSize {
r.err = ErrCorrupt
return 0, r.err
}
orgBuf := <-toRead
buf := orgBuf[:chunkLen]
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return 0, r.err
}
if r.snappyFrame && n > maxSnappyBlockSize {
r.err = ErrCorrupt
return 0, r.err
}
if n > r.maxBlock {
r.err = ErrCorrupt
return 0, r.err
}
wg.Add(1)
decoded := <-writtenBlocks
entry := <-reUse
queue <- entry
go func() {
defer wg.Done()
decoded = decoded[:n]
_, err := Decode(decoded, buf)
toRead <- orgBuf
if err != nil {
writtenBlocks <- decoded
setErr(err)
return
}
if !r.ignoreCRC && crc(decoded) != checksum {
writtenBlocks <- decoded
setErr(ErrCRC)
return
}
entry <- decoded
}()
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
if chunkLen > r.maxBufSize {
r.err = ErrCorrupt
return 0, r.err
}
// Grab write buffer
orgBuf := <-writtenBlocks
buf := orgBuf[:checksumSize]
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read content.
n := chunkLen - checksumSize
if r.snappyFrame && n > maxSnappyBlockSize {
r.err = ErrCorrupt
return 0, r.err
}
if n > r.maxBlock {
r.err = ErrCorrupt
return 0, r.err
}
// Read uncompressed
buf = orgBuf[:n]
if !r.readFull(buf, false) {
return 0, r.err
}
if !r.ignoreCRC && crc(buf) != checksum {
r.err = ErrCRC
return 0, r.err
}
entry := <-reUse
queue <- entry
entry <- buf
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.buf[:len(magicBody)], false) {
return 0, r.err
}
if string(r.buf[:len(magicBody)]) != magicBody {
if string(r.buf[:len(magicBody)]) != magicBodySnappy {
r.err = ErrCorrupt
return 0, r.err
} else {
r.snappyFrame = true
}
} else {
r.snappyFrame = false
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
// fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
r.err = ErrUnsupported
return 0, r.err
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if chunkLen > maxChunkSize {
// fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
r.err = ErrUnsupported
return 0, r.err
}
// fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
if !r.skippable(r.buf, chunkLen, false, chunkType) {
return 0, r.err
}
}
return 0, r.err
}
// Skip will skip n bytes forward in the decompressed output. // Skip will skip n bytes forward in the decompressed output.
// For larger skips this consumes less CPU and is faster than reading output and discarding it. // For larger skips this consumes less CPU and is faster than reading output and discarding it.
// CRC is not checked on skipped blocks. // CRC is not checked on skipped blocks.
@ -699,8 +964,16 @@ func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
case io.SeekCurrent: case io.SeekCurrent:
offset += r.blockStart + int64(r.i) offset += r.blockStart + int64(r.i)
case io.SeekEnd: case io.SeekEnd:
offset = -offset if offset > 0 {
return 0, errors.New("seek after end of file")
} }
offset = r.index.TotalUncompressed + offset
}
if offset < 0 {
return 0, errors.New("seek before start of file")
}
c, u, err := r.index.Find(offset) c, u, err := r.index.Find(offset)
if err != nil { if err != nil {
return r.blockStart + int64(r.i), err return r.blockStart + int64(r.i), err
@ -712,10 +985,6 @@ func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
return 0, err return 0, err
} }
if offset < 0 {
offset = r.index.TotalUncompressed + offset
}
r.i = r.j // Remove rest of current block. r.i = r.j // Remove rest of current block.
if u < offset { if u < offset {
// Forward inside block // Forward inside block

View File

@ -1119,12 +1119,6 @@ func (w *Writer) closeIndex(idx bool) ([]byte, error) {
if w.appendIndex { if w.appendIndex {
w.written += int64(len(index)) w.written += int64(len(index))
} }
if true {
_, err := w.index.Load(index)
if err != nil {
panic(err)
}
}
} }
if w.pad > 1 { if w.pad > 1 {

View File

@ -370,7 +370,7 @@ func encodeBlockBestSnappy(dst, src []byte) (d int) {
} }
offset := m.s - m.offset offset := m.s - m.offset
return score - emitCopySize(offset, m.length) return score - emitCopyNoRepeatSize(offset, m.length)
} }
matchAt := func(offset, s int, first uint32) match { matchAt := func(offset, s int, first uint32) match {
@ -567,6 +567,10 @@ func emitCopySize(offset, length int) int {
// Offset no more than 2 bytes. // Offset no more than 2 bytes.
if length > 64 { if length > 64 {
if offset < 2048 {
// Emit 8 bytes, then rest as repeats...
return 2 + emitRepeatSize(offset, length-8)
}
// Emit remaining as repeats, at least 4 bytes remain. // Emit remaining as repeats, at least 4 bytes remain.
return 3 + emitRepeatSize(offset, length-60) return 3 + emitRepeatSize(offset, length-60)
} }
@ -577,6 +581,28 @@ func emitCopySize(offset, length int) int {
return 2 return 2
} }
// emitCopyNoRepeatSize returns the size to encode the offset+length
//
// It assumes that:
// 1 <= offset && offset <= math.MaxUint32
// 4 <= length && length <= 1 << 24
func emitCopyNoRepeatSize(offset, length int) int {
if offset >= 65536 {
return 5 + 5*(length/64)
}
// Offset no more than 2 bytes.
if length > 64 {
// Emit remaining as repeats, at least 4 bytes remain.
return 3 + 3*(length/60)
}
if length >= 12 || offset >= 2048 {
return 3
}
// Emit the remaining copy, encoded as 2 bytes.
return 2
}
// emitRepeatSize returns the number of bytes required to encode a repeat. // emitRepeatSize returns the number of bytes required to encode a repeat.
// Length must be at least 4 and < 1<<24 // Length must be at least 4 and < 1<<24
func emitRepeatSize(offset, length int) int { func emitRepeatSize(offset, length int) int {

View File

@ -180,14 +180,23 @@ func emitCopy(dst []byte, offset, length int) int {
// Offset no more than 2 bytes. // Offset no more than 2 bytes.
if length > 64 { if length > 64 {
off := 3
if offset < 2048 {
// emit 8 bytes as tagCopy1, rest as repeats.
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
length -= 8
off = 2
} else {
// Emit a length 60 copy, encoded as 3 bytes. // Emit a length 60 copy, encoded as 3 bytes.
// Emit remaining as repeat value (minimum 4 bytes). // Emit remaining as repeat value (minimum 4 bytes).
dst[2] = uint8(offset >> 8) dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset) dst[1] = uint8(offset)
dst[0] = 59<<2 | tagCopy2 dst[0] = 59<<2 | tagCopy2
length -= 60 length -= 60
}
// Emit remaining as repeats, at least 4 bytes remain. // Emit remaining as repeats, at least 4 bytes remain.
return 3 + emitRepeat(dst[3:], offset, length) return off + emitRepeat(dst[off:], offset, length)
} }
if length >= 12 || offset >= 2048 { if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes. // Emit the remaining copy, encoded as 3 bytes.

View File

@ -5,6 +5,8 @@
package s2 package s2
func _dummy_()
// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. // encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes. // Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written. // It assumes that the varint-encoded length of the decompressed bytes has already been written.

File diff suppressed because it is too large.

View File

@ -10,6 +10,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"sort"
) )
const ( const (
@ -100,6 +101,15 @@ func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err er
if offset > i.TotalUncompressed { if offset > i.TotalUncompressed {
return 0, 0, io.ErrUnexpectedEOF return 0, 0, io.ErrUnexpectedEOF
} }
if len(i.info) > 200 {
n := sort.Search(len(i.info), func(n int) bool {
return i.info[n].uncompressedOffset > offset
})
if n == 0 {
n = 1
}
return i.info[n-1].compressedOffset, i.info[n-1].uncompressedOffset, nil
}
for _, info := range i.info { for _, info := range i.info {
if info.uncompressedOffset > offset { if info.uncompressedOffset > offset {
break break

View File

@ -386,47 +386,31 @@ In practice this means that concurrency is often limited to utilizing about 3 co
### Benchmarks ### Benchmarks
These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
The first two are streaming decodes and the last are smaller inputs. The first two are streaming decodes and the last are smaller inputs.
Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
``` ```
BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op Concurrent blocks, performance:
BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op
Concurrent performance: BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op
``` ```
This reflects the performance around May 2020, but this may be out of date. This reflects the performance around May 2022, but this may be out of date.
## Zstd inside ZIP files ## Zstd inside ZIP files

View File

@ -63,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 {
return v return v
} }
func (b *bitReader) get16BitsFast(n uint8) uint16 {
const regMask = 64 - 1
v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
b.bitsRead += n
return v
}
// fillFast() will make sure at least 32 bits are available. // fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available. // There must be at least 4 bytes available.
func (b *bitReader) fillFast() { func (b *bitReader) fillFast() {

View File

@ -5,8 +5,6 @@
package zstd package zstd
import "fmt"
// bitWriter will write bits. // bitWriter will write bits.
// First bit will be LSB of the first byte of output. // First bit will be LSB of the first byte of output.
type bitWriter struct { type bitWriter struct {
@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
b.nBits += bits b.nBits += bits
} }
// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
v := b.nBits >> 3
switch v {
case 0:
case 1:
b.out = append(b.out,
byte(b.bitContainer),
)
case 2:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
)
case 3:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
)
case 4:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
)
case 5:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
)
case 6:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
)
case 7:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
)
case 8:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
byte(b.bitContainer>>56),
)
default:
panic(fmt.Errorf("bits (%d) > 64", b.nBits))
}
b.bitContainer >>= v << 3
b.nBits &= 7
}
// flush32 will flush out, so there are at least 32 bits available for writing. // flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() { func (b *bitWriter) flush32() {
if b.nBits < 32 { if b.nBits < 32 {

View File

@ -5,9 +5,14 @@
package zstd package zstd
import ( import (
"bytes"
"encoding/binary"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os"
"path/filepath"
"sync" "sync"
"github.com/klauspost/compress/huff0" "github.com/klauspost/compress/huff0"
@ -38,12 +43,12 @@ const (
// maxCompressedBlockSize is the biggest allowed compressed block size (128KB) // maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
maxCompressedBlockSize = 128 << 10 maxCompressedBlockSize = 128 << 10
compressedBlockOverAlloc = 16
maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc
// Maximum possible block size (all Raw+Uncompressed). // Maximum possible block size (all Raw+Uncompressed).
maxBlockSize = (1 << 21) - 1 maxBlockSize = (1 << 21) - 1
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
maxCompressedLiteralSize = 1 << 18
maxRLELiteralSize = 1 << 20
maxMatchLen = 131074 maxMatchLen = 131074
maxSequences = 0x7f00 + 0xffff maxSequences = 0x7f00 + 0xffff
@ -97,7 +102,6 @@ type blockDec struct {
// Block is RLE, this is the size. // Block is RLE, this is the size.
RLESize uint32 RLESize uint32
tmp [4]byte
Type blockType Type blockType
@ -136,7 +140,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
b.Type = blockType((bh >> 1) & 3) b.Type = blockType((bh >> 1) & 3)
// find size. // find size.
cSize := int(bh >> 3) cSize := int(bh >> 3)
maxSize := maxBlockSize maxSize := maxCompressedBlockSizeAlloc
switch b.Type { switch b.Type {
case blockTypeReserved: case blockTypeReserved:
return ErrReservedBlockType return ErrReservedBlockType
@ -157,9 +161,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
println("Data size on stream:", cSize) println("Data size on stream:", cSize)
} }
b.RLESize = 0 b.RLESize = 0
maxSize = maxCompressedBlockSize maxSize = maxCompressedBlockSizeAlloc
if windowSize < maxCompressedBlockSize && b.lowMem { if windowSize < maxCompressedBlockSize && b.lowMem {
maxSize = int(windowSize) maxSize = int(windowSize) + compressedBlockOverAlloc
} }
if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
if debugDecoder { if debugDecoder {
@ -190,9 +194,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
// Read block data. // Read block data.
if cap(b.dataStorage) < cSize { if cap(b.dataStorage) < cSize {
if b.lowMem || cSize > maxCompressedBlockSize { if b.lowMem || cSize > maxCompressedBlockSize {
b.dataStorage = make([]byte, 0, cSize) b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
} else { } else {
b.dataStorage = make([]byte, 0, maxCompressedBlockSize) b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
} }
} }
if cap(b.dst) <= maxSize { if cap(b.dst) <= maxSize {
@ -360,14 +364,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
} }
if cap(b.literalBuf) < litRegenSize { if cap(b.literalBuf) < litRegenSize {
if b.lowMem { if b.lowMem {
b.literalBuf = make([]byte, litRegenSize) b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
} else { } else {
if litRegenSize > maxCompressedLiteralSize { b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
// Exceptional
b.literalBuf = make([]byte, litRegenSize)
} else {
b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
}
} }
} }
literals = b.literalBuf[:litRegenSize] literals = b.literalBuf[:litRegenSize]
@ -397,14 +396,14 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
// Ensure we have space to store it. // Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize { if cap(b.literalBuf) < litRegenSize {
if b.lowMem { if b.lowMem {
b.literalBuf = make([]byte, 0, litRegenSize) b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else { } else {
b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
} }
} }
var err error var err error
// Use our out buffer. // Use our out buffer.
huff.MaxDecodedSize = maxCompressedBlockSize huff.MaxDecodedSize = litRegenSize
if fourStreams { if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
} else { } else {
@ -429,9 +428,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
// Ensure we have space to store it. // Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize { if cap(b.literalBuf) < litRegenSize {
if b.lowMem { if b.lowMem {
b.literalBuf = make([]byte, 0, litRegenSize) b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else { } else {
b.literalBuf = make([]byte, 0, maxCompressedBlockSize) b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
} }
} }
huff := hist.huffTree huff := hist.huffTree
@ -448,7 +447,7 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
return in, err return in, err
} }
hist.huffTree = huff hist.huffTree = huff
huff.MaxDecodedSize = maxCompressedBlockSize huff.MaxDecodedSize = litRegenSize
// Use our out buffer. // Use our out buffer.
if fourStreams { if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@ -463,6 +462,8 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
if len(literals) != litRegenSize { if len(literals) != litRegenSize {
return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
} }
// Re-cap to get extra size.
literals = b.literalBuf[:len(literals)]
if debugDecoder { if debugDecoder {
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
} }
@ -486,10 +487,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
b.dst = append(b.dst, hist.decoders.literals...) b.dst = append(b.dst, hist.decoders.literals...)
return nil return nil
} }
err = hist.decoders.decodeSync(hist) before := len(hist.decoders.out)
err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
if err != nil { if err != nil {
return err return err
} }
if hist.decoders.maxSyncLen > 0 {
hist.decoders.maxSyncLen += uint64(before)
hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
}
b.dst = hist.decoders.out b.dst = hist.decoders.out
hist.recentOffsets = hist.decoders.prevOffset hist.recentOffsets = hist.decoders.prevOffset
return nil return nil
@ -632,6 +638,22 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
println("initializing sequences:", err) println("initializing sequences:", err)
return err return err
} }
// Extract blocks...
if false && hist.dict == nil {
fatalErr := func(err error) {
if err != nil {
panic(err)
}
}
fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
var buf bytes.Buffer
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
buf.Write(in)
ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
}
return nil return nil
} }
@ -650,6 +672,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
} }
hist.decoders.windowSize = hist.windowSize hist.decoders.windowSize = hist.windowSize
hist.decoders.prevOffset = hist.recentOffsets hist.decoders.prevOffset = hist.recentOffsets
err := hist.decoders.decode(b.sequence) err := hist.decoders.decode(b.sequence)
hist.recentOffsets = hist.decoders.prevOffset hist.recentOffsets = hist.decoders.prevOffset
return err return err

View File

@ -52,10 +52,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
return r, nil return r, nil
} }
func (b *byteBuf) remain() []byte {
return *b
}
func (b *byteBuf) readByte() (byte, error) { func (b *byteBuf) readByte() (byte, error) {
bb := *b bb := *b
if len(bb) < 1 { if len(bb) < 1 {

View File

@ -13,12 +13,6 @@ type byteReader struct {
off int off int
} }
// init will initialize the reader and set the input.
func (b *byteReader) init(in []byte) {
b.b = in
b.off = 0
}
// advance the stream b n bytes. // advance the stream b n bytes.
func (b *byteReader) advance(n uint) { func (b *byteReader) advance(n uint) {
b.off += int(n) b.off += int(n)

View File

@ -347,18 +347,20 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
} }
frame.history.setDict(&dict) frame.history.setDict(&dict)
} }
if frame.WindowSize > d.o.maxWindowSize {
if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { return dst, ErrWindowSizeExceeded
}
if frame.FrameContentSize != fcsUnknown {
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded return dst, ErrDecoderSizeExceeded
} }
if frame.FrameContentSize < 1<<30 {
// Never preallocate more than 1 GB up front.
if cap(dst)-len(dst) < int(frame.FrameContentSize) { if cap(dst)-len(dst) < int(frame.FrameContentSize) {
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
copy(dst2, dst) copy(dst2, dst)
dst = dst2 dst = dst2
} }
} }
if cap(dst) == 0 {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
@ -437,7 +439,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
} }
if len(next.b) > 0 { if !d.o.ignoreChecksum && len(next.b) > 0 {
n, err := d.current.crc.Write(next.b) n, err := d.current.crc.Write(next.b)
if err == nil { if err == nil {
if n != len(next.b) { if n != len(next.b) {
@ -449,7 +451,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
got := d.current.crc.Sum64()
var tmp [4]byte
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
if debugDecoder {
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
}
@ -533,9 +535,15 @@ func (d *Decoder) nextBlockSync() (ok bool) {
// Update/Check CRC
if d.frame.HasCheckSum {
if !d.o.ignoreChecksum {
d.frame.crc.Write(d.current.b)
}
if d.current.d.Last {
if !d.o.ignoreChecksum {
d.current.err = d.frame.checkCRC()
} else {
d.current.err = d.frame.consumeCRC()
}
if d.current.err != nil {
println("CRC error:", d.current.err)
return false
@ -629,60 +637,18 @@ func (d *Decoder) startSyncDecoder(r io.Reader) error {
// Create Decoder:
// ASYNC:
// Spawn 4 go routines.
// Spawn 3 go routines.
// 0: Read frames and decode blocks.
// 0: Read frames and decode block literals.
// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
// 1: Decode sequences.
// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
// 2: Execute sequences, send to output.
// 3: Wait for stream history, execute sequences, send stream history.
func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
defer d.streamWg.Done()
br := readerWrapper{r: r}
var seqPrepare = make(chan *blockDec, d.o.concurrent)
var seqDecode = make(chan *blockDec, d.o.concurrent)
var seqExecute = make(chan *blockDec, d.o.concurrent)
// Async 1: Prepare blocks... // Async 1: Decode sequences...
go func() {
var hist history
var hasErr bool
for block := range seqPrepare {
if hasErr {
if block != nil {
seqDecode <- block
}
continue
}
if block.async.newHist != nil {
if debugDecoder {
println("Async 1: new history")
}
hist.reset()
if block.async.newHist.dict != nil {
hist.setDict(block.async.newHist.dict)
}
}
if block.err != nil || block.Type != blockTypeCompressed {
hasErr = block.err != nil
seqDecode <- block
continue
}
remain, err := block.decodeLiterals(block.data, &hist)
block.err = err
hasErr = block.err != nil
if err == nil {
block.async.literals = hist.decoders.literals
block.async.seqData = remain
} else if debugDecoder {
println("decodeLiterals error:", err)
}
seqDecode <- block
}
close(seqDecode)
}()
// Async 2: Decode sequences...
go func() {
var hist history
var hasErr bool
@ -696,7 +662,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if block.async.newHist != nil {
if debugDecoder {
println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
}
hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets
@ -750,7 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if block.async.newHist != nil {
if debugDecoder {
println("Async 3: new history")
println("Async 2: new history")
}
hist.windowSize = block.async.newHist.windowSize
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
@ -837,6 +803,33 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
decodeStream:
for {
var hist history
var hasErr bool
decodeBlock := func(block *blockDec) {
if hasErr {
if block != nil {
seqDecode <- block
}
return
}
if block.err != nil || block.Type != blockTypeCompressed {
hasErr = block.err != nil
seqDecode <- block
return
}
remain, err := block.decodeLiterals(block.data, &hist)
block.err = err
hasErr = block.err != nil
if err == nil {
block.async.literals = hist.decoders.literals
block.async.seqData = remain
} else if debugDecoder {
println("decodeLiterals error:", err)
}
seqDecode <- block
}
frame := d.frame
if debugDecoder {
println("New frame...")
@ -863,7 +856,7 @@ decodeStream:
case <-ctx.Done(): case <-ctx.Done():
case dec := <-d.decoders: case dec := <-d.decoders:
dec.sendErr(err) dec.sendErr(err)
seqPrepare <- dec decodeBlock(dec)
} }
break decodeStream break decodeStream
} }
@ -883,6 +876,10 @@ decodeStream:
if debugDecoder {
println("Alloc History:", h.allocFrameBuffer)
}
hist.reset()
if h.dict != nil {
hist.setDict(h.dict)
}
dec.async.newHist = &h dec.async.newHist = &h
dec.async.fcs = frame.FrameContentSize dec.async.fcs = frame.FrameContentSize
historySent = true historySent = true
@ -909,7 +906,7 @@ decodeStream:
}
err = dec.err
last := dec.Last
seqPrepare <- dec
decodeBlock(dec)
if err != nil {
break decodeStream
}
@ -918,7 +915,7 @@ decodeStream:
}
}
}
close(seqPrepare)
close(seqDecode)
wg.Wait()
d.frame.history.b = frameHistCache
}
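The goroutine comments above compress the old four-stage pipeline into three stages connected by channels. As a rough, self-contained sketch of that wiring (illustrative Go only, not the vendored code; `block` and the stage bodies are stand-ins):

```go
package main

import "fmt"

// block stands in for the decoder's per-block work item.
type block struct{ id int }

func main() {
	const concurrent = 4 // mirrors d.o.concurrent
	seqDecode := make(chan *block, concurrent)
	seqExecute := make(chan *block, concurrent)
	output := make(chan *block, concurrent)

	// Stage 1: decode sequences, forward in order (channel FIFO).
	go func() {
		for b := range seqDecode {
			// ... FSE sequence decoding would happen here ...
			seqExecute <- b
		}
		close(seqExecute)
	}()

	// Stage 2: execute sequences against history, emit output.
	go func() {
		for b := range seqExecute {
			// ... match/literal execution would happen here ...
			output <- b
		}
		close(output)
	}()

	// Stage 0: read frames, decode block literals, feed the pipeline.
	go func() {
		for i := 0; i < 3; i++ {
			seqDecode <- &block{id: i}
		}
		close(seqDecode)
	}()

	for b := range output {
		fmt.Println("block done:", b.id)
	}
}
```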


@ -19,6 +19,7 @@ type decoderOptions struct {
maxDecodedSize uint64
maxWindowSize uint64
dicts []dict
ignoreChecksum bool
}
func (o *decoderOptions) setDefault() {
@ -31,7 +32,7 @@ func (o *decoderOptions) setDefault() {
if o.concurrent > 4 { if o.concurrent > 4 {
o.concurrent = 4 o.concurrent = 4
} }
o.maxDecodedSize = 1 << 63 o.maxDecodedSize = 64 << 30
} }
// WithDecoderLowmem will set whether to use a lower amount of memory,
@ -66,7 +67,7 @@ func WithDecoderConcurrency(n int) DOption {
// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
// non-streaming operations or maximum window size for streaming operations.
// This can be used to control memory usage of potentially hostile content.
// Maximum and default is 1 << 63 bytes.
// Maximum is 1 << 63 bytes. Default is 64GiB.
func WithDecoderMaxMemory(n uint64) DOption {
return func(o *decoderOptions) error {
if n == 0 {
@ -112,3 +113,11 @@ func WithDecoderMaxWindow(size uint64) DOption {
return nil
}
}
// IgnoreChecksum allows to forcibly ignore checksum checking.
func IgnoreChecksum(b bool) DOption {
return func(o *decoderOptions) error {
o.ignoreChecksum = b
return nil
}
}
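Taken together, the option changes above alter the decoder's memory story: the default cap drops from 1&lt;&lt;63 to 64GiB, and checksum verification becomes optional. A minimal usage sketch, with illustrative values only:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Cap decoded output at 64MB and skip checksum verification.
	dec, err := zstd.NewReader(nil,
		zstd.WithDecoderMaxMemory(64<<20),
		zstd.IgnoreChecksum(true),
	)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	var compressed []byte // a zstd frame would go here
	out, err := dec.DecodeAll(compressed, nil)
	fmt.Println(len(out), err)
}
```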


@ -156,8 +156,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -518,8 +518,8 @@ encodeLoop:
}
// Store this, since we have it.
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@ -674,8 +674,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -1047,8 +1047,8 @@ encodeLoop:
}
// Store this, since we have it.
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match


@ -127,8 +127,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -439,8 +439,8 @@ encodeLoop:
var t int32
for {
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@ -785,8 +785,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -969,7 +969,7 @@ encodeLoop:
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
e.longTable[longHash1] = te0
e.longTable[longHash2] = te1
e.markLongShardDirty(longHash1)
@ -1002,8 +1002,8 @@ encodeLoop:
}
// Store this, since we have it.
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match


@ -551,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
// If we can do everything in one block, prefer that.
if len(src) <= maxCompressedBlockSize {
if len(src) <= e.o.blockSize {
enc.Reset(e.o.dict, true)
// Slightly faster with no history and everything in one block.
if e.o.crc {
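The changed condition means the single-block fast path now follows the configured block size rather than the compile-time maximum. That path is reached through EncodeAll; a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil) // nil writer: EncodeAll-only use
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	src := []byte("payloads up to the block size now compress in one block")
	dst := enc.EncodeAll(src, nil)
	fmt.Println("compressed", len(src), "->", len(dst), "bytes")
}
```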


@ -253,10 +253,11 @@ func (d *frameDec) reset(br byteBuffer) error {
return ErrWindowSizeTooSmall
}
d.history.windowSize = int(d.WindowSize)
if d.o.lowMem && d.history.windowSize < maxBlockSize {
if !d.o.lowMem || d.history.windowSize < maxBlockSize {
// Alloc 2x window size if not low-mem, or very small window size.
d.history.allocFrameBuffer = d.history.windowSize * 2
// TODO: Maybe use FrameContent size
} else {
// Alloc with one additional block
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
}
@ -290,13 +291,6 @@ func (d *frameDec) checkCRC() error {
if !d.HasCheckSum {
return nil
}
var tmp [4]byte
got := d.crc.Sum64()
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
// We can overwrite upper tmp now // We can overwrite upper tmp now
want, err := d.rawInput.readSmall(4)
@ -305,7 +299,19 @@ func (d *frameDec) checkCRC() error {
return err
}
if !bytes.Equal(tmp[:], want) && !ignoreCRC { if d.o.ignoreChecksum {
return nil
}
var tmp [4]byte
got := d.crc.Sum64()
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
if !bytes.Equal(tmp[:], want) {
if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want)
}
@ -317,6 +323,19 @@ func (d *frameDec) checkCRC() error {
return nil
}
// consumeCRC reads the checksum data if the frame has one.
func (d *frameDec) consumeCRC() error {
if d.HasCheckSum {
_, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
return err
}
}
return nil
}
// runDecoder will create a sync decoder that will decode a block of data.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
saved := d.history.b
@ -326,6 +345,19 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
d.history.ignoreBuffer = len(dst)
// Store input length, so we only check new data.
crcStart := len(dst)
d.history.decoders.maxSyncLen = 0
if d.FrameContentSize != fcsUnknown {
d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
return dst, ErrDecoderSizeExceeded
}
if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
// Alloc for output
dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
copy(dst2, dst)
dst = dst2
}
}
var err error
for {
err = dec.reset(d.rawInput, d.WindowSize)
@ -360,6 +392,9 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
err = ErrFrameSizeMismatch
} else if d.HasCheckSum {
if d.o.ignoreChecksum {
err = d.consumeCRC()
} else {
var n int
n, err = d.crc.Write(dst[crcStart:])
if err == nil {
@ -371,6 +406,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
}
}
}
}
d.history.b = saved
return dst, err
}


@ -5,8 +5,10 @@
package zstd
import (
"encoding/binary"
"errors"
"fmt"
"io"
)
const (
@ -182,6 +184,29 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
return s.buildDtable()
}
func (s *fseDecoder) mustReadFrom(r io.Reader) {
fatalErr := func(err error) {
if err != nil {
panic(err)
}
}
// dt [maxTablesize]decSymbol // Decompression table.
// symbolLen uint16 // Length of active part of the symbol table.
// actualTableLog uint8 // Selected tablelog.
// maxBits uint8 // Maximum number of additional bits
// // used for table creation to avoid allocations.
// stateTable [256]uint16
// norm [maxSymbolValue + 1]int16
// preDefined bool
fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
}
// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
@ -204,18 +229,10 @@ func (d decSymbol) newState() uint16 {
return uint16(d >> 16) return uint16(d >> 16)
} }
func (d decSymbol) baseline() uint32 {
return uint32(d >> 32)
}
func (d decSymbol) baselineInt() int { func (d decSymbol) baselineInt() int {
return int(d >> 32) return int(d >> 32)
} }
func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
*d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
}
func (d *decSymbol) setNBits(nBits uint8) {
const mask = 0xffffffffffffff00
*d = (*d & mask) | decSymbol(nBits)
@ -231,11 +248,6 @@ func (d *decSymbol) setNewState(state uint16) {
*d = (*d & mask) | decSymbol(state)<<16 *d = (*d & mask) | decSymbol(state)<<16
} }
func (d *decSymbol) setBaseline(baseline uint32) {
const mask = 0xffffffff
*d = (*d & mask) | decSymbol(baseline)<<32
}
func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
const mask = 0xffff00ff
*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
@ -352,34 +364,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
s.state = dt[br.getBits(tableLog)]
}
// next returns the current symbol and sets the next state.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) next(br *bitReader) {
lowBits := uint16(br.getBits(s.state.nbBits()))
s.state = s.dt[s.state.newState()+lowBits]
}
// finished returns true if all bits have been read from the bitstream
// and the next state would require reading bits from the input.
func (s *fseState) finished(br *bitReader) bool {
return br.finished() && s.state.nbBits() > 0
}
// final returns the current state symbol without decoding the next.
func (s *fseState) final() (int, uint8) {
return s.state.baselineInt(), s.state.addBits()
}
// final returns the current state symbol without decoding the next.
func (s decSymbol) final() (int, uint8) {
return s.baselineInt(), s.addBits()
}
// nextFast returns the next symbol and sets the next state.
// This can only be used if no symbols are 0 bits.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
lowBits := br.get16BitsFast(s.state.nbBits())
s.state = s.dt[s.state.newState()+lowBits]
return s.state.baseline(), s.state.addBits()
}


@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
s.clearCount = maxCount != 0
}
// prepare will prepare and allocate scratch tables used for both compression and decompression.
func (s *fseEncoder) prepare() (*fseEncoder, error) {
if s == nil {
s = &fseEncoder{}
}
s.useRLE = false
if s.clearCount && s.maxCount == 0 {
for i := range s.count {
s.count[i] = 0
}
s.clearCount = false
}
return s, nil
}
// allocCtable will allocate tables needed for compression.
// If existing tables are big enough, they are simply re-used.
func (s *fseEncoder) allocCtable() {
@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
c.state = c.stateTable[lu]
}
// encode the output symbol provided and write it to the bitstream.
func (c *cState) encode(symbolTT symbolTransform) {
nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
c.bw.addBits16NC(c.state, uint8(nbBitsOut))
c.state = c.stateTable[dstState]
}
// flush will write the tablelog to the output and flush the remaining full bytes.
func (c *cState) flush(tableLog uint8) {
c.bw.flush32()


@ -1,11 +0,0 @@
//go:build ignorecrc
// +build ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = true


@ -1,11 +0,0 @@
//go:build !ignorecrc
// +build !ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = false


@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 {
return (uint32(u) * prime4bytes) >> (32 - length)
}
}
// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash3(u uint32, h uint8) uint32 {
return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
}


@ -73,6 +73,7 @@ type sequenceDecs struct {
seqSize int
windowSize int
maxBits uint8
maxSyncLen uint64
}
// initialize all 3 decoders from the stream input.
@ -98,153 +99,13 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
return nil
}
// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0
litRemain := len(s.literals)
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := range seqs {
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
// Final will not read from stream.
var llB, mlB, moB uint8
ll, llB = llState.final()
ml, mlB = mlState.final()
mo, moB = ofState.final()
// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)
if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
} else {
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}
if mo == 0 {
mo = s.prevOffset[0]
} else {
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("WARNING: temp was 0")
temp = 1
}
if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
}
}
br.fillFast()
} else {
if br.overread() {
if debugDecoder {
printf("reading sequence %d, exceeded available data\n", i)
}
return io.ErrUnexpectedEOF
}
ll, mo, ml = s.next(br, llState, mlState, ofState)
br.fill()
}
if debugSequences {
println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
}
// Evaluate.
// We might be doing this async, so do it early.
if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
}
if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
}
seqs[i] = seqVals{
ll: ll,
ml: ml,
mo: mo,
}
if i == len(seqs)-1 {
// This is the last sequence, so we shouldn't update state.
break
}
// Manually inlined, ~ 5-20% faster
// Update all 3 states at once. Approx 20% faster.
nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
if nBits == 0 {
llState = llTable[llState.newState()&maxTableMask]
mlState = mlTable[mlState.newState()&maxTableMask]
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits >> (ofState.nbBits() & 31))
lowBits &= bitMask[mlState.nbBits()&15]
mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
}
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}
// execute will execute the decoded sequence with the provided history.
// The sequence must be evaluated before being sent.
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
if len(s.dict) == 0 {
return s.executeSimple(seqs, hist)
}
// Ensure we have enough output size...
if len(s.out)+s.seqSize > cap(s.out) {
addBytes := s.seqSize + len(s.out)
@ -327,6 +188,7 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
}
}
}
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
@ -341,14 +203,18 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
}
// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decodeSync(history *history) error {
func (s *sequenceDecs) decodeSync(hist []byte) error {
supported, err := s.decodeSyncSimple(hist)
if supported {
return err
}
br := s.br
seqs := s.nSeqs
startSize := len(s.out)
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
hist := history.b[history.ignoreBuffer:]
out := s.out
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
@ -433,7 +299,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
}
if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions
@ -463,13 +329,13 @@ func (s *sequenceDecs) decodeSync(history *history) error {
if mo > len(out)+len(hist) || mo > s.windowSize { if mo > len(out)+len(hist) || mo > s.windowSize {
if len(s.dict) == 0 { if len(s.dict) == 0 {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)) return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
} }
// we may be in dictionary. // we may be in dictionary.
dictO := len(s.dict) - (mo - (len(out) + len(hist))) dictO := len(s.dict) - (mo - (len(out) + len(hist)))
if dictO < 0 || dictO >= len(s.dict) { if dictO < 0 || dictO >= len(s.dict) {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)) return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
} }
end := dictO + ml end := dictO + ml
if end > len(s.dict) { if end > len(s.dict) {
@ -530,6 +396,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
@ -543,8 +410,8 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
// Check if space for literals
if len(s.literals)+len(s.out)-startSize > maxBlockSize {
if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
}
// Add final literals // Add final literals
@ -552,16 +419,6 @@ func (s *sequenceDecs) decodeSync(history *history) error {
return br.close()
}
// update states, at least 27 bits must be available.
func (s *sequenceDecs) update(br *bitReader) {
// Max 8 bits
s.litLengths.state.next(br)
// Max 9 bits
s.matchLengths.state.next(br)
// Max 8 bits
s.offsets.state.next(br)
}
var bitMask [16]uint16
func init() {
@ -570,87 +427,6 @@ func init() {
}
}
// update states, at least 27 bits must be available.
func (s *sequenceDecs) updateAlt(br *bitReader) {
// Update all 3 states at once. Approx 20% faster.
a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
nBits := a.nbBits() + b.nbBits() + c.nbBits()
if nBits == 0 {
s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
s.offsets.state.state = s.offsets.state.dt[c.newState()]
return
}
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
lowBits = uint16(bits >> (c.nbBits() & 31))
lowBits &= bitMask[b.nbBits()&15]
s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
lowBits = uint16(bits) & bitMask[c.nbBits()&15]
s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
}
// nextFast will return new states when there are at least 4 unused bytes left on the stream when done.
func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
ll, llB := llState.final()
ml, mlB := mlState.final()
mo, moB := ofState.final()
// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)
if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
return
}
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}
if mo == 0 {
mo = s.prevOffset[0]
return
}
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("temp was 0")
temp = 1
}
if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
return
}
func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
ll, llB := llState.final()


@ -0,0 +1,362 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
package zstd
import (
"fmt"
"github.com/klauspost/compress/internal/cpuinfo"
)
type decodeSyncAsmContext struct {
llTable []decSymbol
mlTable []decSymbol
ofTable []decSymbol
llState uint64
mlState uint64
ofState uint64
iteration int
litRemain int
out []byte
outPosition int
literals []byte
litPosition int
history []byte
windowSize int
ll int // set on error (not for all errors, please refer to _generate/gen.go)
ml int // set on error (not for all errors, please refer to _generate/gen.go)
mo int // set on error (not for all errors, please refer to _generate/gen.go)
}
// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
//go:noescape
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
//go:noescape
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// decode sequences from the stream with the provided history but without a dictionary.
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
if len(s.dict) > 0 {
return false, nil
}
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
return false, nil
}
useSafe := false
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
useSafe = true
}
if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
useSafe = true
}
if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
useSafe = true
}
br := s.br
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
ctx := decodeSyncAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
mlTable: s.matchLengths.fse.dt[:maxTablesize],
ofTable: s.offsets.fse.dt[:maxTablesize],
llState: uint64(s.litLengths.state.state),
mlState: uint64(s.matchLengths.state.state),
ofState: uint64(s.offsets.state.state),
iteration: s.nSeqs - 1,
litRemain: len(s.literals),
out: s.out,
outPosition: len(s.out),
literals: s.literals,
windowSize: s.windowSize,
history: hist,
}
s.seqSize = 0
startSize := len(s.out)
var errCode int
if cpuinfo.HasBMI2() {
if useSafe {
errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
} else {
errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
}
} else {
if useSafe {
errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
} else {
errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
}
}
switch errCode {
case noError:
break
case errorMatchLenOfsMismatch:
return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
case errorMatchLenTooBig:
return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
case errorMatchOffTooBig:
return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
ctx.mo, ctx.outPosition+len(hist)-startSize)
case errorNotEnoughLiterals:
return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
ctx.ll, ctx.litRemain+ctx.ll)
case errorNotEnoughSpace:
size := ctx.outPosition + ctx.ll + ctx.ml
if debugDecoder {
println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
}
return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
default:
return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
}
s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
return true, err
}
s.literals = s.literals[ctx.litPosition:]
t := ctx.outPosition
s.out = s.out[:t]
// Add final literals
s.out = append(s.out, s.literals...)
if debugDecoder {
t += len(s.literals)
if t != len(s.out) {
panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
}
}
return true, nil
}
// --------------------------------------------------------------------------------
type decodeAsmContext struct {
llTable []decSymbol
mlTable []decSymbol
ofTable []decSymbol
llState uint64
mlState uint64
ofState uint64
iteration int
seqs []seqVals
litRemain int
}
const noError = 0
// error reported when mo == 0 && ml > 0
const errorMatchLenOfsMismatch = 1
// error reported when ml > maxMatchLen
const errorMatchLenTooBig = 2
// error reported when mo > available history or mo > s.windowSize
const errorMatchOffTooBig = 3
// error reported when the sum of literal lengths exceeds the literal buffer size
const errorNotEnoughLiterals = 4
// error reported when capacity of `out` is too small
const errorNotEnoughSpace = 5
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// decode sequences from the stream without the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
ctx := decodeAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
mlTable: s.matchLengths.fse.dt[:maxTablesize],
ofTable: s.offsets.fse.dt[:maxTablesize],
llState: uint64(s.litLengths.state.state),
mlState: uint64(s.matchLengths.state.state),
ofState: uint64(s.offsets.state.state),
seqs: seqs,
iteration: len(seqs) - 1,
litRemain: len(s.literals),
}
s.seqSize = 0
lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
var errCode int
if cpuinfo.HasBMI2() {
if lte56bits {
errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
} else {
errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
}
} else {
if lte56bits {
errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
} else {
errCode = sequenceDecs_decode_amd64(s, br, &ctx)
}
}
if errCode != 0 {
i := len(seqs) - ctx.iteration - 1
switch errCode {
case errorMatchLenOfsMismatch:
ml := ctx.seqs[i].ml
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
case errorMatchLenTooBig:
ml := ctx.seqs[i].ml
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
case errorNotEnoughLiterals:
ll := ctx.seqs[i].ll
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
}
return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
}
if ctx.litRemain < 0 {
return fmt.Errorf("literal count is too big: total available %d, total requested %d",
len(s.literals), len(s.literals)-ctx.litRemain)
}
s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}
// --------------------------------------------------------------------------------
type executeAsmContext struct {
seqs []seqVals
seqIndex int
out []byte
history []byte
literals []byte
outPosition int
litPosition int
windowSize int
}
// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
//
// Returns false if a match offset is too big.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Same as above, but with safe memcopies
//go:noescape
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
// executeSimple handles cases when dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
// Ensure we have enough output size...
if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
s.out = append(s.out, make([]byte, addBytes)...)
s.out = s.out[:len(s.out)-addBytes]
}
if debugDecoder {
printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
}
var t = len(s.out)
out := s.out[:t+s.seqSize]
ctx := executeAsmContext{
seqs: seqs,
seqIndex: 0,
out: out,
history: hist,
outPosition: t,
litPosition: 0,
literals: s.literals,
windowSize: s.windowSize,
}
var ok bool
if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
} else {
ok = sequenceDecs_executeSimple_amd64(&ctx)
}
if !ok {
return fmt.Errorf("match offset (%d) bigger than current history (%d)",
seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
}
s.literals = s.literals[ctx.litPosition:]
t = ctx.outPosition
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
t += len(s.literals)
if t != len(out) {
panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
}
}
s.out = out
return nil
}
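decodeSyncSimple and decode above select among four assembler routines at run time based on BMI2 support. A standalone sketch of that dispatch pattern, substituting golang.org/x/sys/cpu for the vendored cpuinfo helper (an assumption made purely for illustration):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// decodeGeneric is the portable fallback path.
func decodeGeneric(data []byte) int { return len(data) }

// decodeBMI2 stands in for a BMI2-accelerated implementation.
func decodeBMI2(data []byte) int { return len(data) }

func main() {
	decode := decodeGeneric
	if cpu.X86.HasBMI2 { // the same feature check cpuinfo.HasBMI2() performs
		decode = decodeBMI2
	}
	fmt.Println(decode([]byte("payload")))
}
```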

vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s generated vendored Normal file (3689 lines)
File diff suppressed because it is too large


@ -0,0 +1,237 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
package zstd
import (
"fmt"
"io"
)
// decode sequences from the stream with the provided history but without dictionary.
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return false, nil
}
// decode sequences from the stream without the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0
litRemain := len(s.literals)
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := range seqs {
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
// Final will not read from stream.
var llB, mlB, moB uint8
ll, llB = llState.final()
ml, mlB = mlState.final()
mo, moB = ofState.final()
// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)
if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
} else {
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}
if mo == 0 {
mo = s.prevOffset[0]
} else {
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("WARNING: temp was 0")
temp = 1
}
if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
}
}
br.fillFast()
} else {
if br.overread() {
if debugDecoder {
printf("reading sequence %d, exceeded available data\n", i)
}
return io.ErrUnexpectedEOF
}
ll, mo, ml = s.next(br, llState, mlState, ofState)
br.fill()
}
if debugSequences {
println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
}
// Evaluate.
// We might be doing this async, so do it early.
if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
}
if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
}
seqs[i] = seqVals{
ll: ll,
ml: ml,
mo: mo,
}
if i == len(seqs)-1 {
// This is the last sequence, so we shouldn't update state.
break
}
// Manually inlined, ~ 5-20% faster
// Update all 3 states at once. Approx 20% faster.
nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
if nBits == 0 {
llState = llTable[llState.newState()&maxTableMask]
mlState = mlTable[mlState.newState()&maxTableMask]
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits >> (ofState.nbBits() & 31))
lowBits &= bitMask[mlState.nbBits()&15]
mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
}
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}
// executeSimple handles cases when a dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
// Ensure we have enough output size...
if len(s.out)+s.seqSize > cap(s.out) {
addBytes := s.seqSize + len(s.out)
s.out = append(s.out, make([]byte, addBytes)...)
s.out = s.out[:len(s.out)-addBytes]
}
if debugDecoder {
printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
}
var t = len(s.out)
out := s.out[:t+s.seqSize]
for _, seq := range seqs {
// Add literals
copy(out[t:], s.literals[:seq.ll])
t += seq.ll
s.literals = s.literals[seq.ll:]
// Malformed input
if seq.mo > t+len(hist) || seq.mo > s.windowSize {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
}
// Copy from history.
if v := seq.mo - t; v > 0 {
// v is the start position in history from end.
start := len(hist) - v
if seq.ml > v {
// Some goes into the current block.
// Copy remainder of history
copy(out[t:], hist[start:])
t += v
seq.ml -= v
} else {
copy(out[t:], hist[start:start+seq.ml])
t += seq.ml
continue
}
}
// We must be in the current buffer now
if seq.ml > 0 {
start := t - seq.mo
if seq.ml <= t-start {
// No overlap
copy(out[t:], out[start:start+seq.ml])
t += seq.ml
} else {
// Overlapping copy
// Extend destination slice and copy one byte at a time.
src := out[start : start+seq.ml]
dst := out[t:]
dst = dst[:len(src)]
t += len(src)
// Destination is the space we just added.
for i := range src {
dst[i] = src[i]
}
}
}
}
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
t += len(s.literals)
if t != len(out) {
panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
}
}
s.out = out
return nil
}
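The overlapping-copy branch above must copy one byte at a time so a match can re-read bytes it has just written, which is how LZ77-style back references repeat short patterns. A small self-contained demonstration (hypothetical helper, not library code):

```go
package main

import "fmt"

// appendMatch extends dst by ml bytes read from offset mo back,
// mirroring the overlap handling in executeSimple above.
func appendMatch(dst []byte, mo, ml int) []byte {
	start := len(dst) - mo
	for i := 0; i < ml; i++ {
		// Byte-at-a-time: when mo < ml, later iterations re-read
		// bytes produced earlier in this same loop.
		dst = append(dst, dst[start+i])
	}
	return dst
}

func main() {
	out := []byte("ab")
	out = appendMatch(out, 2, 6) // offset 2, length 6
	fmt.Println(string(out))     // prints "abababab"
}
```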


@ -18,25 +18,43 @@ const ZipMethodWinZip = 93
// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
const ZipMethodPKWare = 20
var zipReaderPool sync.Pool
// zipReaderPool is the default reader pool.
var zipReaderPool = sync.Pool{New: func() interface{} {
z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
if err != nil {
panic(err)
}
return z
}}
// newZipReader creates a pooled zip decompressor.
func newZipReader(r io.Reader) io.ReadCloser {
func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
dec, ok := zipReaderPool.Get().(*Decoder)
pool := &zipReaderPool
if len(opts) > 0 {
opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
// Force concurrency 1
opts = append(opts, WithDecoderConcurrency(1))
// Create our own pool
pool = &sync.Pool{}
}
return func(r io.Reader) io.ReadCloser {
dec, ok := pool.Get().(*Decoder)
if ok {
dec.Reset(r)
} else {
d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
d, err := NewReader(r, opts...)
if err != nil {
panic(err)
}
dec = d
}
return &pooledZipReader{dec: dec} return &pooledZipReader{dec: dec, pool: pool}
}
}
type pooledZipReader struct {
mu sync.Mutex // guards Close and Read
pool *sync.Pool
dec *Decoder
}
@ -48,8 +66,8 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
}
dec, err := r.dec.Read(p)
if err == io.EOF {
err = r.dec.Reset(nil)
r.dec.Reset(nil)
zipReaderPool.Put(r.dec)
r.pool.Put(r.dec)
r.dec = nil
}
return dec, err
@ -61,7 +79,7 @@ func (r *pooledZipReader) Close() error {
var err error
if r.dec != nil {
err = r.dec.Reset(nil)
zipReaderPool.Put(r.dec)
r.pool.Put(r.dec)
r.dec = nil
}
return err
@ -115,6 +133,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
// Options can be specified. WithDecoderConcurrency(1) is forced,
// and by default a 128MB maximum decompression window is specified.
// The window size can be overridden if required.
func ZipDecompressor() func(r io.Reader) io.ReadCloser {
return newZipReader
func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
return newZipReader(opts...)
}
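With the variadic signature, callers can now tune the pooled decoder while registering it with archive/zip. A hedged sketch; the empty archive exists only so there is something to open:

```go
package main

import (
	"archive/zip"
	"bytes"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Build an empty archive so zip.NewReader has valid input.
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	if err := zw.Close(); err != nil {
		panic(err)
	}

	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		panic(err)
	}
	// Register zstd for WinZip method 93 entries, raising the
	// default 128MB window cap to 256MB.
	zr.RegisterDecompressor(zstd.ZipMethodWinZip,
		zstd.ZipDecompressor(zstd.WithDecoderMaxWindow(256<<20)))
}
```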


@ -110,17 +110,6 @@ func printf(format string, a ...interface{}) {
}
}
// matchLenFast does matching, but will not match the last up to 7 bytes.
func matchLenFast(a, b []byte) int {
endI := len(a) & (math.MaxInt32 - 7)
for i := 0; i < endI; i += 8 {
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
return i + bits.TrailingZeros64(diff)>>3
}
}
return endI
}
// matchLen returns the maximum length.
// a must be the shortest of the two.
// The function also returns whether all bytes matched.


@ -39,7 +39,7 @@ func main() {
 	// If you set debugging, it will log all requests to the console
 	// Useful when encountering issues
 	// slack.New("YOUR_TOKEN_HERE", slack.OptionDebug(true))
-	groups, err := api.GetUserGroups(false)
+	groups, err := api.GetUserGroups(slack.GetUserGroupsOptionIncludeUsers(false))
 	if err != nil {
 		fmt.Printf("%s\n", err)
 		return
@ -86,7 +86,13 @@ See https://github.com/slack-go/slack/blob/master/examples/websocket/websocket.g
See https://github.com/slack-go/slack/blob/master/examples/eventsapi/events.go
## Socketmode Event Handler (Experimental)
When using socket mode, dealing with an event can be pretty lengthy as it requires you to route the event to the right place.
Instead, you can use `SocketmodeHandler` much like you use an HTTP handler to register which event you would like to listen to and what callback function will process that event when it occurs.
See [./examples/socketmode_handler/socketmode_handler.go](./examples/socketmode_handler/socketmode_handler.go)
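For context, a minimal sketch of the registration pattern that new README section describes, assuming bot and app-level tokens in the SLACK_BOT_TOKEN and SLACK_APP_TOKEN environment variables (names illustrative):

```go
package main

import (
	"log"
	"os"

	"github.com/slack-go/slack"
	"github.com/slack-go/slack/socketmode"
)

func main() {
	api := slack.New(
		os.Getenv("SLACK_BOT_TOKEN"),
		slack.OptionAppLevelToken(os.Getenv("SLACK_APP_TOKEN")),
	)
	client := socketmode.New(api)
	handler := socketmode.NewSocketmodeHandler(client)

	// Register callbacks per event type instead of switching in a hand-written loop.
	handler.Handle(socketmode.EventTypeConnected, func(evt *socketmode.Event, c *socketmode.Client) {
		log.Println("connected to Slack in socket mode")
	})
	handler.HandleDefault(func(evt *socketmode.Event, c *socketmode.Client) {
		log.Printf("unhandled event: %s", evt.Type)
	})

	log.Fatal(handler.RunEventLoop())
}
```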
## Contributing

You are more than welcome to contribute to this project. Fork and


@ -19,11 +19,12 @@ func (s InputBlock) BlockType() MessageBlockType {
 }

 // NewInputBlock returns a new instance of an input block
-func NewInputBlock(blockID string, label *TextBlockObject, element BlockElement) *InputBlock {
+func NewInputBlock(blockID string, label, hint *TextBlockObject, element BlockElement) *InputBlock {
 	return &InputBlock{
 		Type:    MBTInput,
 		BlockID: blockID,
 		Label:   label,
 		Element: element,
+		Hint:    hint,
 	}
 }
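Note that this is a breaking signature change: every NewInputBlock caller now passes a hint. A sketch of the new call shape, with illustrative identifiers; passing nil keeps the old behaviour:

```go
// assumes a github.com/slack-go/slack import at v0.11.0
label := slack.NewTextBlockObject(slack.PlainTextType, "Title", false, false)
hint := slack.NewTextBlockObject(slack.PlainTextType, "Keep it under 80 characters", false, false)
element := slack.NewPlainTextInputBlockElement(nil, "title_input")
// Pre-0.11.0 callers wrote NewInputBlock("title_block", label, element);
// pass nil as the hint argument to omit the hint.
block := slack.NewInputBlock("title_block", label, hint, element)
_ = block
```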

vendor/github.com/slack-go/slack/bookmarks.go (new file, generated, vendored; 159 lines)

@ -0,0 +1,159 @@
package slack
import (
"context"
"net/url"
)
type Bookmark struct {
ID string `json:"id"`
ChannelID string `json:"channel_id"`
Title string `json:"title"`
Link string `json:"link"`
Emoji string `json:"emoji"`
IconURL string `json:"icon_url"`
Type string `json:"type"`
Created JSONTime `json:"date_created"`
Updated JSONTime `json:"date_updated"`
Rank string `json:"rank"`
LastUpdatedByUserID string `json:"last_updated_by_user_id"`
LastUpdatedByTeamID string `json:"last_updated_by_team_id"`
ShortcutID string `json:"shortcut_id"`
EntityID string `json:"entity_id"`
AppID string `json:"app_id"`
}
type AddBookmarkParameters struct {
Title string // A required title for the bookmark
Type string // A required type for the bookmark
Link string // URL required for type:link
Emoji string // An optional emoji
EntityID string
ParentID string
}
type EditBookmarkParameters struct {
Title *string // Change the title. Set to "" to clear
Emoji *string // Change the emoji. Set to "" to clear
Link string // Change the link
}
type addBookmarkResponse struct {
Bookmark Bookmark `json:"bookmark"`
SlackResponse
}
type editBookmarkResponse struct {
Bookmark Bookmark `json:"bookmark"`
SlackResponse
}
type listBookmarksResponse struct {
Bookmarks []Bookmark `json:"bookmarks"`
SlackResponse
}
// AddBookmark adds a bookmark in a channel
func (api *Client) AddBookmark(channelID string, params AddBookmarkParameters) (Bookmark, error) {
return api.AddBookmarkContext(context.Background(), channelID, params)
}
// AddBookmarkContext adds a bookmark in a channel with a custom context
func (api *Client) AddBookmarkContext(ctx context.Context, channelID string, params AddBookmarkParameters) (Bookmark, error) {
values := url.Values{
"channel_id": {channelID},
"token": {api.token},
"title": {params.Title},
"type": {params.Type},
}
if params.Link != "" {
values.Set("link", params.Link)
}
if params.Emoji != "" {
values.Set("emoji", params.Emoji)
}
if params.EntityID != "" {
values.Set("entity_id", params.EntityID)
}
if params.ParentID != "" {
values.Set("parent_id", params.ParentID)
}
response := &addBookmarkResponse{}
if err := api.postMethod(ctx, "bookmarks.add", values, response); err != nil {
return Bookmark{}, err
}
return response.Bookmark, response.Err()
}
// RemoveBookmark removes a bookmark from a channel
func (api *Client) RemoveBookmark(channelID, bookmarkID string) error {
return api.RemoveBookmarkContext(context.Background(), channelID, bookmarkID)
}
// RemoveBookmarkContext removes a bookmark from a channel with a custom context
func (api *Client) RemoveBookmarkContext(ctx context.Context, channelID, bookmarkID string) error {
values := url.Values{
"channel_id": {channelID},
"token": {api.token},
"bookmark_id": {bookmarkID},
}
response := &SlackResponse{}
if err := api.postMethod(ctx, "bookmarks.remove", values, response); err != nil {
return err
}
return response.Err()
}
// ListBookmarks returns all bookmarks for a channel.
func (api *Client) ListBookmarks(channelID string) ([]Bookmark, error) {
return api.ListBookmarksContext(context.Background(), channelID)
}
// ListBookmarksContext returns all bookmarks for a channel with a custom context.
func (api *Client) ListBookmarksContext(ctx context.Context, channelID string) ([]Bookmark, error) {
values := url.Values{
"channel_id": {channelID},
"token": {api.token},
}
response := &listBookmarksResponse{}
err := api.postMethod(ctx, "bookmarks.list", values, response)
if err != nil {
return nil, err
}
return response.Bookmarks, response.Err()
}
func (api *Client) EditBookmark(channelID, bookmarkID string, params EditBookmarkParameters) (Bookmark, error) {
return api.EditBookmarkContext(context.Background(), channelID, bookmarkID, params)
}
func (api *Client) EditBookmarkContext(ctx context.Context, channelID, bookmarkID string, params EditBookmarkParameters) (Bookmark, error) {
values := url.Values{
"channel_id": {channelID},
"token": {api.token},
"bookmark_id": {bookmarkID},
}
if params.Link != "" {
values.Set("link", params.Link)
}
if params.Emoji != nil {
values.Set("emoji", *params.Emoji)
}
if params.Title != nil {
values.Set("title", *params.Title)
}
response := &editBookmarkResponse{}
if err := api.postMethod(ctx, "bookmarks.edit", values, response); err != nil {
return Bookmark{}, err
}
return response.Bookmark, response.Err()
}
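A minimal usage sketch of the new bookmarks API; the token source and channel ID are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/slack-go/slack"
)

func main() {
	api := slack.New(os.Getenv("SLACK_TOKEN")) // illustrative token source
	bookmark, err := api.AddBookmark("C0123456789", slack.AddBookmarkParameters{
		Title: "Team handbook",
		Type:  "link", // Link is required for type:link
		Link:  "https://example.com/handbook",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created bookmark", bookmark.ID)
}
```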


@ -18,8 +18,6 @@ import (
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/slack-go/slack/internal/misc"
) )
// SlackResponse handles parsing out errors from the web api. // SlackResponse handles parsing out errors from the web api.
@ -299,7 +297,7 @@ func checkStatusCode(resp *http.Response, d Debug) error {
// Slack seems to send an HTML body along with 5xx error codes. Don't parse it. // Slack seems to send an HTML body along with 5xx error codes. Don't parse it.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
logResponse(resp, d) logResponse(resp, d)
return misc.StatusCodeError{Code: resp.StatusCode, Status: resp.Status} return StatusCodeError{Code: resp.StatusCode, Status: resp.Status}
} }
return nil return nil

vendor/github.com/slack-go/slack/remotefiles.go (new file, generated, vendored; 316 lines)

@ -0,0 +1,316 @@
package slack
import (
"context"
"fmt"
"io"
"net/url"
"strconv"
"strings"
)
const (
DEFAULT_REMOTE_FILES_CHANNEL = ""
DEFAULT_REMOTE_FILES_TS_FROM = 0
DEFAULT_REMOTE_FILES_TS_TO = -1
DEFAULT_REMOTE_FILES_COUNT = 100
)
// RemoteFile contains all the information for a remote file
// For more details:
// https://api.slack.com/messaging/files/remote
type RemoteFile struct {
ID string `json:"id"`
Created JSONTime `json:"created"`
Timestamp JSONTime `json:"timestamp"`
Name string `json:"name"`
Title string `json:"title"`
Mimetype string `json:"mimetype"`
Filetype string `json:"filetype"`
PrettyType string `json:"pretty_type"`
User string `json:"user"`
Editable bool `json:"editable"`
Size int `json:"size"`
Mode string `json:"mode"`
IsExternal bool `json:"is_external"`
ExternalType string `json:"external_type"`
IsPublic bool `json:"is_public"`
PublicURLShared bool `json:"public_url_shared"`
DisplayAsBot bool `json:"display_as_bot"`
Username string `json:"username"`
URLPrivate string `json:"url_private"`
Permalink string `json:"permalink"`
CommentsCount int `json:"comments_count"`
IsStarred bool `json:"is_starred"`
Shares Share `json:"shares"`
Channels []string `json:"channels"`
Groups []string `json:"groups"`
IMs []string `json:"ims"`
ExternalID string `json:"external_id"`
ExternalURL string `json:"external_url"`
HasRichPreview bool `json:"has_rich_preview"`
}
// RemoteFileParameters contains required and optional parameters for a remote file.
//
// ExternalID is a user defined GUID, ExternalURL is where the remote file can be accessed,
// and Title is the name of the file.
//
// For more details:
// https://api.slack.com/methods/files.remote.add
type RemoteFileParameters struct {
ExternalID string // required
ExternalURL string // required
Title string // required
Filetype string
IndexableFileContents string
PreviewImage string
PreviewImageReader io.Reader
}
// ListRemoteFilesParameters contains arguments for the ListRemoteFiles method.
// For more details:
// https://api.slack.com/methods/files.remote.list
type ListRemoteFilesParameters struct {
Channel string
Cursor string
Limit int
TimestampFrom JSONTime
TimestampTo JSONTime
}
type remoteFileResponseFull struct {
RemoteFile `json:"file"`
Paging `json:"paging"`
Files []RemoteFile `json:"files"`
SlackResponse
}
func (api *Client) remoteFileRequest(ctx context.Context, path string, values url.Values) (*remoteFileResponseFull, error) {
response := &remoteFileResponseFull{}
err := api.postMethod(ctx, path, values, response)
if err != nil {
return nil, err
}
return response, response.Err()
}
// AddRemoteFile adds a remote file. Unlike regular files, remote files must be explicitly shared.
// For more details:
// https://api.slack.com/methods/files.remote.add
func (api *Client) AddRemoteFile(params RemoteFileParameters) (*RemoteFile, error) {
return api.AddRemoteFileContext(context.Background(), params)
}
// AddRemoteFileContext adds a remote file and setting a custom context
// For more details see the AddRemoteFile documentation.
func (api *Client) AddRemoteFileContext(ctx context.Context, params RemoteFileParameters) (remotefile *RemoteFile, err error) {
if params.ExternalID == "" || params.ExternalURL == "" || params.Title == "" {
return nil, ErrParametersMissing
}
response := &remoteFileResponseFull{}
values := url.Values{
"token": {api.token},
"external_id": {params.ExternalID},
"external_url": {params.ExternalURL},
"title": {params.Title},
}
if params.Filetype != "" {
values.Add("filetype", params.Filetype)
}
if params.IndexableFileContents != "" {
values.Add("indexable_file_contents", params.IndexableFileContents)
}
if params.PreviewImage != "" {
err = postLocalWithMultipartResponse(ctx, api.httpclient, api.endpoint+"files.remote.add", params.PreviewImage, "preview_image", api.token, values, response, api)
} else if params.PreviewImageReader != nil {
err = postWithMultipartResponse(ctx, api.httpclient, api.endpoint+"files.remote.add", "preview.png", "preview_image", api.token, values, params.PreviewImageReader, response, api)
} else {
response, err = api.remoteFileRequest(ctx, "files.remote.add", values)
}
if err != nil {
return nil, err
}
return &response.RemoteFile, response.Err()
}
// ListRemoteFiles retrieves all remote files according to the parameters given. Uses cursor based pagination.
// For more details:
// https://api.slack.com/methods/files.remote.list
func (api *Client) ListRemoteFiles(params ListRemoteFilesParameters) ([]RemoteFile, error) {
return api.ListRemoteFilesContext(context.Background(), params)
}
// ListRemoteFilesContext retrieves all remote files according to the parameters given with a custom context. Uses cursor based pagination.
// For more details see the ListRemoteFiles documentation.
func (api *Client) ListRemoteFilesContext(ctx context.Context, params ListRemoteFilesParameters) ([]RemoteFile, error) {
values := url.Values{
"token": {api.token},
}
if params.Channel != DEFAULT_REMOTE_FILES_CHANNEL {
values.Add("channel", params.Channel)
}
if params.TimestampFrom != DEFAULT_REMOTE_FILES_TS_FROM {
values.Add("ts_from", strconv.FormatInt(int64(params.TimestampFrom), 10))
}
if params.TimestampTo != DEFAULT_REMOTE_FILES_TS_TO {
values.Add("ts_to", strconv.FormatInt(int64(params.TimestampTo), 10))
}
if params.Limit != DEFAULT_REMOTE_FILES_COUNT {
values.Add("limit", strconv.Itoa(params.Limit))
}
if params.Cursor != "" {
values.Add("cursor", params.Cursor)
}
response, err := api.remoteFileRequest(ctx, "files.remote.list", values)
if err != nil {
return nil, err
}
params.Cursor = response.SlackResponse.ResponseMetadata.Cursor
return response.Files, nil
}
// GetRemoteFileInfo retrieves the complete remote file information.
// For more details:
// https://api.slack.com/methods/files.remote.info
func (api *Client) GetRemoteFileInfo(externalID, fileID string) (remotefile *RemoteFile, err error) {
return api.GetRemoteFileInfoContext(context.Background(), externalID, fileID)
}
// GetRemoteFileInfoContext retrieves the complete remote file information given with a custom context.
// For more details see the GetRemoteFileInfo documentation.
func (api *Client) GetRemoteFileInfoContext(ctx context.Context, externalID, fileID string) (remotefile *RemoteFile, err error) {
if fileID == "" && externalID == "" {
return nil, fmt.Errorf("either externalID or fileID is required")
}
if fileID != "" && externalID != "" {
return nil, fmt.Errorf("don't provide both externalID and fileID")
}
values := url.Values{
"token": {api.token},
}
if fileID != "" {
values.Add("file", fileID)
}
if externalID != "" {
values.Add("external_id", externalID)
}
response, err := api.remoteFileRequest(ctx, "files.remote.info", values)
if err != nil {
return nil, err
}
return &response.RemoteFile, err
}
// ShareRemoteFile shares a remote file to channels
// For more details:
// https://api.slack.com/methods/files.remote.share
func (api *Client) ShareRemoteFile(channels []string, externalID, fileID string) (file *RemoteFile, err error) {
return api.ShareRemoteFileContext(context.Background(), channels, externalID, fileID)
}
// ShareRemoteFileContext shares a remote file to channels with a custom context.
// For more details see the ShareRemoteFile documentation.
func (api *Client) ShareRemoteFileContext(ctx context.Context, channels []string, externalID, fileID string) (file *RemoteFile, err error) {
if channels == nil || len(channels) == 0 {
return nil, ErrParametersMissing
}
if fileID == "" && externalID == "" {
return nil, fmt.Errorf("either externalID or fileID is required")
}
values := url.Values{
"token": {api.token},
"channels": {strings.Join(channels, ",")},
}
if fileID != "" {
values.Add("file", fileID)
}
if externalID != "" {
values.Add("external_id", externalID)
}
response, err := api.remoteFileRequest(ctx, "files.remote.share", values)
if err != nil {
return nil, err
}
return &response.RemoteFile, err
}
// UpdateRemoteFile updates a remote file
// For more details:
// https://api.slack.com/methods/files.remote.update
func (api *Client) UpdateRemoteFile(fileID string, params RemoteFileParameters) (remotefile *RemoteFile, err error) {
return api.UpdateRemoteFileContext(context.Background(), fileID, params)
}
// UpdateRemoteFileContext updates a remote file with a custom context
// For more details see the UpdateRemoteFile documentation.
func (api *Client) UpdateRemoteFileContext(ctx context.Context, fileID string, params RemoteFileParameters) (remotefile *RemoteFile, err error) {
response := &remoteFileResponseFull{}
values := url.Values{
"token": {api.token},
}
if fileID != "" {
values.Add("file", fileID)
}
if params.ExternalID != "" {
values.Add("external_id", params.ExternalID)
}
if params.ExternalURL != "" {
values.Add("external_url", params.ExternalURL)
}
if params.Title != "" {
values.Add("title", params.Title)
}
if params.Filetype != "" {
values.Add("filetype", params.Filetype)
}
if params.IndexableFileContents != "" {
values.Add("indexable_file_contents", params.IndexableFileContents)
}
if params.PreviewImageReader != nil {
err = postWithMultipartResponse(ctx, api.httpclient, api.endpoint+"files.remote.update", "preview.png", "preview_image", api.token, values, params.PreviewImageReader, response, api)
} else {
response, err = api.remoteFileRequest(ctx, "files.remote.update", values)
}
if err != nil {
return nil, err
}
return &response.RemoteFile, response.Err()
}
// RemoveRemoteFile removes a remote file.
// For more details:
// https://api.slack.com/methods/files.remote.remove
func (api *Client) RemoveRemoteFile(externalID, fileID string) (err error) {
return api.RemoveRemoteFileContext(context.Background(), externalID, fileID)
}
// RemoveRemoteFileContext removes a remote file with a custom context
// For more information see the RemoveRemoteFiles documentation.
func (api *Client) RemoveRemoteFileContext(ctx context.Context, externalID, fileID string) (err error) {
if fileID == "" && externalID == "" {
return fmt.Errorf("either externalID or fileID is required")
}
if fileID != "" && externalID != "" {
return fmt.Errorf("don't provide both externalID and fileID")
}
values := url.Values{
"token": {api.token},
}
if fileID != "" {
values.Add("file", fileID)
}
if externalID != "" {
values.Add("external_id", externalID)
}
_, err = api.remoteFileRequest(ctx, "files.remote.remove", values)
return err
}
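A minimal usage sketch; the token source, GUID, URL, and channel ID are illustrative. Unlike regular uploads, a remote file stays invisible until it is shared:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/slack-go/slack"
)

func main() {
	api := slack.New(os.Getenv("SLACK_TOKEN")) // illustrative token source
	remote, err := api.AddRemoteFile(slack.RemoteFileParameters{
		ExternalID:  "doc-42",                     // user-defined GUID
		ExternalURL: "https://example.com/doc-42", // where the file actually lives
		Title:       "Design doc",
	})
	if err != nil {
		log.Fatal(err)
	}
	// Share by external ID; pass the Slack file ID instead if you have it.
	if _, err := api.ShareRemoteFile([]string{"C0123456789"}, remote.ExternalID, ""); err != nil {
		log.Fatal(err)
	}
	fmt.Println("shared remote file", remote.ID)
}
```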


@ -1,4 +1,4 @@
-package misc
+package slack

 import (
 	"fmt"


@ -36,6 +36,7 @@ type View struct {
 	AppID              string `json:"app_id"`
 	ExternalID         string `json:"external_id"`
 	BotID              string `json:"bot_id"`
+	AppInstalledTeamID string `json:"app_installed_team_id"`
 }

 type ViewSubmissionCallbackResponseURL struct {


@ -5,6 +5,8 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"io/ioutil"
"net/http" "net/http"
) )
@ -21,6 +23,7 @@ type WebhookMessage struct {
ResponseType string `json:"response_type,omitempty"` ResponseType string `json:"response_type,omitempty"`
ReplaceOriginal bool `json:"replace_original,omitempty"` ReplaceOriginal bool `json:"replace_original,omitempty"`
DeleteOriginal bool `json:"delete_original,omitempty"` DeleteOriginal bool `json:"delete_original,omitempty"`
ReplyBroadcast bool `json:"reply_broadcast,omitempty"`
} }
func PostWebhook(url string, msg *WebhookMessage) error { func PostWebhook(url string, msg *WebhookMessage) error {
@ -51,7 +54,10 @@ func PostWebhookCustomHTTPContext(ctx context.Context, url string, httpClient *h
if err != nil { if err != nil {
return fmt.Errorf("failed to post webhook: %w", err) return fmt.Errorf("failed to post webhook: %w", err)
} }
defer resp.Body.Close() defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
return checkStatusCode(resp, discard{}) return checkStatusCode(resp, discard{})
} }
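The point of the new deferred func is connection reuse: reading the body to EOF before closing lets net/http return the keep-alive connection to its pool instead of dropping it. The same idiom in isolation (URL illustrative):

```go
package main

import (
	"io"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("https://example.com/ping") // illustrative URL
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		// Drain to EOF so the connection can be reused for the next request.
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}()
	log.Println(resp.Status)
}
```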


@ -13,7 +13,6 @@ import (
"github.com/slack-go/slack/internal/backoff" "github.com/slack-go/slack/internal/backoff"
"github.com/slack-go/slack/internal/errorsx" "github.com/slack-go/slack/internal/errorsx"
"github.com/slack-go/slack/internal/misc"
"github.com/slack-go/slack/internal/timex" "github.com/slack-go/slack/internal/timex"
) )
@ -127,7 +126,7 @@ func (rtm *RTM) connect(connectionCount int, useRTMStart bool) (*Info, *websocke
} }
switch actual := err.(type) { switch actual := err.(type) {
case misc.StatusCodeError: case StatusCodeError:
if actual.Code == http.StatusNotFound { if actual.Code == http.StatusNotFound {
rtm.Debugf("invalid auth when connecting with RTM: %s", err) rtm.Debugf("invalid auth when connecting with RTM: %s", err)
rtm.IncomingEvents <- RTMEvent{"invalid_auth", &InvalidAuthEvent{}} rtm.IncomingEvents <- RTMEvent{"invalid_auth", &InvalidAuthEvent{}}
@ -475,7 +474,7 @@ func (rtm *RTM) handleEvent(typeStr string, event json.RawMessage) {
v, exists := EventMapping[typeStr] v, exists := EventMapping[typeStr]
if !exists { if !exists {
rtm.Debugf("RTM Error - received unmapped event %q: %s\n", typeStr, string(event)) rtm.Debugf("RTM Error - received unmapped event %q: %s\n", typeStr, string(event))
err := fmt.Errorf("RTM Error: Received unmapped event %q: %s", typeStr, string(event)) err := fmt.Errorf("RTM Error: Received unmapped event %q", typeStr)
rtm.IncomingEvents <- RTMEvent{"unmarshalling_error", &UnmarshallingErrorEvent{err}} rtm.IncomingEvents <- RTMEvent{"unmarshalling_error", &UnmarshallingErrorEvent{err}}
return return
} }
@ -484,7 +483,7 @@ func (rtm *RTM) handleEvent(typeStr string, event json.RawMessage) {
err := json.Unmarshal(event, recvEvent) err := json.Unmarshal(event, recvEvent)
if err != nil { if err != nil {
rtm.Debugf("RTM Error, could not unmarshall event %q: %s\n", typeStr, string(event)) rtm.Debugf("RTM Error, could not unmarshall event %q: %s\n", typeStr, string(event))
err := fmt.Errorf("RTM Error: Could not unmarshall event %q: %s", typeStr, string(event)) err := fmt.Errorf("RTM Error: Could not unmarshall event %q", typeStr)
rtm.IncomingEvents <- RTMEvent{"unmarshalling_error", &UnmarshallingErrorEvent{err}} rtm.IncomingEvents <- RTMEvent{"unmarshalling_error", &UnmarshallingErrorEvent{err}}
return return
} }


@ -14,9 +14,9 @@ type SubteamMembersChangedEvent struct {
 	DatePreviousUpdate JSONTime `json:"date_previous_update"`
 	DateUpdate         JSONTime `json:"date_update"`
 	AddedUsers         []string `json:"added_users"`
-	AddedUsersCount    string   `json:"added_users_count"`
+	AddedUsersCount    int      `json:"added_users_count"`
 	RemovedUsers       []string `json:"removed_users"`
-	RemovedUsersCount  string   `json:"removed_users_count"`
+	RemovedUsersCount  int      `json:"removed_users_count"`
 }

 // SubteamSelfAddedEvent represents an event of you have been added to a User Group


@ -9,7 +9,7 @@ package assert
import "reflect" import "reflect"
// Wrapper around reflect.Value.CanConvert, for compatability // Wrapper around reflect.Value.CanConvert, for compatibility
// reasons. // reasons.
func canConvert(value reflect.Value, to reflect.Type) bool { func canConvert(value reflect.Value, to reflect.Type) bool {
return value.CanConvert(to) return value.CanConvert(to)


@ -7,6 +7,7 @@ import (
"reflect" "reflect"
"regexp" "regexp"
"runtime/debug" "runtime/debug"
"sync"
"testing" "testing"
"time" "time"
@ -21,17 +22,22 @@ var matchMethod = flag.String("testify.m", "", "regular expression to select tes
// retrieving the current *testing.T context. // retrieving the current *testing.T context.
type Suite struct { type Suite struct {
*assert.Assertions *assert.Assertions
mu sync.RWMutex
require *require.Assertions require *require.Assertions
t *testing.T t *testing.T
} }
// T retrieves the current *testing.T context. // T retrieves the current *testing.T context.
func (suite *Suite) T() *testing.T { func (suite *Suite) T() *testing.T {
suite.mu.RLock()
defer suite.mu.RUnlock()
return suite.t return suite.t
} }
// SetT sets the current *testing.T context. // SetT sets the current *testing.T context.
func (suite *Suite) SetT(t *testing.T) { func (suite *Suite) SetT(t *testing.T) {
suite.mu.Lock()
defer suite.mu.Unlock()
suite.t = t suite.t = t
suite.Assertions = assert.New(t) suite.Assertions = assert.New(t)
suite.require = require.New(t) suite.require = require.New(t)
@ -39,6 +45,8 @@ func (suite *Suite) SetT(t *testing.T) {
// Require returns a require context for suite. // Require returns a require context for suite.
func (suite *Suite) Require() *require.Assertions { func (suite *Suite) Require() *require.Assertions {
suite.mu.Lock()
defer suite.mu.Unlock()
if suite.require == nil { if suite.require == nil {
suite.require = require.New(suite.T()) suite.require = require.New(suite.T())
} }
@ -51,6 +59,8 @@ func (suite *Suite) Require() *require.Assertions {
// assert.Assertions with require.Assertions), this method is provided so you // assert.Assertions with require.Assertions), this method is provided so you
// can call `suite.Assert().NoError()`. // can call `suite.Assert().NoError()`.
func (suite *Suite) Assert() *assert.Assertions { func (suite *Suite) Assert() *assert.Assertions {
suite.mu.Lock()
defer suite.mu.Unlock()
if suite.Assertions == nil { if suite.Assertions == nil {
suite.Assertions = assert.New(suite.T()) suite.Assertions = assert.New(suite.T())
} }

(File diff suppressed because it is too large.)


@ -31,10 +31,10 @@ message ADVDeviceIdentity {
 	optional uint32 keyIndex = 3;
 }

-message CompanionProps {
+message DeviceProps {
 	optional string os = 1;
 	optional AppVersion version = 2;
-	enum CompanionPropsPlatformType {
+	enum DevicePropsPlatformType {
 		UNKNOWN = 0;
 		CHROME = 1;
 		FIREFOX = 2;

@ -50,7 +50,7 @@ message CompanionProps {
 		CATALINA = 12;
 		TCL_TV = 13;
 	}
-	optional CompanionPropsPlatformType platformType = 3;
+	optional DevicePropsPlatformType platformType = 3;
 	optional bool requireFullSync = 4;
 }

@ -62,39 +62,6 @@ message AppVersion {
 	optional uint32 quinary = 5;
 }
message ProtocolMessage {
optional MessageKey key = 1;
enum ProtocolMessageType {
REVOKE = 0;
EPHEMERAL_SETTING = 3;
EPHEMERAL_SYNC_RESPONSE = 4;
HISTORY_SYNC_NOTIFICATION = 5;
APP_STATE_SYNC_KEY_SHARE = 6;
APP_STATE_SYNC_KEY_REQUEST = 7;
MSG_FANOUT_BACKFILL_REQUEST = 8;
INITIAL_SECURITY_NOTIFICATION_SETTING_SYNC = 9;
APP_STATE_FATAL_EXCEPTION_NOTIFICATION = 10;
}
optional ProtocolMessageType type = 2;
optional uint32 ephemeralExpiration = 4;
optional int64 ephemeralSettingTimestamp = 5;
optional HistorySyncNotification historySyncNotification = 6;
optional AppStateSyncKeyShare appStateSyncKeyShare = 7;
optional AppStateSyncKeyRequest appStateSyncKeyRequest = 8;
optional InitialSecurityNotificationSettingSync initialSecurityNotificationSettingSync = 9;
optional AppStateFatalExceptionNotification appStateFatalExceptionNotification = 10;
optional DisappearingMode disappearingMode = 11;
}
message ProductMessage {
optional ProductSnapshot product = 1;
optional string businessOwnerJid = 2;
optional CatalogSnapshot catalog = 4;
optional string body = 5;
optional string footer = 6;
optional ContextInfo contextInfo = 17;
}
 message ProductSnapshot {
 	optional ImageMessage productImage = 1;
 	optional string productId = 2;
@ -123,6 +90,10 @@ message PollVoteMessage {
 message PollUpdateMessage {
 	optional MessageKey pollCreationMessageKey = 1;
 	optional PollEncValue vote = 2;
+	optional PollUpdateMessageMetadata metadata = 3;
+}
+
+message PollUpdateMessageMetadata {
 }

 message PollEncValue {
@ -265,6 +236,12 @@ message ProductListHeaderImage {
 	optional bytes jpegThumbnail = 2;
 }

+message KeepInChatMessage {
+	optional MessageKey key = 1;
+	optional KeepType keepType = 2;
+	optional int64 timestampMs = 3;
+}
+
 message InvoiceMessage {
 	optional string note = 1;
 	optional string token = 2;
@ -708,6 +685,11 @@ message Location {
 	optional string name = 3;
 }

+enum KeepType {
+	UNKNOWN = 0;
+	KEEP_FOR_ALL = 1;
+	UNDO_KEEP_FOR_ALL = 2;
+}
+
 message InteractiveAnnotation {
 	repeated Point polygonVertices = 1;
 	oneof action {
@ -926,12 +908,14 @@ message Message {
 	optional InteractiveResponseMessage interactiveResponseMessage = 48;
 	optional PollCreationMessage pollCreationMessage = 49;
 	optional PollUpdateMessage pollUpdateMessage = 50;
+	optional KeepInChatMessage keepInChatMessage = 51;
 }

 message MessageContextInfo {
 	optional DeviceListMetadata deviceListMetadata = 1;
 	optional int32 deviceListMetadataVersion = 2;
 	optional bytes messageSecret = 3;
+	optional bytes paddingBytes = 4;
 }

 message VideoMessage {
@ -1060,6 +1044,39 @@ message ReactionMessage {
 	optional int64 senderTimestampMs = 4;
 }
message ProtocolMessage {
optional MessageKey key = 1;
enum ProtocolMessageType {
REVOKE = 0;
EPHEMERAL_SETTING = 3;
EPHEMERAL_SYNC_RESPONSE = 4;
HISTORY_SYNC_NOTIFICATION = 5;
APP_STATE_SYNC_KEY_SHARE = 6;
APP_STATE_SYNC_KEY_REQUEST = 7;
MSG_FANOUT_BACKFILL_REQUEST = 8;
INITIAL_SECURITY_NOTIFICATION_SETTING_SYNC = 9;
APP_STATE_FATAL_EXCEPTION_NOTIFICATION = 10;
}
optional ProtocolMessageType type = 2;
optional uint32 ephemeralExpiration = 4;
optional int64 ephemeralSettingTimestamp = 5;
optional HistorySyncNotification historySyncNotification = 6;
optional AppStateSyncKeyShare appStateSyncKeyShare = 7;
optional AppStateSyncKeyRequest appStateSyncKeyRequest = 8;
optional InitialSecurityNotificationSettingSync initialSecurityNotificationSettingSync = 9;
optional AppStateFatalExceptionNotification appStateFatalExceptionNotification = 10;
optional DisappearingMode disappearingMode = 11;
}
message ProductMessage {
optional ProductSnapshot product = 1;
optional string businessOwnerJid = 2;
optional CatalogSnapshot catalog = 4;
optional string body = 5;
optional string footer = 6;
optional ContextInfo contextInfo = 17;
}
 message EphemeralSetting {
 	optional sfixed32 duration = 1;
 	optional sfixed64 timestamp = 2;
@ -1161,6 +1178,17 @@ message Conversation {
 	optional MediaVisibility mediaVisibility = 27;
 	optional uint64 tcTokenSenderTimestamp = 28;
 	optional bool suspended = 29;
optional bool terminated = 30;
optional uint64 createdAt = 31;
optional string createdBy = 32;
optional string description = 33;
optional bool support = 34;
optional bool isParentGroup = 35;
optional bool isDefaultSubgroup = 36;
optional string parentGroupId = 37;
optional string displayName = 38;
optional string pnJid = 39;
optional bool selfMasked = 40;
 }

 message AutoDownloadSettings {
@ -1192,8 +1220,8 @@ message MsgOpaqueData {
 	optional string loc = 16;
 	optional string pollName = 17;
 	repeated PollOption pollOptions = 18;
-	optional bytes pollEncKey = 19;
 	optional uint32 pollSelectableOptionsCount = 20;
+	optional bytes messageSecret = 21;
 }

 message PollOption {
@ -1559,20 +1587,6 @@ message BizAccountLinkInfo {
 	optional BizAccountLinkInfoAccountType accountType = 5;
 }
message NoiseCertificate {
optional bytes details = 1;
optional bytes signature = 2;
}
// Renamed from NoiseCertificate$Details
message NoiseCertificateDetails {
optional uint32 serial = 1;
optional string issuer = 2;
optional uint64 expires = 3;
optional string subject = 4;
optional bytes key = 5;
}
 message HandshakeMessage {
 	optional ClientHello clientHello = 2;
 	optional ServerHello serverHello = 3;
@ -1943,6 +1957,7 @@ message WebMessageInfo {
 		GROUP_PARTICIPANT_ACCEPT = 140;
 		GROUP_PARTICIPANT_LINKED_GROUP_JOIN = 141;
 		COMMUNITY_CREATE = 142;
+		EPHEMERAL_KEEP_IN_CHAT = 143;
 	}
 	optional WebMessageInfoStubType messageStubType = 24;
 	optional bool clearMedia = 25;
@ -1976,6 +1991,7 @@ message WebMessageInfo {
 	optional string agentId = 47;
 	optional bool statusAlreadyViewed = 48;
 	optional bytes messageSecret = 49;
+	optional KeepInChat keepInChat = 50;
 }

 message WebFeatures {
@ -2148,3 +2164,43 @@ message MediaData {
 	optional string localPath = 1;
 }
message KeepInChat {
optional KeepType keepType = 1;
optional int64 serverTimestamp = 2;
optional string deviceJid = 3;
}
message NoiseCertificate {
optional bytes details = 1;
optional bytes signature = 2;
}
// Renamed from NoiseCertificate$Details
message NoiseCertificateDetails {
optional uint32 serial = 1;
optional string issuer = 2;
optional uint64 expires = 3;
optional string subject = 4;
optional bytes key = 5;
}
message CertChain {
optional CertChainNoiseCertificate leaf = 1;
optional CertChainNoiseCertificate intermediate = 2;
}
// Renamed from CertChain$NoiseCertificate
message CertChainNoiseCertificate {
optional bytes details = 1;
optional bytes signature = 2;
}
// Renamed from CertChain$NoiseCertificate$Details
message CertChainNoiseCertificateDetails {
optional uint32 serial = 1;
optional uint32 issuerSerial = 2;
optional bytes key = 3;
optional uint64 notBefore = 4;
optional uint64 notAfter = 5;
}


@ -40,6 +40,9 @@ func (cli *Client) handleEncryptedMessage(node *waBinary.Node) {
 	if err != nil {
 		cli.Log.Warnf("Failed to parse message: %v", err)
 	} else {
+		if info.VerifiedName != nil && len(info.VerifiedName.Details.GetVerifiedName()) > 0 {
+			go cli.updateBusinessName(info.Sender, info, info.VerifiedName.Details.GetVerifiedName())
+		}
 		if len(info.PushName) > 0 && info.PushName != "-" {
 			go cli.updatePushName(info.Sender, info, info.PushName)
 		}
@ -47,13 +50,17 @@ func (cli *Client) handleEncryptedMessage(node *waBinary.Node) {
 	}
 }
-func (cli *Client) parseMessageSource(node *waBinary.Node) (source types.MessageSource, err error) {
+func (cli *Client) parseMessageSource(node *waBinary.Node, requireParticipant bool) (source types.MessageSource, err error) {
 	ag := node.AttrGetter()
 	from := ag.JID("from")
 	if from.Server == types.GroupServer || from.Server == types.BroadcastServer {
 		source.IsGroup = true
 		source.Chat = from
+		if requireParticipant {
 			source.Sender = ag.JID("participant")
+		} else {
+			source.Sender = ag.OptionalJIDOrEmpty("participant")
+		}
 		if source.Sender.User == cli.Store.ID.User {
 			source.IsFromMe = true
 		}
@ -80,7 +87,7 @@ func (cli *Client) parseMessageSource(node *waBinary.Node) (source types.Message
 func (cli *Client) parseMessageInfo(node *waBinary.Node) (*types.MessageInfo, error) {
 	var info types.MessageInfo
 	var err error
-	info.MessageSource, err = cli.parseMessageSource(node)
+	info.MessageSource, err = cli.parseMessageSource(node, true)
 	if err != nil {
 		return nil, err
 	}
@ -96,6 +103,11 @@ func (cli *Client) parseMessageInfo(node *waBinary.Node) (*types.MessageInfo, er
 	for _, child := range node.GetChildren() {
 		if child.Tag == "multicast" {
 			info.Multicast = true
+		} else if child.Tag == "verified_name" {
+			info.VerifiedName, err = parseVerifiedNameContent(child)
+			if err != nil {
+				cli.Log.Warnf("Failed to parse verified_name node in %s: %v", info.ID, err)
+			}
 		} else if mediaType, ok := child.AttrGetter().GetString("mediatype", false); ok {
 			info.MediaType = mediaType
 		}


@ -15,7 +15,7 @@ import (
 )

 func (cli *Client) handleChatState(node *waBinary.Node) {
-	source, err := cli.parseMessageSource(node)
+	source, err := cli.parseMessageSource(node, true)
 	if err != nil {
 		cli.Log.Warnf("Failed to parse chat state update: %v", err)
 	} else if len(node.GetChildren()) != 1 {


@ -20,7 +20,7 @@ func (cli *Client) handleReceipt(node *waBinary.Node) {
 	receipt, err := cli.parseReceipt(node)
 	if err != nil {
 		cli.Log.Warnf("Failed to parse receipt: %v", err)
-	} else {
+	} else if receipt != nil {
 		if receipt.Type == events.ReceiptTypeRetry {
 			go func() {
 				err := cli.handleRetryReceipt(receipt, node)

@ -34,9 +34,29 @@ func (cli *Client) handleReceipt(node *waBinary.Node) {
 	go cli.sendAck(node)
 }
func (cli *Client) handleGroupedReceipt(partialReceipt events.Receipt, participants *waBinary.Node) {
pag := participants.AttrGetter()
partialReceipt.MessageIDs = []types.MessageID{pag.String("key")}
for _, child := range participants.GetChildren() {
if child.Tag != "user" {
cli.Log.Warnf("Unexpected node in grouped receipt participants: %s", child.XMLString())
continue
}
ag := child.AttrGetter()
receipt := partialReceipt
receipt.Timestamp = ag.UnixTime("t")
receipt.MessageSource.Sender = ag.JID("jid")
if !ag.OK() {
cli.Log.Warnf("Failed to parse user node %s in grouped receipt: %v", child.XMLString(), ag.Error())
continue
}
go cli.dispatchEvent(&receipt)
}
}
 func (cli *Client) parseReceipt(node *waBinary.Node) (*events.Receipt, error) {
 	ag := node.AttrGetter()
-	source, err := cli.parseMessageSource(node)
+	source, err := cli.parseMessageSource(node, false)
 	if err != nil {
 		return nil, err
 	}
@ -45,6 +65,16 @@ func (cli *Client) parseReceipt(node *waBinary.Node) (*events.Receipt, error) {
 		Timestamp: ag.UnixTime("t"),
 		Type:      events.ReceiptType(ag.OptionalString("type")),
 	}
if source.IsGroup && source.Sender.IsEmpty() {
participantTags := node.GetChildrenByTag("participants")
if len(participantTags) == 0 {
return nil, &ElementMissingError{Tag: "participants", In: "grouped receipt"}
}
for _, pcp := range participantTags {
cli.handleGroupedReceipt(receipt, &pcp)
}
return nil, nil
}
 	mainMessageID := ag.String("id")
 	if !ag.OK() {
 		return nil, fmt.Errorf("failed to parse read receipt attrs: %+v", ag.Errors)


@ -74,7 +74,7 @@ func (vc WAVersionContainer) ProtoAppVersion() *waProto.AppVersion {
 }

 // waVersion is the WhatsApp web client version
-var waVersion = WAVersionContainer{2, 2218, 8}
+var waVersion = WAVersionContainer{2, 2222, 11}

 // waVersionHash is the md5 hash of a dot-separated waVersion
 var waVersionHash [16]byte

@ -125,14 +125,14 @@ var BaseClientPayload = &waProto.ClientPayload{
 // Deprecated: renamed to DeviceProps
 var CompanionProps = DeviceProps

-var DeviceProps = &waProto.CompanionProps{
+var DeviceProps = &waProto.DeviceProps{
 	Os: proto.String("whatsmeow"),
 	Version: &waProto.AppVersion{
 		Primary:   proto.Uint32(0),
 		Secondary: proto.Uint32(1),
 		Tertiary:  proto.Uint32(0),
 	},
-	PlatformType:    waProto.CompanionProps_UNKNOWN.Enum(),
+	PlatformType:    waProto.DeviceProps_UNKNOWN.Enum(),
 	RequireFullSync: proto.Bool(false),
 }


@ -181,7 +181,8 @@ const (
 		adv_key, adv_details, adv_account_sig, adv_account_sig_key, adv_device_sig,
 		platform, business_name, push_name)
 		VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
-		ON CONFLICT (jid) DO UPDATE SET platform=$12, business_name=$13, push_name=$14
+		ON CONFLICT (jid) DO UPDATE
+			SET platform=excluded.platform, business_name=excluded.business_name, push_name=excluded.push_name
 	`
 	deleteDeviceQuery = `DELETE FROM whatsmeow_device WHERE jid=$1`
 )


@ -67,7 +67,7 @@ var _ store.ContactStore = (*SQLStore)(nil)
 const (
 	putIdentityQuery = `
 		INSERT INTO whatsmeow_identity_keys (our_jid, their_id, identity) VALUES ($1, $2, $3)
-		ON CONFLICT (our_jid, their_id) DO UPDATE SET identity=$3
+		ON CONFLICT (our_jid, their_id) DO UPDATE SET identity=excluded.identity
 	`
 	deleteAllIdentitiesQuery = `DELETE FROM whatsmeow_identity_keys WHERE our_jid=$1 AND their_id LIKE $2`
 	deleteIdentityQuery      = `DELETE FROM whatsmeow_identity_keys WHERE our_jid=$1 AND their_id=$2`
@ -108,7 +108,7 @@ const (
 	hasSessionQuery = `SELECT true FROM whatsmeow_sessions WHERE our_jid=$1 AND their_id=$2`
 	putSessionQuery = `
 		INSERT INTO whatsmeow_sessions (our_jid, their_id, session) VALUES ($1, $2, $3)
-		ON CONFLICT (our_jid, their_id) DO UPDATE SET session=$3
+		ON CONFLICT (our_jid, their_id) DO UPDATE SET session=excluded.session
 	`
 	deleteAllSessionsQuery = `DELETE FROM whatsmeow_sessions WHERE our_jid=$1 AND their_id LIKE $2`
 	deleteSessionQuery     = `DELETE FROM whatsmeow_sessions WHERE our_jid=$1 AND their_id=$2`
@ -259,7 +259,7 @@ const (
 	getSenderKeyQuery = `SELECT sender_key FROM whatsmeow_sender_keys WHERE our_jid=$1 AND chat_id=$2 AND sender_id=$3`
 	putSenderKeyQuery = `
 		INSERT INTO whatsmeow_sender_keys (our_jid, chat_id, sender_id, sender_key) VALUES ($1, $2, $3, $4)
-		ON CONFLICT (our_jid, chat_id, sender_id) DO UPDATE SET sender_key=$4
+		ON CONFLICT (our_jid, chat_id, sender_id) DO UPDATE SET sender_key=excluded.sender_key
 	`
 )
@ -279,7 +279,8 @@ func (s *SQLStore) GetSenderKey(group, user string) (key []byte, err error) {
 const (
 	putAppStateSyncKeyQuery = `
 		INSERT INTO whatsmeow_app_state_sync_keys (jid, key_id, key_data, timestamp, fingerprint) VALUES ($1, $2, $3, $4, $5)
-		ON CONFLICT (jid, key_id) DO UPDATE SET key_data=$3, timestamp=$4, fingerprint=$5
+		ON CONFLICT (jid, key_id) DO UPDATE
+			SET key_data=excluded.key_data, timestamp=excluded.timestamp, fingerprint=excluded.fingerprint
 	`
 	getAppStateSyncKeyQuery = `SELECT key_data, timestamp, fingerprint FROM whatsmeow_app_state_sync_keys WHERE jid=$1 AND key_id=$2`
 )
@ -301,7 +302,7 @@ func (s *SQLStore) GetAppStateSyncKey(id []byte) (*store.AppStateSyncKey, error)
 const (
 	putAppStateVersionQuery = `
 		INSERT INTO whatsmeow_app_state_version (jid, name, version, hash) VALUES ($1, $2, $3, $4)
-		ON CONFLICT (jid, name) DO UPDATE SET version=$3, hash=$4
+		ON CONFLICT (jid, name) DO UPDATE SET version=excluded.version, hash=excluded.hash
 	`
 	getAppStateVersionQuery    = `SELECT version, hash FROM whatsmeow_app_state_version WHERE jid=$1 AND name=$2`
 	deleteAppStateVersionQuery = `DELETE FROM whatsmeow_app_state_version WHERE jid=$1 AND name=$2`
@ -435,11 +436,11 @@ const (
 	`
 	putPushNameQuery = `
 		INSERT INTO whatsmeow_contacts (our_jid, their_jid, push_name) VALUES ($1, $2, $3)
-		ON CONFLICT (our_jid, their_jid) DO UPDATE SET push_name=$3
+		ON CONFLICT (our_jid, their_jid) DO UPDATE SET push_name=excluded.push_name
 	`
 	putBusinessNameQuery = `
 		INSERT INTO whatsmeow_contacts (our_jid, their_jid, business_name) VALUES ($1, $2, $3)
-		ON CONFLICT (our_jid, their_jid) DO UPDATE SET business_name=$3
+		ON CONFLICT (our_jid, their_jid) DO UPDATE SET business_name=excluded.business_name
 	`
 	getContactQuery = `
 		SELECT first_name, full_name, push_name, business_name FROM whatsmeow_contacts WHERE our_jid=$1 AND their_jid=$2
@ -470,23 +471,25 @@ func (s *SQLStore) PutPushName(user types.JID, pushName string) (bool, string, e
 	return false, "", nil
 }

-func (s *SQLStore) PutBusinessName(user types.JID, businessName string) error {
+func (s *SQLStore) PutBusinessName(user types.JID, businessName string) (bool, string, error) {
 	s.contactCacheLock.Lock()
 	defer s.contactCacheLock.Unlock()
 	cached, err := s.getContact(user)
 	if err != nil {
-		return err
+		return false, "", err
 	}
 	if cached.BusinessName != businessName {
 		_, err = s.db.Exec(putBusinessNameQuery, s.JID, user, businessName)
 		if err != nil {
-			return err
+			return false, "", err
 		}
+		previousName := cached.BusinessName
 		cached.BusinessName = businessName
 		cached.Found = true
+		return true, previousName, nil
 	}
-	return nil
+	return false, "", nil
 }

 func (s *SQLStore) PutContactName(user types.JID, firstName, fullName string) error {
@ -643,7 +646,7 @@ func (s *SQLStore) GetAllContacts() (map[types.JID]types.ContactInfo, error) {
 const (
 	putChatSettingQuery = `
 		INSERT INTO whatsmeow_chat_settings (our_jid, chat_jid, %[1]s) VALUES ($1, $2, $3)
-		ON CONFLICT (our_jid, chat_jid) DO UPDATE SET %[1]s=$3
+		ON CONFLICT (our_jid, chat_jid) DO UPDATE SET %[1]s=excluded.%[1]s
 	`
 	getChatSettingsQuery = `
 		SELECT muted_until, pinned, archived FROM whatsmeow_chat_settings WHERE our_jid=$1 AND chat_jid=$2
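The switch from positional parameters to excluded.<column> in the DO UPDATE arm means the update can never drift out of sync with the VALUES list, and the same SQL runs on both PostgreSQL and SQLite (3.24+). A self-contained sketch of the pattern, assuming the mattn/go-sqlite3 driver (table and values illustrative):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumed driver; any upsert-capable DB works
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE contacts (jid TEXT PRIMARY KEY, push_name TEXT)`); err != nil {
		log.Fatal(err)
	}
	// excluded refers to the row that failed to insert, so the UPDATE arm
	// needs no extra positional parameters.
	upsert := `INSERT INTO contacts (jid, push_name) VALUES ($1, $2)
		ON CONFLICT (jid) DO UPDATE SET push_name=excluded.push_name`
	for _, name := range []string{"Alice", "Alice v2"} {
		if _, err := db.Exec(upsert, "123@s.whatsapp.net", name); err != nil {
			log.Fatal(err)
		}
	}
	var name string
	if err := db.QueryRow(`SELECT push_name FROM contacts WHERE jid=$1`, "123@s.whatsapp.net").Scan(&name); err != nil {
		log.Fatal(err)
	}
	fmt.Println(name) // Alice v2
}
```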


@ -239,7 +239,7 @@ func upgradeV2(tx *sql.Tx, container *Container) error {
 	if err != nil {
 		return err
 	}
-	if container.dialect == "postgres" {
+	if container.dialect == "postgres" || container.dialect == "pgx" {
 		_, err = tx.Exec(fillSigKeyPostgres)
 	} else {
 		_, err = tx.Exec(fillSigKeySQLite)


@ -80,7 +80,7 @@ type ContactEntry struct {
 type ContactStore interface {
 	PutPushName(user types.JID, pushName string) (bool, string, error)
-	PutBusinessName(user types.JID, businessName string) error
+	PutBusinessName(user types.JID, businessName string) (bool, string, error)
 	PutContactName(user types.JID, fullName, firstName string) error
 	PutAllContactNames(contacts []ContactEntry) error
 	GetContact(user types.JID) (types.ContactInfo, error)


@ -30,6 +30,14 @@ type PushName struct {
 	NewPushName string // The new push name that was included in the message.
 }
// BusinessName is emitted when a message is received with a different verified business name than the previous value cached for the same user.
type BusinessName struct {
JID types.JID
Message *types.MessageInfo // This is only present if the change was detected in a message.
OldBusinessName string
NewBusinessName string
}
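Consumers pick the new event up through the usual whatsmeow event handler; a minimal sketch, assuming a connected *whatsmeow.Client named cli and the go.mau.fi/whatsmeow/types/events import:

```go
// cli is a configured, connected *whatsmeow.Client (assumed)
cli.AddEventHandler(func(evt interface{}) {
	switch e := evt.(type) {
	case *events.BusinessName:
		// e.Message is nil when the change was detected outside a message.
		log.Printf("%s business name changed from %q to %q",
			e.JID, e.OldBusinessName, e.NewBusinessName)
	}
})
```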
 // Pin is emitted when a chat is pinned or unpinned from another device.
 type Pin struct {
 	JID types.JID // The chat which was pinned or unpinned.


@ -47,6 +47,7 @@ type MessageInfo struct {
 	Multicast bool
 	MediaType string
+	VerifiedName *VerifiedName

 	DeviceSentMeta *DeviceSentMeta // Metadata for direct messages sent from another one of the user's own devices.
 }


@ -133,7 +133,7 @@ func (cli *Client) GetUserInfo(jids []types.JID) (map[types.JID]types.UserInfo,
 		info.PictureID, _ = child.GetChildByTag("picture").Attrs["id"].(string)
 		info.Devices = parseDeviceList(jid.User, child.GetChildByTag("devices"))
 		if verifiedName != nil {
-			cli.updateBusinessName(jid, verifiedName.Details.GetVerifiedName())
+			cli.updateBusinessName(jid, nil, verifiedName.Details.GetVerifiedName())
 		}
 		respData[jid] = info
 	}
@ -262,13 +262,21 @@ func (cli *Client) updatePushName(user types.JID, messageInfo *types.MessageInfo
 	}
 }

-func (cli *Client) updateBusinessName(user types.JID, name string) {
+func (cli *Client) updateBusinessName(user types.JID, messageInfo *types.MessageInfo, name string) {
 	if cli.Store.Contacts == nil {
 		return
 	}
-	err := cli.Store.Contacts.PutBusinessName(user, name)
+	changed, previousName, err := cli.Store.Contacts.PutBusinessName(user, name)
 	if err != nil {
 		cli.Log.Errorf("Failed to save business name of %s in device store: %v", user, err)
+	} else if changed {
+		cli.Log.Debugf("Business name of %s changed from %s to %s, dispatching event", user, previousName, name)
+		cli.dispatchEvent(&events.BusinessName{
+			JID:             user,
+			Message:         messageInfo,
+			OldBusinessName: previousName,
+			NewBusinessName: name,
+		})
 	}
 }
@ -280,6 +288,10 @@ func parseVerifiedName(businessNode waBinary.Node) (*types.VerifiedName, error)
 	if !ok {
 		return nil, nil
 	}
+	return parseVerifiedNameContent(verifiedNameNode)
+}
+
+func parseVerifiedNameContent(verifiedNameNode waBinary.Node) (*types.VerifiedName, error) {
 	rawCert, ok := verifiedNameNode.Content.([]byte)
 	if !ok {
 		return nil, nil
 	}

vendor/gopkg.in/yaml.v3/parserc.go (generated, vendored; 11 lines changed)

@ -687,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i
 func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
 	if first {
 		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
 		parser.marks = append(parser.marks, token.start_mark)
 		skip_token(parser)
 	}
@ -786,7 +789,7 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
 	}
 	token := peek_token(parser)
-	if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
+	if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
 		return
 	}
@ -813,6 +816,9 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
 func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
 	if first {
 		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
 		parser.marks = append(parser.marks, token.start_mark)
 		skip_token(parser)
 	}
@ -922,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev
 func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
 	if first {
 		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
 		parser.marks = append(parser.marks, token.start_mark)
 		skip_token(parser)
 	}

vendor/modules.txt (vendored; 26 lines changed)

@ -38,7 +38,7 @@ github.com/Rhymen/go-whatsapp/binary/token
 github.com/Rhymen/go-whatsapp/crypto/cbc
 github.com/Rhymen/go-whatsapp/crypto/curve25519
 github.com/Rhymen/go-whatsapp/crypto/hkdf
-# github.com/SevereCloud/vksdk/v2 v2.14.0
+# github.com/SevereCloud/vksdk/v2 v2.14.1
 ## explicit; go 1.16
 github.com/SevereCloud/vksdk/v2
 github.com/SevereCloud/vksdk/v2/api
@ -58,7 +58,7 @@ github.com/blang/semver
 # github.com/bwmarrin/discordgo v0.25.0
 ## explicit; go 1.13
 github.com/bwmarrin/discordgo
-# github.com/d5/tengo/v2 v2.10.1
+# github.com/d5/tengo/v2 v2.12.0
 ## explicit; go 1.13
 github.com/d5/tengo/v2
 github.com/d5/tengo/v2/parser
@ -93,7 +93,7 @@ github.com/golang-jwt/jwt
 ## explicit; go 1.9
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/protoc-gen-go/descriptor
-# github.com/gomarkdown/markdown v0.0.0-20220603122033-8f3b341fef32
+# github.com/gomarkdown/markdown v0.0.0-20220607163217-45f7c050e2d1
 ## explicit; go 1.12
 github.com/gomarkdown/markdown
 github.com/gomarkdown/markdown/ast
@ -183,11 +183,12 @@ github.com/keybase/go-keybase-chat-bot/kbchat/types/chat1
 github.com/keybase/go-keybase-chat-bot/kbchat/types/gregor1
 github.com/keybase/go-keybase-chat-bot/kbchat/types/keybase1
 github.com/keybase/go-keybase-chat-bot/kbchat/types/stellar1
-# github.com/klauspost/compress v1.15.1
-## explicit; go 1.15
+# github.com/klauspost/compress v1.15.6
+## explicit; go 1.16
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
+github.com/klauspost/compress/internal/cpuinfo
 github.com/klauspost/compress/internal/snapref
 github.com/klauspost/compress/s2
 github.com/klauspost/compress/zstd
@ -403,12 +404,11 @@ github.com/sizeofint/webpanimation
 github.com/skip2/go-qrcode
 github.com/skip2/go-qrcode/bitset
 github.com/skip2/go-qrcode/reedsolomon
-# github.com/slack-go/slack v0.10.3
+# github.com/slack-go/slack v0.11.0
 ## explicit; go 1.16
 github.com/slack-go/slack
 github.com/slack-go/slack/internal/backoff
 github.com/slack-go/slack/internal/errorsx
-github.com/slack-go/slack/internal/misc
 github.com/slack-go/slack/internal/timex
 github.com/slack-go/slack/slackutilsx
 # github.com/spf13/afero v1.8.2
@ -435,7 +435,7 @@ github.com/spf13/viper/internal/encoding/javaproperties
 github.com/spf13/viper/internal/encoding/json
 github.com/spf13/viper/internal/encoding/toml
 github.com/spf13/viper/internal/encoding/yaml
-# github.com/stretchr/testify v1.7.1
+# github.com/stretchr/testify v1.7.2
 ## explicit; go 1.13
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/require
@ -514,7 +514,7 @@ go.mau.fi/libsignal/util/errorhelper
 go.mau.fi/libsignal/util/keyhelper
 go.mau.fi/libsignal/util/medium
 go.mau.fi/libsignal/util/optional
-# go.mau.fi/whatsmeow v0.0.0-20220601182603-a8d86cf1812c
+# go.mau.fi/whatsmeow v0.0.0-20220624184947-57a69a641154
 ## explicit; go 1.17
 go.mau.fi/whatsmeow
 go.mau.fi/whatsmeow/appstate
@ -567,7 +567,7 @@ golang.org/x/crypto/scrypt
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/terminal
-# golang.org/x/image v0.0.0-20220601225756-64ec528b34cd
+# golang.org/x/image v0.0.0-20220617043117-41969df76e82
 ## explicit; go 1.12
 golang.org/x/image/riff
 golang.org/x/image/vp8
@ -589,8 +589,8 @@ golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/publicsuffix
 golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401
-## explicit; go 1.11
+# golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2
+## explicit; go 1.15
 golang.org/x/oauth2
 golang.org/x/oauth2/clientcredentials
 golang.org/x/oauth2/internal
@ -705,7 +705,7 @@ gopkg.in/olahol/melody.v1
 # gopkg.in/yaml.v2 v2.4.0
 ## explicit; go 1.15
 gopkg.in/yaml.v2
-# gopkg.in/yaml.v3 v3.0.0
+# gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
 # layeh.com/gumble v0.0.0-20200818122324-146f9205029b