Mirror of https://github.com/cwinfo/matterbridge.git, synced 2025-06-27 16:59:23 +00:00

Commit: Vendor libs
vendor/golang.org/x/net/http2/hpack/LICENSE (new file, generated, vendored; 27 lines)
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/golang.org/x/net/http2/hpack/encode.go (new file, generated, vendored; 251 lines)
@@ -0,0 +1,251 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hpack

import (
	"io"
)

const (
	uint32Max              = ^uint32(0)
	initialHeaderTableSize = 4096
)

type Encoder struct {
	dynTab dynamicTable
	// minSize is the minimum table size set by
	// SetMaxDynamicTableSize after the previous Header Table Size
	// Update.
	minSize uint32
	// maxSizeLimit is the maximum table size this encoder
	// supports. It protects the encoder from an overly large
	// table size.
	maxSizeLimit uint32
	// tableSizeUpdate indicates whether "Header Table Size
	// Update" is required.
	tableSizeUpdate bool
	w               io.Writer
	buf             []byte
}

// NewEncoder returns a new Encoder which performs HPACK encoding.
// Encoded data is written to w.
func NewEncoder(w io.Writer) *Encoder {
	e := &Encoder{
		minSize:         uint32Max,
		maxSizeLimit:    initialHeaderTableSize,
		tableSizeUpdate: false,
		w:               w,
	}
	e.dynTab.setMaxSize(initialHeaderTableSize)
	return e
}

// WriteField encodes f into a single Write to e's underlying Writer.
// This function may also produce bytes for "Header Table Size Update"
// if necessary. If produced, it is done before encoding f.
func (e *Encoder) WriteField(f HeaderField) error {
	e.buf = e.buf[:0]

	if e.tableSizeUpdate {
		e.tableSizeUpdate = false
		if e.minSize < e.dynTab.maxSize {
			e.buf = appendTableSize(e.buf, e.minSize)
		}
		e.minSize = uint32Max
		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
	}

	idx, nameValueMatch := e.searchTable(f)
	if nameValueMatch {
		e.buf = appendIndexed(e.buf, idx)
	} else {
		indexing := e.shouldIndex(f)
		if indexing {
			e.dynTab.add(f)
		}

		if idx == 0 {
			e.buf = appendNewName(e.buf, f, indexing)
		} else {
			e.buf = appendIndexedName(e.buf, f, idx, indexing)
		}
	}
	n, err := e.w.Write(e.buf)
	if err == nil && n != len(e.buf) {
		err = io.ErrShortWrite
	}
	return err
}

// searchTable searches f in both static and dynamic header tables.
// The static header table is searched first. Only when there is no
// exact match for both name and value, the dynamic header table is
// then searched. If there is no match, i is 0. If both name and value
// match, i is the matched index and nameValueMatch becomes true. If
// only name matches, i points to that index and nameValueMatch
// becomes false.
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
	for idx, hf := range staticTable {
		if !constantTimeStringCompare(hf.Name, f.Name) {
			continue
		}
		if i == 0 {
			i = uint64(idx + 1)
		}
		if f.Sensitive {
			continue
		}
		if !constantTimeStringCompare(hf.Value, f.Value) {
			continue
		}
		i = uint64(idx + 1)
		nameValueMatch = true
		return
	}

	j, nameValueMatch := e.dynTab.search(f)
	if nameValueMatch || (i == 0 && j != 0) {
		i = j + uint64(len(staticTable))
	}
	return
}

// SetMaxDynamicTableSize changes the dynamic header table size to v.
// The actual size is bounded by the value passed to
// SetMaxDynamicTableSizeLimit.
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
	if v > e.maxSizeLimit {
		v = e.maxSizeLimit
	}
	if v < e.minSize {
		e.minSize = v
	}
	e.tableSizeUpdate = true
	e.dynTab.setMaxSize(v)
}

// SetMaxDynamicTableSizeLimit changes the maximum value that can be
// specified in SetMaxDynamicTableSize to v. By default, it is set to
// 4096, which is the same as the default dynamic header table size
// described in the HPACK specification. If the current maximum
// dynamic header table size is strictly greater than v, "Header Table
// Size Update" will be done in the next WriteField call and the
// maximum dynamic header table size is truncated to v.
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
	e.maxSizeLimit = v
	if e.dynTab.maxSize > v {
		e.tableSizeUpdate = true
		e.dynTab.setMaxSize(v)
	}
}

// shouldIndex reports whether f should be indexed.
func (e *Encoder) shouldIndex(f HeaderField) bool {
	return !f.Sensitive && f.Size() <= e.dynTab.maxSize
}

// appendIndexed appends index i, as encoded in "Indexed Header Field"
// representation, to dst and returns the extended buffer.
func appendIndexed(dst []byte, i uint64) []byte {
	first := len(dst)
	dst = appendVarInt(dst, 7, i)
	dst[first] |= 0x80
	return dst
}

// appendNewName appends f, as encoded in one of "Literal Header field
// - New Name" representation variants, to dst and returns the
// extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
	dst = appendHpackString(dst, f.Name)
	return appendHpackString(dst, f.Value)
}

// appendIndexedName appends f and index i referring indexed name
// entry, as encoded in one of "Literal Header field - Indexed Name"
// representation variants, to dst and returns the extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
	first := len(dst)
	var n byte
	if indexing {
		n = 6
	} else {
		n = 4
	}
	dst = appendVarInt(dst, n, i)
	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
	return appendHpackString(dst, f.Value)
}

// appendTableSize appends v, as encoded in "Header Table Size Update"
// representation, to dst and returns the extended buffer.
func appendTableSize(dst []byte, v uint32) []byte {
	first := len(dst)
	dst = appendVarInt(dst, 5, uint64(v))
	dst[first] |= 0x20
	return dst
}

// appendVarInt appends i, as encoded in variable integer form using an
// n-bit prefix, to dst and returns the extended buffer.
//
// See
// http://http2.github.io/http2-spec/compression.html#integer.representation
func appendVarInt(dst []byte, n byte, i uint64) []byte {
	k := uint64((1 << n) - 1)
	if i < k {
		return append(dst, byte(i))
	}
	dst = append(dst, byte(k))
	i -= k
	for ; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f)))
	}
	return append(dst, byte(i))
}

// appendHpackString appends s, as encoded in "String Literal"
// representation, to dst and returns the extended buffer.
//
// s will be encoded in Huffman codes only when it produces a strictly
// shorter byte string.
func appendHpackString(dst []byte, s string) []byte {
	huffmanLength := HuffmanEncodeLength(s)
	if huffmanLength < uint64(len(s)) {
		first := len(dst)
		dst = appendVarInt(dst, 7, huffmanLength)
		dst = AppendHuffmanString(dst, s)
		dst[first] |= 0x80
	} else {
		dst = appendVarInt(dst, 7, uint64(len(s)))
		dst = append(dst, s...)
	}
	return dst
}

// encodeTypeByte returns the type byte. If sensitive is true, the type
// byte for "Never Indexed" representation is returned. If sensitive is
// false and indexing is true, the type byte for "Incremental Indexing"
// representation is returned. Otherwise, the type byte for "Without
// Indexing" is returned.
func encodeTypeByte(indexing, sensitive bool) byte {
	if sensitive {
		return 0x10
	}
	if indexing {
		return 0x40
	}
	return 0
}
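A minimal usage sketch for the Encoder API above (a hedged example, not part of the vendored diff; it assumes the package is imported by its upstream path golang.org/x/net/http2/hpack):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// Each WriteField appends one encoded header field to buf.
	// Sensitive fields are emitted as "Never Indexed" literals.
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: "authorization", Value: "secret", Sensitive: true})

	fmt.Printf("encoded header block: %x\n", buf.Bytes())
}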
vendor/golang.org/x/net/http2/hpack/hpack.go (new file, generated, vendored; 542 lines)
@@ -0,0 +1,542 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package hpack implements HPACK, a compression format for
// efficiently representing HTTP header fields in the context of HTTP/2.
//
// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
package hpack

import (
	"bytes"
	"errors"
	"fmt"
)

// A DecodingError is something the spec defines as a decoding error.
type DecodingError struct {
	Err error
}

func (de DecodingError) Error() string {
	return fmt.Sprintf("decoding error: %v", de.Err)
}

// An InvalidIndexError is returned when an encoder references a table
// entry before the static table or after the end of the dynamic table.
type InvalidIndexError int

func (e InvalidIndexError) Error() string {
	return fmt.Sprintf("invalid indexed representation index %d", int(e))
}

// A HeaderField is a name-value pair. Both the name and value are
// treated as opaque sequences of octets.
type HeaderField struct {
	Name, Value string

	// Sensitive means that this header field should never be
	// indexed.
	Sensitive bool
}

// IsPseudo reports whether the header field is an http2 pseudo header.
// That is, it reports whether it starts with a colon.
// It is not otherwise guaranteed to be a valid pseudo header field,
// though.
func (hf HeaderField) IsPseudo() bool {
	return len(hf.Name) != 0 && hf.Name[0] == ':'
}

func (hf HeaderField) String() string {
	var suffix string
	if hf.Sensitive {
		suffix = " (sensitive)"
	}
	return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
}

// Size returns the size of an entry per RFC 7541 section 4.1.
func (hf HeaderField) Size() uint32 {
	// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
	// "The size of the dynamic table is the sum of the size of
	// its entries. The size of an entry is the sum of its name's
	// length in octets (as defined in Section 5.2), its value's
	// length in octets (see Section 5.2), plus 32. The size of
	// an entry is calculated using the length of the name and
	// value without any Huffman encoding applied."

	// This can overflow if somebody makes a large HeaderField
	// Name and/or Value by hand, but we don't care, because that
	// won't happen on the wire because the encoding doesn't allow
	// it.
	return uint32(len(hf.Name) + len(hf.Value) + 32)
}

// A Decoder is the decoding context for incremental processing of
// header blocks.
type Decoder struct {
	dynTab dynamicTable
	emit   func(f HeaderField)

	emitEnabled bool // whether calls to emit are enabled
	maxStrLen   int  // 0 means unlimited

	// buf is the unparsed buffer. It's only written to
	// saveBuf if it was truncated in the middle of a header
	// block. Because it's usually not owned, we can only
	// process it under Write.
	buf []byte // not owned; only valid during Write

	// saveBuf is previous data passed to Write which we weren't able
	// to fully parse before. Unlike buf, we own this data.
	saveBuf bytes.Buffer
}

// NewDecoder returns a new decoder with the provided maximum dynamic
// table size. The emitFunc will be called for each valid field
// parsed, in the same goroutine as calls to Write, before Write returns.
func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
	d := &Decoder{
		emit:        emitFunc,
		emitEnabled: true,
	}
	d.dynTab.allowedMaxSize = maxDynamicTableSize
	d.dynTab.setMaxSize(maxDynamicTableSize)
	return d
}

// ErrStringLength is returned by Decoder.Write when the max string length
// (as configured by Decoder.SetMaxStringLength) would be violated.
var ErrStringLength = errors.New("hpack: string too long")

// SetMaxStringLength sets the maximum size of a HeaderField name or
// value string. If a string exceeds this length (even after any
// decompression), Write will return ErrStringLength.
// A value of 0 means unlimited and is the default from NewDecoder.
func (d *Decoder) SetMaxStringLength(n int) {
	d.maxStrLen = n
}

// SetEmitFunc changes the callback used when new header fields
// are decoded.
// It must be non-nil. It does not affect EmitEnabled.
func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
	d.emit = emitFunc
}

// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
// should be called. The default is true.
//
// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
// while still decoding and keeping in-sync with decoder state, but
// without doing unnecessary decompression or generating unnecessary
// garbage for header fields past the limit.
func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }

// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
// are currently enabled. The default is true.
func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }

// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
// underlying buffers for garbage reasons.

func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
	d.dynTab.setMaxSize(v)
}

// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
// stream (via dynamic table size updates) may set the maximum size
// to.
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
	d.dynTab.allowedMaxSize = v
}

type dynamicTable struct {
	// ents is the FIFO described at
	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
	// The newest (lowest index) entry is appended at the end, and
	// entries are evicted from the front.
	ents           []HeaderField
	size           uint32
	maxSize        uint32 // current maxSize
	allowedMaxSize uint32 // maxSize may go up to this, inclusive
}

func (dt *dynamicTable) setMaxSize(v uint32) {
	dt.maxSize = v
	dt.evict()
}

// TODO: change dynamicTable to be a struct with a slice and a size int field,
// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
//
//
// Then make add increment the size. maybe the max size should move from Decoder to
// dynamicTable and add should return an ok bool if there was enough space.
//
// Later we'll need a remove operation on dynamicTable.

func (dt *dynamicTable) add(f HeaderField) {
	dt.ents = append(dt.ents, f)
	dt.size += f.Size()
	dt.evict()
}

// If we're too big, evict old stuff (front of the slice)
func (dt *dynamicTable) evict() {
	base := dt.ents // keep base pointer of slice
	for dt.size > dt.maxSize {
		dt.size -= dt.ents[0].Size()
		dt.ents = dt.ents[1:]
	}

	// Shift slice contents down if we evicted things.
	if len(dt.ents) != len(base) {
		copy(base, dt.ents)
		dt.ents = base[:len(dt.ents)]
	}
}

// constantTimeStringCompare compares string a and b in a constant
// time manner.
func constantTimeStringCompare(a, b string) bool {
	if len(a) != len(b) {
		return false
	}

	c := byte(0)

	for i := 0; i < len(a); i++ {
		c |= a[i] ^ b[i]
	}

	return c == 0
}

// search searches f in the table. The return value i is 0 if there is
// no name match. If there is name match or name/value match, i is the
// index of that entry (1-based). If both name and value match,
// nameValueMatch becomes true.
func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
	l := len(dt.ents)
	for j := l - 1; j >= 0; j-- {
		ent := dt.ents[j]
		if !constantTimeStringCompare(ent.Name, f.Name) {
			continue
		}
		if i == 0 {
			i = uint64(l - j)
		}
		if f.Sensitive {
			continue
		}
		if !constantTimeStringCompare(ent.Value, f.Value) {
			continue
		}
		i = uint64(l - j)
		nameValueMatch = true
		return
	}
	return
}

func (d *Decoder) maxTableIndex() int {
	return len(d.dynTab.ents) + len(staticTable)
}

func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
	if i < 1 {
		return
	}
	if i > uint64(d.maxTableIndex()) {
		return
	}
	if i <= uint64(len(staticTable)) {
		return staticTable[i-1], true
	}
	dents := d.dynTab.ents
	return dents[len(dents)-(int(i)-len(staticTable))], true
}

// DecodeFull decodes an entire block.
//
// TODO: remove this method and make it incremental later? This is
// easier for debugging now.
func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
	var hf []HeaderField
	saveFunc := d.emit
	defer func() { d.emit = saveFunc }()
	d.emit = func(f HeaderField) { hf = append(hf, f) }
	if _, err := d.Write(p); err != nil {
		return nil, err
	}
	if err := d.Close(); err != nil {
		return nil, err
	}
	return hf, nil
}

func (d *Decoder) Close() error {
	if d.saveBuf.Len() > 0 {
		d.saveBuf.Reset()
		return DecodingError{errors.New("truncated headers")}
	}
	return nil
}

func (d *Decoder) Write(p []byte) (n int, err error) {
	if len(p) == 0 {
		// Prevent state machine CPU attacks (making us redo
		// work up to the point of finding out we don't have
		// enough data)
		return
	}
	// Only copy the data if we have to. Optimistically assume
	// that p will contain a complete header block.
	if d.saveBuf.Len() == 0 {
		d.buf = p
	} else {
		d.saveBuf.Write(p)
		d.buf = d.saveBuf.Bytes()
		d.saveBuf.Reset()
	}

	for len(d.buf) > 0 {
		err = d.parseHeaderFieldRepr()
		if err == errNeedMore {
			// Extra paranoia, making sure saveBuf won't
			// get too large. All the varint and string
			// reading code earlier should already catch
			// overlong things and return ErrStringLength,
			// but keep this as a last resort.
			const varIntOverhead = 8 // conservative
			if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
				return 0, ErrStringLength
			}
			d.saveBuf.Write(d.buf)
			return len(p), nil
		}
		if err != nil {
			break
		}
	}
	return len(p), err
}

// errNeedMore is an internal sentinel error value that means the
// buffer is truncated and we need to read more data before we can
// continue parsing.
var errNeedMore = errors.New("need more data")

type indexType int

const (
	indexedTrue indexType = iota
	indexedFalse
	indexedNever
)

func (v indexType) indexed() bool   { return v == indexedTrue }
func (v indexType) sensitive() bool { return v == indexedNever }

// returns errNeedMore if there isn't enough data available.
// any other error is fatal.
// consumes d.buf iff it returns nil.
// precondition: must be called with len(d.buf) > 0
func (d *Decoder) parseHeaderFieldRepr() error {
	b := d.buf[0]
	switch {
	case b&128 != 0:
		// Indexed representation.
		// High bit set?
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
		return d.parseFieldIndexed()
	case b&192 == 64:
		// 6.2.1 Literal Header Field with Incremental Indexing
		// 0b01xxxxxx: top two bits are 01
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
		return d.parseFieldLiteral(6, indexedTrue)
	case b&240 == 0:
		// 6.2.2 Literal Header Field without Indexing
		// 0b0000xxxx: top four bits are 0000
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
		return d.parseFieldLiteral(4, indexedFalse)
	case b&240 == 16:
		// 6.2.3 Literal Header Field never Indexed
		// 0b0001xxxx: top four bits are 0001
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
		return d.parseFieldLiteral(4, indexedNever)
	case b&224 == 32:
		// 6.3 Dynamic Table Size Update
		// Top three bits are '001'.
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
		return d.parseDynamicTableSizeUpdate()
	}

	return DecodingError{errors.New("invalid encoding")}
}

// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseFieldIndexed() error {
	buf := d.buf
	idx, buf, err := readVarInt(7, buf)
	if err != nil {
		return err
	}
	hf, ok := d.at(idx)
	if !ok {
		return DecodingError{InvalidIndexError(idx)}
	}
	d.buf = buf
	return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
}

// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
	buf := d.buf
	nameIdx, buf, err := readVarInt(n, buf)
	if err != nil {
		return err
	}

	var hf HeaderField
	wantStr := d.emitEnabled || it.indexed()
	if nameIdx > 0 {
		ihf, ok := d.at(nameIdx)
		if !ok {
			return DecodingError{InvalidIndexError(nameIdx)}
		}
		hf.Name = ihf.Name
	} else {
		hf.Name, buf, err = d.readString(buf, wantStr)
		if err != nil {
			return err
		}
	}
	hf.Value, buf, err = d.readString(buf, wantStr)
	if err != nil {
		return err
	}
	d.buf = buf
	if it.indexed() {
		d.dynTab.add(hf)
	}
	hf.Sensitive = it.sensitive()
	return d.callEmit(hf)
}

func (d *Decoder) callEmit(hf HeaderField) error {
	if d.maxStrLen != 0 {
		if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
			return ErrStringLength
		}
	}
	if d.emitEnabled {
		d.emit(hf)
	}
	return nil
}

// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseDynamicTableSizeUpdate() error {
	buf := d.buf
	size, buf, err := readVarInt(5, buf)
	if err != nil {
		return err
	}
	if size > uint64(d.dynTab.allowedMaxSize) {
		return DecodingError{errors.New("dynamic table size update too large")}
	}
	d.dynTab.setMaxSize(uint32(size))
	d.buf = buf
	return nil
}

var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}

// readVarInt reads an unsigned variable length integer off the
// beginning of p. n is the parameter as described in
// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
//
// n must always be between 1 and 8.
//
// The returned remain buffer is either a smaller suffix of p, or err != nil.
// The error is errNeedMore if p doesn't contain a complete integer.
func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
	if n < 1 || n > 8 {
		panic("bad n")
	}
	if len(p) == 0 {
		return 0, p, errNeedMore
	}
	i = uint64(p[0])
	if n < 8 {
		i &= (1 << uint64(n)) - 1
	}
	if i < (1<<uint64(n))-1 {
		return i, p[1:], nil
	}

	origP := p
	p = p[1:]
	var m uint64
	for len(p) > 0 {
		b := p[0]
		p = p[1:]
		i += uint64(b&127) << m
		if b&128 == 0 {
			return i, p, nil
		}
		m += 7
		if m >= 63 { // TODO: proper overflow check. making this up.
			return 0, origP, errVarintOverflow
		}
	}
	return 0, origP, errNeedMore
}

// readString decodes an hpack string from p.
//
// wantStr is whether s will be used. If false, decompression and
// []byte->string garbage are skipped if s will be ignored
// anyway. This does mean that huffman decoding errors for non-indexed
// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
// is returning an error anyway, and because they're not indexed, the error
// won't affect the decoding state.
func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
	if len(p) == 0 {
		return "", p, errNeedMore
	}
	isHuff := p[0]&128 != 0
	strLen, p, err := readVarInt(7, p)
	if err != nil {
		return "", p, err
	}
	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
		return "", nil, ErrStringLength
	}
	if uint64(len(p)) < strLen {
		return "", p, errNeedMore
	}
	if !isHuff {
		if wantStr {
			s = string(p[:strLen])
		}
		return s, p[strLen:], nil
	}

	if wantStr {
		buf := bufPool.Get().(*bytes.Buffer)
		buf.Reset() // don't trust others
		defer bufPool.Put(buf)
		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
			buf.Reset()
			return "", nil, err
		}
		s = buf.String()
		buf.Reset() // be nice to GC
	}
	return s, p[strLen:], nil
}
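A minimal sketch of the streaming Decoder API above (a hedged example, assuming the same import path); the emit callback runs once per decoded field, during Write, in the same goroutine:

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Produce a valid header block with the Encoder first.
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/html"})

	// Decode it; the callback fires before Write returns.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(buf.Bytes()); err != nil {
		log.Fatal(err)
	}
	if err := dec.Close(); err != nil {
		log.Fatal(err)
	}
}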
vendor/golang.org/x/net/http2/hpack/huffman.go (new file, generated, vendored; 190 lines)
@@ -0,0 +1,190 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hpack

import (
	"bytes"
	"errors"
	"io"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// HuffmanDecode decodes the string in v and writes the expanded
// result to w, returning the number of bytes written to w and the
// Write call's return value. At most one Write call is made.
func HuffmanDecode(w io.Writer, v []byte) (int, error) {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer bufPool.Put(buf)
	if err := huffmanDecode(buf, 0, v); err != nil {
		return 0, err
	}
	return w.Write(buf.Bytes())
}

// HuffmanDecodeToString decodes the string in v.
func HuffmanDecodeToString(v []byte) (string, error) {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer bufPool.Put(buf)
	if err := huffmanDecode(buf, 0, v); err != nil {
		return "", err
	}
	return buf.String(), nil
}

// ErrInvalidHuffman is returned for errors found decoding
// Huffman-encoded strings.
var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")

// huffmanDecode decodes v to buf.
// If maxLen is greater than 0, attempts to write more to buf than
// maxLen bytes will return ErrStringLength.
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
	n := rootHuffmanNode
	cur, nbits := uint(0), uint8(0)
	for _, b := range v {
		cur = cur<<8 | uint(b)
		nbits += 8
		for nbits >= 8 {
			idx := byte(cur >> (nbits - 8))
			n = n.children[idx]
			if n == nil {
				return ErrInvalidHuffman
			}
			if n.children == nil {
				if maxLen != 0 && buf.Len() == maxLen {
					return ErrStringLength
				}
				buf.WriteByte(n.sym)
				nbits -= n.codeLen
				n = rootHuffmanNode
			} else {
				nbits -= 8
			}
		}
	}
	for nbits > 0 {
		n = n.children[byte(cur<<(8-nbits))]
		if n.children != nil || n.codeLen > nbits {
			break
		}
		buf.WriteByte(n.sym)
		nbits -= n.codeLen
		n = rootHuffmanNode
	}
	return nil
}

type node struct {
	// children is non-nil for internal nodes
	children []*node

	// The following are only valid if children is nil:
	codeLen uint8 // number of bits that led to the output of sym
	sym     byte  // output symbol
}

func newInternalNode() *node {
	return &node{children: make([]*node, 256)}
}

var rootHuffmanNode = newInternalNode()

func init() {
	if len(huffmanCodes) != 256 {
		panic("unexpected size")
	}
	for i, code := range huffmanCodes {
		addDecoderNode(byte(i), code, huffmanCodeLen[i])
	}
}

func addDecoderNode(sym byte, code uint32, codeLen uint8) {
	cur := rootHuffmanNode
	for codeLen > 8 {
		codeLen -= 8
		i := uint8(code >> codeLen)
		if cur.children[i] == nil {
			cur.children[i] = newInternalNode()
		}
		cur = cur.children[i]
	}
	shift := 8 - codeLen
	start, end := int(uint8(code<<shift)), int(1<<shift)
	for i := start; i < start+end; i++ {
		cur.children[i] = &node{sym: sym, codeLen: codeLen}
	}
}

// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
// and returns the extended buffer.
func AppendHuffmanString(dst []byte, s string) []byte {
	rembits := uint8(8)

	for i := 0; i < len(s); i++ {
		if rembits == 8 {
			dst = append(dst, 0)
		}
		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
	}

	if rembits < 8 {
		// special EOS symbol
		code := uint32(0x3fffffff)
		nbits := uint8(30)

		t := uint8(code >> (nbits - rembits))
		dst[len(dst)-1] |= t
	}

	return dst
}

// HuffmanEncodeLength returns the number of bytes required to encode
// s in Huffman codes. The result is rounded up to a byte boundary.
func HuffmanEncodeLength(s string) uint64 {
	n := uint64(0)
	for i := 0; i < len(s); i++ {
		n += uint64(huffmanCodeLen[s[i]])
	}
	return (n + 7) / 8
}

// appendByteToHuffmanCode appends the Huffman code for c to dst and
// returns the extended buffer and the remaining bits in the last
// element. The appending is not byte aligned and the remaining bits
// in the last element of dst are given in rembits.
func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
	code := huffmanCodes[c]
	nbits := huffmanCodeLen[c]

	for {
		if rembits > nbits {
			t := uint8(code << (rembits - nbits))
			dst[len(dst)-1] |= t
			rembits -= nbits
			break
		}

		t := uint8(code >> (nbits - rembits))
		dst[len(dst)-1] |= t

		nbits -= rembits
		rembits = 8

		if nbits == 0 {
			break
		}

		dst = append(dst, 0)
	}

	return dst, rembits
}
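A small sketch of the exported Huffman helpers above (a hedged example, same import-path assumption); HuffmanEncodeLength predicts the encoded size, which appendHpackString in encode.go uses to decide whether Huffman coding actually saves bytes:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	s := "www.example.com"

	// Compare the plain length with the Huffman-coded length.
	fmt.Println("plain:", len(s), "bytes, huffman:", hpack.HuffmanEncodeLength(s), "bytes")

	// Round-trip through the exported helpers.
	encoded := hpack.AppendHuffmanString(nil, s)
	decoded, err := hpack.HuffmanDecodeToString(encoded)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded == s) // true
}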
vendor/golang.org/x/net/http2/hpack/tables.go (new file, generated, vendored; 352 lines)
@@ -0,0 +1,352 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hpack

func pair(name, value string) HeaderField {
	return HeaderField{Name: name, Value: value}
}

// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
var staticTable = [...]HeaderField{
	pair(":authority", ""), // index 1 (1-based)
	pair(":method", "GET"),
	pair(":method", "POST"),
	pair(":path", "/"),
	pair(":path", "/index.html"),
	pair(":scheme", "http"),
	pair(":scheme", "https"),
	pair(":status", "200"),
	pair(":status", "204"),
	pair(":status", "206"),
	pair(":status", "304"),
	pair(":status", "400"),
	pair(":status", "404"),
	pair(":status", "500"),
	pair("accept-charset", ""),
	pair("accept-encoding", "gzip, deflate"),
	pair("accept-language", ""),
	pair("accept-ranges", ""),
	pair("accept", ""),
	pair("access-control-allow-origin", ""),
	pair("age", ""),
	pair("allow", ""),
	pair("authorization", ""),
	pair("cache-control", ""),
	pair("content-disposition", ""),
	pair("content-encoding", ""),
	pair("content-language", ""),
	pair("content-length", ""),
	pair("content-location", ""),
	pair("content-range", ""),
	pair("content-type", ""),
	pair("cookie", ""),
	pair("date", ""),
	pair("etag", ""),
	pair("expect", ""),
	pair("expires", ""),
	pair("from", ""),
	pair("host", ""),
	pair("if-match", ""),
	pair("if-modified-since", ""),
	pair("if-none-match", ""),
	pair("if-range", ""),
	pair("if-unmodified-since", ""),
	pair("last-modified", ""),
	pair("link", ""),
	pair("location", ""),
	pair("max-forwards", ""),
	pair("proxy-authenticate", ""),
	pair("proxy-authorization", ""),
	pair("range", ""),
	pair("referer", ""),
	pair("refresh", ""),
	pair("retry-after", ""),
	pair("server", ""),
	pair("set-cookie", ""),
	pair("strict-transport-security", ""),
	pair("transfer-encoding", ""),
	pair("user-agent", ""),
	pair("vary", ""),
	pair("via", ""),
	pair("www-authenticate", ""),
}

var huffmanCodes = [256]uint32{
	0x1ff8,
	0x7fffd8,
	0xfffffe2,
	0xfffffe3,
	0xfffffe4,
	0xfffffe5,
	0xfffffe6,
	0xfffffe7,
	0xfffffe8,
	0xffffea,
	0x3ffffffc,
	0xfffffe9,
	0xfffffea,
	0x3ffffffd,
	0xfffffeb,
	0xfffffec,
	0xfffffed,
	0xfffffee,
	0xfffffef,
	0xffffff0,
	0xffffff1,
	0xffffff2,
	0x3ffffffe,
	0xffffff3,
	0xffffff4,
	0xffffff5,
	0xffffff6,
	0xffffff7,
	0xffffff8,
	0xffffff9,
	0xffffffa,
	0xffffffb,
	0x14,
	0x3f8,
	0x3f9,
	0xffa,
	0x1ff9,
	0x15,
	0xf8,
	0x7fa,
	0x3fa,
	0x3fb,
	0xf9,
	0x7fb,
	0xfa,
	0x16,
	0x17,
	0x18,
	0x0,
	0x1,
	0x2,
	0x19,
	0x1a,
	0x1b,
	0x1c,
	0x1d,
	0x1e,
	0x1f,
	0x5c,
	0xfb,
	0x7ffc,
	0x20,
	0xffb,
	0x3fc,
	0x1ffa,
	0x21,
	0x5d,
	0x5e,
	0x5f,
	0x60,
	0x61,
	0x62,
	0x63,
	0x64,
	0x65,
	0x66,
	0x67,
	0x68,
	0x69,
	0x6a,
	0x6b,
	0x6c,
	0x6d,
	0x6e,
	0x6f,
	0x70,
	0x71,
	0x72,
	0xfc,
	0x73,
	0xfd,
	0x1ffb,
	0x7fff0,
	0x1ffc,
	0x3ffc,
	0x22,
	0x7ffd,
	0x3,
	0x23,
	0x4,
	0x24,
	0x5,
	0x25,
	0x26,
	0x27,
	0x6,
	0x74,
	0x75,
	0x28,
	0x29,
	0x2a,
	0x7,
	0x2b,
	0x76,
	0x2c,
	0x8,
	0x9,
	0x2d,
	0x77,
	0x78,
	0x79,
	0x7a,
	0x7b,
	0x7ffe,
	0x7fc,
	0x3ffd,
	0x1ffd,
	0xffffffc,
	0xfffe6,
	0x3fffd2,
	0xfffe7,
	0xfffe8,
	0x3fffd3,
	0x3fffd4,
	0x3fffd5,
	0x7fffd9,
	0x3fffd6,
	0x7fffda,
	0x7fffdb,
	0x7fffdc,
	0x7fffdd,
	0x7fffde,
	0xffffeb,
	0x7fffdf,
	0xffffec,
	0xffffed,
	0x3fffd7,
	0x7fffe0,
	0xffffee,
	0x7fffe1,
	0x7fffe2,
	0x7fffe3,
	0x7fffe4,
	0x1fffdc,
	0x3fffd8,
	0x7fffe5,
	0x3fffd9,
	0x7fffe6,
	0x7fffe7,
	0xffffef,
	0x3fffda,
	0x1fffdd,
	0xfffe9,
	0x3fffdb,
	0x3fffdc,
	0x7fffe8,
	0x7fffe9,
	0x1fffde,
	0x7fffea,
	0x3fffdd,
	0x3fffde,
	0xfffff0,
	0x1fffdf,
	0x3fffdf,
	0x7fffeb,
	0x7fffec,
	0x1fffe0,
	0x1fffe1,
	0x3fffe0,
	0x1fffe2,
	0x7fffed,
	0x3fffe1,
	0x7fffee,
	0x7fffef,
	0xfffea,
	0x3fffe2,
	0x3fffe3,
	0x3fffe4,
	0x7ffff0,
	0x3fffe5,
	0x3fffe6,
	0x7ffff1,
	0x3ffffe0,
	0x3ffffe1,
	0xfffeb,
	0x7fff1,
	0x3fffe7,
	0x7ffff2,
	0x3fffe8,
	0x1ffffec,
	0x3ffffe2,
	0x3ffffe3,
	0x3ffffe4,
	0x7ffffde,
	0x7ffffdf,
	0x3ffffe5,
	0xfffff1,
	0x1ffffed,
	0x7fff2,
	0x1fffe3,
	0x3ffffe6,
	0x7ffffe0,
	0x7ffffe1,
	0x3ffffe7,
	0x7ffffe2,
	0xfffff2,
	0x1fffe4,
	0x1fffe5,
	0x3ffffe8,
	0x3ffffe9,
	0xffffffd,
	0x7ffffe3,
	0x7ffffe4,
	0x7ffffe5,
	0xfffec,
	0xfffff3,
	0xfffed,
	0x1fffe6,
	0x3fffe9,
	0x1fffe7,
	0x1fffe8,
	0x7ffff3,
	0x3fffea,
	0x3fffeb,
	0x1ffffee,
	0x1ffffef,
	0xfffff4,
	0xfffff5,
	0x3ffffea,
	0x7ffff4,
	0x3ffffeb,
	0x7ffffe6,
	0x3ffffec,
	0x3ffffed,
	0x7ffffe7,
	0x7ffffe8,
	0x7ffffe9,
	0x7ffffea,
	0x7ffffeb,
	0xffffffe,
	0x7ffffec,
	0x7ffffed,
	0x7ffffee,
	0x7ffffef,
	0x7fffff0,
	0x3ffffee,
}

var huffmanCodeLen = [256]uint8{
	13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
	28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
	5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
	13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
	15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
	6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
	20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
	24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
	22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
	21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
	26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
	19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
	20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
	26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
}
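One way to see the static table above in action (a hedged sketch, same import-path assumption): byte 0x82 is an Indexed Header Field with 7-bit index 2, which the table maps to ":method: GET".

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	dec := hpack.NewDecoder(4096, func(hpack.HeaderField) {})
	// 0x82 = 0x80 (indexed representation) | 2 (static table index).
	fields, err := dec.DecodeFull([]byte{0x82})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fields[0].Name, fields[0].Value) // :method GET
}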