Use go mod vendor for the vendored directory (backwards compatible)

85 vendor/github.com/valyala/fasttemplate/README.md generated vendored Normal file
@@ -0,0 +1,85 @@
fasttemplate
============

Simple and fast template engine for Go.

Fasttemplate performs only a single task: it substitutes template placeholders
with user-defined values. At high speed :)

Take a look at [quicktemplate](https://github.com/valyala/quicktemplate) if you need a fast yet powerful HTML template engine.

*Please note that fasttemplate doesn't do any escaping on template values,
unlike [html/template](http://golang.org/pkg/html/template/) does. So values
must be properly escaped before passing them to fasttemplate.*
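
For instance, a value destined for an HTML page can be run through `html.EscapeString` before substitution. The snippet below is a minimal sketch (not part of the vendored README itself); it relies only on the `New` and `ExecuteString` calls shown in the Usage section further down:

```go
package main

import (
	"fmt"
	"html"

	"github.com/valyala/fasttemplate"
)

func main() {
	t := fasttemplate.New("<p>Hello, {{name}}!</p>", "{{", "}}")

	// fasttemplate inserts the value verbatim, so escape it first.
	s := t.ExecuteString(map[string]interface{}{
		"name": html.EscapeString(`<script>alert("boom")</script>`),
	})
	fmt.Println(s)
	// <p>Hello, &lt;script&gt;alert(&#34;boom&#34;)&lt;/script&gt;!</p>
}
```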

Fasttemplate is faster than [text/template](http://golang.org/pkg/text/template/),
[strings.Replace](http://golang.org/pkg/strings/#Replace),
[strings.Replacer](http://golang.org/pkg/strings/#Replacer)
and [fmt.Fprintf](https://golang.org/pkg/fmt/#Fprintf) on placeholders' substitution.

Below are benchmark results comparing fasttemplate performance to text/template,
strings.Replace, strings.Replacer and fmt.Fprintf:

```
$ go test -bench=. -benchmem
PASS
BenchmarkFmtFprintf-4                      2000000    790 ns/op      0 B/op    0 allocs/op
BenchmarkStringsReplace-4                   500000   3474 ns/op   2112 B/op   14 allocs/op
BenchmarkStringsReplacer-4                  500000   2657 ns/op   2256 B/op   23 allocs/op
BenchmarkTextTemplate-4                     500000   3333 ns/op    336 B/op   19 allocs/op
BenchmarkFastTemplateExecuteFunc-4         5000000    349 ns/op      0 B/op    0 allocs/op
BenchmarkFastTemplateExecute-4             3000000    383 ns/op      0 B/op    0 allocs/op
BenchmarkFastTemplateExecuteFuncString-4   3000000    549 ns/op    144 B/op    1 allocs/op
BenchmarkFastTemplateExecuteString-4       3000000    572 ns/op    144 B/op    1 allocs/op
BenchmarkFastTemplateExecuteTagFunc-4      2000000    743 ns/op    144 B/op    3 allocs/op
```
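
The zero-allocation fasttemplate rows above (Execute and ExecuteFunc) come from writing the rendered output straight into an io.Writer instead of building an intermediate result string. A minimal sketch of that pattern; `Template.Execute` is assumed from the package's godoc rather than from this README:

```go
package main

import (
	"bufio"
	"log"
	"os"

	"github.com/valyala/fasttemplate"
)

func main() {
	t := fasttemplate.New("http://{{host}}/?q={{query}}\n", "{{", "}}")

	// Render directly into a buffered writer; no result string is allocated.
	w := bufio.NewWriter(os.Stdout)
	if _, err := t.Execute(w, map[string]interface{}{
		"host":  "example.com",
		"query": "hello",
	}); err != nil {
		log.Fatal(err)
	}
	w.Flush()
	// Output: http://example.com/?q=hello
}
```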


Docs
====

See http://godoc.org/github.com/valyala/fasttemplate .


Usage
=====

```go
template := "http://{{host}}/?q={{query}}&foo={{bar}}{{bar}}"
t := fasttemplate.New(template, "{{", "}}")
s := t.ExecuteString(map[string]interface{}{
	"host":  "google.com",
	"query": url.QueryEscape("hello=world"),
	"bar":   "foobar",
})
fmt.Printf("%s", s)

// Output:
// http://google.com/?q=hello%3Dworld&foo=foobarfoobar
```


Advanced usage
==============

```go
template := "Hello, [user]! You won [prize]!!! [foobar]"
t, err := fasttemplate.NewTemplate(template, "[", "]")
if err != nil {
	log.Fatalf("unexpected error when parsing template: %s", err)
}
s := t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) {
	switch tag {
	case "user":
		return w.Write([]byte("John"))
	case "prize":
		return w.Write([]byte("$100500"))
	default:
		return w.Write([]byte(fmt.Sprintf("[unknown tag %q]", tag)))
	}
})
fmt.Printf("%s", s)

// Output:
// Hello, John! You won $100500!!! [unknown tag "foobar"]
```

@@ -1,111 +0,0 @@
package bytebufferpool

import "io"

// ByteBuffer provides byte buffer, which can be used for minimizing
// memory allocations.
//
// ByteBuffer may be used with functions appending data to the given []byte
// slice. See example code for details.
//
// Use Get for obtaining an empty byte buffer.
type ByteBuffer struct {

	// B is a byte buffer to use in append-like workloads.
	// See example code for details.
	B []byte
}

// Len returns the size of the byte buffer.
func (b *ByteBuffer) Len() int {
	return len(b.B)
}

// ReadFrom implements io.ReaderFrom.
//
// The function appends all the data read from r to b.
func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) {
	p := b.B
	nStart := int64(len(p))
	nMax := int64(cap(p))
	n := nStart
	if nMax == 0 {
		nMax = 64
		p = make([]byte, nMax)
	} else {
		p = p[:nMax]
	}
	for {
		if n == nMax {
			nMax *= 2
			bNew := make([]byte, nMax)
			copy(bNew, p)
			p = bNew
		}
		nn, err := r.Read(p[n:])
		n += int64(nn)
		if err != nil {
			b.B = p[:n]
			n -= nStart
			if err == io.EOF {
				return n, nil
			}
			return n, err
		}
	}
}

// WriteTo implements io.WriterTo.
func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) {
	n, err := w.Write(b.B)
	return int64(n), err
}

// Bytes returns b.B, i.e. all the bytes accumulated in the buffer.
//
// The purpose of this function is bytes.Buffer compatibility.
func (b *ByteBuffer) Bytes() []byte {
	return b.B
}

// Write implements io.Writer - it appends p to ByteBuffer.B
func (b *ByteBuffer) Write(p []byte) (int, error) {
	b.B = append(b.B, p...)
	return len(p), nil
}

// WriteByte appends the byte c to the buffer.
//
// The purpose of this function is bytes.Buffer compatibility.
//
// The function always returns nil.
func (b *ByteBuffer) WriteByte(c byte) error {
	b.B = append(b.B, c)
	return nil
}

// WriteString appends s to ByteBuffer.B.
func (b *ByteBuffer) WriteString(s string) (int, error) {
	b.B = append(b.B, s...)
	return len(s), nil
}

// Set sets ByteBuffer.B to p.
func (b *ByteBuffer) Set(p []byte) {
	b.B = append(b.B[:0], p...)
}

// SetString sets ByteBuffer.B to s.
func (b *ByteBuffer) SetString(s string) {
	b.B = append(b.B[:0], s...)
}

// String returns string representation of ByteBuffer.B.
func (b *ByteBuffer) String() string {
	return string(b.B)
}

// Reset makes ByteBuffer.B empty.
func (b *ByteBuffer) Reset() {
	b.B = b.B[:0]
}
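
The doc comment on ByteBuffer above points at append-style example code. The following is a minimal usage sketch covering both styles; it uses only the methods defined in the removed file and assumes the upstream import path github.com/valyala/bytebufferpool (this commit only drops the nested vendored copy):

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/valyala/bytebufferpool"
)

func main() {
	var b bytebufferpool.ByteBuffer

	// Append-style usage: B is exported, so append-based helpers work on it directly.
	b.B = append(b.B, "answer="...)
	b.B = strconv.AppendInt(b.B, 42, 10)

	// io.Writer-style usage via the Write/WriteString methods shown above.
	b.WriteString(" (cached)")

	fmt.Println(b.String()) // answer=42 (cached)
	b.Reset()               // empty the buffer for reuse
}
```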

@@ -1,7 +0,0 @@
// Package bytebufferpool implements a pool of byte buffers
// with anti-fragmentation protection.
//
// The pool may waste a limited amount of memory due to fragmentation.
// This amount equals the maximum total size of the byte buffers
// in concurrent use.
package bytebufferpool

151 vendor/github.com/valyala/fasttemplate/vendor/github.com/valyala/bytebufferpool/pool.go generated vendored
@@ -1,151 +0,0 @@
package bytebufferpool

import (
	"sort"
	"sync"
	"sync/atomic"
)

const (
	minBitSize = 6 // 2**6=64 is a CPU cache line size
	steps      = 20

	minSize = 1 << minBitSize
	maxSize = 1 << (minBitSize + steps - 1)

	calibrateCallsThreshold = 42000
	maxPercentile           = 0.95
)

// Pool represents byte buffer pool.
//
// Distinct pools may be used for distinct types of byte buffers.
// Properly determined byte buffer types with their own pools may help reducing
// memory waste.
type Pool struct {
	calls       [steps]uint64
	calibrating uint64

	defaultSize uint64
	maxSize     uint64

	pool sync.Pool
}

var defaultPool Pool

// Get returns an empty byte buffer from the pool.
//
// Got byte buffer may be returned to the pool via Put call.
// This reduces the number of memory allocations required for byte buffer
// management.
func Get() *ByteBuffer { return defaultPool.Get() }

// Get returns new byte buffer with zero length.
//
// The byte buffer may be returned to the pool via Put after the use
// in order to minimize GC overhead.
func (p *Pool) Get() *ByteBuffer {
	v := p.pool.Get()
	if v != nil {
		return v.(*ByteBuffer)
	}
	return &ByteBuffer{
		B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)),
	}
}

// Put returns byte buffer to the pool.
//
// ByteBuffer.B mustn't be touched after returning it to the pool.
// Otherwise data races will occur.
func Put(b *ByteBuffer) { defaultPool.Put(b) }

// Put releases byte buffer obtained via Get to the pool.
//
// The buffer mustn't be accessed after returning to the pool.
func (p *Pool) Put(b *ByteBuffer) {
	idx := index(len(b.B))

	if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold {
		p.calibrate()
	}

	maxSize := int(atomic.LoadUint64(&p.maxSize))
	if maxSize == 0 || cap(b.B) <= maxSize {
		b.Reset()
		p.pool.Put(b)
	}
}

func (p *Pool) calibrate() {
	if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) {
		return
	}

	a := make(callSizes, 0, steps)
	var callsSum uint64
	for i := uint64(0); i < steps; i++ {
		calls := atomic.SwapUint64(&p.calls[i], 0)
		callsSum += calls
		a = append(a, callSize{
			calls: calls,
			size:  minSize << i,
		})
	}
	sort.Sort(a)

	defaultSize := a[0].size
	maxSize := defaultSize

	maxSum := uint64(float64(callsSum) * maxPercentile)
	callsSum = 0
	for i := 0; i < steps; i++ {
		if callsSum > maxSum {
			break
		}
		callsSum += a[i].calls
		size := a[i].size
		if size > maxSize {
			maxSize = size
		}
	}

	atomic.StoreUint64(&p.defaultSize, defaultSize)
	atomic.StoreUint64(&p.maxSize, maxSize)

	atomic.StoreUint64(&p.calibrating, 0)
}

type callSize struct {
	calls uint64
	size  uint64
}

type callSizes []callSize

func (ci callSizes) Len() int {
	return len(ci)
}

func (ci callSizes) Less(i, j int) bool {
	return ci[i].calls > ci[j].calls
}

func (ci callSizes) Swap(i, j int) {
	ci[i], ci[j] = ci[j], ci[i]
}

func index(n int) int {
	n--
	n >>= minBitSize
	idx := 0
	for n > 0 {
		n >>= 1
		idx++
	}
	if idx >= steps {
		idx = steps - 1
	}
	return idx
}
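
The package-level Get and Put on defaultPool are what callers normally touch; index bins each returned buffer's length into a power-of-two size class, and calibrate periodically derives defaultSize from the most frequent class and maxSize from the classes covering up to the 95th percentile of calls, so oversized buffers are dropped rather than pooled. A minimal usage sketch, again assuming the upstream import path github.com/valyala/bytebufferpool:

```go
package main

import (
	"fmt"

	"github.com/valyala/bytebufferpool"
)

func main() {
	// Get hands out a buffer whose capacity follows the calibrated defaultSize.
	b := bytebufferpool.Get()
	defer bytebufferpool.Put(b) // recycled unless it grew past the calibrated maxSize

	b.WriteString("hello, ")
	b.WriteString("pool")
	fmt.Println(b.String()) // hello, pool
}
```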