mirror of https://github.com/cwinfo/matterbridge.git synced 2025-07-06 18:54:05 +00:00

Update mattermost library (#2152)

* Update mattermost library

* Fix linting
Author: Wim
Date: 2024-05-24 23:08:09 +02:00 (committed by GitHub)
Parent: 65d78e38af
Commit: d16645c952
1003 changed files with 89451 additions and 114025 deletions

vendor/golang.org/x/net/internal/timeseries/timeseries.go generated vendored Normal file

@@ -0,0 +1,525 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package timeseries implements a time series structure for stats collection.
package timeseries // import "golang.org/x/net/internal/timeseries"
import (
"fmt"
"log"
"time"
)
const (
timeSeriesNumBuckets = 64
minuteHourSeriesNumBuckets = 60
)
var timeSeriesResolutions = []time.Duration{
1 * time.Second,
10 * time.Second,
1 * time.Minute,
10 * time.Minute,
1 * time.Hour,
6 * time.Hour,
24 * time.Hour, // 1 day
7 * 24 * time.Hour, // 1 week
4 * 7 * 24 * time.Hour, // 4 weeks
16 * 7 * 24 * time.Hour, // 16 weeks
}
var minuteHourSeriesResolutions = []time.Duration{
1 * time.Second,
1 * time.Minute,
}
// An Observable is a kind of data that can be aggregated in a time series.
type Observable interface {
Multiply(ratio float64) // Multiplies the data in self by a given ratio
Add(other Observable) // Adds the data from a different observation to self
Clear() // Clears the observation so it can be reused.
CopyFrom(other Observable) // Copies the contents of a given observation to self
}
// Float attaches the methods of Observable to a float64.
type Float float64
// NewFloat returns a Float.
func NewFloat() Observable {
f := Float(0)
return &f
}
// String returns the float as a string.
func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
// Value returns the float's value.
func (f *Float) Value() float64 { return float64(*f) }
func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
func (f *Float) Add(other Observable) {
o := other.(*Float)
*f += *o
}
func (f *Float) Clear() { *f = 0 }
func (f *Float) CopyFrom(other Observable) {
o := other.(*Float)
*f = *o
}
// A Clock tells the current time.
type Clock interface {
Time() time.Time
}
type defaultClock int
var defaultClockInstance defaultClock
func (defaultClock) Time() time.Time { return time.Now() }
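// The Clock indirection exists mainly so tests can control time. A minimal
// sketch of a manually advanced clock (fakeClock is illustrative, not part
// of this package); it can be passed to NewTimeSeriesWithClock, defined
// below, to make observations land in deterministic buckets.
type fakeClock struct{ t time.Time }

// Time returns the fake clock's current time.
func (c *fakeClock) Time() time.Time { return c.t }

// Advance moves the fake clock forward by d.
func (c *fakeClock) Advance(d time.Duration) { c.t = c.t.Add(d) }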
// Information kept per level. Each level consists of a circular list of
// observations. The start of the level may be derived from end minus
// len(buckets) * size.
type tsLevel struct {
oldest int // index to oldest bucketed Observable
newest int // index to newest bucketed Observable
end time.Time // end timestamp for this level
size time.Duration // duration of the bucketed Observable
buckets []Observable // collections of observations
provider func() Observable // used for creating new Observable
}
func (l *tsLevel) Clear() {
l.oldest = 0
l.newest = len(l.buckets) - 1
l.end = time.Time{}
for i := range l.buckets {
if l.buckets[i] != nil {
l.buckets[i].Clear()
l.buckets[i] = nil
}
}
}
func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
l.size = size
l.provider = f
l.buckets = make([]Observable, numBuckets)
}
// Keeps a sequence of levels. Each level is responsible for storing data at
// a given resolution. For example, the first level stores data at a one
// minute resolution while the second level stores data at a one hour
// resolution.
// Each level is represented by a sequence of buckets. Each bucket spans an
// interval equal to the resolution of the level. New observations are added
// to the last bucket.
type timeSeries struct {
provider func() Observable // make more Observable
numBuckets int // number of buckets in each level
levels []*tsLevel // levels of bucketed Observable
lastAdd time.Time // time of last Observable tracked
total Observable // convenient aggregation of all Observable
clock Clock // Clock for getting current time
pending Observable // observations not yet bucketed
pendingTime time.Time // what time are we keeping in pending
dirty bool // if there are pending observations
}
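// For intuition (levelSpan is an illustrative helper, not part of this
// package): each level retains numBuckets * resolution of history, so with
// timeSeriesNumBuckets = 64 the 1-second level covers the last ~64 seconds
// and the coarsest 16-week level covers roughly 19.6 years.
func levelSpan(resolution time.Duration, numBuckets int) time.Duration {
	return resolution * time.Duration(numBuckets)
}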
// init initializes a level according to the supplied criteria.
func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
ts.provider = f
ts.numBuckets = numBuckets
ts.clock = clock
ts.levels = make([]*tsLevel, len(resolutions))
for i := range resolutions {
if i > 0 && resolutions[i-1] >= resolutions[i] {
log.Print("timeseries: resolutions must be monotonically increasing")
break
}
newLevel := new(tsLevel)
newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
ts.levels[i] = newLevel
}
ts.Clear()
}
// Clear removes all observations from the time series.
func (ts *timeSeries) Clear() {
ts.lastAdd = time.Time{}
ts.total = ts.resetObservation(ts.total)
ts.pending = ts.resetObservation(ts.pending)
ts.pendingTime = time.Time{}
ts.dirty = false
for i := range ts.levels {
ts.levels[i].Clear()
}
}
// Add records an observation at the current time.
func (ts *timeSeries) Add(observation Observable) {
ts.AddWithTime(observation, ts.clock.Time())
}
// AddWithTime records an observation at the specified time.
func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
smallBucketDuration := ts.levels[0].size
if t.After(ts.lastAdd) {
ts.lastAdd = t
}
if t.After(ts.pendingTime) {
ts.advance(t)
ts.mergePendingUpdates()
ts.pendingTime = ts.levels[0].end
ts.pending.CopyFrom(observation)
ts.dirty = true
} else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
// The observation is close enough to go into the pending bucket.
// This compensates for clock skewing and small scheduling delays
// by letting the update stay in the fast path.
ts.pending.Add(observation)
ts.dirty = true
} else {
ts.mergeValue(observation, t)
}
}
// mergeValue inserts the observation at the specified time in the past into all levels.
func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
for _, level := range ts.levels {
index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
if 0 <= index && index < ts.numBuckets {
bucketNumber := (level.oldest + index) % ts.numBuckets
if level.buckets[bucketNumber] == nil {
level.buckets[bucketNumber] = level.provider()
}
level.buckets[bucketNumber].Add(observation)
}
}
ts.total.Add(observation)
}
// mergePendingUpdates applies the pending updates into all levels.
func (ts *timeSeries) mergePendingUpdates() {
if ts.dirty {
ts.mergeValue(ts.pending, ts.pendingTime)
ts.pending = ts.resetObservation(ts.pending)
ts.dirty = false
}
}
// advance cycles the buckets at each level until the latest bucket in
// each level can hold the time specified.
func (ts *timeSeries) advance(t time.Time) {
if !t.After(ts.levels[0].end) {
return
}
for i := 0; i < len(ts.levels); i++ {
level := ts.levels[i]
if !level.end.Before(t) {
break
}
// If the time is sufficiently far, just clear the level and advance
// directly.
if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
for _, b := range level.buckets {
ts.resetObservation(b)
}
level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
}
for t.After(level.end) {
level.end = level.end.Add(level.size)
level.newest = level.oldest
level.oldest = (level.oldest + 1) % ts.numBuckets
ts.resetObservation(level.buckets[level.newest])
}
t = level.end
}
}
// Latest returns the sum of the num latest buckets from the level.
func (ts *timeSeries) Latest(level, num int) Observable {
now := ts.clock.Time()
if ts.levels[0].end.Before(now) {
ts.advance(now)
}
ts.mergePendingUpdates()
result := ts.provider()
l := ts.levels[level]
index := l.newest
for i := 0; i < num; i++ {
if l.buckets[index] != nil {
result.Add(l.buckets[index])
}
if index == 0 {
index = ts.numBuckets
}
index--
}
return result
}
// LatestBuckets returns a copy of the num latest buckets from level.
func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
if level < 0 || level >= len(ts.levels) {
log.Print("timeseries: bad level argument: ", level)
return nil
}
if num < 0 || num >= ts.numBuckets {
log.Print("timeseries: bad num argument: ", num)
return nil
}
results := make([]Observable, num)
now := ts.clock.Time()
if ts.levels[0].end.Before(now) {
ts.advance(now)
}
ts.mergePendingUpdates()
l := ts.levels[level]
index := l.newest
for i := 0; i < num; i++ {
result := ts.provider()
results[i] = result
if l.buckets[index] != nil {
result.CopyFrom(l.buckets[index])
}
if index == 0 {
index = ts.numBuckets
}
index--
}
return results
}
// ScaleBy updates observations by scaling by factor.
func (ts *timeSeries) ScaleBy(factor float64) {
for _, l := range ts.levels {
for i := 0; i < ts.numBuckets; i++ {
l.buckets[i].Multiply(factor)
}
}
ts.total.Multiply(factor)
ts.pending.Multiply(factor)
}
// Range returns the sum of observations added over the specified time range.
// If start or finish times don't fall on bucket boundaries of the same
// level, the result is an approximation.
func (ts *timeSeries) Range(start, finish time.Time) Observable {
return ts.ComputeRange(start, finish, 1)[0]
}
// Recent returns the sum of observations from the last delta.
func (ts *timeSeries) Recent(delta time.Duration) Observable {
now := ts.clock.Time()
return ts.Range(now.Add(-delta), now)
}
// Total returns the total of all observations.
func (ts *timeSeries) Total() Observable {
ts.mergePendingUpdates()
return ts.total
}
// ComputeRange computes a specified number of values into a slice using
// the observations recorded over the specified time period. The return
// values are approximate if the start or finish times don't fall on the
// bucket boundaries at the same level or if the number of buckets spanning
// the range is not an integral multiple of num.
func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
if start.After(finish) {
log.Printf("timeseries: start > finish, %v>%v", start, finish)
return nil
}
if num < 0 {
log.Printf("timeseries: num < 0, %v", num)
return nil
}
results := make([]Observable, num)
for _, l := range ts.levels {
if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
ts.extract(l, start, finish, num, results)
return results
}
}
// Failed to find a level that covers the desired range. So just
// extract from the last level, even if it doesn't cover the entire
// desired range.
ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
return results
}
// RecentList returns the specified number of values in slice over the most
// recent time period of the specified range.
func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
if delta < 0 {
return nil
}
now := ts.clock.Time()
return ts.ComputeRange(now.Add(-delta), now, num)
}
// extract returns a slice of specified number of observations from a given
// level over a given range.
func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
ts.mergePendingUpdates()
srcInterval := l.size
dstInterval := finish.Sub(start) / time.Duration(num)
dstStart := start
srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
srcIndex := 0
// Where should scanning start?
if dstStart.After(srcStart) {
advance := int(dstStart.Sub(srcStart) / srcInterval)
srcIndex += advance
srcStart = srcStart.Add(time.Duration(advance) * srcInterval)
}
// The i'th value is computed as shown below.
// interval = (finish - start)/num
// i'th value = sum of observation in range
// [ start + i * interval,
// start + (i + 1) * interval )
for i := 0; i < num; i++ {
results[i] = ts.resetObservation(results[i])
dstEnd := dstStart.Add(dstInterval)
for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
srcEnd := srcStart.Add(srcInterval)
if srcEnd.After(ts.lastAdd) {
srcEnd = ts.lastAdd
}
if !srcEnd.Before(dstStart) {
srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
// dst completely contains src.
if srcValue != nil {
results[i].Add(srcValue)
}
} else {
// dst partially overlaps src.
overlapStart := maxTime(srcStart, dstStart)
overlapEnd := minTime(srcEnd, dstEnd)
base := srcEnd.Sub(srcStart)
fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
used := ts.provider()
if srcValue != nil {
used.CopyFrom(srcValue)
}
used.Multiply(fraction)
results[i].Add(used)
}
if srcEnd.After(dstEnd) {
break
}
}
srcIndex++
srcStart = srcStart.Add(srcInterval)
}
dstStart = dstStart.Add(dstInterval)
}
}
// resetObservation clears the content so the struct may be reused.
func (ts *timeSeries) resetObservation(observation Observable) Observable {
if observation == nil {
observation = ts.provider()
} else {
observation.Clear()
}
return observation
}
// TimeSeries tracks data at granularities from 1 second to 16 weeks.
type TimeSeries struct {
timeSeries
}
// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
func NewTimeSeries(f func() Observable) *TimeSeries {
return NewTimeSeriesWithClock(f, defaultClockInstance)
}
// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
// assigning timestamps.
func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
ts := new(TimeSeries)
ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
return ts
}
// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
type MinuteHourSeries struct {
timeSeries
}
// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
}
// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
// assigning timestamps.
func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
ts := new(MinuteHourSeries)
ts.timeSeries.init(minuteHourSeriesResolutions, f,
minuteHourSeriesNumBuckets, clock)
return ts
}
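// Minute returns the aggregate of observations from the most recent minute
// (60 one-second buckets at level 0).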
func (ts *MinuteHourSeries) Minute() Observable {
return ts.timeSeries.Latest(0, 60)
}
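// Hour returns the aggregate of observations from the most recent hour
// (60 one-minute buckets at level 1).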
func (ts *MinuteHourSeries) Hour() Observable {
return ts.timeSeries.Latest(1, 60)
}
func minTime(a, b time.Time) time.Time {
if a.Before(b) {
return a
}
return b
}
func maxTime(a, b time.Time) time.Time {
if a.After(b) {
return a
}
return b
}
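// Usage sketch (illustrative; exampleTimeSeriesUsage is a hypothetical
// helper, not part of this package): record Float observations, then query
// recent and total activity.
func exampleTimeSeriesUsage() {
	ts := NewTimeSeries(NewFloat)
	for _, n := range []float64{1, 2, 3} {
		v := Float(n)
		ts.Add(&v) // recorded at the clock's current time
	}
	recent := ts.Recent(time.Minute).(*Float) // aggregate over the last minute
	total := ts.Total().(*Float)              // aggregate over all history
	fmt.Printf("recent=%g total=%g\n", recent.Value(), total.Value())
}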

Binary file not shown.

Binary file not shown.

File diff suppressed because one or more lines are too long

vendor/golang.org/x/net/publicsuffix/list.go generated vendored

@@ -1,203 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go
// Package publicsuffix provides a public suffix list based on data from
// https://publicsuffix.org/
//
// A public suffix is one under which Internet users can directly register
// names. It is related to, but different from, a TLD (top level domain).
//
// "com" is a TLD (top level domain). Top level means it has no dots.
//
// "com" is also a public suffix. Amazon and Google have registered different
// siblings under that domain: "amazon.com" and "google.com".
//
// "au" is another TLD, again because it has no dots. But it's not "amazon.au".
// Instead, it's "amazon.com.au".
//
// "com.au" isn't an actual TLD, because it's not at the top level (it has
// dots). But it is an eTLD (effective TLD), because that's the branching point
// for domain name registrars.
//
// Another name for "an eTLD" is "a public suffix". Often, what's more of
// interest is the eTLD+1, or one more label than the public suffix. For
// example, browsers partition read/write access to HTTP cookies according to
// the eTLD+1. Web pages served from "amazon.com.au" can't read cookies from
// "google.com.au", but web pages served from "maps.google.com" can share
// cookies from "www.google.com", so you don't have to sign into Google Maps
// separately from signing into Google Web Search. Note that all four of those
// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1,
// the last two are not (but share the same eTLD+1: "google.com").
//
// All of these domains have the same eTLD+1:
// - "www.books.amazon.co.uk"
// - "books.amazon.co.uk"
// - "amazon.co.uk"
//
// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk".
//
// There is no closed form algorithm to calculate the eTLD of a domain.
// Instead, the calculation is data driven. This package provides a
// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at
// https://publicsuffix.org/
package publicsuffix // import "golang.org/x/net/publicsuffix"
// TODO: specify case sensitivity and leading/trailing dot behavior for
// func PublicSuffix and func EffectiveTLDPlusOne.
import (
"fmt"
"net/http/cookiejar"
"strings"
)
// List implements the cookiejar.PublicSuffixList interface by calling the
// PublicSuffix function.
var List cookiejar.PublicSuffixList = list{}
type list struct{}
func (list) PublicSuffix(domain string) string {
ps, _ := PublicSuffix(domain)
return ps
}
func (list) String() string {
return version
}
// PublicSuffix returns the public suffix of the domain using a copy of the
// publicsuffix.org database compiled into the library.
//
// icann is whether the public suffix is managed by the Internet Corporation
// for Assigned Names and Numbers. If not, the public suffix is either a
// privately managed domain (and in practice, not a top level domain) or an
// unmanaged top level domain (and not explicitly mentioned in the
// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN
// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and
// "cromulent" is an unmanaged top level domain.
//
// Use cases for distinguishing ICANN domains like "foo.com" from private
// domains like "foo.appspot.com" can be found at
// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases
func PublicSuffix(domain string) (publicSuffix string, icann bool) {
lo, hi := uint32(0), uint32(numTLD)
s, suffix, icannNode, wildcard := domain, len(domain), false, false
loop:
for {
dot := strings.LastIndex(s, ".")
if wildcard {
icann = icannNode
suffix = 1 + dot
}
if lo == hi {
break
}
f := find(s[1+dot:], lo, hi)
if f == notFound {
break
}
u := uint32(nodes.get(f) >> (nodesBitsTextOffset + nodesBitsTextLength))
icannNode = u&(1<<nodesBitsICANN-1) != 0
u >>= nodesBitsICANN
u = children.get(u & (1<<nodesBitsChildren - 1))
lo = u & (1<<childrenBitsLo - 1)
u >>= childrenBitsLo
hi = u & (1<<childrenBitsHi - 1)
u >>= childrenBitsHi
switch u & (1<<childrenBitsNodeType - 1) {
case nodeTypeNormal:
suffix = 1 + dot
case nodeTypeException:
suffix = 1 + len(s)
break loop
}
u >>= childrenBitsNodeType
wildcard = u&(1<<childrenBitsWildcard-1) != 0
if !wildcard {
icann = icannNode
}
if dot == -1 {
break
}
s = s[:dot]
}
if suffix == len(domain) {
// If no rules match, the prevailing rule is "*".
return domain[1+strings.LastIndex(domain, "."):], icann
}
return domain[suffix:], icann
}
const notFound uint32 = 1<<32 - 1
// find returns the index of the node in the range [lo, hi) whose label equals
// label, or notFound if there is no such node. The range is assumed to be in
// strictly increasing node label order.
func find(label string, lo, hi uint32) uint32 {
for lo < hi {
mid := lo + (hi-lo)/2
s := nodeLabel(mid)
if s < label {
lo = mid + 1
} else if s == label {
return mid
} else {
hi = mid
}
}
return notFound
}
// nodeLabel returns the label for the i'th node.
func nodeLabel(i uint32) string {
x := nodes.get(i)
length := x & (1<<nodesBitsTextLength - 1)
x >>= nodesBitsTextLength
offset := x & (1<<nodesBitsTextOffset - 1)
return text[offset : offset+length]
}
// EffectiveTLDPlusOne returns the effective top level domain plus one more
// label. For example, the eTLD+1 for "foo.bar.golang.org" is "golang.org".
func EffectiveTLDPlusOne(domain string) (string, error) {
if strings.HasPrefix(domain, ".") || strings.HasSuffix(domain, ".") || strings.Contains(domain, "..") {
return "", fmt.Errorf("publicsuffix: empty label in domain %q", domain)
}
suffix, _ := PublicSuffix(domain)
if len(domain) <= len(suffix) {
return "", fmt.Errorf("publicsuffix: cannot derive eTLD+1 for domain %q", domain)
}
i := len(domain) - len(suffix) - 1
if domain[i] != '.' {
return "", fmt.Errorf("publicsuffix: invalid public suffix %q for domain %q", suffix, domain)
}
return domain[1+strings.LastIndex(domain[:i], "."):], nil
}
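// Usage sketch (illustrative; examplePublicSuffix is a hypothetical helper):
// the expected values follow the package documentation above.
func examplePublicSuffix() {
	ps, icann := PublicSuffix("www.books.amazon.co.uk")
	fmt.Println(ps, icann) // "co.uk" true

	if etld1, err := EffectiveTLDPlusOne("www.books.amazon.co.uk"); err == nil {
		fmt.Println(etld1) // "amazon.co.uk"
	}
}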
type uint32String string
func (u uint32String) get(i uint32) uint32 {
off := i * 4
return (uint32(u[off])<<24 |
uint32(u[off+1])<<16 |
uint32(u[off+2])<<8 |
uint32(u[off+3]))
}
type uint40String string
func (u uint40String) get(i uint32) uint64 {
off := uint64(i * (nodesBits / 8))
return uint64(u[off])<<32 |
uint64(u[off+1])<<24 |
uint64(u[off+2])<<16 |
uint64(u[off+3])<<8 |
uint64(u[off+4])
}

vendor/golang.org/x/net/publicsuffix/table.go generated vendored

@@ -1,70 +0,0 @@
// generated by go run gen.go; DO NOT EDIT
package publicsuffix
import _ "embed"
const version = "publicsuffix.org's public_suffix_list.dat, git revision 63cbc63d470d7b52c35266aa96c4c98c96ec499c (2023-08-03T10:01:25Z)"
const (
nodesBits = 40
nodesBitsChildren = 10
nodesBitsICANN = 1
nodesBitsTextOffset = 16
nodesBitsTextLength = 6
childrenBitsWildcard = 1
childrenBitsNodeType = 2
childrenBitsHi = 14
childrenBitsLo = 14
)
const (
nodeTypeNormal = 0
nodeTypeException = 1
nodeTypeParentOnly = 2
)
// numTLD is the number of top level domains.
const numTLD = 1474
// text is the combined text of all labels.
//
//go:embed data/text
var text string
// nodes is the list of nodes. Each node is represented as a 40-bit integer,
// which encodes the node's children, wildcard bit and node type (as an index
// into the children array), ICANN bit and text.
//
// The layout within the node, from MSB to LSB, is:
//
// [ 7 bits] unused
// [10 bits] children index
// [ 1 bit ] ICANN bit
// [16 bits] text index
// [ 6 bits] text length
//
//go:embed data/nodes
var nodes uint40String
// children is the list of nodes' children, the parent's wildcard bit and the
// parent's node type. If a node has no children then its children index
// will be in the range [0, 6), depending on the wildcard bit and node type.
//
// The layout within the uint32, from MSB to LSB, is:
//
// [ 1 bit ] unused
// [ 1 bit ] wildcard bit
// [ 2 bits] node type
// [14 bits] high nodes index (exclusive) of children
// [14 bits] low nodes index (inclusive) of children
//
//go:embed data/children
var children uint32String
// max children 743 (capacity 1023)
// max text offset 30876 (capacity 65535)
// max text length 31 (capacity 63)
// max hi 9322 (capacity 16383)
// max lo 9317 (capacity 16383)
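// Decoding sketch (illustrative; decodeNode is a hypothetical helper that
// mirrors the unpacking done in PublicSuffix and nodeLabel above): a node's
// fields are unpacked from the 40-bit integer, LSB first, using the widths
// defined at the top of this file.
func decodeNode(i uint32) (textOffset, textLength, childrenIndex uint64, icann bool) {
	x := nodes.get(i)
	textLength = x & (1<<nodesBitsTextLength - 1)
	x >>= nodesBitsTextLength
	textOffset = x & (1<<nodesBitsTextOffset - 1)
	x >>= nodesBitsTextOffset
	icann = x&(1<<nodesBitsICANN-1) != 0
	x >>= nodesBitsICANN
	childrenIndex = x & (1<<nodesBitsChildren - 1)
	return
}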

vendor/golang.org/x/net/trace/events.go generated vendored Normal file

@@ -0,0 +1,532 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"bytes"
"fmt"
"html/template"
"io"
"log"
"net/http"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"text/tabwriter"
"time"
)
const maxEventsPerLog = 100
type bucket struct {
MaxErrAge time.Duration
String string
}
var buckets = []bucket{
{0, "total"},
{10 * time.Second, "errs<10s"},
{1 * time.Minute, "errs<1m"},
{10 * time.Minute, "errs<10m"},
{1 * time.Hour, "errs<1h"},
{10 * time.Hour, "errs<10h"},
{24000 * time.Hour, "errors"},
}
// RenderEvents renders the HTML page typically served at /debug/events.
// It does not do any auth checking. The request may be nil.
//
// Most users will use the Events handler.
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
now := time.Now()
data := &struct {
Families []string // family names
Buckets []bucket
Counts [][]int // eventLog count per family/bucket
// Set when a bucket has been selected.
Family string
Bucket int
EventLogs eventLogs
Expanded bool
}{
Buckets: buckets,
}
data.Families = make([]string, 0, len(families))
famMu.RLock()
for name := range families {
data.Families = append(data.Families, name)
}
famMu.RUnlock()
sort.Strings(data.Families)
// Count the number of eventLogs in each family for each error age.
data.Counts = make([][]int, len(data.Families))
for i, name := range data.Families {
// TODO(sameer): move this loop under the family lock.
f := getEventFamily(name)
data.Counts[i] = make([]int, len(data.Buckets))
for j, b := range data.Buckets {
data.Counts[i][j] = f.Count(now, b.MaxErrAge)
}
}
if req != nil {
var ok bool
data.Family, data.Bucket, ok = parseEventsArgs(req)
if ok {
data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
}
if data.EventLogs != nil {
defer data.EventLogs.Free()
sort.Sort(data.EventLogs)
}
if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
data.Expanded = exp
}
}
famMu.RLock()
defer famMu.RUnlock()
if err := eventsTmpl().Execute(w, data); err != nil {
log.Printf("net/trace: Failed executing template: %v", err)
}
}
func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
fam, bStr := req.FormValue("fam"), req.FormValue("b")
if fam == "" || bStr == "" {
return "", 0, false
}
b, err := strconv.Atoi(bStr)
if err != nil || b < 0 || b >= len(buckets) {
return "", 0, false
}
return fam, b, true
}
// An EventLog provides a log of events associated with a specific object.
type EventLog interface {
// Printf formats its arguments with fmt.Sprintf and adds the
// result to the event log.
Printf(format string, a ...interface{})
// Errorf is like Printf, but it marks this event as an error.
Errorf(format string, a ...interface{})
// Finish declares that this event log is complete.
// The event log should not be used after calling this method.
Finish()
}
// NewEventLog returns a new EventLog with the specified family name
// and title.
func NewEventLog(family, title string) EventLog {
el := newEventLog()
el.ref()
el.Family, el.Title = family, title
el.Start = time.Now()
el.events = make([]logEntry, 0, maxEventsPerLog)
el.stack = make([]uintptr, 32)
n := runtime.Callers(2, el.stack)
el.stack = el.stack[:n]
getEventFamily(family).add(el)
return el
}
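// Usage sketch (illustrative; exampleEventLog is a hypothetical helper):
// a typical event log lifecycle.
func exampleEventLog() {
	el := NewEventLog("example.Service", "conn 127.0.0.1:1234")
	defer el.Finish() // the log must not be used after Finish

	el.Printf("handshake complete")
	el.Errorf("read failed: %v", io.EOF)
}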
func (el *eventLog) Finish() {
getEventFamily(el.Family).remove(el)
el.unref() // matches ref in New
}
var (
famMu sync.RWMutex
families = make(map[string]*eventFamily) // family name => family
)
func getEventFamily(fam string) *eventFamily {
famMu.Lock()
defer famMu.Unlock()
f := families[fam]
if f == nil {
f = &eventFamily{}
families[fam] = f
}
return f
}
type eventFamily struct {
mu sync.RWMutex
eventLogs eventLogs
}
func (f *eventFamily) add(el *eventLog) {
f.mu.Lock()
f.eventLogs = append(f.eventLogs, el)
f.mu.Unlock()
}
func (f *eventFamily) remove(el *eventLog) {
f.mu.Lock()
defer f.mu.Unlock()
for i, el0 := range f.eventLogs {
if el == el0 {
copy(f.eventLogs[i:], f.eventLogs[i+1:])
f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
return
}
}
}
func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
f.mu.RLock()
defer f.mu.RUnlock()
for _, el := range f.eventLogs {
if el.hasRecentError(now, maxErrAge) {
n++
}
}
return
}
func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
f.mu.RLock()
defer f.mu.RUnlock()
els = make(eventLogs, 0, len(f.eventLogs))
for _, el := range f.eventLogs {
if el.hasRecentError(now, maxErrAge) {
el.ref()
els = append(els, el)
}
}
return
}
type eventLogs []*eventLog
// Free calls unref on each element of the list.
func (els eventLogs) Free() {
for _, el := range els {
el.unref()
}
}
// eventLogs may be sorted in reverse chronological order.
func (els eventLogs) Len() int { return len(els) }
func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
// A logEntry is a timestamped log entry in an event log.
type logEntry struct {
When time.Time
Elapsed time.Duration // since previous event in log
NewDay bool // whether this event is on a different day to the previous event
What string
IsErr bool
}
// WhenString returns a string representation of the event's timestamp.
// It includes the date if the event falls on a different day from the previous one.
func (e logEntry) WhenString() string {
if e.NewDay {
return e.When.Format("2006/01/02 15:04:05.000000")
}
return e.When.Format("15:04:05.000000")
}
// An eventLog represents an active event log.
type eventLog struct {
// Family is the top-level grouping of event logs to which this belongs.
Family string
// Title is the title of this event log.
Title string
// Timing information.
Start time.Time
// Call stack where this event log was created.
stack []uintptr
// Append-only sequence of events.
//
// TODO(sameer): change this to a ring buffer to avoid the array copy
// when we hit maxEventsPerLog.
mu sync.RWMutex
events []logEntry
LastErrorTime time.Time
discarded int
refs int32 // how many buckets this is in
}
func (el *eventLog) reset() {
// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
el.Family = ""
el.Title = ""
el.Start = time.Time{}
el.stack = nil
el.events = nil
el.LastErrorTime = time.Time{}
el.discarded = 0
el.refs = 0
}
func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
if maxErrAge == 0 {
return true
}
el.mu.RLock()
defer el.mu.RUnlock()
return now.Sub(el.LastErrorTime) < maxErrAge
}
// delta returns the elapsed time since the last event or the log start,
// and whether it spans midnight.
// The caller must hold el.mu (read or write).
func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
if len(el.events) == 0 {
return t.Sub(el.Start), false
}
prev := el.events[len(el.events)-1].When
return t.Sub(prev), prev.Day() != t.Day()
}
func (el *eventLog) Printf(format string, a ...interface{}) {
el.printf(false, format, a...)
}
func (el *eventLog) Errorf(format string, a ...interface{}) {
el.printf(true, format, a...)
}
func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
el.mu.Lock()
e.Elapsed, e.NewDay = el.delta(e.When)
if len(el.events) < maxEventsPerLog {
el.events = append(el.events, e)
} else {
// Discard the oldest event.
if el.discarded == 0 {
// el.discarded starts at two to count for the event it
// is replacing, plus the next one that we are about to
// drop.
el.discarded = 2
} else {
el.discarded++
}
// TODO(sameer): if this causes allocations on a critical path,
// change eventLog.What to be a fmt.Stringer, as in trace.go.
el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
// The timestamp of the discarded meta-event should be
// the time of the last event it is representing.
el.events[0].When = el.events[1].When
copy(el.events[1:], el.events[2:])
el.events[maxEventsPerLog-1] = e
}
if e.IsErr {
el.LastErrorTime = e.When
}
el.mu.Unlock()
}
func (el *eventLog) ref() {
atomic.AddInt32(&el.refs, 1)
}
func (el *eventLog) unref() {
if atomic.AddInt32(&el.refs, -1) == 0 {
freeEventLog(el)
}
}
func (el *eventLog) When() string {
return el.Start.Format("2006/01/02 15:04:05.000000")
}
func (el *eventLog) ElapsedTime() string {
elapsed := time.Since(el.Start)
return fmt.Sprintf("%.6f", elapsed.Seconds())
}
func (el *eventLog) Stack() string {
buf := new(bytes.Buffer)
tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
printStackRecord(tw, el.stack)
tw.Flush()
return buf.String()
}
// printStackRecord prints the function + source line information
// for a single stack trace.
// Adapted from runtime/pprof/pprof.go.
func printStackRecord(w io.Writer, stk []uintptr) {
for _, pc := range stk {
f := runtime.FuncForPC(pc)
if f == nil {
continue
}
file, line := f.FileLine(pc)
name := f.Name()
// Hide runtime functions, such as runtime.goexit.
if strings.HasPrefix(name, "runtime.") {
continue
}
fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
}
}
func (el *eventLog) Events() []logEntry {
el.mu.RLock()
defer el.mu.RUnlock()
return el.events
}
// freeEventLogs is a freelist of *eventLog
var freeEventLogs = make(chan *eventLog, 1000)
// newEventLog returns an event log ready to use.
func newEventLog() *eventLog {
select {
case el := <-freeEventLogs:
return el
default:
return new(eventLog)
}
}
// freeEventLog adds el to freeEventLogs if there's room.
// This is non-blocking.
func freeEventLog(el *eventLog) {
el.reset()
select {
case freeEventLogs <- el:
default:
}
}
var eventsTmplCache *template.Template
var eventsTmplOnce sync.Once
func eventsTmpl() *template.Template {
eventsTmplOnce.Do(func() {
eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
"elapsed": elapsed,
"trimSpace": strings.TrimSpace,
}).Parse(eventsHTML))
})
return eventsTmplCache
}
const eventsHTML = `
<html>
<head>
<title>events</title>
</head>
<style type="text/css">
body {
font-family: sans-serif;
}
table#req-status td.family {
padding-right: 2em;
}
table#req-status td.active {
padding-right: 1em;
}
table#req-status td.empty {
color: #aaa;
}
table#reqs {
margin-top: 1em;
}
table#reqs tr.first {
{{if $.Expanded}}font-weight: bold;{{end}}
}
table#reqs td {
font-family: monospace;
}
table#reqs td.when {
text-align: right;
white-space: nowrap;
}
table#reqs td.elapsed {
padding: 0 0.5em;
text-align: right;
white-space: pre;
width: 10em;
}
address {
font-size: smaller;
margin-top: 5em;
}
</style>
<body>
<h1>/debug/events</h1>
<table id="req-status">
{{range $i, $fam := .Families}}
<tr>
<td class="family">{{$fam}}</td>
{{range $j, $bucket := $.Buckets}}
{{$n := index $.Counts $i $j}}
<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
{{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
[{{$n}} {{$bucket.String}}]
{{if $n}}</a>{{end}}
</td>
{{end}}
</tr>{{end}}
</table>
{{if $.EventLogs}}
<hr />
<h3>Family: {{$.Family}}</h3>
{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
[Summary]{{if $.Expanded}}</a>{{end}}
{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
[Expanded]{{if not $.Expanded}}</a>{{end}}
<table id="reqs">
<tr><th>When</th><th>Elapsed</th></tr>
{{range $el := $.EventLogs}}
<tr class="first">
<td class="when">{{$el.When}}</td>
<td class="elapsed">{{$el.ElapsedTime}}</td>
<td>{{$el.Title}}
</tr>
{{if $.Expanded}}
<tr>
<td class="when"></td>
<td class="elapsed"></td>
<td><pre>{{$el.Stack|trimSpace}}</pre></td>
</tr>
{{range $el.Events}}
<tr>
<td class="when">{{.WhenString}}</td>
<td class="elapsed">{{elapsed .Elapsed}}</td>
<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
</tr>
{{end}}
{{end}}
{{end}}
</table>
{{end}}
</body>
</html>
`

vendor/golang.org/x/net/trace/histogram.go generated vendored Normal file

@@ -0,0 +1,365 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
// This file implements histogramming for RPC statistics collection.
import (
"bytes"
"fmt"
"html/template"
"log"
"math"
"sync"
"golang.org/x/net/internal/timeseries"
)
const (
bucketCount = 38
)
// histogram keeps counts of values in buckets that are spaced
// out in powers of 2: 0-1, 2-3, 4-7...
// histogram implements timeseries.Observable
type histogram struct {
sum int64 // running total of measurements
sumOfSquares float64 // running total of squared measurements
buckets []int64 // bucketed values for histogram
value int // bucket index of a single value, held as an optimization
valueCount int64 // number of values recorded for single value
}
// addMeasurement records a value measurement observation to the histogram.
func (h *histogram) addMeasurement(value int64) {
// TODO: assert invariant
h.sum += value
h.sumOfSquares += float64(value) * float64(value)
bucketIndex := getBucket(value)
if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
h.value = bucketIndex
h.valueCount++
} else {
h.allocateBuckets()
h.buckets[bucketIndex]++
}
}
func (h *histogram) allocateBuckets() {
if h.buckets == nil {
h.buckets = make([]int64, bucketCount)
h.buckets[h.value] = h.valueCount
h.value = 0
h.valueCount = -1
}
}
func log2(i int64) int {
n := 0
for ; i >= 0x100; i >>= 8 {
n += 8
}
for ; i > 0; i >>= 1 {
n++
}
return n
}
func getBucket(i int64) (index int) {
index = log2(i) - 1
if index < 0 {
index = 0
}
if index >= bucketCount {
index = bucketCount - 1
}
return
}
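// Bucketing sketch (illustrative; exampleBuckets is a hypothetical helper):
// values fall into power-of-two buckets, and bucketBoundary (defined below)
// gives each bucket's lower bound.
func exampleBuckets() {
	for _, v := range []int64{0, 1, 2, 3, 4, 7, 8} {
		fmt.Printf("value %d -> bucket %d (lower bound %d)\n",
			v, getBucket(v), bucketBoundary(uint8(getBucket(v))))
	}
	// Output:
	// value 0 -> bucket 0 (lower bound 0)
	// value 1 -> bucket 0 (lower bound 0)
	// value 2 -> bucket 1 (lower bound 2)
	// value 3 -> bucket 1 (lower bound 2)
	// value 4 -> bucket 2 (lower bound 4)
	// value 7 -> bucket 2 (lower bound 4)
	// value 8 -> bucket 3 (lower bound 8)
}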
// Total returns the number of recorded observations.
func (h *histogram) total() (total int64) {
if h.valueCount >= 0 {
total = h.valueCount
}
for _, val := range h.buckets {
total += val
}
return
}
// Average returns the average value of recorded observations.
func (h *histogram) average() float64 {
t := h.total()
if t == 0 {
return 0
}
return float64(h.sum) / float64(t)
}
// Variance returns the variance of recorded observations.
func (h *histogram) variance() float64 {
t := float64(h.total())
if t == 0 {
return 0
}
s := float64(h.sum) / t
return h.sumOfSquares/t - s*s
}
// StandardDeviation returns the standard deviation of recorded observations.
func (h *histogram) standardDeviation() float64 {
return math.Sqrt(h.variance())
}
// PercentileBoundary estimates the value that the given fraction of recorded
// observations are less than.
func (h *histogram) percentileBoundary(percentile float64) int64 {
total := h.total()
// Corner cases (make sure result is strictly less than Total())
if total == 0 {
return 0
} else if total == 1 {
return int64(h.average())
}
percentOfTotal := round(float64(total) * percentile)
var runningTotal int64
for i := range h.buckets {
value := h.buckets[i]
runningTotal += value
if runningTotal == percentOfTotal {
// We hit an exact bucket boundary. If the next bucket has data, it is a
// good estimate of the value. If the bucket is empty, we interpolate the
// midpoint between the next bucket's boundary and the next non-zero
// bucket. If the remaining buckets are all empty, then we use the
// boundary for the next bucket as the estimate.
j := uint8(i + 1)
min := bucketBoundary(j)
if runningTotal < total {
for h.buckets[j] == 0 {
j++
}
}
max := bucketBoundary(j)
return min + round(float64(max-min)/2)
} else if runningTotal > percentOfTotal {
// The value is in this bucket. Interpolate the value.
delta := runningTotal - percentOfTotal
percentBucket := float64(value-delta) / float64(value)
bucketMin := bucketBoundary(uint8(i))
nextBucketMin := bucketBoundary(uint8(i + 1))
bucketSize := nextBucketMin - bucketMin
return bucketMin + round(percentBucket*float64(bucketSize))
}
}
return bucketBoundary(bucketCount - 1)
}
// Median returns the estimated median of the observed values.
func (h *histogram) median() int64 {
return h.percentileBoundary(0.5)
}
// Add adds other to h.
func (h *histogram) Add(other timeseries.Observable) {
o := other.(*histogram)
if o.valueCount == 0 {
// Other histogram is empty
} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
// Both have a single bucketed value, aggregate them
h.valueCount += o.valueCount
} else {
// Two different values necessitate buckets in this histogram
h.allocateBuckets()
if o.valueCount >= 0 {
h.buckets[o.value] += o.valueCount
} else {
for i := range h.buckets {
h.buckets[i] += o.buckets[i]
}
}
}
h.sumOfSquares += o.sumOfSquares
h.sum += o.sum
}
// Clear resets the histogram to an empty state, removing all observed values.
func (h *histogram) Clear() {
h.buckets = nil
h.value = 0
h.valueCount = 0
h.sum = 0
h.sumOfSquares = 0
}
// CopyFrom copies from other, which must be a *histogram, into h.
func (h *histogram) CopyFrom(other timeseries.Observable) {
o := other.(*histogram)
if o.valueCount == -1 {
h.allocateBuckets()
copy(h.buckets, o.buckets)
}
h.sum = o.sum
h.sumOfSquares = o.sumOfSquares
h.value = o.value
h.valueCount = o.valueCount
}
// Multiply scales the histogram by the specified ratio.
func (h *histogram) Multiply(ratio float64) {
if h.valueCount == -1 {
for i := range h.buckets {
h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
}
} else {
h.valueCount = int64(float64(h.valueCount) * ratio)
}
h.sum = int64(float64(h.sum) * ratio)
h.sumOfSquares = h.sumOfSquares * ratio
}
// New creates a new histogram.
func (h *histogram) New() timeseries.Observable {
r := new(histogram)
r.Clear()
return r
}
func (h *histogram) String() string {
return fmt.Sprintf("%d, %f, %d, %d, %v",
h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
}
// round returns the closest int64 to the argument
func round(in float64) int64 {
return int64(math.Floor(in + 0.5))
}
// bucketBoundary returns the first value in the bucket.
func bucketBoundary(bucket uint8) int64 {
if bucket == 0 {
return 0
}
return 1 << bucket
}
// bucketData holds data about a specific bucket for use in distTmpl.
type bucketData struct {
Lower, Upper int64
N int64
Pct, CumulativePct float64
GraphWidth int
}
// data holds data about a Distribution for use in distTmpl.
type data struct {
Buckets []*bucketData
Count, Median int64
Mean, StandardDeviation float64
}
// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
const maxHTMLBarWidth = 350.0
// newData returns data representing h for use in distTmpl.
func (h *histogram) newData() *data {
// Force the allocation of buckets to simplify the rendering implementation
h.allocateBuckets()
// We scale the bars on the right so that the largest bar is
// maxHTMLBarWidth pixels in width.
maxBucket := int64(0)
for _, n := range h.buckets {
if n > maxBucket {
maxBucket = n
}
}
total := h.total()
barsizeMult := maxHTMLBarWidth / float64(maxBucket)
var pctMult float64
if total == 0 {
pctMult = 1.0
} else {
pctMult = 100.0 / float64(total)
}
buckets := make([]*bucketData, len(h.buckets))
runningTotal := int64(0)
for i, n := range h.buckets {
if n == 0 {
continue
}
runningTotal += n
var upperBound int64
if i < bucketCount-1 {
upperBound = bucketBoundary(uint8(i + 1))
} else {
upperBound = math.MaxInt64
}
buckets[i] = &bucketData{
Lower: bucketBoundary(uint8(i)),
Upper: upperBound,
N: n,
Pct: float64(n) * pctMult,
CumulativePct: float64(runningTotal) * pctMult,
GraphWidth: int(float64(n) * barsizeMult),
}
}
return &data{
Buckets: buckets,
Count: total,
Median: h.median(),
Mean: h.average(),
StandardDeviation: h.standardDeviation(),
}
}
func (h *histogram) html() template.HTML {
buf := new(bytes.Buffer)
if err := distTmpl().Execute(buf, h.newData()); err != nil {
buf.Reset()
log.Printf("net/trace: couldn't execute template: %v", err)
}
return template.HTML(buf.String())
}
var distTmplCache *template.Template
var distTmplOnce sync.Once
func distTmpl() *template.Template {
distTmplOnce.Do(func() {
// Input: data
distTmplCache = template.Must(template.New("distTmpl").Parse(`
<table>
<tr>
<td style="padding:0.25em">Count: {{.Count}}</td>
<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
<td style="padding:0.25em">Median: {{.Median}}</td>
</tr>
</table>
<hr>
<table>
{{range $b := .Buckets}}
{{if $b}}
<tr>
<td style="padding:0 0 0 0.25em">[</td>
<td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
<td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
<td style="text-align:right;padding:0 0.25em">{{.N}}</td>
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
<td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
</tr>
{{end}}
{{end}}
</table>
`))
})
return distTmplCache
}

vendor/golang.org/x/net/trace/trace.go generated vendored Normal file

File diff suppressed because it is too large