Update vendor
83 vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/filesstore.go generated vendored Normal file
@@ -0,0 +1,83 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package filestore

import (
	"io"
	"time"

	"github.com/pkg/errors"
)

const (
	driverS3    = "amazons3"
	driverLocal = "local"
)

type ReadCloseSeeker interface {
	io.ReadCloser
	io.Seeker
}

type FileBackend interface {
	TestConnection() error

	Reader(path string) (ReadCloseSeeker, error)
	ReadFile(path string) ([]byte, error)
	FileExists(path string) (bool, error)
	FileSize(path string) (int64, error)
	CopyFile(oldPath, newPath string) error
	MoveFile(oldPath, newPath string) error
	WriteFile(fr io.Reader, path string) (int64, error)
	AppendFile(fr io.Reader, path string) (int64, error)
	RemoveFile(path string) error
	FileModTime(path string) (time.Time, error)

	ListDirectory(path string) ([]string, error)
	RemoveDirectory(path string) error
}

type FileBackendSettings struct {
	DriverName              string
	Directory               string
	AmazonS3AccessKeyId     string
	AmazonS3SecretAccessKey string
	AmazonS3Bucket          string
	AmazonS3PathPrefix      string
	AmazonS3Region          string
	AmazonS3Endpoint        string
	AmazonS3SSL             bool
	AmazonS3SignV2          bool
	AmazonS3SSE             bool
	AmazonS3Trace           bool
}

func (settings *FileBackendSettings) CheckMandatoryS3Fields() error {
	if settings.AmazonS3Bucket == "" {
		return errors.New("missing s3 bucket settings")
	}

	// If the S3 endpoint is not set, fall back to the default AWS endpoint.
	if settings.AmazonS3Endpoint == "" {
		settings.AmazonS3Endpoint = "s3.amazonaws.com"
	}

	return nil
}

func NewFileBackend(settings FileBackendSettings) (FileBackend, error) {
	switch settings.DriverName {
	case driverS3:
		backend, err := NewS3FileBackend(settings)
		if err != nil {
			return nil, errors.Wrap(err, "unable to connect to the s3 backend")
		}
		return backend, nil
	case driverLocal:
		return &LocalFileBackend{
			directory: settings.Directory,
		}, nil
	}
	return nil, errors.New("no valid filestorage driver found")
}
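For reference, a minimal usage sketch of the FileBackend factory added above (not part of this commit); the directory path and file name are illustrative assumptions:

package main

import (
	"log"
	"strings"

	"github.com/mattermost/mattermost-server/v6/shared/filestore"
)

func main() {
	// "local" corresponds to driverLocal above; the directory is an assumed example path.
	backend, err := filestore.NewFileBackend(filestore.FileBackendSettings{
		DriverName: "local",
		Directory:  "/tmp/matterbridge-files",
	})
	if err != nil {
		log.Fatal(err)
	}
	// WriteFile streams from any io.Reader and returns the number of bytes written.
	n, err := backend.WriteFile(strings.NewReader("hello"), "notes/hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes", n)
}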
211 vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/localstore.go generated vendored Normal file
@@ -0,0 +1,211 @@
|
||||
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
||||
// See LICENSE.txt for license information.
|
||||
|
||||
package filestore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/mattermost/mattermost-server/v6/shared/mlog"
|
||||
)
|
||||
|
||||
const (
|
||||
TestFilePath = "/testfile"
|
||||
)
|
||||
|
||||
type LocalFileBackend struct {
|
||||
directory string
|
||||
}
|
||||
|
||||
// copyFile will copy a file from src path to dst path.
|
||||
// Overwrites any existing files at dst.
|
||||
// Permissions are copied from file at src to the new file at dst.
|
||||
func copyFile(src, dst string) (err error) {
|
||||
in, err := os.Open(src)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
if err = os.MkdirAll(filepath.Dir(dst), os.ModePerm); err != nil {
|
||||
return
|
||||
}
|
||||
out, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if e := out.Close(); e != nil {
|
||||
err = e
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = out.Sync()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
stat, err := os.Stat(src)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = os.Chmod(dst, stat.Mode())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) TestConnection() error {
|
||||
f := bytes.NewReader([]byte("testingwrite"))
|
||||
if _, err := writeFileLocally(f, filepath.Join(b.directory, TestFilePath)); err != nil {
|
||||
return errors.Wrap(err, "unable to write to the local filesystem storage")
|
||||
}
|
||||
os.Remove(filepath.Join(b.directory, TestFilePath))
|
||||
mlog.Debug("Able to write files to local storage.")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) Reader(path string) (ReadCloseSeeker, error) {
|
||||
f, err := os.Open(filepath.Join(b.directory, path))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to open file %s", path)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) ReadFile(path string) ([]byte, error) {
|
||||
f, err := ioutil.ReadFile(filepath.Join(b.directory, path))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to read file %s", path)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) FileExists(path string) (bool, error) {
|
||||
_, err := os.Stat(filepath.Join(b.directory, path))
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "unable to know if file %s exists", path)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) FileSize(path string) (int64, error) {
|
||||
info, err := os.Stat(filepath.Join(b.directory, path))
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "unable to get file size for %s", path)
|
||||
}
|
||||
return info.Size(), nil
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) FileModTime(path string) (time.Time, error) {
|
||||
info, err := os.Stat(filepath.Join(b.directory, path))
|
||||
if err != nil {
|
||||
return time.Time{}, errors.Wrapf(err, "unable to get modification time for file %s", path)
|
||||
}
|
||||
return info.ModTime(), nil
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) CopyFile(oldPath, newPath string) error {
|
||||
if err := copyFile(filepath.Join(b.directory, oldPath), filepath.Join(b.directory, newPath)); err != nil {
|
||||
return errors.Wrapf(err, "unable to copy file from %s to %s", oldPath, newPath)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) MoveFile(oldPath, newPath string) error {
|
||||
if err := os.MkdirAll(filepath.Dir(filepath.Join(b.directory, newPath)), 0750); err != nil {
|
||||
return errors.Wrapf(err, "unable to create the new destination directory %s", filepath.Dir(newPath))
|
||||
}
|
||||
|
||||
if err := os.Rename(filepath.Join(b.directory, oldPath), filepath.Join(b.directory, newPath)); err != nil {
|
||||
return errors.Wrapf(err, "unable to move the file to %s to the destination directory", newPath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) WriteFile(fr io.Reader, path string) (int64, error) {
|
||||
return writeFileLocally(fr, filepath.Join(b.directory, path))
|
||||
}
|
||||
|
||||
func writeFileLocally(fr io.Reader, path string) (int64, error) {
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil {
|
||||
directory, _ := filepath.Abs(filepath.Dir(path))
|
||||
return 0, errors.Wrapf(err, "unable to create the directory %s for the file %s", directory, path)
|
||||
}
|
||||
fw, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "unable to open the file %s to write the data", path)
|
||||
}
|
||||
defer fw.Close()
|
||||
written, err := io.Copy(fw, fr)
|
||||
if err != nil {
|
||||
return written, errors.Wrapf(err, "unable write the data in the file %s", path)
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) AppendFile(fr io.Reader, path string) (int64, error) {
|
||||
fp := filepath.Join(b.directory, path)
|
||||
if _, err := os.Stat(fp); err != nil {
|
||||
return 0, errors.Wrapf(err, "unable to find the file %s to append the data", path)
|
||||
}
|
||||
fw, err := os.OpenFile(fp, os.O_WRONLY|os.O_APPEND, 0600)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "unable to open the file %s to append the data", path)
|
||||
}
|
||||
defer fw.Close()
|
||||
written, err := io.Copy(fw, fr)
|
||||
if err != nil {
|
||||
return written, errors.Wrapf(err, "unable append the data in the file %s", path)
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) RemoveFile(path string) error {
|
||||
if err := os.Remove(filepath.Join(b.directory, path)); err != nil {
|
||||
return errors.Wrapf(err, "unable to remove the file %s", path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) ListDirectory(path string) ([]string, error) {
|
||||
var paths []string
|
||||
fileInfos, err := ioutil.ReadDir(filepath.Join(b.directory, path))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return paths, nil
|
||||
}
|
||||
return nil, errors.Wrapf(err, "unable to list the directory %s", path)
|
||||
}
|
||||
for _, fileInfo := range fileInfos {
|
||||
paths = append(paths, filepath.Join(path, fileInfo.Name()))
|
||||
}
|
||||
return paths, nil
|
||||
}
|
||||
|
||||
func (b *LocalFileBackend) RemoveDirectory(path string) error {
|
||||
if err := os.RemoveAll(filepath.Join(b.directory, path)); err != nil {
|
||||
return errors.Wrapf(err, "unable to remove the directory %s", path)
|
||||
}
|
||||
return nil
|
||||
}
|
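A short sketch of the read-side methods implemented above (not part of this commit); inspect is a hypothetical helper and the package name is an assumption:

package example // hypothetical package name for this sketch

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/shared/filestore"
)

// inspect exercises the read-only methods implemented by LocalFileBackend
// (and any other FileBackend).
func inspect(backend filestore.FileBackend, path string) error {
	exists, err := backend.FileExists(path)
	if err != nil {
		return err
	}
	if !exists {
		fmt.Printf("%s: not found\n", path)
		return nil
	}
	data, err := backend.ReadFile(path)
	if err != nil {
		return err
	}
	mod, err := backend.FileModTime(path)
	if err != nil {
		return err
	}
	fmt.Printf("%s: %d bytes, modified %s\n", path, len(data), mod)
	return nil
}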
56 vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/s3_overrides.go generated vendored Normal file
@@ -0,0 +1,56 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package filestore

import (
	"context"
	"net/http"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

// customTransport is used to point the request to a different server.
// This is helpful in situations where a different service is handling AWS S3 requests
// from multiple Mattermost applications, and the Mattermost service itself does not
// have any S3 credentials.
type customTransport struct {
	base   http.RoundTripper
	host   string
	scheme string
	client http.Client
}

// RoundTrip implements the http.RoundTripper interface.
func (t *customTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Round trippers should not modify the original request, so work on a clone.
	newReq := req.Clone(context.Background())
	*newReq.URL = *req.URL
	newReq.URL.Scheme = t.scheme
	newReq.URL.Host = t.host
	return t.client.Do(newReq)
}

// customProvider is a dummy credentials provider for the minio client to work
// without actually providing credentials. This is needed with a custom transport
// in cases where the minio client does not actually have credentials with itself,
// rather needs responses from another entity.
//
// It satisfies the credentials.Provider interface.
type customProvider struct {
	isSignV2 bool
}

// Retrieve just returns empty credentials.
func (cp customProvider) Retrieve() (credentials.Value, error) {
	sign := credentials.SignatureV4
	if cp.isSignV2 {
		sign = credentials.SignatureV2
	}
	return credentials.Value{
		SignerType: sign,
	}, nil
}

// IsExpired always returns false.
func (cp customProvider) IsExpired() bool { return false }
442 vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/s3store.go generated vendored Normal file
@@ -0,0 +1,442 @@
|
||||
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
||||
// See LICENSE.txt for license information.
|
||||
|
||||
package filestore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
s3 "github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/mattermost/mattermost-server/v6/shared/mlog"
|
||||
)
|
||||
|
||||
// S3FileBackend contains all necessary information to communicate with
|
||||
// an AWS S3 compatible API backend.
|
||||
type S3FileBackend struct {
|
||||
endpoint string
|
||||
accessKey string
|
||||
secretKey string
|
||||
secure bool
|
||||
signV2 bool
|
||||
region string
|
||||
bucket string
|
||||
pathPrefix string
|
||||
encrypt bool
|
||||
trace bool
|
||||
client *s3.Client
|
||||
}
|
||||
|
||||
type S3FileBackendAuthError struct {
|
||||
DetailedError string
|
||||
}
|
||||
|
||||
// S3FileBackendNoBucketError is returned when testing a connection and no S3 bucket is found
|
||||
type S3FileBackendNoBucketError struct{}
|
||||
|
||||
const (
|
||||
// This is not exported by minio. See: https://github.com/minio/minio-go/issues/1339
|
||||
bucketNotFound = "NoSuchBucket"
|
||||
)
|
||||
|
||||
var (
|
||||
imageExtensions = map[string]bool{".jpg": true, ".jpeg": true, ".gif": true, ".bmp": true, ".png": true, ".tiff": true, ".tif": true}
|
||||
imageMimeTypes = map[string]string{".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".gif": "image/gif", ".bmp": "image/bmp", ".png": "image/png", ".tiff": "image/tiff", ".tif": "image/tif"}
|
||||
)
|
||||
|
||||
func isFileExtImage(ext string) bool {
|
||||
ext = strings.ToLower(ext)
|
||||
return imageExtensions[ext]
|
||||
}
|
||||
|
||||
func getImageMimeType(ext string) string {
|
||||
ext = strings.ToLower(ext)
|
||||
if imageMimeTypes[ext] == "" {
|
||||
return "image"
|
||||
}
|
||||
return imageMimeTypes[ext]
|
||||
}
|
||||
|
||||
func (s *S3FileBackendAuthError) Error() string {
|
||||
return s.DetailedError
|
||||
}
|
||||
|
||||
func (s *S3FileBackendNoBucketError) Error() string {
|
||||
return "no such bucket"
|
||||
}
|
||||
|
||||
// NewS3FileBackend returns an instance of an S3FileBackend.
|
||||
func NewS3FileBackend(settings FileBackendSettings) (*S3FileBackend, error) {
|
||||
backend := &S3FileBackend{
|
||||
endpoint: settings.AmazonS3Endpoint,
|
||||
accessKey: settings.AmazonS3AccessKeyId,
|
||||
secretKey: settings.AmazonS3SecretAccessKey,
|
||||
secure: settings.AmazonS3SSL,
|
||||
signV2: settings.AmazonS3SignV2,
|
||||
region: settings.AmazonS3Region,
|
||||
bucket: settings.AmazonS3Bucket,
|
||||
pathPrefix: settings.AmazonS3PathPrefix,
|
||||
encrypt: settings.AmazonS3SSE,
|
||||
trace: settings.AmazonS3Trace,
|
||||
}
|
||||
cli, err := backend.s3New()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
backend.client = cli
|
||||
return backend, nil
|
||||
}
|
||||
|
||||
// Similar to s3.New() but allows initialization of signature v2 or signature v4 client.
|
||||
// If signV2 input is false, function always returns signature v4.
|
||||
//
|
||||
// Additionally this function also takes a user defined region, if set
|
||||
// disables automatic region lookup.
|
||||
func (b *S3FileBackend) s3New() (*s3.Client, error) {
|
||||
var creds *credentials.Credentials
|
||||
|
||||
isCloud := os.Getenv("MM_CLOUD_FILESTORE_BIFROST") != ""
|
||||
if isCloud {
|
||||
creds = credentials.New(customProvider{isSignV2: b.signV2})
|
||||
} else if b.accessKey == "" && b.secretKey == "" {
|
||||
creds = credentials.NewIAM("")
|
||||
} else if b.signV2 {
|
||||
creds = credentials.NewStatic(b.accessKey, b.secretKey, "", credentials.SignatureV2)
|
||||
} else {
|
||||
creds = credentials.NewStatic(b.accessKey, b.secretKey, "", credentials.SignatureV4)
|
||||
}
|
||||
|
||||
opts := s3.Options{
|
||||
Creds: creds,
|
||||
Secure: b.secure,
|
||||
Region: b.region,
|
||||
}
|
||||
|
||||
// If this is a cloud installation, we override the default transport.
|
||||
if isCloud {
|
||||
tr, err := s3.DefaultTransport(b.secure)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
scheme := "http"
|
||||
if b.secure {
|
||||
scheme = "https"
|
||||
}
|
||||
opts.Transport = &customTransport{
|
||||
base: tr,
|
||||
host: b.endpoint,
|
||||
scheme: scheme,
|
||||
}
|
||||
}
|
||||
|
||||
s3Clnt, err := s3.New(b.endpoint, &opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if b.trace {
|
||||
s3Clnt.TraceOn(os.Stdout)
|
||||
}
|
||||
|
||||
return s3Clnt, nil
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) TestConnection() error {
|
||||
exists := true
|
||||
var err error
|
||||
// If a path prefix is present, we attempt to test the bucket by listing objects under the path
|
||||
// and just checking the first response. This is because the BucketExists call is only at a bucket level
|
||||
// and sometimes the user might only be allowed access to the specified path prefix.
|
||||
if b.pathPrefix != "" {
|
||||
obj := <-b.client.ListObjects(context.Background(), b.bucket, s3.ListObjectsOptions{Prefix: b.pathPrefix})
|
||||
if obj.Err != nil {
|
||||
typedErr := s3.ToErrorResponse(obj.Err)
|
||||
if typedErr.Code != bucketNotFound {
|
||||
return &S3FileBackendAuthError{DetailedError: "unable to list objects in the S3 bucket"}
|
||||
}
|
||||
exists = false
|
||||
}
|
||||
} else {
|
||||
exists, err = b.client.BucketExists(context.Background(), b.bucket)
|
||||
if err != nil {
|
||||
return &S3FileBackendAuthError{DetailedError: "unable to check if the S3 bucket exists"}
|
||||
}
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return &S3FileBackendNoBucketError{}
|
||||
}
|
||||
mlog.Debug("Connection to S3 or minio is good. Bucket exists.")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) MakeBucket() error {
|
||||
err := b.client.MakeBucket(context.Background(), b.bucket, s3.MakeBucketOptions{Region: b.region})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to create the s3 bucket")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Caller must close the first return value
|
||||
func (b *S3FileBackend) Reader(path string) (ReadCloseSeeker, error) {
|
||||
path = filepath.Join(b.pathPrefix, path)
|
||||
minioObject, err := b.client.GetObject(context.Background(), b.bucket, path, s3.GetObjectOptions{})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to open file %s", path)
|
||||
}
|
||||
|
||||
return minioObject, nil
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) ReadFile(path string) ([]byte, error) {
|
||||
path = filepath.Join(b.pathPrefix, path)
|
||||
minioObject, err := b.client.GetObject(context.Background(), b.bucket, path, s3.GetObjectOptions{})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to open file %s", path)
|
||||
}
|
||||
|
||||
defer minioObject.Close()
|
||||
f, err := ioutil.ReadAll(minioObject)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to read file %s", path)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) FileExists(path string) (bool, error) {
|
||||
path = filepath.Join(b.pathPrefix, path)
|
||||
|
||||
_, err := b.client.StatObject(context.Background(), b.bucket, path, s3.StatObjectOptions{})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
var s3Err s3.ErrorResponse
|
||||
if errors.As(err, &s3Err); s3Err.Code == "NoSuchKey" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, errors.Wrapf(err, "unable to know if file %s exists", path)
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) FileSize(path string) (int64, error) {
|
||||
path = filepath.Join(b.pathPrefix, path)
|
||||
|
||||
info, err := b.client.StatObject(context.Background(), b.bucket, path, s3.StatObjectOptions{})
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "unable to get file size for %s", path)
|
||||
}
|
||||
|
||||
return info.Size, nil
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) FileModTime(path string) (time.Time, error) {
|
||||
path = filepath.Join(b.pathPrefix, path)
|
||||
|
||||
info, err := b.client.StatObject(context.Background(), b.bucket, path, s3.StatObjectOptions{})
|
||||
if err != nil {
|
||||
return time.Time{}, errors.Wrapf(err, "unable to get modification time for file %s", path)
|
||||
}
|
||||
|
||||
return info.LastModified, nil
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) CopyFile(oldPath, newPath string) error {
|
||||
oldPath = filepath.Join(b.pathPrefix, oldPath)
|
||||
newPath = filepath.Join(b.pathPrefix, newPath)
|
||||
srcOpts := s3.CopySrcOptions{
|
||||
Bucket: b.bucket,
|
||||
Object: oldPath,
|
||||
Encryption: encrypt.NewSSE(),
|
||||
}
|
||||
dstOpts := s3.CopyDestOptions{
|
||||
Bucket: b.bucket,
|
||||
Object: newPath,
|
||||
Encryption: encrypt.NewSSE(),
|
||||
}
|
||||
if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
|
||||
return errors.Wrapf(err, "unable to copy file from %s to %s", oldPath, newPath)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) MoveFile(oldPath, newPath string) error {
|
||||
oldPath = filepath.Join(b.pathPrefix, oldPath)
|
||||
newPath = filepath.Join(b.pathPrefix, newPath)
|
||||
srcOpts := s3.CopySrcOptions{
|
||||
Bucket: b.bucket,
|
||||
Object: oldPath,
|
||||
Encryption: encrypt.NewSSE(),
|
||||
}
|
||||
dstOpts := s3.CopyDestOptions{
|
||||
Bucket: b.bucket,
|
||||
Object: newPath,
|
||||
Encryption: encrypt.NewSSE(),
|
||||
}
|
||||
|
||||
if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
|
||||
return errors.Wrapf(err, "unable to copy the file to %s to the new destionation", newPath)
|
||||
}
|
||||
|
||||
if err := b.client.RemoveObject(context.Background(), b.bucket, oldPath, s3.RemoveObjectOptions{}); err != nil {
|
||||
return errors.Wrapf(err, "unable to remove the file old file %s", oldPath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) WriteFile(fr io.Reader, path string) (int64, error) {
|
||||
var contentType string
|
||||
path = filepath.Join(b.pathPrefix, path)
|
||||
if ext := filepath.Ext(path); isFileExtImage(ext) {
|
||||
contentType = getImageMimeType(ext)
|
||||
} else {
|
||||
contentType = "binary/octet-stream"
|
||||
}
|
||||
|
||||
options := s3PutOptions(b.encrypt, contentType)
|
||||
info, err := b.client.PutObject(context.Background(), b.bucket, path, fr, -1, options)
|
||||
if err != nil {
|
||||
return info.Size, errors.Wrapf(err, "unable write the data in the file %s", path)
|
||||
}
|
||||
|
||||
return info.Size, nil
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) AppendFile(fr io.Reader, path string) (int64, error) {
|
||||
fp := filepath.Join(b.pathPrefix, path)
|
||||
if _, err := b.client.StatObject(context.Background(), b.bucket, fp, s3.StatObjectOptions{}); err != nil {
|
||||
return 0, errors.Wrapf(err, "unable to find the file %s to append the data", path)
|
||||
}
|
||||
|
||||
var contentType string
|
||||
if ext := filepath.Ext(fp); isFileExtImage(ext) {
|
||||
contentType = getImageMimeType(ext)
|
||||
} else {
|
||||
contentType = "binary/octet-stream"
|
||||
}
|
||||
|
||||
options := s3PutOptions(b.encrypt, contentType)
|
||||
sse := options.ServerSideEncryption
|
||||
partName := fp + ".part"
|
||||
info, err := b.client.PutObject(context.Background(), b.bucket, partName, fr, -1, options)
|
||||
defer b.client.RemoveObject(context.Background(), b.bucket, partName, s3.RemoveObjectOptions{})
|
||||
if info.Size > 0 {
|
||||
src1Opts := s3.CopySrcOptions{
|
||||
Bucket: b.bucket,
|
||||
Object: fp,
|
||||
}
|
||||
src2Opts := s3.CopySrcOptions{
|
||||
Bucket: b.bucket,
|
||||
Object: partName,
|
||||
}
|
||||
dstOpts := s3.CopyDestOptions{
|
||||
Bucket: b.bucket,
|
||||
Object: fp,
|
||||
Encryption: sse,
|
||||
}
|
||||
_, err = b.client.ComposeObject(context.Background(), dstOpts, src1Opts, src2Opts)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "unable append the data in the file %s", path)
|
||||
}
|
||||
return info.Size, nil
|
||||
}
|
||||
|
||||
return 0, errors.Wrapf(err, "unable append the data in the file %s", path)
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) RemoveFile(path string) error {
|
||||
path = filepath.Join(b.pathPrefix, path)
|
||||
if err := b.client.RemoveObject(context.Background(), b.bucket, path, s3.RemoveObjectOptions{}); err != nil {
|
||||
return errors.Wrapf(err, "unable to remove the file %s", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPathsFromObjectInfos(in <-chan s3.ObjectInfo) <-chan s3.ObjectInfo {
|
||||
out := make(chan s3.ObjectInfo, 1)
|
||||
|
||||
go func() {
|
||||
defer close(out)
|
||||
|
||||
for {
|
||||
info, done := <-in
|
||||
|
||||
if !done {
|
||||
break
|
||||
}
|
||||
|
||||
out <- info
|
||||
}
|
||||
}()
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) ListDirectory(path string) ([]string, error) {
|
||||
path = filepath.Join(b.pathPrefix, path)
|
||||
if !strings.HasSuffix(path, "/") && path != "" {
|
||||
// s3Clnt returns only the path itself when "/" is not present
|
||||
// appending "/" to make it consistent across all filestores
|
||||
path = path + "/"
|
||||
}
|
||||
|
||||
opts := s3.ListObjectsOptions{
|
||||
Prefix: path,
|
||||
}
|
||||
var paths []string
|
||||
for object := range b.client.ListObjects(context.Background(), b.bucket, opts) {
|
||||
if object.Err != nil {
|
||||
return nil, errors.Wrapf(object.Err, "unable to list the directory %s", path)
|
||||
}
|
||||
// We strip the path prefix that gets applied,
|
||||
// so that it remains transparent to the application.
|
||||
object.Key = strings.TrimPrefix(object.Key, b.pathPrefix)
|
||||
trimmed := strings.Trim(object.Key, "/")
|
||||
if trimmed != "" {
|
||||
paths = append(paths, trimmed)
|
||||
}
|
||||
}
|
||||
|
||||
return paths, nil
|
||||
}
|
||||
|
||||
func (b *S3FileBackend) RemoveDirectory(path string) error {
|
||||
opts := s3.ListObjectsOptions{
|
||||
Prefix: filepath.Join(b.pathPrefix, path),
|
||||
Recursive: true,
|
||||
}
|
||||
list := b.client.ListObjects(context.Background(), b.bucket, opts)
|
||||
objectsCh := b.client.RemoveObjects(context.Background(), b.bucket, getPathsFromObjectInfos(list), s3.RemoveObjectsOptions{})
|
||||
for err := range objectsCh {
|
||||
if err.Err != nil {
|
||||
return errors.Wrapf(err.Err, "unable to remove the directory %s", path)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func s3PutOptions(encrypted bool, contentType string) s3.PutObjectOptions {
|
||||
options := s3.PutObjectOptions{}
|
||||
if encrypted {
|
||||
options.ServerSideEncryption = encrypt.NewSSE()
|
||||
}
|
||||
options.ContentType = contentType
|
||||
// We set the part size to the minimum allowed value of 5MB
|
||||
// to avoid an excessive allocation in minio.PutObject implementation.
|
||||
options.PartSize = 1024 * 1024 * 5
|
||||
|
||||
return options
|
||||
}
|
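A hedged configuration sketch for the S3 driver added above (not part of this commit); every value below is a placeholder, not a real endpoint or credential:

package main

import (
	"log"

	"github.com/mattermost/mattermost-server/v6/shared/filestore"
)

func main() {
	// "amazons3" corresponds to driverS3 in filesstore.go; all field values are placeholders.
	backend, err := filestore.NewFileBackend(filestore.FileBackendSettings{
		DriverName:              "amazons3",
		AmazonS3AccessKeyId:     "EXAMPLEKEY",
		AmazonS3SecretAccessKey: "EXAMPLESECRET",
		AmazonS3Bucket:          "example-bucket",
		AmazonS3Region:          "us-east-1",
		AmazonS3Endpoint:        "s3.amazonaws.com",
		AmazonS3SSL:             true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// TestConnection checks bucket (or path-prefix) access as implemented above.
	if err := backend.TestConnection(); err != nil {
		log.Fatal(err)
	}
	log.Println("S3 backend reachable")
}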
185 vendor/github.com/mattermost/mattermost-server/v6/shared/i18n/i18n.go generated vendored Normal file
@@ -0,0 +1,185 @@
|
||||
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
||||
// See LICENSE.txt for license information.
|
||||
|
||||
package i18n
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/mattermost/go-i18n/i18n"
|
||||
|
||||
"github.com/mattermost/mattermost-server/v6/shared/mlog"
|
||||
)
|
||||
|
||||
const defaultLocale = "en"
|
||||
|
||||
// TranslateFunc is the type of the translate functions
|
||||
type TranslateFunc func(translationID string, args ...interface{}) string
|
||||
|
||||
// T is the translate function using the default server language as fallback language
|
||||
var T TranslateFunc
|
||||
|
||||
// TDefault is the translate function using english as fallback language
|
||||
var TDefault TranslateFunc
|
||||
|
||||
var locales map[string]string = make(map[string]string)
|
||||
var defaultServerLocale string
|
||||
var defaultClientLocale string
|
||||
|
||||
// TranslationsPreInit loads translations from filesystem if they are not
|
||||
// loaded already and assigns English while loading the server config
|
||||
func TranslationsPreInit(translationsDir string) error {
|
||||
if T != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set T even if we fail to load the translations. Lots of shutdown handling code will
|
||||
// segfault trying to handle the error, and the untranslated IDs are strictly better.
|
||||
T = tfuncWithFallback(defaultLocale)
|
||||
TDefault = tfuncWithFallback(defaultLocale)
|
||||
|
||||
return initTranslationsWithDir(translationsDir)
|
||||
}
|
||||
|
||||
// InitTranslations sets the defaults configured in the server and initializes
|
||||
// the T function using the server default as fallback language
|
||||
func InitTranslations(serverLocale, clientLocale string) error {
|
||||
defaultServerLocale = serverLocale
|
||||
defaultClientLocale = clientLocale
|
||||
|
||||
var err error
|
||||
T, err = getTranslationsBySystemLocale()
|
||||
return err
|
||||
}
|
||||
|
||||
func initTranslationsWithDir(dir string) error {
|
||||
files, _ := ioutil.ReadDir(dir)
|
||||
for _, f := range files {
|
||||
if filepath.Ext(f.Name()) == ".json" {
|
||||
filename := f.Name()
|
||||
locales[strings.Split(filename, ".")[0]] = filepath.Join(dir, filename)
|
||||
|
||||
if err := i18n.LoadTranslationFile(filepath.Join(dir, filename)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getTranslationsBySystemLocale() (TranslateFunc, error) {
|
||||
locale := defaultServerLocale
|
||||
if _, ok := locales[locale]; !ok {
|
||||
mlog.Warn("Failed to load system translations for", mlog.String("locale", locale), mlog.String("attempting to fall back to default locale", defaultLocale))
|
||||
locale = defaultLocale
|
||||
}
|
||||
|
||||
if locales[locale] == "" {
|
||||
return nil, fmt.Errorf("failed to load system translations for '%v'", defaultLocale)
|
||||
}
|
||||
|
||||
translations := tfuncWithFallback(locale)
|
||||
if translations == nil {
|
||||
return nil, fmt.Errorf("failed to load system translations")
|
||||
}
|
||||
|
||||
mlog.Info("Loaded system translations", mlog.String("for locale", locale), mlog.String("from locale", locales[locale]))
|
||||
return translations, nil
|
||||
}
|
||||
|
||||
// GetUserTranslations gets the translation function for a specific locale
|
||||
func GetUserTranslations(locale string) TranslateFunc {
|
||||
if _, ok := locales[locale]; !ok {
|
||||
locale = defaultLocale
|
||||
}
|
||||
|
||||
translations := tfuncWithFallback(locale)
|
||||
return translations
|
||||
}
|
||||
|
||||
// GetTranslationsAndLocaleFromRequest returns the translation function and the
|
||||
// locale based on the request headers
|
||||
func GetTranslationsAndLocaleFromRequest(r *http.Request) (TranslateFunc, string) {
|
||||
// This is for checking against locales like pt_BR or zn_CN
|
||||
headerLocaleFull := strings.Split(r.Header.Get("Accept-Language"), ",")[0]
|
||||
// This is for checking against locales like en, es
|
||||
headerLocale := strings.Split(strings.Split(r.Header.Get("Accept-Language"), ",")[0], "-")[0]
|
||||
defaultLocale := defaultClientLocale
|
||||
if locales[headerLocaleFull] != "" {
|
||||
translations := tfuncWithFallback(headerLocaleFull)
|
||||
return translations, headerLocaleFull
|
||||
} else if locales[headerLocale] != "" {
|
||||
translations := tfuncWithFallback(headerLocale)
|
||||
return translations, headerLocale
|
||||
} else if locales[defaultLocale] != "" {
|
||||
translations := tfuncWithFallback(defaultLocale)
|
||||
return translations, headerLocale
|
||||
}
|
||||
|
||||
translations := tfuncWithFallback(defaultLocale)
|
||||
return translations, defaultLocale
|
||||
}
|
||||
|
||||
// GetSupportedLocales returns a map of locale codes to the file paths with the
|
||||
// translations
|
||||
func GetSupportedLocales() map[string]string {
|
||||
return locales
|
||||
}
|
||||
|
||||
func tfuncWithFallback(pref string) TranslateFunc {
|
||||
t, _ := i18n.Tfunc(pref)
|
||||
return func(translationID string, args ...interface{}) string {
|
||||
if translated := t(translationID, args...); translated != translationID {
|
||||
return translated
|
||||
}
|
||||
|
||||
t, _ := i18n.Tfunc(defaultLocale)
|
||||
return t(translationID, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// TranslateAsHTML translates the translationID provided and returns a
|
||||
// template.HTML object
|
||||
func TranslateAsHTML(t TranslateFunc, translationID string, args map[string]interface{}) template.HTML {
|
||||
message := t(translationID, escapeForHTML(args))
|
||||
message = strings.Replace(message, "[[", "<strong>", -1)
|
||||
message = strings.Replace(message, "]]", "</strong>", -1)
|
||||
return template.HTML(message)
|
||||
}
|
||||
|
||||
func escapeForHTML(arg interface{}) interface{} {
|
||||
switch typedArg := arg.(type) {
|
||||
case string:
|
||||
return template.HTMLEscapeString(typedArg)
|
||||
case *string:
|
||||
return template.HTMLEscapeString(*typedArg)
|
||||
case map[string]interface{}:
|
||||
safeArg := make(map[string]interface{}, len(typedArg))
|
||||
for key, value := range typedArg {
|
||||
safeArg[key] = escapeForHTML(value)
|
||||
}
|
||||
return safeArg
|
||||
default:
|
||||
mlog.Warn(
|
||||
"Unable to escape value for HTML template",
|
||||
mlog.Any("html_template", arg),
|
||||
mlog.String("template_type", reflect.ValueOf(arg).Type().String()),
|
||||
)
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// IdentityTfunc returns a translation function that doesn't translate and only
|
||||
// returns the same id
|
||||
func IdentityTfunc() TranslateFunc {
|
||||
return func(translationID string, args ...interface{}) string {
|
||||
return translationID
|
||||
}
|
||||
}
|
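A minimal sketch of how the exported i18n functions above fit together (not part of this commit), assuming a directory of <locale>.json files; the directory path and translation ID are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/mattermost/mattermost-server/v6/shared/i18n"
)

func main() {
	// Load the translation files and set the English fallback.
	if err := i18n.TranslationsPreInit("./i18n"); err != nil {
		log.Fatal(err)
	}
	// Use "en" as both the server and client default locale.
	if err := i18n.InitTranslations("en", "en"); err != nil {
		log.Fatal(err)
	}
	// Unknown locales fall back to the default locale.
	T := i18n.GetUserTranslations("es")
	fmt.Println(T("example.translation.id"))
}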
255 vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/autolink.go generated vendored Normal file
@@ -0,0 +1,255 @@
|
||||
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
||||
// See LICENSE.txt for license information.
|
||||
|
||||
package markdown
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Based off of extensions/autolink.c from https://github.com/github/cmark
|
||||
|
||||
var (
|
||||
DefaultURLSchemes = []string{"http", "https", "ftp", "mailto", "tel"}
|
||||
wwwAutoLinkRegex = regexp.MustCompile(`^www\d{0,3}\.`)
|
||||
)
|
||||
|
||||
// Given a string with a w at the given position, tries to parse and return a range containing a www link
|
||||
// if one exists. If the text at the given position isn't a link, returns an empty string. Equivalent to
|
||||
// www_match from the reference code.
|
||||
func parseWWWAutolink(data string, position int) (Range, bool) {
|
||||
// Check that this isn't part of another word
|
||||
if position > 1 {
|
||||
prevChar := data[position-1]
|
||||
|
||||
if !isWhitespaceByte(prevChar) && !isAllowedBeforeWWWLink(prevChar) {
|
||||
return Range{}, false
|
||||
}
|
||||
}
|
||||
|
||||
// Check that this starts with www
|
||||
if len(data)-position < 4 || !wwwAutoLinkRegex.MatchString(data[position:]) {
|
||||
return Range{}, false
|
||||
}
|
||||
|
||||
end := checkDomain(data[position:], false)
|
||||
if end == 0 {
|
||||
return Range{}, false
|
||||
}
|
||||
|
||||
end += position
|
||||
|
||||
// Grab all text until the end of the string or the next whitespace character
|
||||
for end < len(data) && !isWhitespaceByte(data[end]) {
|
||||
end += 1
|
||||
}
|
||||
|
||||
// Trim trailing punctuation
|
||||
end = trimTrailingCharactersFromLink(data, position, end)
|
||||
if position == end {
|
||||
return Range{}, false
|
||||
}
|
||||
|
||||
return Range{position, end}, true
|
||||
}
|
||||
|
||||
func isAllowedBeforeWWWLink(c byte) bool {
|
||||
switch c {
|
||||
case '*', '_', '~', ')':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Given a string with a : at the given position, tries to parse and return a range containing a URL scheme
|
||||
// if one exists. If the text around the given position isn't a link, returns an empty string. Equivalent to
|
||||
// url_match from the reference code.
|
||||
func parseURLAutolink(data string, position int) (Range, bool) {
|
||||
// Check that a :// exists. This doesn't match the clients that treat the slashes as optional.
|
||||
if len(data)-position < 4 || data[position+1] != '/' || data[position+2] != '/' {
|
||||
return Range{}, false
|
||||
}
|
||||
|
||||
start := position - 1
|
||||
for start > 0 && isAlphanumericByte(data[start-1]) {
|
||||
start -= 1
|
||||
}
|
||||
|
||||
if start < 0 || position >= len(data) {
|
||||
return Range{}, false
|
||||
}
|
||||
|
||||
// Ensure that the URL scheme is allowed and that at least one character after the scheme is valid.
|
||||
scheme := data[start:position]
|
||||
if !isSchemeAllowed(scheme) || !isValidHostCharacter(data[position+3:]) {
|
||||
return Range{}, false
|
||||
}
|
||||
|
||||
end := checkDomain(data[position+3:], true)
|
||||
if end == 0 {
|
||||
return Range{}, false
|
||||
}
|
||||
|
||||
end += position
|
||||
|
||||
// Grab all text until the end of the string or the next whitespace character
|
||||
for end < len(data) && !isWhitespaceByte(data[end]) {
|
||||
end += 1
|
||||
}
|
||||
|
||||
// Trim trailing punctuation
|
||||
end = trimTrailingCharactersFromLink(data, start, end)
|
||||
if start == end {
|
||||
return Range{}, false
|
||||
}
|
||||
|
||||
return Range{start, end}, true
|
||||
}
|
||||
|
||||
func isSchemeAllowed(scheme string) bool {
|
||||
// Note that this doesn't support the custom URL schemes implemented by the client
|
||||
for _, allowed := range DefaultURLSchemes {
|
||||
if strings.EqualFold(allowed, scheme) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Given a string starting with a URL, returns the number of valid characters that make up the URL's domain.
|
||||
// Returns 0 if the string doesn't start with a domain name. allowShort determines whether or not the domain
|
||||
// needs to contain a period to be considered valid. Equivalent to check_domain from the reference code.
|
||||
func checkDomain(data string, allowShort bool) int {
|
||||
foundUnderscore := false
|
||||
foundPeriod := false
|
||||
|
||||
i := 1
|
||||
for ; i < len(data)-1; i++ {
|
||||
if data[i] == '_' {
|
||||
foundUnderscore = true
|
||||
break
|
||||
} else if data[i] == '.' {
|
||||
foundPeriod = true
|
||||
} else if !isValidHostCharacter(data[i:]) && data[i] != '-' {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if foundUnderscore {
|
||||
return 0
|
||||
}
|
||||
|
||||
if allowShort {
|
||||
// If allowShort is set, accept any string of valid domain characters
|
||||
return i
|
||||
}
|
||||
|
||||
// If allowShort isn't set, a valid domain just requires at least a single period. Note that this
|
||||
// logic isn't entirely necessary because we already know the string starts with "www." when
|
||||
// this is called from parseWWWAutolink
|
||||
if foundPeriod {
|
||||
return i
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Returns true if the provided link starts with a valid character for a domain name. Equivalent to
|
||||
// is_valid_hostchar from the reference code.
|
||||
func isValidHostCharacter(link string) bool {
|
||||
c, _ := utf8.DecodeRuneInString(link)
|
||||
if c == utf8.RuneError {
|
||||
return false
|
||||
}
|
||||
|
||||
return !unicode.IsSpace(c) && !unicode.IsPunct(c)
|
||||
}
|
||||
|
||||
// Removes any trailing characters such as punctuation or stray brackets that shouldn't be part of the link.
|
||||
// Returns a new end position for the link. Equivalent to autolink_delim from the reference code.
|
||||
func trimTrailingCharactersFromLink(markdown string, start int, end int) int {
|
||||
runes := []rune(markdown[start:end])
|
||||
linkEnd := len(runes)
|
||||
|
||||
// Cut off the link before an open angle bracket if it contains one
|
||||
for i, c := range runes {
|
||||
if c == '<' {
|
||||
linkEnd = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for linkEnd > 0 {
|
||||
c := runes[linkEnd-1]
|
||||
|
||||
if !canEndAutolink(c) {
|
||||
// Trim trailing quotes, periods, etc
|
||||
linkEnd = linkEnd - 1
|
||||
} else if c == ';' {
|
||||
// Trim a trailing HTML entity
|
||||
newEnd := linkEnd - 2
|
||||
|
||||
for newEnd > 0 && ((runes[newEnd] >= 'a' && runes[newEnd] <= 'z') || (runes[newEnd] >= 'A' && runes[newEnd] <= 'Z')) {
|
||||
newEnd -= 1
|
||||
}
|
||||
|
||||
if newEnd < linkEnd-2 && runes[newEnd] == '&' {
|
||||
linkEnd = newEnd
|
||||
} else {
|
||||
// This isn't actually an HTML entity, so just trim the semicolon
|
||||
linkEnd = linkEnd - 1
|
||||
}
|
||||
} else if c == ')' {
|
||||
// Only allow an autolink ending with a bracket if that bracket is part of a matching pair of brackets.
|
||||
// If there are more closing brackets than opening ones, remove the extra bracket
|
||||
|
||||
numClosing := 0
|
||||
numOpening := 0
|
||||
|
||||
// Examples (input text => output linked portion):
|
||||
//
|
||||
// http://www.pokemon.com/Pikachu_(Electric)
|
||||
// => http://www.pokemon.com/Pikachu_(Electric)
|
||||
//
|
||||
// http://www.pokemon.com/Pikachu_((Electric)
|
||||
// => http://www.pokemon.com/Pikachu_((Electric)
|
||||
//
|
||||
// http://www.pokemon.com/Pikachu_(Electric))
|
||||
// => http://www.pokemon.com/Pikachu_(Electric)
|
||||
//
|
||||
// http://www.pokemon.com/Pikachu_((Electric))
|
||||
// => http://www.pokemon.com/Pikachu_((Electric))
|
||||
|
||||
for i := 0; i < linkEnd; i++ {
|
||||
if runes[i] == '(' {
|
||||
numOpening += 1
|
||||
} else if runes[i] == ')' {
|
||||
numClosing += 1
|
||||
}
|
||||
}
|
||||
|
||||
if numClosing <= numOpening {
|
||||
// There's fewer or equal closing brackets, so we've found the end of the link
|
||||
break
|
||||
}
|
||||
|
||||
linkEnd -= 1
|
||||
} else {
|
||||
// There's no special characters at the end of the link, so we're at the end
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return start + len(string(runes[:linkEnd]))
|
||||
}
|
||||
|
||||
func canEndAutolink(c rune) bool {
|
||||
switch c {
|
||||
case '?', '!', '.', ',', ':', '*', '_', '~', '\'', '"':
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
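An in-package, test-style sketch of the two autolink helpers above (not part of this commit); the inputs and expected ranges are illustrative and follow the parsing rules in this file, not the upstream test suite:

package markdown

import "testing"

func TestAutolinkSketch(t *testing.T) {
	text := "see www.example.com for details"
	// The position argument points at the leading 'w' of "www".
	r, ok := parseWWWAutolink(text, 4)
	if !ok || text[r.Position:r.End] != "www.example.com" {
		t.Fatalf("www autolink: got %v %q", ok, text[r.Position:r.End])
	}

	text = "docs at https://example.com/page."
	// The position argument points at the ':' that ends the scheme;
	// the trailing period is trimmed by trimTrailingCharactersFromLink.
	r, ok = parseURLAutolink(text, 13)
	if !ok || text[r.Position:r.End] != "https://example.com/page" {
		t.Fatalf("url autolink: got %v %q", ok, text[r.Position:r.End])
	}
}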
62 vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/block_quote.go generated vendored Normal file
@@ -0,0 +1,62 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package markdown

type BlockQuote struct {
	blockBase
	markdown string

	Children []Block
}

func (b *BlockQuote) Continuation(indentation int, r Range) *continuation {
	if indentation > 3 {
		return nil
	}
	s := b.markdown[r.Position:r.End]
	if s == "" || s[0] != '>' {
		return nil
	}
	remaining := Range{r.Position + 1, r.End}
	indentation, indentationBytes := countIndentation(b.markdown, remaining)
	if indentation > 0 {
		indentation--
	}
	return &continuation{
		Indentation: indentation,
		Remaining:   Range{remaining.Position + indentationBytes, remaining.End},
	}
}

func (b *BlockQuote) AddChild(openBlocks []Block) []Block {
	b.Children = append(b.Children, openBlocks[0])
	return openBlocks
}

func blockQuoteStart(markdown string, indent int, r Range) []Block {
	if indent > 3 {
		return nil
	}
	s := markdown[r.Position:r.End]
	if s == "" || s[0] != '>' {
		return nil
	}

	block := &BlockQuote{
		markdown: markdown,
	}
	r.Position++
	if len(s) > 1 && s[1] == ' ' {
		r.Position++
	}

	indent, bytes := countIndentation(markdown, r)

	ret := []Block{block}
	if descendants := blockStartOrParagraph(markdown, indent, Range{r.Position + bytes, r.End}, nil, nil); descendants != nil {
		block.Children = append(block.Children, descendants[0])
		ret = append(ret, descendants...)
	}
	return ret
}
154 vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/blocks.go generated vendored Normal file
@@ -0,0 +1,154 @@
|
||||
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
||||
// See LICENSE.txt for license information.
|
||||
|
||||
package markdown
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
type continuation struct {
|
||||
Indentation int
|
||||
Remaining Range
|
||||
}
|
||||
|
||||
type Block interface {
|
||||
Continuation(indentation int, r Range) *continuation
|
||||
AddLine(indentation int, r Range) bool
|
||||
Close()
|
||||
AllowsBlockStarts() bool
|
||||
HasTrailingBlankLine() bool
|
||||
}
|
||||
|
||||
type blockBase struct{}
|
||||
|
||||
func (*blockBase) AddLine(indentation int, r Range) bool { return false }
|
||||
func (*blockBase) Close() {}
|
||||
func (*blockBase) AllowsBlockStarts() bool { return true }
|
||||
func (*blockBase) HasTrailingBlankLine() bool { return false }
|
||||
|
||||
type ContainerBlock interface {
|
||||
Block
|
||||
AddChild(openBlocks []Block) []Block
|
||||
}
|
||||
|
||||
type Range struct {
|
||||
Position int
|
||||
End int
|
||||
}
|
||||
|
||||
func closeBlocks(blocks []Block, referenceDefinitions []*ReferenceDefinition) []*ReferenceDefinition {
|
||||
for _, block := range blocks {
|
||||
block.Close()
|
||||
if p, ok := block.(*Paragraph); ok && len(p.ReferenceDefinitions) > 0 {
|
||||
referenceDefinitions = append(referenceDefinitions, p.ReferenceDefinitions...)
|
||||
}
|
||||
}
|
||||
return referenceDefinitions
|
||||
}
|
||||
|
||||
func ParseBlocks(markdown string, lines []Line) (*Document, []*ReferenceDefinition) {
|
||||
document := &Document{}
|
||||
var referenceDefinitions []*ReferenceDefinition
|
||||
|
||||
openBlocks := []Block{document}
|
||||
|
||||
for _, line := range lines {
|
||||
r := line.Range
|
||||
lastMatchIndex := 0
|
||||
|
||||
indentation, indentationBytes := countIndentation(markdown, r)
|
||||
r = Range{r.Position + indentationBytes, r.End}
|
||||
|
||||
for i, block := range openBlocks {
|
||||
if continuation := block.Continuation(indentation, r); continuation != nil {
|
||||
indentation = continuation.Indentation
|
||||
r = continuation.Remaining
|
||||
additionalIndentation, additionalIndentationBytes := countIndentation(markdown, r)
|
||||
r = Range{r.Position + additionalIndentationBytes, r.End}
|
||||
indentation += additionalIndentation
|
||||
lastMatchIndex = i
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if openBlocks[lastMatchIndex].AllowsBlockStarts() {
|
||||
if newBlocks := blockStart(markdown, indentation, r, openBlocks[:lastMatchIndex+1], openBlocks[lastMatchIndex+1:]); newBlocks != nil {
|
||||
didAdd := false
|
||||
for i := lastMatchIndex; i >= 0; i-- {
|
||||
if container, ok := openBlocks[i].(ContainerBlock); ok {
|
||||
if addedBlocks := container.AddChild(newBlocks); addedBlocks != nil {
|
||||
referenceDefinitions = closeBlocks(openBlocks[i+1:], referenceDefinitions)
|
||||
openBlocks = openBlocks[:i+1]
|
||||
openBlocks = append(openBlocks, addedBlocks...)
|
||||
didAdd = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if didAdd {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
isBlank := strings.TrimSpace(markdown[r.Position:r.End]) == ""
|
||||
if paragraph, ok := openBlocks[len(openBlocks)-1].(*Paragraph); ok && !isBlank {
|
||||
paragraph.Text = append(paragraph.Text, r)
|
||||
continue
|
||||
}
|
||||
|
||||
referenceDefinitions = closeBlocks(openBlocks[lastMatchIndex+1:], referenceDefinitions)
|
||||
openBlocks = openBlocks[:lastMatchIndex+1]
|
||||
|
||||
if openBlocks[lastMatchIndex].AddLine(indentation, r) {
|
||||
continue
|
||||
}
|
||||
|
||||
if paragraph := newParagraph(markdown, r); paragraph != nil {
|
||||
for i := lastMatchIndex; i >= 0; i-- {
|
||||
if container, ok := openBlocks[i].(ContainerBlock); ok {
|
||||
if newBlocks := container.AddChild([]Block{paragraph}); newBlocks != nil {
|
||||
referenceDefinitions = closeBlocks(openBlocks[i+1:], referenceDefinitions)
|
||||
openBlocks = openBlocks[:i+1]
|
||||
openBlocks = append(openBlocks, newBlocks...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
referenceDefinitions = closeBlocks(openBlocks, referenceDefinitions)
|
||||
|
||||
return document, referenceDefinitions
|
||||
}
|
||||
|
||||
func blockStart(markdown string, indentation int, r Range, matchedBlocks, unmatchedBlocks []Block) []Block {
|
||||
if r.Position >= r.End {
|
||||
return nil
|
||||
}
|
||||
|
||||
if start := blockQuoteStart(markdown, indentation, r); start != nil {
|
||||
return start
|
||||
} else if start := listStart(markdown, indentation, r, matchedBlocks, unmatchedBlocks); start != nil {
|
||||
return start
|
||||
} else if start := indentedCodeStart(markdown, indentation, r, matchedBlocks, unmatchedBlocks); start != nil {
|
||||
return start
|
||||
} else if start := fencedCodeStart(markdown, indentation, r); start != nil {
|
||||
return start
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func blockStartOrParagraph(markdown string, indentation int, r Range, matchedBlocks, unmatchedBlocks []Block) []Block {
|
||||
if start := blockStart(markdown, indentation, r, matchedBlocks, unmatchedBlocks); start != nil {
|
||||
return start
|
||||
}
|
||||
if paragraph := newParagraph(markdown, r); paragraph != nil {
|
||||
return []Block{paragraph}
|
||||
}
|
||||
return nil
|
||||
}
|
22 vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/document.go generated vendored Normal file
@@ -0,0 +1,22 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package markdown

type Document struct {
	blockBase

	Children []Block
}

func (b *Document) Continuation(indentation int, r Range) *continuation {
	return &continuation{
		Indentation: indentation,
		Remaining:   r,
	}
}

func (b *Document) AddChild(openBlocks []Block) []Block {
	b.Children = append(b.Children, openBlocks[0])
	return openBlocks
}
112 vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/fenced_code.go generated vendored Normal file
@@ -0,0 +1,112 @@
|
||||
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
||||
// See LICENSE.txt for license information.
|
||||
|
||||
package markdown
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
type FencedCodeLine struct {
|
||||
Indentation int
|
||||
Range Range
|
||||
}
|
||||
|
||||
type FencedCode struct {
|
||||
blockBase
|
||||
markdown string
|
||||
didSeeClosingFence bool
|
||||
|
||||
Indentation int
|
||||
OpeningFence Range
|
||||
RawInfo Range
|
||||
RawCode []FencedCodeLine
|
||||
}
|
||||
|
||||
func (b *FencedCode) Code() (result string) {
|
||||
for _, code := range b.RawCode {
|
||||
result += strings.Repeat(" ", code.Indentation) + b.markdown[code.Range.Position:code.Range.End]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *FencedCode) Info() string {
|
||||
return Unescape(b.markdown[b.RawInfo.Position:b.RawInfo.End])
|
||||
}
|
||||
|
||||
func (b *FencedCode) Continuation(indentation int, r Range) *continuation {
|
||||
if b.didSeeClosingFence {
|
||||
return nil
|
||||
}
|
||||
return &continuation{
|
||||
Indentation: indentation,
|
||||
Remaining: r,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *FencedCode) AddLine(indentation int, r Range) bool {
|
||||
s := b.markdown[r.Position:r.End]
|
||||
if indentation <= 3 && strings.HasPrefix(s, b.markdown[b.OpeningFence.Position:b.OpeningFence.End]) {
|
||||
suffix := strings.TrimSpace(s[b.OpeningFence.End-b.OpeningFence.Position:])
|
||||
isClosingFence := true
|
||||
for _, c := range suffix {
|
||||
if c != rune(s[0]) {
|
||||
isClosingFence = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if isClosingFence {
|
||||
b.didSeeClosingFence = true
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if indentation >= b.Indentation {
|
||||
indentation -= b.Indentation
|
||||
} else {
|
||||
indentation = 0
|
||||
}
|
||||
|
||||
b.RawCode = append(b.RawCode, FencedCodeLine{
|
||||
Indentation: indentation,
|
||||
Range: r,
|
||||
})
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *FencedCode) AllowsBlockStarts() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func fencedCodeStart(markdown string, indentation int, r Range) []Block {
|
||||
s := markdown[r.Position:r.End]
|
||||
|
||||
if !strings.HasPrefix(s, "```") && !strings.HasPrefix(s, "~~~") {
|
||||
return nil
|
||||
}
|
||||
|
||||
fenceCharacter := rune(s[0])
|
||||
fenceLength := 3
|
||||
for _, c := range s[3:] {
|
||||
if c == fenceCharacter {
|
||||
fenceLength++
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for i := r.Position + fenceLength; i < r.End; i++ {
|
||||
if markdown[i] == '`' {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return []Block{
|
||||
&FencedCode{
|
||||
markdown: markdown,
|
||||
Indentation: indentation,
|
||||
RawInfo: trimRightSpace(markdown, Range{r.Position + fenceLength, r.End}),
|
||||
OpeningFence: Range{r.Position, r.Position + fenceLength},
|
||||
},
|
||||
}
|
||||
}
|
192 vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/html.go generated vendored Normal file
@@ -0,0 +1,192 @@
|
||||
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
||||
// See LICENSE.txt for license information.
|
||||
|
||||
package markdown
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var htmlEscaper = strings.NewReplacer(
	`&`, "&amp;",
	`<`, "&lt;",
	`>`, "&gt;",
	`"`, "&quot;",
)
|
||||
|
||||
// RenderHTML produces HTML with the same behavior as the example renderer used in the CommonMark
|
||||
// reference materials except for one slight difference: for brevity, no unnecessary whitespace is
|
||||
// inserted between elements. The output is not defined by the CommonMark spec, and it exists
|
||||
// primarily as an aid in testing.
|
||||
func RenderHTML(markdown string) string {
|
||||
return RenderBlockHTML(Parse(markdown))
|
||||
}
|
||||
|
||||
func RenderBlockHTML(block Block, referenceDefinitions []*ReferenceDefinition) (result string) {
|
||||
return renderBlockHTML(block, referenceDefinitions, false)
|
||||
}
|
||||
|
||||
func renderBlockHTML(block Block, referenceDefinitions []*ReferenceDefinition, isTightList bool) (result string) {
|
||||
switch v := block.(type) {
|
||||
case *Document:
|
||||
for _, block := range v.Children {
|
||||
result += RenderBlockHTML(block, referenceDefinitions)
|
||||
}
|
||||
case *Paragraph:
|
||||
if len(v.Text) == 0 {
|
||||
return
|
||||
}
|
||||
if !isTightList {
|
||||
result += "<p>"
|
||||
}
|
||||
for _, inline := range v.ParseInlines(referenceDefinitions) {
|
||||
result += RenderInlineHTML(inline)
|
||||
}
|
||||
if !isTightList {
|
||||
result += "</p>"
|
||||
}
|
||||
case *List:
|
||||
if v.IsOrdered {
|
||||
if v.OrderedStart != 1 {
|
||||
result += fmt.Sprintf(`<ol start="%v">`, v.OrderedStart)
|
||||
} else {
|
||||
result += "<ol>"
|
||||
}
|
||||
} else {
|
||||
result += "<ul>"
|
||||
}
|
||||
for _, block := range v.Children {
|
||||
result += renderBlockHTML(block, referenceDefinitions, !v.IsLoose)
|
||||
}
|
||||
if v.IsOrdered {
|
||||
result += "</ol>"
|
||||
} else {
|
||||
result += "</ul>"
|
||||
}
|
||||
case *ListItem:
|
||||
result += "<li>"
|
||||
for _, block := range v.Children {
|
||||
result += renderBlockHTML(block, referenceDefinitions, isTightList)
|
||||
}
|
||||
result += "</li>"
|
||||
case *BlockQuote:
|
||||
result += "<blockquote>"
|
||||
for _, block := range v.Children {
|
||||
result += RenderBlockHTML(block, referenceDefinitions)
|
||||
}
|
||||
result += "</blockquote>"
|
||||
case *FencedCode:
|
||||
if info := v.Info(); info != "" {
|
||||
language := strings.Fields(info)[0]
|
||||
result += `<pre><code class="language-` + htmlEscaper.Replace(language) + `">`
|
||||
} else {
|
||||
result += "<pre><code>"
|
||||
}
|
||||
result += htmlEscaper.Replace(v.Code()) + "</code></pre>"
|
||||
case *IndentedCode:
|
||||
result += "<pre><code>" + htmlEscaper.Replace(v.Code()) + "</code></pre>"
|
||||
default:
|
||||
panic(fmt.Sprintf("missing case for type %T", v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func escapeURL(url string) (result string) {
|
||||
for i := 0; i < len(url); {
|
||||
switch b := url[i]; b {
|
||||
case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '-', '_', '.', '!', '~', '*', '\'', '(', ')', '#':
|
||||
result += string(b)
|
||||
i++
|
||||
default:
|
||||
if b == '%' && i+2 < len(url) && isHexByte(url[i+1]) && isHexByte(url[i+2]) {
|
||||
result += url[i : i+3]
|
||||
i += 3
|
||||
} else if (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9') {
|
||||
result += string(b)
|
||||
i++
|
||||
} else {
|
||||
result += fmt.Sprintf("%%%0X", b)
|
||||
i++
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func RenderInlineHTML(inline Inline) (result string) {
|
||||
switch v := inline.(type) {
|
||||
case *Text:
|
||||
return htmlEscaper.Replace(v.Text)
|
||||
case *HardLineBreak:
|
||||
return "<br />"
|
||||
case *SoftLineBreak:
|
||||
return "\n"
|
||||
case *CodeSpan:
|
||||
return "<code>" + htmlEscaper.Replace(v.Code) + "</code>"
|
||||
case *InlineImage:
|
||||
result += `<img src="` + htmlEscaper.Replace(escapeURL(v.Destination())) + `" alt="` + htmlEscaper.Replace(renderImageAltText(v.Children)) + `"`
|
||||
if title := v.Title(); title != "" {
|
||||
result += ` title="` + htmlEscaper.Replace(title) + `"`
|
||||
}
|
||||
result += ` />`
|
||||
case *ReferenceImage:
|
||||
result += `<img src="` + htmlEscaper.Replace(escapeURL(v.Destination())) + `" alt="` + htmlEscaper.Replace(renderImageAltText(v.Children)) + `"`
|
||||
if title := v.Title(); title != "" {
|
||||
result += ` title="` + htmlEscaper.Replace(title) + `"`
|
||||
}
|
||||
result += ` />`
|
||||
case *InlineLink:
|
||||
result += `<a href="` + htmlEscaper.Replace(escapeURL(v.Destination())) + `"`
|
||||
if title := v.Title(); title != "" {
|
||||
result += ` title="` + htmlEscaper.Replace(title) + `"`
|
||||
}
|
||||
result += `>`
|
||||
for _, inline := range v.Children {
|
||||
result += RenderInlineHTML(inline)
|
||||
}
|
||||
result += "</a>"
|
||||
case *ReferenceLink:
|
||||
result += `<a href="` + htmlEscaper.Replace(escapeURL(v.Destination())) + `"`
|
||||
if title := v.Title(); title != "" {
|
||||
result += ` title="` + htmlEscaper.Replace(title) + `"`
|
||||
}
|
||||
result += `>`
|
||||
for _, inline := range v.Children {
|
||||
result += RenderInlineHTML(inline)
|
||||
}
|
||||
result += "</a>"
|
||||
case *Autolink:
|
||||
result += `<a href="` + htmlEscaper.Replace(escapeURL(v.Destination())) + `">`
|
||||
for _, inline := range v.Children {
|
||||
result += RenderInlineHTML(inline)
|
||||
}
|
||||
result += "</a>"
|
||||
default:
|
||||
panic(fmt.Sprintf("missing case for type %T", v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func renderImageAltText(children []Inline) (result string) {
|
||||
for _, inline := range children {
|
||||
result += renderImageChildAltText(inline)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func renderImageChildAltText(inline Inline) (result string) {
|
||||
switch v := inline.(type) {
|
||||
case *Text:
|
||||
return v.Text
|
||||
case *InlineImage:
|
||||
for _, inline := range v.Children {
|
||||
result += renderImageChildAltText(inline)
|
||||
}
|
||||
case *InlineLink:
|
||||
for _, inline := range v.Children {
|
||||
result += renderImageChildAltText(inline)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
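RenderHTML above is the whole public entry point of this renderer. A minimal usage sketch, assuming the vendored import path shown in the file header; the input string is illustrative only, and this CommonMark subset deliberately lacks many constructs (no headings or emphasis), so the output is compact HTML for the blocks and inlines it does support.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/shared/markdown"
)

func main() {
	// Parse and render in one call; the renderer above inserts no extra
	// whitespace between elements.
	fmt.Println(markdown.RenderHTML("- first `item`\n- second\n"))
}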
2132
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/html_entities.go
generated
vendored
Normal file
2132
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/html_entities.go
generated
vendored
Normal file
File diff suppressed because it is too large
98
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/indented_code.go
generated
vendored
Normal file
98
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/indented_code.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package markdown

import (
	"strings"
)

type IndentedCodeLine struct {
	Indentation int
	Range       Range
}

type IndentedCode struct {
	blockBase
	markdown string

	RawCode []IndentedCodeLine
}

func (b *IndentedCode) Code() (result string) {
	for _, code := range b.RawCode {
		result += strings.Repeat(" ", code.Indentation) + b.markdown[code.Range.Position:code.Range.End]
	}
	return
}

func (b *IndentedCode) Continuation(indentation int, r Range) *continuation {
	if indentation >= 4 {
		return &continuation{
			Indentation: indentation - 4,
			Remaining:   r,
		}
	}
	s := b.markdown[r.Position:r.End]
	if strings.TrimSpace(s) == "" {
		return &continuation{
			Remaining: r,
		}
	}
	return nil
}

func (b *IndentedCode) AddLine(indentation int, r Range) bool {
	b.RawCode = append(b.RawCode, IndentedCodeLine{
		Indentation: indentation,
		Range:       r,
	})
	return true
}

func (b *IndentedCode) Close() {
	for {
		last := b.RawCode[len(b.RawCode)-1]
		s := b.markdown[last.Range.Position:last.Range.End]
		if strings.TrimRight(s, "\r\n") == "" {
			b.RawCode = b.RawCode[:len(b.RawCode)-1]
		} else {
			break
		}
	}
}

func (b *IndentedCode) AllowsBlockStarts() bool {
	return false
}

func indentedCodeStart(markdown string, indentation int, r Range, matchedBlocks, unmatchedBlocks []Block) []Block {
	if len(unmatchedBlocks) > 0 {
		if _, ok := unmatchedBlocks[len(unmatchedBlocks)-1].(*Paragraph); ok {
			return nil
		}
	} else if len(matchedBlocks) > 0 {
		if _, ok := matchedBlocks[len(matchedBlocks)-1].(*Paragraph); ok {
			return nil
		}
	}

	if indentation < 4 {
		return nil
	}

	s := markdown[r.Position:r.End]
	if strings.TrimSpace(s) == "" {
		return nil
	}

	return []Block{
		&IndentedCode{
			markdown: markdown,
			RawCode: []IndentedCodeLine{{
				Indentation: indentation - 4,
				Range:       r,
			}},
		},
	}
}
663
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/inlines.go
generated
vendored
Normal file
663
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/inlines.go
generated
vendored
Normal file
@ -0,0 +1,663 @@
|
||||
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
||||
// See LICENSE.txt for license information.
|
||||
|
||||
package markdown
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type Inline interface {
|
||||
IsInline() bool
|
||||
}
|
||||
|
||||
type inlineBase struct{}
|
||||
|
||||
func (inlineBase) IsInline() bool { return true }
|
||||
|
||||
type Text struct {
|
||||
inlineBase
|
||||
|
||||
Text string
|
||||
Range Range
|
||||
}
|
||||
|
||||
type CodeSpan struct {
|
||||
inlineBase
|
||||
|
||||
Code string
|
||||
}
|
||||
|
||||
type HardLineBreak struct {
|
||||
inlineBase
|
||||
}
|
||||
|
||||
type SoftLineBreak struct {
|
||||
inlineBase
|
||||
}
|
||||
|
||||
type InlineLinkOrImage struct {
|
||||
inlineBase
|
||||
|
||||
Children []Inline
|
||||
|
||||
RawDestination Range
|
||||
|
||||
markdown string
|
||||
rawTitle string
|
||||
}
|
||||
|
||||
func (i *InlineLinkOrImage) Destination() string {
|
||||
return Unescape(i.markdown[i.RawDestination.Position:i.RawDestination.End])
|
||||
}
|
||||
|
||||
func (i *InlineLinkOrImage) Title() string {
|
||||
return Unescape(i.rawTitle)
|
||||
}
|
||||
|
||||
type InlineLink struct {
|
||||
InlineLinkOrImage
|
||||
}
|
||||
|
||||
type InlineImage struct {
|
||||
InlineLinkOrImage
|
||||
}
|
||||
|
||||
type ReferenceLinkOrImage struct {
|
||||
inlineBase
|
||||
*ReferenceDefinition
|
||||
|
||||
Children []Inline
|
||||
}
|
||||
|
||||
type ReferenceLink struct {
|
||||
ReferenceLinkOrImage
|
||||
}
|
||||
|
||||
type ReferenceImage struct {
|
||||
ReferenceLinkOrImage
|
||||
}
|
||||
|
||||
type Autolink struct {
|
||||
inlineBase
|
||||
|
||||
Children []Inline
|
||||
|
||||
RawDestination Range
|
||||
|
||||
markdown string
|
||||
}
|
||||
|
||||
func (i *Autolink) Destination() string {
|
||||
destination := Unescape(i.markdown[i.RawDestination.Position:i.RawDestination.End])
|
||||
|
||||
if strings.HasPrefix(destination, "www") {
|
||||
destination = "http://" + destination
|
||||
}
|
||||
|
||||
return destination
|
||||
}
|
||||
|
||||
type delimiterType int
|
||||
|
||||
const (
|
||||
linkOpeningDelimiter delimiterType = iota
|
||||
imageOpeningDelimiter
|
||||
)
|
||||
|
||||
type delimiter struct {
|
||||
Type delimiterType
|
||||
IsInactive bool
|
||||
TextNode int
|
||||
Range Range
|
||||
}
|
||||
|
||||
type inlineParser struct {
|
||||
markdown string
|
||||
ranges []Range
|
||||
referenceDefinitions []*ReferenceDefinition
|
||||
|
||||
raw string
|
||||
position int
|
||||
inlines []Inline
|
||||
delimiterStack *list.List
|
||||
}
|
||||
|
||||
func newInlineParser(markdown string, ranges []Range, referenceDefinitions []*ReferenceDefinition) *inlineParser {
|
||||
return &inlineParser{
|
||||
markdown: markdown,
|
||||
ranges: ranges,
|
||||
referenceDefinitions: referenceDefinitions,
|
||||
delimiterStack: list.New(),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *inlineParser) parseBackticks() {
|
||||
count := 1
|
||||
for i := p.position + 1; i < len(p.raw) && p.raw[i] == '`'; i++ {
|
||||
count++
|
||||
}
|
||||
opening := p.raw[p.position : p.position+count]
|
||||
search := p.position + count
|
||||
for search < len(p.raw) {
|
||||
end := strings.Index(p.raw[search:], opening)
|
||||
if end == -1 {
|
||||
break
|
||||
}
|
||||
if search+end+count < len(p.raw) && p.raw[search+end+count] == '`' {
|
||||
search += end + count
|
||||
for search < len(p.raw) && p.raw[search] == '`' {
|
||||
search++
|
||||
}
|
||||
continue
|
||||
}
|
||||
code := strings.Join(strings.Fields(p.raw[p.position+count:search+end]), " ")
|
||||
p.position = search + end + count
|
||||
p.inlines = append(p.inlines, &CodeSpan{
|
||||
Code: code,
|
||||
})
|
||||
return
|
||||
}
|
||||
p.position += len(opening)
|
||||
absPos := relativeToAbsolutePosition(p.ranges, p.position-len(opening))
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: opening,
|
||||
Range: Range{absPos, absPos + len(opening)},
|
||||
})
|
||||
}
|
||||
|
||||
func (p *inlineParser) parseLineEnding() {
|
||||
if p.position >= 1 && p.raw[p.position-1] == '\t' {
|
||||
p.inlines = append(p.inlines, &HardLineBreak{})
|
||||
} else if p.position >= 2 && p.raw[p.position-1] == ' ' && (p.raw[p.position-2] == '\t' || p.raw[p.position-1] == ' ') {
|
||||
p.inlines = append(p.inlines, &HardLineBreak{})
|
||||
} else {
|
||||
p.inlines = append(p.inlines, &SoftLineBreak{})
|
||||
}
|
||||
p.position++
|
||||
if p.position < len(p.raw) && p.raw[p.position] == '\n' {
|
||||
p.position++
|
||||
}
|
||||
}
|
||||
|
||||
func (p *inlineParser) parseEscapeCharacter() {
|
||||
if p.position+1 < len(p.raw) && isEscapableByte(p.raw[p.position+1]) {
|
||||
absPos := relativeToAbsolutePosition(p.ranges, p.position+1)
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: string(p.raw[p.position+1]),
|
||||
Range: Range{absPos, absPos + len(string(p.raw[p.position+1]))},
|
||||
})
|
||||
p.position += 2
|
||||
} else {
|
||||
absPos := relativeToAbsolutePosition(p.ranges, p.position)
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: `\`,
|
||||
Range: Range{absPos, absPos + 1},
|
||||
})
|
||||
p.position++
|
||||
}
|
||||
}
|
||||
|
||||
func (p *inlineParser) parseText() {
|
||||
if next := strings.IndexAny(p.raw[p.position:], "\r\n\\`&![]wW:"); next == -1 {
|
||||
absPos := relativeToAbsolutePosition(p.ranges, p.position)
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: strings.TrimRightFunc(p.raw[p.position:], isWhitespace),
|
||||
Range: Range{absPos, absPos + len(p.raw[p.position:])},
|
||||
})
|
||||
p.position = len(p.raw)
|
||||
} else {
|
||||
absPos := relativeToAbsolutePosition(p.ranges, p.position)
|
||||
if p.raw[p.position+next] == '\r' || p.raw[p.position+next] == '\n' {
|
||||
s := strings.TrimRightFunc(p.raw[p.position:p.position+next], isWhitespace)
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: s,
|
||||
Range: Range{absPos, absPos + len(s)},
|
||||
})
|
||||
} else {
|
||||
if next == 0 {
|
||||
// Always read at least one character since 'w', 'W', and ':' may not actually match another
|
||||
// type of node
|
||||
next = 1
|
||||
}
|
||||
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: p.raw[p.position : p.position+next],
|
||||
Range: Range{absPos, absPos + next},
|
||||
})
|
||||
}
|
||||
p.position += next
|
||||
}
|
||||
}
|
||||
|
||||
func (p *inlineParser) parseLinkOrImageDelimiter() {
|
||||
absPos := relativeToAbsolutePosition(p.ranges, p.position)
|
||||
if p.raw[p.position] == '[' {
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: "[",
|
||||
Range: Range{absPos, absPos + 1},
|
||||
})
|
||||
p.delimiterStack.PushBack(&delimiter{
|
||||
Type: linkOpeningDelimiter,
|
||||
TextNode: len(p.inlines) - 1,
|
||||
Range: Range{p.position, p.position + 1},
|
||||
})
|
||||
p.position++
|
||||
} else if p.raw[p.position] == '!' && p.position+1 < len(p.raw) && p.raw[p.position+1] == '[' {
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: "![",
|
||||
Range: Range{absPos, absPos + 2},
|
||||
})
|
||||
p.delimiterStack.PushBack(&delimiter{
|
||||
Type: imageOpeningDelimiter,
|
||||
TextNode: len(p.inlines) - 1,
|
||||
Range: Range{p.position, p.position + 2},
|
||||
})
|
||||
p.position += 2
|
||||
} else {
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: "!",
|
||||
Range: Range{absPos, absPos + 1},
|
||||
})
|
||||
p.position++
|
||||
}
|
||||
}
|
||||
|
||||
func (p *inlineParser) peekAtInlineLinkDestinationAndTitle(position int, isImage bool) (destination, title Range, end int, ok bool) {
|
||||
if position >= len(p.raw) || p.raw[position] != '(' {
|
||||
return
|
||||
}
|
||||
position++
|
||||
|
||||
destinationStart := nextNonWhitespace(p.raw, position)
|
||||
if destinationStart >= len(p.raw) {
|
||||
return
|
||||
} else if p.raw[destinationStart] == ')' {
|
||||
return Range{destinationStart, destinationStart}, Range{destinationStart, destinationStart}, destinationStart + 1, true
|
||||
}
|
||||
|
||||
destination, end, ok = parseLinkDestination(p.raw, destinationStart)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
position = end
|
||||
|
||||
if isImage && position < len(p.raw) && isWhitespaceByte(p.raw[position]) {
|
||||
dimensionsStart := nextNonWhitespace(p.raw, position)
|
||||
if dimensionsStart >= len(p.raw) {
|
||||
return
|
||||
}
|
||||
|
||||
if p.raw[dimensionsStart] == '=' {
|
||||
// Read optional image dimensions even if we don't use them
|
||||
_, end, ok = parseImageDimensions(p.raw, dimensionsStart)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
position = end
|
||||
}
|
||||
}
|
||||
|
||||
if position < len(p.raw) && isWhitespaceByte(p.raw[position]) {
|
||||
titleStart := nextNonWhitespace(p.raw, position)
|
||||
if titleStart >= len(p.raw) {
|
||||
return
|
||||
} else if p.raw[titleStart] == ')' {
|
||||
return destination, Range{titleStart, titleStart}, titleStart + 1, true
|
||||
}
|
||||
|
||||
if p.raw[titleStart] == '"' || p.raw[titleStart] == '\'' || p.raw[titleStart] == '(' {
|
||||
title, end, ok = parseLinkTitle(p.raw, titleStart)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
position = end
|
||||
}
|
||||
}
|
||||
|
||||
closingPosition := nextNonWhitespace(p.raw, position)
|
||||
if closingPosition >= len(p.raw) || p.raw[closingPosition] != ')' {
|
||||
return Range{}, Range{}, 0, false
|
||||
}
|
||||
|
||||
return destination, title, closingPosition + 1, true
|
||||
}
|
||||
|
||||
func (p *inlineParser) referenceDefinition(label string) *ReferenceDefinition {
|
||||
clean := strings.Join(strings.Fields(label), " ")
|
||||
for _, d := range p.referenceDefinitions {
|
||||
if strings.EqualFold(clean, strings.Join(strings.Fields(d.Label()), " ")) {
|
||||
return d
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *inlineParser) lookForLinkOrImage() {
|
||||
for element := p.delimiterStack.Back(); element != nil; element = element.Prev() {
|
||||
d := element.Value.(*delimiter)
|
||||
if d.Type != imageOpeningDelimiter && d.Type != linkOpeningDelimiter {
|
||||
continue
|
||||
}
|
||||
if d.IsInactive {
|
||||
p.delimiterStack.Remove(element)
|
||||
break
|
||||
}
|
||||
|
||||
isImage := d.Type == imageOpeningDelimiter
|
||||
|
||||
var inline Inline
|
||||
|
||||
if destination, title, next, ok := p.peekAtInlineLinkDestinationAndTitle(p.position+1, isImage); ok {
|
||||
destinationMarkdownPosition := relativeToAbsolutePosition(p.ranges, destination.Position)
|
||||
linkOrImage := InlineLinkOrImage{
|
||||
Children: append([]Inline(nil), p.inlines[d.TextNode+1:]...),
|
||||
RawDestination: Range{destinationMarkdownPosition, destinationMarkdownPosition + destination.End - destination.Position},
|
||||
markdown: p.markdown,
|
||||
rawTitle: p.raw[title.Position:title.End],
|
||||
}
|
||||
if d.Type == imageOpeningDelimiter {
|
||||
inline = &InlineImage{linkOrImage}
|
||||
} else {
|
||||
inline = &InlineLink{linkOrImage}
|
||||
}
|
||||
p.position = next
|
||||
} else {
|
||||
referenceLabel := ""
|
||||
label, next, hasLinkLabel := parseLinkLabel(p.raw, p.position+1)
|
||||
if hasLinkLabel && label.End > label.Position {
|
||||
referenceLabel = p.raw[label.Position:label.End]
|
||||
} else {
|
||||
referenceLabel = p.raw[d.Range.End:p.position]
|
||||
if !hasLinkLabel {
|
||||
next = p.position + 1
|
||||
}
|
||||
}
|
||||
if referenceLabel != "" {
|
||||
if reference := p.referenceDefinition(referenceLabel); reference != nil {
|
||||
linkOrImage := ReferenceLinkOrImage{
|
||||
ReferenceDefinition: reference,
|
||||
Children: append([]Inline(nil), p.inlines[d.TextNode+1:]...),
|
||||
}
|
||||
if d.Type == imageOpeningDelimiter {
|
||||
inline = &ReferenceImage{linkOrImage}
|
||||
} else {
|
||||
inline = &ReferenceLink{linkOrImage}
|
||||
}
|
||||
p.position = next
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if inline != nil {
|
||||
if d.Type == imageOpeningDelimiter {
|
||||
p.inlines = append(p.inlines[:d.TextNode], inline)
|
||||
} else {
|
||||
p.inlines = append(p.inlines[:d.TextNode], inline)
|
||||
for inlineElement := element.Prev(); inlineElement != nil; inlineElement = inlineElement.Prev() {
|
||||
if d := inlineElement.Value.(*delimiter); d.Type == linkOpeningDelimiter {
|
||||
d.IsInactive = true
|
||||
}
|
||||
}
|
||||
}
|
||||
p.delimiterStack.Remove(element)
|
||||
return
|
||||
}
|
||||
p.delimiterStack.Remove(element)
|
||||
break
|
||||
}
|
||||
absPos := relativeToAbsolutePosition(p.ranges, p.position)
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: "]",
|
||||
Range: Range{absPos, absPos + 1},
|
||||
})
|
||||
p.position++
|
||||
}
|
||||
|
||||
func CharacterReference(ref string) string {
|
||||
if ref == "" {
|
||||
return ""
|
||||
}
|
||||
if ref[0] == '#' {
|
||||
if len(ref) < 2 {
|
||||
return ""
|
||||
}
|
||||
n := 0
|
||||
if ref[1] == 'X' || ref[1] == 'x' {
|
||||
if len(ref) < 3 {
|
||||
return ""
|
||||
}
|
||||
for i := 2; i < len(ref); i++ {
|
||||
if i > 9 {
|
||||
return ""
|
||||
}
|
||||
d := ref[i]
|
||||
switch {
|
||||
case d >= '0' && d <= '9':
|
||||
n = n*16 + int(d-'0')
|
||||
case d >= 'a' && d <= 'f':
|
||||
n = n*16 + 10 + int(d-'a')
|
||||
case d >= 'A' && d <= 'F':
|
||||
n = n*16 + 10 + int(d-'A')
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := 1; i < len(ref); i++ {
|
||||
if i > 8 || ref[i] < '0' || ref[i] > '9' {
|
||||
return ""
|
||||
}
|
||||
n = n*10 + int(ref[i]-'0')
|
||||
}
|
||||
}
|
||||
c := rune(n)
|
||||
if c == '\u0000' || !utf8.ValidRune(c) {
|
||||
return string(unicode.ReplacementChar)
|
||||
}
|
||||
return string(c)
|
||||
}
|
||||
if entity, ok := htmlEntities[ref]; ok {
|
||||
return entity
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (p *inlineParser) parseCharacterReference() {
|
||||
absPos := relativeToAbsolutePosition(p.ranges, p.position)
|
||||
p.position++
|
||||
if semicolon := strings.IndexByte(p.raw[p.position:], ';'); semicolon == -1 {
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: "&",
|
||||
Range: Range{absPos, absPos + 1},
|
||||
})
|
||||
} else if s := CharacterReference(p.raw[p.position : p.position+semicolon]); s != "" {
|
||||
p.position += semicolon + 1
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: s,
|
||||
Range: Range{absPos, absPos + len(s)},
|
||||
})
|
||||
} else {
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: "&",
|
||||
Range: Range{absPos, absPos + 1},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (p *inlineParser) parseAutolink(c rune) bool {
|
||||
for element := p.delimiterStack.Back(); element != nil; element = element.Prev() {
|
||||
d := element.Value.(*delimiter)
|
||||
if !d.IsInactive {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
var link Range
|
||||
if c == ':' {
|
||||
var ok bool
|
||||
link, ok = parseURLAutolink(p.raw, p.position)
|
||||
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// Since the current position is at the colon, we have to rewind the parsing slightly so that
|
||||
// we don't duplicate the URL scheme
|
||||
rewind := strings.Index(p.raw[link.Position:link.End], ":")
|
||||
if rewind != -1 {
|
||||
lastInline := p.inlines[len(p.inlines)-1]
|
||||
lastText, ok := lastInline.(*Text)
|
||||
|
||||
if !ok {
|
||||
// This should never occur since parseURLAutolink will only return a non-empty value
|
||||
// when the previous text ends in a valid URL protocol which would mean that the previous
|
||||
// node is a Text node
|
||||
return false
|
||||
}
|
||||
|
||||
p.inlines = p.inlines[0 : len(p.inlines)-1]
|
||||
p.inlines = append(p.inlines, &Text{
|
||||
Text: lastText.Text[:len(lastText.Text)-rewind],
|
||||
Range: Range{lastText.Range.Position, lastText.Range.End - rewind},
|
||||
})
|
||||
p.position -= rewind
|
||||
}
|
||||
} else if c == 'w' || c == 'W' {
|
||||
var ok bool
|
||||
link, ok = parseWWWAutolink(p.raw, p.position)
|
||||
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
linkMarkdownPosition := relativeToAbsolutePosition(p.ranges, link.Position)
|
||||
linkRange := Range{linkMarkdownPosition, linkMarkdownPosition + link.End - link.Position}
|
||||
|
||||
p.inlines = append(p.inlines, &Autolink{
|
||||
Children: []Inline{
|
||||
&Text{
|
||||
Text: p.raw[link.Position:link.End],
|
||||
Range: linkRange,
|
||||
},
|
||||
},
|
||||
RawDestination: linkRange,
|
||||
markdown: p.markdown,
|
||||
})
|
||||
p.position += (link.End - link.Position)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *inlineParser) Parse() []Inline {
|
||||
for _, r := range p.ranges {
|
||||
p.raw += p.markdown[r.Position:r.End]
|
||||
}
|
||||
|
||||
for p.position < len(p.raw) {
|
||||
c, _ := utf8.DecodeRuneInString(p.raw[p.position:])
|
||||
|
||||
switch c {
|
||||
case '\r', '\n':
|
||||
p.parseLineEnding()
|
||||
case '\\':
|
||||
p.parseEscapeCharacter()
|
||||
case '`':
|
||||
p.parseBackticks()
|
||||
case '&':
|
||||
p.parseCharacterReference()
|
||||
case '!', '[':
|
||||
p.parseLinkOrImageDelimiter()
|
||||
case ']':
|
||||
p.lookForLinkOrImage()
|
||||
case 'w', 'W', ':':
|
||||
matched := p.parseAutolink(c)
|
||||
|
||||
if !matched {
|
||||
p.parseText()
|
||||
}
|
||||
default:
|
||||
p.parseText()
|
||||
}
|
||||
}
|
||||
|
||||
return p.inlines
|
||||
}
|
||||
|
||||
func ParseInlines(markdown string, ranges []Range, referenceDefinitions []*ReferenceDefinition) (inlines []Inline) {
|
||||
return newInlineParser(markdown, ranges, referenceDefinitions).Parse()
|
||||
}
|
||||
|
||||
func MergeInlineText(inlines []Inline) []Inline {
|
||||
ret := inlines[:0]
|
||||
for i, v := range inlines {
|
||||
// always add first node
|
||||
if i == 0 {
|
||||
ret = append(ret, v)
|
||||
continue
|
||||
}
|
||||
// not a text node? nothing to merge
|
||||
text, ok := v.(*Text)
|
||||
if !ok {
|
||||
ret = append(ret, v)
|
||||
continue
|
||||
}
|
||||
// previous node is not a text node? nothing to merge
|
||||
prevText, ok := ret[len(ret)-1].(*Text)
|
||||
if !ok {
|
||||
ret = append(ret, v)
|
||||
continue
|
||||
}
|
||||
// previous node is not right before this one
|
||||
if prevText.Range.End != text.Range.Position {
|
||||
ret = append(ret, v)
|
||||
continue
|
||||
}
|
||||
// we have two consecutive text nodes
|
||||
ret[len(ret)-1] = &Text{
|
||||
Text: prevText.Text + text.Text,
|
||||
Range: Range{prevText.Range.Position, text.Range.End},
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func Unescape(markdown string) string {
|
||||
ret := ""
|
||||
|
||||
position := 0
|
||||
for position < len(markdown) {
|
||||
c, cSize := utf8.DecodeRuneInString(markdown[position:])
|
||||
|
||||
switch c {
|
||||
case '\\':
|
||||
if position+1 < len(markdown) && isEscapableByte(markdown[position+1]) {
|
||||
ret += string(markdown[position+1])
|
||||
position += 2
|
||||
} else {
|
||||
ret += `\`
|
||||
position++
|
||||
}
|
||||
case '&':
|
||||
position++
|
||||
if semicolon := strings.IndexByte(markdown[position:], ';'); semicolon == -1 {
|
||||
ret += "&"
|
||||
} else if s := CharacterReference(markdown[position : position+semicolon]); s != "" {
|
||||
position += semicolon + 1
|
||||
ret += s
|
||||
} else {
|
||||
ret += "&"
|
||||
}
|
||||
default:
|
||||
ret += string(c)
|
||||
position += cSize
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
78
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/inspect.go
generated
vendored
Normal file
78
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/inspect.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package markdown

// Inspect traverses the markdown tree in depth-first order. If f returns true, Inspect invokes f
// recursively for each child of the block or inline, followed by a call of f(nil).
func Inspect(markdown string, f func(interface{}) bool) {
	document, referenceDefinitions := Parse(markdown)
	InspectBlock(document, func(block Block) bool {
		if !f(block) {
			return false
		}
		switch v := block.(type) {
		case *Paragraph:
			for _, inline := range MergeInlineText(v.ParseInlines(referenceDefinitions)) {
				InspectInline(inline, func(inline Inline) bool {
					return f(inline)
				})
			}
		}
		return true
	})
}

// InspectBlock traverses the blocks in depth-first order, starting with block. If f returns true,
// InspectBlock invokes f recursively for each child of the block, followed by a call of f(nil).
func InspectBlock(block Block, f func(Block) bool) {
	if !f(block) {
		return
	}
	switch v := block.(type) {
	case *Document:
		for _, child := range v.Children {
			InspectBlock(child, f)
		}
	case *List:
		for _, child := range v.Children {
			InspectBlock(child, f)
		}
	case *ListItem:
		for _, child := range v.Children {
			InspectBlock(child, f)
		}
	case *BlockQuote:
		for _, child := range v.Children {
			InspectBlock(child, f)
		}
	}
	f(nil)
}

// InspectInline traverses the blocks in depth-first order, starting with block. If f returns true,
// InspectInline invokes f recursively for each child of the block, followed by a call of f(nil).
func InspectInline(inline Inline, f func(Inline) bool) {
	if !f(inline) {
		return
	}
	switch v := inline.(type) {
	case *InlineImage:
		for _, child := range v.Children {
			InspectInline(child, f)
		}
	case *InlineLink:
		for _, child := range v.Children {
			InspectInline(child, f)
		}
	case *ReferenceImage:
		for _, child := range v.Children {
			InspectInline(child, f)
		}
	case *ReferenceLink:
		for _, child := range v.Children {
			InspectInline(child, f)
		}
	}
	f(nil)
}
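A short sketch of how Inspect is typically driven, assuming the vendored import path; collecting inline image destinations is the kind of traversal the package documentation mentions (rewriting image URLs for proxying). The input string is illustrative only.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/shared/markdown"
)

func main() {
	// Walk every block and inline; f also receives the f(nil) terminators,
	// which the type assertion below simply ignores.
	var destinations []string
	markdown.Inspect("![logo](http://example.com/logo.png)", func(node interface{}) bool {
		if image, ok := node.(*markdown.InlineImage); ok {
			destinations = append(destinations, image.Destination())
		}
		return true
	})
	fmt.Println(destinations)
}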
32
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/lines.go
generated
vendored
Normal file
32
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/lines.go
generated
vendored
Normal file
@ -0,0 +1,32 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package markdown

import (
	"strings"
)

type Line struct {
	Range
}

func ParseLines(markdown string) []Line {
	lineStartPosition := 0
	isAfterCarriageReturn := false
	lines := make([]Line, 0, strings.Count(markdown, "\n"))
	for position, r := range markdown {
		if r == '\n' {
			lines = append(lines, Line{Range{lineStartPosition, position + 1}})
			lineStartPosition = position + 1
		} else if isAfterCarriageReturn {
			lines = append(lines, Line{Range{lineStartPosition, position}})
			lineStartPosition = position
		}
		isAfterCarriageReturn = r == '\r'
	}
	if lineStartPosition < len(markdown) {
		lines = append(lines, Line{Range{lineStartPosition, len(markdown)}})
	}
	return lines
}
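A small sketch of ParseLines, assuming the vendored import path: it returns byte ranges into the original string rather than copies, one Line per physical line, with both CRLF and bare CR treated as line breaks.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/shared/markdown"
)

func main() {
	src := "first\r\nsecond\nthird"
	for _, line := range markdown.ParseLines(src) {
		// Each Line embeds a Range; slice the source to recover the text,
		// trailing newline included ("first\r\n", "second\n", "third").
		fmt.Printf("%q\n", src[line.Position:line.End])
	}
}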
184
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/links.go
generated
vendored
Normal file
184
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/links.go
generated
vendored
Normal file
@ -0,0 +1,184 @@
|
||||
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
||||
// See LICENSE.txt for license information.
|
||||
|
||||
package markdown
|
||||
|
||||
import (
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
func parseLinkDestination(markdown string, position int) (raw Range, next int, ok bool) {
|
||||
if position >= len(markdown) {
|
||||
return
|
||||
}
|
||||
|
||||
if markdown[position] == '<' {
|
||||
isEscaped := false
|
||||
|
||||
for offset, c := range []byte(markdown[position+1:]) {
|
||||
if isEscaped {
|
||||
isEscaped = false
|
||||
if isEscapableByte(c) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if c == '\\' {
|
||||
isEscaped = true
|
||||
} else if c == '<' {
|
||||
break
|
||||
} else if c == '>' {
|
||||
return Range{position + 1, position + 1 + offset}, position + 1 + offset + 1, true
|
||||
} else if isWhitespaceByte(c) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
openCount := 0
|
||||
isEscaped := false
|
||||
for offset, c := range []byte(markdown[position:]) {
|
||||
if isEscaped {
|
||||
isEscaped = false
|
||||
if isEscapableByte(c) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
switch c {
|
||||
case '\\':
|
||||
isEscaped = true
|
||||
case '(':
|
||||
openCount++
|
||||
case ')':
|
||||
if openCount < 1 {
|
||||
return Range{position, position + offset}, position + offset, true
|
||||
}
|
||||
openCount--
|
||||
default:
|
||||
if isWhitespaceByte(c) {
|
||||
return Range{position, position + offset}, position + offset, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return Range{position, len(markdown)}, len(markdown), true
|
||||
}
|
||||
|
||||
func parseLinkTitle(markdown string, position int) (raw Range, next int, ok bool) {
|
||||
if position >= len(markdown) {
|
||||
return
|
||||
}
|
||||
|
||||
originalPosition := position
|
||||
|
||||
var closer byte
|
||||
switch markdown[position] {
|
||||
case '"', '\'':
|
||||
closer = markdown[position]
|
||||
case '(':
|
||||
closer = ')'
|
||||
default:
|
||||
return
|
||||
}
|
||||
position++
|
||||
|
||||
for position < len(markdown) {
|
||||
switch markdown[position] {
|
||||
case '\\':
|
||||
position++
|
||||
if position < len(markdown) && isEscapableByte(markdown[position]) {
|
||||
position++
|
||||
}
|
||||
case closer:
|
||||
return Range{originalPosition + 1, position}, position + 1, true
|
||||
default:
|
||||
position++
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func parseLinkLabel(markdown string, position int) (raw Range, next int, ok bool) {
|
||||
if position >= len(markdown) || markdown[position] != '[' {
|
||||
return
|
||||
}
|
||||
|
||||
originalPosition := position
|
||||
position++
|
||||
|
||||
for position < len(markdown) {
|
||||
switch markdown[position] {
|
||||
case '\\':
|
||||
position++
|
||||
if position < len(markdown) && isEscapableByte(markdown[position]) {
|
||||
position++
|
||||
}
|
||||
case '[':
|
||||
return
|
||||
case ']':
|
||||
if position-originalPosition >= 1000 && utf8.RuneCountInString(markdown[originalPosition:position]) >= 1000 {
|
||||
return
|
||||
}
|
||||
return Range{originalPosition + 1, position}, position + 1, true
|
||||
default:
|
||||
position++
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// As a non-standard feature, we allow image links to specify dimensions of the image by adding "=WIDTHxHEIGHT"
|
||||
// after the image destination but before the image title like .
|
||||
// Both width and height are optional, but at least one of them must be specified.
|
||||
func parseImageDimensions(markdown string, position int) (raw Range, next int, ok bool) {
|
||||
if position >= len(markdown) {
|
||||
return
|
||||
}
|
||||
|
||||
originalPosition := position
|
||||
|
||||
// Read =
|
||||
position += 1
|
||||
if position >= len(markdown) {
|
||||
return
|
||||
}
|
||||
|
||||
// Read width
|
||||
hasWidth := false
|
||||
for position < len(markdown)-1 && isNumericByte(markdown[position]) {
|
||||
hasWidth = true
|
||||
position += 1
|
||||
}
|
||||
|
||||
// Look for early end of dimensions
|
||||
if isWhitespaceByte(markdown[position]) || markdown[position] == ')' {
|
||||
return Range{originalPosition, position - 1}, position, true
|
||||
}
|
||||
|
||||
// Read the x
|
||||
if (markdown[position] != 'x' && markdown[position] != 'X') || position == len(markdown)-1 {
|
||||
return
|
||||
}
|
||||
position += 1
|
||||
|
||||
// Read height
|
||||
hasHeight := false
|
||||
for position < len(markdown)-1 && isNumericByte(markdown[position]) {
|
||||
hasHeight = true
|
||||
position += 1
|
||||
}
|
||||
|
||||
// Make sure the there's no trailing characters
|
||||
if !isWhitespaceByte(markdown[position]) && markdown[position] != ')' {
|
||||
return
|
||||
}
|
||||
|
||||
if !hasWidth && !hasHeight {
|
||||
// At least one of width or height is required
|
||||
return
|
||||
}
|
||||
|
||||
return Range{originalPosition, position - 1}, position, true
|
||||
}
|
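The comment above parseImageDimensions lost its inline example during extraction; the syntax it describes is presumably of the form ![alt](http://example.com/image.png =100x200 "title"). A hedged sketch of how such input comes out of the exported renderer (the URL, dimensions, and title below are made-up values): the size suffix is parsed and validated but not carried into the output.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/shared/markdown"
)

func main() {
	// Non-standard size suffix between the destination and the title; it is
	// consumed by parseImageDimensions and then dropped.
	src := `![alt](http://example.com/image.png =100x200 "title")`
	fmt.Println(markdown.RenderHTML(src))
	// Expected shape: <p><img src="http://example.com/image.png" alt="alt" title="title" /></p>
}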
220
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/list.go
generated
vendored
Normal file
220
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/list.go
generated
vendored
Normal file
@ -0,0 +1,220 @@
|
||||
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
||||
// See LICENSE.txt for license information.
|
||||
|
||||
package markdown
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
type ListItem struct {
|
||||
blockBase
|
||||
markdown string
|
||||
hasTrailingBlankLine bool
|
||||
hasBlankLineBetweenChildren bool
|
||||
|
||||
Indentation int
|
||||
Children []Block
|
||||
}
|
||||
|
||||
func (b *ListItem) Continuation(indentation int, r Range) *continuation {
|
||||
s := b.markdown[r.Position:r.End]
|
||||
if strings.TrimSpace(s) == "" {
|
||||
if b.Children == nil {
|
||||
return nil
|
||||
}
|
||||
return &continuation{
|
||||
Remaining: r,
|
||||
}
|
||||
}
|
||||
if indentation < b.Indentation {
|
||||
return nil
|
||||
}
|
||||
return &continuation{
|
||||
Indentation: indentation - b.Indentation,
|
||||
Remaining: r,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *ListItem) AddChild(openBlocks []Block) []Block {
|
||||
b.Children = append(b.Children, openBlocks[0])
|
||||
if b.hasTrailingBlankLine {
|
||||
b.hasBlankLineBetweenChildren = true
|
||||
}
|
||||
b.hasTrailingBlankLine = false
|
||||
return openBlocks
|
||||
}
|
||||
|
||||
func (b *ListItem) AddLine(indentation int, r Range) bool {
|
||||
isBlank := strings.TrimSpace(b.markdown[r.Position:r.End]) == ""
|
||||
if isBlank {
|
||||
b.hasTrailingBlankLine = true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (b *ListItem) HasTrailingBlankLine() bool {
|
||||
return b.hasTrailingBlankLine || (len(b.Children) > 0 && b.Children[len(b.Children)-1].HasTrailingBlankLine())
|
||||
}
|
||||
|
||||
func (b *ListItem) isLoose() bool {
|
||||
if b.hasBlankLineBetweenChildren {
|
||||
return true
|
||||
}
|
||||
for i, child := range b.Children {
|
||||
if i < len(b.Children)-1 && child.HasTrailingBlankLine() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type List struct {
|
||||
blockBase
|
||||
markdown string
|
||||
hasTrailingBlankLine bool
|
||||
hasBlankLineBetweenChildren bool
|
||||
|
||||
IsLoose bool
|
||||
IsOrdered bool
|
||||
OrderedStart int
|
||||
BulletOrDelimiter byte
|
||||
Children []*ListItem
|
||||
}
|
||||
|
||||
func (b *List) Continuation(indentation int, r Range) *continuation {
|
||||
s := b.markdown[r.Position:r.End]
|
||||
if strings.TrimSpace(s) == "" {
|
||||
return &continuation{
|
||||
Remaining: r,
|
||||
}
|
||||
}
|
||||
return &continuation{
|
||||
Indentation: indentation,
|
||||
Remaining: r,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *List) AddChild(openBlocks []Block) []Block {
|
||||
if item, ok := openBlocks[0].(*ListItem); ok {
|
||||
b.Children = append(b.Children, item)
|
||||
if b.hasTrailingBlankLine {
|
||||
b.hasBlankLineBetweenChildren = true
|
||||
}
|
||||
b.hasTrailingBlankLine = false
|
||||
return openBlocks
|
||||
} else if list, ok := openBlocks[0].(*List); ok {
|
||||
if len(list.Children) == 1 && list.IsOrdered == b.IsOrdered && list.BulletOrDelimiter == b.BulletOrDelimiter {
|
||||
return b.AddChild(openBlocks[1:])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *List) AddLine(indentation int, r Range) bool {
|
||||
isBlank := strings.TrimSpace(b.markdown[r.Position:r.End]) == ""
|
||||
if isBlank {
|
||||
b.hasTrailingBlankLine = true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (b *List) HasTrailingBlankLine() bool {
|
||||
return b.hasTrailingBlankLine || (len(b.Children) > 0 && b.Children[len(b.Children)-1].HasTrailingBlankLine())
|
||||
}
|
||||
|
||||
func (b *List) isLoose() bool {
|
||||
if b.hasBlankLineBetweenChildren {
|
||||
return true
|
||||
}
|
||||
for i, child := range b.Children {
|
||||
if child.isLoose() || (i < len(b.Children)-1 && child.HasTrailingBlankLine()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (b *List) Close() {
|
||||
b.IsLoose = b.isLoose()
|
||||
}
|
||||
|
||||
func parseListMarker(markdown string, r Range) (success, isOrdered bool, orderedStart int, bulletOrDelimiter byte, markerWidth int, remaining Range) {
|
||||
digits := 0
|
||||
n := 0
|
||||
for i := r.Position; i < r.End && markdown[i] >= '0' && markdown[i] <= '9'; i++ {
|
||||
digits++
|
||||
n = n*10 + int(markdown[i]-'0')
|
||||
}
|
||||
if digits > 0 {
|
||||
if digits > 9 || r.Position+digits >= r.End {
|
||||
return
|
||||
}
|
||||
next := markdown[r.Position+digits]
|
||||
if next != '.' && next != ')' {
|
||||
return
|
||||
}
|
||||
return true, true, n, next, digits + 1, Range{r.Position + digits + 1, r.End}
|
||||
}
|
||||
if r.Position >= r.End {
|
||||
return
|
||||
}
|
||||
next := markdown[r.Position]
|
||||
if next != '-' && next != '+' && next != '*' {
|
||||
return
|
||||
}
|
||||
return true, false, 0, next, 1, Range{r.Position + 1, r.End}
|
||||
}
|
||||
|
||||
func listStart(markdown string, indent int, r Range, matchedBlocks, unmatchedBlocks []Block) []Block {
|
||||
afterList := false
|
||||
if len(matchedBlocks) > 0 {
|
||||
_, afterList = matchedBlocks[len(matchedBlocks)-1].(*List)
|
||||
}
|
||||
if !afterList && indent > 3 {
|
||||
return nil
|
||||
}
|
||||
|
||||
success, isOrdered, orderedStart, bulletOrDelimiter, markerWidth, remaining := parseListMarker(markdown, r)
|
||||
if !success {
|
||||
return nil
|
||||
}
|
||||
|
||||
isBlank := strings.TrimSpace(markdown[remaining.Position:remaining.End]) == ""
|
||||
if len(matchedBlocks) > 0 && len(unmatchedBlocks) == 0 {
|
||||
if _, ok := matchedBlocks[len(matchedBlocks)-1].(*Paragraph); ok {
|
||||
if isBlank || (isOrdered && orderedStart != 1) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
indentAfterMarker, indentBytesAfterMarker := countIndentation(markdown, remaining)
|
||||
if !isBlank && indentAfterMarker < 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
remaining = Range{remaining.Position + indentBytesAfterMarker, remaining.End}
|
||||
consumedIndentAfterMarker := indentAfterMarker
|
||||
if isBlank || indentAfterMarker >= 5 {
|
||||
consumedIndentAfterMarker = 1
|
||||
}
|
||||
|
||||
listItem := &ListItem{
|
||||
markdown: markdown,
|
||||
Indentation: indent + markerWidth + consumedIndentAfterMarker,
|
||||
}
|
||||
list := &List{
|
||||
markdown: markdown,
|
||||
IsOrdered: isOrdered,
|
||||
OrderedStart: orderedStart,
|
||||
BulletOrDelimiter: bulletOrDelimiter,
|
||||
Children: []*ListItem{listItem},
|
||||
}
|
||||
ret := []Block{list, listItem}
|
||||
if descendants := blockStartOrParagraph(markdown, indentAfterMarker-consumedIndentAfterMarker, remaining, nil, nil); descendants != nil {
|
||||
listItem.Children = append(listItem.Children, descendants[0])
|
||||
ret = append(ret, descendants...)
|
||||
}
|
||||
return ret
|
||||
}
|
147
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/markdown.go
generated
vendored
Normal file
147
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/markdown.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
||||
// See LICENSE.txt for license information.
|
||||
|
||||
// This package implements a parser for the subset of the CommonMark spec necessary for us to do
|
||||
// server-side processing. It is not a full implementation and lacks many features. But it is
|
||||
// complete enough to efficiently and accurately allow us to do what we need to like rewrite image
|
||||
// URLs for proxying.
|
||||
package markdown
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func isEscapable(c rune) bool {
|
||||
return c > ' ' && (c < '0' || (c > '9' && (c < 'A' || (c > 'Z' && (c < 'a' || (c > 'z' && c <= '~'))))))
|
||||
}
|
||||
|
||||
func isEscapableByte(c byte) bool {
|
||||
return isEscapable(rune(c))
|
||||
}
|
||||
|
||||
func isWhitespace(c rune) bool {
|
||||
switch c {
|
||||
case ' ', '\t', '\n', '\u000b', '\u000c', '\r':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespaceByte(c byte) bool {
|
||||
return isWhitespace(rune(c))
|
||||
}
|
||||
|
||||
func isNumeric(c rune) bool {
|
||||
return c >= '0' && c <= '9'
|
||||
}
|
||||
|
||||
func isNumericByte(c byte) bool {
|
||||
return isNumeric(rune(c))
|
||||
}
|
||||
|
||||
func isHex(c rune) bool {
|
||||
return isNumeric(c) || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F')
|
||||
}
|
||||
|
||||
func isHexByte(c byte) bool {
|
||||
return isHex(rune(c))
|
||||
}
|
||||
|
||||
func isAlphanumeric(c rune) bool {
|
||||
return isNumeric(c) || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
|
||||
}
|
||||
|
||||
func isAlphanumericByte(c byte) bool {
|
||||
return isAlphanumeric(rune(c))
|
||||
}
|
||||
|
||||
func nextNonWhitespace(markdown string, position int) int {
|
||||
for offset, c := range []byte(markdown[position:]) {
|
||||
if !isWhitespaceByte(c) {
|
||||
return position + offset
|
||||
}
|
||||
}
|
||||
return len(markdown)
|
||||
}
|
||||
|
||||
func nextLine(markdown string, position int) (linePosition int, skippedNonWhitespace bool) {
|
||||
for i := position; i < len(markdown); i++ {
|
||||
c := markdown[i]
|
||||
if c == '\r' {
|
||||
if i+1 < len(markdown) && markdown[i+1] == '\n' {
|
||||
return i + 2, skippedNonWhitespace
|
||||
}
|
||||
return i + 1, skippedNonWhitespace
|
||||
} else if c == '\n' {
|
||||
return i + 1, skippedNonWhitespace
|
||||
} else if !isWhitespaceByte(c) {
|
||||
skippedNonWhitespace = true
|
||||
}
|
||||
}
|
||||
return len(markdown), skippedNonWhitespace
|
||||
}
|
||||
|
||||
func countIndentation(markdown string, r Range) (spaces, bytes int) {
|
||||
for i := r.Position; i < r.End; i++ {
|
||||
if markdown[i] == ' ' {
|
||||
spaces++
|
||||
bytes++
|
||||
} else if markdown[i] == '\t' {
|
||||
spaces += 4
|
||||
bytes++
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func trimLeftSpace(markdown string, r Range) Range {
|
||||
s := markdown[r.Position:r.End]
|
||||
trimmed := strings.TrimLeftFunc(s, isWhitespace)
|
||||
return Range{r.Position, r.End - (len(s) - len(trimmed))}
|
||||
}
|
||||
|
||||
func trimRightSpace(markdown string, r Range) Range {
|
||||
s := markdown[r.Position:r.End]
|
||||
trimmed := strings.TrimRightFunc(s, isWhitespace)
|
||||
return Range{r.Position, r.End - (len(s) - len(trimmed))}
|
||||
}
|
||||
|
||||
func relativeToAbsolutePosition(ranges []Range, position int) int {
|
||||
rem := position
|
||||
for _, r := range ranges {
|
||||
l := r.End - r.Position
|
||||
if rem < l {
|
||||
return r.Position + rem
|
||||
}
|
||||
rem -= l
|
||||
}
|
||||
if len(ranges) == 0 {
|
||||
return 0
|
||||
}
|
||||
return ranges[len(ranges)-1].End
|
||||
}
|
||||
|
||||
func trimBytesFromRanges(ranges []Range, bytes int) (result []Range) {
|
||||
rem := bytes
|
||||
for _, r := range ranges {
|
||||
if rem == 0 {
|
||||
result = append(result, r)
|
||||
continue
|
||||
}
|
||||
l := r.End - r.Position
|
||||
if rem < l {
|
||||
result = append(result, Range{r.Position + rem, r.End})
|
||||
rem = 0
|
||||
continue
|
||||
}
|
||||
rem -= l
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func Parse(markdown string) (*Document, []*ReferenceDefinition) {
|
||||
lines := ParseLines(markdown)
|
||||
return ParseBlocks(markdown, lines)
|
||||
}
|
71
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/paragraph.go
generated
vendored
Normal file
71
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/paragraph.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package markdown

import (
	"strings"
)

type Paragraph struct {
	blockBase
	markdown string

	Text                 []Range
	ReferenceDefinitions []*ReferenceDefinition
}

func (b *Paragraph) ParseInlines(referenceDefinitions []*ReferenceDefinition) []Inline {
	return ParseInlines(b.markdown, b.Text, referenceDefinitions)
}

func (b *Paragraph) Continuation(indentation int, r Range) *continuation {
	s := b.markdown[r.Position:r.End]
	if strings.TrimSpace(s) == "" {
		return nil
	}
	return &continuation{
		Indentation: indentation,
		Remaining:   r,
	}
}

func (b *Paragraph) Close() {
	for {
		for i := 0; i < len(b.Text); i++ {
			b.Text[i] = trimLeftSpace(b.markdown, b.Text[i])
			if b.Text[i].Position < b.Text[i].End {
				break
			}
		}

		if len(b.Text) == 0 || b.Text[0].Position < b.Text[0].End && b.markdown[b.Text[0].Position] != '[' {
			break
		}

		definition, remaining := parseReferenceDefinition(b.markdown, b.Text)
		if definition == nil {
			break
		}
		b.ReferenceDefinitions = append(b.ReferenceDefinitions, definition)
		b.Text = remaining
	}

	for i := len(b.Text) - 1; i >= 0; i-- {
		b.Text[i] = trimRightSpace(b.markdown, b.Text[i])
		if b.Text[i].Position < b.Text[i].End {
			break
		}
	}
}

func newParagraph(markdown string, r Range) *Paragraph {
	s := markdown[r.Position:r.End]
	if strings.TrimSpace(s) == "" {
		return nil
	}
	return &Paragraph{
		markdown: markdown,
		Text:     []Range{r},
	}
}
75
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/reference_definition.go
generated
vendored
Normal file
75
vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/reference_definition.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package markdown

type ReferenceDefinition struct {
	RawDestination Range

	markdown string
	rawLabel string
	rawTitle string
}

func (d *ReferenceDefinition) Destination() string {
	return Unescape(d.markdown[d.RawDestination.Position:d.RawDestination.End])
}

func (d *ReferenceDefinition) Label() string {
	return d.rawLabel
}

func (d *ReferenceDefinition) Title() string {
	return Unescape(d.rawTitle)
}

func parseReferenceDefinition(markdown string, ranges []Range) (*ReferenceDefinition, []Range) {
	raw := ""
	for _, r := range ranges {
		raw += markdown[r.Position:r.End]
	}

	label, next, ok := parseLinkLabel(raw, 0)
	if !ok {
		return nil, nil
	}
	position := next

	if position >= len(raw) || raw[position] != ':' {
		return nil, nil
	}
	position++

	destination, next, ok := parseLinkDestination(raw, nextNonWhitespace(raw, position))
	if !ok {
		return nil, nil
	}
	position = next

	absoluteDestination := relativeToAbsolutePosition(ranges, destination.Position)
	ret := &ReferenceDefinition{
		RawDestination: Range{absoluteDestination, absoluteDestination + destination.End - destination.Position},
		markdown:       markdown,
		rawLabel:       raw[label.Position:label.End],
	}

	if position < len(raw) && isWhitespaceByte(raw[position]) {
		title, next, ok := parseLinkTitle(raw, nextNonWhitespace(raw, position))
		if !ok {
			if nextLine, skippedNonWhitespace := nextLine(raw, position); !skippedNonWhitespace {
				return ret, trimBytesFromRanges(ranges, nextLine)
			}
			return nil, nil
		}
		if nextLine, skippedNonWhitespace := nextLine(raw, next); !skippedNonWhitespace {
			ret.rawTitle = raw[title.Position:title.End]
			return ret, trimBytesFromRanges(ranges, nextLine)
		}
	}

	if nextLine, skippedNonWhitespace := nextLine(raw, position); !skippedNonWhitespace {
		return ret, trimBytesFromRanges(ranges, nextLine)
	}

	return nil, nil
}
63
vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/default.go
generated
vendored
Normal file
63
vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/default.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package mlog

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
)

// defaultLog manually encodes the log to STDERR, providing a basic, default logging implementation
// before mlog is fully configured.
func defaultLog(level Level, msg string, fields ...Field) {
	mFields := make(map[string]string)
	buf := &bytes.Buffer{}

	for _, fld := range fields {
		buf.Reset()
		fld.ValueString(buf, shouldQuote)
		mFields[fld.Key] = buf.String()
	}

	log := struct {
		Level   string            `json:"level"`
		Message string            `json:"msg"`
		Fields  map[string]string `json:"fields,omitempty"`
	}{
		level.Name,
		msg,
		mFields,
	}

	if b, err := json.Marshal(log); err != nil {
		fmt.Fprintf(os.Stderr, `{"level":"error","msg":"failed to encode log message"}%s`, "\n")
	} else {
		fmt.Fprintf(os.Stderr, "%s\n", b)
	}
}

func defaultIsLevelEnabled(level Level) bool {
	return true
}

func defaultCustomMultiLog(lvl []Level, msg string, fields ...Field) {
	for _, level := range lvl {
		defaultLog(level, msg, fields...)
	}
}

// shouldQuote returns true if val contains any characters that require quotations.
func shouldQuote(val string) bool {
	for _, c := range val {
		if !((c >= '0' && c <= '9') ||
			(c >= 'a' && c <= 'z') ||
			(c >= 'A' && c <= 'Z') ||
			c == '-' || c == '.' || c == '_' || c == '/' || c == '@' || c == '^' || c == '+') {
			return true
		}
	}
	return false
}
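For reference, a standalone sketch of the record shape defaultLog marshals: one JSON object per line on stderr, with every field value pre-rendered to a string. The level, message, and field values below are made up for illustration.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	// Mirror of the anonymous struct defaultLog encodes above.
	record := struct {
		Level   string            `json:"level"`
		Message string            `json:"msg"`
		Fields  map[string]string `json:"fields,omitempty"`
	}{"error", "failed to connect", map[string]string{"addr": "localhost:8065"}}

	b, _ := json.Marshal(record)
	fmt.Fprintf(os.Stderr, "%s\n", b)
	// {"level":"error","msg":"failed to connect","fields":{"addr":"localhost:8065"}}
}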
132
vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/global.go
generated
vendored
Normal file
132
vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/global.go
generated
vendored
Normal file
@ -0,0 +1,132 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package mlog

import (
	"sync"
)

var (
	globalLogger    *Logger
	muxGlobalLogger sync.RWMutex
)

func InitGlobalLogger(logger *Logger) {
	muxGlobalLogger.Lock()
	defer muxGlobalLogger.Unlock()

	globalLogger = logger
}

func getGlobalLogger() *Logger {
	muxGlobalLogger.RLock()
	defer muxGlobalLogger.RUnlock()

	return globalLogger
}

// IsLevelEnabled returns true only if at least one log target is
// configured to emit the specified log level. Use this check when
// gathering the log info may be expensive.
//
// Note, transformations and serializations done via fields are already
// lazily evaluated and don't require this check beforehand.
func IsLevelEnabled(level Level) bool {
	logger := getGlobalLogger()
	if logger == nil {
		return defaultIsLevelEnabled(level)
	}
	return logger.IsLevelEnabled(level)
}

// Log emits the log record for any targets configured for the specified level.
func Log(level Level, msg string, fields ...Field) {
	logger := getGlobalLogger()
	if logger == nil {
		defaultLog(level, msg, fields...)
		return
	}
	logger.Log(level, msg, fields...)
}

// LogM emits the log record for any targets configured for the specified levels.
// Equivalent to calling `Log` once for each level.
func LogM(levels []Level, msg string, fields ...Field) {
	logger := getGlobalLogger()
	if logger == nil {
		defaultCustomMultiLog(levels, msg, fields...)
		return
	}
	logger.LogM(levels, msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Trace` level.
func Trace(msg string, fields ...Field) {
	logger := getGlobalLogger()
	if logger == nil {
		defaultLog(LvlTrace, msg, fields...)
		return
	}
	logger.Trace(msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Debug` level.
func Debug(msg string, fields ...Field) {
	logger := getGlobalLogger()
	if logger == nil {
		defaultLog(LvlDebug, msg, fields...)
		return
	}
	logger.Debug(msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Info` level.
func Info(msg string, fields ...Field) {
	logger := getGlobalLogger()
	if logger == nil {
		defaultLog(LvlInfo, msg, fields...)
		return
	}
	logger.Info(msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Warn` level.
func Warn(msg string, fields ...Field) {
	logger := getGlobalLogger()
	if logger == nil {
		defaultLog(LvlWarn, msg, fields...)
		return
	}
	logger.Warn(msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Error` level.
func Error(msg string, fields ...Field) {
	logger := getGlobalLogger()
	if logger == nil {
		defaultLog(LvlError, msg, fields...)
		return
	}
	logger.Error(msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Critical` level.
// DEPRECATED: Either use Error or Fatal.
func Critical(msg string, fields ...Field) {
	logger := getGlobalLogger()
	if logger == nil {
		defaultLog(LvlCritical, msg, fields...)
		return
	}
	logger.Critical(msg, fields...)
}

func Fatal(msg string, fields ...Field) {
	logger := getGlobalLogger()
	if logger == nil {
		defaultLog(LvlFatal, msg, fields...)
		return
	}
	logger.Fatal(msg, fields...)
}
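These package-level helpers delegate to whichever *Logger was registered via InitGlobalLogger and otherwise fall back to the stderr default shown above. A rough usage sketch, not part of the vendored source; it borrows CreateConsoleTestLogger from tlog.go further down, and the message and field values are invented for illustration:

package main

import "github.com/mattermost/mattermost-server/v6/shared/mlog"

func main() {
	// JSON output to stdout at Debug level and above (see tlog.go).
	logger := mlog.CreateConsoleTestLogger(true, mlog.LvlDebug)
	defer logger.Shutdown()

	// Register the logger so the package-level functions use it.
	mlog.InitGlobalLogger(logger)

	mlog.Info("user logged in", mlog.String("user_id", "abc123"))

	// Guard expensive field construction behind a level check.
	if mlog.IsLevelEnabled(mlog.LvlDebug) {
		mlog.Debug("request dump", mlog.String("body", "..."))
	}
}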
58
vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/levels.go
generated
vendored
Normal file
58
vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/levels.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package mlog

import "github.com/mattermost/logr/v2"

// Standard levels.
var (
	LvlPanic = logr.Panic // ID = 0
	LvlFatal = logr.Fatal // ID = 1
	LvlError = logr.Error // ID = 2
	LvlWarn  = logr.Warn  // ID = 3
	LvlInfo  = logr.Info  // ID = 4
	LvlDebug = logr.Debug // ID = 5
	LvlTrace = logr.Trace // ID = 6
	StdAll   = []Level{LvlPanic, LvlFatal, LvlError, LvlWarn, LvlInfo, LvlDebug, LvlTrace}
	// non-standard "critical" level
	LvlCritical = Level{ID: 7, Name: "critical"}
	// used by redirected standard logger
	LvlStdLog = Level{ID: 10, Name: "stdlog"}
	// used only by the logger
	LvlLogError = Level{ID: 11, Name: "logerror", Stacktrace: true}
)

// Register custom (discrete) levels here.
// !!!!! Custom ID's must be between 20 and 32,768 !!!!!!
var (
	// used by the audit system
	LvlAuditAPI     = Level{ID: 100, Name: "audit-api"}
	LvlAuditContent = Level{ID: 101, Name: "audit-content"}
	LvlAuditPerms   = Level{ID: 102, Name: "audit-permissions"}
	LvlAuditCLI     = Level{ID: 103, Name: "audit-cli"}

	// used by the TCP log target
	LvlTCPLogTarget = Level{ID: 120, Name: "TcpLogTarget"}

	// used by Remote Cluster Service
	LvlRemoteClusterServiceDebug = Level{ID: 130, Name: "RemoteClusterServiceDebug"}
	LvlRemoteClusterServiceError = Level{ID: 131, Name: "RemoteClusterServiceError"}
	LvlRemoteClusterServiceWarn  = Level{ID: 132, Name: "RemoteClusterServiceWarn"}

	// used by Shared Channel Sync Service
	LvlSharedChannelServiceDebug            = Level{ID: 200, Name: "SharedChannelServiceDebug"}
	LvlSharedChannelServiceError            = Level{ID: 201, Name: "SharedChannelServiceError"}
	LvlSharedChannelServiceWarn             = Level{ID: 202, Name: "SharedChannelServiceWarn"}
	LvlSharedChannelServiceMessagesInbound  = Level{ID: 203, Name: "SharedChannelServiceMsgInbound"}
	LvlSharedChannelServiceMessagesOutbound = Level{ID: 204, Name: "SharedChannelServiceMsgOutbound"}

	// Focalboard
	LvlFBTelemetry = Level{ID: 9000, Name: "telemetry"}
	LvlFBMetrics   = Level{ID: 9001, Name: "metrics"}
)

// Combinations for LogM (log multi).
var (
	MLvlAuditAll = []Level{LvlAuditAPI, LvlAuditContent, LvlAuditPerms, LvlAuditCLI}
)
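Custom levels behave like the standard ones: a record is only emitted if some target is configured for that level, which is why an IsLevelEnabled guard is useful before building expensive fields. A tentative sketch, not part of the vendored source; the logger setup and field values are invented, and with a plain console test logger the audit levels below would typically be filtered out rather than printed:

package main

import "github.com/mattermost/mattermost-server/v6/shared/mlog"

func main() {
	logger := mlog.CreateConsoleTestLogger(false, mlog.LvlInfo)
	defer logger.Shutdown()

	// Emit one record to every audit level in a single call.
	logger.LogM(mlog.MLvlAuditAll, "permission changed",
		mlog.String("actor", "admin"),
		mlog.String("target", "channel-xyz"))

	// Only do the work if some target actually accepts this custom level.
	if logger.IsLevelEnabled(mlog.LvlAuditCLI) {
		logger.Log(mlog.LvlAuditCLI, "cli command executed", mlog.String("cmd", "export"))
	}
}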
419
vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/mlog.go
generated
vendored
Normal file
419
vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/mlog.go
generated
vendored
Normal file
@ -0,0 +1,419 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

// Package mlog provides a simple wrapper around Logr.
package mlog

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"
	"sync/atomic"
	"time"

	"github.com/mattermost/logr/v2"
	logrcfg "github.com/mattermost/logr/v2/config"
)

const (
	ShutdownTimeout                = time.Second * 15
	FlushTimeout                   = time.Second * 15
	DefaultMaxQueueSize            = 1000
	DefaultMetricsUpdateFreqMillis = 15000
)

type LoggerIFace interface {
	IsLevelEnabled(Level) bool
	Debug(string, ...Field)
	Info(string, ...Field)
	Warn(string, ...Field)
	Error(string, ...Field)
	Critical(string, ...Field)
	Log(Level, string, ...Field)
	LogM([]Level, string, ...Field)
}

// Type and function aliases from Logr to limit the spread of dependencies.
type Field = logr.Field
type Level = logr.Level
type Option = logr.Option
type Target = logr.Target
type TargetInfo = logr.TargetInfo
type LogRec = logr.LogRec
type LogCloner = logr.LogCloner
type MetricsCollector = logr.MetricsCollector
type TargetCfg = logrcfg.TargetCfg
type Sugar = logr.Sugar

// LoggerConfiguration is a map of LogTarget configurations.
type LoggerConfiguration map[string]TargetCfg

func (lc LoggerConfiguration) Append(cfg LoggerConfiguration) {
	for k, v := range cfg {
		lc[k] = v
	}
}

func (lc LoggerConfiguration) toTargetCfg() map[string]logrcfg.TargetCfg {
	tcfg := make(map[string]logrcfg.TargetCfg)
	for k, v := range lc {
		tcfg[k] = v
	}
	return tcfg
}

// Any picks the best supported field type based on type of val.
// For best performance when passing a struct (or struct pointer),
// implement `logr.LogWriter` on the struct, otherwise reflection
// will be used to generate a string representation.
var Any = logr.Any

// Int64 constructs a field containing a key and Int64 value.
var Int64 = logr.Int64

// Int32 constructs a field containing a key and Int32 value.
var Int32 = logr.Int32

// Int constructs a field containing a key and Int value.
var Int = logr.Int

// Uint64 constructs a field containing a key and Uint64 value.
var Uint64 = logr.Uint64

// Uint32 constructs a field containing a key and Uint32 value.
var Uint32 = logr.Uint32

// Uint constructs a field containing a key and Uint value.
var Uint = logr.Uint

// Float64 constructs a field containing a key and Float64 value.
var Float64 = logr.Float64

// Float32 constructs a field containing a key and Float32 value.
var Float32 = logr.Float32

// String constructs a field containing a key and String value.
var String = logr.String

// Stringer constructs a field containing a key and a fmt.Stringer value.
// The fmt.Stringer's `String` method is called lazily.
var Stringer = func(key string, s fmt.Stringer) logr.Field {
	if s == nil {
		return Field{Key: key, Type: logr.StringType, String: ""}
	}
	return Field{Key: key, Type: logr.StringType, String: s.String()}
}

// Err constructs a field containing a default key ("error") and error value.
var Err = func(err error) logr.Field {
	return NamedErr("error", err)
}

// NamedErr constructs a field containing a key and error value.
var NamedErr = func(key string, err error) logr.Field {
	if err == nil {
		return Field{Key: key, Type: logr.StringType, String: ""}
	}
	return Field{Key: key, Type: logr.StringType, String: err.Error()}
}

// Bool constructs a field containing a key and bool value.
var Bool = logr.Bool

// Time constructs a field containing a key and time.Time value.
var Time = logr.Time

// Duration constructs a field containing a key and time.Duration value.
var Duration = logr.Duration

// Millis constructs a field containing a key and timestamp value.
// The timestamp is expected to be milliseconds since Jan 1, 1970 UTC.
var Millis = logr.Millis

// Array constructs a field containing a key and array value.
var Array = logr.Array

// Map constructs a field containing a key and map value.
var Map = logr.Map

// Logger provides a thin wrapper around a Logr instance. This is a struct instead of an interface
// so that there are no allocations on the heap each interface method invocation. Normally not
// something to be concerned about, but logging calls for disabled levels should have as little CPU
// and memory impact as possible. Most of these wrapper calls will be inlined as well.
type Logger struct {
	log        *logr.Logger
	lockConfig *int32
}

// NewLogger creates a new Logger instance which can be configured via `(*Logger).Configure`.
// Some options with invalid values can cause an error to be returned, however `NewLogger()`
// using just defaults never errors.
func NewLogger(options ...Option) (*Logger, error) {
	options = append(options, logr.StackFilter(logr.GetPackageName("NewLogger")))

	lgr, err := logr.New(options...)
	if err != nil {
		return nil, err
	}

	log := lgr.NewLogger()
	var lockConfig int32

	return &Logger{
		log:        &log,
		lockConfig: &lockConfig,
	}, nil
}

// Configure provides a new configuration for this logger.
// Zero or more sources of config can be provided:
//   cfgFile    - path to file containing JSON
//   cfgEscaped - JSON string probably from ENV var
//
// For each case JSON containing log targets is provided. Target name collisions are resolved
// using the following precedence:
//   cfgFile > cfgEscaped
func (l *Logger) Configure(cfgFile string, cfgEscaped string) error {
	if atomic.LoadInt32(l.lockConfig) != 0 {
		return ErrConfigurationLock
	}

	cfgMap := make(LoggerConfiguration)

	// Add config from file
	if cfgFile != "" {
		b, err := ioutil.ReadFile(cfgFile)
		if err != nil {
			return fmt.Errorf("error reading logger config file %s: %w", cfgFile, err)
		}

		var mapCfgFile LoggerConfiguration
		if err := json.Unmarshal(b, &mapCfgFile); err != nil {
			return fmt.Errorf("error decoding logger config file %s: %w", cfgFile, err)
		}
		cfgMap.Append(mapCfgFile)
	}

	// Add config from escaped json string
	if cfgEscaped != "" {
		var mapCfgEscaped LoggerConfiguration
		if err := json.Unmarshal([]byte(cfgEscaped), &mapCfgEscaped); err != nil {
			return fmt.Errorf("error decoding logger config as escaped json: %w", err)
		}
		cfgMap.Append(mapCfgEscaped)
	}

	if len(cfgMap) == 0 {
		return nil
	}

	return logrcfg.ConfigureTargets(l.log.Logr(), cfgMap.toTargetCfg(), nil)
}

// ConfigureTargets provides a new configuration for this logger via a `LoggerConfiguration` map.
// Typically `mlog.Configure` is used instead, which accepts JSON formatted configuration.
func (l *Logger) ConfigureTargets(cfg LoggerConfiguration) error {
	if atomic.LoadInt32(l.lockConfig) != 0 {
		return ErrConfigurationLock
	}
	return logrcfg.ConfigureTargets(l.log.Logr(), cfg.toTargetCfg(), nil)
}

// LockConfiguration disallows further configuration changes until `UnlockConfiguration`
// is called. The previous locked state is returned.
func (l *Logger) LockConfiguration() bool {
	old := atomic.SwapInt32(l.lockConfig, 1)
	return old != 0
}

// UnlockConfiguration allows configuration changes. The previous locked state is returned.
func (l *Logger) UnlockConfiguration() bool {
	old := atomic.SwapInt32(l.lockConfig, 0)
	return old != 0
}

// IsConfigurationLocked returns the current state of the configuration lock.
func (l *Logger) IsConfigurationLocked() bool {
	return atomic.LoadInt32(l.lockConfig) != 0
}

// With creates a new Logger with the specified fields. This is a light-weight
// operation and can be called on demand.
func (l *Logger) With(fields ...Field) *Logger {
	logWith := l.log.With(fields...)
	return &Logger{
		log:        &logWith,
		lockConfig: l.lockConfig,
	}
}

// IsLevelEnabled returns true only if at least one log target is
// configured to emit the specified log level. Use this check when
// gathering the log info may be expensive.
//
// Note, transformations and serializations done via fields are already
// lazily evaluated and don't require this check beforehand.
func (l *Logger) IsLevelEnabled(level Level) bool {
	return l.log.IsLevelEnabled(level)
}

// Log emits the log record for any targets configured for the specified level.
func (l *Logger) Log(level Level, msg string, fields ...Field) {
	l.log.Log(level, msg, fields...)
}

// LogM emits the log record for any targets configured for the specified levels.
// Equivalent to calling `Log` once for each level.
func (l *Logger) LogM(levels []Level, msg string, fields ...Field) {
	l.log.LogM(levels, msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Trace` level.
func (l *Logger) Trace(msg string, fields ...Field) {
	l.log.Trace(msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Debug` level.
func (l *Logger) Debug(msg string, fields ...Field) {
	l.log.Debug(msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Info` level.
func (l *Logger) Info(msg string, fields ...Field) {
	l.log.Info(msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Warn` level.
func (l *Logger) Warn(msg string, fields ...Field) {
	l.log.Warn(msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Error` level.
func (l *Logger) Error(msg string, fields ...Field) {
	l.log.Error(msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Critical` level.
func (l *Logger) Critical(msg string, fields ...Field) {
	l.log.Log(LvlCritical, msg, fields...)
}

// Convenience method equivalent to calling `Log` with the `Fatal` level,
// followed by `os.Exit(1)`.
func (l *Logger) Fatal(msg string, fields ...Field) {
	l.log.Log(logr.Fatal, msg, fields...)
	_ = l.Shutdown()
	os.Exit(1)
}

// HasTargets returns true if at least one log target has been added.
func (l *Logger) HasTargets() bool {
	return l.log.Logr().HasTargets()
}

// StdLogger creates a standard logger backed by this logger.
// All log records are output with the specified level.
func (l *Logger) StdLogger(level Level) *log.Logger {
	return l.log.StdLogger(level)
}

// StdLogWriter returns a writer that can be hooked up to the output of a golang standard logger;
// anything written will be interpreted as log entries and passed to this logger.
func (l *Logger) StdLogWriter() io.Writer {
	return &logWriter{
		logger: l,
	}
}

// RedirectStdLog redirects output from the standard library's package-global logger
// to this logger at the specified level and with zero or more Field's. Since this logger already
// handles caller annotations, timestamps, etc., it automatically disables the standard
// library's annotations and prefixing.
// A function is returned that restores the original prefix and flags and resets the standard
// library's output to os.Stdout.
func (l *Logger) RedirectStdLog(level Level, fields ...Field) func() {
	return l.log.Logr().RedirectStdLog(level, fields...)
}

// RemoveTargets safely removes one or more targets based on the filtering method.
// `f` should return true to delete the target, false to keep it.
// When removing a target, best effort is made to write any queued log records before
// closing, with ctx determining how much time can be spent in total.
// Note, keep the timeout short since this method blocks certain logging operations.
func (l *Logger) RemoveTargets(ctx context.Context, f func(ti TargetInfo) bool) error {
	return l.log.Logr().RemoveTargets(ctx, f)
}

// SetMetricsCollector sets (or resets) the metrics collector to be used for gathering
// metrics for all targets. Only targets added after this call will use the collector.
//
// To ensure all targets use a collector, use the `SetMetricsCollector` option when
// creating the Logger instead, or configure/reconfigure the Logger after calling this method.
func (l *Logger) SetMetricsCollector(collector MetricsCollector, updateFrequencyMillis int64) {
	l.log.Logr().SetMetricsCollector(collector, updateFrequencyMillis)
}

// Sugar creates a new `Logger` with a less structured API. Any fields are preserved.
func (l *Logger) Sugar(fields ...Field) Sugar {
	return l.log.Sugar(fields...)
}

// Flush forces all targets to write out any queued log records with a default timeout.
func (l *Logger) Flush() error {
	ctx, cancel := context.WithTimeout(context.Background(), FlushTimeout)
	defer cancel()
	return l.log.Logr().FlushWithTimeout(ctx)
}

// FlushWithTimeout forces all targets to write out any queued log records with the specified timeout.
func (l *Logger) FlushWithTimeout(ctx context.Context) error {
	return l.log.Logr().FlushWithTimeout(ctx)
}

// Shutdown shuts down the logger after making best efforts to flush any
// remaining records.
func (l *Logger) Shutdown() error {
	ctx, cancel := context.WithTimeout(context.Background(), ShutdownTimeout)
	defer cancel()
	return l.log.Logr().ShutdownWithTimeout(ctx)
}

// ShutdownWithTimeout shuts down the logger after making best efforts to flush any
// remaining records.
func (l *Logger) ShutdownWithTimeout(ctx context.Context) error {
	return l.log.Logr().ShutdownWithTimeout(ctx)
}

// GetPackageName reduces a fully qualified function name to the package name
// By sirupsen: https://github.com/sirupsen/logrus/blob/master/entry.go
func GetPackageName(f string) string {
	for {
		lastPeriod := strings.LastIndex(f, ".")
		lastSlash := strings.LastIndex(f, "/")
		if lastPeriod > lastSlash {
			f = f[:lastPeriod]
		} else {
			break
		}
	}
	return f
}

type logWriter struct {
	logger *Logger
}

func (lw *logWriter) Write(p []byte) (int, error) {
	lw.logger.Info(string(p))
	return len(p), nil
}

// ErrConfigurationLock is returned when one of a logger's configuration APIs is called
// while the configuration is locked.
var ErrConfigurationLock = errors.New("configuration is locked")
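Putting the Logger API together: construct with NewLogger, attach at least one target, optionally derive child loggers with With, then Flush/Shutdown when done. A hedged sketch, not part of the vendored source; it uses AddWriterTarget and Buffer from tlog.go below, and all message and field values are illustrative:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/shared/mlog"
)

func main() {
	logger, err := mlog.NewLogger()
	if err != nil {
		panic(err)
	}
	defer logger.Shutdown()

	// Attach a JSON target that writes into a thread-safe in-memory buffer.
	buf := &mlog.Buffer{}
	if err := mlog.AddWriterTarget(logger, buf, true, mlog.LvlInfo, mlog.LvlError); err != nil {
		panic(err)
	}

	// With returns a child logger that always carries the given fields.
	reqLogger := logger.With(mlog.String("request_id", "r-42"))
	reqLogger.Info("handling request", mlog.Int("attempt", 1))

	// Flush before reading the buffer so queued records are written out.
	if err := logger.Flush(); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}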
55
vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/options.go
generated
vendored
Normal file
55
vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/options.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package mlog

import "github.com/mattermost/logr/v2"

// MaxQueueSize is the maximum number of log records that can be queued.
// If exceeded, `OnQueueFull` is called which determines if the log
// record will be dropped or block until add is successful.
// Defaults to DefaultMaxQueueSize.
func MaxQueueSize(size int) Option {
	return logr.MaxQueueSize(size)
}

// OnLoggerError, when not nil, is called any time an internal
// logging error occurs. For example, this can happen when a
// target cannot connect to its data sink.
func OnLoggerError(f func(error)) Option {
	return logr.OnLoggerError(f)
}

// OnQueueFull, when not nil, is called on an attempt to add
// a log record to a full Logr queue.
// `MaxQueueSize` can be used to modify the maximum queue size.
// This function should return quickly, with a bool indicating whether
// the log record should be dropped (true) or block until the log record
// is successfully added (false). If nil then blocking (false) is assumed.
func OnQueueFull(f func(rec *LogRec, maxQueueSize int) bool) Option {
	return logr.OnQueueFull(f)
}

// OnTargetQueueFull, when not nil, is called on an attempt to add
// a log record to a full target queue provided the target supports reporting
// this condition.
// This function should return quickly, with a bool indicating whether
// the log record should be dropped (true) or block until the log record
// is successfully added (false). If nil then blocking (false) is assumed.
func OnTargetQueueFull(f func(target Target, rec *LogRec, maxQueueSize int) bool) Option {
	return logr.OnTargetQueueFull(f)
}

// SetMetricsCollector enables metrics collection by supplying a MetricsCollector.
// The MetricsCollector provides counters and gauges that are updated by log targets.
// `updateFreqMillis` determines how often polled metrics are updated. Defaults to 15000 (15 seconds)
// and must be at least 250 so we don't peg the CPU.
func SetMetricsCollector(collector MetricsCollector, updateFreqMillis int64) Option {
	return logr.SetMetricsCollector(collector, updateFreqMillis)
}

// StackFilter provides a list of package names to exclude from the top of
// stack traces. The Logr packages are automatically filtered.
func StackFilter(pkg ...string) Option {
	return logr.StackFilter(pkg...)
}
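These options are passed to NewLogger (mlog.go above) and forwarded to Logr. A speculative sketch of wiring a few of them together, not part of the vendored source; the wrapper package path passed to StackFilter is hypothetical:

package main

import (
	"fmt"
	"os"

	"github.com/mattermost/mattermost-server/v6/shared/mlog"
)

func main() {
	logger, err := mlog.NewLogger(
		mlog.MaxQueueSize(500),
		mlog.OnLoggerError(func(err error) {
			// Internal logging failures (e.g. a target losing its sink) end up here.
			fmt.Fprintln(os.Stderr, "logging error:", err)
		}),
		mlog.OnQueueFull(func(rec *mlog.LogRec, maxQueueSize int) bool {
			// Drop the record rather than block the caller.
			return true
		}),
		// Hypothetical wrapper package to exclude from stack traces.
		mlog.StackFilter("github.com/example/myapp/logwrap"),
	)
	if err != nil {
		panic(err)
	}
	defer logger.Shutdown()

	logger.Info("logger configured", mlog.Int("max_queue", 500))
}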
79
vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/tlog.go
generated
vendored
Normal file
79
vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/tlog.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package mlog

import (
	"bytes"
	"io"
	"os"
	"sync"

	"github.com/mattermost/logr/v2"
	"github.com/mattermost/logr/v2/formatters"
	"github.com/mattermost/logr/v2/targets"
)

// AddWriterTarget adds a simple io.Writer target to an existing Logger.
// The `io.Writer` can be a buffer which is useful for testing.
// When adding a buffer to collect logs make sure to use `mlog.Buffer` which is
// a thread safe version of `bytes.Buffer`.
func AddWriterTarget(logger *Logger, w io.Writer, useJSON bool, levels ...Level) error {
	filter := logr.NewCustomFilter(levels...)

	var formatter logr.Formatter
	if useJSON {
		formatter = &formatters.JSON{EnableCaller: true}
	} else {
		formatter = &formatters.Plain{EnableCaller: true}
	}

	target := targets.NewWriterTarget(w)
	return logger.log.Logr().AddTarget(target, "_testWriter", filter, formatter, 1000)
}

// CreateConsoleTestLogger creates a logger for unit tests. Log records are output to `os.Stdout`.
// Logs can also be mirrored to the optional `io.Writer`.
func CreateConsoleTestLogger(useJSON bool, level Level) *Logger {
	logger, _ := NewLogger()

	filter := logr.StdFilter{
		Lvl:        level,
		Stacktrace: LvlPanic,
	}

	var formatter logr.Formatter
	if useJSON {
		formatter = &formatters.JSON{EnableCaller: true}
	} else {
		formatter = &formatters.Plain{EnableCaller: true}
	}

	target := targets.NewWriterTarget(os.Stdout)
	if err := logger.log.Logr().AddTarget(target, "_testcon", filter, formatter, 1000); err != nil {
		panic(err)
	}
	return logger
}

// Buffer provides a thread-safe buffer useful for logging to memory in unit tests.
type Buffer struct {
	buf bytes.Buffer
	mux sync.Mutex
}

func (b *Buffer) Read(p []byte) (n int, err error) {
	b.mux.Lock()
	defer b.mux.Unlock()
	return b.buf.Read(p)
}

func (b *Buffer) Write(p []byte) (n int, err error) {
	b.mux.Lock()
	defer b.mux.Unlock()
	return b.buf.Write(p)
}

func (b *Buffer) String() string {
	b.mux.Lock()
	defer b.mux.Unlock()
	return b.buf.String()
}
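The writer target plus the thread-safe Buffer are intended for tests: attach a buffer, log, flush, then assert on the captured output. A sketch of that pattern, not part of the vendored source; the test name, package, and values are invented:

package myapp_test

import (
	"strings"
	"testing"

	"github.com/mattermost/mattermost-server/v6/shared/mlog"
)

func TestLogsContainUserID(t *testing.T) {
	// Console target stays quiet below Error; the buffer captures Info records.
	logger := mlog.CreateConsoleTestLogger(false, mlog.LvlError)
	defer logger.Shutdown()

	buf := &mlog.Buffer{}
	if err := mlog.AddWriterTarget(logger, buf, true, mlog.LvlInfo); err != nil {
		t.Fatal(err)
	}

	logger.Info("user created", mlog.String("user_id", "u-123"))

	// Flush so queued records reach the buffer before asserting.
	if err := logger.Flush(); err != nil {
		t.Fatal(err)
	}
	if !strings.Contains(buf.String(), "u-123") {
		t.Fatalf("expected log output to contain user id, got: %q", buf.String())
	}
}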