4
0
mirror of https://github.com/cwinfo/matterbridge.git synced 2025-07-03 04:57:44 +00:00

Update dependencies (#1610)

* Update dependencies

* Update module to go 1.17
This commit is contained in:
Wim
2021-10-17 00:47:22 +02:00
committed by GitHub
parent 7ae45c42e7
commit 4dd8bae5c9
494 changed files with 15698 additions and 19720 deletions

View File

@ -0,0 +1,83 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package filestore
import (
"io"
"time"
"github.com/pkg/errors"
)
const (
	// Driver names accepted in FileBackendSettings.DriverName.
	driverS3    = "amazons3"
	driverLocal = "local"
)

// ReadCloseSeeker is the stream type returned by FileBackend.Reader: a
// readable, seekable stream that the caller must close.
type ReadCloseSeeker interface {
	io.ReadCloser
	io.Seeker
}

// FileBackend abstracts file storage so callers can work against either
// the local filesystem or an S3-compatible object store. All paths are
// interpreted relative to the backend's configured root.
type FileBackend interface {
	TestConnection() error
	Reader(path string) (ReadCloseSeeker, error)
	ReadFile(path string) ([]byte, error)
	FileExists(path string) (bool, error)
	FileSize(path string) (int64, error)
	CopyFile(oldPath, newPath string) error
	MoveFile(oldPath, newPath string) error
	WriteFile(fr io.Reader, path string) (int64, error)
	AppendFile(fr io.Reader, path string) (int64, error)
	RemoveFile(path string) error
	FileModTime(path string) (time.Time, error)
	ListDirectory(path string) ([]string, error)
	RemoveDirectory(path string) error
}
// FileBackendSettings holds the configuration needed to build a
// FileBackend: the driver to use, the root directory for the local
// driver, and the connection parameters for the amazons3 driver.
type FileBackendSettings struct {
	DriverName              string
	Directory               string
	AmazonS3AccessKeyId     string
	AmazonS3SecretAccessKey string
	AmazonS3Bucket          string
	AmazonS3PathPrefix      string
	AmazonS3Region          string
	AmazonS3Endpoint        string
	AmazonS3SSL             bool
	AmazonS3SignV2          bool
	AmazonS3SSE             bool
	AmazonS3Trace           bool
}

// CheckMandatoryS3Fields validates that the settings required by the S3
// driver are present, and fills in the public AWS endpoint when no
// endpoint was configured.
func (settings *FileBackendSettings) CheckMandatoryS3Fields() error {
	if settings.AmazonS3Bucket == "" {
		return errors.New("missing s3 bucket settings")
	}
	// Default to the public AWS endpoint when none was configured.
	if settings.AmazonS3Endpoint == "" {
		settings.AmazonS3Endpoint = "s3.amazonaws.com"
	}
	return nil
}
// NewFileBackend constructs the backend selected by settings.DriverName.
// An unrecognized driver name yields an error.
func NewFileBackend(settings FileBackendSettings) (FileBackend, error) {
	if settings.DriverName == driverLocal {
		return &LocalFileBackend{
			directory: settings.Directory,
		}, nil
	}
	if settings.DriverName == driverS3 {
		backend, err := NewS3FileBackend(settings)
		if err != nil {
			return nil, errors.Wrap(err, "unable to connect to the s3 backend")
		}
		return backend, nil
	}
	return nil, errors.New("no valid filestorage driver found")
}

View File

@ -0,0 +1,211 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package filestore
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/pkg/errors"
"github.com/mattermost/mattermost-server/v5/shared/mlog"
)
const (
	// TestFilePath is the name of the throwaway probe file written by
	// TestConnection to verify the storage directory is writable.
	TestFilePath = "/testfile"
)

// LocalFileBackend stores files on the local filesystem under a single
// root directory.
type LocalFileBackend struct {
	// directory is the root under which all backend paths are resolved.
	directory string
}
// copyFile will copy a file from src path to dst path.
// Overwrites any existing files at dst.
// Permissions are copied from file at src to the new file at dst.
func copyFile(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
return
}
defer in.Close()
if err = os.MkdirAll(filepath.Dir(dst), os.ModePerm); err != nil {
return
}
out, err := os.Create(dst)
if err != nil {
return
}
defer func() {
if e := out.Close(); e != nil {
err = e
}
}()
_, err = io.Copy(out, in)
if err != nil {
return
}
err = out.Sync()
if err != nil {
return
}
stat, err := os.Stat(src)
if err != nil {
return
}
err = os.Chmod(dst, stat.Mode())
if err != nil {
return
}
return
}
// TestConnection verifies the backend root is writable by writing and
// then deleting a small probe file.
func (b *LocalFileBackend) TestConnection() error {
	probe := filepath.Join(b.directory, TestFilePath)
	if _, err := writeFileLocally(bytes.NewReader([]byte("testingwrite")), probe); err != nil {
		return errors.Wrap(err, "unable to write to the local filesystem storage")
	}
	// Best-effort cleanup of the probe file.
	os.Remove(probe)
	mlog.Debug("Able to write files to local storage.")
	return nil
}
// Reader opens the file at path (relative to the backend root) for
// reading and seeking. The caller must close the returned value.
func (b *LocalFileBackend) Reader(path string) (ReadCloseSeeker, error) {
	file, err := os.Open(filepath.Join(b.directory, path))
	if err != nil {
		return nil, errors.Wrapf(err, "unable to open file %s", path)
	}
	return file, nil
}
// ReadFile returns the entire contents of the file at path.
func (b *LocalFileBackend) ReadFile(path string) ([]byte, error) {
	data, err := ioutil.ReadFile(filepath.Join(b.directory, path))
	if err != nil {
		return nil, errors.Wrapf(err, "unable to read file %s", path)
	}
	return data, nil
}
// FileExists reports whether a file (or directory) exists at path. A
// stat failure other than "not exist" is returned as an error.
func (b *LocalFileBackend) FileExists(path string) (bool, error) {
	_, err := os.Stat(filepath.Join(b.directory, path))
	switch {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, errors.Wrapf(err, "unable to know if file %s exists", path)
	}
}
// FileSize returns the size in bytes of the file at path.
func (b *LocalFileBackend) FileSize(path string) (int64, error) {
	stat, err := os.Stat(filepath.Join(b.directory, path))
	if err != nil {
		return 0, errors.Wrapf(err, "unable to get file size for %s", path)
	}
	return stat.Size(), nil
}
// FileModTime returns the last-modification time of the file at path.
func (b *LocalFileBackend) FileModTime(path string) (time.Time, error) {
	stat, err := os.Stat(filepath.Join(b.directory, path))
	if err != nil {
		return time.Time{}, errors.Wrapf(err, "unable to get modification time for file %s", path)
	}
	return stat.ModTime(), nil
}
// CopyFile duplicates the file at oldPath to newPath, both relative to
// the backend root. An existing destination is overwritten.
func (b *LocalFileBackend) CopyFile(oldPath, newPath string) error {
	src := filepath.Join(b.directory, oldPath)
	dst := filepath.Join(b.directory, newPath)
	if err := copyFile(src, dst); err != nil {
		return errors.Wrapf(err, "unable to copy file from %s to %s", oldPath, newPath)
	}
	return nil
}
// MoveFile renames the file at oldPath to newPath, creating the
// destination's parent directories as needed.
func (b *LocalFileBackend) MoveFile(oldPath, newPath string) error {
	src := filepath.Join(b.directory, oldPath)
	dst := filepath.Join(b.directory, newPath)
	if err := os.MkdirAll(filepath.Dir(dst), 0750); err != nil {
		return errors.Wrapf(err, "unable to create the new destination directory %s", filepath.Dir(newPath))
	}
	if err := os.Rename(src, dst); err != nil {
		return errors.Wrapf(err, "unable to move the file to %s to the destination directory", newPath)
	}
	return nil
}
// WriteFile streams fr into the file at path, creating it (and any
// missing parent directories) as needed, and returns the byte count.
func (b *LocalFileBackend) WriteFile(fr io.Reader, path string) (int64, error) {
	target := filepath.Join(b.directory, path)
	return writeFileLocally(fr, target)
}
// writeFileLocally creates (or truncates) the file at the absolute path
// and copies fr into it, returning the number of bytes written. Parent
// directories are created with 0750 permissions, the file with 0600.
func writeFileLocally(fr io.Reader, path string) (int64, error) {
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0750); err != nil {
		absDir, _ := filepath.Abs(dir)
		return 0, errors.Wrapf(err, "unable to create the directory %s for the file %s", absDir, path)
	}
	out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return 0, errors.Wrapf(err, "unable to open the file %s to write the data", path)
	}
	defer out.Close()
	n, err := io.Copy(out, fr)
	if err != nil {
		return n, errors.Wrapf(err, "unable write the data in the file %s", path)
	}
	return n, nil
}
// AppendFile appends the contents of fr to the existing file at path
// and returns the number of bytes written. It fails when the file does
// not already exist.
func (b *LocalFileBackend) AppendFile(fr io.Reader, path string) (int64, error) {
	target := filepath.Join(b.directory, path)
	if _, err := os.Stat(target); err != nil {
		return 0, errors.Wrapf(err, "unable to find the file %s to append the data", path)
	}
	out, err := os.OpenFile(target, os.O_WRONLY|os.O_APPEND, 0600)
	if err != nil {
		return 0, errors.Wrapf(err, "unable to open the file %s to append the data", path)
	}
	defer out.Close()
	n, err := io.Copy(out, fr)
	if err != nil {
		return n, errors.Wrapf(err, "unable append the data in the file %s", path)
	}
	return n, nil
}
// RemoveFile deletes the file at path.
func (b *LocalFileBackend) RemoveFile(path string) error {
	target := filepath.Join(b.directory, path)
	if err := os.Remove(target); err != nil {
		return errors.Wrapf(err, "unable to remove the file %s", path)
	}
	return nil
}
// ListDirectory returns the immediate entries of the given directory,
// each joined with path. A missing directory yields an empty result
// rather than an error.
func (b *LocalFileBackend) ListDirectory(path string) ([]string, error) {
	entries, err := ioutil.ReadDir(filepath.Join(b.directory, path))
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, errors.Wrapf(err, "unable to list the directory %s", path)
	}
	var paths []string
	for _, entry := range entries {
		paths = append(paths, filepath.Join(path, entry.Name()))
	}
	return paths, nil
}
// RemoveDirectory deletes the directory at path and everything in it.
func (b *LocalFileBackend) RemoveDirectory(path string) error {
	target := filepath.Join(b.directory, path)
	if err := os.RemoveAll(target); err != nil {
		return errors.Wrapf(err, "unable to remove the directory %s", path)
	}
	return nil
}

View File

@ -0,0 +1,56 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package filestore
import (
"context"
"net/http"
"github.com/minio/minio-go/v7/pkg/credentials"
)
// customTransport is used to point the request to a different server.
// This is helpful in situations where a different service is handling AWS S3 requests
// from multiple Mattermost applications, and the Mattermost service itself does not
// have any S3 credentials.
type customTransport struct {
	// base is the transport this one was built from.
	// NOTE(review): base is stored but RoundTrip sends through client —
	// confirm whether base is intentionally unused.
	base http.RoundTripper
	// host and scheme replace the request URL's host/scheme on each call.
	host   string
	scheme string
	// client actually performs the rewritten request.
	client http.Client
}
// RoundTrip implements the http.RoundTripper interface by redirecting
// the request to the configured host and scheme.
//
// RoundTrippers must not modify the original request, so the rewrite is
// applied to a clone (with its own URL copy) and the clone is sent.
// Previously the original request's URL was mutated and the clone was
// never used, leaking the rewrite back to the caller.
func (t *customTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	newReq := req.Clone(context.Background())
	*newReq.URL = *req.URL
	newReq.URL.Scheme = t.scheme
	newReq.URL.Host = t.host
	return t.client.Do(newReq)
}
// customProvider is a dummy credentials provider for the minio client to work
// without actually providing credentials. This is needed with a custom transport
// in cases where the minio client does not actually have credentials with itself,
// rather needs responses from another entity.
//
// It satisfies the credentials.Provider interface.
type customProvider struct {
	// isSignV2 selects SignatureV2 instead of the default SignatureV4.
	isSignV2 bool
}
// Retrieve just returns empty credentials, carrying only the signature
// type selected at construction.
func (cp customProvider) Retrieve() (credentials.Value, error) {
	signerType := credentials.SignatureV4
	if cp.isSignV2 {
		signerType = credentials.SignatureV2
	}
	return credentials.Value{SignerType: signerType}, nil
}

// IsExpired always returns false.
func (cp customProvider) IsExpired() bool { return false }

View File

@ -0,0 +1,442 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package filestore
import (
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
s3 "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/pkg/errors"
"github.com/mattermost/mattermost-server/v5/shared/mlog"
)
// S3FileBackend contains all necessary information to communicate with
// an AWS S3 compatible API backend.
type S3FileBackend struct {
	endpoint   string
	accessKey  string
	secretKey  string
	secure     bool // use HTTPS when talking to the endpoint
	signV2     bool // use AWS signature v2 instead of v4
	region     string
	bucket     string
	pathPrefix string // prefix joined onto every object key
	encrypt    bool // request server-side encryption on writes
	trace      bool // dump minio wire traffic to stdout
	client     *s3.Client
}

// S3FileBackendAuthError is returned when the backend cannot list or stat
// the configured bucket with the current credentials.
type S3FileBackendAuthError struct {
	DetailedError string
}

// S3FileBackendNoBucketError is returned when testing a connection and no S3 bucket is found
type S3FileBackendNoBucketError struct{}

const (
	// This is not exported by minio. See: https://github.com/minio/minio-go/issues/1339
	bucketNotFound = "NoSuchBucket"
)
var (
	// imageExtensions lists the file extensions treated as images when
	// choosing a Content-Type for uploads. Keys include the leading dot,
	// matching what filepath.Ext produces (the ".tif" entry previously
	// lacked its dot and so never matched).
	imageExtensions = map[string]bool{".jpg": true, ".jpeg": true, ".gif": true, ".bmp": true, ".png": true, ".tiff": true, ".tif": true}
	// imageMimeTypes maps each image extension to its Content-Type value.
	imageMimeTypes = map[string]string{".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".gif": "image/gif", ".bmp": "image/bmp", ".png": "image/png", ".tiff": "image/tiff", ".tif": "image/tif"}
)
// isFileExtImage reports whether ext (with leading dot, any case) is a
// known image extension.
func isFileExtImage(ext string) bool {
	return imageExtensions[strings.ToLower(ext)]
}
// getImageMimeType returns the Content-Type for the image extension ext
// (case-insensitive), or the generic "image" when it is unknown.
func getImageMimeType(ext string) string {
	mimeType := imageMimeTypes[strings.ToLower(ext)]
	if mimeType == "" {
		return "image"
	}
	return mimeType
}
// Error implements the error interface, returning the detailed message.
func (s *S3FileBackendAuthError) Error() string {
	return s.DetailedError
}

// Error implements the error interface with a fixed description.
func (s *S3FileBackendNoBucketError) Error() string {
	return "no such bucket"
}
// NewS3FileBackend returns an instance of an S3FileBackend with a ready
// minio client built from the given settings.
func NewS3FileBackend(settings FileBackendSettings) (*S3FileBackend, error) {
	backend := &S3FileBackend{
		endpoint:   settings.AmazonS3Endpoint,
		accessKey:  settings.AmazonS3AccessKeyId,
		secretKey:  settings.AmazonS3SecretAccessKey,
		secure:     settings.AmazonS3SSL,
		signV2:     settings.AmazonS3SignV2,
		region:     settings.AmazonS3Region,
		bucket:     settings.AmazonS3Bucket,
		pathPrefix: settings.AmazonS3PathPrefix,
		encrypt:    settings.AmazonS3SSE,
		trace:      settings.AmazonS3Trace,
	}
	client, err := backend.s3New()
	if err != nil {
		return nil, err
	}
	backend.client = client
	return backend, nil
}
// Similar to s3.New() but allows initialization of signature v2 or signature v4 client.
// If signV2 input is false, function always returns signature v4.
//
// Additionally this function also takes a user defined region, if set
// disables automatic region lookup.
func (b *S3FileBackend) s3New() (*s3.Client, error) {
	isCloud := os.Getenv("MM_CLOUD_FILESTORE_BIFROST") != ""

	// Pick the credential source: a dummy provider when a proxy signs
	// requests (cloud), IAM when no static keys are configured, otherwise
	// static keys with the requested signature version.
	var creds *credentials.Credentials
	switch {
	case isCloud:
		creds = credentials.New(customProvider{isSignV2: b.signV2})
	case b.accessKey == "" && b.secretKey == "":
		creds = credentials.NewIAM("")
	case b.signV2:
		creds = credentials.NewStatic(b.accessKey, b.secretKey, "", credentials.SignatureV2)
	default:
		creds = credentials.NewStatic(b.accessKey, b.secretKey, "", credentials.SignatureV4)
	}

	opts := s3.Options{
		Creds:  creds,
		Secure: b.secure,
		Region: b.region,
	}
	// If this is a cloud installation, we override the default transport.
	if isCloud {
		tr, err := s3.DefaultTransport(b.secure)
		if err != nil {
			return nil, err
		}
		scheme := "http"
		if b.secure {
			scheme = "https"
		}
		opts.Transport = &customTransport{
			base:   tr,
			host:   b.endpoint,
			scheme: scheme,
		}
	}

	client, err := s3.New(b.endpoint, &opts)
	if err != nil {
		return nil, err
	}
	if b.trace {
		client.TraceOn(os.Stdout)
	}
	return client, nil
}
// TestConnection checks that the configured bucket (or the configured
// path prefix within it) is reachable with the current credentials.
func (b *S3FileBackend) TestConnection() error {
	exists := true
	// With a path prefix, probe by listing one object under the prefix:
	// the caller's credentials may only grant access to that prefix, so a
	// bucket-level BucketExists call could be denied.
	if b.pathPrefix != "" {
		obj := <-b.client.ListObjects(context.Background(), b.bucket, s3.ListObjectsOptions{Prefix: b.pathPrefix})
		if obj.Err != nil {
			if s3.ToErrorResponse(obj.Err).Code != bucketNotFound {
				return &S3FileBackendAuthError{DetailedError: "unable to list objects in the S3 bucket"}
			}
			exists = false
		}
	} else {
		var err error
		exists, err = b.client.BucketExists(context.Background(), b.bucket)
		if err != nil {
			return &S3FileBackendAuthError{DetailedError: "unable to check if the S3 bucket exists"}
		}
	}
	if !exists {
		return &S3FileBackendNoBucketError{}
	}
	mlog.Debug("Connection to S3 or minio is good. Bucket exists.")
	return nil
}
// MakeBucket creates the configured bucket in the configured region.
func (b *S3FileBackend) MakeBucket() error {
	opts := s3.MakeBucketOptions{Region: b.region}
	if err := b.client.MakeBucket(context.Background(), b.bucket, opts); err != nil {
		return errors.Wrap(err, "unable to create the s3 bucket")
	}
	return nil
}
// Reader returns a streaming handle on the object at path.
// Caller must close the first return value.
func (b *S3FileBackend) Reader(path string) (ReadCloseSeeker, error) {
	key := filepath.Join(b.pathPrefix, path)
	obj, err := b.client.GetObject(context.Background(), b.bucket, key, s3.GetObjectOptions{})
	if err != nil {
		return nil, errors.Wrapf(err, "unable to open file %s", key)
	}
	return obj, nil
}
// ReadFile downloads and returns the entire object at path.
func (b *S3FileBackend) ReadFile(path string) ([]byte, error) {
	key := filepath.Join(b.pathPrefix, path)
	obj, err := b.client.GetObject(context.Background(), b.bucket, key, s3.GetObjectOptions{})
	if err != nil {
		return nil, errors.Wrapf(err, "unable to open file %s", key)
	}
	defer obj.Close()
	data, err := ioutil.ReadAll(obj)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to read file %s", key)
	}
	return data, nil
}
// FileExists reports whether an object exists at path in the bucket. A
// "NoSuchKey" response is reported as plain false rather than an error.
func (b *S3FileBackend) FileExists(path string) (bool, error) {
	path = filepath.Join(b.pathPrefix, path)
	_, err := b.client.StatObject(context.Background(), b.bucket, path, s3.StatObjectOptions{})
	if err == nil {
		return true, nil
	}
	// Only treat the error as "not found" when it really is an s3
	// ErrorResponse; previously the errors.As result was discarded and the
	// zero-value Code was compared, which vet/staticcheck flag.
	var s3Err s3.ErrorResponse
	if errors.As(err, &s3Err) && s3Err.Code == "NoSuchKey" {
		return false, nil
	}
	return false, errors.Wrapf(err, "unable to know if file %s exists", path)
}
// FileSize returns the size in bytes of the object at path.
func (b *S3FileBackend) FileSize(path string) (int64, error) {
	key := filepath.Join(b.pathPrefix, path)
	stat, err := b.client.StatObject(context.Background(), b.bucket, key, s3.StatObjectOptions{})
	if err != nil {
		return 0, errors.Wrapf(err, "unable to get file size for %s", key)
	}
	return stat.Size, nil
}
// FileModTime returns the last-modified time of the object at path.
func (b *S3FileBackend) FileModTime(path string) (time.Time, error) {
	key := filepath.Join(b.pathPrefix, path)
	stat, err := b.client.StatObject(context.Background(), b.bucket, key, s3.StatObjectOptions{})
	if err != nil {
		return time.Time{}, errors.Wrapf(err, "unable to get modification time for file %s", key)
	}
	return stat.LastModified, nil
}
// CopyFile duplicates the object at oldPath to newPath server side,
// requesting server-side encryption for both ends of the copy.
func (b *S3FileBackend) CopyFile(oldPath, newPath string) error {
	src := s3.CopySrcOptions{
		Bucket:     b.bucket,
		Object:     filepath.Join(b.pathPrefix, oldPath),
		Encryption: encrypt.NewSSE(),
	}
	dst := s3.CopyDestOptions{
		Bucket:     b.bucket,
		Object:     filepath.Join(b.pathPrefix, newPath),
		Encryption: encrypt.NewSSE(),
	}
	if _, err := b.client.CopyObject(context.Background(), dst, src); err != nil {
		return errors.Wrapf(err, "unable to copy file from %s to %s", src.Object, dst.Object)
	}
	return nil
}
// MoveFile copies the object at oldPath to newPath server side and then
// deletes the original. Not atomic: a failure after the copy leaves both
// objects in place.
func (b *S3FileBackend) MoveFile(oldPath, newPath string) error {
	oldKey := filepath.Join(b.pathPrefix, oldPath)
	newKey := filepath.Join(b.pathPrefix, newPath)
	srcOpts := s3.CopySrcOptions{
		Bucket:     b.bucket,
		Object:     oldKey,
		Encryption: encrypt.NewSSE(),
	}
	dstOpts := s3.CopyDestOptions{
		Bucket:     b.bucket,
		Object:     newKey,
		Encryption: encrypt.NewSSE(),
	}
	if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
		return errors.Wrapf(err, "unable to copy the file to %s to the new destionation", newKey)
	}
	if err := b.client.RemoveObject(context.Background(), b.bucket, oldKey, s3.RemoveObjectOptions{}); err != nil {
		return errors.Wrapf(err, "unable to remove the file old file %s", oldKey)
	}
	return nil
}
// WriteFile uploads fr to the object at path, picking an image MIME
// type for known image extensions and binary/octet-stream otherwise.
// It returns the number of bytes stored.
func (b *S3FileBackend) WriteFile(fr io.Reader, path string) (int64, error) {
	key := filepath.Join(b.pathPrefix, path)
	contentType := "binary/octet-stream"
	if ext := filepath.Ext(key); isFileExtImage(ext) {
		contentType = getImageMimeType(ext)
	}
	options := s3PutOptions(b.encrypt, contentType)
	info, err := b.client.PutObject(context.Background(), b.bucket, key, fr, -1, options)
	if err != nil {
		return info.Size, errors.Wrapf(err, "unable write the data in the file %s", key)
	}
	return info.Size, nil
}
// AppendFile appends the contents of fr to the existing object at path
// and returns the number of bytes appended. S3 has no native append, so
// the data is uploaded to a temporary ".part" object which is then
// composed with the original and removed.
func (b *S3FileBackend) AppendFile(fr io.Reader, path string) (int64, error) {
	fp := filepath.Join(b.pathPrefix, path)
	if _, err := b.client.StatObject(context.Background(), b.bucket, fp, s3.StatObjectOptions{}); err != nil {
		return 0, errors.Wrapf(err, "unable to find the file %s to append the data", path)
	}
	var contentType string
	if ext := filepath.Ext(fp); isFileExtImage(ext) {
		contentType = getImageMimeType(ext)
	} else {
		contentType = "binary/octet-stream"
	}
	options := s3PutOptions(b.encrypt, contentType)
	sse := options.ServerSideEncryption
	partName := fp + ".part"
	info, err := b.client.PutObject(context.Background(), b.bucket, partName, fr, -1, options)
	// Best-effort cleanup of the temporary part object.
	defer b.client.RemoveObject(context.Background(), b.bucket, partName, s3.RemoveObjectOptions{})
	// Check the upload error before trusting info: previously a non-zero
	// Size could mask a failed upload and compose a corrupt part.
	if err != nil {
		return 0, errors.Wrapf(err, "unable append the data in the file %s", path)
	}
	if info.Size == 0 {
		// Nothing was uploaded (empty reader); the existing object is
		// already the correct result.
		return 0, nil
	}
	src1Opts := s3.CopySrcOptions{
		Bucket: b.bucket,
		Object: fp,
	}
	src2Opts := s3.CopySrcOptions{
		Bucket: b.bucket,
		Object: partName,
	}
	dstOpts := s3.CopyDestOptions{
		Bucket:     b.bucket,
		Object:     fp,
		Encryption: sse,
	}
	if _, err = b.client.ComposeObject(context.Background(), dstOpts, src1Opts, src2Opts); err != nil {
		return 0, errors.Wrapf(err, "unable append the data in the file %s", path)
	}
	return info.Size, nil
}
// RemoveFile deletes the object at path.
func (b *S3FileBackend) RemoveFile(path string) error {
	key := filepath.Join(b.pathPrefix, path)
	if err := b.client.RemoveObject(context.Background(), b.bucket, key, s3.RemoveObjectOptions{}); err != nil {
		return errors.Wrapf(err, "unable to remove the file %s", key)
	}
	return nil
}
// getPathsFromObjectInfos forwards every object info from in onto a
// fresh buffered channel, which is closed once in is exhausted.
func getPathsFromObjectInfos(in <-chan s3.ObjectInfo) <-chan s3.ObjectInfo {
	out := make(chan s3.ObjectInfo, 1)
	go func() {
		defer close(out)
		for info := range in {
			out <- info
		}
	}()
	return out
}
// ListDirectory lists the entries directly under path, with the
// backend's path prefix stripped so keys remain transparent to callers.
func (b *S3FileBackend) ListDirectory(path string) ([]string, error) {
	prefix := filepath.Join(b.pathPrefix, path)
	// s3Clnt returns only the path itself when "/" is not present
	// appending "/" to make it consistent across all filestores
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	opts := s3.ListObjectsOptions{
		Prefix: prefix,
	}
	var paths []string
	for object := range b.client.ListObjects(context.Background(), b.bucket, opts) {
		if object.Err != nil {
			return nil, errors.Wrapf(object.Err, "unable to list the directory %s", prefix)
		}
		// We strip the path prefix that gets applied,
		// so that it remains transparent to the application.
		key := strings.Trim(strings.TrimPrefix(object.Key, b.pathPrefix), "/")
		if key != "" {
			paths = append(paths, key)
		}
	}
	return paths, nil
}
// RemoveDirectory deletes every object under path, recursively.
func (b *S3FileBackend) RemoveDirectory(path string) error {
	listOpts := s3.ListObjectsOptions{
		Prefix:    filepath.Join(b.pathPrefix, path),
		Recursive: true,
	}
	found := b.client.ListObjects(context.Background(), b.bucket, listOpts)
	results := b.client.RemoveObjects(context.Background(), b.bucket, getPathsFromObjectInfos(found), s3.RemoveObjectsOptions{})
	for result := range results {
		if result.Err != nil {
			return errors.Wrapf(result.Err, "unable to remove the directory %s", path)
		}
	}
	return nil
}
// s3PutOptions builds the PutObject options: optional server-side
// encryption, the given content type, and a fixed 5MB part size.
func s3PutOptions(encrypted bool, contentType string) s3.PutObjectOptions {
	options := s3.PutObjectOptions{
		ContentType: contentType,
		// We set the part size to the minimum allowed value of 5MBs
		// to avoid an excessive allocation in minio.PutObject implementation.
		PartSize: 1024 * 1024 * 5,
	}
	if encrypted {
		options.ServerSideEncryption = encrypt.NewSSE()
	}
	return options
}

View File

@ -0,0 +1,185 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package i18n
import (
"fmt"
"html/template"
"io/ioutil"
"net/http"
"path/filepath"
"reflect"
"strings"
"github.com/mattermost/go-i18n/i18n"
"github.com/mattermost/mattermost-server/v5/shared/mlog"
)
// defaultLocale is the ultimate fallback language.
const defaultLocale = "en"

// TranslateFunc is the type of the translate functions
type TranslateFunc func(translationID string, args ...interface{}) string

// T is the translate function using the default server language as fallback language
var T TranslateFunc

// TDefault is the translate function using english as fallback language
var TDefault TranslateFunc

// locales maps a locale code (e.g. "en") to its translation file path.
var locales map[string]string = make(map[string]string)

// defaultServerLocale and defaultClientLocale are recorded by InitTranslations.
var defaultServerLocale string
var defaultClientLocale string
// TranslationsPreInit loads translations from filesystem if they are not
// loaded already and assigns english while loading server config
func TranslationsPreInit(translationsDir string) error {
	if T != nil {
		return nil
	}
	// Set T even if we fail to load the translations. Lots of shutdown handling code will
	// segfault trying to handle the error, and the untranslated IDs are strictly better.
	T = tfuncWithFallback(defaultLocale)
	TDefault = tfuncWithFallback(defaultLocale)

	return initTranslationsWithDir(translationsDir)
}
// InitTranslations set the defaults configured in the server and initialize
// the T function using the server default as fallback language
func InitTranslations(serverLocale, clientLocale string) error {
	defaultServerLocale = serverLocale
	defaultClientLocale = clientLocale

	translations, err := getTranslationsBySystemLocale()
	T = translations
	return err
}
// initTranslationsWithDir registers every .json file in dir as a locale
// (keyed by the file name up to the first dot) and loads it.
func initTranslationsWithDir(dir string) error {
	// A missing or unreadable directory is ignored: no locales get loaded.
	files, _ := ioutil.ReadDir(dir)
	for _, f := range files {
		name := f.Name()
		if filepath.Ext(name) != ".json" {
			continue
		}
		fullPath := filepath.Join(dir, name)
		locales[strings.Split(name, ".")[0]] = fullPath
		if err := i18n.LoadTranslationFile(fullPath); err != nil {
			return err
		}
	}
	return nil
}
// getTranslationsBySystemLocale builds a TranslateFunc for the configured
// server locale, falling back to the default locale when the configured
// one has no loaded translation file.
func getTranslationsBySystemLocale() (TranslateFunc, error) {
	locale := defaultServerLocale
	if _, ok := locales[locale]; !ok {
		mlog.Warn("Failed to load system translations for", mlog.String("locale", locale), mlog.String("attempting to fall back to default locale", defaultLocale))
		locale = defaultLocale
	}
	if locales[locale] == "" {
		return nil, fmt.Errorf("failed to load system translations for '%v'", defaultLocale)
	}
	tfunc := tfuncWithFallback(locale)
	if tfunc == nil {
		return nil, fmt.Errorf("failed to load system translations")
	}
	mlog.Info("Loaded system translations", mlog.String("for locale", locale), mlog.String("from locale", locales[locale]))
	return tfunc, nil
}
// GetUserTranslations get the translation function for an specific locale
func GetUserTranslations(locale string) TranslateFunc {
	if _, known := locales[locale]; !known {
		locale = defaultLocale
	}
	return tfuncWithFallback(locale)
}
// GetTranslationsAndLocaleFromRequest return the translation function and the
// locale based on a request headers
func GetTranslationsAndLocaleFromRequest(r *http.Request) (TranslateFunc, string) {
	// This is for checking against locales like pt_BR or zn_CN
	headerLocaleFull := strings.Split(r.Header.Get("Accept-Language"), ",")[0]
	// This is for checking against locales like en, es
	headerLocale := strings.Split(strings.Split(r.Header.Get("Accept-Language"), ",")[0], "-")[0]
	defaultLocale := defaultClientLocale
	if locales[headerLocaleFull] != "" {
		translations := tfuncWithFallback(headerLocaleFull)
		return translations, headerLocaleFull
	} else if locales[headerLocale] != "" {
		translations := tfuncWithFallback(headerLocale)
		return translations, headerLocale
	} else if locales[defaultLocale] != "" {
		translations := tfuncWithFallback(defaultLocale)
		// NOTE(review): this branch pairs the default-locale translator with
		// headerLocale (the unsupported request language) — confirm whether
		// returning defaultLocale was intended.
		return translations, headerLocale
	}
	translations := tfuncWithFallback(defaultLocale)
	return translations, defaultLocale
}
// GetSupportedLocales return a map of locale code and the file path with the
// translations
// NOTE(review): the live internal map is returned, not a copy; callers
// must treat it as read-only.
func GetSupportedLocales() map[string]string {
	return locales
}
// tfuncWithFallback builds a TranslateFunc for pref that retries with the
// default locale whenever a translation ID has no entry in pref.
func tfuncWithFallback(pref string) TranslateFunc {
	t, _ := i18n.Tfunc(pref)
	return func(translationID string, args ...interface{}) string {
		if translated := t(translationID, args...); translated != translationID {
			return translated
		}
		fallback, _ := i18n.Tfunc(defaultLocale)
		return fallback(translationID, args...)
	}
}
// TranslateAsHTML translates the translationID provided and return a
// template.HTML object
func TranslateAsHTML(t TranslateFunc, translationID string, args map[string]interface{}) template.HTML {
	translated := t(translationID, escapeForHTML(args))
	// [[...]] markers in translations render as bold text.
	translated = strings.Replace(translated, "[[", "<strong>", -1)
	translated = strings.Replace(translated, "]]", "</strong>", -1)
	return template.HTML(translated)
}
// escapeForHTML HTML-escapes a string, *string, or (recursively) every
// value of a map[string]interface{}. Any other type is logged and
// replaced with an empty string.
func escapeForHTML(arg interface{}) interface{} {
	switch v := arg.(type) {
	case string:
		return template.HTMLEscapeString(v)
	case *string:
		return template.HTMLEscapeString(*v)
	case map[string]interface{}:
		escaped := make(map[string]interface{}, len(v))
		for key, value := range v {
			escaped[key] = escapeForHTML(value)
		}
		return escaped
	default:
		mlog.Warn(
			"Unable to escape value for HTML template",
			mlog.Any("html_template", arg),
			mlog.String("template_type", reflect.ValueOf(arg).Type().String()),
		)
		return ""
	}
}
// IdentityTfunc returns a translation function that don't translate, only
// returns the same id
func IdentityTfunc() TranslateFunc {
	return func(translationID string, _ ...interface{}) string {
		return translationID
	}
}

View File

@ -0,0 +1,255 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
import (
"regexp"
"strings"
"unicode"
"unicode/utf8"
)
// Based off of extensions/autolink.c from https://github.com/github/cmark
var (
	// DefaultURLSchemes lists the URL schemes recognized by parseURLAutolink.
	DefaultURLSchemes = []string{"http", "https", "ftp", "mailto", "tel"}
	// wwwAutoLinkRegex matches "www." optionally preceded by up to three digits.
	wwwAutoLinkRegex = regexp.MustCompile(`^www\d{0,3}\.`)
)
// Given a string with a w at the given position, tries to parse and return a range containing a www link.
// if one exists. If the text at the given position isn't a link, returns an empty string. Equivalent to
// www_match from the reference code.
func parseWWWAutolink(data string, position int) (Range, bool) {
	// Check that this isn't part of another word
	// NOTE(review): the guard is position > 1, so a candidate starting at
	// index 1 never has data[0] validated — confirm against the cmark
	// reference whether position > 0 was intended.
	if position > 1 {
		prevChar := data[position-1]
		if !isWhitespaceByte(prevChar) && !isAllowedBeforeWWWLink(prevChar) {
			return Range{}, false
		}
	}
	// Check that this starts with www
	if len(data)-position < 4 || !wwwAutoLinkRegex.MatchString(data[position:]) {
		return Range{}, false
	}
	// checkDomain returns the number of valid domain bytes; 0 means the
	// text is not a domain at all.
	end := checkDomain(data[position:], false)
	if end == 0 {
		return Range{}, false
	}
	end += position
	// Grab all text until the end of the string or the next whitespace character
	for end < len(data) && !isWhitespaceByte(data[end]) {
		end += 1
	}
	// Trim trailing punctuation
	end = trimTrailingCharactersFromLink(data, position, end)
	if position == end {
		return Range{}, false
	}
	return Range{position, end}, true
}
// isAllowedBeforeWWWLink reports whether byte c may immediately precede
// a www autolink: emphasis/strikethrough delimiters and a closing
// parenthesis are allowed (whitespace is checked separately by callers).
func isAllowedBeforeWWWLink(c byte) bool {
	return c == '*' || c == '_' || c == '~' || c == ')'
}
// Given a string with a : at the given position, tried to parse and return a range containing a URL scheme
// if one exists. If the text around the given position isn't a link, returns an empty string. Equivalent to
// url_match from the reference code.
func parseURLAutolink(data string, position int) (Range, bool) {
	// Check that a :// exists. This doesn't match the clients that treat the slashes as optional.
	if len(data)-position < 4 || data[position+1] != '/' || data[position+2] != '/' {
		return Range{}, false
	}
	// Walk backwards over the scheme's alphanumeric characters.
	start := position - 1
	for start > 0 && isAlphanumericByte(data[start-1]) {
		start -= 1
	}
	// NOTE(review): start < 0 looks unreachable given the loop stops at 0;
	// the position bound is the effective check here.
	if start < 0 || position >= len(data) {
		return Range{}, false
	}
	// Ensure that the URL scheme is allowed and that at least one character after the scheme is valid.
	scheme := data[start:position]
	if !isSchemeAllowed(scheme) || !isValidHostCharacter(data[position+3:]) {
		return Range{}, false
	}
	end := checkDomain(data[position+3:], true)
	if end == 0 {
		return Range{}, false
	}
	// NOTE(review): this adds position rather than position+3 (the start of
	// the domain); the whitespace scan below extends past "://" so the final
	// range is unaffected, but confirm against the reference code.
	end += position
	// Grab all text until the end of the string or the next whitespace character
	for end < len(data) && !isWhitespaceByte(data[end]) {
		end += 1
	}
	// Trim trailing punctuation
	end = trimTrailingCharactersFromLink(data, start, end)
	if start == end {
		return Range{}, false
	}
	return Range{start, end}, true
}
// isSchemeAllowed reports whether scheme (compared case-insensitively)
// is one of the autolinkable URL schemes in DefaultURLSchemes.
// Note that this doesn't support the custom URL schemes implemented by the client
func isSchemeAllowed(scheme string) bool {
	for _, allowed := range DefaultURLSchemes {
		if strings.EqualFold(scheme, allowed) {
			return true
		}
	}
	return false
}
// Given a string starting with a URL, returns the number of valid characters that make up the URL's domain.
// Returns 0 if the string doesn't start with a domain name. allowShort determines whether or not the domain
// needs to contain a period to be considered valid. Equivalent to check_domain from the reference code.
func checkDomain(data string, allowShort bool) int {
	foundUnderscore := false
	foundPeriod := false
	// Scanning starts at 1: the caller already validated the first byte.
	// NOTE(review): the bound is len(data)-1, so the final byte is never
	// scanned — confirm against the cmark reference.
	i := 1
	for ; i < len(data)-1; i++ {
		if data[i] == '_' {
			foundUnderscore = true
			break
		} else if data[i] == '.' {
			foundPeriod = true
		} else if !isValidHostCharacter(data[i:]) && data[i] != '-' {
			break
		}
	}
	// Underscores are never valid in hostnames, so the candidate is rejected.
	if foundUnderscore {
		return 0
	}
	if allowShort {
		// If allowShort is set, accept any string of valid domain characters
		return i
	}
	// If allowShort isn't set, a valid domain just requires at least a single period. Note that this
	// logic isn't entirely necessary because we already know the string starts with "www." when
	// this is called from parseWWWAutolink
	if foundPeriod {
		return i
	}
	return 0
}
// isValidHostCharacter reports whether link begins with a rune that may
// appear in a domain name: valid UTF-8 that is neither whitespace nor
// punctuation. Equivalent to is_valid_hostchar from the reference code.
func isValidHostCharacter(link string) bool {
	r, _ := utf8.DecodeRuneInString(link)
	if r == utf8.RuneError {
		return false
	}
	if unicode.IsSpace(r) || unicode.IsPunct(r) {
		return false
	}
	return true
}
// Removes any trailing characters such as punctuation or stray brackets that shouldn't be part of the link.
// Returns a new end position for the link. Equivalent to autolink_delim from the reference code.
func trimTrailingCharactersFromLink(markdown string, start int, end int) int {
	// Work rune-wise; the returned offset is converted back to bytes below.
	runes := []rune(markdown[start:end])
	linkEnd := len(runes)
	// Cut off the link before an open angle bracket if it contains one
	for i, c := range runes {
		if c == '<' {
			linkEnd = i
			break
		}
	}
	// Repeatedly strip trailing characters until a rune that may end a link
	// is found.
	for linkEnd > 0 {
		c := runes[linkEnd-1]
		if !canEndAutolink(c) {
			// Trim trailing quotes, periods, etc
			linkEnd = linkEnd - 1
		} else if c == ';' {
			// Trim a trailing HTML entity
			newEnd := linkEnd - 2
			for newEnd > 0 && ((runes[newEnd] >= 'a' && runes[newEnd] <= 'z') || (runes[newEnd] >= 'A' && runes[newEnd] <= 'Z')) {
				newEnd -= 1
			}
			if newEnd < linkEnd-2 && runes[newEnd] == '&' {
				// Looks like "&word;": drop the whole entity.
				linkEnd = newEnd
			} else {
				// This isn't actually an HTML entity, so just trim the semicolon
				linkEnd = linkEnd - 1
			}
		} else if c == ')' {
			// Only allow an autolink ending with a bracket if that bracket is part of a matching pair of brackets.
			// If there are more closing brackets than opening ones, remove the extra bracket
			numClosing := 0
			numOpening := 0
			// Examples (input text => output linked portion):
			//
			// http://www.pokemon.com/Pikachu_(Electric)
			// => http://www.pokemon.com/Pikachu_(Electric)
			//
			// http://www.pokemon.com/Pikachu_((Electric)
			// => http://www.pokemon.com/Pikachu_((Electric)
			//
			// http://www.pokemon.com/Pikachu_(Electric))
			// => http://www.pokemon.com/Pikachu_(Electric)
			//
			// http://www.pokemon.com/Pikachu_((Electric))
			// => http://www.pokemon.com/Pikachu_((Electric))
			for i := 0; i < linkEnd; i++ {
				if runes[i] == '(' {
					numOpening += 1
				} else if runes[i] == ')' {
					numClosing += 1
				}
			}
			if numClosing <= numOpening {
				// There's fewer or equal closing brackets, so we've found the end of the link
				break
			}
			linkEnd -= 1
		} else {
			// There's no special characters at the end of the link, so we're at the end
			break
		}
	}
	// Convert the rune offset back into a byte offset relative to markdown.
	return start + len(string(runes[:linkEnd]))
}
// canEndAutolink reports whether c may be the final character of an autolink.
// Trailing punctuation such as quotes, periods, and colons is excluded.
func canEndAutolink(c rune) bool {
	return !strings.ContainsRune(`?!.,:*_~'"`, c)
}

View File

@ -0,0 +1,62 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package markdown

// BlockQuote is a markdown block quote ("> ..."), a container for nested
// child blocks.
type BlockQuote struct {
	blockBase
	markdown string // full source text that the children's ranges index into

	Children []Block
}
// Continuation reports how a line continues this block quote: the line must be
// indented at most three columns and must begin with '>'. The returned
// continuation consumes the marker and treats one column of the following
// indentation as part of it.
func (b *BlockQuote) Continuation(indentation int, r Range) *continuation {
	if indentation > 3 {
		return nil
	}
	line := b.markdown[r.Position:r.End]
	if len(line) == 0 || line[0] != '>' {
		return nil
	}
	afterMarker := Range{r.Position + 1, r.End}
	indent, indentBytes := countIndentation(b.markdown, afterMarker)
	if indent > 0 {
		// One space after '>' belongs to the marker itself.
		indent--
	}
	return &continuation{
		Indentation: indent,
		Remaining:   Range{afterMarker.Position + indentBytes, afterMarker.End},
	}
}
// AddChild appends the first of the newly opened blocks as a child of this
// quote and leaves the chain of open blocks unchanged.
func (b *BlockQuote) AddChild(openBlocks []Block) []Block {
	b.Children = append(b.Children, openBlocks[0])
	return openBlocks
}
// blockQuoteStart opens a BlockQuote when the line begins one: at most three
// columns of indentation followed by '>', with one optional space consumed as
// part of the marker. Content after the marker is parsed for child blocks.
func blockQuoteStart(markdown string, indent int, r Range) []Block {
	if indent > 3 {
		return nil
	}
	line := markdown[r.Position:r.End]
	if len(line) == 0 || line[0] != '>' {
		return nil
	}

	quote := &BlockQuote{markdown: markdown}

	// Skip the marker and a single optional space following it.
	r.Position++
	if len(line) > 1 && line[1] == ' ' {
		r.Position++
	}

	childIndent, childIndentBytes := countIndentation(markdown, r)

	blocks := []Block{quote}
	children := blockStartOrParagraph(markdown, childIndent, Range{r.Position + childIndentBytes, r.End}, nil, nil)
	if children != nil {
		quote.Children = append(quote.Children, children[0])
		blocks = append(blocks, children...)
	}
	return blocks
}

View File

@ -0,0 +1,154 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
import (
"strings"
)
// continuation describes how a line continues an already-open block: the
// indentation still available to nested blocks, and the unconsumed remainder
// of the line.
type continuation struct {
	Indentation int
	Remaining   Range
}

// Block is a single parsed markdown block element.
type Block interface {
	// Continuation returns how the given line continues this open block, or
	// nil if it doesn't.
	Continuation(indentation int, r Range) *continuation
	// AddLine offers the line's remaining content to the block, returning
	// true if it was consumed.
	AddLine(indentation int, r Range) bool
	// Close finalizes the block once no further lines can be added.
	Close()
	// AllowsBlockStarts reports whether new child blocks may start inside.
	AllowsBlockStarts() bool
	// HasTrailingBlankLine reports whether the block ended with a blank line.
	HasTrailingBlankLine() bool
}

// blockBase provides no-op defaults for the optional Block methods.
type blockBase struct{}

func (*blockBase) AddLine(indentation int, r Range) bool { return false }
func (*blockBase) Close()                                {}
func (*blockBase) AllowsBlockStarts() bool               { return true }
func (*blockBase) HasTrailingBlankLine() bool            { return false }

// ContainerBlock is a block that can adopt child blocks.
type ContainerBlock interface {
	Block
	AddChild(openBlocks []Block) []Block
}

// Range is a half-open byte range [Position, End) into the source markdown.
type Range struct {
	Position int
	End      int
}
// closeBlocks closes each of the given blocks, collecting any reference
// definitions found in closed paragraphs into referenceDefinitions.
func closeBlocks(blocks []Block, referenceDefinitions []*ReferenceDefinition) []*ReferenceDefinition {
	for _, b := range blocks {
		b.Close()
		paragraph, isParagraph := b.(*Paragraph)
		if isParagraph && len(paragraph.ReferenceDefinitions) > 0 {
			referenceDefinitions = append(referenceDefinitions, paragraph.ReferenceDefinitions...)
		}
	}
	return referenceDefinitions
}
// ParseBlocks assembles the given lines into a block tree, returning the root
// Document and the link reference definitions collected from closed
// paragraphs. It follows the CommonMark-style strategy: for each line, first
// find how deep the line continues the currently open blocks, then open new
// blocks or append text at that depth.
func ParseBlocks(markdown string, lines []Line) (*Document, []*ReferenceDefinition) {
	document := &Document{}
	var referenceDefinitions []*ReferenceDefinition

	openBlocks := []Block{document}

	for _, line := range lines {
		r := line.Range
		lastMatchIndex := 0

		indentation, indentationBytes := countIndentation(markdown, r)
		r = Range{r.Position + indentationBytes, r.End}

		// Walk the chain of open blocks from the outside in, consuming each
		// block's continuation prefix until one fails to match.
		for i, block := range openBlocks {
			if continuation := block.Continuation(indentation, r); continuation != nil {
				indentation = continuation.Indentation
				r = continuation.Remaining
				additionalIndentation, additionalIndentationBytes := countIndentation(markdown, r)
				r = Range{r.Position + additionalIndentationBytes, r.End}
				indentation += additionalIndentation
				lastMatchIndex = i
			} else {
				break
			}
		}

		// Try to start new blocks at the deepest matched position.
		if openBlocks[lastMatchIndex].AllowsBlockStarts() {
			if newBlocks := blockStart(markdown, indentation, r, openBlocks[:lastMatchIndex+1], openBlocks[lastMatchIndex+1:]); newBlocks != nil {
				didAdd := false
				// Find the innermost container willing to adopt the new
				// blocks, closing anything that was open deeper than it.
				for i := lastMatchIndex; i >= 0; i-- {
					if container, ok := openBlocks[i].(ContainerBlock); ok {
						if addedBlocks := container.AddChild(newBlocks); addedBlocks != nil {
							referenceDefinitions = closeBlocks(openBlocks[i+1:], referenceDefinitions)
							openBlocks = openBlocks[:i+1]
							openBlocks = append(openBlocks, addedBlocks...)
							didAdd = true
							break
						}
					}
				}
				if didAdd {
					continue
				}
			}
		}

		isBlank := strings.TrimSpace(markdown[r.Position:r.End]) == ""

		// A non-blank line following an open paragraph continues it lazily.
		if paragraph, ok := openBlocks[len(openBlocks)-1].(*Paragraph); ok && !isBlank {
			paragraph.Text = append(paragraph.Text, r)
			continue
		}

		// Otherwise close the unmatched blocks and offer the line to the
		// deepest matched block.
		referenceDefinitions = closeBlocks(openBlocks[lastMatchIndex+1:], referenceDefinitions)
		openBlocks = openBlocks[:lastMatchIndex+1]

		if openBlocks[lastMatchIndex].AddLine(indentation, r) {
			continue
		}

		// As a last resort, start a paragraph in the innermost container that
		// will take it.
		if paragraph := newParagraph(markdown, r); paragraph != nil {
			for i := lastMatchIndex; i >= 0; i-- {
				if container, ok := openBlocks[i].(ContainerBlock); ok {
					if newBlocks := container.AddChild([]Block{paragraph}); newBlocks != nil {
						referenceDefinitions = closeBlocks(openBlocks[i+1:], referenceDefinitions)
						openBlocks = openBlocks[:i+1]
						openBlocks = append(openBlocks, newBlocks...)
						break
					}
				}
			}
		}
	}

	referenceDefinitions = closeBlocks(openBlocks, referenceDefinitions)

	return document, referenceDefinitions
}
// blockStart attempts to open a new block at r, trying each block type in
// priority order: block quote, list, indented code, fenced code. It returns
// nil when no block starts here (including for empty ranges).
func blockStart(markdown string, indentation int, r Range, matchedBlocks, unmatchedBlocks []Block) []Block {
	if r.Position >= r.End {
		return nil
	}
	if blocks := blockQuoteStart(markdown, indentation, r); blocks != nil {
		return blocks
	}
	if blocks := listStart(markdown, indentation, r, matchedBlocks, unmatchedBlocks); blocks != nil {
		return blocks
	}
	if blocks := indentedCodeStart(markdown, indentation, r, matchedBlocks, unmatchedBlocks); blocks != nil {
		return blocks
	}
	if blocks := fencedCodeStart(markdown, indentation, r); blocks != nil {
		return blocks
	}
	return nil
}
// blockStartOrParagraph opens a new block at r if possible, falling back to a
// paragraph when no other block type matches. It returns nil for blank lines.
func blockStartOrParagraph(markdown string, indentation int, r Range, matchedBlocks, unmatchedBlocks []Block) []Block {
	if blocks := blockStart(markdown, indentation, r, matchedBlocks, unmatchedBlocks); blocks != nil {
		return blocks
	}
	if p := newParagraph(markdown, r); p != nil {
		return []Block{p}
	}
	return nil
}

View File

@ -0,0 +1,22 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
// Document is the root block of a parsed markdown document.
type Document struct {
	blockBase

	Children []Block
}

// Continuation reports that every line continues the document unchanged.
func (b *Document) Continuation(indentation int, r Range) *continuation {
	return &continuation{
		Indentation: indentation,
		Remaining:   r,
	}
}

// AddChild appends the first of the newly opened blocks as a top-level child
// and leaves the chain of open blocks unchanged.
func (b *Document) AddChild(openBlocks []Block) []Block {
	b.Children = append(b.Children, openBlocks[0])
	return openBlocks
}

View File

@ -0,0 +1,112 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
import (
"strings"
)
// FencedCodeLine is one line of a fenced code block: the line's byte range and
// the indentation it carried beyond the opening fence's indentation.
type FencedCodeLine struct {
	Indentation int
	Range       Range
}

// FencedCode is a markdown fenced code block delimited by backtick or tilde
// fences, with an optional info string following the opening fence.
type FencedCode struct {
	blockBase
	markdown           string // full source text that the ranges below index into
	didSeeClosingFence bool   // set once the closing fence line has been consumed

	Indentation  int
	OpeningFence Range
	RawInfo      Range
	RawCode      []FencedCodeLine
}
// Code returns the contents of the code block, with each stored line prefixed
// by the indentation it carried beyond the opening fence.
func (b *FencedCode) Code() string {
	// Build with strings.Builder instead of repeated string concatenation,
	// which is quadratic in the number of lines.
	var sb strings.Builder
	for _, line := range b.RawCode {
		sb.WriteString(strings.Repeat(" ", line.Indentation))
		sb.WriteString(b.markdown[line.Range.Position:line.Range.End])
	}
	return sb.String()
}
// Info returns the info string that followed the opening fence, with backslash
// escapes and character references resolved.
func (b *FencedCode) Info() string {
	return Unescape(b.markdown[b.RawInfo.Position:b.RawInfo.End])
}

// Continuation reports that every line continues the block verbatim until the
// closing fence has been seen.
func (b *FencedCode) Continuation(indentation int, r Range) *continuation {
	if b.didSeeClosingFence {
		return nil
	}
	return &continuation{
		Indentation: indentation,
		Remaining:   r,
	}
}
// AddLine consumes a line of the fenced code block. A line indented at most
// three columns that repeats the opening fence (possibly longer, with nothing
// but more fence characters after it) closes the block; any other line is
// stored as code with its indentation reduced by the opening fence's.
func (b *FencedCode) AddLine(indentation int, r Range) bool {
	s := b.markdown[r.Position:r.End]
	if indentation <= 3 && strings.HasPrefix(s, b.markdown[b.OpeningFence.Position:b.OpeningFence.End]) {
		suffix := strings.TrimSpace(s[b.OpeningFence.End-b.OpeningFence.Position:])
		isClosingFence := true
		// Any non-fence character after the fence disqualifies it as a closer.
		for _, c := range suffix {
			if c != rune(s[0]) {
				isClosingFence = false
				break
			}
		}
		if isClosingFence {
			b.didSeeClosingFence = true
			return true
		}
	}
	// Code lines keep only the indentation beyond the opening fence's.
	if indentation >= b.Indentation {
		indentation -= b.Indentation
	} else {
		indentation = 0
	}
	b.RawCode = append(b.RawCode, FencedCodeLine{
		Indentation: indentation,
		Range:       r,
	})
	return true
}

// AllowsBlockStarts reports that no new blocks may start inside a fenced code
// block.
func (b *FencedCode) AllowsBlockStarts() bool {
	return false
}
// fencedCodeStart opens a FencedCode block when the line begins with at least
// three backticks or tildes. The rest of the line becomes the info string,
// which may not contain a backtick.
func fencedCodeStart(markdown string, indentation int, r Range) []Block {
	line := markdown[r.Position:r.End]
	if !strings.HasPrefix(line, "```") && !strings.HasPrefix(line, "~~~") {
		return nil
	}

	// Count the full fence length; it may be longer than three characters.
	fenceChar := line[0]
	fenceLength := 3
	for fenceLength < len(line) && line[fenceLength] == fenceChar {
		fenceLength++
	}

	// The info string may not contain a backtick.
	if strings.IndexByte(line[fenceLength:], '`') != -1 {
		return nil
	}

	return []Block{
		&FencedCode{
			markdown:     markdown,
			Indentation:  indentation,
			RawInfo:      trimRightSpace(markdown, Range{r.Position + fenceLength, r.End}),
			OpeningFence: Range{r.Position, r.Position + fenceLength},
		},
	}
}

View File

@ -0,0 +1,192 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
import (
"fmt"
"strings"
)
// htmlEscaper escapes the characters that are significant in HTML text and
// attribute values.
var htmlEscaper = strings.NewReplacer(
	`&`, "&amp;",
	`<`, "&lt;",
	`>`, "&gt;",
	`"`, "&quot;",
)
// RenderHTML produces HTML with the same behavior as the example renderer used in the CommonMark
// reference materials except for one slight difference: for brevity, no unnecessary whitespace is
// inserted between elements. The output is not defined by the CommonMark spec, and it exists
// primarily as an aid in testing.
func RenderHTML(markdown string) string {
return RenderBlockHTML(Parse(markdown))
}
// RenderBlockHTML renders a single block (and its descendants) to HTML,
// resolving reference-style links and images against referenceDefinitions.
func RenderBlockHTML(block Block, referenceDefinitions []*ReferenceDefinition) (result string) {
	return renderBlockHTML(block, referenceDefinitions, false)
}
// renderBlockHTML renders one block to HTML. isTightList suppresses the <p>
// wrapper around paragraphs that are direct children of a tight list, matching
// the CommonMark reference renderer.
func renderBlockHTML(block Block, referenceDefinitions []*ReferenceDefinition, isTightList bool) (result string) {
	switch v := block.(type) {
	case *Document:
		for _, block := range v.Children {
			result += RenderBlockHTML(block, referenceDefinitions)
		}
	case *Paragraph:
		// Paragraphs with no text render as nothing at all.
		if len(v.Text) == 0 {
			return
		}
		if !isTightList {
			result += "<p>"
		}
		for _, inline := range v.ParseInlines(referenceDefinitions) {
			result += RenderInlineHTML(inline)
		}
		if !isTightList {
			result += "</p>"
		}
	case *List:
		if v.IsOrdered {
			// Only emit a start attribute when it differs from the default.
			if v.OrderedStart != 1 {
				result += fmt.Sprintf(`<ol start="%v">`, v.OrderedStart)
			} else {
				result += "<ol>"
			}
		} else {
			result += "<ul>"
		}
		for _, block := range v.Children {
			result += renderBlockHTML(block, referenceDefinitions, !v.IsLoose)
		}
		if v.IsOrdered {
			result += "</ol>"
		} else {
			result += "</ul>"
		}
	case *ListItem:
		result += "<li>"
		for _, block := range v.Children {
			result += renderBlockHTML(block, referenceDefinitions, isTightList)
		}
		result += "</li>"
	case *BlockQuote:
		result += "<blockquote>"
		for _, block := range v.Children {
			result += RenderBlockHTML(block, referenceDefinitions)
		}
		result += "</blockquote>"
	case *FencedCode:
		// The first word of the info string becomes a "language-" class.
		if info := v.Info(); info != "" {
			language := strings.Fields(info)[0]
			result += `<pre><code class="language-` + htmlEscaper.Replace(language) + `">`
		} else {
			result += "<pre><code>"
		}
		result += htmlEscaper.Replace(v.Code()) + "</code></pre>"
	case *IndentedCode:
		result += "<pre><code>" + htmlEscaper.Replace(v.Code()) + "</code></pre>"
	default:
		panic(fmt.Sprintf("missing case for type %T", v))
	}
	return
}
// escapeURL percent-encodes url for inclusion in an href or src attribute:
// alphanumerics, URL punctuation, and existing percent-escapes pass through
// unchanged, and every other byte is percent-encoded.
func escapeURL(url string) string {
	var sb strings.Builder
	for i := 0; i < len(url); {
		switch b := url[i]; b {
		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '-', '_', '.', '!', '~', '*', '\'', '(', ')', '#':
			sb.WriteByte(b)
			i++
		default:
			switch {
			case b == '%' && i+2 < len(url) && isHexByte(url[i+1]) && isHexByte(url[i+2]):
				// Pass pre-encoded escapes through untouched.
				sb.WriteString(url[i : i+3])
				i += 3
			case (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9'):
				sb.WriteByte(b)
				i++
			default:
				// Bug fix: "%0X" has no field width, so bytes below 0x10 were
				// emitted with a single hex digit (e.g. "%A"), which is not a
				// valid percent-escape. Use a fixed two-digit escape instead.
				fmt.Fprintf(&sb, "%%%02X", b)
				i++
			}
		}
	}
	return sb.String()
}
// RenderInlineHTML renders a single inline (and its descendants) to HTML in
// the style of the CommonMark reference renderer.
func RenderInlineHTML(inline Inline) (result string) {
	switch v := inline.(type) {
	case *Text:
		return htmlEscaper.Replace(v.Text)
	case *HardLineBreak:
		return "<br />"
	case *SoftLineBreak:
		return "\n"
	case *CodeSpan:
		return "<code>" + htmlEscaper.Replace(v.Code) + "</code>"
	case *InlineImage:
		// The alt attribute is the flattened plain text of the children.
		result += `<img src="` + htmlEscaper.Replace(escapeURL(v.Destination())) + `" alt="` + htmlEscaper.Replace(renderImageAltText(v.Children)) + `"`
		if title := v.Title(); title != "" {
			result += ` title="` + htmlEscaper.Replace(title) + `"`
		}
		result += ` />`
	case *ReferenceImage:
		result += `<img src="` + htmlEscaper.Replace(escapeURL(v.Destination())) + `" alt="` + htmlEscaper.Replace(renderImageAltText(v.Children)) + `"`
		if title := v.Title(); title != "" {
			result += ` title="` + htmlEscaper.Replace(title) + `"`
		}
		result += ` />`
	case *InlineLink:
		result += `<a href="` + htmlEscaper.Replace(escapeURL(v.Destination())) + `"`
		if title := v.Title(); title != "" {
			result += ` title="` + htmlEscaper.Replace(title) + `"`
		}
		result += `>`
		for _, inline := range v.Children {
			result += RenderInlineHTML(inline)
		}
		result += "</a>"
	case *ReferenceLink:
		result += `<a href="` + htmlEscaper.Replace(escapeURL(v.Destination())) + `"`
		if title := v.Title(); title != "" {
			result += ` title="` + htmlEscaper.Replace(title) + `"`
		}
		result += `>`
		for _, inline := range v.Children {
			result += RenderInlineHTML(inline)
		}
		result += "</a>"
	case *Autolink:
		result += `<a href="` + htmlEscaper.Replace(escapeURL(v.Destination())) + `">`
		for _, inline := range v.Children {
			result += RenderInlineHTML(inline)
		}
		result += "</a>"
	default:
		panic(fmt.Sprintf("missing case for type %T", v))
	}
	return
}
// renderImageAltText flattens the given inlines to the plain text used for an
// image's alt attribute.
func renderImageAltText(children []Inline) (result string) {
	for _, inline := range children {
		result += renderImageChildAltText(inline)
	}
	return
}
// renderImageChildAltText renders the plain-text form of a single inline for
// use in an image's alt attribute: text is emitted verbatim and the children
// of nested links and images are flattened recursively. All other inline
// types contribute nothing.
func renderImageChildAltText(inline Inline) (result string) {
	switch v := inline.(type) {
	case *Text:
		return v.Text
	case *InlineImage:
		for _, child := range v.Children {
			result += renderImageChildAltText(child)
		}
	case *InlineLink:
		for _, child := range v.Children {
			result += renderImageChildAltText(child)
		}
	case *ReferenceImage:
		// Previously only the inline variants recursed, so reference-style
		// links and images nested inside an image dropped their text from the
		// alt attribute entirely.
		for _, child := range v.Children {
			result += renderImageChildAltText(child)
		}
	case *ReferenceLink:
		for _, child := range v.Children {
			result += renderImageChildAltText(child)
		}
	}
	return
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,98 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
import (
"strings"
)
// IndentedCodeLine is one line of an indented code block: the line's byte
// range and its indentation beyond the required four columns.
type IndentedCodeLine struct {
	Indentation int
	Range       Range
}

// IndentedCode is a markdown code block formed by lines indented at least
// four columns.
type IndentedCode struct {
	blockBase
	markdown string // full source text that RawCode's ranges index into

	RawCode []IndentedCodeLine
}
// Code returns the contents of the code block, with each stored line prefixed
// by its indentation beyond the required four columns.
func (b *IndentedCode) Code() string {
	// Build with strings.Builder instead of repeated string concatenation,
	// which is quadratic in the number of lines.
	var sb strings.Builder
	for _, line := range b.RawCode {
		sb.WriteString(strings.Repeat(" ", line.Indentation))
		sb.WriteString(b.markdown[line.Range.Position:line.Range.End])
	}
	return sb.String()
}
// Continuation reports how a line continues this indented code block: a line
// indented at least four columns continues it with the excess indentation,
// and a blank line continues it verbatim so later code can rejoin the block.
func (b *IndentedCode) Continuation(indentation int, r Range) *continuation {
	if indentation >= 4 {
		return &continuation{
			Indentation: indentation - 4,
			Remaining:   r,
		}
	}
	if strings.TrimSpace(b.markdown[r.Position:r.End]) == "" {
		return &continuation{Remaining: r}
	}
	return nil
}
// AddLine stores a line of the code block along with its indentation beyond
// the required four columns. Indented code consumes every line offered to it.
func (b *IndentedCode) AddLine(indentation int, r Range) bool {
	b.RawCode = append(b.RawCode, IndentedCodeLine{
		Indentation: indentation,
		Range:       r,
	})
	return true
}
// Close trims trailing blank lines from the stored code.
func (b *IndentedCode) Close() {
	// Guard against an empty RawCode slice so the loop cannot index before
	// the start of the slice; indentedCodeStart always seeds one non-blank
	// line, but AddLine callers are not obligated to preserve that invariant.
	for len(b.RawCode) > 0 {
		last := b.RawCode[len(b.RawCode)-1]
		s := b.markdown[last.Range.Position:last.Range.End]
		if strings.TrimRight(s, "\r\n") != "" {
			break
		}
		b.RawCode = b.RawCode[:len(b.RawCode)-1]
	}
}
// AllowsBlockStarts reports that no new blocks may start inside indented code.
func (b *IndentedCode) AllowsBlockStarts() bool {
	return false
}
// indentedCodeStart opens an IndentedCode block for a non-blank line indented
// at least four columns, unless the line would instead continue an open
// paragraph (lazy paragraph continuation takes precedence over indented code).
func indentedCodeStart(markdown string, indentation int, r Range, matchedBlocks, unmatchedBlocks []Block) []Block {
	// A paragraph at the innermost open position absorbs the line instead.
	if n := len(unmatchedBlocks); n > 0 {
		if _, isParagraph := unmatchedBlocks[n-1].(*Paragraph); isParagraph {
			return nil
		}
	} else if n := len(matchedBlocks); n > 0 {
		if _, isParagraph := matchedBlocks[n-1].(*Paragraph); isParagraph {
			return nil
		}
	}

	if indentation < 4 || strings.TrimSpace(markdown[r.Position:r.End]) == "" {
		return nil
	}

	firstLine := IndentedCodeLine{
		Indentation: indentation - 4,
		Range:       r,
	}
	return []Block{
		&IndentedCode{
			markdown: markdown,
			RawCode:  []IndentedCodeLine{firstLine},
		},
	}
}

View File

@ -0,0 +1,663 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
import (
"container/list"
"strings"
"unicode"
"unicode/utf8"
)
// Inline is a single parsed markdown inline element.
type Inline interface {
	IsInline() bool
}

// inlineBase marks an embedding type as implementing Inline.
type inlineBase struct{}

func (inlineBase) IsInline() bool { return true }

// Text is a run of literal text; Range locates it in the source document.
type Text struct {
	inlineBase
	Text  string
	Range Range
}

// CodeSpan is an inline code span; Code holds its normalized contents.
type CodeSpan struct {
	inlineBase
	Code string
}

// HardLineBreak is a line ending that renders as an explicit break.
type HardLineBreak struct {
	inlineBase
}

// SoftLineBreak is a line ending that renders as ordinary whitespace.
type SoftLineBreak struct {
	inlineBase
}

// InlineLinkOrImage is the shared representation of inline-style links and
// images ("[text](destination "title")").
type InlineLinkOrImage struct {
	inlineBase
	Children []Inline

	RawDestination Range

	markdown string // full source text that RawDestination indexes into
	rawTitle string
}

// Destination returns the link target with escapes and character references
// resolved.
func (i *InlineLinkOrImage) Destination() string {
	return Unescape(i.markdown[i.RawDestination.Position:i.RawDestination.End])
}

// Title returns the optional link title with escapes and character references
// resolved.
func (i *InlineLinkOrImage) Title() string {
	return Unescape(i.rawTitle)
}
// InlineLink is an inline-style link ("[text](url)").
type InlineLink struct {
	InlineLinkOrImage
}

// InlineImage is an inline-style image ("![alt](url)").
type InlineImage struct {
	InlineLinkOrImage
}

// ReferenceLinkOrImage is the shared representation of reference-style links
// and images, resolved against a link reference definition.
type ReferenceLinkOrImage struct {
	inlineBase
	*ReferenceDefinition

	Children []Inline
}

// ReferenceLink is a reference-style link ("[text][label]").
type ReferenceLink struct {
	ReferenceLinkOrImage
}

// ReferenceImage is a reference-style image ("![alt][label]").
type ReferenceImage struct {
	ReferenceLinkOrImage
}

// Autolink is a bare URL or www link recognized without explicit markup.
type Autolink struct {
	inlineBase
	Children []Inline

	RawDestination Range

	markdown string // full source text that RawDestination indexes into
}

// Destination returns the link target, prefixing "http://" onto bare www
// links.
func (i *Autolink) Destination() string {
	destination := Unescape(i.markdown[i.RawDestination.Position:i.RawDestination.End])
	if strings.HasPrefix(destination, "www") {
		destination = "http://" + destination
	}
	return destination
}

// delimiterType distinguishes the bracket delimiters tracked while parsing
// links and images.
type delimiterType int

const (
	linkOpeningDelimiter  delimiterType = iota // "["
	imageOpeningDelimiter                      // "!["
)

// delimiter records a potential link/image opener on the delimiter stack.
type delimiter struct {
	Type       delimiterType
	IsInactive bool  // set once a containing link has been formed
	TextNode   int   // index into inlineParser.inlines of the opener's text node
	Range      Range // opener's byte range within inlineParser.raw
}
// inlineParser parses the inline content of a block. The block's possibly
// discontinuous source ranges are concatenated into raw; positions within raw
// are mapped back to source positions via ranges.
type inlineParser struct {
	markdown             string
	ranges               []Range
	referenceDefinitions []*ReferenceDefinition

	raw            string
	position       int // current byte offset into raw
	inlines        []Inline
	delimiterStack *list.List
}

// newInlineParser returns a parser for the inline content covered by ranges
// within markdown.
func newInlineParser(markdown string, ranges []Range, referenceDefinitions []*ReferenceDefinition) *inlineParser {
	return &inlineParser{
		markdown:             markdown,
		ranges:               ranges,
		referenceDefinitions: referenceDefinitions,
		delimiterStack:       list.New(),
	}
}
// parseBackticks handles a run of backticks: if a closing run of exactly the
// same length exists, the text between becomes a CodeSpan with interior
// whitespace runs collapsed to single spaces; otherwise the opening run is
// emitted as literal text.
func (p *inlineParser) parseBackticks() {
	count := 1
	for i := p.position + 1; i < len(p.raw) && p.raw[i] == '`'; i++ {
		count++
	}
	opening := p.raw[p.position : p.position+count]
	search := p.position + count
	for search < len(p.raw) {
		end := strings.Index(p.raw[search:], opening)
		if end == -1 {
			break
		}
		// A closer must be exactly the same length, so skip over any longer
		// run of backticks and keep searching.
		if search+end+count < len(p.raw) && p.raw[search+end+count] == '`' {
			search += end + count
			for search < len(p.raw) && p.raw[search] == '`' {
				search++
			}
			continue
		}
		// Collapse interior whitespace runs to single spaces.
		code := strings.Join(strings.Fields(p.raw[p.position+count:search+end]), " ")
		p.position = search + end + count
		p.inlines = append(p.inlines, &CodeSpan{
			Code: code,
		})
		return
	}
	// No closer found: the opening run is literal text.
	p.position += len(opening)
	absPos := relativeToAbsolutePosition(p.ranges, p.position-len(opening))
	p.inlines = append(p.inlines, &Text{
		Text:  opening,
		Range: Range{absPos, absPos + len(opening)},
	})
}
// parseLineEnding consumes a line ending ("\n", "\r", or "\r\n"), emitting a
// HardLineBreak when the preceding whitespace requests one and a
// SoftLineBreak otherwise.
func (p *inlineParser) parseLineEnding() {
	if p.position >= 1 && p.raw[p.position-1] == '\t' {
		// A trailing tab forces a hard break.
		p.inlines = append(p.inlines, &HardLineBreak{})
	} else if p.position >= 2 && p.raw[p.position-1] == ' ' && (p.raw[p.position-2] == '\t' || p.raw[p.position-1] == ' ') {
		// NOTE(review): the second operand re-tests p.raw[p.position-1] == ' ',
		// which is always true at this point, so any single trailing space
		// yields a hard break. It may have been meant to be
		// p.raw[p.position-2] == ' ' (CommonMark requires two spaces) —
		// confirm intent before changing.
		p.inlines = append(p.inlines, &HardLineBreak{})
	} else {
		p.inlines = append(p.inlines, &SoftLineBreak{})
	}
	p.position++
	// Fold the LF of a CRLF pair into the same line ending.
	if p.position < len(p.raw) && p.raw[p.position] == '\n' {
		p.position++
	}
}
// parseEscapeCharacter handles a backslash: if the next byte is escapable it
// is emitted as literal text, otherwise the backslash itself is literal.
func (p *inlineParser) parseEscapeCharacter() {
	canEscapeNext := p.position+1 < len(p.raw) && isEscapableByte(p.raw[p.position+1])
	if !canEscapeNext {
		absPos := relativeToAbsolutePosition(p.ranges, p.position)
		p.inlines = append(p.inlines, &Text{
			Text:  `\`,
			Range: Range{absPos, absPos + 1},
		})
		p.position++
		return
	}
	absPos := relativeToAbsolutePosition(p.ranges, p.position+1)
	escaped := string(p.raw[p.position+1])
	p.inlines = append(p.inlines, &Text{
		Text:  escaped,
		Range: Range{absPos, absPos + len(escaped)},
	})
	p.position += 2
}
// parseText consumes a run of plain text up to the next character that could
// begin another inline element, or to the end of the input.
func (p *inlineParser) parseText() {
	if next := strings.IndexAny(p.raw[p.position:], "\r\n\\`&![]wW:"); next == -1 {
		// No special characters remain; take the rest of the input, trimming
		// trailing whitespace from the emitted text.
		absPos := relativeToAbsolutePosition(p.ranges, p.position)
		p.inlines = append(p.inlines, &Text{
			Text:  strings.TrimRightFunc(p.raw[p.position:], isWhitespace),
			Range: Range{absPos, absPos + len(p.raw[p.position:])},
		})
		p.position = len(p.raw)
	} else {
		absPos := relativeToAbsolutePosition(p.ranges, p.position)
		if p.raw[p.position+next] == '\r' || p.raw[p.position+next] == '\n' {
			// Whitespace before a line ending is dropped from the text node
			// (parseLineEnding inspects it to decide on a hard break).
			s := strings.TrimRightFunc(p.raw[p.position:p.position+next], isWhitespace)
			p.inlines = append(p.inlines, &Text{
				Text:  s,
				Range: Range{absPos, absPos + len(s)},
			})
		} else {
			if next == 0 {
				// Always read at least one character since 'w', 'W', and ':' may not actually match another
				// type of node
				next = 1
			}
			p.inlines = append(p.inlines, &Text{
				Text:  p.raw[p.position : p.position+next],
				Range: Range{absPos, absPos + next},
			})
		}
		p.position += next
	}
}
// parseLinkOrImageDelimiter handles '[' and '!': "[" and "![" are emitted as
// text and recorded on the delimiter stack as potential link/image openers,
// while a lone "!" is plain text.
func (p *inlineParser) parseLinkOrImageDelimiter() {
	absPos := relativeToAbsolutePosition(p.ranges, p.position)
	if p.raw[p.position] == '[' {
		p.inlines = append(p.inlines, &Text{
			Text:  "[",
			Range: Range{absPos, absPos + 1},
		})
		p.delimiterStack.PushBack(&delimiter{
			Type:     linkOpeningDelimiter,
			TextNode: len(p.inlines) - 1,
			Range:    Range{p.position, p.position + 1},
		})
		p.position++
	} else if p.raw[p.position] == '!' && p.position+1 < len(p.raw) && p.raw[p.position+1] == '[' {
		p.inlines = append(p.inlines, &Text{
			Text:  "![",
			Range: Range{absPos, absPos + 2},
		})
		p.delimiterStack.PushBack(&delimiter{
			Type:     imageOpeningDelimiter,
			TextNode: len(p.inlines) - 1,
			Range:    Range{p.position, p.position + 2},
		})
		p.position += 2
	} else {
		// '!' not followed by '[' is ordinary text.
		p.inlines = append(p.inlines, &Text{
			Text:  "!",
			Range: Range{absPos, absPos + 1},
		})
		p.position++
	}
}
// peekAtInlineLinkDestinationAndTitle attempts to read the `(destination
// "title")` tail of an inline link or image beginning at position. For
// images, an optional "=dimensions" clause after the destination is parsed
// and discarded. On success it returns the destination and title ranges
// within p.raw and the offset just past the closing ')'.
func (p *inlineParser) peekAtInlineLinkDestinationAndTitle(position int, isImage bool) (destination, title Range, end int, ok bool) {
	if position >= len(p.raw) || p.raw[position] != '(' {
		return
	}
	position++

	destinationStart := nextNonWhitespace(p.raw, position)
	if destinationStart >= len(p.raw) {
		return
	} else if p.raw[destinationStart] == ')' {
		// "()": empty destination and title.
		return Range{destinationStart, destinationStart}, Range{destinationStart, destinationStart}, destinationStart + 1, true
	}

	destination, end, ok = parseLinkDestination(p.raw, destinationStart)
	if !ok {
		return
	}
	position = end

	if isImage && position < len(p.raw) && isWhitespaceByte(p.raw[position]) {
		dimensionsStart := nextNonWhitespace(p.raw, position)
		if dimensionsStart >= len(p.raw) {
			return
		}
		if p.raw[dimensionsStart] == '=' {
			// Read optional image dimensions even if we don't use them
			_, end, ok = parseImageDimensions(p.raw, dimensionsStart)
			if !ok {
				return
			}
			position = end
		}
	}

	if position < len(p.raw) && isWhitespaceByte(p.raw[position]) {
		titleStart := nextNonWhitespace(p.raw, position)
		if titleStart >= len(p.raw) {
			return
		} else if p.raw[titleStart] == ')' {
			// Destination only, no title.
			return destination, Range{titleStart, titleStart}, titleStart + 1, true
		}

		if p.raw[titleStart] == '"' || p.raw[titleStart] == '\'' || p.raw[titleStart] == '(' {
			title, end, ok = parseLinkTitle(p.raw, titleStart)
			if !ok {
				return
			}
			position = end
		}
	}

	closingPosition := nextNonWhitespace(p.raw, position)
	if closingPosition >= len(p.raw) || p.raw[closingPosition] != ')' {
		return Range{}, Range{}, 0, false
	}

	return destination, title, closingPosition + 1, true
}
// referenceDefinition returns the definition whose label matches the given
// label, compared case-insensitively with runs of whitespace collapsed to
// single spaces. It returns nil when no definition matches.
func (p *inlineParser) referenceDefinition(label string) *ReferenceDefinition {
	normalize := func(s string) string {
		return strings.Join(strings.Fields(s), " ")
	}
	wanted := normalize(label)
	for _, definition := range p.referenceDefinitions {
		if strings.EqualFold(wanted, normalize(definition.Label())) {
			return definition
		}
	}
	return nil
}
// lookForLinkOrImage handles ']': it finds the most recent link/image opener
// on the delimiter stack and tries to complete it, first as an inline
// link/image with an immediate "(...)" tail, then as a reference link/image.
// If the opener cannot be completed it is discarded and the ']' becomes
// literal text.
func (p *inlineParser) lookForLinkOrImage() {
	for element := p.delimiterStack.Back(); element != nil; element = element.Prev() {
		d := element.Value.(*delimiter)
		if d.Type != imageOpeningDelimiter && d.Type != linkOpeningDelimiter {
			continue
		}
		if d.IsInactive {
			p.delimiterStack.Remove(element)
			break
		}
		isImage := d.Type == imageOpeningDelimiter
		var inline Inline
		if destination, title, next, ok := p.peekAtInlineLinkDestinationAndTitle(p.position+1, isImage); ok {
			// Inline form: everything emitted since the opener becomes the
			// children; destination positions are mapped back to the source.
			destinationMarkdownPosition := relativeToAbsolutePosition(p.ranges, destination.Position)
			linkOrImage := InlineLinkOrImage{
				Children:       append([]Inline(nil), p.inlines[d.TextNode+1:]...),
				RawDestination: Range{destinationMarkdownPosition, destinationMarkdownPosition + destination.End - destination.Position},
				markdown:       p.markdown,
				rawTitle:       p.raw[title.Position:title.End],
			}
			if d.Type == imageOpeningDelimiter {
				inline = &InlineImage{linkOrImage}
			} else {
				inline = &InlineLink{linkOrImage}
			}
			p.position = next
		} else {
			// Reference form: use an explicit "[label]" if present, otherwise
			// the text between the opener and this ']' (collapsed reference).
			referenceLabel := ""
			label, next, hasLinkLabel := parseLinkLabel(p.raw, p.position+1)
			if hasLinkLabel && label.End > label.Position {
				referenceLabel = p.raw[label.Position:label.End]
			} else {
				referenceLabel = p.raw[d.Range.End:p.position]
				if !hasLinkLabel {
					next = p.position + 1
				}
			}
			if referenceLabel != "" {
				if reference := p.referenceDefinition(referenceLabel); reference != nil {
					linkOrImage := ReferenceLinkOrImage{
						ReferenceDefinition: reference,
						Children:            append([]Inline(nil), p.inlines[d.TextNode+1:]...),
					}
					if d.Type == imageOpeningDelimiter {
						inline = &ReferenceImage{linkOrImage}
					} else {
						inline = &ReferenceLink{linkOrImage}
					}
					p.position = next
				}
			}
		}
		if inline != nil {
			if d.Type == imageOpeningDelimiter {
				p.inlines = append(p.inlines[:d.TextNode], inline)
			} else {
				p.inlines = append(p.inlines[:d.TextNode], inline)
				// Links may not contain links: deactivate any remaining link
				// openers further down the stack.
				for inlineElement := element.Prev(); inlineElement != nil; inlineElement = inlineElement.Prev() {
					if d := inlineElement.Value.(*delimiter); d.Type == linkOpeningDelimiter {
						d.IsInactive = true
					}
				}
			}
			p.delimiterStack.Remove(element)
			return
		}
		p.delimiterStack.Remove(element)
		break
	}
	// No completable opener: the ']' is plain text.
	absPos := relativeToAbsolutePosition(p.ranges, p.position)
	p.inlines = append(p.inlines, &Text{
		Text:  "]",
		Range: Range{absPos, absPos + 1},
	})
	p.position++
}
// CharacterReference resolves the body of an HTML character reference (the
// text between '&' and ';'): "#DDD" decimal, "#xHHH"/"#XHHH" hexadecimal, or
// a named entity. It returns the referenced text, or "" if the reference is
// invalid.
func CharacterReference(ref string) string {
	if ref == "" {
		return ""
	}
	if ref[0] == '#' {
		// Numeric character reference.
		if len(ref) < 2 {
			return ""
		}
		n := 0
		if ref[1] == 'X' || ref[1] == 'x' {
			if len(ref) < 3 {
				return ""
			}
			for i := 2; i < len(ref); i++ {
				// Cap the number of digits to bound the resulting value.
				if i > 9 {
					return ""
				}
				d := ref[i]
				switch {
				case d >= '0' && d <= '9':
					n = n*16 + int(d-'0')
				case d >= 'a' && d <= 'f':
					n = n*16 + 10 + int(d-'a')
				case d >= 'A' && d <= 'F':
					n = n*16 + 10 + int(d-'A')
				default:
					return ""
				}
			}
		} else {
			for i := 1; i < len(ref); i++ {
				if i > 8 || ref[i] < '0' || ref[i] > '9' {
					return ""
				}
				n = n*10 + int(ref[i]-'0')
			}
		}
		c := rune(n)
		// NUL and invalid code points are replaced with U+FFFD.
		if c == '\u0000' || !utf8.ValidRune(c) {
			return string(unicode.ReplacementChar)
		}
		return string(c)
	}
	if entity, ok := htmlEntities[ref]; ok {
		return entity
	}
	return ""
}
// parseCharacterReference handles '&': a valid character reference up to the
// next ';' is replaced by its referenced text; otherwise the '&' is literal.
func (p *inlineParser) parseCharacterReference() {
	absPos := relativeToAbsolutePosition(p.ranges, p.position)
	p.position++
	if semicolon := strings.IndexByte(p.raw[p.position:], ';'); semicolon == -1 {
		// No terminating semicolon anywhere; '&' is literal.
		p.inlines = append(p.inlines, &Text{
			Text:  "&",
			Range: Range{absPos, absPos + 1},
		})
	} else if s := CharacterReference(p.raw[p.position : p.position+semicolon]); s != "" {
		p.position += semicolon + 1
		p.inlines = append(p.inlines, &Text{
			Text:  s,
			Range: Range{absPos, absPos + len(s)},
		})
	} else {
		// Invalid reference body; '&' is literal.
		p.inlines = append(p.inlines, &Text{
			Text:  "&",
			Range: Range{absPos, absPos + 1},
		})
	}
}
// parseAutolink attempts to recognize a bare autolink at the current
// position: a URL when c is ':' (the scheme was already emitted as text) or a
// www link when c is 'w'/'W'. Autolinks are not recognized while any active
// link/image opener remains on the delimiter stack. Returns false when
// nothing matched.
func (p *inlineParser) parseAutolink(c rune) bool {
	for element := p.delimiterStack.Back(); element != nil; element = element.Prev() {
		d := element.Value.(*delimiter)
		if !d.IsInactive {
			return false
		}
	}
	var link Range
	if c == ':' {
		var ok bool
		link, ok = parseURLAutolink(p.raw, p.position)
		if !ok {
			return false
		}
		// Since the current position is at the colon, we have to rewind the parsing slightly so that
		// we don't duplicate the URL scheme
		rewind := strings.Index(p.raw[link.Position:link.End], ":")
		if rewind != -1 {
			lastInline := p.inlines[len(p.inlines)-1]
			lastText, ok := lastInline.(*Text)
			if !ok {
				// This should never occur since parseURLAutolink will only return a non-empty value
				// when the previous text ends in a valid URL protocol which would mean that the previous
				// node is a Text node
				return false
			}
			// Shorten the previous text node so the scheme belongs to the link.
			p.inlines = p.inlines[0 : len(p.inlines)-1]
			p.inlines = append(p.inlines, &Text{
				Text:  lastText.Text[:len(lastText.Text)-rewind],
				Range: Range{lastText.Range.Position, lastText.Range.End - rewind},
			})
			p.position -= rewind
		}
	} else if c == 'w' || c == 'W' {
		var ok bool
		link, ok = parseWWWAutolink(p.raw, p.position)
		if !ok {
			return false
		}
	}
	linkMarkdownPosition := relativeToAbsolutePosition(p.ranges, link.Position)
	linkRange := Range{linkMarkdownPosition, linkMarkdownPosition + link.End - link.Position}
	p.inlines = append(p.inlines, &Autolink{
		Children: []Inline{
			&Text{
				Text:  p.raw[link.Position:link.End],
				Range: linkRange,
			},
		},
		RawDestination: linkRange,
		markdown:       p.markdown,
	})
	p.position += (link.End - link.Position)
	return true
}
// Parse concatenates the block's content ranges into p.raw and scans it,
// dispatching on each special character to the matching inline handler; runs
// of ordinary characters become Text nodes.
func (p *inlineParser) Parse() []Inline {
	for _, r := range p.ranges {
		p.raw += p.markdown[r.Position:r.End]
	}
	for p.position < len(p.raw) {
		c, _ := utf8.DecodeRuneInString(p.raw[p.position:])
		switch c {
		case '\r', '\n':
			p.parseLineEnding()
		case '\\':
			p.parseEscapeCharacter()
		case '`':
			p.parseBackticks()
		case '&':
			p.parseCharacterReference()
		case '!', '[':
			p.parseLinkOrImageDelimiter()
		case ']':
			p.lookForLinkOrImage()
		case 'w', 'W', ':':
			// Potential autolink; fall back to plain text when it isn't one.
			matched := p.parseAutolink(c)
			if !matched {
				p.parseText()
			}
		default:
			p.parseText()
		}
	}
	return p.inlines
}
// ParseInlines parses the inline content covered by ranges within markdown,
// resolving reference-style links and images against referenceDefinitions.
func ParseInlines(markdown string, ranges []Range, referenceDefinitions []*ReferenceDefinition) []Inline {
	return newInlineParser(markdown, ranges, referenceDefinitions).Parse()
}
// MergeInlineText merges adjacent Text nodes whose source ranges are
// contiguous into single Text nodes, leaving every other inline untouched.
// The result reuses inlines' backing array, so the input slice must not be
// used afterwards.
func MergeInlineText(inlines []Inline) []Inline {
	ret := inlines[:0]
	for i, v := range inlines {
		// always add first node
		if i == 0 {
			ret = append(ret, v)
			continue
		}
		// not a text node? nothing to merge
		text, ok := v.(*Text)
		if !ok {
			ret = append(ret, v)
			continue
		}
		// previous node is not a text node? nothing to merge
		prevText, ok := ret[len(ret)-1].(*Text)
		if !ok {
			ret = append(ret, v)
			continue
		}
		// previous node is not right before this one
		if prevText.Range.End != text.Range.Position {
			ret = append(ret, v)
			continue
		}
		// we have two consecutive text nodes
		ret[len(ret)-1] = &Text{
			Text:  prevText.Text + text.Text,
			Range: Range{prevText.Range.Position, text.Range.End},
		}
	}
	return ret
}
// Unescape resolves backslash escapes and HTML character references in
// markdown, returning the resulting text.
func Unescape(markdown string) string {
	// Build with strings.Builder instead of repeated string concatenation,
	// which is quadratic in the input length.
	var sb strings.Builder
	position := 0
	for position < len(markdown) {
		c, cSize := utf8.DecodeRuneInString(markdown[position:])
		switch c {
		case '\\':
			// A backslash escapes the next byte only if that byte is
			// escapable; otherwise the backslash itself is literal.
			if position+1 < len(markdown) && isEscapableByte(markdown[position+1]) {
				sb.WriteString(string(markdown[position+1]))
				position += 2
			} else {
				sb.WriteString(`\`)
				position++
			}
		case '&':
			// A valid character reference up to the next ';' is replaced by
			// its referenced text; otherwise the '&' is literal.
			position++
			if semicolon := strings.IndexByte(markdown[position:], ';'); semicolon == -1 {
				sb.WriteString("&")
			} else if s := CharacterReference(markdown[position : position+semicolon]); s != "" {
				position += semicolon + 1
				sb.WriteString(s)
			} else {
				sb.WriteString("&")
			}
		default:
			sb.WriteRune(c)
			position += cSize
		}
	}
	return sb.String()
}

View File

@ -0,0 +1,78 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
// Inspect traverses the markdown tree in depth-first order. If f returns true, Inspect invokes f
// recursively for each child of the block or inline, followed by a call of f(nil).
func Inspect(markdown string, f func(interface{}) bool) {
	document, referenceDefinitions := Parse(markdown)
	InspectBlock(document, func(block Block) bool {
		if !f(block) {
			return false
		}
		// Only paragraphs carry inline content; parse and walk it as well.
		switch v := block.(type) {
		case *Paragraph:
			for _, inline := range MergeInlineText(v.ParseInlines(referenceDefinitions)) {
				InspectInline(inline, func(inline Inline) bool {
					return f(inline)
				})
			}
		}
		return true
	})
}
// InspectBlock traverses the blocks in depth-first order, starting with block. If f returns true,
// InspectBlock invokes f recursively for each child of the block, followed by a call of f(nil).
func InspectBlock(block Block, f func(Block) bool) {
	if !f(block) {
		return
	}
	// Only container blocks have children to recurse into.
	switch t := block.(type) {
	case *BlockQuote:
		for i := range t.Children {
			InspectBlock(t.Children[i], f)
		}
	case *Document:
		for i := range t.Children {
			InspectBlock(t.Children[i], f)
		}
	case *List:
		for i := range t.Children {
			InspectBlock(t.Children[i], f)
		}
	case *ListItem:
		for i := range t.Children {
			InspectBlock(t.Children[i], f)
		}
	}
	f(nil)
}
// InspectInline traverses the inlines in depth-first order, starting with inline. If f returns true,
// InspectInline invokes f recursively for each child of the inline, followed by a call of f(nil).
func InspectInline(inline Inline, f func(Inline) bool) {
	if !f(inline) {
		return
	}
	// Only link and image inlines carry children.
	switch v := inline.(type) {
	case *InlineImage:
		for _, child := range v.Children {
			InspectInline(child, f)
		}
	case *InlineLink:
		for _, child := range v.Children {
			InspectInline(child, f)
		}
	case *ReferenceImage:
		for _, child := range v.Children {
			InspectInline(child, f)
		}
	case *ReferenceLink:
		for _, child := range v.Children {
			InspectInline(child, f)
		}
	}
	f(nil)
}

View File

@ -0,0 +1,32 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
import (
"strings"
)
// Line is a single line of the markdown source, identified by its byte Range
// within the original input (line terminators included).
type Line struct {
	Range
}
// ParseLines splits markdown into lines. "\n" and "\r\n" terminate a line
// (the terminator is included in the line's range), and a lone "\r" also ends
// a line. A final line without a trailing newline is included as well.
func ParseLines(markdown string) []Line {
	lineStartPosition := 0
	isAfterCarriageReturn := false
	lines := make([]Line, 0, strings.Count(markdown, "\n"))
	for position, r := range markdown {
		if r == '\n' {
			// Ends the line here whether or not a '\r' preceded it, so "\r\n"
			// is consumed as a single line ending.
			lines = append(lines, Line{Range{lineStartPosition, position + 1}})
			lineStartPosition = position + 1
		} else if isAfterCarriageReturn {
			// The previous '\r' was not followed by '\n': it terminated a
			// line by itself, ending just before the current character.
			lines = append(lines, Line{Range{lineStartPosition, position}})
			lineStartPosition = position
		}
		isAfterCarriageReturn = r == '\r'
	}
	// Emit the trailing line, if any (also covers input ending in '\r').
	if lineStartPosition < len(markdown) {
		lines = append(lines, Line{Range{lineStartPosition, len(markdown)}})
	}
	return lines
}

View File

@ -0,0 +1,184 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
import (
"unicode/utf8"
)
// parseLinkDestination parses a link destination starting at position,
// returning the range of the raw destination text and the position just past
// it. Both the pointy-bracket form ("<...>") and the bare form with balanced
// parentheses are supported; an invalid pointy-bracket form falls through to
// the bare form starting at the same position (including the '<').
func parseLinkDestination(markdown string, position int) (raw Range, next int, ok bool) {
	if position >= len(markdown) {
		return
	}
	if markdown[position] == '<' {
		// Pointy-bracket form: scan for an unescaped '>'. An unescaped '<'
		// or any whitespace invalidates this form.
		isEscaped := false
		for offset, c := range []byte(markdown[position+1:]) {
			if isEscaped {
				isEscaped = false
				if isEscapableByte(c) {
					continue
				}
			}
			if c == '\\' {
				isEscaped = true
			} else if c == '<' {
				break
			} else if c == '>' {
				// raw excludes the surrounding angle brackets.
				return Range{position + 1, position + 1 + offset}, position + 1 + offset + 1, true
			} else if isWhitespaceByte(c) {
				break
			}
		}
	}
	// Bare form: consume until whitespace or an unbalanced ')'. Parens may
	// nest; only a ')' with no matching '(' terminates the destination.
	openCount := 0
	isEscaped := false
	for offset, c := range []byte(markdown[position:]) {
		if isEscaped {
			isEscaped = false
			if isEscapableByte(c) {
				continue
			}
		}
		switch c {
		case '\\':
			isEscaped = true
		case '(':
			openCount++
		case ')':
			if openCount < 1 {
				return Range{position, position + offset}, position + offset, true
			}
			openCount--
		default:
			if isWhitespaceByte(c) {
				return Range{position, position + offset}, position + offset, true
			}
		}
	}
	// Destination runs to the end of the input.
	return Range{position, len(markdown)}, len(markdown), true
}
// parseLinkTitle parses a link title starting at position. Titles may be
// delimited by double quotes, single quotes, or parentheses; raw is the text
// between the delimiters and next is the position just past the closer.
func parseLinkTitle(markdown string, position int) (raw Range, next int, ok bool) {
	if position >= len(markdown) {
		return
	}
	start := position
	var closer byte
	switch opener := markdown[position]; opener {
	case '"', '\'':
		closer = opener
	case '(':
		closer = ')'
	default:
		return
	}
	position++
	for position < len(markdown) {
		switch b := markdown[position]; {
		case b == closer:
			return Range{start + 1, position}, position + 1, true
		case b == '\\':
			// Skip the backslash, plus the escaped byte if it is escapable.
			position++
			if position < len(markdown) && isEscapableByte(markdown[position]) {
				position++
			}
		default:
			position++
		}
	}
	// No closing delimiter found.
	return
}
// parseLinkLabel parses a bracketed link label ("[...]") starting at
// position. raw is the text between the brackets and next is the position
// just past the closing bracket.
func parseLinkLabel(markdown string, position int) (raw Range, next int, ok bool) {
	if position >= len(markdown) || markdown[position] != '[' {
		return
	}
	start := position
	position++
	for position < len(markdown) {
		switch markdown[position] {
		case '\\':
			// Skip the backslash, plus the escaped byte if it is escapable.
			position++
			if position < len(markdown) && isEscapableByte(markdown[position]) {
				position++
			}
		case '[':
			// Labels may not contain unescaped opening brackets.
			return
		case ']':
			// Labels are limited to 999 characters; the cheap byte-count
			// check gates the more expensive rune count.
			if position-start >= 1000 && utf8.RuneCountInString(markdown[start:position]) >= 1000 {
				return
			}
			return Range{start + 1, position}, position + 1, true
		default:
			position++
		}
	}
	// No closing bracket found.
	return
}
// As a non-standard feature, we allow image links to specify dimensions of the image by adding "=WIDTHxHEIGHT"
// after the image destination but before the image title like ![alt](http://example.com/image.png =100x200 "title").
// Both width and height are optional, but at least one of them must be specified.
func parseImageDimensions(markdown string, position int) (raw Range, next int, ok bool) {
	if position >= len(markdown) {
		return
	}
	originalPosition := position
	// Read =
	position += 1
	if position >= len(markdown) {
		return
	}
	// Read width. Digits are only consumed while at least one byte remains
	// after them, so the dimensions must be followed by a terminator.
	hasWidth := false
	for position < len(markdown)-1 && isNumericByte(markdown[position]) {
		hasWidth = true
		position += 1
	}
	// Look for early end of dimensions (width-only form, "=100").
	// NOTE(review): the returned range's End is position-1, which appears to
	// exclude the final consumed byte given half-open Range semantics used
	// elsewhere in this package — confirm against the consumer.
	if isWhitespaceByte(markdown[position]) || markdown[position] == ')' {
		return Range{originalPosition, position - 1}, position, true
	}
	// Read the x
	if (markdown[position] != 'x' && markdown[position] != 'X') || position == len(markdown)-1 {
		return
	}
	position += 1
	// Read height
	hasHeight := false
	for position < len(markdown)-1 && isNumericByte(markdown[position]) {
		hasHeight = true
		position += 1
	}
	// Make sure the there's no trailing characters
	if !isWhitespaceByte(markdown[position]) && markdown[position] != ')' {
		return
	}
	if !hasWidth && !hasHeight {
		// At least one of width or height is required
		return
	}
	return Range{originalPosition, position - 1}, position, true
}

View File

@ -0,0 +1,220 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
import (
"strings"
)
type ListItem struct {
blockBase
markdown string
hasTrailingBlankLine bool
hasBlankLineBetweenChildren bool
Indentation int
Children []Block
}
func (b *ListItem) Continuation(indentation int, r Range) *continuation {
s := b.markdown[r.Position:r.End]
if strings.TrimSpace(s) == "" {
if b.Children == nil {
return nil
}
return &continuation{
Remaining: r,
}
}
if indentation < b.Indentation {
return nil
}
return &continuation{
Indentation: indentation - b.Indentation,
Remaining: r,
}
}
func (b *ListItem) AddChild(openBlocks []Block) []Block {
b.Children = append(b.Children, openBlocks[0])
if b.hasTrailingBlankLine {
b.hasBlankLineBetweenChildren = true
}
b.hasTrailingBlankLine = false
return openBlocks
}
func (b *ListItem) AddLine(indentation int, r Range) bool {
isBlank := strings.TrimSpace(b.markdown[r.Position:r.End]) == ""
if isBlank {
b.hasTrailingBlankLine = true
}
return false
}
func (b *ListItem) HasTrailingBlankLine() bool {
return b.hasTrailingBlankLine || (len(b.Children) > 0 && b.Children[len(b.Children)-1].HasTrailingBlankLine())
}
func (b *ListItem) isLoose() bool {
if b.hasBlankLineBetweenChildren {
return true
}
for i, child := range b.Children {
if i < len(b.Children)-1 && child.HasTrailingBlankLine() {
return true
}
}
return false
}
type List struct {
blockBase
markdown string
hasTrailingBlankLine bool
hasBlankLineBetweenChildren bool
IsLoose bool
IsOrdered bool
OrderedStart int
BulletOrDelimiter byte
Children []*ListItem
}
func (b *List) Continuation(indentation int, r Range) *continuation {
s := b.markdown[r.Position:r.End]
if strings.TrimSpace(s) == "" {
return &continuation{
Remaining: r,
}
}
return &continuation{
Indentation: indentation,
Remaining: r,
}
}
func (b *List) AddChild(openBlocks []Block) []Block {
if item, ok := openBlocks[0].(*ListItem); ok {
b.Children = append(b.Children, item)
if b.hasTrailingBlankLine {
b.hasBlankLineBetweenChildren = true
}
b.hasTrailingBlankLine = false
return openBlocks
} else if list, ok := openBlocks[0].(*List); ok {
if len(list.Children) == 1 && list.IsOrdered == b.IsOrdered && list.BulletOrDelimiter == b.BulletOrDelimiter {
return b.AddChild(openBlocks[1:])
}
}
return nil
}
func (b *List) AddLine(indentation int, r Range) bool {
isBlank := strings.TrimSpace(b.markdown[r.Position:r.End]) == ""
if isBlank {
b.hasTrailingBlankLine = true
}
return false
}
func (b *List) HasTrailingBlankLine() bool {
return b.hasTrailingBlankLine || (len(b.Children) > 0 && b.Children[len(b.Children)-1].HasTrailingBlankLine())
}
func (b *List) isLoose() bool {
if b.hasBlankLineBetweenChildren {
return true
}
for i, child := range b.Children {
if child.isLoose() || (i < len(b.Children)-1 && child.HasTrailingBlankLine()) {
return true
}
}
return false
}
func (b *List) Close() {
b.IsLoose = b.isLoose()
}
func parseListMarker(markdown string, r Range) (success, isOrdered bool, orderedStart int, bulletOrDelimiter byte, markerWidth int, remaining Range) {
digits := 0
n := 0
for i := r.Position; i < r.End && markdown[i] >= '0' && markdown[i] <= '9'; i++ {
digits++
n = n*10 + int(markdown[i]-'0')
}
if digits > 0 {
if digits > 9 || r.Position+digits >= r.End {
return
}
next := markdown[r.Position+digits]
if next != '.' && next != ')' {
return
}
return true, true, n, next, digits + 1, Range{r.Position + digits + 1, r.End}
}
if r.Position >= r.End {
return
}
next := markdown[r.Position]
if next != '-' && next != '+' && next != '*' {
return
}
return true, false, 0, next, 1, Range{r.Position + 1, r.End}
}
func listStart(markdown string, indent int, r Range, matchedBlocks, unmatchedBlocks []Block) []Block {
afterList := false
if len(matchedBlocks) > 0 {
_, afterList = matchedBlocks[len(matchedBlocks)-1].(*List)
}
if !afterList && indent > 3 {
return nil
}
success, isOrdered, orderedStart, bulletOrDelimiter, markerWidth, remaining := parseListMarker(markdown, r)
if !success {
return nil
}
isBlank := strings.TrimSpace(markdown[remaining.Position:remaining.End]) == ""
if len(matchedBlocks) > 0 && len(unmatchedBlocks) == 0 {
if _, ok := matchedBlocks[len(matchedBlocks)-1].(*Paragraph); ok {
if isBlank || (isOrdered && orderedStart != 1) {
return nil
}
}
}
indentAfterMarker, indentBytesAfterMarker := countIndentation(markdown, remaining)
if !isBlank && indentAfterMarker < 1 {
return nil
}
remaining = Range{remaining.Position + indentBytesAfterMarker, remaining.End}
consumedIndentAfterMarker := indentAfterMarker
if isBlank || indentAfterMarker >= 5 {
consumedIndentAfterMarker = 1
}
listItem := &ListItem{
markdown: markdown,
Indentation: indent + markerWidth + consumedIndentAfterMarker,
}
list := &List{
markdown: markdown,
IsOrdered: isOrdered,
OrderedStart: orderedStart,
BulletOrDelimiter: bulletOrDelimiter,
Children: []*ListItem{listItem},
}
ret := []Block{list, listItem}
if descendants := blockStartOrParagraph(markdown, indentAfterMarker-consumedIndentAfterMarker, remaining, nil, nil); descendants != nil {
listItem.Children = append(listItem.Children, descendants[0])
ret = append(ret, descendants...)
}
return ret
}

View File

@ -0,0 +1,147 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
// This package implements a parser for the subset of the CommonMark spec necessary for us to do
// server-side processing. It is not a full implementation and lacks many features. But it is
// complete enough to efficiently and accurately allow us to do what we need to like rewrite image
// URLs for proxying.
package markdown
import (
"strings"
)
// isEscapable reports whether c is a character that a backslash can escape in
// markdown: a printable ASCII character that is not a letter or digit.
func isEscapable(c rune) bool {
	if c <= ' ' || c > '~' {
		return false
	}
	isDigit := c >= '0' && c <= '9'
	isUpper := c >= 'A' && c <= 'Z'
	isLower := c >= 'a' && c <= 'z'
	return !isDigit && !isUpper && !isLower
}

// isEscapableByte is the byte variant of isEscapable.
func isEscapableByte(c byte) bool {
	return isEscapable(rune(c))
}
// isWhitespace reports whether c is markdown whitespace: space, tab, newline,
// vertical tab, form feed, or carriage return.
func isWhitespace(c rune) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\u000b' || c == '\u000c' || c == '\r'
}

// isWhitespaceByte is the byte variant of isWhitespace.
func isWhitespaceByte(c byte) bool {
	return isWhitespace(rune(c))
}
// isNumeric reports whether c is an ASCII decimal digit.
func isNumeric(c rune) bool {
	return '0' <= c && c <= '9'
}

// isNumericByte is the byte variant of isNumeric.
func isNumericByte(c byte) bool {
	return isNumeric(rune(c))
}

// isHex reports whether c is an ASCII hexadecimal digit.
func isHex(c rune) bool {
	switch {
	case isNumeric(c):
		return true
	case 'a' <= c && c <= 'f':
		return true
	case 'A' <= c && c <= 'F':
		return true
	}
	return false
}

// isHexByte is the byte variant of isHex.
func isHexByte(c byte) bool {
	return isHex(rune(c))
}

// isAlphanumeric reports whether c is an ASCII letter or digit.
func isAlphanumeric(c rune) bool {
	switch {
	case isNumeric(c):
		return true
	case 'a' <= c && c <= 'z':
		return true
	case 'A' <= c && c <= 'Z':
		return true
	}
	return false
}

// isAlphanumericByte is the byte variant of isAlphanumeric.
func isAlphanumericByte(c byte) bool {
	return isAlphanumeric(rune(c))
}
// nextNonWhitespace returns the index of the first non-whitespace byte at or
// after position, or len(markdown) if there is none.
func nextNonWhitespace(markdown string, position int) int {
	for i := position; i < len(markdown); i++ {
		if !isWhitespaceByte(markdown[i]) {
			return i
		}
	}
	return len(markdown)
}
// nextLine returns the position just past the next line ending at or after
// position, along with whether any non-whitespace bytes were skipped on the
// way. "\r\n" counts as a single line ending; if no ending is found,
// len(markdown) is returned.
func nextLine(markdown string, position int) (linePosition int, skippedNonWhitespace bool) {
	for i := position; i < len(markdown); i++ {
		switch c := markdown[i]; {
		case c == '\n':
			return i + 1, skippedNonWhitespace
		case c == '\r':
			if i+1 < len(markdown) && markdown[i+1] == '\n' {
				return i + 2, skippedNonWhitespace
			}
			return i + 1, skippedNonWhitespace
		case !isWhitespaceByte(c):
			skippedNonWhitespace = true
		}
	}
	return len(markdown), skippedNonWhitespace
}
// countIndentation measures the leading indentation of r, counting a tab as 4
// spaces, and returns both the width in spaces and the number of bytes consumed.
func countIndentation(markdown string, r Range) (spaces, bytes int) {
	for i := r.Position; i < r.End; i++ {
		switch markdown[i] {
		case ' ':
			spaces++
		case '\t':
			spaces += 4
		default:
			return spaces, bytes
		}
		bytes++
	}
	return spaces, bytes
}
// trimLeftSpace returns r with leading whitespace removed by advancing its
// start position; the end is unchanged.
func trimLeftSpace(markdown string, r Range) Range {
	s := markdown[r.Position:r.End]
	trimmed := strings.TrimLeftFunc(s, isWhitespace)
	// Advance Position by the number of bytes trimmed from the left. The
	// previous code shrank End instead (the right-trim formula, identical to
	// trimRightSpace below), which left the leading whitespace in the range
	// and dropped content from the tail.
	return Range{r.Position + (len(s) - len(trimmed)), r.End}
}
// trimRightSpace returns r with trailing whitespace removed by moving its end
// position back; the start is unchanged.
func trimRightSpace(markdown string, r Range) Range {
	s := markdown[r.Position:r.End]
	trimmed := strings.TrimRightFunc(s, isWhitespace)
	removed := len(s) - len(trimmed)
	return Range{r.Position, r.End - removed}
}
// relativeToAbsolutePosition maps a position relative to the concatenation of
// ranges back to an absolute position in the underlying string. Positions
// past the end map to the end of the last range, or 0 if ranges is empty.
func relativeToAbsolutePosition(ranges []Range, position int) int {
	remaining := position
	for _, r := range ranges {
		length := r.End - r.Position
		if remaining < length {
			return r.Position + remaining
		}
		remaining -= length
	}
	if len(ranges) == 0 {
		return 0
	}
	return ranges[len(ranges)-1].End
}
// trimBytesFromRanges drops the first bytes bytes from the concatenation of
// ranges and returns the ranges covering whatever remains.
func trimBytesFromRanges(ranges []Range, bytes int) (result []Range) {
	remaining := bytes
	for _, r := range ranges {
		if remaining == 0 {
			result = append(result, r)
			continue
		}
		length := r.End - r.Position
		if remaining >= length {
			// This range is consumed entirely.
			remaining -= length
			continue
		}
		// This range is partially consumed; keep its tail.
		result = append(result, Range{r.Position + remaining, r.End})
		remaining = 0
	}
	return result
}
// Parse parses markdown into a block tree, also returning any link reference
// definitions encountered along the way.
func Parse(markdown string) (*Document, []*ReferenceDefinition) {
	return ParseBlocks(markdown, ParseLines(markdown))
}

View File

@ -0,0 +1,71 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
import (
"strings"
)
// Paragraph is a leaf block containing inline content. Link reference
// definitions found at the start of its text are extracted by Close.
type Paragraph struct {
	blockBase
	markdown string
	// Text holds the ranges of markdown making up the paragraph's content.
	Text []Range
	// ReferenceDefinitions holds definitions extracted from the text by Close.
	ReferenceDefinitions []*ReferenceDefinition
}
// ParseInlines parses the paragraph's text into inline nodes, resolving
// reference links and images against the given definitions.
func (b *Paragraph) ParseInlines(referenceDefinitions []*ReferenceDefinition) []Inline {
	return ParseInlines(b.markdown, b.Text, referenceDefinitions)
}
// Continuation reports how the paragraph continues onto the given line: a
// blank line ends the paragraph, anything else continues it.
func (b *Paragraph) Continuation(indentation int, r Range) *continuation {
	if strings.TrimSpace(b.markdown[r.Position:r.End]) == "" {
		return nil
	}
	return &continuation{
		Indentation: indentation,
		Remaining:   r,
	}
}
// Close finalizes the paragraph: link reference definitions at the start of
// its text are repeatedly extracted into ReferenceDefinitions, and leading
// and trailing whitespace is trimmed from the remaining text ranges.
func (b *Paragraph) Close() {
	for {
		// Trim leading whitespace, stopping at the first range that still
		// has content.
		for i := 0; i < len(b.Text); i++ {
			b.Text[i] = trimLeftSpace(b.markdown, b.Text[i])
			if b.Text[i].Position < b.Text[i].End {
				break
			}
		}
		// Stop unless the first range is non-empty and begins with '['.
		// Note '&&' binds tighter than '||' here: an empty first range also
		// falls through to the definition parse below.
		if len(b.Text) == 0 || b.Text[0].Position < b.Text[0].End && b.markdown[b.Text[0].Position] != '[' {
			break
		}
		definition, remaining := parseReferenceDefinition(b.markdown, b.Text)
		if definition == nil {
			break
		}
		b.ReferenceDefinitions = append(b.ReferenceDefinitions, definition)
		b.Text = remaining
	}
	// Trim trailing whitespace from the back, stopping at the first range
	// that still has content.
	for i := len(b.Text) - 1; i >= 0; i-- {
		b.Text[i] = trimRightSpace(b.markdown, b.Text[i])
		if b.Text[i].Position < b.Text[i].End {
			break
		}
	}
}
// newParagraph creates a paragraph for the given range, or returns nil if the
// range is blank.
func newParagraph(markdown string, r Range) *Paragraph {
	if strings.TrimSpace(markdown[r.Position:r.End]) == "" {
		return nil
	}
	paragraph := &Paragraph{
		markdown: markdown,
		Text:     []Range{r},
	}
	return paragraph
}

View File

@ -0,0 +1,75 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
// ReferenceDefinition is a link reference definition ("[label]: destination
// \"title\"") that reference links and images resolve against.
type ReferenceDefinition struct {
	// RawDestination is the destination's byte range in markdown, before unescaping.
	RawDestination Range
	markdown string
	rawLabel string
	rawTitle string
}

// Destination returns the unescaped link destination.
func (d *ReferenceDefinition) Destination() string {
	return Unescape(d.markdown[d.RawDestination.Position:d.RawDestination.End])
}

// Label returns the raw label text used to match references against this definition.
func (d *ReferenceDefinition) Label() string {
	return d.rawLabel
}

// Title returns the unescaped link title; it may be empty.
func (d *ReferenceDefinition) Title() string {
	return Unescape(d.rawTitle)
}
// parseReferenceDefinition attempts to parse a link reference definition from
// the start of the concatenation of ranges. On success it returns the
// definition and the ranges covering whatever text follows it; otherwise it
// returns (nil, nil).
func parseReferenceDefinition(markdown string, ranges []Range) (*ReferenceDefinition, []Range) {
	// Work on the concatenated text; positions in raw are mapped back to
	// absolute positions via relativeToAbsolutePosition.
	raw := ""
	for _, r := range ranges {
		raw += markdown[r.Position:r.End]
	}
	label, next, ok := parseLinkLabel(raw, 0)
	if !ok {
		return nil, nil
	}
	position := next
	// The label must be immediately followed by a colon.
	if position >= len(raw) || raw[position] != ':' {
		return nil, nil
	}
	position++
	destination, next, ok := parseLinkDestination(raw, nextNonWhitespace(raw, position))
	if !ok {
		return nil, nil
	}
	position = next
	absoluteDestination := relativeToAbsolutePosition(ranges, destination.Position)
	ret := &ReferenceDefinition{
		RawDestination: Range{absoluteDestination, absoluteDestination + destination.End - destination.Position},
		markdown:       markdown,
		rawLabel:       raw[label.Position:label.End],
	}
	// An optional title may follow the destination after whitespace. Note
	// that the local nextLine below shadows the nextLine function for the
	// rest of each if statement.
	if position < len(raw) && isWhitespaceByte(raw[position]) {
		title, next, ok := parseLinkTitle(raw, nextNonWhitespace(raw, position))
		if !ok {
			// No valid title: the definition is still valid if nothing but
			// whitespace remains on this line.
			if nextLine, skippedNonWhitespace := nextLine(raw, position); !skippedNonWhitespace {
				return ret, trimBytesFromRanges(ranges, nextLine)
			}
			return nil, nil
		}
		// Accept the title only if the rest of the line is blank.
		if nextLine, skippedNonWhitespace := nextLine(raw, next); !skippedNonWhitespace {
			ret.rawTitle = raw[title.Position:title.End]
			return ret, trimBytesFromRanges(ranges, nextLine)
		}
	}
	// No title: valid only if the rest of the line is blank.
	if nextLine, skippedNonWhitespace := nextLine(raw, position); !skippedNonWhitespace {
		return ret, trimBytesFromRanges(ranges, nextLine)
	}
	return nil, nil
}

View File

@ -0,0 +1,99 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"github.com/mattermost/logr"
)
// defaultLog manually encodes the log to STDERR, providing a basic, default logging implementation
// before mlog is fully configured.
func defaultLog(level, msg string, fields ...Field) {
log := struct {
Level string `json:"level"`
Message string `json:"msg"`
Fields []Field `json:"fields,omitempty"`
}{
level,
msg,
fields,
}
if b, err := json.Marshal(log); err != nil {
fmt.Fprintf(os.Stderr, `{"level":"error","msg":"failed to encode log message"}%s`, "\n")
} else {
fmt.Fprintf(os.Stderr, "%s\n", b)
}
}
// defaultIsLevelEnabled reports every level as enabled; there is no level
// filtering until a real logger is configured.
func defaultIsLevelEnabled(level LogLevel) bool {
	return true
}

// defaultDebugLog emits a debug record via defaultLog.
func defaultDebugLog(msg string, fields ...Field) {
	defaultLog("debug", msg, fields...)
}

// defaultInfoLog emits an info record via defaultLog.
func defaultInfoLog(msg string, fields ...Field) {
	defaultLog("info", msg, fields...)
}

// defaultWarnLog emits a warn record via defaultLog.
func defaultWarnLog(msg string, fields ...Field) {
	defaultLog("warn", msg, fields...)
}

// defaultErrorLog emits an error record via defaultLog.
func defaultErrorLog(msg string, fields ...Field) {
	defaultLog("error", msg, fields...)
}

// defaultCriticalLog emits an error record via defaultLog.
func defaultCriticalLog(msg string, fields ...Field) {
	// We map critical to error in zap, so be consistent.
	defaultLog("error", msg, fields...)
}

// defaultCustomLog silently drops the record.
func defaultCustomLog(lvl LogLevel, msg string, fields ...Field) {
	// custom log levels are only output once log targets are configured.
}

// defaultCustomMultiLog silently drops the record.
func defaultCustomMultiLog(lvl []LogLevel, msg string, fields ...Field) {
	// custom log levels are only output once log targets are configured.
}

// defaultFlush is a no-op; there is nothing to flush before configuration.
func defaultFlush(ctx context.Context) error {
	return nil
}

// defaultAdvancedConfig always fails: advanced logging cannot be configured
// on the default logger.
func defaultAdvancedConfig(cfg LogTargetCfg) error {
	// mlog.ConfigAdvancedConfig should not be called until default
	// logger is replaced with mlog.Logger instance.
	return errors.New("cannot config advanced logging on default logger")
}

// defaultAdvancedShutdown is a no-op; there is nothing to shut down.
func defaultAdvancedShutdown(ctx context.Context) error {
	return nil
}

// defaultAddTarget always fails: targets cannot be added to the default logger.
func defaultAddTarget(targets ...logr.Target) error {
	// mlog.AddTarget should not be called until default
	// logger is replaced with mlog.Logger instance.
	return errors.New("cannot AddTarget on default logger")
}

// defaultRemoveTargets always fails: targets cannot be removed from the
// default logger.
func defaultRemoveTargets(ctx context.Context, f func(TargetInfo) bool) error {
	// mlog.RemoveTargets should not be called until default
	// logger is replaced with mlog.Logger instance.
	return errors.New("cannot RemoveTargets on default logger")
}

// defaultEnableMetrics always fails: metrics cannot be enabled on the default
// logger.
func defaultEnableMetrics(collector logr.MetricsCollector) error {
	// mlog.EnableMetrics should not be called until default
	// logger is replaced with mlog.Logger instance.
	return errors.New("cannot EnableMetrics on default logger")
}

View File

@ -0,0 +1,32 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
import (
"github.com/mattermost/logr"
)
// onLoggerError is called when the logging system encounters an error,
// such as a target not able to write records. The targets will keep trying
// however the error will be logged with a dedicated level that can be output
// to a safe/always available target for monitoring or alerting.
func onLoggerError(err error) {
	Log(LvlLogError, "advanced logging error", Err(err))
}

// onQueueFull is called when the main logger queue is full, indicating the
// volume and frequency of log record creation is too high for the queue size
// and/or the target latencies.
func onQueueFull(rec *logr.LogRec, maxQueueSize int) bool {
	Log(LvlLogError, "main queue full, dropping record", Any("rec", rec))
	return true // drop record
}

// onTargetQueueFull is called when a target's queue is full, indicating the
// volume and frequency of log record creation is too high for the target's queue size
// and/or the target latency.
func onTargetQueueFull(target logr.Target, rec *logr.LogRec, maxQueueSize int) bool {
	// NOTE(review): the target name is logged as an empty string here;
	// presumably it should identify target — confirm intent.
	Log(LvlLogError, "target queue full, dropping record", String("target", ""), Any("rec", rec))
	return true // drop record
}

View File

@ -0,0 +1,98 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
import (
"context"
"log"
"sync/atomic"
"github.com/mattermost/logr"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// globalLogger backs the package-level logging function variables below.
var globalLogger *Logger

// InitGlobalLogger installs logger as the package-level logger, rebinding all
// of the package-level function variables to it. Any previously installed
// logger has its logr backend shut down first.
func InitGlobalLogger(logger *Logger) {
	// Clean up previous instance.
	if globalLogger != nil && globalLogger.logrLogger != nil {
		globalLogger.logrLogger.Logr().Shutdown()
	}
	// Shallow-copy the logger and skip one extra stack frame so log lines
	// report the caller of the package-level wrapper, not the wrapper itself.
	glob := *logger
	glob.zap = glob.zap.WithOptions(zap.AddCallerSkip(1))
	globalLogger = &glob
	IsLevelEnabled = globalLogger.IsLevelEnabled
	Debug = globalLogger.Debug
	Info = globalLogger.Info
	Warn = globalLogger.Warn
	Error = globalLogger.Error
	Critical = globalLogger.Critical
	Log = globalLogger.Log
	LogM = globalLogger.LogM
	Flush = globalLogger.Flush
	ConfigAdvancedLogging = globalLogger.ConfigAdvancedLogging
	ShutdownAdvancedLogging = globalLogger.ShutdownAdvancedLogging
	AddTarget = globalLogger.AddTarget
	RemoveTargets = globalLogger.RemoveTargets
	EnableMetrics = globalLogger.EnableMetrics
}

// logWriterFunc provides access to mlog via io.Writer, so the standard logger
// can be redirected to use mlog and whatever targets are defined.
type logWriterFunc func([]byte) (int, error)

// Write implements io.Writer by delegating to the wrapped function.
func (lw logWriterFunc) Write(p []byte) (int, error) {
	return lw(p)
}

// RedirectStdLog sends the output of the Go standard library's global logger
// through the given mlog logger (via zap when enabled, otherwise via the
// package-level Log).
func RedirectStdLog(logger *Logger) {
	if atomic.LoadInt32(&disableZap) == 0 {
		// NOTE(review): zap.RedirectStdLogAt returns an undo func and an
		// error, both discarded here — confirm that is intentional.
		zap.RedirectStdLogAt(logger.zap.With(zap.String("source", "stdlog")).WithOptions(zap.AddCallerSkip(-2)), zapcore.ErrorLevel)
		return
	}
	writer := func(p []byte) (int, error) {
		Log(LvlStdLog, string(p))
		return len(p), nil
	}
	log.SetOutput(logWriterFunc(writer))
}

// Function types for the package-level logging variables below.
type IsLevelEnabledFunc func(LogLevel) bool
type LogFunc func(string, ...Field)
type LogFuncCustom func(LogLevel, string, ...Field)
type LogFuncCustomMulti func([]LogLevel, string, ...Field)
type FlushFunc func(context.Context) error
type ConfigFunc func(cfg LogTargetCfg) error
type ShutdownFunc func(context.Context) error
type AddTargetFunc func(...logr.Target) error
type RemoveTargetsFunc func(context.Context, func(TargetInfo) bool) error
type EnableMetricsFunc func(logr.MetricsCollector) error

// DON'T USE THIS Modify the level on the app logger
func GloballyDisableDebugLogForTest() {
	globalLogger.consoleLevel.SetLevel(zapcore.ErrorLevel)
}

// DON'T USE THIS Modify the level on the app logger
func GloballyEnableDebugLogForTest() {
	globalLogger.consoleLevel.SetLevel(zapcore.DebugLevel)
}

// Package-level logging entry points. These start as the pre-configuration
// defaults and are rebound by InitGlobalLogger.
var IsLevelEnabled IsLevelEnabledFunc = defaultIsLevelEnabled
var Debug LogFunc = defaultDebugLog
var Info LogFunc = defaultInfoLog
var Warn LogFunc = defaultWarnLog
var Error LogFunc = defaultErrorLog
var Critical LogFunc = defaultCriticalLog
var Log LogFuncCustom = defaultCustomLog
var LogM LogFuncCustomMulti = defaultCustomMultiLog
var Flush FlushFunc = defaultFlush
var ConfigAdvancedLogging ConfigFunc = defaultAdvancedConfig
var ShutdownAdvancedLogging ShutdownFunc = defaultAdvancedShutdown
var AddTarget AddTargetFunc = defaultAddTarget
var RemoveTargets RemoveTargetsFunc = defaultRemoveTargets
var EnableMetrics EnableMetricsFunc = defaultEnableMetrics

View File

@ -0,0 +1,51 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
// Standard levels
var (
	LvlPanic = LogLevel{ID: 0, Name: "panic", Stacktrace: true}
	LvlFatal = LogLevel{ID: 1, Name: "fatal", Stacktrace: true}
	LvlError = LogLevel{ID: 2, Name: "error"}
	LvlWarn  = LogLevel{ID: 3, Name: "warn"}
	LvlInfo  = LogLevel{ID: 4, Name: "info"}
	LvlDebug = LogLevel{ID: 5, Name: "debug"}
	LvlTrace = LogLevel{ID: 6, Name: "trace"}
	// used by redirected standard logger
	LvlStdLog = LogLevel{ID: 10, Name: "stdlog"}
	// used only by the logger
	LvlLogError = LogLevel{ID: 11, Name: "logerror", Stacktrace: true}
)

// Register custom (discrete) levels here.
// !!!!! ID's must not exceed 32,768 !!!!!!
var (
	// used by the audit system
	LvlAuditAPI     = LogLevel{ID: 100, Name: "audit-api"}
	LvlAuditContent = LogLevel{ID: 101, Name: "audit-content"}
	LvlAuditPerms   = LogLevel{ID: 102, Name: "audit-permissions"}
	LvlAuditCLI     = LogLevel{ID: 103, Name: "audit-cli"}
	// used by the TCP log target
	LvlTcpLogTarget = LogLevel{ID: 120, Name: "TcpLogTarget"}
	// used by Remote Cluster Service
	LvlRemoteClusterServiceDebug = LogLevel{ID: 130, Name: "RemoteClusterServiceDebug"}
	LvlRemoteClusterServiceError = LogLevel{ID: 131, Name: "RemoteClusterServiceError"}
	LvlRemoteClusterServiceWarn  = LogLevel{ID: 132, Name: "RemoteClusterServiceWarn"}
	// used by Shared Channel Sync Service
	LvlSharedChannelServiceDebug            = LogLevel{ID: 200, Name: "SharedChannelServiceDebug"}
	LvlSharedChannelServiceError            = LogLevel{ID: 201, Name: "SharedChannelServiceError"}
	LvlSharedChannelServiceWarn             = LogLevel{ID: 202, Name: "SharedChannelServiceWarn"}
	LvlSharedChannelServiceMessagesInbound  = LogLevel{ID: 203, Name: "SharedChannelServiceMsgInbound"}
	LvlSharedChannelServiceMessagesOutbound = LogLevel{ID: 204, Name: "SharedChannelServiceMsgOutbound"}
	// add more here ...
)

// Combinations for LogM (log multi)
var (
	// MLvlAuditAll targets every audit level at once.
	MLvlAuditAll = []LogLevel{LvlAuditAPI, LvlAuditContent, LvlAuditPerms, LvlAuditCLI}
)

View File

@ -0,0 +1,361 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
import (
"context"
"fmt"
"io"
"log"
"os"
"sync"
"sync/atomic"
"time"
"github.com/mattermost/logr"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
)
const (
	// Very verbose messages for debugging specific issues
	LevelDebug = "debug"
	// Default log level, informational
	LevelInfo = "info"
	// Warnings are messages about possible issues
	LevelWarn = "warn"
	// Errors are messages about things we know are problems
	LevelError = "error"
	// DefaultFlushTimeout is the default amount of time mlog.Flush will wait
	// before timing out.
	DefaultFlushTimeout = time.Second * 5
)

var (
	// disableZap is set when Zap should be disabled and Logr used instead.
	// This is needed for unit testing as Zap has no shutdown capabilities
	// and holds file handles until process exit. Currently unit test create
	// many server instances, and thus many Zap log files.
	// This flag will be removed when Zap is permanently replaced.
	disableZap int32
)

// Type and function aliases from zap to limit the libraries scope into MM code
type Field = zapcore.Field

var Int64 = zap.Int64
var Int32 = zap.Int32
var Int = zap.Int
var Uint32 = zap.Uint32
var String = zap.String
var Any = zap.Any
var Err = zap.Error
var NamedErr = zap.NamedError
var Bool = zap.Bool
var Duration = zap.Duration

// LoggerIFace is the minimal logging interface implemented by Logger.
type LoggerIFace interface {
	IsLevelEnabled(LogLevel) bool
	Debug(string, ...Field)
	Info(string, ...Field)
	Warn(string, ...Field)
	Error(string, ...Field)
	Critical(string, ...Field)
	Log(LogLevel, string, ...Field)
	LogM([]LogLevel, string, ...Field)
}

// TargetInfo describes a logr target.
type TargetInfo logr.TargetInfo

// LoggerConfiguration configures the console and file outputs for NewLogger.
type LoggerConfiguration struct {
	EnableConsole bool
	ConsoleJson   bool
	EnableColor   bool
	ConsoleLevel  string
	EnableFile    bool
	FileJson      bool
	FileLevel     string
	FileLocation  string
}

// Logger writes records through both a zap logger and a logr logger.
type Logger struct {
	zap          *zap.Logger
	consoleLevel zap.AtomicLevel
	fileLevel    zap.AtomicLevel
	logrLogger   *logr.Logger
	mutex        *sync.RWMutex
}
// getZapLevel maps an mlog level name to the corresponding zap level,
// defaulting to info for unrecognized names.
func getZapLevel(level string) zapcore.Level {
	switch level {
	case LevelDebug:
		return zapcore.DebugLevel
	case LevelInfo:
		return zapcore.InfoLevel
	case LevelWarn:
		return zapcore.WarnLevel
	case LevelError:
		return zapcore.ErrorLevel
	}
	return zapcore.InfoLevel
}
// makeEncoder builds a zap encoder: JSON output when json is set, otherwise a
// console encoder with ISO8601 timestamps and optionally colorized levels.
func makeEncoder(json, color bool) zapcore.Encoder {
	cfg := zap.NewProductionEncoderConfig()
	if json {
		return zapcore.NewJSONEncoder(cfg)
	}
	cfg.EncodeTime = zapcore.ISO8601TimeEncoder
	if color {
		cfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
	}
	return zapcore.NewConsoleEncoder(cfg)
}
// NewLogger builds a Logger from config. Console and file outputs are enabled
// independently; when zap is disabled (see disableZap), file output is routed
// through a logr target instead of a lumberjack-backed zap core.
func NewLogger(config *LoggerConfiguration) *Logger {
	cores := []zapcore.Core{}
	logger := &Logger{
		consoleLevel: zap.NewAtomicLevelAt(getZapLevel(config.ConsoleLevel)),
		fileLevel:    zap.NewAtomicLevelAt(getZapLevel(config.FileLevel)),
		logrLogger:   newLogr(),
		mutex:        &sync.RWMutex{},
	}
	if config.EnableConsole {
		writer := zapcore.Lock(os.Stderr)
		core := zapcore.NewCore(makeEncoder(config.ConsoleJson, config.EnableColor), writer, logger.consoleLevel)
		cores = append(cores, core)
	}
	if config.EnableFile {
		if atomic.LoadInt32(&disableZap) != 0 {
			// Zap disabled: use a logr file target so files can actually be
			// closed (see the disableZap comment above).
			t := &LogTarget{
				Type:         "file",
				Format:       "json",
				Levels:       mlogLevelToLogrLevels(config.FileLevel),
				MaxQueueSize: DefaultMaxTargetQueue,
				Options: []byte(fmt.Sprintf(`{"Filename":"%s", "MaxSizeMB":%d, "Compress":%t}`,
					config.FileLocation, 100, true)),
			}
			if !config.FileJson {
				t.Format = "plain"
			}
			if tgt, err := NewLogrTarget("mlogFile", t); err == nil {
				logger.logrLogger.Logr().AddTarget(tgt)
			} else {
				Error("error creating mlogFile", Err(err))
			}
		} else {
			// Rotate at 100 MB; compress rotated files.
			writer := zapcore.AddSync(&lumberjack.Logger{
				Filename: config.FileLocation,
				MaxSize:  100,
				Compress: true,
			})
			core := zapcore.NewCore(makeEncoder(config.FileJson, false), writer, logger.fileLevel)
			cores = append(cores, core)
		}
	}
	combinedCore := zapcore.NewTee(cores...)
	logger.zap = zap.New(combinedCore,
		zap.AddCaller(),
	)
	return logger
}
func (l *Logger) ChangeLevels(config *LoggerConfiguration) {
l.consoleLevel.SetLevel(getZapLevel(config.ConsoleLevel))
l.fileLevel.SetLevel(getZapLevel(config.FileLevel))
}
func (l *Logger) SetConsoleLevel(level string) {
l.consoleLevel.SetLevel(getZapLevel(level))
}
func (l *Logger) With(fields ...Field) *Logger {
newLogger := *l
newLogger.zap = newLogger.zap.With(fields...)
if newLogger.getLogger() != nil {
ll := newLogger.getLogger().WithFields(zapToLogr(fields))
newLogger.logrLogger = &ll
}
return &newLogger
}
func (l *Logger) StdLog(fields ...Field) *log.Logger {
return zap.NewStdLog(l.With(fields...).zap.WithOptions(getStdLogOption()))
}
// StdLogAt returns *log.Logger which writes to supplied zap logger at required level.
// The supplied fields are attached to every message.
func (l *Logger) StdLogAt(level string, fields ...Field) (*log.Logger, error) {
	return zap.NewStdLogAt(l.With(fields...).zap.WithOptions(getStdLogOption()), getZapLevel(level))
}
// StdLogWriter returns a writer that can be hooked up to the output of a golang standard logger
// anything written will be interpreted as log entries accordingly
// Each written line is emitted at Info level; the extra caller-skip accounts
// for the standard log package's internal call depth.
func (l *Logger) StdLogWriter() io.Writer {
	newLogger := *l
	newLogger.zap = newLogger.zap.WithOptions(zap.AddCallerSkip(4), getStdLogOption())
	f := newLogger.Info
	return &loggerWriter{f}
}
// WithCallerSkip returns a copy of the logger whose zap caller annotation
// skips `skip` additional stack frames (useful for wrapper functions).
func (l *Logger) WithCallerSkip(skip int) *Logger {
	newLogger := *l
	newLogger.zap = newLogger.zap.WithOptions(zap.AddCallerSkip(skip))
	return &newLogger
}
// Made for the plugin interface, wraps mlog in a simpler interface
// at the cost of performance
func (l *Logger) Sugar() *SugarLogger {
	return &SugarLogger{
		wrappedLogger: l,
		zapSugar: l.zap.Sugar(),
	}
}
// IsLevelEnabled reports whether the advanced (logr) logger would emit
// records at the given level.
func (l *Logger) IsLevelEnabled(level LogLevel) bool {
	return isLevelEnabled(l.getLogger(), logr.Level(level))
}
// Debug logs at debug level to the zap cores and, when the level is enabled
// there, to the logr targets as well.
func (l *Logger) Debug(message string, fields ...Field) {
	l.zap.Debug(message, fields...)
	if isLevelEnabled(l.getLogger(), logr.Debug) {
		l.getLogger().WithFields(zapToLogr(fields)).Debug(message)
	}
}
// Info logs at info level to the zap cores and, when the level is enabled
// there, to the logr targets as well.
func (l *Logger) Info(message string, fields ...Field) {
	l.zap.Info(message, fields...)
	if isLevelEnabled(l.getLogger(), logr.Info) {
		l.getLogger().WithFields(zapToLogr(fields)).Info(message)
	}
}
// Warn logs at warn level to the zap cores and, when the level is enabled
// there, to the logr targets as well.
func (l *Logger) Warn(message string, fields ...Field) {
	l.zap.Warn(message, fields...)
	if isLevelEnabled(l.getLogger(), logr.Warn) {
		l.getLogger().WithFields(zapToLogr(fields)).Warn(message)
	}
}
// Error logs at error level to the zap cores and, when the level is enabled
// there, to the logr targets as well.
func (l *Logger) Error(message string, fields ...Field) {
	l.zap.Error(message, fields...)
	if isLevelEnabled(l.getLogger(), logr.Error) {
		l.getLogger().WithFields(zapToLogr(fields)).Error(message)
	}
}
// Critical logs at error level in both backends; neither zap nor the logr
// path here has a distinct "critical" severity.
func (l *Logger) Critical(message string, fields ...Field) {
	l.zap.Error(message, fields...)
	if isLevelEnabled(l.getLogger(), logr.Error) {
		l.getLogger().WithFields(zapToLogr(fields)).Error(message)
	}
}
// Log emits the message at the given (possibly custom) level via the logr
// logger only; zap is not consulted for custom levels.
func (l *Logger) Log(level LogLevel, message string, fields ...Field) {
	l.getLogger().WithFields(zapToLogr(fields)).Log(logr.Level(level), message)
}
// LogM emits the message once for each enabled level in levels. The field
// conversion is deferred until at least one level is known to be active.
func (l *Logger) LogM(levels []LogLevel, message string, fields ...Field) {
	var logger *logr.Logger
	for _, lvl := range levels {
		if isLevelEnabled(l.getLogger(), logr.Level(lvl)) {
			// don't create logger with fields unless at least one level is active.
			if logger == nil {
				// note: this `l` intentionally shadows the receiver.
				l := l.getLogger().WithFields(zapToLogr(fields))
				logger = &l
			}
			logger.Log(logr.Level(lvl), message)
		}
	}
}
// Flush blocks until queued logr records are written out, or until the
// supplied context expires.
func (l *Logger) Flush(cxt context.Context) error {
	return l.getLogger().Logr().FlushWithTimeout(cxt)
}
// ShutdownAdvancedLogging stops the logger from accepting new log records and tries to
// flush queues within the context timeout. Once complete all targets are shutdown
// and any resources released.
// A fresh, target-less logr logger replaces the old one so the instance
// remains usable afterwards.
func (l *Logger) ShutdownAdvancedLogging(cxt context.Context) error {
	err := l.getLogger().Logr().ShutdownWithTimeout(cxt)
	l.setLogger(newLogr())
	return err
}
// ConfigAdvancedLogging (re)configures advanced logging based on the
// specified log targets. This is the easiest way to get the advanced logger
// configured via a config source such as file.
func (l *Logger) ConfigAdvancedLogging(targets LogTargetCfg) error {
	// Tear down any previously configured targets first; a shutdown failure
	// is logged but does not abort the reconfiguration.
	if err := l.ShutdownAdvancedLogging(context.Background()); err != nil {
		Error("error shutting down previous logger", Err(err))
	}
	return logrAddTargets(l.getLogger(), targets)
}
// AddTarget adds one or more logr.Target to the advanced logger. This is the preferred method
// to add custom targets or provide configuration that cannot be expressed via a
// config source.
func (l *Logger) AddTarget(targets ...logr.Target) error {
	return l.getLogger().Logr().AddTarget(targets...)
}
// RemoveTargets selectively removes targets that were previously added to this logger instance
// using the passed in filter function. The filter function should return true to remove the target
// and false to keep it.
func (l *Logger) RemoveTargets(ctx context.Context, f func(ti TargetInfo) bool) bool {
	// Use locally defined TargetInfo type so we don't spread Logr dependencies.
	fc := func(tic logr.TargetInfo) bool {
		return f(TargetInfo(tic))
	}
	return l.getLogger().Logr().RemoveTargets(ctx, fc)
}
// EnableMetrics enables metrics collection by supplying a MetricsCollector.
// The MetricsCollector provides counters and gauges that are updated by log targets.
func (l *Logger) EnableMetrics(collector logr.MetricsCollector) error {
	return l.getLogger().Logr().SetMetricsCollector(collector)
}
// getLogger is a concurrent safe getter of the logr logger.
// The lock is acquired before the deferred unlock is registered; the previous
// order would attempt to RUnlock an un-held mutex if RLock ever panicked.
func (l *Logger) getLogger() *logr.Logger {
	l.mutex.RLock()
	defer l.mutex.RUnlock()
	return l.logrLogger
}
// setLogger is a concurrent safe setter of the logr logger.
// The lock is acquired before the deferred unlock is registered; the previous
// order would attempt to Unlock an un-held mutex if Lock ever panicked.
func (l *Logger) setLogger(logger *logr.Logger) {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	l.logrLogger = logger
}
// DisableZap is called to disable Zap, and Logr will be used instead. Any Logger
// instances created after this call will only use Logr.
//
// This is needed for unit testing as Zap has no shutdown capabilities
// and holds file handles until process exit. Currently unit tests create
// many server instances, and thus many Zap log file handles.
//
// This method will be removed when Zap is permanently replaced.
func DisableZap() {
	atomic.StoreInt32(&disableZap, 1)
}
// EnableZap re-enables Zap such that any Logger instances created after this
// call will allow Zap targets.
func EnableZap() {
	atomic.StoreInt32(&disableZap, 0)
}

View File

@ -0,0 +1,244 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
import (
"encoding/json"
"fmt"
"io"
"os"
"github.com/hashicorp/go-multierror"
"github.com/mattermost/logr"
logrFmt "github.com/mattermost/logr/format"
"github.com/mattermost/logr/target"
"go.uber.org/zap/zapcore"
)
const (
	// DefaultMaxTargetQueue is the per-target record queue size used when a
	// LogTarget config leaves MaxQueueSize at zero.
	DefaultMaxTargetQueue = 1000
	// DefaultSysLogPort is the standard syslog port, used when a syslog
	// target config omits Port.
	DefaultSysLogPort = 514
)
// LogLevel describes one discrete logr level: its numeric ID, display name,
// and whether records logged at it capture a stacktrace.
type LogLevel struct {
	ID         logr.LevelID
	Name       string
	Stacktrace bool
}
// LogTarget describes a single logging sink as parsed from configuration.
type LogTarget struct {
	Type         string // one of "console", "file", "tcp", "syslog", "none".
	Format       string // one of "json", "plain"
	Levels       []LogLevel
	Options      json.RawMessage // target-specific options, decoded per Type
	MaxQueueSize int             // 0 means DefaultMaxTargetQueue
}
// LogTargetCfg maps a target name to its configuration.
type LogTargetCfg map[string]*LogTarget

// LogrCleanup is a callback that releases logging resources, returning any error.
type LogrCleanup func() error
// newLogr builds a logr.Logger wired to the package-level error/queue-full
// handlers, with exit and panic hooks replaced by no-ops so logging failures
// cannot terminate the process.
func newLogr() *logr.Logger {
	lgr := &logr.Logr{}
	lgr.OnExit = func(int) {}
	lgr.OnPanic = func(interface{}) {}
	lgr.OnLoggerError = onLoggerError
	lgr.OnQueueFull = onQueueFull
	lgr.OnTargetQueueFull = onTargetQueueFull
	logger := lgr.NewLogger()
	return &logger
}
// logrAddTargets instantiates every configured target and adds it to the
// logger. All targets are attempted; per-target failures are accumulated and
// returned as a single multierror (nil when everything succeeded).
func logrAddTargets(logger *logr.Logger, targets LogTargetCfg) error {
	lgr := logger.Logr()
	var errs error
	for name, t := range targets {
		// "tgt" avoids shadowing the imported "target" package.
		tgt, err := NewLogrTarget(name, t)
		if err != nil {
			// Bug fix: the previous multierror.Append(err) discarded all
			// previously accumulated errors on each failure.
			errs = multierror.Append(errs, err)
			continue
		}
		// NewLogrTarget returns (nil, nil) for the "none" type.
		if tgt != nil {
			tgt.SetName(name)
			lgr.AddTarget(tgt)
		}
	}
	return errs
}
// NewLogrTarget creates a `logr.Target` based on a target config.
// Can be used when parsing custom config files, or when programmatically adding
// built-in targets. Use `mlog.AddTarget` to add custom targets.
// Returns (nil, nil) for the "none" type. Note: t.MaxQueueSize is defaulted
// in place when zero.
func NewLogrTarget(name string, t *LogTarget) (logr.Target, error) {
	formatter, err := newFormatter(name, t.Format)
	if err != nil {
		return nil, err
	}
	filter := newFilter(t.Levels)
	if t.MaxQueueSize == 0 {
		t.MaxQueueSize = DefaultMaxTargetQueue
	}
	switch t.Type {
	case "console":
		return newConsoleTarget(name, t, filter, formatter)
	case "file":
		return newFileTarget(name, t, filter, formatter)
	case "syslog":
		return newSyslogTarget(name, t, filter, formatter)
	case "tcp":
		return newTCPTarget(name, t, filter, formatter)
	case "none":
		return nil, nil
	}
	return nil, fmt.Errorf("invalid type '%s' for target %s", t.Type, name)
}
// newFilter builds a logr custom filter that enables exactly the supplied
// discrete levels.
func newFilter(levels []LogLevel) logr.Filter {
	f := &logr.CustomFilter{}
	for i := range levels {
		f.Add(logr.Level(levels[i]))
	}
	return f
}
// newFormatter maps a format name to a logr formatter; an empty format name
// defaults to JSON.
func newFormatter(name string, format string) (logr.Formatter, error) {
	switch format {
	case "json", "":
		return &logrFmt.JSON{}, nil
	case "plain":
		return &logrFmt.Plain{Delim: " | "}, nil
	default:
		return nil, fmt.Errorf("invalid format '%s' for target %s", format, name)
	}
}
// newConsoleTarget builds a writer target for stdout/stderr from the target's
// JSON options. A missing/empty Options payload defaults to stdout instead of
// failing json.Unmarshal on empty input.
func newConsoleTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) {
	type consoleOptions struct {
		Out string `json:"Out"`
	}
	options := &consoleOptions{}
	if len(t.Options) > 0 {
		if err := json.Unmarshal(t.Options, options); err != nil {
			return nil, err
		}
	}
	var w io.Writer
	switch options.Out {
	case "stdout", "":
		w = os.Stdout
	case "stderr":
		w = os.Stderr
	default:
		return nil, fmt.Errorf("invalid out '%s' for target %s", options.Out, name)
	}
	newTarget := target.NewWriterTarget(filter, formatter, w, t.MaxQueueSize)
	return newTarget, nil
}
// newFileTarget decodes rotation options (size in MB, age in days, backup
// count, compression) from the target's JSON options and builds a file target.
func newFileTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) {
	type fileOptions struct {
		Filename   string `json:"Filename"`
		MaxSize    int    `json:"MaxSizeMB"`
		MaxAge     int    `json:"MaxAgeDays"`
		MaxBackups int    `json:"MaxBackups"`
		Compress   bool   `json:"Compress"`
	}
	options := &fileOptions{}
	if err := json.Unmarshal(t.Options, options); err != nil {
		return nil, err
	}
	return newFileTargetWithOpts(name, t, target.FileOptions(*options), filter, formatter)
}
// newFileTargetWithOpts validates that a filename is present and writable
// before constructing the file target.
func newFileTargetWithOpts(name string, t *LogTarget, opts target.FileOptions, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) {
	if opts.Filename == "" {
		return nil, fmt.Errorf("missing 'Filename' option for target %s", name)
	}
	if err := checkFileWritable(opts.Filename); err != nil {
		return nil, fmt.Errorf("error writing to 'Filename' for target %s: %w", name, err)
	}
	newTarget := target.NewFileTarget(filter, formatter, opts, t.MaxQueueSize)
	return newTarget, nil
}
// newSyslogTarget decodes syslog options; IP is mandatory, Port falls back to
// DefaultSysLogPort when zero.
func newSyslogTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) {
	options := &SyslogParams{}
	if err := json.Unmarshal(t.Options, options); err != nil {
		return nil, err
	}
	if options.IP == "" {
		return nil, fmt.Errorf("missing 'IP' option for target %s", name)
	}
	if options.Port == 0 {
		options.Port = DefaultSysLogPort
	}
	return NewSyslogTarget(filter, formatter, options, t.MaxQueueSize)
}
// newTCPTarget decodes tcp socket options; both IP and Port are mandatory.
func newTCPTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) {
	options := &TcpParams{}
	if err := json.Unmarshal(t.Options, options); err != nil {
		return nil, err
	}
	if options.IP == "" {
		return nil, fmt.Errorf("missing 'IP' option for target %s", name)
	}
	if options.Port == 0 {
		return nil, fmt.Errorf("missing 'Port' option for target %s", name)
	}
	return NewTcpTarget(filter, formatter, options, t.MaxQueueSize)
}
// checkFileWritable verifies the log file can be created (or opened) for
// appending. The Close error is now propagated instead of silently dropped,
// since write-side problems can surface at close time.
func checkFileWritable(filename string) error {
	// try opening/creating the file for writing
	file, err := os.OpenFile(filename, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	return file.Close()
}
// isLevelEnabled reports whether the given logr logger would emit records at
// the supplied level; nil loggers are treated as disabled.
func isLevelEnabled(logger *logr.Logger, level logr.Level) bool {
	if logger == nil {
		return false
	}
	lgr := logger.Logr()
	if lgr == nil {
		return false
	}
	return lgr.IsLevelEnabled(level).Enabled
}
// zapToLogr converts Zap fields to Logr fields.
// This will not be needed once Logr is used for all logging.
func zapToLogr(zapFields []Field) logr.Fields {
	// A map encoder materializes each field's final key/value pair so the
	// result can be handed to logr as a plain map.
	encoder := zapcore.NewMapObjectEncoder()
	for _, zapField := range zapFields {
		zapField.AddTo(encoder)
	}
	return logr.Fields(encoder.Fields)
}
// mlogLevelToLogrLevels converts a single mlog logger level name into the set
// of discrete Logr levels it implies. Error, Panic, Fatal and StdLog are
// always included; Debug/Info/Warn cascade so each level also enables the
// less-verbose ones.
func mlogLevelToLogrLevels(level string) []LogLevel {
	levels := []LogLevel{LvlError, LvlPanic, LvlFatal, LvlStdLog}
	switch level {
	case LevelDebug:
		levels = append(levels, LvlDebug, LvlInfo, LvlWarn)
	case LevelInfo:
		levels = append(levels, LvlInfo, LvlWarn)
	case LevelWarn:
		levels = append(levels, LvlWarn)
	}
	return levels
}

View File

@ -0,0 +1,87 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
import (
"bytes"
"strings"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Implementation of zapcore.Core to interpret log messages from a standard logger
// and translate the levels to zapcore levels.
type stdLogLevelInterpreterCore struct {
	wrappedCore zapcore.Core // the real core that performs the output
}
// stdLogInterpretZapEntry rewrites an entry whose message begins with a
// conventional "[LEVEL]" tag: the tag is stripped and the entry's level is set
// accordingly. Messages with no recognized tag pass through unchanged.
func stdLogInterpretZapEntry(entry zapcore.Entry) zapcore.Entry {
	// Tag table replaces the previous strings.Index(...) == 0 chain with
	// strings.HasPrefix and derives the trim length from the tag itself,
	// eliminating the hand-counted slice offsets.
	tags := []struct {
		tag   string
		level zapcore.Level
	}{
		{"[DEBUG]", zapcore.DebugLevel},
		{"[DEBG]", zapcore.DebugLevel},
		{"[WARN]", zapcore.WarnLevel},
		{"[ERROR]", zapcore.ErrorLevel},
		{"[EROR]", zapcore.ErrorLevel},
		{"[ERR]", zapcore.ErrorLevel},
		{"[INFO]", zapcore.InfoLevel},
	}
	for _, t := range tags {
		if strings.HasPrefix(entry.Message, t.tag) {
			entry.Level = t.level
			entry.Message = entry.Message[len(t.tag):]
			break
		}
	}
	return entry
}
// Enabled delegates the level check to the wrapped core.
func (s *stdLogLevelInterpreterCore) Enabled(lvl zapcore.Level) bool {
	return s.wrappedCore.Enabled(lvl)
}
// With attaches fields to the wrapped core and re-wraps the result so the
// derived core keeps interpreting "[LEVEL]" prefixes; previously the wrapper
// was dropped here, disabling interpretation on cores derived via With.
func (s *stdLogLevelInterpreterCore) With(fields []zapcore.Field) zapcore.Core {
	return &stdLogLevelInterpreterCore{s.wrappedCore.With(fields)}
}
// Check rewrites the entry's level from any "[LEVEL]" message prefix before
// delegating the check to the wrapped core.
func (s *stdLogLevelInterpreterCore) Check(entry zapcore.Entry, checkedEntry *zapcore.CheckedEntry) *zapcore.CheckedEntry {
	entry = stdLogInterpretZapEntry(entry)
	return s.wrappedCore.Check(entry, checkedEntry)
}
// Write rewrites the entry's level from any "[LEVEL]" message prefix before
// delegating the write to the wrapped core.
func (s *stdLogLevelInterpreterCore) Write(entry zapcore.Entry, fields []zapcore.Field) error {
	entry = stdLogInterpretZapEntry(entry)
	return s.wrappedCore.Write(entry, fields)
}
// Sync delegates flushing to the wrapped core.
func (s *stdLogLevelInterpreterCore) Sync() error {
	return s.wrappedCore.Sync()
}
// getStdLogOption returns a zap option that wraps the logger's core in the
// "[LEVEL]"-prefix interpreter, for use with standard-library log adapters.
func getStdLogOption() zap.Option {
	return zap.WrapCore(
		func(core zapcore.Core) zapcore.Core {
			return &stdLogLevelInterpreterCore{core}
		},
	)
}
// loggerWriter adapts a leveled logging func to io.Writer, emitting one log
// entry per written line.
type loggerWriter struct {
	logFunc func(msg string, fields ...Field)
}
// Write trims surrounding whitespace from the payload, then emits each
// remaining line through logFunc. The full input length is always reported as
// consumed, per the io.Writer contract.
func (l *loggerWriter) Write(p []byte) (int, error) {
	trimmed := bytes.TrimSpace(p)
	for _, line := range strings.Split(string(trimmed), "\n") {
		l.logFunc(line)
	}
	return len(p), nil
}

View File

@ -0,0 +1,30 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
import (
"go.uber.org/zap"
)
// Made for the plugin interface, use the regular logger for other uses
type SugarLogger struct {
	wrappedLogger *Logger            // kept so callers can reach the structured logger
	zapSugar      *zap.SugaredLogger // loosely-typed zap façade doing the actual logging
}
// Debug logs a message with loosely-typed key/value pairs at debug level.
func (l *SugarLogger) Debug(msg string, keyValuePairs ...interface{}) {
	l.zapSugar.Debugw(msg, keyValuePairs...)
}
// Info logs a message with loosely-typed key/value pairs at info level.
func (l *SugarLogger) Info(msg string, keyValuePairs ...interface{}) {
	l.zapSugar.Infow(msg, keyValuePairs...)
}
// Error logs a message with loosely-typed key/value pairs at error level.
func (l *SugarLogger) Error(msg string, keyValuePairs ...interface{}) {
	l.zapSugar.Errorw(msg, keyValuePairs...)
}
// Warn logs a message with loosely-typed key/value pairs at warn level.
func (l *SugarLogger) Warn(msg string, keyValuePairs ...interface{}) {
	l.zapSugar.Warnw(msg, keyValuePairs...)
}

View File

@ -0,0 +1,142 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"io/ioutil"
"github.com/mattermost/logr"
"github.com/wiggin77/merror"
syslog "github.com/wiggin77/srslog"
)
// Syslog outputs log records to local or remote syslog.
type Syslog struct {
	logr.Basic
	w *syslog.Writer // underlying srslog writer (TCP, optionally TLS)
}
// SyslogParams provides parameters for dialing a syslog daemon.
type SyslogParams struct {
	IP       string `json:"IP"`       // host/IP of the syslog server
	Port     int    `json:"Port"`     // 0 is replaced by DefaultSysLogPort in newSyslogTarget
	Tag      string `json:"Tag"`      // syslog tag attached to each message
	TLS      bool   `json:"TLS"`      // dial with TLS when true
	Cert     string `json:"Cert"`     // optional extra root CA: file path or base64 PEM
	Insecure bool   `json:"Insecure"` // skip server certificate verification
}
// NewSyslogTarget creates a target capable of outputting log records to remote or local syslog, with or without TLS.
// The connection is always TCP; with params.TLS it is wrapped in TLS,
// optionally trusting an extra root CA supplied via params.Cert.
func NewSyslogTarget(filter logr.Filter, formatter logr.Formatter, params *SyslogParams, maxQueue int) (*Syslog, error) {
	network := "tcp"
	var config *tls.Config
	if params.TLS {
		network = "tcp+tls"
		config = &tls.Config{InsecureSkipVerify: params.Insecure}
		if params.Cert != "" {
			pool, err := getCertPool(params.Cert)
			if err != nil {
				return nil, err
			}
			config.RootCAs = pool
		}
	}
	raddr := fmt.Sprintf("%s:%d", params.IP, params.Port)
	writer, err := syslog.DialWithTLSConfig(network, raddr, syslog.LOG_INFO, params.Tag, config)
	if err != nil {
		return nil, err
	}
	s := &Syslog{w: writer}
	s.Basic.Start(s, s, filter, formatter, maxQueue)
	return s, nil
}
// Shutdown stops processing log records after making best effort to flush queue.
// Errors from the queue shutdown and from closing the writer are merged.
func (s *Syslog) Shutdown(ctx context.Context) error {
	errs := merror.New()
	err := s.Basic.Shutdown(ctx)
	errs.Append(err)
	err = s.w.Close()
	errs.Append(err)
	return errs.ErrorOrNil()
}
// getCertPool returns a x509.CertPool containing the cert(s)
// from `cert`, which can be a path to a .pem or .crt file,
// or a base64 encoded cert.
func getCertPool(cert string) (*x509.CertPool, error) {
	if cert == "" {
		return nil, errors.New("no cert provided")
	}

	// Try to interpret the value as a file path first; fall back to
	// treating it as an inline base64-encoded payload.
	pemBytes, err := ioutil.ReadFile(cert)
	if err != nil {
		var decErr error
		pemBytes, decErr = base64.StdEncoding.DecodeString(cert)
		if decErr != nil {
			return nil, errors.New("cert cannot be read")
		}
	}

	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pemBytes) {
		return nil, errors.New("cannot parse cert")
	}
	return pool, nil
}
// Write converts the log record to bytes, via the Formatter,
// and outputs to syslog.
// The record's level selects the syslog severity; write failures are reported
// through logr's error reporter but still returned.
func (s *Syslog) Write(rec *logr.LogRec) error {
	_, stacktrace := s.IsLevelEnabled(rec.Level())
	// Borrow a formatting buffer from logr and return it when done.
	buf := rec.Logger().Logr().BorrowBuffer()
	defer rec.Logger().Logr().ReleaseBuffer(buf)
	buf, err := s.Formatter().Format(rec, stacktrace, buf)
	if err != nil {
		return err
	}
	txt := buf.String()
	switch rec.Level() {
	case logr.Panic, logr.Fatal:
		err = s.w.Crit(txt)
	case logr.Error:
		err = s.w.Err(txt)
	case logr.Warn:
		err = s.w.Warning(txt)
	case logr.Debug, logr.Trace:
		err = s.w.Debug(txt)
	default:
		// logr.Info plus all custom levels.
		err = s.w.Info(txt)
	}
	if err != nil {
		reporter := rec.Logger().Logr().ReportError
		reporter(fmt.Errorf("syslog write fail: %w", err))
		// syslog writer will try to reconnect.
	}
	return err
}
// String returns a string representation of this target.
func (s *Syslog) String() string {
	return "SyslogTarget"
}

View File

@ -0,0 +1,273 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
_ "net/http/pprof"
"sync"
"time"
"github.com/hashicorp/go-multierror"
"github.com/mattermost/logr"
)
const (
	// DialTimeoutSecs bounds connection establishment.
	DialTimeoutSecs = 30
	// WriteTimeoutSecs bounds each record write.
	WriteTimeoutSecs = 30
	// RetryBackoffMillis is the initial retry delay; sleep() grows it by
	// roughly 1.5x per attempt up to MaxRetryBackoffMillis.
	RetryBackoffMillis    int64 = 100
	MaxRetryBackoffMillis int64 = 30 * 1000 // 30 seconds
)
// Tcp outputs log records to raw socket server.
type Tcp struct {
	logr.Basic
	params   *TcpParams
	addy     string        // cached "IP:Port" dial address
	mutex    sync.Mutex    // guards conn and monitor
	conn     net.Conn      // nil while disconnected
	monitor  chan struct{} // closed to stop the current connection's monitor goroutine
	shutdown chan struct{} // closed by Shutdown to abort dials and write retries
}
// TcpParams provides parameters for dialing a socket server.
type TcpParams struct {
	IP       string `json:"IP"`       // host/IP of the socket server
	Port     int    `json:"Port"`     // mandatory (validated in newTCPTarget)
	TLS      bool   `json:"TLS"`      // perform a TLS handshake after dialing
	Cert     string `json:"Cert"`     // optional extra root CA: file path or base64 PEM
	Insecure bool   `json:"Insecure"` // skip server certificate verification
}
// NewTcpTarget creates a target capable of outputting log records to a raw socket, with or without TLS.
// The connection itself is established lazily on first write (see getConn).
func NewTcpTarget(filter logr.Filter, formatter logr.Formatter, params *TcpParams, maxQueue int) (*Tcp, error) {
	tcp := &Tcp{
		params:   params,
		addy:     fmt.Sprintf("%s:%d", params.IP, params.Port),
		monitor:  make(chan struct{}),
		shutdown: make(chan struct{}),
	}
	tcp.Basic.Start(tcp, tcp, filter, formatter, maxQueue)
	return tcp, nil
}
// getConn provides a net.Conn. If a connection already exists, it is returned immediately,
// otherwise this method blocks until a new connection is created, timeout or shutdown.
func (tcp *Tcp) getConn() (net.Conn, error) {
	tcp.mutex.Lock()
	defer tcp.mutex.Unlock()

	Log(LvlTcpLogTarget, "getConn enter", String("addy", tcp.addy))
	defer Log(LvlTcpLogTarget, "getConn exit", String("addy", tcp.addy))

	if tcp.conn != nil {
		Log(LvlTcpLogTarget, "reusing existing conn", String("addy", tcp.addy)) // use "With" once Zap is removed
		return tcp.conn, nil
	}

	type result struct {
		conn net.Conn
		err  error
	}

	connChan := make(chan result)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*DialTimeoutSecs)
	defer cancel()

	go func(ctx context.Context, ch chan result) {
		Log(LvlTcpLogTarget, "dialing", String("addy", tcp.addy)) // fixed typo: was "dailing"
		conn, err := tcp.dial(ctx)
		if err == nil {
			tcp.conn = conn
			// Fresh monitor channel per connection so close() only stops the
			// monitor goroutine belonging to this conn.
			tcp.monitor = make(chan struct{})
			go monitor(tcp.conn, tcp.monitor, Log)
		}
		ch <- result{conn: conn, err: err}
	}(ctx, connChan)

	select {
	case <-tcp.shutdown:
		return nil, errors.New("shutdown")
	case res := <-connChan:
		return res.conn, res.err
	}
}
// dial connects to a TCP socket, and optionally performs a TLS handshake.
// A non-nil context must be provided which can cancel the dial.
func (tcp *Tcp) dial(ctx context.Context) (net.Conn, error) {
	var dialer net.Dialer
	dialer.Timeout = time.Second * DialTimeoutSecs
	conn, err := dialer.DialContext(ctx, "tcp", fmt.Sprintf("%s:%d", tcp.params.IP, tcp.params.Port))
	if err != nil {
		return nil, err
	}
	if !tcp.params.TLS {
		return conn, nil
	}
	Log(LvlTcpLogTarget, "TLS handshake", String("addy", tcp.addy))
	tlsconfig := &tls.Config{
		ServerName:         tcp.params.IP,
		InsecureSkipVerify: tcp.params.Insecure,
	}
	if tcp.params.Cert != "" {
		pool, err := getCertPool(tcp.params.Cert)
		if err != nil {
			return nil, err
		}
		tlsconfig.RootCAs = pool
	}
	tlsConn := tls.Client(conn, tlsconfig)
	// NOTE(review): the handshake itself is not bounded by ctx or a deadline
	// here — only the dial is. Confirm whether a deadline is needed.
	if err := tlsConn.Handshake(); err != nil {
		return nil, err
	}
	return tlsConn, nil
}
// close tears down the current connection (if any) and stops its monitor
// goroutine by closing the monitor channel. Safe to call when disconnected.
func (tcp *Tcp) close() error {
	tcp.mutex.Lock()
	defer tcp.mutex.Unlock()
	var err error
	if tcp.conn != nil {
		Log(LvlTcpLogTarget, "closing connection", String("addy", tcp.addy))
		close(tcp.monitor)
		err = tcp.conn.Close()
		tcp.conn = nil
	}
	return err
}
// Shutdown stops processing log records after making best effort to flush queue.
// It closes the shutdown channel last so pending dials/retries observe it.
func (tcp *Tcp) Shutdown(ctx context.Context) error {
	errs := &multierror.Error{}
	Log(LvlTcpLogTarget, "shutting down", String("addy", tcp.addy))
	if err := tcp.Basic.Shutdown(ctx); err != nil {
		errs = multierror.Append(errs, err)
	}
	if err := tcp.close(); err != nil {
		errs = multierror.Append(errs, err)
	}
	close(tcp.shutdown)
	return errs.ErrorOrNil()
}
// Write converts the log record to bytes, via the Formatter, and outputs to the socket.
// Called by dedicated target goroutine and will block until success or shutdown.
// Failures trigger reconnect-and-retry with exponential backoff (see sleep).
func (tcp *Tcp) Write(rec *logr.LogRec) error {
	_, stacktrace := tcp.IsLevelEnabled(rec.Level())
	buf := rec.Logger().Logr().BorrowBuffer()
	defer rec.Logger().Logr().ReleaseBuffer(buf)
	buf, err := tcp.Formatter().Format(rec, stacktrace, buf)
	if err != nil {
		return err
	}
	try := 1
	backoff := RetryBackoffMillis
	for {
		select {
		case <-tcp.shutdown:
			// NOTE(review): `err` here is the (nil) Format result — the inner
			// `conn, err :=` below shadows it — so shutdown mid-retry returns
			// nil. Confirm this is intended.
			return err
		default:
		}
		conn, err := tcp.getConn()
		if err != nil {
			Log(LvlTcpLogTarget, "failed getting connection", String("addy", tcp.addy), Err(err))
			reporter := rec.Logger().Logr().ReportError
			reporter(fmt.Errorf("log target %s connection error: %w", tcp.String(), err))
			backoff = tcp.sleep(backoff)
			continue
		}
		conn.SetWriteDeadline(time.Now().Add(time.Second * WriteTimeoutSecs))
		_, err = buf.WriteTo(conn)
		if err == nil {
			return nil
		}
		Log(LvlTcpLogTarget, "write error", String("addy", tcp.addy), Err(err))
		reporter := rec.Logger().Logr().ReportError
		reporter(fmt.Errorf("log target %s write error: %w", tcp.String(), err))
		// Drop the connection so the next attempt re-dials.
		_ = tcp.close()
		backoff = tcp.sleep(backoff)
		try++
		Log(LvlTcpLogTarget, "retrying write", String("addy", tcp.addy), Int("try", try))
	}
}
// monitor continuously tries to read from the connection to detect socket close.
// This is needed because TCP target uses a write only socket and Linux systems
// take a long time to detect a loss of connectivity on a socket when only writing;
// the writes simply fail without an error returned.
// done is closed by Tcp.close to stop this goroutine; logFunc is injected so
// the monitor can log through the package logger.
func monitor(conn net.Conn, done <-chan struct{}, logFunc LogFuncCustom) {
	addy := conn.RemoteAddr().String()
	defer logFunc(LvlTcpLogTarget, "monitor exiting", String("addy", addy))
	buf := make([]byte, 1)
	for {
		logFunc(LvlTcpLogTarget, "monitor loop", String("addy", addy))
		select {
		case <-done:
			return
		case <-time.After(1 * time.Second):
		}
		err := conn.SetReadDeadline(time.Now().Add(time.Second * 30))
		if err != nil {
			continue
		}
		_, err = conn.Read(buf)
		if errt, ok := err.(net.Error); ok && errt.Timeout() {
			// read timeout is expected, keep looping.
			continue
		}
		// Any other error closes the connection, forcing a reconnect.
		logFunc(LvlTcpLogTarget, "monitor closing connection", Err(err))
		conn.Close()
		return
	}
}
// String returns a string representation of this target.
func (tcp *Tcp) String() string {
	return fmt.Sprintf("TcpTarget[%s:%d]", tcp.params.IP, tcp.params.Port)
}
// sleep waits for the given backoff period (in milliseconds), returning early
// if shutdown is signaled, and returns the next backoff value: roughly 1.5x
// the current one, capped at MaxRetryBackoffMillis.
func (tcp *Tcp) sleep(backoff int64) int64 {
	select {
	case <-tcp.shutdown:
	case <-time.After(time.Millisecond * time.Duration(backoff)):
	}

	next := backoff + (backoff >> 1)
	if next > MaxRetryBackoffMillis {
		next = MaxRetryBackoffMillis
	}
	return next
}

View File

@ -0,0 +1,43 @@
-----BEGIN CERTIFICATE-----
MIIDjzCCAnegAwIBAgIRAPYfRSwdzKopBKxYxKqslJUwDQYJKoZIhvcNAQELBQAw
JzElMCMGA1UEAwwcTWF0dGVybW9zdCwgSW5jLiBJbnRlcm5hbCBDQTAeFw0xOTAz
MjIwMDE0MTVaFw0yMjAzMDYwMDE0MTVaMDsxOTA3BgNVBAMTME1hdHRlcm1vc3Qs
IEluYy4gSW50ZXJuYWwgSW50ZXJtZWRpYXRlIEF1dGhvcml0eTCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAMjliRdmvnNL4u/Jr/M2dPwQmTJXEBY/Vq9Q
vAU52X3tRMCPxcaFz+x6ftuvdO2NdohXGAmtx9QU5LZcvFeTDpoVEBo9A+4jtLvD
DZYaTNLpJmoSoJHaDbdWX+OAOqyDiWS741LuiMKWHhew9QOisat2ZINPxjmAd9wE
xthTMgzsv7MUqnMer8U5OGQ0Qy7wAmNRc+2K3qPwkxe2RUvcte50DUFNgxEginsh
vrkOXR383vUCZfu72qu8oggjiQpyTllu5je2Ap6JLjYLkEMiMqrYADuWor/ZHwa6
WrFqVETxWfAV5u9Eh0wZM/KKYwRQuw9y+Nans77FmUl1tVWWNN8CAwEAAaOBoTCB
njAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBQY4Uqswyr2hO/HetZt2RDxJdTIPjBi
BgNVHSMEWzBZgBRFZXVg2Z5tNIsWeWjBLEy2yzKbMKErpCkwJzElMCMGA1UEAwwc
TWF0dGVybW9zdCwgSW5jLiBJbnRlcm5hbCBDQYIUEifGUOM+bIFZo1tkjZB5YGBr
0xEwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQAEdexL30Q0zBHmPAH8
LhdK7dbzW1CmILbxRZlKAwRN+hKRXiMW3MHIkhNuoV9Aev602Q+ja4lWsRi/ktOL
ni1FWx5gSScgdG8JGj47dOmoT3vXKX7+umiv4rQLPDl9/DKMuv204OYJq6VT+uNU
6C6kL157jGJEO76H4fMZ8oYsD7Sq0zjiNKtuCYii0ngH3j3gB1jACLqRgveU7MdT
pqOV2KfY31+h8VBtkUvljNztQ9xNY8Fjmt0SMf7E3FaUcaar3ZCr70G5aU3dKbe7
47vGOBa5tCqw4YK0jgDKid3IJQul9a3J1mSsH8Wy3to9cAV4KGZBQLnzCX15a/+v
3yVh
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDfjCCAmagAwIBAgIUEifGUOM+bIFZo1tkjZB5YGBr0xEwDQYJKoZIhvcNAQEL
BQAwJzElMCMGA1UEAwwcTWF0dGVybW9zdCwgSW5jLiBJbnRlcm5hbCBDQTAeFw0x
OTAzMjEyMTI4NDNaFw0yOTAzMTgyMTI4NDNaMCcxJTAjBgNVBAMMHE1hdHRlcm1v
c3QsIEluYy4gSW50ZXJuYWwgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQDH0Xq5rMBGpKOVWTpb5MnaJIWFP/vOtvEk+7hVrfOfe1/5x0Kk3UgAHj85
otaEZD1Lhn/JLkEqCiE/UXMJFwJDlNcO4CkdKBSpYX4bKAqy5q/X3QwioMSNpJG1
+YYrNGBH0sgKcKjyCaLhmqYLD0xZDVOmWIYBU9jUPyXw5U0tnsVrTqGMxVkm1xCY
krCWN1ZoUrLvL0MCZc5qpxoPTopr9UO9cqSBSuy6BVWVuEWBZhpqHt+ul8VxhzzY
q1k4l7r2qw+/wm1iJBedTeBVeWNag8JaVfLgu+/W7oJVlPO32Po7pnvHp8iJ3b4K
zXyVHaTX4S6Em+6LV8855TYrShzlAgMBAAGjgaEwgZ4wHQYDVR0OBBYEFEVldWDZ
nm00ixZ5aMEsTLbLMpswMGIGA1UdIwRbMFmAFEVldWDZnm00ixZ5aMEsTLbLMpsw
oSukKTAnMSUwIwYDVQQDDBxNYXR0ZXJtb3N0LCBJbmMuIEludGVybmFsIENBghQS
J8ZQ4z5sgVmjW2SNkHlgYGvTETAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjAN
BgkqhkiG9w0BAQsFAAOCAQEAPiCWFmopyAkY2T3Zyo4yaRPhX1+VOTMKJtY6EUhq
/GHz6kzEyvCUBf0N892cibGxekrEoItY9NqO6RQRfowg+Gn5kc13z4NyL2W8/eoT
Xy0ZvfaQbU++fQ6pVtWtMblDMU9xiYd7/MDvJpO328l1Vhcdp8kEi+lCvpy0sCRc
PxzPhbgCMAbZEGx+4TMQd4SZKzlRxW/2fflpReh6v1Dv0VDUSYQWwsUnaLpdKHfh
a5k0vuySYcszE4YKlY0zakeFlJfp7fBp1xTwcdW8aTfw15EicPMwTc6xxA4JJUJx
cddu817n1nayK5u6r9Qh1oIVkr0nC9YELMMy4dpPgJ88SA==
-----END CERTIFICATE-----

View File

@ -0,0 +1,46 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
import (
"io"
"strings"
"sync"
"testing"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// testingWriter is an io.Writer that writes through t.Log
type testingWriter struct {
	tb testing.TB
}

// Write logs the whitespace-trimmed payload via the test's logger and reports
// the full input length as consumed, per the io.Writer contract.
func (tw *testingWriter) Write(b []byte) (int, error) {
	msg := strings.TrimSpace(string(b))
	tw.tb.Log(msg)
	return len(b), nil
}
// NewTestingLogger creates a Logger that proxies logs through a testing interface.
// This allows tests that spin up App instances to avoid spewing logs unless the test fails or -verbose is specified.
// Output is duplicated to both tb's log and the supplied writer; only a
// single debug-level console-style core is installed (no file core).
func NewTestingLogger(tb testing.TB, writer io.Writer) *Logger {
	logWriter := &testingWriter{tb}
	multiWriter := io.MultiWriter(logWriter, writer)
	logWriterSync := zapcore.AddSync(multiWriter)
	testingLogger := &Logger{
		consoleLevel: zap.NewAtomicLevelAt(getZapLevel("debug")),
		fileLevel: zap.NewAtomicLevelAt(getZapLevel("info")),
		logrLogger: newLogr(),
		mutex: &sync.RWMutex{},
	}
	logWriterCore := zapcore.NewCore(makeEncoder(true, false), zapcore.Lock(logWriterSync), testingLogger.consoleLevel)
	testingLogger.zap = zap.New(logWriterCore,
		zap.AddCaller(),
	)
	return testingLogger
}