4
0
mirror of https://github.com/cwinfo/matterbridge.git synced 2025-07-04 19:27:45 +00:00

Update mattermost library (#2152)

* Update mattermost library

* Fix linting
This commit is contained in:
Wim
2024-05-24 23:08:09 +02:00
committed by GitHub
parent 65d78e38af
commit d16645c952
1003 changed files with 89451 additions and 114025 deletions

View File

@ -1,4 +1,4 @@
[![GoDoc](https://godoc.org/github.com/go-ldap/ldap?status.svg)](https://godoc.org/github.com/go-ldap/ldap)
[![Go Reference](https://pkg.go.dev/badge/github.com/mattermost/ldap.svg)](https://pkg.go.dev/github.com/mattermost/ldap)
[![Build Status](https://travis-ci.org/go-ldap/ldap.svg)](https://travis-ci.org/go-ldap/ldap)
# Basic LDAP v3 functionality for the GO programming language.

View File

@ -10,9 +10,8 @@
package ldap
import (
"log"
ber "github.com/go-asn1-ber/asn1-ber"
"github.com/mattermost/mattermost/server/public/shared/mlog"
)
// Attribute represents an LDAP attribute
@ -88,13 +87,14 @@ func (l *Conn) Add(addRequest *AddRequest) error {
return err
}
if packet.Children[1].Tag == ApplicationAddResponse {
tag := packet.Children[1].Tag
if tag == ApplicationAddResponse {
err := GetLDAPError(packet)
if err != nil {
return err
}
} else {
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
l.Debug.Log("Unexpected Response", mlog.Uint("tag", tag))
}
return nil
}

View File

@ -4,7 +4,6 @@ import (
"crypto/tls"
"errors"
"fmt"
"log"
"net"
"net/url"
"sync"
@ -12,6 +11,7 @@ import (
"time"
ber "github.com/go-asn1-ber/asn1-ber"
"github.com/mattermost/mattermost/server/public/shared/mlog"
)
const (
@ -210,14 +210,14 @@ func (l *Conn) Close() {
defer l.messageMutex.Unlock()
if l.setClosing() {
l.Debug.Printf("Sending quit message and waiting for confirmation")
l.Debug.Log("Sending quit message and waiting for confirmation")
l.chanMessage <- &messagePacket{Op: MessageQuit}
<-l.chanConfirm
close(l.chanMessage)
l.Debug.Printf("Closing network connection")
l.Debug.Log("Closing network connection")
if err := l.conn.Close(); err != nil {
log.Println(err)
l.Debug.Log("Error closing network connection", mlog.Err(err))
}
l.wgClose.Done()
@ -251,7 +251,8 @@ func (l *Conn) StartTLS(config *tls.Config) error {
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS")
request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command"))
packet.AppendChild(request)
l.Debug.PrintPacket(packet)
l.Debug.Log("Sending StartTLS packet", PacketToField(packet))
msgCtx, err := l.sendMessageWithFlags(packet, startTLS)
if err != nil {
@ -259,24 +260,24 @@ func (l *Conn) StartTLS(config *tls.Config) error {
}
defer l.finishMessage(msgCtx)
l.Debug.Printf("%d: waiting for response", msgCtx.id)
l.Debug.Log("Waiting for StartTLS response", mlog.Int("id", msgCtx.id))
packetResponse, ok := <-msgCtx.responses
if !ok {
return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
}
packet, err = packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if err != nil {
return err
}
if l.Debug {
if l.Debug.Enabled() {
if err := addLDAPDescriptions(packet); err != nil {
l.Close()
return err
}
l.Debug.PrintPacket(packet)
l.Debug.Log("Got response", mlog.Int("id", msgCtx.id), PacketToField(packet), mlog.Err(err))
}
if err != nil {
return err
}
if err := GetLDAPError(packet); err == nil {
@ -317,7 +318,7 @@ func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags)
return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
}
l.messageMutex.Lock()
l.Debug.Printf("flags&startTLS = %d", flags&startTLS)
if l.isStartingTLS {
l.messageMutex.Unlock()
return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase"))
@ -382,8 +383,8 @@ func (l *Conn) sendProcessMessage(message *messagePacket) bool {
func (l *Conn) processMessages() {
defer func() {
if err := recover(); err != nil {
log.Printf("ldap: recovered panic in processMessages: %v", err)
if r := recover(); r != nil {
l.Debug.Log("Recovered panic in processMessages", mlog.Any("panic", r))
}
for messageID, msgCtx := range l.messageContexts {
// If we are closing due to an error, inform anyone who
@ -391,7 +392,7 @@ func (l *Conn) processMessages() {
if l.IsClosing() && l.closeErr.Load() != nil {
msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)})
}
l.Debug.Printf("Closing channel for MessageID %d", messageID)
l.Debug.Log("Closing channel for MessageID", mlog.Int("message_id", messageID))
close(msgCtx.responses)
delete(l.messageContexts, messageID)
}
@ -407,16 +408,14 @@ func (l *Conn) processMessages() {
case message := <-l.chanMessage:
switch message.Op {
case MessageQuit:
l.Debug.Printf("Shutting down - quit message received")
l.Debug.Log("Quit message received: Shutting down")
return
case MessageRequest:
// Add to message list and write to network
l.Debug.Printf("Sending message %d", message.MessageID)
buf := message.Packet.Bytes()
_, err := l.conn.Write(buf)
if err != nil {
l.Debug.Printf("Error Sending Message: %s", err.Error())
l.Debug.Log("Error Sending Message", mlog.Err(err))
message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)})
close(message.Context.responses)
break
@ -431,8 +430,8 @@ func (l *Conn) processMessages() {
if requestTimeout > 0 {
go func() {
defer func() {
if err := recover(); err != nil {
log.Printf("ldap: recovered panic in RequestTimeout: %v", err)
if r := recover(); r != nil {
l.Debug.Log("Recovered panic in RequestTimeout", mlog.Any("panic", r))
}
}()
time.Sleep(requestTimeout)
@ -444,24 +443,27 @@ func (l *Conn) processMessages() {
}()
}
case MessageResponse:
l.Debug.Printf("Receiving message %d", message.MessageID)
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
msgCtx.sendResponse(&PacketResponse{message.Packet, nil})
} else {
log.Printf("Received unexpected message %d, %v", message.MessageID, l.IsClosing())
l.Debug.PrintPacket(message.Packet)
l.Debug.Log(
"Received unexpected message",
mlog.Int("message_id", message.MessageID),
mlog.Bool("is_closing", l.IsClosing()),
PacketToField(message.Packet),
)
}
case MessageTimeout:
// Handle the timeout by closing the channel
// All reads will return immediately
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
l.Debug.Printf("Receiving message timeout for %d", message.MessageID)
l.Debug.Log("Receiving message timeout", mlog.Int("message_id", message.MessageID))
msgCtx.sendResponse(&PacketResponse{message.Packet, errors.New("ldap: connection timed out")})
delete(l.messageContexts, message.MessageID)
close(msgCtx.responses)
}
case MessageFinish:
l.Debug.Printf("Finished message %d", message.MessageID)
l.Debug.Log("Finished message", mlog.Int("message_id", message.MessageID))
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
delete(l.messageContexts, message.MessageID)
close(msgCtx.responses)
@ -474,8 +476,8 @@ func (l *Conn) processMessages() {
func (l *Conn) reader() {
cleanstop := false
defer func() {
if err := recover(); err != nil {
log.Printf("ldap: recovered panic in reader: %v", err)
if r := recover(); r != nil {
l.Debug.Log("Recovered panic in reader", mlog.Any("panic", r))
}
if !cleanstop {
l.Close()
@ -484,7 +486,7 @@ func (l *Conn) reader() {
for {
if cleanstop {
l.Debug.Printf("reader clean stopping (without closing the connection)")
l.Debug.Log("Reader clean stopping (without closing the connection)")
return
}
packet, err := ber.ReadPacket(l.conn)
@ -492,15 +494,15 @@ func (l *Conn) reader() {
// A read error is expected here if we are closing the connection...
if !l.IsClosing() {
l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err))
l.Debug.Printf("reader error: %s", err)
l.Debug.Log("Reader error", mlog.Err(err))
}
return
}
if err := addLDAPDescriptions(packet); err != nil {
l.Debug.Printf("descriptions error: %s", err)
l.Debug.Log("Descriptions error", mlog.Err(err))
}
if len(packet.Children) == 0 {
l.Debug.Printf("Received bad ldap packet")
l.Debug.Log("Received bad ldap packet")
continue
}
l.messageMutex.Lock()

View File

@ -2,36 +2,48 @@ package ldap
import (
"bytes"
"log"
ber "github.com/go-asn1-ber/asn1-ber"
"github.com/mattermost/mattermost/server/public/shared/mlog"
)
const LDAP_TRACE_PREFIX = "ldap-trace: "
// debugging type
// - has a Printf method to write the debug output
type debugging bool
type debugging struct {
logger mlog.LoggerIFace
levels []mlog.Level
}
// Enable controls debugging mode.
func (debug *debugging) Enable(b bool) {
*debug = debugging(b)
}
// Printf writes debug output.
func (debug debugging) Printf(format string, args ...interface{}) {
if debug {
format = LDAP_TRACE_PREFIX + format
log.Printf(format, args...)
func (debug *debugging) Enable(logger mlog.LoggerIFace, levels ...mlog.Level) {
*debug = debugging{
logger: logger,
levels: levels,
}
}
// PrintPacket dumps a packet.
func (debug debugging) PrintPacket(packet *ber.Packet) {
if debug {
var b bytes.Buffer
ber.WritePacket(&b, packet)
textToPrint := LDAP_TRACE_PREFIX + b.String()
log.Printf(textToPrint)
func (debug debugging) Enabled() bool {
return debug.logger != nil
}
// Log writes debug output.
func (debug debugging) Log(msg string, fields ...mlog.Field) {
if debug.Enabled() {
debug.logger.LogM(debug.levels, msg, fields...)
}
}
type Packet ber.Packet
func (p Packet) LogClone() any {
bp := ber.Packet(p)
var b bytes.Buffer
ber.WritePacket(&b, &bp)
return b.String()
}
func PacketToField(packet *ber.Packet) mlog.Field {
if packet == nil {
return mlog.Any("packet", nil)
}
return mlog.Any("packet", Packet(*packet))
}

View File

@ -6,9 +6,9 @@
package ldap
import (
"log"
ber "github.com/go-asn1-ber/asn1-ber"
"github.com/mattermost/mattermost/server/public/shared/mlog"
)
// DelRequest implements an LDAP deletion request
@ -52,13 +52,14 @@ func (l *Conn) Del(delRequest *DelRequest) error {
return err
}
if packet.Children[1].Tag == ApplicationDelResponse {
tag := packet.Children[1].Tag
if tag == ApplicationDelResponse {
err := GetLDAPError(packet)
if err != nil {
return err
}
} else {
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
l.Debug.Log("Unexpected Response tag", mlog.Uint("tag", tag))
}
return nil
}

View File

@ -1,19 +1,17 @@
// Package ldap - moddn.go contains ModifyDN functionality
//
// https://tools.ietf.org/html/rfc4511
// ModifyDNRequest ::= [APPLICATION 12] SEQUENCE {
// entry LDAPDN,
// newrdn RelativeLDAPDN,
// deleteoldrdn BOOLEAN,
// newSuperior [0] LDAPDN OPTIONAL }
//
//
// ModifyDNRequest ::= [APPLICATION 12] SEQUENCE {
// entry LDAPDN,
// newrdn RelativeLDAPDN,
// deleteoldrdn BOOLEAN,
// newSuperior [0] LDAPDN OPTIONAL }
package ldap
import (
"log"
ber "github.com/go-asn1-ber/asn1-ber"
"github.com/mattermost/mattermost/server/public/shared/mlog"
)
// ModifyDNRequest holds the request to modify a DN
@ -33,7 +31,9 @@ type ModifyDNRequest struct {
// RDN of the given DN.
//
// A call like
// mdnReq := NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
//
// mdnReq := NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
//
// will setup the request to just rename uid=someone,dc=example,dc=org to
// uid=newname,dc=example,dc=org.
func NewModifyDNRequest(dn string, rdn string, delOld bool, newSup string) *ModifyDNRequest {
@ -73,13 +73,14 @@ func (l *Conn) ModifyDN(m *ModifyDNRequest) error {
return err
}
if packet.Children[1].Tag == ApplicationModifyDNResponse {
tag := packet.Children[1].Tag
if tag == ApplicationModifyDNResponse {
err := GetLDAPError(packet)
if err != nil {
return err
}
} else {
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
l.Debug.Log("Unexpected Response tag", mlog.Uint("tag", tag))
}
return nil
}

View File

@ -26,9 +26,8 @@
package ldap
import (
"log"
ber "github.com/go-asn1-ber/asn1-ber"
"github.com/mattermost/mattermost/server/public/shared/mlog"
)
// Change operation choices
@ -139,13 +138,14 @@ func (l *Conn) Modify(modifyRequest *ModifyRequest) error {
return err
}
if packet.Children[1].Tag == ApplicationModifyResponse {
tag := packet.Children[1].Tag
if tag == ApplicationModifyResponse {
err := GetLDAPError(packet)
if err != nil {
return err
}
} else {
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
l.Debug.Log("Unexpected Response tag", mlog.Uint("tag", tag))
}
return nil
}

View File

@ -4,6 +4,7 @@ import (
"errors"
ber "github.com/go-asn1-ber/asn1-ber"
"github.com/mattermost/mattermost/server/public/shared/mlog"
)
var (
@ -28,26 +29,31 @@ func (l *Conn) doRequest(req request) (*messageContext, error) {
return nil, err
}
if l.Debug {
l.Debug.PrintPacket(packet)
}
l.Debug.Log("Sending package", PacketToField(packet))
msgCtx, err := l.sendMessage(packet)
if err != nil {
return nil, err
}
l.Debug.Printf("%d: returning", msgCtx.id)
l.Debug.Log("Send package", mlog.Int("id", msgCtx.id))
return msgCtx, nil
}
func (l *Conn) readPacket(msgCtx *messageContext) (*ber.Packet, error) {
l.Debug.Printf("%d: waiting for response", msgCtx.id)
l.Debug.Log("Waiting for response", mlog.Int("id", msgCtx.id))
packetResponse, ok := <-msgCtx.responses
if !ok {
return nil, NewError(ErrorNetwork, errRespChanClosed)
}
packet, err := packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if l.Debug.Enabled() {
if err := addLDAPDescriptions(packet); err != nil {
return nil, err
}
l.Debug.Log("Got response", mlog.Int("id", msgCtx.id), PacketToField(packet), mlog.Err(err))
}
if err != nil {
return nil, err
}
@ -56,11 +62,5 @@ func (l *Conn) readPacket(msgCtx *messageContext) (*ber.Packet, error) {
return nil, NewError(ErrorNetwork, errCouldNotRetMsg)
}
if l.Debug {
if err = addLDAPDescriptions(packet); err != nil {
return nil, err
}
l.Debug.PrintPacket(packet)
}
return packet, nil
}

View File

@ -300,10 +300,11 @@ func NewSearchRequest(
// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the
// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically.
// The following four cases are possible given the arguments:
// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size
// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries
// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size
// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries
//
// A requested pagingSize of 0 is interpreted as no limit by LDAP servers.
func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) {
var pagingControl *ControlPaging
@ -326,7 +327,6 @@ func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32)
searchResult := new(SearchResult)
for {
result, err := l.Search(searchRequest)
l.Debug.Printf("Looking for Paging Control...")
if err != nil {
return searchResult, err
}
@ -344,25 +344,21 @@ func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32)
searchResult.Controls = append(searchResult.Controls, control)
}
l.Debug.Printf("Looking for Paging Control...")
pagingResult := FindControl(result.Controls, ControlTypePaging)
if pagingResult == nil {
pagingControl = nil
l.Debug.Printf("Could not find paging control. Breaking...")
break
}
cookie := pagingResult.(*ControlPaging).Cookie
if len(cookie) == 0 {
pagingControl = nil
l.Debug.Printf("Could not find cookie. Breaking...")
break
}
pagingControl.SetCookie(cookie)
}
if pagingControl != nil {
l.Debug.Printf("Abandoning Paging...")
pagingControl.PagingSize = 0
l.Search(searchRequest)
}

View File

@ -1,4 +0,0 @@
language: go
sudo: false
go:
- 1.x

View File

@ -1,23 +1,27 @@
![Logr_Logo](https://user-images.githubusercontent.com/7295363/200433587-ae9df127-9427-4753-a0a0-85723a216e0e.png)
> A fully asynchronous, contextual logger for Go.
# logr
[![GoDoc](https://godoc.org/github.com/mattermost/logr?status.svg)](http://godoc.org/github.com/mattermost/logr)
[![Report Card](https://goreportcard.com/badge/github.com/mattermost/logr)](https://goreportcard.com/report/github.com/mattermost/logr)
Logr is a fully asynchronous, contextual logger for Go.
It is very much inspired by [Logrus](https://github.com/sirupsen/logrus) but addresses two issues:
Logr is inspired by [Logrus](https://github.com/sirupsen/logrus) and [Zap](https://github.com/uber-go/zap) but addresses a number of issues:
1. Logr is fully asynchronous, meaning that all formatting and writing is done in the background. Latency sensitive applications benefit from not waiting for logging to complete.
2. Logr provides custom filters which provide more flexibility than Trace, Debug, Info... levels. If you need to temporarily increase verbosity of logging while tracking down a problem you can avoid the fire-hose that typically comes from Debug or Trace by using custom filters.
3. Logr generates much less allocations than Logrus, and is close to Zap in allocations.
## Concepts
<!-- markdownlint-disable MD033 -->
| entity | description |
| ------ | ----------- |
| Logr | Engine instance typically instantiated once; used to configure logging.<br>```lgr,_ := logr.New()```|
| Logger | Provides contextual logging via fields; lightweight, can be created once and accessed globally or create on demand.<br>```logger := lgr.NewLogger()```<br>```logger2 := logger.WithField("user", "Sam")```|
| Logger | Provides contextual logging via fields; lightweight, can be created once and accessed globally, or created on demand.<br>```logger := lgr.NewLogger()```<br>```logger2 := logger.With(logr.String("user", "Sam"))```|
| Target | A destination for log items such as console, file, database or just about anything that can be written to. Each target has its own filter/level and formatter, and any number of targets can be added to a Logr. Targets for file, syslog and any io.Writer are built-in and it is easy to create your own. You can also use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) via a simple [adapter](https://github.com/wiggin77/logrus4logr).|
| Filter | Determines which logging calls get written versus filtered out. Also determines which logging calls generate a stack trace.<br>```filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Fatal}```|
| Formatter | Formats the output. Logr includes built-in formatters for JSON and plain text with delimiters. It is easy to create your own formatters or you can also use any [Logrus formatters](https://github.com/sirupsen/logrus#formatters) via a simple [adapter](https://github.com/wiggin77/logrus4logr).<br>```formatter := &format.Plain{Delim: " \| "}```|
@ -39,7 +43,7 @@ lgr.AddTarget(t)
// One or more Loggers can be created, shared, used concurrently,
// or created on demand.
logger := lgr.NewLogger().WithField("user", "Sarah")
logger := lgr.NewLogger().With(logr.String("user", "Sarah"))
// Now we can log to the target(s).
logger.Debug("login attempt")
@ -53,22 +57,22 @@ lgr.Shutdown()
Fields allow for contextual logging, meaning information can be added to log statements without changing the statements themselves. Information can be shared across multiple logging statements thus allowing log analysis tools to group them.
Fields are added via Loggers:
Fields can be added to a Logger via `Logger.With` or included with each log record:
```go
lgr,_ := logr.New()
// ... add targets ...
logger := lgr.NewLogger().WithFields(logr.Fields{
"user": user,
"role": role})
logger.Info("login attempt")
logger := lgr.NewLogger().With(
logr.Any("user", user),
logr.String("role", role)
)
logger.Info("login attempt", logr.Int("attempt_count", count))
// ... later ...
logger.Info("login successful")
logger.Info("login", logr.String("result", result))
```
`Logger.WithFields` can be used to create additional Loggers that add more fields.
Logr fields are inspired by and work the same as [Logrus fields](https://github.com/sirupsen/logrus#fields).
Logr fields are inspired by and work the same as [Zap fields](https://pkg.go.dev/go.uber.org/zap#Field).
## Filters
@ -97,21 +101,21 @@ Logr also supports custom filters (logr.CustomFilter) which allow fine grained i
formatter := &formatters.Plain{Delim: " | "}
tgr := targets.NewWriterTarget(filter, formatter, os.StdOut, 1000)
lgr.AddTarget(tgr)
logger := lgr.NewLogger().WithFields(logr.Fields{"user": "Bob", "role": "admin"})
logger := lgr.NewLogger().With(logr.String("user", "Bob"), logr.String("role", "admin"))
logger.Log(LoginLevel, "this item will get logged")
logger.Debug("won't be logged since Debug wasn't added to custom filter")
```
Both filter types allow you to determine which levels require a stack trace to be output. Note that generating stack traces cannot happen fully asynchronously and thus add latency to the calling goroutine.
Both filter types allow you to determine which levels force a stack trace to be output. Note that generating stack traces cannot happen fully asynchronously and thus add some latency to the calling goroutine.
## Targets
There are built-in targets for outputting to syslog, file, or any `io.Writer`. More will be added.
There are built-in targets for outputting to syslog, file, TCP, or any `io.Writer`. More will be added.
You can use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) via a simple [adapter](https://github.com/wiggin77/logrus4logr).
You can create your own target by implementing the [Target](./target.go) interface.
You can create your own target by implementing the simple [Target](./target.go) interface.
Example target that outputs to `io.Writer`:
@ -130,7 +134,7 @@ func (w *Writer) Init() error {
return nil
}
// Write will always be called by a single goroutine, so no locking needed.
// Write will always be called by a single internal Logr goroutine, so no locking needed.
func (w *Writer) Write(p []byte, rec *logr.LogRec) (int, error) {
return w.out.Write(buf.Bytes())
}
@ -153,9 +157,18 @@ You can create your own formatter by implementing the [Formatter](./formatter.go
Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error)
```
## Handlers
## Configuration options
When creating the Logr instance, you can add several handlers that get called when exceptional events occur:
When creating the Logr instance, you can set configuration options. For example:
```go
lgr, err := logr.New(
logr.MaxQueueSize(1000),
logr.StackFilter("mypackage1", "mypackage2"),
)
```
Some options are documented below. See [options.go](./options.go) for all available configuration options.
### ```Logr.OnLoggerError(err error)```
@ -168,7 +181,7 @@ It may be tempting to log this error, however there is a danger that logging thi
Called on an attempt to add a log record to a full Logr queue. This generally means the Logr maximum queue size is too small, or at least one target is very slow. Logr maximum queue size can be changed before adding any targets via:
```go
lgr := logr.Logr{MaxQueueSize: 10000}
lgr, err := logr.New(logr.MaxQueueSize(2000))
```
Returning true will drop the log record. False will block until the log record can be added, which creates a natural throttle at the expense of latency for the calling goroutine. The default is to block.
@ -186,3 +199,7 @@ OnExit and OnPanic are called when the Logger.FatalXXX and Logger.PanicXXX funct
In both cases the default behavior is to shut down gracefully, draining all targets, and calling `os.Exit` or `panic` respectively.
When adding your own handlers, be sure to call `Logr.Shutdown` before exiting the application to avoid losing log records.
### ```Logr.StackFilter(pkg ...string)```
StackFilter sets a list of package names to exclude from the top of stack traces. The `Logr` packages are automatically filtered.

View File

@ -157,7 +157,7 @@ func newTarget(targetType string, options json.RawMessage, factory TargetFactory
return t, nil
}
}
return nil, fmt.Errorf("target type '%s' is unrecogized", targetType)
return nil, fmt.Errorf("target type '%s' is unrecognized", targetType)
}
func newFormatter(format string, options json.RawMessage, factory FormatterFactory) (logr.Formatter, error) {
@ -205,5 +205,5 @@ func newFormatter(format string, options json.RawMessage, factory FormatterFacto
return f, nil
}
}
return nil, fmt.Errorf("format '%s' is unrecogized", format)
return nil, fmt.Errorf("format '%s' is unrecognized", format)
}

View File

@ -31,4 +31,7 @@ const (
// DefaultMaxPooledBuffer is the maximum size a pooled buffer can be.
// Buffers that grow beyond this size are garbage collected.
DefaultMaxPooledBuffer = 1024 * 1024
// DefaultMaxFieldLength is the maximum size a String or fmt.Stringer field can be.
DefaultMaxFieldLength = -1
)

View File

@ -187,16 +187,22 @@ func (f Field) ValueString(w io.Writer, shouldQuote func(s string) bool) error {
break arr
}
}
if _, err = w.Write(Comma); err != nil {
break arr
if i != a.Len()-1 {
if _, err = w.Write(Comma); err != nil {
break arr
}
}
}
case MapType:
a := reflect.ValueOf(f.Interface)
iter := a.MapRange()
// Advance to the first element up front; return early if the map is empty.
if !iter.Next() {
return nil
}
it:
for iter.Next() {
for {
if _, err = io.WriteString(w, iter.Key().String()); err != nil {
break it
}
@ -219,9 +225,15 @@ func (f Field) ValueString(w io.Writer, shouldQuote func(s string) bool) error {
break it
}
}
if !iter.Next() {
break it
}
if _, err = w.Write(Comma); err != nil {
break it
}
}
case UnknownType:
@ -269,19 +281,19 @@ func fieldForAny(key string, val interface{}) Field {
}
return Bool(key, *v)
case float64:
return Float64(key, v)
return Float(key, v)
case *float64:
if v == nil {
return nilField(key)
}
return Float64(key, *v)
return Float(key, *v)
case float32:
return Float32(key, v)
return Float(key, v)
case *float32:
if v == nil {
return nilField(key)
}
return Float32(key, *v)
return Float(key, *v)
case int:
return Int(key, v)
case *int:
@ -290,33 +302,33 @@ func fieldForAny(key string, val interface{}) Field {
}
return Int(key, *v)
case int64:
return Int64(key, v)
return Int(key, v)
case *int64:
if v == nil {
return nilField(key)
}
return Int64(key, *v)
return Int(key, *v)
case int32:
return Int32(key, v)
return Int(key, v)
case *int32:
if v == nil {
return nilField(key)
}
return Int32(key, *v)
return Int(key, *v)
case int16:
return Int32(key, int32(v))
return Int(key, int32(v))
case *int16:
if v == nil {
return nilField(key)
}
return Int32(key, int32(*v))
return Int(key, int32(*v))
case int8:
return Int32(key, int32(v))
return Int(key, int32(v))
case *int8:
if v == nil {
return nilField(key)
}
return Int32(key, int32(*v))
return Int(key, int32(*v))
case string:
return String(key, v)
case *string:
@ -332,33 +344,33 @@ func fieldForAny(key string, val interface{}) Field {
}
return Uint(key, *v)
case uint64:
return Uint64(key, v)
return Uint(key, v)
case *uint64:
if v == nil {
return nilField(key)
}
return Uint64(key, *v)
return Uint(key, *v)
case uint32:
return Uint32(key, v)
return Uint(key, v)
case *uint32:
if v == nil {
return nilField(key)
}
return Uint32(key, *v)
return Uint(key, *v)
case uint16:
return Uint32(key, uint32(v))
return Uint(key, uint32(v))
case *uint16:
if v == nil {
return nilField(key)
}
return Uint32(key, uint32(*v))
return Uint(key, uint32(*v))
case uint8:
return Uint32(key, uint32(v))
return Uint(key, uint32(v))
case *uint8:
if v == nil {
return nilField(key)
}
return Uint32(key, uint32(*v))
return Uint(key, uint32(*v))
case []byte:
if v == nil {
return nilField(key)

View File

@ -9,53 +9,70 @@ import (
// For best performance when passing a struct (or struct pointer),
// implement `logr.LogWriter` on the struct, otherwise reflection
// will be used to generate a string representation.
func Any(key string, val interface{}) Field {
func Any(key string, val any) Field {
return fieldForAny(key, val)
}
// Int64 constructs a field containing a key and Int64 value.
//
// Deprecated: Use [logr.Int] instead.
func Int64(key string, val int64) Field {
return Field{Key: key, Type: Int64Type, Integer: val}
}
// Int32 constructs a field containing a key and Int32 value.
//
// Deprecated: Use [logr.Int] instead.
func Int32(key string, val int32) Field {
return Field{Key: key, Type: Int32Type, Integer: int64(val)}
}
// Int constructs a field containing a key and Int value.
func Int(key string, val int) Field {
// Int constructs a field containing a key and int value.
func Int[T ~int | ~int8 | ~int16 | ~int32 | ~int64](key string, val T) Field {
return Field{Key: key, Type: IntType, Integer: int64(val)}
}
// Uint64 constructs a field containing a key and Uint64 value.
//
// Deprecated: Use [logr.Uint] instead.
func Uint64(key string, val uint64) Field {
return Field{Key: key, Type: Uint64Type, Integer: int64(val)}
}
// Uint32 constructs a field containing a key and Uint32 value.
//
// Deprecated: Use [logr.Uint] instead
func Uint32(key string, val uint32) Field {
return Field{Key: key, Type: Uint32Type, Integer: int64(val)}
}
// Uint constructs a field containing a key and Uint value.
func Uint(key string, val uint) Field {
// Uint constructs a field containing a key and uint value.
func Uint[T ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr](key string, val T) Field {
return Field{Key: key, Type: UintType, Integer: int64(val)}
}
// Float64 constructs a field containing a key and Float64 value.
//
// Deprecated: Use [logr.Float] instead
func Float64(key string, val float64) Field {
return Field{Key: key, Type: Float64Type, Float: val}
}
// Float32 constructs a field containing a key and Float32 value.
//
// Deprecated: Use [logr.Float] instead
func Float32(key string, val float32) Field {
return Field{Key: key, Type: Float32Type, Float: float64(val)}
}
// Float constructs a field containing a key and float value.
func Float[T ~float32 | ~float64](key string, val T) Field {
return Field{Key: key, Type: Float32Type, Float: float64(val)}
}
// String constructs a field containing a key and String value.
func String(key string, val string) Field {
return Field{Key: key, Type: StringType, String: val}
func String[T ~string | ~[]byte](key string, val T) Field {
return Field{Key: key, Type: StringType, String: string(val)}
}
// Stringer constructs a field containing a key and a `fmt.Stringer` value.
@ -75,7 +92,7 @@ func NamedErr(key string, err error) Field {
}
// Bool constructs a field containing a key and bool value.
func Bool(key string, val bool) Field {
func Bool[T ~bool](key string, val T) Field {
var b int64
if val {
b = 1
@ -100,11 +117,11 @@ func Millis(key string, val int64) Field {
}
// Array constructs a field containing a key and array value.
func Array(key string, val interface{}) Field {
func Array[S ~[]E, E any](key string, val S) Field {
return Field{Key: key, Type: ArrayType, Interface: val}
}
// Map constructs a field containing a key and map value.
func Map(key string, val interface{}) Field {
func Map[M ~map[K]V, K comparable, V any](key string, val M) Field {
return Field{Key: key, Type: MapType, Interface: val}
}

View File

@ -2,6 +2,7 @@ package logr
import (
"bytes"
"fmt"
"io"
"runtime"
"strconv"
@ -27,6 +28,31 @@ const (
TimestampMillisFormat = "Jan _2 15:04:05.000"
)
// LimitByteSlice truncates b to at most limit bytes, appending "..." as a
// truncation marker. When limit <= 0 or b already fits, b is returned
// unchanged (no copy is made).
func LimitByteSlice(b []byte, limit int) []byte {
	if limit <= 0 || limit >= len(b) {
		return b
	}
	// Allocate once with room for the marker, then copy the kept prefix.
	out := make([]byte, 0, limit+3)
	out = append(out, b[:limit]...)
	return append(out, "..."...)
}
// LimitString truncates b to at most limit bytes, appending "..." as a
// truncation marker. When limit <= 0 or b already fits, b is returned
// unchanged.
//
// NOTE: the limit is applied in bytes, not runes, so a multi-byte UTF-8
// character may be split at the cut point.
func LimitString(b string, limit int) string {
	// Slice and concatenate directly instead of round-tripping through
	// []byte (the previous implementation allocated two copies even when
	// no truncation was needed). Output is byte-identical to before.
	if limit > 0 && limit < len(b) {
		return b[:limit] + "..."
	}
	return b
}
// LimitedStringer wraps a fmt.Stringer and truncates its String() output
// to at most Limit bytes (see LimitString for the truncation rules).
type LimitedStringer struct {
	fmt.Stringer
	// Limit is the maximum number of bytes returned by String();
	// a value <= 0 disables truncation.
	Limit int
}
// String implements fmt.Stringer, returning the wrapped Stringer's output
// truncated to at most ls.Limit bytes.
func (ls *LimitedStringer) String() string {
	s := ls.Stringer.String()
	return LimitString(s, ls.Limit)
}
// Writer embeds an io.Writer.
// NOTE(review): no additional behavior is visible in this chunk; presumably
// a common wrapper type for output targets — confirm against its callers.
type Writer struct {
	io.Writer
}

View File

@ -79,7 +79,7 @@ type gelfRecord struct {
func (gr gelfRecord) MarshalJSONObject(enc *gojay.Encoder) {
enc.AddStringKey(GelfVersionKey, GelfVersion)
enc.AddStringKey(GelfHostKey, gr.getHostname())
enc.AddStringKey(GelfShortKey, gr.Msg())
enc.AddStringKey(GelfShortKey, gr.safeMsg("-")) // Gelf requires a non-empty `short_message`
if gr.level.Stacktrace {
frames := gr.StackFrames()
@ -131,6 +131,15 @@ func (gr gelfRecord) IsNil() bool {
return gr.LogRec == nil
}
// safeMsg returns the log record Message field, or alt when the message is
// empty (GELF requires a non-empty `short_message`).
func (gr gelfRecord) safeMsg(alt string) string {
	if s := gr.Msg(); s != "" {
		return s
	}
	return alt
}
func (g *Gelf) getHostname() string {
if g.Hostname != "" {
return g.Hostname

View File

@ -42,6 +42,7 @@ func New(opts ...Option) (*Logr, error) {
shutdownTimeout: DefaultShutdownTimeout,
flushTimeout: DefaultFlushTimeout,
maxPooledBuffer: DefaultMaxPooledBuffer,
maxFieldLen: DefaultMaxFieldLength,
}
lgr := &Logr{options: options}
@ -246,6 +247,29 @@ func (lgr *Logr) SetMetricsCollector(collector MetricsCollector, updateFreqMilli
// this function either blocks or the log record is dropped, depending on
// the result of calling `OnQueueFull`.
func (lgr *Logr) enqueue(rec *LogRec) {
// check if a limit has been configured
if limit := lgr.options.maxFieldLen; limit > 0 {
// we limit the message
rec.msg = LimitString(rec.msg, limit)
// then we range over fields to apply the limit
for i := range rec.fields {
switch rec.fields[i].Type {
case StringType:
rec.fields[i].String = LimitString(rec.fields[i].String, limit)
case StringerType:
if v, ok := rec.fields[i].Interface.(fmt.Stringer); ok {
rec.fields[i].Interface = &LimitedStringer{
Stringer: v,
Limit: limit,
}
}
default:
// no limits for other field types
}
}
}
select {
case lgr.in <- rec:
default:

View File

@ -23,6 +23,7 @@ type options struct {
metricsCollector MetricsCollector
metricsUpdateFreqMillis int64
stackFilter map[string]struct{}
maxFieldLen int
}
// MaxQueueSize is the maximum number of log records that can be queued.
@ -76,7 +77,7 @@ func OnTargetQueueFull(f func(target Target, rec *LogRec, maxQueueSize int) bool
}
// OnExit, when not nil, is called when a FatalXXX style log API is called.
// When nil, then the default behavior is to cleanly shut down this Logr and
// When nil, the default behavior is to cleanly shut down this Logr and
// call `os.Exit(code)`.
func OnExit(f func(code int)) Option {
return func(l *Logr) error {
@ -86,7 +87,7 @@ func OnExit(f func(code int)) Option {
}
// OnPanic, when not nil, is called when a PanicXXX style log API is called.
// When nil, then the default behavior is to cleanly shut down this Logr and
// When nil, the default behavior is to cleanly shut down this Logr and
// call `panic(err)`.
func OnPanic(f func(err interface{})) Option {
return func(l *Logr) error {
@ -190,3 +191,16 @@ func StackFilter(pkg ...string) Option {
return nil
}
}
// MaxFieldLen sets the maximum length of the log message and of each
// string/Stringer field; anything beyond the limit is discarded and a
// "..." marker is appended (see LimitString).
// NOTE(review): the limit is applied in bytes, not characters, so
// multi-byte UTF-8 runes may be split at the cut point.
// A size of zero disables truncation. Defaults to DefaultMaxFieldLength.
// A negative size is rejected with an error.
func MaxFieldLen(size int) Option {
	return func(l *Logr) error {
		if size < 0 {
			return errors.New("size cannot be less than zero")
		}
		l.options.maxFieldLen = size
		return nil
	}
}

View File

@ -1,3 +1,4 @@
//go:build !windows && !nacl && !plan9
// +build !windows,!nacl,!plan9
package targets
@ -19,7 +20,7 @@ type Syslog struct {
// SyslogOptions provides parameters for dialing a syslog daemon.
type SyslogOptions struct {
IP string `json:"ip,omitempty"` // deprecated
IP string `json:"ip,omitempty"` // deprecated (use Host instead)
Host string `json:"host"`
Port int `json:"port"`
TLS bool `json:"tls"`
@ -55,6 +56,11 @@ func (s *Syslog) Init() error {
network := "tcp"
var config *tls.Config
host := s.params.Host
if host == "" {
host = s.params.IP
}
if s.params.TLS {
network = "tcp+tls"
config = &tls.Config{InsecureSkipVerify: s.params.Insecure}
@ -66,7 +72,7 @@ func (s *Syslog) Init() error {
config.RootCAs = pool
}
}
raddr := fmt.Sprintf("%s:%d", s.params.IP, s.params.Port)
raddr := fmt.Sprintf("%s:%d", host, s.params.Port)
if raddr == ":0" {
// If no IP:port provided then connect to local syslog.
raddr = ""

View File

@ -1,897 +0,0 @@
Mattermost Licensing
SOFTWARE LICENSING
You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE
- See MIT-COMPILED-LICENSE.md included in compiled versions for details
You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways:
1. Under the Free Software Foundations GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or
2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com
You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/default.json, i18n/, model/,
plugin/ and all subdirectories thereof) under the Apache License v2.0.
We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not
link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and
(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of
a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license.
MATTERMOST TRADEMARK GUIDELINES
Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organizations Trademark
Standards of Use at https://mattermost.com/trademark-standards-of-use/. For trademark approval or any questions
you have about using these trademarks, please email trademark@mattermost.com
------------------------------------------------------------------------------------------------------------------------------
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
The software is released under the terms of the GNU Affero General Public
License, version 3.
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<http://www.gnu.org/licenses/>.

File diff suppressed because it is too large Load Diff

View File

@ -1,203 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
import (
"net/http"
"strings"
)
// Values stored in a ChannelMember's NotifyProps to control per-channel
// notification behavior.
const (
	// Desktop/push notification levels: defer to the user's global setting,
	// notify for every message, for mentions only, or never.
	ChannelNotifyDefault = "default"
	ChannelNotifyAll     = "all"
	ChannelNotifyMention = "mention"
	ChannelNotifyNone    = "none"

	// Mark-unread levels: mark the channel unread for any message, or only
	// when the member is mentioned (mention-only is what IsChannelMuted
	// treats as "muted").
	ChannelMarkUnreadAll     = "all"
	ChannelMarkUnreadMention = "mention"

	// Values for the ignore_channel_mentions preference.
	IgnoreChannelMentionsDefault = "default"
	IgnoreChannelMentionsOff     = "off"
	IgnoreChannelMentionsOn      = "on"

	// NotifyProps key under which the ignore_channel_mentions value is stored.
	IgnoreChannelMentionsNotifyProp = "ignore_channel_mentions"
)
// ChannelUnread carries the unread message and mention counters for a single
// channel within a team. NotifyProps is excluded from JSON output.
// NOTE(review): the *Root fields presumably count root (non-thread) posts
// only, for collapsed-threads accounting — confirm against the store layer.
type ChannelUnread struct {
	TeamId           string    `json:"team_id"`
	ChannelId        string    `json:"channel_id"`
	MsgCount         int64     `json:"msg_count"`
	MentionCount     int64     `json:"mention_count"`
	MentionCountRoot int64     `json:"mention_count_root"`
	MsgCountRoot     int64     `json:"msg_count_root"`
	NotifyProps      StringMap `json:"-"`
}

// ChannelUnreadAt is ChannelUnread extended with the user the counters belong
// to and the timestamp of their last view of the channel.
type ChannelUnreadAt struct {
	TeamId           string    `json:"team_id"`
	UserId           string    `json:"user_id"`
	ChannelId        string    `json:"channel_id"`
	MsgCount         int64     `json:"msg_count"`
	MentionCount     int64     `json:"mention_count"`
	MentionCountRoot int64     `json:"mention_count_root"`
	MsgCountRoot     int64     `json:"msg_count_root"`
	LastViewedAt     int64     `json:"last_viewed_at"`
	NotifyProps      StringMap `json:"-"`
}
// ChannelMember is the per-user membership record for a channel: role
// assignments, read/mention counters, and notification preferences.
type ChannelMember struct {
	ChannelId        string    `json:"channel_id"`
	UserId           string    `json:"user_id"`
	Roles            string    `json:"roles"` // space-separated role names; split via GetRoles
	LastViewedAt     int64     `json:"last_viewed_at"`
	MsgCount         int64     `json:"msg_count"`
	MentionCount     int64     `json:"mention_count"`
	MentionCountRoot int64     `json:"mention_count_root"`
	MsgCountRoot     int64     `json:"msg_count_root"`
	NotifyProps      StringMap `json:"notify_props"` // validated by IsValid; defaults from GetDefaultChannelNotifyProps
	LastUpdateAt     int64     `json:"last_update_at"` // refreshed by PreSave/PreUpdate
	SchemeGuest      bool      `json:"scheme_guest"`
	SchemeUser       bool      `json:"scheme_user"`
	SchemeAdmin      bool      `json:"scheme_admin"`
	ExplicitRoles    string    `json:"explicit_roles"`
}
// The following GraphQL accessor methods return the 64-bit counters as
// float64 because the GraphQL spec has no 64-bit integer type. The trailing
// underscore is a hack that keeps the attribute name unchanged in the
// GraphQL schema.

// LastViewedAt_ returns LastViewedAt as a float64 for GraphQL.
func (o *ChannelMember) LastViewedAt_() float64 {
	return float64(o.LastViewedAt)
}

// MsgCount_ returns MsgCount as a float64 for GraphQL.
func (o *ChannelMember) MsgCount_() float64 {
	return float64(o.MsgCount)
}

// MentionCount_ returns MentionCount as a float64 for GraphQL.
func (o *ChannelMember) MentionCount_() float64 {
	return float64(o.MentionCount)
}

// MentionCountRoot_ returns MentionCountRoot as a float64 for GraphQL.
func (o *ChannelMember) MentionCountRoot_() float64 {
	return float64(o.MentionCountRoot)
}

// LastUpdateAt_ returns LastUpdateAt as a float64 for GraphQL.
func (o *ChannelMember) LastUpdateAt_() float64 {
	return float64(o.LastUpdateAt)
}
// ChannelMemberWithTeamData contains ChannelMember appended with extra team
// information as well.
type ChannelMemberWithTeamData struct {
	ChannelMember
	TeamDisplayName string `json:"team_display_name"`
	TeamName        string `json:"team_name"`
	TeamUpdateAt    int64  `json:"team_update_at"`
}

// ChannelMembers is a list of channel memberships.
type ChannelMembers []ChannelMember

// ChannelMembersWithTeamData is a list of memberships with team data attached.
type ChannelMembersWithTeamData []ChannelMemberWithTeamData

// ChannelMemberForExport augments a membership with the channel name and the
// member's username.
type ChannelMemberForExport struct {
	ChannelMember
	ChannelName string
	Username    string
}
// IsValid performs stateless validation of the membership and returns an
// *AppError describing the first problem found, or nil when the member is
// well formed. It checks the channel and user ids, each notify property
// that is present, and the length of the Roles string. The length caps
// (20/40) bound stored preference values before semantic validation.
func (o *ChannelMember) IsValid() *AppError {
	if !IsValidId(o.ChannelId) {
		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest)
	}

	if !IsValidId(o.UserId) {
		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
	}

	notifyLevel := o.NotifyProps[DesktopNotifyProp]
	if len(notifyLevel) > 20 || !IsChannelNotifyLevelValid(notifyLevel) {
		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.notify_level.app_error", nil, "notify_level="+notifyLevel, http.StatusBadRequest)
	}

	markUnreadLevel := o.NotifyProps[MarkUnreadNotifyProp]
	if len(markUnreadLevel) > 20 || !IsChannelMarkUnreadLevelValid(markUnreadLevel) {
		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.unread_level.app_error", nil, "mark_unread_level="+markUnreadLevel, http.StatusBadRequest)
	}

	// Push and email levels are optional; validate only when present.
	if pushLevel, ok := o.NotifyProps[PushNotifyProp]; ok {
		if len(pushLevel) > 20 || !IsChannelNotifyLevelValid(pushLevel) {
			return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.push_level.app_error", nil, "push_notification_level="+pushLevel, http.StatusBadRequest)
		}
	}

	if sendEmail, ok := o.NotifyProps[EmailNotifyProp]; ok {
		if len(sendEmail) > 20 || !IsSendEmailValid(sendEmail) {
			// Fixed: the detail string previously mislabeled this value as
			// "push_notification_level" (copy-paste from the push branch);
			// it reports the email preference.
			return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.email_value.app_error", nil, "email_value="+sendEmail, http.StatusBadRequest)
		}
	}

	if ignoreChannelMentions, ok := o.NotifyProps[IgnoreChannelMentionsNotifyProp]; ok {
		if len(ignoreChannelMentions) > 40 || !IsIgnoreChannelMentionsValid(ignoreChannelMentions) {
			return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.ignore_channel_mentions_value.app_error", nil, "ignore_channel_mentions="+ignoreChannelMentions, http.StatusBadRequest)
		}
	}

	if len(o.Roles) > UserRolesMaxLength {
		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.roles_limit.app_error",
			map[string]interface{}{"Limit": UserRolesMaxLength}, "", http.StatusBadRequest)
	}

	return nil
}
// PreSave refreshes LastUpdateAt; call before first persisting the member.
func (o *ChannelMember) PreSave() {
	o.LastUpdateAt = GetMillis()
}

// PreUpdate refreshes LastUpdateAt; call before persisting changes.
func (o *ChannelMember) PreUpdate() {
	o.LastUpdateAt = GetMillis()
}
// GetRoles splits the whitespace-separated Roles string into a slice of
// individual role names.
func (o *ChannelMember) GetRoles() []string {
	roleNames := strings.Fields(o.Roles)
	return roleNames
}
// SetChannelMuted mutes or unmutes the channel for this member by adjusting
// the mark-unread notify property: muted channels mark unread on mentions
// only, unmuted channels mark unread on every message.
//
// Fixed: the previous implementation ignored the muted argument entirely and
// toggled based on the current state (via IsChannelMuted), so calling
// SetChannelMuted(true) on an already muted channel would unmute it. The
// method is now idempotent and honors its argument.
func (o *ChannelMember) SetChannelMuted(muted bool) {
	if muted {
		o.NotifyProps[MarkUnreadNotifyProp] = ChannelMarkUnreadMention
	} else {
		o.NotifyProps[MarkUnreadNotifyProp] = ChannelMarkUnreadAll
	}
}
// IsChannelMuted reports whether this member has muted the channel, i.e. the
// mark-unread preference is set to mention-only.
func (o *ChannelMember) IsChannelMuted() bool {
	markUnread := o.NotifyProps[MarkUnreadNotifyProp]
	return markUnread == ChannelMarkUnreadMention
}
// IsChannelNotifyLevelValid reports whether notifyLevel is one of the
// recognized channel notification levels.
func IsChannelNotifyLevelValid(notifyLevel string) bool {
	switch notifyLevel {
	case ChannelNotifyDefault, ChannelNotifyAll, ChannelNotifyMention, ChannelNotifyNone:
		return true
	default:
		return false
	}
}
// IsChannelMarkUnreadLevelValid reports whether markUnreadLevel is one of the
// recognized mark-unread levels.
func IsChannelMarkUnreadLevelValid(markUnreadLevel string) bool {
	switch markUnreadLevel {
	case ChannelMarkUnreadAll, ChannelMarkUnreadMention:
		return true
	default:
		return false
	}
}
// IsSendEmailValid reports whether sendEmail is an accepted email preference
// value: the default sentinel or the literal strings "true"/"false".
func IsSendEmailValid(sendEmail string) bool {
	switch sendEmail {
	case ChannelNotifyDefault, "true", "false":
		return true
	default:
		return false
	}
}
// IsIgnoreChannelMentionsValid reports whether ignoreChannelMentions is one
// of the recognized on/off/default values.
func IsIgnoreChannelMentionsValid(ignoreChannelMentions string) bool {
	switch ignoreChannelMentions {
	case IgnoreChannelMentionsOn, IgnoreChannelMentionsOff, IgnoreChannelMentionsDefault:
		return true
	default:
		return false
	}
}
// GetDefaultChannelNotifyProps builds the notify-props map a new channel
// member starts with: desktop, push, and email levels deferred to the user's
// global settings, mark-unread on all messages, and the default
// ignore-channel-mentions behavior.
func GetDefaultChannelNotifyProps() StringMap {
	props := make(StringMap, 5)
	props[DesktopNotifyProp] = ChannelNotifyDefault
	props[MarkUnreadNotifyProp] = ChannelMarkUnreadAll
	props[PushNotifyProp] = ChannelNotifyDefault
	props[EmailNotifyProp] = ChannelNotifyDefault
	props[IgnoreChannelMentionsNotifyProp] = IgnoreChannelMentionsDefault
	return props
}

View File

@ -1,183 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
import "strings"
// Event identifiers carried in CWS webhook payloads (see CWSWebhookPayload.Event).
const (
	EventTypeFailedPayment         = "failed-payment"
	EventTypeFailedPaymentNoCard   = "failed-payment-no-card"
	EventTypeSendAdminWelcomeEmail = "send-admin-welcome-email"
	EventTypeTrialWillEnd          = "trial-will-end"
	EventTypeTrialEnded            = "trial-ended"
)

// MockCWS's purpose is not evident from this file; presumably it points the
// server at a mock CWS endpoint for testing — TODO confirm at the usage site.
var MockCWS string

// BillingScheme identifies how a product is priced.
type BillingScheme string

const (
	BillingSchemePerSeat    = BillingScheme("per_seat")
	BillingSchemeFlatFee    = BillingScheme("flat_fee")
	BillingSchemeSalesServe = BillingScheme("sales_serve")
)

// RecurringInterval is the billing period of a subscription product.
type RecurringInterval string

const (
	RecurringIntervalYearly  = RecurringInterval("year")
	RecurringIntervalMonthly = RecurringInterval("month")
)

// SubscriptionFamily distinguishes cloud products from on-premises products.
type SubscriptionFamily string

const (
	SubscriptionFamilyCloud  = SubscriptionFamily("cloud")
	SubscriptionFamilyOnPrem = SubscriptionFamily("on-prem")
)
// Product model represents a product on the cloud system.
type Product struct {
	ID                string             `json:"id"`
	Name              string             `json:"name"`
	Description       string             `json:"description"`
	PricePerSeat      float64            `json:"price_per_seat"`
	AddOns            []*AddOn           `json:"add_ons"`
	SKU               string             `json:"sku"`
	PriceID           string             `json:"price_id"`
	Family            SubscriptionFamily `json:"product_family"`
	RecurringInterval RecurringInterval  `json:"recurring_interval"`
	BillingScheme     BillingScheme      `json:"billing_scheme"`
}

// AddOn represents an addon to a product.
type AddOn struct {
	ID           string  `json:"id"`
	Name         string  `json:"name"`
	DisplayName  string  `json:"display_name"`
	PricePerSeat float64 `json:"price_per_seat"`
}
// StripeSetupIntent represents the SetupIntent model from Stripe for updating
// payment methods.
type StripeSetupIntent struct {
	ID           string `json:"id"`
	ClientSecret string `json:"client_secret"`
}

// ConfirmPaymentMethodRequest contains the fields for the customer payment
// update API.
type ConfirmPaymentMethodRequest struct {
	StripeSetupIntentID string `json:"stripe_setup_intent_id"`
	SubscriptionID      string `json:"subscription_id"`
}
// CloudCustomer model represents a customer on the system, combining the
// editable CloudCustomerInfo with server-assigned identity, addresses, and
// payment method.
type CloudCustomer struct {
	CloudCustomerInfo
	ID             string         `json:"id"`
	CreatorID      string         `json:"creator_id"`
	CreateAt       int64          `json:"create_at"`
	BillingAddress *Address       `json:"billing_address"`
	CompanyAddress *Address       `json:"company_address"`
	PaymentMethod  *PaymentMethod `json:"payment_method"`
}

// CloudCustomerInfo represents editable info of a customer.
type CloudCustomerInfo struct {
	Name             string `json:"name"`
	Email            string `json:"email,omitempty"`
	ContactFirstName string `json:"contact_first_name,omitempty"`
	ContactLastName  string `json:"contact_last_name,omitempty"`
	NumEmployees     int    `json:"num_employees"`
}

// Address model represents a customer's address.
type Address struct {
	City       string `json:"city"`
	Country    string `json:"country"`
	Line1      string `json:"line1"`
	Line2      string `json:"line2"`
	PostalCode string `json:"postal_code"`
	State      string `json:"state"`
}
// PaymentMethod represents methods of payment for a customer.
type PaymentMethod struct {
	Type      string `json:"type"`
	LastFour  string `json:"last_four"`
	ExpMonth  int    `json:"exp_month"`
	ExpYear   int    `json:"exp_year"`
	CardBrand string `json:"card_brand"`
	Name      string `json:"name"`
}

// Subscription model represents a subscription on the system.
// NOTE(review): IsPaidTier and IsFreeTrial are booleans encoded as strings —
// presumably "true"/"false"; confirm against the CWS API before comparing.
type Subscription struct {
	ID          string   `json:"id"`
	CustomerID  string   `json:"customer_id"`
	ProductID   string   `json:"product_id"`
	AddOns      []string `json:"add_ons"`
	StartAt     int64    `json:"start_at"`
	EndAt       int64    `json:"end_at"`
	CreateAt    int64    `json:"create_at"`
	Seats       int      `json:"seats"`
	Status      string   `json:"status"`
	DNS         string   `json:"dns"` // workspace hostname, e.g. test.mattermost.cloud.com (see GetWorkSpaceNameFromDNS)
	IsPaidTier  string   `json:"is_paid_tier"`
	LastInvoice *Invoice `json:"last_invoice"`
	IsFreeTrial string   `json:"is_free_trial"`
	TrialEndAt  int64    `json:"trial_end_at"`
}
// GetWorkSpaceNameFromDNS returns the workspace name, i.e. the leading DNS
// label. For example, from test.mattermost.cloud.com it returns "test".
// When DNS contains no dot the whole string is returned.
func (s *Subscription) GetWorkSpaceNameFromDNS() string {
	if i := strings.Index(s.DNS, "."); i >= 0 {
		return s.DNS[:i]
	}
	return s.DNS
}
// Invoice model represents a cloud invoice.
type Invoice struct {
	ID                 string             `json:"id"`
	Number             string             `json:"number"`
	CreateAt           int64              `json:"create_at"`
	Total              int64              `json:"total"`
	Tax                int64              `json:"tax"`
	Status             string             `json:"status"`
	Description        string             `json:"description"`
	PeriodStart        int64              `json:"period_start"`
	PeriodEnd          int64              `json:"period_end"`
	SubscriptionID     string             `json:"subscription_id"`
	Items              []*InvoiceLineItem `json:"line_items"`
	CurrentProductName string             `json:"current_product_name"`
}

// InvoiceLineItem model represents a cloud invoice lineitem tied to an
// invoice.
type InvoiceLineItem struct {
	PriceID      string                 `json:"price_id"`
	Total        int64                  `json:"total"`
	Quantity     float64                `json:"quantity"`
	PricePerUnit int64                  `json:"price_per_unit"`
	Description  string                 `json:"description"`
	Type         string                 `json:"type"`
	Metadata     map[string]interface{} `json:"metadata"`
}
// CWSWebhookPayload is the envelope delivered by CWS webhooks. Event holds
// one of the EventType* identifiers; the remaining fields are populated
// depending on the event.
type CWSWebhookPayload struct {
	Event                             string               `json:"event"`
	FailedPayment                     *FailedPayment       `json:"failed_payment"`
	CloudWorkspaceOwner               *CloudWorkspaceOwner `json:"cloud_workspace_owner"`
	SubscriptionTrialEndUnixTimeStamp int64                `json:"trial_end_time_stamp"`
}

// FailedPayment describes the card and failure reason for a failed-payment event.
type FailedPayment struct {
	CardBrand      string `json:"card_brand"`
	LastFour       string `json:"last_four"`
	FailureMessage string `json:"failure_message"`
}

// CloudWorkspaceOwner is part of the CWS Webhook payload that contains
// information about the user that created the workspace from the CWS.
type CloudWorkspaceOwner struct {
	UserName string `json:"username"`
}

// SubscriptionChange is the request body for changing a subscription's product.
type SubscriptionChange struct {
	ProductID string `json:"product_id"`
}

View File

@ -1,12 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
// ClusterInfo describes a single node of a Mattermost cluster as reported by
// the cluster status endpoints.
type ClusterInfo struct {
	Id         string `json:"id"`
	Version    string `json:"version"`
	ConfigHash string `json:"config_hash"` // hash of the node's active config; presumably used to detect config drift — verify
	IPAddress  string `json:"ipaddress"`
	Hostname   string `json:"hostname"`
}

File diff suppressed because one or more lines are too long

View File

@ -1,134 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
import (
"reflect"
"strconv"
)
// FeatureFlags holds all server feature flags. Boolean flags interpret
// "on"/"true" as true and everything else as false; string flags carry
// arbitrary values (e.g. plugin versions via the `plugin_id` tag).
type FeatureFlags struct {
	// Exists only for unit and manual testing.
	// When set to a value, will be returned by the ping endpoint.
	TestFeature string
	// Exists only for testing bool functionality. Boolean feature flags interpret "on" or "true" as true and
	// all other values as false.
	TestBoolFeature bool

	// Toggle on and off support for Collapsed Threads
	CollapsedThreads bool

	// Enable the remote cluster service for shared channels.
	EnableRemoteClusterService bool

	// AppsEnabled toggles the Apps framework functionalities both in server and client side
	AppsEnabled bool

	// AppBarEnabled toggles the App Bar component on client side
	AppBarEnabled bool

	// Feature flags to control plugin versions
	PluginPlaybooks  string `plugin_id:"playbooks"`
	PluginApps       string `plugin_id:"com.mattermost.apps"`
	PluginFocalboard string `plugin_id:"focalboard"`

	PermalinkPreviews bool

	// Enable Calls plugin support in the mobile app
	CallsMobile bool

	// A dash separated list for feature flags to turn on for Boards
	BoardsFeatureFlags string

	// Enable Create First Channel
	GuidedChannelCreation bool

	// A/B test for whether radio buttons or toggle button is more effective in in-screen invite to team modal ("none", "toggle")
	InviteToTeam string

	CustomGroups bool

	// Enable DataRetention for Boards
	BoardsDataRetention bool

	NormalizeLdapDNs bool

	EnableInactivityCheckJob bool

	// Enable special onboarding flow for first admin
	UseCaseOnboarding bool

	// Enable GraphQL feature
	GraphQL bool

	InsightsEnabled bool

	CommandPalette bool
}
// SetDefaults resets every feature flag to its default value. It is used to
// initialize a fresh FeatureFlags value and to restore defaults on reload.
func (f *FeatureFlags) SetDefaults() {
	f.TestFeature = "off"
	f.TestBoolFeature = false
	f.CollapsedThreads = true
	f.EnableRemoteClusterService = false
	f.AppsEnabled = true
	f.AppBarEnabled = false
	// Reset all plugin-version flags. PluginPlaybooks was previously skipped
	// here, so a stale value could survive a defaults reset while its sibling
	// flags were cleared.
	f.PluginPlaybooks = ""
	f.PluginApps = ""
	f.PluginFocalboard = ""
	f.PermalinkPreviews = true
	f.CallsMobile = false
	f.BoardsFeatureFlags = ""
	f.GuidedChannelCreation = false
	f.InviteToTeam = "none"
	f.CustomGroups = true
	f.BoardsDataRetention = false
	f.NormalizeLdapDNs = false
	f.EnableInactivityCheckJob = true
	f.UseCaseOnboarding = true
	f.GraphQL = false
	f.InsightsEnabled = false
	f.CommandPalette = false
}
// Plugins returns a map from plugin ID to the corresponding plugin-version
// feature flag value, derived from struct fields tagged with `plugin_id`.
func (f *FeatureFlags) Plugins() map[string]string {
	structVal := reflect.ValueOf(f).Elem()
	structType := structVal.Type()

	versions := make(map[string]string)
	for i := 0; i < structType.NumField(); i++ {
		field := structType.Field(i)
		id, tagged := field.Tag.Lookup("plugin_id")
		if !tagged {
			continue
		}
		versions[id] = structVal.Field(i).String()
	}
	return versions
}
// ToMap returns the feature flags as a map[string]string.
// Supports boolean and string feature flags.
func (f *FeatureFlags) ToMap() map[string]string {
	val := reflect.ValueOf(*f)
	typ := val.Type()

	out := make(map[string]string, val.NumField())
	for i := 0; i < val.NumField(); i++ {
		fieldVal := val.Field(i)
		if !fieldVal.IsValid() {
			continue
		}
		name := typ.Field(i).Name
		if fieldVal.Kind() == reflect.Bool {
			out[name] = strconv.FormatBool(fieldVal.Bool())
		} else {
			// Non-boolean flags are all strings.
			out[name] = fieldVal.String()
		}
	}
	return out
}

View File

@ -1,76 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
import (
"net/http"
"time"
)
// Supported time ranges for insights queries.
const (
	TimeRangeToday string = "today"
	TimeRange7Day  string = "7_day"
	TimeRange28Day string = "28_day"
)

// InsightsOpts holds the time window and paging options of an insights query.
type InsightsOpts struct {
	StartUnixMilli int64 // lower bound of the window, Unix milliseconds
	Page           int
	PerPage        int
}

// InsightsListData is embedded by list results to signal paging state.
type InsightsListData struct {
	HasNext bool `json:"has_next"`
}

// InsightsData is embedded by individual result items to carry their rank.
type InsightsData struct {
	Rank int `json:"rank"`
}

// TopReactionList is one page of top reactions.
type TopReactionList struct {
	InsightsListData
	Items []*TopReaction `json:"items"`
}

// TopReaction is a single emoji together with its usage count.
type TopReaction struct {
	InsightsData
	EmojiName string `json:"emoji_name"`
	Count     int64  `json:"count"`
}
// GetStartUnixMilliForTimeRange gets the unix start time in milliseconds from the given time range.
// Time range can be one of: "1_day", "7_day", or "28_day". Unknown ranges
// return today's start together with a bad-request AppError.
func GetStartUnixMilliForTimeRange(timeRange string) (int64, *AppError) {
	now := time.Now()
	_, offset := now.Zone()
	switch timeRange {
	case TimeRange7Day:
		return GetStartOfDayMillis(now.Add(-7*24*time.Hour), offset), nil
	case TimeRange28Day:
		return GetStartOfDayMillis(now.Add(-28*24*time.Hour), offset), nil
	case TimeRangeToday:
		return GetStartOfDayMillis(now, offset), nil
	}
	return GetStartOfDayMillis(now, offset), NewAppError("Insights.IsValidRequest", "model.insights.time_range.app_error", nil, "", http.StatusBadRequest)
}
// GetTopReactionListWithRankAndPagination adds a rank to each item in the given list of TopReaction and checks if there is
// another page that can be fetched based on the given limit and offset. The given list of TopReaction is assumed to be
// sorted by Count. Returns a TopReactionList.
func GetTopReactionListWithRankAndPagination(reactions []*TopReaction, limit int, offset int) *TopReactionList {
	// Callers fetch limit+1 rows; an extra row means there is a next page.
	hasNext := limit != 0 && len(reactions) == limit+1
	if hasNext {
		reactions = reactions[:limit]
	}

	// Ranks are 1-based and continue across pages via the offset.
	for i, reaction := range reactions {
		reaction.Rank = offset + i + 1
	}

	return &TopReactionList{InsightsListData: InsightsListData{HasNext: hasNext}, Items: reactions}
}

View File

@ -1,84 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
import (
"strings"
)
// Push platform identifiers, payload types, and delivery-status strings.
const (
	PushNotifyApple              = "apple"
	PushNotifyAndroid            = "android"
	PushNotifyAppleReactNative   = "apple_rn"
	PushNotifyAndroidReactNative = "android_rn"
	PushTypeMessage              = "message"
	PushTypeClear                = "clear"
	PushTypeUpdateBadge          = "update_badge"
	PushTypeSession              = "session"
	PushTypeTest                 = "test"
	PushMessageV2                = "v2"
	PushSoundNone                = "none"
	// The category is set to handle a set of interactive Actions
	// with the push notifications
	CategoryCanReply = "CAN_REPLY"
	// MHPNS is the hosted Mattermost push notification service URL.
	MHPNS = "https://push.mattermost.com"
	// Delivery status strings reported back by clients/proxy.
	PushSendPrepare = "Prepared to send"
	PushSendSuccess = "Successful"
	PushNotSent     = "Not Sent due to preferences"
	PushReceived    = "Received by device"
)

// PushNotificationAck is sent by a mobile client to acknowledge receipt of a
// push notification.
type PushNotificationAck struct {
	Id               string `json:"id"`
	ClientReceivedAt int64  `json:"received_at"`
	ClientPlatform   string `json:"platform"`
	NotificationType string `json:"type"`
	PostId           string `json:"post_id,omitempty"`
	IsIdLoaded       bool   `json:"is_id_loaded"`
}

// PushNotification is the payload handed to the push proxy for delivery to a
// mobile device.
type PushNotification struct {
	AckId            string `json:"ack_id"`
	Platform         string `json:"platform"`
	ServerId         string `json:"server_id"`
	DeviceId         string `json:"device_id"`
	PostId           string `json:"post_id"`
	Category         string `json:"category,omitempty"`
	Sound            string `json:"sound,omitempty"`
	Message          string `json:"message,omitempty"`
	Badge            int    `json:"badge,omitempty"`
	ContentAvailable int    `json:"cont_ava,omitempty"`
	TeamId           string `json:"team_id,omitempty"`
	ChannelId        string `json:"channel_id,omitempty"`
	RootId           string `json:"root_id,omitempty"`
	ChannelName      string `json:"channel_name,omitempty"`
	Type             string `json:"type,omitempty"`
	SenderId         string `json:"sender_id,omitempty"`
	SenderName       string `json:"sender_name,omitempty"`
	OverrideUsername string `json:"override_username,omitempty"`
	OverrideIconURL  string `json:"override_icon_url,omitempty"`
	FromWebhook      string `json:"from_webhook,omitempty"`
	Version          string `json:"version,omitempty"`
	IsCRTEnabled     bool   `json:"is_crt_enabled"`
	IsIdLoaded       bool   `json:"is_id_loaded"`
}
// DeepCopy returns a copy of the notification. All fields are strings or
// scalar values, so a plain struct copy is a complete copy.
func (pn *PushNotification) DeepCopy() *PushNotification {
	// Use a name other than "copy" to avoid shadowing the predeclared
	// built-in (flagged by linters).
	pnCopy := *pn
	return &pnCopy
}
// SetDeviceIdAndPlatform splits a composite device ID of the form
// "platform:deviceId" and stores the two parts. Input without a colon leaves
// both fields unchanged.
func (pn *PushNotification) SetDeviceIdAndPlatform(deviceId string) {
	if sep := strings.Index(deviceId, ":"); sep > -1 {
		pn.Platform = deviceId[:sep]
		pn.DeviceId = deviceId[sep+1:]
	}
}

View File

@ -1,363 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
import (
"encoding/json"
"fmt"
"io"
)
// Websocket event types. Each constant is the value carried in the "event"
// field of a WebSocketEvent pushed to connected clients.
const (
	WebsocketEventTyping                              = "typing"
	WebsocketEventPosted                              = "posted"
	WebsocketEventPostEdited                          = "post_edited"
	WebsocketEventPostDeleted                         = "post_deleted"
	WebsocketEventPostUnread                          = "post_unread"
	WebsocketEventChannelConverted                    = "channel_converted"
	WebsocketEventChannelCreated                      = "channel_created"
	WebsocketEventChannelDeleted                      = "channel_deleted"
	WebsocketEventChannelRestored                     = "channel_restored"
	WebsocketEventChannelUpdated                      = "channel_updated"
	WebsocketEventChannelMemberUpdated                = "channel_member_updated"
	WebsocketEventChannelSchemeUpdated                = "channel_scheme_updated"
	WebsocketEventDirectAdded                         = "direct_added"
	WebsocketEventGroupAdded                          = "group_added"
	WebsocketEventNewUser                             = "new_user"
	WebsocketEventAddedToTeam                         = "added_to_team"
	WebsocketEventLeaveTeam                           = "leave_team"
	WebsocketEventUpdateTeam                          = "update_team"
	WebsocketEventDeleteTeam                          = "delete_team"
	WebsocketEventRestoreTeam                         = "restore_team"
	WebsocketEventUpdateTeamScheme                    = "update_team_scheme"
	WebsocketEventUserAdded                           = "user_added"
	WebsocketEventUserUpdated                         = "user_updated"
	WebsocketEventUserRoleUpdated                     = "user_role_updated"
	WebsocketEventMemberroleUpdated                   = "memberrole_updated"
	WebsocketEventUserRemoved                         = "user_removed"
	WebsocketEventPreferenceChanged                   = "preference_changed"
	WebsocketEventPreferencesChanged                  = "preferences_changed"
	WebsocketEventPreferencesDeleted                  = "preferences_deleted"
	WebsocketEventEphemeralMessage                    = "ephemeral_message"
	WebsocketEventStatusChange                        = "status_change"
	WebsocketEventHello                               = "hello"
	WebsocketAuthenticationChallenge                  = "authentication_challenge"
	WebsocketEventReactionAdded                       = "reaction_added"
	WebsocketEventReactionRemoved                     = "reaction_removed"
	WebsocketEventResponse                            = "response"
	WebsocketEventEmojiAdded                          = "emoji_added"
	WebsocketEventChannelViewed                       = "channel_viewed"
	WebsocketEventPluginStatusesChanged               = "plugin_statuses_changed"
	WebsocketEventPluginEnabled                       = "plugin_enabled"
	WebsocketEventPluginDisabled                      = "plugin_disabled"
	WebsocketEventRoleUpdated                         = "role_updated"
	WebsocketEventLicenseChanged                      = "license_changed"
	WebsocketEventConfigChanged                       = "config_changed"
	WebsocketEventOpenDialog                          = "open_dialog"
	WebsocketEventGuestsDeactivated                   = "guests_deactivated"
	WebsocketEventUserActivationStatusChange          = "user_activation_status_change"
	WebsocketEventReceivedGroup                       = "received_group"
	WebsocketEventReceivedGroupAssociatedToTeam       = "received_group_associated_to_team"
	WebsocketEventReceivedGroupNotAssociatedToTeam    = "received_group_not_associated_to_team"
	WebsocketEventReceivedGroupAssociatedToChannel    = "received_group_associated_to_channel"
	WebsocketEventReceivedGroupNotAssociatedToChannel = "received_group_not_associated_to_channel"
	WebsocketEventGroupMemberDelete                   = "group_member_deleted"
	WebsocketEventGroupMemberAdd                      = "group_member_add"
	WebsocketEventSidebarCategoryCreated              = "sidebar_category_created"
	WebsocketEventSidebarCategoryUpdated              = "sidebar_category_updated"
	WebsocketEventSidebarCategoryDeleted              = "sidebar_category_deleted"
	WebsocketEventSidebarCategoryOrderUpdated         = "sidebar_category_order_updated"
	WebsocketWarnMetricStatusReceived                 = "warn_metric_status_received"
	WebsocketWarnMetricStatusRemoved                  = "warn_metric_status_removed"
	WebsocketEventCloudPaymentStatusUpdated           = "cloud_payment_status_updated"
	WebsocketEventThreadUpdated                       = "thread_updated"
	WebsocketEventThreadFollowChanged                 = "thread_follow_changed"
	WebsocketEventThreadReadChanged                   = "thread_read_changed"
	WebsocketFirstAdminVisitMarketplaceStatusReceived = "first_admin_visit_marketplace_status_received"
)

// WebSocketMessage is implemented by every message that can be sent over a
// websocket connection (events and responses alike).
type WebSocketMessage interface {
	ToJSON() ([]byte, error)
	IsValid() bool
	EventType() string
}
// WebsocketBroadcast describes the routing rules for a websocket event:
// which user, channel, or team it should be delivered to, and which users
// should be skipped.
type WebsocketBroadcast struct {
	OmitUsers map[string]bool `json:"omit_users"` // broadcast is omitted for users listed here
	UserId    string          `json:"user_id"`    // broadcast only occurs for this user
	ChannelId string          `json:"channel_id"` // broadcast only occurs for users in this channel
	TeamId    string          `json:"team_id"`    // broadcast only occurs for users in this team

	ContainsSanitizedData bool `json:"-"`
	ContainsSensitiveData bool `json:"-"`

	// ReliableClusterSend indicates whether or not the message should
	// be sent through the cluster using the reliable, TCP backed channel.
	ReliableClusterSend bool `json:"-"`
}

// copy returns a deep copy of the broadcast (the OmitUsers map is cloned).
// Returns nil for a nil receiver.
func (wb *WebsocketBroadcast) copy() *WebsocketBroadcast {
	if wb == nil {
		return nil
	}

	var c WebsocketBroadcast
	if wb.OmitUsers != nil {
		c.OmitUsers = make(map[string]bool, len(wb.OmitUsers))
		for k, v := range wb.OmitUsers {
			c.OmitUsers[k] = v
		}
	}
	c.UserId = wb.UserId
	c.ChannelId = wb.ChannelId
	c.TeamId = wb.TeamId
	c.ContainsSanitizedData = wb.ContainsSanitizedData
	c.ContainsSensitiveData = wb.ContainsSensitiveData
	// Bug fix: ReliableClusterSend was previously not copied, silently
	// downgrading copied broadcasts to the unreliable channel.
	c.ReliableClusterSend = wb.ReliableClusterSend

	return &c
}
type precomputedWebSocketEventJSON struct {
Event json.RawMessage
Data json.RawMessage
Broadcast json.RawMessage
}
func (p *precomputedWebSocketEventJSON) copy() *precomputedWebSocketEventJSON {
if p == nil {
return nil
}
var c precomputedWebSocketEventJSON
if p.Event != nil {
c.Event = make([]byte, len(p.Event))
copy(c.Event, p.Event)
}
if p.Data != nil {
c.Data = make([]byte, len(p.Data))
copy(c.Data, p.Data)
}
if p.Broadcast != nil {
c.Broadcast = make([]byte, len(p.Broadcast))
copy(c.Broadcast, p.Broadcast)
}
return &c
}
// webSocketEventJSON mirrors WebSocketEvent to make some of its unexported fields serializable
type webSocketEventJSON struct {
	Event     string                 `json:"event"`
	Data      map[string]interface{} `json:"data"`
	Broadcast *WebsocketBroadcast    `json:"broadcast"`
	Sequence  int64                  `json:"seq"`
}

// WebSocketEvent is a websocket message pushed to clients. Its fields are
// unexported; read them through the Get* accessors and derive modified
// events through the Set* methods, which return copies.
type WebSocketEvent struct {
	event           string
	data            map[string]interface{}
	broadcast       *WebsocketBroadcast
	sequence        int64
	precomputedJSON *precomputedWebSocketEventJSON // optional cached serialization; see PrecomputeJSON
}
// PrecomputeJSON precomputes and stores the serialized JSON for all fields other than Sequence.
// This makes ToJSON much more efficient when sending the same event to multiple connections.
//
// Marshal errors are deliberately ignored (pre-existing behavior); on a
// failure the corresponding raw buffer is simply empty.
func (ev *WebSocketEvent) PrecomputeJSON() *WebSocketEvent {
	// Use a name other than "copy" to avoid shadowing the predeclared
	// built-in (flagged by linters).
	evCopy := ev.Copy()
	event, _ := json.Marshal(evCopy.event)
	data, _ := json.Marshal(evCopy.data)
	broadcast, _ := json.Marshal(evCopy.broadcast)
	evCopy.precomputedJSON = &precomputedWebSocketEventJSON{
		Event:     json.RawMessage(event),
		Data:      json.RawMessage(data),
		Broadcast: json.RawMessage(broadcast),
	}
	return evCopy
}
// Add sets a key/value pair on the event's data payload. Unlike the Set*
// methods, this mutates the event in place.
func (ev *WebSocketEvent) Add(key string, value interface{}) {
	ev.data[key] = value
}
// NewWebSocketEvent builds an event of the given type with an empty data map
// and a broadcast targeting the given team/channel/user while omitting
// omitUsers.
func NewWebSocketEvent(event, teamId, channelId, userId string, omitUsers map[string]bool) *WebSocketEvent {
	broadcast := &WebsocketBroadcast{
		TeamId:    teamId,
		ChannelId: channelId,
		UserId:    userId,
		OmitUsers: omitUsers,
	}
	return &WebSocketEvent{
		event:     event,
		data:      map[string]interface{}{},
		broadcast: broadcast,
	}
}
// Copy returns a shallow copy of the event: the data map, broadcast, and
// precomputed JSON are shared with the original.
func (ev *WebSocketEvent) Copy() *WebSocketEvent {
	// Build the copy directly; the previous implementation bound it to a
	// local named "copy", shadowing the predeclared built-in.
	return &WebSocketEvent{
		event:           ev.event,
		data:            ev.data,
		broadcast:       ev.broadcast,
		sequence:        ev.sequence,
		precomputedJSON: ev.precomputedJSON,
	}
}

// DeepCopy returns a copy of the event with its own data map, broadcast, and
// precomputed JSON. Values stored inside the data map are still shared.
func (ev *WebSocketEvent) DeepCopy() *WebSocketEvent {
	var dataCopy map[string]interface{}
	if ev.data != nil {
		dataCopy = make(map[string]interface{}, len(ev.data))
		for k, v := range ev.data {
			dataCopy[k] = v
		}
	}
	return &WebSocketEvent{
		event:           ev.event,
		data:            dataCopy,
		broadcast:       ev.broadcast.copy(),
		sequence:        ev.sequence,
		precomputedJSON: ev.precomputedJSON.copy(),
	}
}
// GetData returns the event's data payload. The map is not copied; mutating
// it mutates the event.
func (ev *WebSocketEvent) GetData() map[string]interface{} {
	return ev.data
}

// GetBroadcast returns the event's routing rules.
func (ev *WebSocketEvent) GetBroadcast() *WebsocketBroadcast {
	return ev.broadcast
}

// GetSequence returns the event's sequence number.
func (ev *WebSocketEvent) GetSequence() int64 {
	return ev.sequence
}
// SetEvent returns a copy of the event with the event type replaced.
// (Locals were renamed from "copy" to avoid shadowing the predeclared
// built-in, which linters flag.)
func (ev *WebSocketEvent) SetEvent(event string) *WebSocketEvent {
	c := ev.Copy()
	c.event = event
	return c
}

// SetData returns a copy of the event with the data payload replaced.
func (ev *WebSocketEvent) SetData(data map[string]interface{}) *WebSocketEvent {
	c := ev.Copy()
	c.data = data
	return c
}

// SetBroadcast returns a copy of the event with the broadcast replaced.
func (ev *WebSocketEvent) SetBroadcast(broadcast *WebsocketBroadcast) *WebSocketEvent {
	c := ev.Copy()
	c.broadcast = broadcast
	return c
}

// SetSequence returns a copy of the event with the sequence number replaced.
func (ev *WebSocketEvent) SetSequence(seq int64) *WebSocketEvent {
	c := ev.Copy()
	c.sequence = seq
	return c
}
// IsValid reports whether the event carries a non-empty event type.
func (ev *WebSocketEvent) IsValid() bool {
	return ev.event != ""
}

// EventType returns the event type string (one of the WebsocketEvent*
// constants).
func (ev *WebSocketEvent) EventType() string {
	return ev.event
}
// ToJSON serializes the event. When a precomputed serialization exists, the
// payload is assembled by splicing the cached raw parts together with the
// current sequence number, skipping a full re-marshal.
func (ev *WebSocketEvent) ToJSON() ([]byte, error) {
	if p := ev.precomputedJSON; p != nil {
		return []byte(fmt.Sprintf(`{"event": %s, "data": %s, "broadcast": %s, "seq": %d}`, p.Event, p.Data, p.Broadcast, ev.GetSequence())), nil
	}
	return json.Marshal(webSocketEventJSON{
		Event:     ev.event,
		Data:      ev.data,
		Broadcast: ev.broadcast,
		Sequence:  ev.sequence,
	})
}

// Encode encodes the event to the given encoder.
func (ev *WebSocketEvent) Encode(enc *json.Encoder) error {
	if p := ev.precomputedJSON; p != nil {
		raw := fmt.Sprintf(`{"event": %s, "data": %s, "broadcast": %s, "seq": %d}`, p.Event, p.Data, p.Broadcast, ev.sequence)
		return enc.Encode(json.RawMessage(raw))
	}
	return enc.Encode(webSocketEventJSON{
		Event:     ev.event,
		Data:      ev.data,
		Broadcast: ev.broadcast,
		Sequence:  ev.sequence,
	})
}
// WebSocketEventFromJSON decodes a WebSocketEvent from the given reader.
// If the data payload contains a "user" entry, it is re-decoded into a
// *User, since the generic decode leaves it as a map[string]interface{}.
func WebSocketEventFromJSON(data io.Reader) (*WebSocketEvent, error) {
	var payload webSocketEventJSON
	if err := json.NewDecoder(data).Decode(&payload); err != nil {
		return nil, err
	}

	if raw, ok := payload.Data["user"]; ok {
		// We need to convert to and from JSON again
		// because the user is in the form of a map[string]interface{}.
		buf, err := json.Marshal(raw)
		if err != nil {
			return nil, err
		}
		user := &User{}
		if err := json.Unmarshal(buf, user); err != nil {
			return nil, err
		}
		payload.Data["user"] = user
	}

	return &WebSocketEvent{
		event:     payload.Event,
		data:      payload.Data,
		broadcast: payload.Broadcast,
		sequence:  payload.Sequence,
	}, nil
}
// WebSocketResponse represents a response received through the WebSocket
// for a request made to the server. This is available through the ResponseChannel
// channel in WebSocketClient.
type WebSocketResponse struct {
	Status   string                 `json:"status"`              // The status of the response. For example: OK, FAIL.
	SeqReply int64                  `json:"seq_reply,omitempty"` // A counter which is incremented for every response sent.
	Data     map[string]interface{} `json:"data,omitempty"`      // The data contained in the response.
	Error    *AppError              `json:"error,omitempty"`     // A field that is set if any error has occurred.
}

// Add sets a key/value pair on the response's data payload in place.
func (m *WebSocketResponse) Add(key string, value interface{}) {
	m.Data[key] = value
}

// NewWebSocketResponse creates a successful-style response with the given
// status, reply counter, and data.
func NewWebSocketResponse(status string, seqReply int64, data map[string]interface{}) *WebSocketResponse {
	return &WebSocketResponse{Status: status, SeqReply: seqReply, Data: data}
}

// NewWebSocketError creates a failure response carrying the given AppError.
func NewWebSocketError(seqReply int64, err *AppError) *WebSocketResponse {
	return &WebSocketResponse{Status: StatusFail, SeqReply: seqReply, Error: err}
}

// IsValid reports whether the response carries a status.
func (m *WebSocketResponse) IsValid() bool {
	return m.Status != ""
}

// EventType satisfies WebSocketMessage; responses always report the
// "response" event type.
func (m *WebSocketResponse) EventType() string {
	return WebsocketEventResponse
}

// ToJSON serializes the response.
func (m *WebSocketResponse) ToJSON() ([]byte, error) {
	return json.Marshal(m)
}
// WebSocketResponseFromJSON decodes a WebSocketResponse from the reader.
// On a decode error, the returned pointer is nil alongside the error.
func WebSocketResponseFromJSON(data io.Reader) (*WebSocketResponse, error) {
	var o *WebSocketResponse
	err := json.NewDecoder(data).Decode(&o)
	return o, err
}

View File

@ -1,84 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package filestore
import (
"io"
"time"
"github.com/pkg/errors"
)
// Driver names accepted in FileBackendSettings.DriverName.
const (
	driverS3    = "amazons3"
	driverLocal = "local"
)

// ReadCloseSeeker is the reader type returned by FileBackend.Reader.
type ReadCloseSeeker interface {
	io.ReadCloser
	io.Seeker
}

// FileBackend abstracts file storage. Implementations exist for the local
// filesystem and for S3-compatible object stores; all paths are interpreted
// relative to the backend's root.
type FileBackend interface {
	TestConnection() error

	Reader(path string) (ReadCloseSeeker, error)
	ReadFile(path string) ([]byte, error)
	FileExists(path string) (bool, error)
	FileSize(path string) (int64, error)
	CopyFile(oldPath, newPath string) error
	MoveFile(oldPath, newPath string) error
	WriteFile(fr io.Reader, path string) (int64, error)
	AppendFile(fr io.Reader, path string) (int64, error)
	RemoveFile(path string) error
	FileModTime(path string) (time.Time, error)

	ListDirectory(path string) ([]string, error)
	ListDirectoryRecursively(path string) ([]string, error)
	RemoveDirectory(path string) error
}
// FileBackendSettings selects and configures a file storage backend. The
// Amazon* fields are only consulted when DriverName is "amazons3";
// Directory is only used by the local driver.
type FileBackendSettings struct {
	DriverName              string
	Directory               string
	AmazonS3AccessKeyId     string
	AmazonS3SecretAccessKey string
	AmazonS3Bucket          string
	AmazonS3PathPrefix      string
	AmazonS3Region          string
	AmazonS3Endpoint        string
	AmazonS3SSL             bool
	AmazonS3SignV2          bool
	AmazonS3SSE             bool
	AmazonS3Trace           bool
}

// CheckMandatoryS3Fields validates the minimum S3 configuration and fills
// in the default AWS endpoint when none is set.
func (settings *FileBackendSettings) CheckMandatoryS3Fields() error {
	switch {
	case settings.AmazonS3Bucket == "":
		return errors.New("missing s3 bucket settings")
	case settings.AmazonS3Endpoint == "":
		// if S3 endpoint is not set call the set defaults to set that
		settings.AmazonS3Endpoint = "s3.amazonaws.com"
	}
	return nil
}
// NewFileBackend instantiates the file backend selected by
// settings.DriverName ("amazons3" or "local"). Any other driver name is an
// error.
func NewFileBackend(settings FileBackendSettings) (FileBackend, error) {
	switch settings.DriverName {
	case driverLocal:
		return &LocalFileBackend{directory: settings.Directory}, nil
	case driverS3:
		backend, err := NewS3FileBackend(settings)
		if err != nil {
			return nil, errors.Wrap(err, "unable to connect to the s3 backend")
		}
		return backend, nil
	default:
		return nil, errors.New("no valid filestorage driver found")
	}
}

View File

@ -1,240 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package filestore
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/pkg/errors"
"github.com/mattermost/mattermost-server/v6/shared/mlog"
)
const (
	// TestFilePath is the scratch file written by TestConnection to verify
	// the storage root is writable.
	TestFilePath = "/testfile"
)

// LocalFileBackend stores files on the local filesystem below a single root
// directory; all API paths are joined onto it.
type LocalFileBackend struct {
	directory string
}
// copyFile will copy a file from src path to dst path.
// Overwrites any existing files at dst.
// Permissions are copied from file at src to the new file at dst.
func copyFile(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
return
}
defer in.Close()
if err = os.MkdirAll(filepath.Dir(dst), os.ModePerm); err != nil {
return
}
out, err := os.Create(dst)
if err != nil {
return
}
defer func() {
if e := out.Close(); e != nil {
err = e
}
}()
_, err = io.Copy(out, in)
if err != nil {
return
}
err = out.Sync()
if err != nil {
return
}
stat, err := os.Stat(src)
if err != nil {
return
}
err = os.Chmod(dst, stat.Mode())
if err != nil {
return
}
return
}
// TestConnection verifies the backend root is writable by writing and then
// removing a small scratch file.
func (b *LocalFileBackend) TestConnection() error {
	testPath := filepath.Join(b.directory, TestFilePath)
	probe := bytes.NewReader([]byte("testingwrite"))
	if _, err := writeFileLocally(probe, testPath); err != nil {
		return errors.Wrap(err, "unable to write to the local filesystem storage")
	}
	os.Remove(testPath)
	mlog.Debug("Able to write files to local storage.")
	return nil
}
// Reader opens the file at path (relative to the backend root) for reading.
func (b *LocalFileBackend) Reader(path string) (ReadCloseSeeker, error) {
	file, err := os.Open(filepath.Join(b.directory, path))
	if err != nil {
		return nil, errors.Wrapf(err, "unable to open file %s", path)
	}
	return file, nil
}

// ReadFile returns the full contents of the file at path.
func (b *LocalFileBackend) ReadFile(path string) ([]byte, error) {
	data, err := ioutil.ReadFile(filepath.Join(b.directory, path))
	if err != nil {
		return nil, errors.Wrapf(err, "unable to read file %s", path)
	}
	return data, nil
}

// FileExists reports whether a file exists at path. A stat failure other
// than "not exist" is returned as an error.
func (b *LocalFileBackend) FileExists(path string) (bool, error) {
	_, err := os.Stat(filepath.Join(b.directory, path))
	switch {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, errors.Wrapf(err, "unable to know if file %s exists", path)
	}
}

// FileSize returns the size in bytes of the file at path.
func (b *LocalFileBackend) FileSize(path string) (int64, error) {
	info, err := os.Stat(filepath.Join(b.directory, path))
	if err != nil {
		return 0, errors.Wrapf(err, "unable to get file size for %s", path)
	}
	return info.Size(), nil
}

// FileModTime returns the last-modification time of the file at path.
func (b *LocalFileBackend) FileModTime(path string) (time.Time, error) {
	info, err := os.Stat(filepath.Join(b.directory, path))
	if err != nil {
		return time.Time{}, errors.Wrapf(err, "unable to get modification time for file %s", path)
	}
	return info.ModTime(), nil
}
// CopyFile duplicates oldPath to newPath; both are relative to the backend
// root.
func (b *LocalFileBackend) CopyFile(oldPath, newPath string) error {
	src := filepath.Join(b.directory, oldPath)
	dst := filepath.Join(b.directory, newPath)
	if err := copyFile(src, dst); err != nil {
		return errors.Wrapf(err, "unable to copy file from %s to %s", oldPath, newPath)
	}
	return nil
}

// MoveFile renames oldPath to newPath, creating the destination directory
// first if needed.
func (b *LocalFileBackend) MoveFile(oldPath, newPath string) error {
	dst := filepath.Join(b.directory, newPath)
	if err := os.MkdirAll(filepath.Dir(dst), 0750); err != nil {
		return errors.Wrapf(err, "unable to create the new destination directory %s", filepath.Dir(newPath))
	}
	if err := os.Rename(filepath.Join(b.directory, oldPath), dst); err != nil {
		return errors.Wrapf(err, "unable to move the file to %s to the destination directory", newPath)
	}
	return nil
}
// WriteFile streams fr into the file at path (relative to the backend root),
// creating parent directories as needed, and returns the bytes written.
func (b *LocalFileBackend) WriteFile(fr io.Reader, path string) (int64, error) {
	return writeFileLocally(fr, filepath.Join(b.directory, path))
}

// writeFileLocally creates (or truncates) the file at the absolute path and
// copies fr into it, returning the byte count.
func writeFileLocally(fr io.Reader, path string) (int64, error) {
	if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil {
		directory, _ := filepath.Abs(filepath.Dir(path))
		return 0, errors.Wrapf(err, "unable to create the directory %s for the file %s", directory, path)
	}
	out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return 0, errors.Wrapf(err, "unable to open the file %s to write the data", path)
	}
	defer out.Close()

	written, copyErr := io.Copy(out, fr)
	if copyErr != nil {
		return written, errors.Wrapf(copyErr, "unable write the data in the file %s", path)
	}
	return written, nil
}
// AppendFile appends the contents of fr to an existing file at path and
// returns the number of bytes written. The file must already exist.
func (b *LocalFileBackend) AppendFile(fr io.Reader, path string) (int64, error) {
	fullPath := filepath.Join(b.directory, path)
	if _, err := os.Stat(fullPath); err != nil {
		return 0, errors.Wrapf(err, "unable to find the file %s to append the data", path)
	}
	out, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_APPEND, 0600)
	if err != nil {
		return 0, errors.Wrapf(err, "unable to open the file %s to append the data", path)
	}
	defer out.Close()

	written, copyErr := io.Copy(out, fr)
	if copyErr != nil {
		return written, errors.Wrapf(copyErr, "unable append the data in the file %s", path)
	}
	return written, nil
}
// RemoveFile deletes the file at path (relative to the backend root).
func (b *LocalFileBackend) RemoveFile(path string) error {
	fullPath := filepath.Join(b.directory, path)
	if err := os.Remove(fullPath); err != nil {
		return errors.Wrapf(err, "unable to remove the file %s", path)
	}
	return nil
}
// basePath: path to get to the file but won't be added to the end result
// path: basePath+path current directory we are looking at
// maxDepth: parameter to prevent infinite recursion, once this is reached we won't look any further
func appendRecursively(basePath, path string, maxDepth int) ([]string, error) {
	// Returned slice is always non-nil, even when the directory is missing.
	found := []string{}

	entries, err := os.ReadDir(filepath.Join(basePath, path))
	if err != nil {
		if os.IsNotExist(err) {
			return found, nil
		}
		return found, errors.Wrapf(err, "unable to list the directory %s", path)
	}

	for _, entry := range entries {
		name := entry.Name()
		childPath := filepath.Join(path, name)
		if name == "." || name == ".." || childPath == path {
			continue
		}
		if !entry.IsDir() {
			found = append(found, childPath)
			continue
		}
		if maxDepth <= 0 {
			mlog.Warn("Max Depth reached", mlog.String("path", childPath))
			found = append(found, childPath)
			continue // we'll ignore it if max depth is reached.
		}
		children, childErr := appendRecursively(basePath, childPath, maxDepth-1)
		if childErr != nil {
			return found, childErr
		}
		found = append(found, children...)
	}
	return found, nil
}
// ListDirectory lists the entries directly under path. It walks with
// maxDepth 0, so subdirectories are returned as entries but not descended
// into.
func (b *LocalFileBackend) ListDirectory(path string) ([]string, error) {
	return appendRecursively(b.directory, path, 0)
}

// ListDirectoryRecursively lists everything under path, descending at most
// 10 directory levels.
func (b *LocalFileBackend) ListDirectoryRecursively(path string) ([]string, error) {
	return appendRecursively(b.directory, path, 10)
}

// RemoveDirectory deletes path and everything below it.
func (b *LocalFileBackend) RemoveDirectory(path string) error {
	if err := os.RemoveAll(filepath.Join(b.directory, path)); err != nil {
		return errors.Wrapf(err, "unable to remove the directory %s", path)
	}
	return nil
}

View File

@ -1,56 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package filestore
import (
"context"
"net/http"
"github.com/minio/minio-go/v7/pkg/credentials"
)
// customTransport is used to point the request to a different server.
// This is helpful in situations where a different service is handling AWS S3 requests
// from multiple Mattermost applications, and the Mattermost service itself does not
// have any S3 credentials.
type customTransport struct {
base http.RoundTripper
host string
scheme string
client http.Client
}
// RoundTrip implements the http.Roundtripper interface.
func (t *customTransport) RoundTrip(req *http.Request) (*http.Response, error) {
// Roundtrippers should not modify the original request.
newReq := req.Clone(context.Background())
*newReq.URL = *req.URL
req.URL.Scheme = t.scheme
req.URL.Host = t.host
return t.client.Do(req)
}
// customProvider is a dummy credentials provider for the minio client to work
// without actually providing credentials. This is needed with a custom transport
// in cases where the minio client does not actually have credentials with itself,
// rather needs responses from another entity.
//
// It satisfies the credentials.Provider interface.
type customProvider struct {
	isSignV2 bool
}

// Retrieve just returns empty credentials, carrying only the requested
// signature type.
func (cp customProvider) Retrieve() (credentials.Value, error) {
	if cp.isSignV2 {
		return credentials.Value{SignerType: credentials.SignatureV2}, nil
	}
	return credentials.Value{SignerType: credentials.SignatureV4}, nil
}

// IsExpired always returns false.
func (cp customProvider) IsExpired() bool { return false }

View File

@ -1,463 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package filestore
import (
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
s3 "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/pkg/errors"
"github.com/mattermost/mattermost-server/v6/shared/mlog"
)
// S3FileBackend contains all necessary information to communicate with
// an AWS S3 compatible API backend.
type S3FileBackend struct {
	endpoint   string
	accessKey  string
	secretKey  string
	secure     bool // use HTTPS
	signV2     bool // use AWS signature v2 instead of v4
	region     string
	bucket     string
	pathPrefix string // prefix prepended to all object keys
	encrypt    bool   // request server-side encryption
	trace      bool   // log all S3 requests/responses
	client     *s3.Client
}

// S3FileBackendAuthError is returned when the backend cannot authenticate
// or access the bucket.
type S3FileBackendAuthError struct {
	DetailedError string
}

// S3FileBackendNoBucketError is returned when testing a connection and no S3 bucket is found
type S3FileBackendNoBucketError struct{}

const (
	// This is not exported by minio. See: https://github.com/minio/minio-go/issues/1339
	bucketNotFound = "NoSuchBucket"
)
var (
	// Recognized image file extensions (all keyed with a leading dot, as
	// produced by filepath.Ext). Bug fix: "tif" previously lacked the dot
	// and therefore never matched.
	imageExtensions = map[string]bool{".jpg": true, ".jpeg": true, ".gif": true, ".bmp": true, ".png": true, ".tiff": true, ".tif": true}
	// Extension -> MIME type. Bug fix: ".tif" previously mapped to the
	// invalid type "image/tif"; the registered type is "image/tiff".
	imageMimeTypes = map[string]string{".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".gif": "image/gif", ".bmp": "image/bmp", ".png": "image/png", ".tiff": "image/tiff", ".tif": "image/tiff"}
)

// isFileExtImage reports whether ext (including the leading dot, e.g.
// ".png") is a recognized image extension. Matching is case-insensitive.
func isFileExtImage(ext string) bool {
	return imageExtensions[strings.ToLower(ext)]
}

// getImageMimeType returns the MIME type for the image extension ext, or
// the generic "image" when the extension is unknown.
func getImageMimeType(ext string) string {
	if mime := imageMimeTypes[strings.ToLower(ext)]; mime != "" {
		return mime
	}
	return "image"
}
// Error implements the error interface for S3FileBackendAuthError.
func (s *S3FileBackendAuthError) Error() string {
	return s.DetailedError
}

// Error implements the error interface for S3FileBackendNoBucketError.
func (s *S3FileBackendNoBucketError) Error() string {
	return "no such bucket"
}
// NewS3FileBackend returns an instance of an S3FileBackend.
func NewS3FileBackend(settings FileBackendSettings) (*S3FileBackend, error) {
	backend := &S3FileBackend{
		endpoint:   settings.AmazonS3Endpoint,
		accessKey:  settings.AmazonS3AccessKeyId,
		secretKey:  settings.AmazonS3SecretAccessKey,
		secure:     settings.AmazonS3SSL,
		signV2:     settings.AmazonS3SignV2,
		region:     settings.AmazonS3Region,
		bucket:     settings.AmazonS3Bucket,
		pathPrefix: settings.AmazonS3PathPrefix,
		encrypt:    settings.AmazonS3SSE,
		trace:      settings.AmazonS3Trace,
	}
	var err error
	if backend.client, err = backend.s3New(); err != nil {
		return nil, err
	}
	return backend, nil
}
// Similar to s3.New() but allows initialization of signature v2 or signature v4 client.
// If signV2 input is false, function always returns signature v4.
//
// Additionally this function also takes a user defined region, if set
// disables automatic region lookup.
func (b *S3FileBackend) s3New() (*s3.Client, error) {
	isCloud := os.Getenv("MM_CLOUD_FILESTORE_BIFROST") != ""

	// Pick the credential source: the Bifrost custom provider in cloud,
	// IAM when no static keys are configured, otherwise static keys with
	// the requested signature version.
	var creds *credentials.Credentials
	switch {
	case isCloud:
		creds = credentials.New(customProvider{isSignV2: b.signV2})
	case b.accessKey == "" && b.secretKey == "":
		creds = credentials.NewIAM("")
	case b.signV2:
		creds = credentials.NewStatic(b.accessKey, b.secretKey, "", credentials.SignatureV2)
	default:
		creds = credentials.NewStatic(b.accessKey, b.secretKey, "", credentials.SignatureV4)
	}

	opts := s3.Options{
		Creds:  creds,
		Secure: b.secure,
		Region: b.region,
	}

	// If this is a cloud installation, we override the default transport.
	if isCloud {
		tr, err := s3.DefaultTransport(b.secure)
		if err != nil {
			return nil, err
		}
		scheme := "http"
		if b.secure {
			scheme = "https"
		}
		opts.Transport = &customTransport{
			base:   tr,
			host:   b.endpoint,
			scheme: scheme,
		}
	}

	client, err := s3.New(b.endpoint, &opts)
	if err != nil {
		return nil, err
	}
	if b.trace {
		client.TraceOn(os.Stdout)
	}

	return client, nil
}
// TestConnection verifies that the backend can reach its bucket.
// It returns an S3FileBackendAuthError on access problems and an
// S3FileBackendNoBucketError when the bucket does not exist.
func (b *S3FileBackend) TestConnection() error {
	bucketFound := true

	// If a path prefix is present, we attempt to test the bucket by listing objects under the path
	// and just checking the first response. This is because the BucketExists call is only at a bucket level
	// and sometimes the user might only be allowed access to the specified path prefix.
	if b.pathPrefix != "" {
		first := <-b.client.ListObjects(context.Background(), b.bucket, s3.ListObjectsOptions{Prefix: b.pathPrefix})
		if first.Err != nil {
			if s3.ToErrorResponse(first.Err).Code != bucketNotFound {
				return &S3FileBackendAuthError{DetailedError: "unable to list objects in the S3 bucket"}
			}
			bucketFound = false
		}
	} else {
		found, err := b.client.BucketExists(context.Background(), b.bucket)
		if err != nil {
			return &S3FileBackendAuthError{DetailedError: "unable to check if the S3 bucket exists"}
		}
		bucketFound = found
	}

	if !bucketFound {
		return &S3FileBackendNoBucketError{}
	}
	mlog.Debug("Connection to S3 or minio is good. Bucket exists.")
	return nil
}
// MakeBucket creates the backend's configured bucket in its configured region.
func (b *S3FileBackend) MakeBucket() error {
	opts := s3.MakeBucketOptions{Region: b.region}
	if err := b.client.MakeBucket(context.Background(), b.bucket, opts); err != nil {
		return errors.Wrap(err, "unable to create the s3 bucket")
	}
	return nil
}
// Reader returns a streaming handle for the object at path (relative to
// the backend's path prefix).
// Caller must close the first return value.
func (b *S3FileBackend) Reader(path string) (ReadCloseSeeker, error) {
	fullPath := filepath.Join(b.pathPrefix, path)
	obj, err := b.client.GetObject(context.Background(), b.bucket, fullPath, s3.GetObjectOptions{})
	if err != nil {
		return nil, errors.Wrapf(err, "unable to open file %s", fullPath)
	}
	return obj, nil
}
// ReadFile returns the entire contents of the object at path (relative to
// the backend's path prefix).
func (b *S3FileBackend) ReadFile(path string) ([]byte, error) {
	fullPath := filepath.Join(b.pathPrefix, path)

	obj, err := b.client.GetObject(context.Background(), b.bucket, fullPath, s3.GetObjectOptions{})
	if err != nil {
		return nil, errors.Wrapf(err, "unable to open file %s", fullPath)
	}
	defer obj.Close()

	data, err := ioutil.ReadAll(obj)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to read file %s", fullPath)
	}
	return data, nil
}
// FileExists reports whether an object exists at path (relative to the
// backend's path prefix). A "NoSuchKey" response is mapped to (false, nil);
// any other StatObject failure is returned as an error.
func (b *S3FileBackend) FileExists(path string) (bool, error) {
	path = filepath.Join(b.pathPrefix, path)

	_, err := b.client.StatObject(context.Background(), b.bucket, path, s3.StatObjectOptions{})
	if err == nil {
		return true, nil
	}

	// Use the boolean result of errors.As directly; the original discarded it
	// in an `if ...;` initializer and relied on the zero-value ErrorResponse.
	var s3Err s3.ErrorResponse
	if errors.As(err, &s3Err) && s3Err.Code == "NoSuchKey" {
		return false, nil
	}

	return false, errors.Wrapf(err, "unable to know if file %s exists", path)
}
// FileSize returns the size in bytes of the object at path (relative to
// the backend's path prefix).
func (b *S3FileBackend) FileSize(path string) (int64, error) {
	fullPath := filepath.Join(b.pathPrefix, path)
	stat, err := b.client.StatObject(context.Background(), b.bucket, fullPath, s3.StatObjectOptions{})
	if err != nil {
		return 0, errors.Wrapf(err, "unable to get file size for %s", fullPath)
	}
	return stat.Size, nil
}
// FileModTime returns the last-modified timestamp of the object at path
// (relative to the backend's path prefix).
func (b *S3FileBackend) FileModTime(path string) (time.Time, error) {
	fullPath := filepath.Join(b.pathPrefix, path)
	stat, err := b.client.StatObject(context.Background(), b.bucket, fullPath, s3.StatObjectOptions{})
	if err != nil {
		return time.Time{}, errors.Wrapf(err, "unable to get modification time for file %s", fullPath)
	}
	return stat.LastModified, nil
}
// CopyFile server-side copies the object at oldPath to newPath (both relative
// to the backend's path prefix), applying SSE to source and destination when
// encryption is enabled.
func (b *S3FileBackend) CopyFile(oldPath, newPath string) error {
	srcOpts := s3.CopySrcOptions{
		Bucket: b.bucket,
		Object: filepath.Join(b.pathPrefix, oldPath),
	}
	dstOpts := s3.CopyDestOptions{
		Bucket: b.bucket,
		Object: filepath.Join(b.pathPrefix, newPath),
	}
	if b.encrypt {
		srcOpts.Encryption = encrypt.NewSSE()
		dstOpts.Encryption = encrypt.NewSSE()
	}

	if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
		return errors.Wrapf(err, "unable to copy file from %s to %s", srcOpts.Object, dstOpts.Object)
	}
	return nil
}
// MoveFile moves the object at oldPath to newPath (both relative to the
// backend's path prefix). S3 has no native rename, so the object is copied
// server-side first and the original is removed only after the copy succeeds.
// SSE is applied to both source and destination when encryption is enabled.
func (b *S3FileBackend) MoveFile(oldPath, newPath string) error {
	oldPath = filepath.Join(b.pathPrefix, oldPath)
	newPath = filepath.Join(b.pathPrefix, newPath)

	srcOpts := s3.CopySrcOptions{
		Bucket: b.bucket,
		Object: oldPath,
	}
	if b.encrypt {
		srcOpts.Encryption = encrypt.NewSSE()
	}

	dstOpts := s3.CopyDestOptions{
		Bucket: b.bucket,
		Object: newPath,
	}
	if b.encrypt {
		dstOpts.Encryption = encrypt.NewSSE()
	}

	if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
		return errors.Wrapf(err, "unable to copy the file to %s to the new destination", newPath)
	}

	if err := b.client.RemoveObject(context.Background(), b.bucket, oldPath, s3.RemoveObjectOptions{}); err != nil {
		// Message corrected from the garbled "unable to remove the file old file".
		return errors.Wrapf(err, "unable to remove the old file %s", oldPath)
	}

	return nil
}
// WriteFile streams fr into the object at path (relative to the backend's
// path prefix) and returns the number of bytes written. Image extensions get
// their matching Content-Type; everything else is uploaded as a binary blob.
func (b *S3FileBackend) WriteFile(fr io.Reader, path string) (int64, error) {
	path = filepath.Join(b.pathPrefix, path)

	contentType := "binary/octet-stream"
	if ext := filepath.Ext(path); isFileExtImage(ext) {
		contentType = getImageMimeType(ext)
	}

	options := s3PutOptions(b.encrypt, contentType)
	info, err := b.client.PutObject(context.Background(), b.bucket, path, fr, -1, options)
	if err != nil {
		return info.Size, errors.Wrapf(err, "unable write the data in the file %s", path)
	}
	return info.Size, nil
}
// AppendFile appends data read from fr to the existing object at path and
// returns the number of bytes appended. S3 has no native append, so the new
// data is uploaded to a temporary "<path>.part" object and then server-side
// composed with the original object.
func (b *S3FileBackend) AppendFile(fr io.Reader, path string) (int64, error) {
	fp := filepath.Join(b.pathPrefix, path)
	// The target object must already exist; appending to a missing file is an error.
	if _, err := b.client.StatObject(context.Background(), b.bucket, fp, s3.StatObjectOptions{}); err != nil {
		return 0, errors.Wrapf(err, "unable to find the file %s to append the data", path)
	}
	var contentType string
	if ext := filepath.Ext(fp); isFileExtImage(ext) {
		contentType = getImageMimeType(ext)
	} else {
		contentType = "binary/octet-stream"
	}
	options := s3PutOptions(b.encrypt, contentType)
	// Capture the SSE setting so the compose step encrypts its destination
	// the same way the part upload did.
	sse := options.ServerSideEncryption
	partName := fp + ".part"
	// Upload the new data as a temporary part object; the part is removed on
	// return regardless of outcome (best-effort cleanup, error ignored).
	info, err := b.client.PutObject(context.Background(), b.bucket, partName, fr, -1, options)
	defer b.client.RemoveObject(context.Background(), b.bucket, partName, s3.RemoveObjectOptions{})
	// NOTE(review): err from PutObject is only consulted when no bytes were
	// written; a partial upload with info.Size > 0 proceeds to compose.
	if info.Size > 0 {
		src1Opts := s3.CopySrcOptions{
			Bucket: b.bucket,
			Object: fp,
		}
		src2Opts := s3.CopySrcOptions{
			Bucket: b.bucket,
			Object: partName,
		}
		dstOpts := s3.CopyDestOptions{
			Bucket: b.bucket,
			Object: fp,
			Encryption: sse,
		}
		// Concatenate original + part back into the original object key.
		_, err = b.client.ComposeObject(context.Background(), dstOpts, src1Opts, src2Opts)
		if err != nil {
			return 0, errors.Wrapf(err, "unable append the data in the file %s", path)
		}
		return info.Size, nil
	}
	// NOTE(review): if PutObject wrote nothing and returned a nil error, the
	// Wrapf of nil yields (0, nil) — confirm callers treat that as success.
	return 0, errors.Wrapf(err, "unable append the data in the file %s", path)
}
// RemoveFile deletes the object at path (relative to the backend's path prefix).
func (b *S3FileBackend) RemoveFile(path string) error {
	fullPath := filepath.Join(b.pathPrefix, path)
	err := b.client.RemoveObject(context.Background(), b.bucket, fullPath, s3.RemoveObjectOptions{})
	if err != nil {
		return errors.Wrapf(err, "unable to remove the file %s", fullPath)
	}
	return nil
}
// getPathsFromObjectInfos forwards every ObjectInfo from in onto a fresh
// channel, closing the output once in is exhausted. It exists to adapt a
// listing channel into the shape RemoveObjects expects.
func getPathsFromObjectInfos(in <-chan s3.ObjectInfo) <-chan s3.ObjectInfo {
	out := make(chan s3.ObjectInfo, 1)
	go func() {
		defer close(out)
		// range terminates when the listing channel is closed.
		for info := range in {
			out <- info
		}
	}()
	return out
}
// listDirectory returns the object keys under path, optionally recursing into
// sub-prefixes. Keys are returned with the backend's path prefix stripped and
// surrounding slashes trimmed.
func (b *S3FileBackend) listDirectory(path string, recursive bool) ([]string, error) {
	prefix := filepath.Join(b.pathPrefix, path)
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		// s3Clnt returns only the path itself when "/" is not present
		// appending "/" to make it consistent across all filestores
		prefix += "/"
	}

	opts := s3.ListObjectsOptions{
		Prefix:    prefix,
		Recursive: recursive,
	}

	var paths []string
	for object := range b.client.ListObjects(context.Background(), b.bucket, opts) {
		if object.Err != nil {
			return nil, errors.Wrapf(object.Err, "unable to list the directory %s", prefix)
		}
		// We strip the path prefix that gets applied,
		// so that it remains transparent to the application.
		key := strings.Trim(strings.TrimPrefix(object.Key, b.pathPrefix), "/")
		if key != "" {
			paths = append(paths, key)
		}
	}
	return paths, nil
}
// ListDirectory returns the keys directly under path (non-recursive).
func (b *S3FileBackend) ListDirectory(path string) ([]string, error) {
	return b.listDirectory(path, false)
}
// ListDirectoryRecursively returns all keys under path, descending into sub-prefixes.
func (b *S3FileBackend) ListDirectoryRecursively(path string) ([]string, error) {
	return b.listDirectory(path, true)
}
// RemoveDirectory deletes every object whose key starts with path (relative
// to the backend's path prefix), streaming the listing into a bulk remove.
func (b *S3FileBackend) RemoveDirectory(path string) error {
	listOpts := s3.ListObjectsOptions{
		Prefix:    filepath.Join(b.pathPrefix, path),
		Recursive: true,
	}
	objects := b.client.ListObjects(context.Background(), b.bucket, listOpts)
	results := b.client.RemoveObjects(context.Background(), b.bucket, getPathsFromObjectInfos(objects), s3.RemoveObjectsOptions{})
	// Fail on the first reported per-object removal error.
	for res := range results {
		if res.Err != nil {
			return errors.Wrapf(res.Err, "unable to remove the directory %s", path)
		}
	}
	return nil
}
// s3PutOptions builds the PutObject options shared by all uploads, applying
// SSE when encryption is requested and the given Content-Type.
func s3PutOptions(encrypted bool, contentType string) s3.PutObjectOptions {
	options := s3.PutObjectOptions{
		ContentType: contentType,
		// We set the part size to the minimum allowed value of 5MBs
		// to avoid an excessive allocation in minio.PutObject implementation.
		PartSize: 1024 * 1024 * 5,
	}
	if encrypted {
		options.ServerSideEncryption = encrypt.NewSSE()
	}
	return options
}

View File

@ -1,78 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package markdown
// Inspect traverses the markdown tree in depth-first order. If f returns true, Inspect invokes f
// recursively for each child of the block or inline, followed by a call of f(nil).
func Inspect(markdown string, f func(interface{}) bool) {
	document, referenceDefinitions := Parse(markdown)
	InspectBlock(document, func(block Block) bool {
		if !f(block) {
			return false
		}
		// Only paragraphs carry inline content to descend into.
		if paragraph, ok := block.(*Paragraph); ok {
			for _, inline := range MergeInlineText(paragraph.ParseInlines(referenceDefinitions)) {
				InspectInline(inline, func(inline Inline) bool {
					return f(inline)
				})
			}
		}
		return true
	})
}
// InspectBlock traverses the blocks in depth-first order, starting with block. If f returns true,
// InspectBlock invokes f recursively for each child of the block, followed by a call of f(nil).
func InspectBlock(block Block, f func(Block) bool) {
	if !f(block) {
		return
	}
	// Only the container block types below have children to descend into;
	// any other block type falls straight through to the f(nil) call.
	switch v := block.(type) {
	case *Document:
		for _, child := range v.Children {
			InspectBlock(child, f)
		}
	case *List:
		for _, child := range v.Children {
			InspectBlock(child, f)
		}
	case *ListItem:
		for _, child := range v.Children {
			InspectBlock(child, f)
		}
	case *BlockQuote:
		for _, child := range v.Children {
			InspectBlock(child, f)
		}
	}
	// Signal that this block's subtree has been fully visited.
	f(nil)
}
// InspectInline traverses the blocks in depth-first order, starting with block. If f returns true,
// InspectInline invokes f recursively for each child of the block, followed by a call of f(nil).
func InspectInline(inline Inline, f func(Inline) bool) {
	if !f(inline) {
		return
	}
	// Only the container inline types below have children to descend into;
	// any other inline type falls straight through to the f(nil) call.
	switch v := inline.(type) {
	case *InlineImage:
		for _, child := range v.Children {
			InspectInline(child, f)
		}
	case *InlineLink:
		for _, child := range v.Children {
			InspectInline(child, f)
		}
	case *ReferenceImage:
		for _, child := range v.Children {
			InspectInline(child, f)
		}
	case *ReferenceLink:
		for _, child := range v.Children {
			InspectInline(child, f)
		}
	}
	// Signal that this inline's subtree has been fully visited.
	f(nil)
}

View File

@ -1,23 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
import (
"context"
)
// GraphQLLogger is used to log panics that occur during query execution.
type GraphQLLogger struct {
	logger *Logger // destination for the panic records
}
// NewGraphQLLogger wraps the given logger for use as a graphql/log.Logger.
func NewGraphQLLogger(logger *Logger) *GraphQLLogger {
	return &GraphQLLogger{logger: logger}
}
// LogPanic satisfies the graphql/log.Logger interface.
// It converts the panic into an error.
// The context argument is unused.
func (l *GraphQLLogger) LogPanic(_ context.Context, value interface{}) {
	l.logger.Error("Error while executing GraphQL query", Any("error", value))
}

View File

@ -1,79 +0,0 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mlog
import (
"bytes"
"io"
"os"
"sync"
"github.com/mattermost/logr/v2"
"github.com/mattermost/logr/v2/formatters"
"github.com/mattermost/logr/v2/targets"
)
// AddWriterTarget adds a simple io.Writer target to an existing Logger.
// The `io.Writer` can be a buffer which is useful for testing.
// When adding a buffer to collect logs make sure to use `mlog.Buffer` which is
// a thread safe version of `bytes.Buffer`.
func AddWriterTarget(logger *Logger, w io.Writer, useJSON bool, levels ...Level) error {
	var formatter logr.Formatter = &formatters.Plain{EnableCaller: true}
	if useJSON {
		formatter = &formatters.JSON{EnableCaller: true}
	}

	filter := logr.NewCustomFilter(levels...)
	target := targets.NewWriterTarget(w)
	return logger.log.Logr().AddTarget(target, "_testWriter", filter, formatter, 1000)
}
// CreateConsoleTestLogger creates a logger for unit tests. Log records are
// output to `os.Stdout` at the given level, with stack traces from LvlPanic up.
// Construction failures are fatal for the test process, hence the panic.
func CreateConsoleTestLogger(useJSON bool, level Level) *Logger {
	logger, _ := NewLogger()

	var formatter logr.Formatter = &formatters.Plain{EnableCaller: true}
	if useJSON {
		formatter = &formatters.JSON{EnableCaller: true}
	}

	filter := logr.StdFilter{
		Lvl:        level,
		Stacktrace: LvlPanic,
	}

	target := targets.NewWriterTarget(os.Stdout)
	if err := logger.log.Logr().AddTarget(target, "_testcon", filter, formatter, 1000); err != nil {
		panic(err)
	}
	return logger
}
// Buffer provides a thread-safe buffer useful for logging to memory in unit tests.
// All methods serialize access to the underlying bytes.Buffer with a mutex.
type Buffer struct {
	mu  sync.Mutex
	buf bytes.Buffer
}

// Read drains up to len(p) bytes from the buffer into p.
func (b *Buffer) Read(p []byte) (n int, err error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.buf.Read(p)
}

// Write appends p to the buffer.
func (b *Buffer) Write(p []byte) (n int, err error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.buf.Write(p)
}

// String returns the unread contents of the buffer as a string.
func (b *Buffer) String() string {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.buf.String()
}

View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -24,12 +24,12 @@ type AccessData struct {
}
type AccessResponse struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
ExpiresIn int32 `json:"expires_in"`
Scope string `json:"scope"`
RefreshToken string `json:"refresh_token"`
IdToken string `json:"id_token"`
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
ExpiresInSeconds int32 `json:"expires_in"`
Scope string `json:"scope"`
RefreshToken string `json:"refresh_token"`
IdToken string `json:"id_token"`
}
// IsValid validates the AccessData and returns an error if it isn't configured
@ -59,7 +59,6 @@ func (ad *AccessData) IsValid() *AppError {
}
func (ad *AccessData) IsExpired() bool {
if ad.ExpiresAt <= 0 {
return false
}

View File

@ -10,7 +10,7 @@ import (
)
// AuditModelTypeConv converts key model types to something better suited for audit output.
func AuditModelTypeConv(val interface{}) (newVal interface{}, converted bool) {
func AuditModelTypeConv(val any) (newVal any, converted bool) {
if val == nil {
return nil, false
}

View File

@ -36,7 +36,6 @@ type AuthorizeRequest struct {
// IsValid validates the AuthData and returns an error if it isn't configured
// correctly.
func (ad *AuthData) IsValid() *AppError {
if !IsValidId(ad.ClientId) {
return NewAppError("AuthData.IsValid", "model.authorize.is_valid.client_id.app_error", nil, "", http.StatusBadRequest)
}
@ -75,7 +74,6 @@ func (ad *AuthData) IsValid() *AppError {
// IsValid validates the AuthorizeRequest and returns an error if it isn't configured
// correctly.
func (ar *AuthorizeRequest) IsValid() *AppError {
if !IsValidId(ar.ClientId) {
return NewAppError("AuthData.IsValid", "model.authorize.is_valid.client_id.app_error", nil, "", http.StatusBadRequest)
}

View File

@ -33,6 +33,20 @@ type Bot struct {
DeleteAt int64 `json:"delete_at"`
}
func (b *Bot) Auditable() map[string]interface{} {
return map[string]interface{}{
"user_id": b.UserId,
"username": b.Username,
"display_name": b.DisplayName,
"description": b.Description,
"owner_id": b.OwnerId,
"last_icon_update": b.LastIconUpdate,
"create_at": b.CreateAt,
"update_at": b.UpdateAt,
"delete_at": b.DeleteAt,
}
}
// BotPatch is a description of what fields to update on an existing bot.
type BotPatch struct {
Username *string `json:"username"`
@ -40,6 +54,14 @@ type BotPatch struct {
Description *string `json:"description"`
}
func (b *BotPatch) Auditable() map[string]interface{} {
return map[string]interface{}{
"username": b.Username,
"display_name": b.DisplayName,
"description": b.Description,
}
}
// BotGetOptions acts as a filter on bulk bot fetching queries.
type BotGetOptions struct {
OwnerId string
@ -53,14 +75,14 @@ type BotGetOptions struct {
type BotList []*Bot
// Trace describes the minimum information required to identify a bot for the purpose of logging.
func (b *Bot) Trace() map[string]interface{} {
return map[string]interface{}{"user_id": b.UserId}
func (b *Bot) Trace() map[string]any {
return map[string]any{"user_id": b.UserId}
}
// Clone returns a shallow copy of the bot.
func (b *Bot) Clone() *Bot {
copy := *b
return &copy
bCopy := *b
return &bCopy
}
// IsValidCreate validates bot for Create call. This skips validations of fields that are auto-filled on Create
@ -175,15 +197,14 @@ func BotFromUser(u *User) *Bot {
// Etag computes the etag for a list of bots.
func (l *BotList) Etag() string {
id := "0"
var t int64 = 0
var delta int64 = 0
var t int64
var delta int64
for _, v := range *l {
if v.UpdateAt > t {
t = v.UpdateAt
id = v.UserId
}
}
return Etag(id, t, delta, len(*l))
@ -191,8 +212,8 @@ func (l *BotList) Etag() string {
// MakeBotNotFoundError creates the error returned when a bot does not exist, or when the user isn't allowed to query the bot.
// The errors must the same in both cases to avoid leaking that a user is a bot.
func MakeBotNotFoundError(userId string) *AppError {
return NewAppError("SqlBotStore.Get", "store.sql_bot.get.missing.app_error", map[string]interface{}{"user_id": userId}, "", http.StatusNotFound)
func MakeBotNotFoundError(where, userId string) *AppError {
return NewAppError(where, "store.sql_bot.get.missing.app_error", map[string]any{"user_id": userId}, "", http.StatusNotFound)
}
func IsBotDMChannel(channel *Channel, botUserID string) bool {

View File

@ -8,6 +8,9 @@ package model
const ExportDataDir = "data"
type BulkExportOpts struct {
IncludeAttachments bool
CreateArchive bool
IncludeAttachments bool
IncludeProfilePictures bool
IncludeArchivedChannels bool
IncludeRolesAndSchemes bool
CreateArchive bool
}

View File

@ -4,7 +4,7 @@
package model
import (
"github.com/mattermost/mattermost-server/v6/shared/mlog"
"github.com/mattermost/mattermost/server/public/shared/mlog"
)
type BundleInfo struct {

View File

@ -7,9 +7,9 @@ import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"io"
"net/http"
"regexp"
"sort"
"strings"
"unicode/utf8"
@ -38,27 +38,52 @@ const (
)
type Channel struct {
Id string `json:"id"`
CreateAt int64 `json:"create_at"`
UpdateAt int64 `json:"update_at"`
DeleteAt int64 `json:"delete_at"`
TeamId string `json:"team_id"`
Type ChannelType `json:"type"`
DisplayName string `json:"display_name"`
Name string `json:"name"`
Header string `json:"header"`
Purpose string `json:"purpose"`
LastPostAt int64 `json:"last_post_at"`
TotalMsgCount int64 `json:"total_msg_count"`
ExtraUpdateAt int64 `json:"extra_update_at"`
CreatorId string `json:"creator_id"`
SchemeId *string `json:"scheme_id"`
Props map[string]interface{} `json:"props"`
GroupConstrained *bool `json:"group_constrained"`
Shared *bool `json:"shared"`
TotalMsgCountRoot int64 `json:"total_msg_count_root"`
PolicyID *string `json:"policy_id"`
LastRootPostAt int64 `json:"last_root_post_at"`
Id string `json:"id"`
CreateAt int64 `json:"create_at"`
UpdateAt int64 `json:"update_at"`
DeleteAt int64 `json:"delete_at"`
TeamId string `json:"team_id"`
Type ChannelType `json:"type"`
DisplayName string `json:"display_name"`
Name string `json:"name"`
Header string `json:"header"`
Purpose string `json:"purpose"`
LastPostAt int64 `json:"last_post_at"`
TotalMsgCount int64 `json:"total_msg_count"`
ExtraUpdateAt int64 `json:"extra_update_at"`
CreatorId string `json:"creator_id"`
SchemeId *string `json:"scheme_id"`
Props map[string]any `json:"props"`
GroupConstrained *bool `json:"group_constrained"`
Shared *bool `json:"shared"`
TotalMsgCountRoot int64 `json:"total_msg_count_root"`
PolicyID *string `json:"policy_id"`
LastRootPostAt int64 `json:"last_root_post_at"`
}
func (o *Channel) Auditable() map[string]interface{} {
return map[string]interface{}{
"create_at": o.CreateAt,
"creator_id": o.CreatorId,
"delete_at": o.DeleteAt,
"extra_group_at": o.ExtraUpdateAt,
"group_constrained": o.GroupConstrained,
"id": o.Id,
"last_post_at": o.LastPostAt,
"last_root_post_at": o.LastRootPostAt,
"policy_id": o.PolicyID,
"props": o.Props,
"scheme_id": o.SchemeId,
"shared": o.Shared,
"team_id": o.TeamId,
"total_msg_count_root": o.TotalMsgCountRoot,
"type": o.Type,
"update_at": o.UpdateAt,
}
}
// LogClone returns the loggable representation of the channel, reusing the
// audit field set produced by Auditable.
func (o *Channel) LogClone() any {
	return o.Auditable()
}
type ChannelWithTeamData struct {
@ -81,6 +106,14 @@ type ChannelPatch struct {
GroupConstrained *bool `json:"group_constrained"`
}
// Auditable returns the ChannelPatch fields recorded in audit logs.
// Name and DisplayName patches are not included.
// Uses `any` for consistency with the rest of the file.
func (c *ChannelPatch) Auditable() map[string]any {
	return map[string]any{
		"header":            c.Header,
		"group_constrained": c.GroupConstrained,
		"purpose":           c.Purpose,
	}
}
type ChannelForExport struct {
Channel
TeamName string
@ -112,6 +145,13 @@ type ChannelModerationPatch struct {
Roles *ChannelModeratedRolesPatch `json:"roles"`
}
// Auditable returns the ChannelModerationPatch fields recorded in audit logs.
// Uses `any` for consistency with the rest of the file.
func (c *ChannelModerationPatch) Auditable() map[string]any {
	return map[string]any{
		"name":  c.Name,
		"roles": c.Roles,
	}
}
type ChannelModeratedRolesPatch struct {
Guests *bool `json:"guests"`
Members *bool `json:"members"`
@ -123,14 +163,14 @@ type ChannelModeratedRolesPatch struct {
// ExcludeDefaultChannels will exclude the configured default channels (ex 'town-square' and 'off-topic').
// IncludeDeleted will include channel records where DeleteAt != 0.
// ExcludeChannelNames will exclude channels from the results by name.
// IncludeSearchById will include searching matches against channel IDs in the results
// Paginate whether to paginate the results.
// Page page requested, if results are paginated.
// PerPage number of results per page, if paginated.
//
type ChannelSearchOpts struct {
NotAssociatedToGroup string
ExcludeDefaultChannels bool
IncludeDeleted bool
IncludeDeleted bool // If true, deleted channels will be included in the results.
Deleted bool
ExcludeChannelNames []string
TeamIds []string
@ -139,11 +179,12 @@ type ChannelSearchOpts struct {
PolicyID string
ExcludePolicyConstrained bool
IncludePolicyID bool
IncludeSearchById bool
Public bool
Private bool
Page *int
PerPage *int
LastDeleteAt int
LastDeleteAt int // When combined with IncludeDeleted, only channels deleted after this time will be returned.
LastUpdateAt int
}
@ -155,43 +196,20 @@ type ChannelMemberCountByGroup struct {
// ChannelOption mutates a Channel during construction (functional option).
type ChannelOption func(channel *Channel)

// gmNameRegex matches group-message channel names: 40 lowercase hex chars —
// presumably the hash of the member user IDs produced by
// GetGroupNameFromUserIds; confirm against that helper.
var gmNameRegex = regexp.MustCompile("^[a-f0-9]{40}$")

// WithID is a ChannelOption that sets the channel's ID.
func WithID(ID string) ChannelOption {
	return func(channel *Channel) {
		channel.Id = ID
	}
}
// The following are GraphQL accessors that return the data as float64.
// The GraphQL spec doesn't support 64-bit integers, so we have to pass the
// data as float64. The trailing underscore is a hack to keep the attribute
// name the same in the GraphQL schema.

// CreateAt_ returns CreateAt as a float64 for GraphQL.
func (o *Channel) CreateAt_() float64 {
	return float64(o.CreateAt)
}

// UpdateAt_ returns UpdateAt as a float64 for GraphQL.
func (o *Channel) UpdateAt_() float64 {
	return float64(o.UpdateAt)
}

// DeleteAt_ returns DeleteAt as a float64 for GraphQL.
func (o *Channel) DeleteAt_() float64 {
	return float64(o.DeleteAt)
}

// LastPostAt_ returns LastPostAt as a float64 for GraphQL.
func (o *Channel) LastPostAt_() float64 {
	return float64(o.LastPostAt)
}

// TotalMsgCount_ returns TotalMsgCount as a float64 for GraphQL.
func (o *Channel) TotalMsgCount_() float64 {
	return float64(o.TotalMsgCount)
}
func (o *Channel) DeepCopy() *Channel {
copy := *o
if copy.SchemeId != nil {
copy.SchemeId = NewString(*o.SchemeId)
cCopy := *o
if cCopy.SchemeId != nil {
cCopy.SchemeId = NewString(*o.SchemeId)
}
return &copy
return &cCopy
}
func (o *Channel) Etag() string {
@ -235,9 +253,11 @@ func (o *Channel) IsValid() *AppError {
return NewAppError("Channel.IsValid", "model.channel.is_valid.creator_id.app_error", nil, "", http.StatusBadRequest)
}
userIds := strings.Split(o.Name, "__")
if o.Type != ChannelTypeDirect && len(userIds) == 2 && IsValidId(userIds[0]) && IsValidId(userIds[1]) {
return NewAppError("Channel.IsValid", "model.channel.is_valid.name.app_error", nil, "", http.StatusBadRequest)
if o.Type != ChannelTypeDirect && o.Type != ChannelTypeGroup {
userIds := strings.Split(o.Name, "__")
if ok := gmNameRegex.MatchString(o.Name); ok || (o.Type != ChannelTypeDirect && len(userIds) == 2 && IsValidId(userIds[0]) && IsValidId(userIds[1])) {
return NewAppError("Channel.IsValid", "model.channel.is_valid.name.app_error", nil, "", http.StatusBadRequest)
}
}
return nil
@ -250,8 +270,9 @@ func (o *Channel) PreSave() {
o.Name = SanitizeUnicode(o.Name)
o.DisplayName = SanitizeUnicode(o.DisplayName)
o.CreateAt = GetMillis()
if o.CreateAt == 0 {
o.CreateAt = GetMillis()
}
o.UpdateAt = o.CreateAt
o.ExtraUpdateAt = 0
}
@ -294,11 +315,11 @@ func (o *Channel) Patch(patch *ChannelPatch) {
// MakeNonNil ensures Props is a usable (non-nil) map so callers can write to
// it without a nil-map panic.
// Note: this span previously contained both the old (interface{}) and new
// (any) assignment lines from the diff; reconstructed to the `any` version.
func (o *Channel) MakeNonNil() {
	if o.Props == nil {
		o.Props = make(map[string]any)
	}
}
func (o *Channel) AddProp(key string, value interface{}) {
func (o *Channel) AddProp(key string, value any) {
o.MakeNonNil()
o.Props[key] = value
@ -318,6 +339,9 @@ func (o *Channel) GetOtherUserIdForDM(userId string) string {
}
userIds := strings.Split(o.Name, "__")
if len(userIds) != 2 {
return ""
}
var otherUserId string
@ -332,24 +356,10 @@ func (o *Channel) GetOtherUserIdForDM(userId string) string {
return otherUserId
}
// ImplementsGraphQLType reports whether ChannelType backs the GraphQL
// "ChannelType" scalar.
func (ChannelType) ImplementsGraphQLType(name string) bool {
	return name == "ChannelType"
}

// MarshalJSON encodes the channel type as a plain JSON string.
func (t ChannelType) MarshalJSON() ([]byte, error) {
	return json.Marshal(string(t))
}

// UnmarshalGraphQL decodes a GraphQL string input into a ChannelType.
// Any string value is accepted; validation is left to the caller.
func (t *ChannelType) UnmarshalGraphQL(input interface{}) error {
	chType, ok := input.(string)
	if !ok {
		return errors.New("wrong type")
	}

	*t = ChannelType(chType)
	return nil
}
func GetDMNameFromIds(userId1, userId2 string) string {
if userId1 > userId2 {
return userId2 + "__" + userId1
@ -384,3 +394,10 @@ func GetGroupNameFromUserIds(userIds []string) string {
return hex.EncodeToString(h.Sum(nil))
}
// GroupMessageConversionRequestBody is the request payload for group-message
// channel conversion, carrying the target channel/team and the new
// name/display name.
type GroupMessageConversionRequestBody struct {
	ChannelID   string `json:"channel_id"`
	TeamID      string `json:"team_id"`
	Name        string `json:"name"`
	DisplayName string `json:"display_name"`
}

View File

@ -0,0 +1,322 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
import (
"net/http"
)
// ChannelBookmarkType discriminates between link bookmarks and file bookmarks.
type ChannelBookmarkType string

const (
	ChannelBookmarkLink ChannelBookmarkType = "link"
	ChannelBookmarkFile ChannelBookmarkType = "file"
	// BookmarkFileOwner is the owner value used for bookmark-attached files.
	BookmarkFileOwner = "bookmark"
	// MaxBookmarksPerChannel caps the number of bookmarks a channel may hold.
	MaxBookmarksPerChannel = 50
)

// ChannelBookmark is a saved link or file pinned to a channel.
type ChannelBookmark struct {
	Id          string `json:"id"`
	CreateAt    int64  `json:"create_at"`
	UpdateAt    int64  `json:"update_at"`
	DeleteAt    int64  `json:"delete_at"`
	ChannelId   string `json:"channel_id"`
	OwnerId     string `json:"owner_id"`
	FileId      string `json:"file_id"`
	DisplayName string `json:"display_name"`
	SortOrder   int64  `json:"sort_order"`
	LinkUrl     string `json:"link_url,omitempty"`  // set for link bookmarks
	ImageUrl    string `json:"image_url,omitempty"` // optional, link bookmarks only
	Emoji       string `json:"emoji,omitempty"`
	Type        ChannelBookmarkType `json:"type"`
	// OriginalId points at the bookmark this one was derived from (see SetOriginal).
	OriginalId string `json:"original_id,omitempty"`
	ParentId   string `json:"parent_id,omitempty"`
}
// Auditable returns the bookmark fields recorded in audit logs. DisplayName,
// URLs, emoji and sort order are not included.
// Uses `any` for consistency with the rest of the file.
func (o *ChannelBookmark) Auditable() map[string]any {
	return map[string]any{
		"id":          o.Id,
		"create_at":   o.CreateAt,
		"update_at":   o.UpdateAt,
		"delete_at":   o.DeleteAt,
		"channel_id":  o.ChannelId,
		"owner_id":    o.OwnerId,
		"file_id":     o.FileId,
		"type":        o.Type,
		"original_id": o.OriginalId,
		"parent_id":   o.ParentId,
	}
}
// Clone returns a shallow copy of the channel bookmark.
func (o *ChannelBookmark) Clone() *ChannelBookmark {
	clone := *o
	return &clone
}

// SetOriginal derives a new bookmark from the receiver: timestamps and the
// primary ID are reset, OriginalId records the source bookmark's ID, and
// ownership is transferred to newOwnerId.
func (o *ChannelBookmark) SetOriginal(newOwnerId string) *ChannelBookmark {
	derived := o.Clone()
	derived.Id = ""
	derived.CreateAt = 0
	derived.UpdateAt = 0
	derived.DeleteAt = 0
	derived.OriginalId = o.Id
	derived.OwnerId = newOwnerId
	return derived
}
// IsValid validates a fully-populated bookmark (ID and timestamps included,
// so call it after PreSave) and returns an AppError for the first violated
// constraint, or nil when the bookmark is valid.
func (o *ChannelBookmark) IsValid() *AppError {
	if !IsValidId(o.Id) {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.id.app_error", nil, "", http.StatusBadRequest)
	}

	if o.CreateAt == 0 {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
	}

	if o.UpdateAt == 0 {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
	}

	if !IsValidId(o.ChannelId) {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest)
	}

	if !IsValidId(o.OwnerId) {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.owner_id.app_error", nil, "", http.StatusBadRequest)
	}

	if o.DisplayName == "" {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.display_name.app_error", nil, "", http.StatusBadRequest)
	}

	// Type must be one of the two known bookmark kinds.
	if !(o.Type == ChannelBookmarkFile || o.Type == ChannelBookmarkLink) {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.type.app_error", nil, "id="+o.Id, http.StatusBadRequest)
	}

	// Link bookmarks require a valid URL; an image URL, if set, must be valid too.
	if o.Type == ChannelBookmarkLink && (o.LinkUrl == "" || !IsValidHTTPURL(o.LinkUrl)) {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.link_url.missing_or_invalid.app_error", nil, "id="+o.Id, http.StatusBadRequest)
	}

	if o.Type == ChannelBookmarkLink && o.ImageUrl != "" && !IsValidHTTPURL(o.ImageUrl) {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.image_url.app_error", nil, "id="+o.Id, http.StatusBadRequest)
	}

	// File bookmarks require a valid file ID.
	if o.Type == ChannelBookmarkFile && (o.FileId == "" || !IsValidId(o.FileId)) {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.file_id.missing_or_invalid.app_error", nil, "id="+o.Id, http.StatusBadRequest)
	}

	// A bookmark may carry an image URL or a file, never both.
	if o.ImageUrl != "" && o.FileId != "" {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.link_file.app_error", nil, "id="+o.Id, http.StatusBadRequest)
	}

	if o.OriginalId != "" && !IsValidId(o.OriginalId) {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.original_id.app_error", nil, "", http.StatusBadRequest)
	}

	if o.ParentId != "" && !IsValidId(o.ParentId) {
		return NewAppError("ChannelBookmark.IsValid", "model.channel_bookmark.is_valid.parent_id.app_error", nil, "", http.StatusBadRequest)
	}

	return nil
}
// PreSave populates the ID and timestamps, and sanitizes the display name,
// before the bookmark is first written to the store. CreateAt is only set
// when zero, so an imported timestamp is preserved.
func (o *ChannelBookmark) PreSave() {
	o.DisplayName = SanitizeUnicode(o.DisplayName)

	if o.Id == "" {
		o.Id = NewId()
	}
	if o.CreateAt == 0 {
		o.CreateAt = GetMillis()
	}
	o.UpdateAt = o.CreateAt
}

// PreUpdate sanitizes the display name and bumps UpdateAt before an update
// is written to the store.
func (o *ChannelBookmark) PreUpdate() {
	o.DisplayName = SanitizeUnicode(o.DisplayName)
	o.UpdateAt = GetMillis()
}
// ToBookmarkWithFileInfo wraps a copy of the bookmark together with its file
// info. The file info is attached only when a non-nil FileInfo with a
// non-empty Id is provided.
func (o *ChannelBookmark) ToBookmarkWithFileInfo(f *FileInfo) *ChannelBookmarkWithFileInfo {
	// Clone copies every field, which is exactly what the wrapper needs.
	bwf := &ChannelBookmarkWithFileInfo{ChannelBookmark: o.Clone()}

	if f != nil && f.Id != "" {
		bwf.FileInfo = f
	}

	return bwf
}
// ChannelBookmarkPatch describes a partial update to a bookmark; nil fields
// are left unchanged by Patch.
type ChannelBookmarkPatch struct {
	FileId      *string `json:"file_id"`
	DisplayName *string `json:"display_name"`
	SortOrder   *int64  `json:"sort_order"`
	LinkUrl     *string `json:"link_url,omitempty"`
	ImageUrl    *string `json:"image_url,omitempty"`
	Emoji       *string `json:"emoji,omitempty"`
}
// Auditable returns the patch fields recorded in audit logs; only the file
// ID is included.
// Uses `any` for consistency with the rest of the file.
func (o *ChannelBookmarkPatch) Auditable() map[string]any {
	return map[string]any{
		"file_id": o.FileId,
	}
}
// Patch applies the non-nil fields of the given patch to the bookmark,
// leaving every other field untouched.
func (o *ChannelBookmark) Patch(patch *ChannelBookmarkPatch) {
	if v := patch.FileId; v != nil {
		o.FileId = *v
	}
	if v := patch.DisplayName; v != nil {
		o.DisplayName = *v
	}
	if v := patch.SortOrder; v != nil {
		o.SortOrder = *v
	}
	if v := patch.LinkUrl; v != nil {
		o.LinkUrl = *v
	}
	if v := patch.ImageUrl; v != nil {
		o.ImageUrl = *v
	}
	if v := patch.Emoji; v != nil {
		o.Emoji = *v
	}
}
// ChannelBookmarkWithFileInfo pairs a bookmark with the FileInfo of its
// attached file, when one exists.
type ChannelBookmarkWithFileInfo struct {
	*ChannelBookmark
	FileInfo *FileInfo `json:"file,omitempty"`
}
// Auditable returns the bookmark's audit fields plus, when a file is
// attached, the file's audit fields under "file".
// Uses `any` for consistency with the rest of the file.
func (o *ChannelBookmarkWithFileInfo) Auditable() map[string]any {
	a := o.ChannelBookmark.Auditable()
	if o.FileInfo != nil {
		a["file"] = o.FileInfo.Auditable()
	}
	return a
}
// Clone returns a shallow copy of the channel bookmark with file info (the
// embedded bookmark pointer and FileInfo pointer are shared, not copied).
func (o *ChannelBookmarkWithFileInfo) Clone() *ChannelBookmarkWithFileInfo {
	clone := *o
	return &clone
}
// ChannelWithBookmarks is a channel together with its bookmarks.
type ChannelWithBookmarks struct {
	*Channel
	Bookmarks []*ChannelBookmarkWithFileInfo `json:"bookmarks,omitempty"`
}

// ChannelWithTeamDataAndBookmarks is a channel with team data together with
// its bookmarks.
type ChannelWithTeamDataAndBookmarks struct {
	*ChannelWithTeamData
	Bookmarks []*ChannelBookmarkWithFileInfo `json:"bookmarks,omitempty"`
}

// UpdateChannelBookmarkResponse reports the outcome of a bookmark update:
// the updated bookmark and, when the update replaced another, the deleted one.
type UpdateChannelBookmarkResponse struct {
	Updated *ChannelBookmarkWithFileInfo `json:"updated,omitempty"`
	Deleted *ChannelBookmarkWithFileInfo `json:"deleted,omitempty"`
}
// Auditable returns the audit representation of a bookmark update: the
// updated bookmark under "updated" and the deleted one (if any) under
// "deleted".
func (o *UpdateChannelBookmarkResponse) Auditable() map[string]any {
	a := map[string]any{}
	if o.Updated != nil {
		a["updated"] = o.Updated.Auditable()
	}
	if o.Deleted != nil {
		// Bug fix: the deleted bookmark was previously written under the
		// "updated" key, clobbering the updated entry in the audit record.
		a["deleted"] = o.Deleted.Auditable()
	}
	return a
}
// ChannelBookmarkAndFileInfo is the flattened shape of a bookmark joined
// with its file-info columns — presumably the row type of a store-level
// join query; confirm against the store layer. Convert it to the API shape
// with ToChannelBookmarkWithFileInfo.
type ChannelBookmarkAndFileInfo struct {
	Id          string
	CreateAt    int64
	UpdateAt    int64
	DeleteAt    int64
	ChannelId   string
	OwnerId     string
	FileInfoId  string // the bookmark's file reference
	DisplayName string
	SortOrder   int64
	LinkUrl     string
	ImageUrl    string
	Emoji       string
	Type        ChannelBookmarkType
	OriginalId  string
	ParentId    string
	// File-info columns (populated when the bookmark has a file).
	FileId          string
	FileName        string
	Extension       string
	Size            int64
	MimeType        string
	Width           int
	Height          int
	HasPreviewImage bool
	MiniPreview     *[]byte
}
// ToChannelBookmarkWithFileInfo converts the flattened row into the API
// shape, rebuilding the embedded bookmark and, when both file IDs are set,
// the attached FileInfo.
func (o *ChannelBookmarkAndFileInfo) ToChannelBookmarkWithFileInfo() *ChannelBookmarkWithFileInfo {
	bwf := &ChannelBookmarkWithFileInfo{
		ChannelBookmark: &ChannelBookmark{
			Id:          o.Id,
			CreateAt:    o.CreateAt,
			UpdateAt:    o.UpdateAt,
			DeleteAt:    o.DeleteAt,
			ChannelId:   o.ChannelId,
			OwnerId:     o.OwnerId,
			FileId:      o.FileInfoId,
			DisplayName: o.DisplayName,
			SortOrder:   o.SortOrder,
			LinkUrl:     o.LinkUrl,
			ImageUrl:    o.ImageUrl,
			Emoji:       o.Emoji,
			Type:        o.Type,
			OriginalId:  o.OriginalId,
			ParentId:    o.ParentId,
		},
	}

	if o.FileInfoId != "" && o.FileId != "" {
		miniPreview := o.MiniPreview
		// Bug fix: guard the dereference — MiniPreview is *[]byte and may be
		// nil, which previously panicked on len(*miniPreview). Both nil and
		// empty previews are normalized to nil.
		if miniPreview != nil && len(*miniPreview) == 0 {
			miniPreview = nil
		}
		bwf.FileInfo = &FileInfo{
			Id:              o.FileId,
			Name:            o.FileName,
			Extension:       o.Extension,
			Size:            o.Size,
			MimeType:        o.MimeType,
			Width:           o.Width,
			Height:          o.Height,
			HasPreviewImage: o.HasPreviewImage,
			MiniPreview:     miniPreview,
		}
	}

	return bwf
}

View File

@ -31,7 +31,7 @@ func (o *ChannelCounts) Etag() string {
md5Counts := fmt.Sprintf("%x", md5.Sum([]byte(str)))
var update int64 = 0
var update int64
for _, u := range o.UpdateTimes {
if u > update {
update = u

View File

@ -9,7 +9,7 @@ type ChannelData struct {
}
func (o *ChannelData) Etag() string {
var mt int64 = 0
var mt int64
if o.Member != nil {
mt = o.Member.LastUpdateAt
}

View File

@ -6,10 +6,9 @@ package model
type ChannelList []*Channel
func (o *ChannelList) Etag() string {
id := "0"
var t int64 = 0
var delta int64 = 0
var t int64
var delta int64
for _, v := range *o {
if v.LastPostAt > t {
@ -21,7 +20,6 @@ func (o *ChannelList) Etag() string {
t = v.UpdateAt
id = v.Id
}
}
return Etag(id, t, delta, len(*o))
@ -30,10 +28,9 @@ func (o *ChannelList) Etag() string {
type ChannelListWithTeamData []*ChannelWithTeamData
func (o *ChannelListWithTeamData) Etag() string {
id := "0"
var t int64 = 0
var delta int64 = 0
var t int64
var delta int64
for _, v := range *o {
if v.LastPostAt > t {

View File

@ -0,0 +1,238 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
import (
"fmt"
"net/http"
"strings"
"unicode/utf8"
)
const (
	// Per-channel notification levels ("default" defers to the user setting).
	ChannelNotifyDefault = "default"
	ChannelNotifyAll     = "all"
	ChannelNotifyMention = "mention"
	ChannelNotifyNone    = "none"
	// Mark-unread behavior; "mention" doubles as the muted state (see IsChannelMuted).
	ChannelMarkUnreadAll     = "all"
	ChannelMarkUnreadMention = "mention"
	// Ignore-channel-mentions (@channel/@all) values and notify-prop key.
	IgnoreChannelMentionsDefault    = "default"
	IgnoreChannelMentionsOff        = "off"
	IgnoreChannelMentionsOn         = "on"
	IgnoreChannelMentionsNotifyProp = "ignore_channel_mentions"
	// Auto-follow-threads values and notify-prop key.
	ChannelAutoFollowThreadsOff = "off"
	ChannelAutoFollowThreadsOn  = "on"
	ChannelAutoFollowThreads    = "channel_auto_follow_threads"
	// Maximum size (in runes) of the JSON-encoded notify props.
	ChannelMemberNotifyPropsMaxRunes = 800000
)
// ChannelUnread carries a channel's unread/mention counters.
// NotifyProps is excluded from JSON serialization.
type ChannelUnread struct {
	TeamId             string    `json:"team_id"`
	ChannelId          string    `json:"channel_id"`
	MsgCount           int64     `json:"msg_count"`
	MentionCount       int64     `json:"mention_count"`
	MentionCountRoot   int64     `json:"mention_count_root"`
	UrgentMentionCount int64     `json:"urgent_mention_count"`
	MsgCountRoot       int64     `json:"msg_count_root"`
	NotifyProps        StringMap `json:"-"`
}

// ChannelUnreadAt is ChannelUnread scoped to a user, with the user's last
// viewed timestamp.
type ChannelUnreadAt struct {
	TeamId             string    `json:"team_id"`
	UserId             string    `json:"user_id"`
	ChannelId          string    `json:"channel_id"`
	MsgCount           int64     `json:"msg_count"`
	MentionCount       int64     `json:"mention_count"`
	MentionCountRoot   int64     `json:"mention_count_root"`
	UrgentMentionCount int64     `json:"urgent_mention_count"`
	MsgCountRoot       int64     `json:"msg_count_root"`
	LastViewedAt       int64     `json:"last_viewed_at"`
	NotifyProps        StringMap `json:"-"`
}

// ChannelMember is a user's membership in a channel, including read state,
// per-channel notify props and scheme-derived roles.
type ChannelMember struct {
	ChannelId          string    `json:"channel_id"`
	UserId             string    `json:"user_id"`
	Roles              string    `json:"roles"` // space-separated, see GetRoles
	LastViewedAt       int64     `json:"last_viewed_at"`
	MsgCount           int64     `json:"msg_count"`
	MentionCount       int64     `json:"mention_count"`
	MentionCountRoot   int64     `json:"mention_count_root"`
	UrgentMentionCount int64     `json:"urgent_mention_count"`
	MsgCountRoot       int64     `json:"msg_count_root"`
	NotifyProps        StringMap `json:"notify_props"`
	LastUpdateAt       int64     `json:"last_update_at"`
	SchemeGuest        bool      `json:"scheme_guest"`
	SchemeUser         bool      `json:"scheme_user"`
	SchemeAdmin        bool      `json:"scheme_admin"`
	ExplicitRoles      string    `json:"explicit_roles"`
}
// Auditable returns the channel-member fields recorded in audit logs (all
// fields of the struct).
// Uses `any` for consistency with the rest of the file (see IsValid).
func (o *ChannelMember) Auditable() map[string]any {
	return map[string]any{
		"channel_id":           o.ChannelId,
		"user_id":              o.UserId,
		"roles":                o.Roles,
		"last_viewed_at":       o.LastViewedAt,
		"msg_count":            o.MsgCount,
		"mention_count":        o.MentionCount,
		"mention_count_root":   o.MentionCountRoot,
		"urgent_mention_count": o.UrgentMentionCount,
		"msg_count_root":       o.MsgCountRoot,
		"notify_props":         o.NotifyProps,
		"last_update_at":       o.LastUpdateAt,
		"scheme_guest":         o.SchemeGuest,
		"scheme_user":          o.SchemeUser,
		"scheme_admin":         o.SchemeAdmin,
		"explicit_roles":       o.ExplicitRoles,
	}
}
// ChannelMemberWithTeamData contains ChannelMember appended with extra team
// information as well.
type ChannelMemberWithTeamData struct {
	ChannelMember
	TeamDisplayName string `json:"team_display_name"`
	TeamName        string `json:"team_name"`
	TeamUpdateAt    int64  `json:"team_update_at"`
}

// ChannelMembers is a list of channel members.
type ChannelMembers []ChannelMember

// ChannelMembersWithTeamData is a list of channel members with team data.
type ChannelMembersWithTeamData []ChannelMemberWithTeamData

// ChannelMemberForExport augments a member with the channel name and
// username used by the bulk-export format.
type ChannelMemberForExport struct {
	ChannelMember
	ChannelName string
	Username    string
}
// IsValid validates the member's IDs, notify props and role-string length,
// returning an AppError for the first violated constraint, or nil.
func (o *ChannelMember) IsValid() *AppError {
	if !IsValidId(o.ChannelId) {
		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest)
	}

	if !IsValidId(o.UserId) {
		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
	}

	// allowMissingFields=false: desktop and mark-unread props must be present.
	if appErr := IsChannelMemberNotifyPropsValid(o.NotifyProps, false); appErr != nil {
		return appErr
	}

	if len(o.Roles) > UserRolesMaxLength {
		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.roles_limit.app_error",
			map[string]any{"Limit": UserRolesMaxLength}, "", http.StatusBadRequest)
	}

	return nil
}
// IsChannelMemberNotifyPropsValid validates a member's notify props. When
// allowMissingFields is false the desktop and mark-unread props are required;
// push, email, ignore-channel-mentions and auto-follow-threads are validated
// only when present. The serialized props must also fit within
// ChannelMemberNotifyPropsMaxRunes. Returns an AppError for the first
// violated constraint, or nil.
func IsChannelMemberNotifyPropsValid(notifyProps map[string]string, allowMissingFields bool) *AppError {
	if notifyLevel, ok := notifyProps[DesktopNotifyProp]; ok || !allowMissingFields {
		if len(notifyLevel) > 20 || !IsChannelNotifyLevelValid(notifyLevel) {
			return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.notify_level.app_error", nil, "notify_level="+notifyLevel, http.StatusBadRequest)
		}
	}

	if markUnreadLevel, ok := notifyProps[MarkUnreadNotifyProp]; ok || !allowMissingFields {
		if len(markUnreadLevel) > 20 || !IsChannelMarkUnreadLevelValid(markUnreadLevel) {
			return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.unread_level.app_error", nil, "mark_unread_level="+markUnreadLevel, http.StatusBadRequest)
		}
	}

	if pushLevel, ok := notifyProps[PushNotifyProp]; ok {
		if len(pushLevel) > 20 || !IsChannelNotifyLevelValid(pushLevel) {
			return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.push_level.app_error", nil, "push_notification_level="+pushLevel, http.StatusBadRequest)
		}
	}

	if sendEmail, ok := notifyProps[EmailNotifyProp]; ok {
		if len(sendEmail) > 20 || !IsSendEmailValid(sendEmail) {
			return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.email_value.app_error", nil, "push_notification_level="+sendEmail, http.StatusBadRequest)
		}
	}

	if ignoreChannelMentions, ok := notifyProps[IgnoreChannelMentionsNotifyProp]; ok {
		if len(ignoreChannelMentions) > 40 || !IsIgnoreChannelMentionsValid(ignoreChannelMentions) {
			return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.ignore_channel_mentions_value.app_error", nil, "ignore_channel_mentions="+ignoreChannelMentions, http.StatusBadRequest)
		}
	}

	if channelAutoFollowThreads, ok := notifyProps[ChannelAutoFollowThreads]; ok {
		if len(channelAutoFollowThreads) > 3 || !IsChannelAutoFollowThreadsValid(channelAutoFollowThreads) {
			return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_auto_follow_threads_value.app_error", nil, "channel_auto_follow_threads="+channelAutoFollowThreads, http.StatusBadRequest)
		}
	}

	// Cap the total serialized size of the props map.
	jsonStringNotifyProps := string(ToJSON(notifyProps))
	if utf8.RuneCountInString(jsonStringNotifyProps) > ChannelMemberNotifyPropsMaxRunes {
		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.notify_props.app_error", nil, fmt.Sprint("length=", utf8.RuneCountInString(jsonStringNotifyProps)), http.StatusBadRequest)
	}

	return nil
}
// PreSave stamps LastUpdateAt before the member is first written to the store.
func (o *ChannelMember) PreSave() {
	o.LastUpdateAt = GetMillis()
}

// PreUpdate stamps LastUpdateAt before an update is written to the store.
func (o *ChannelMember) PreUpdate() {
	o.LastUpdateAt = GetMillis()
}

// GetRoles returns the member's roles as a slice, split on whitespace.
func (o *ChannelMember) GetRoles() []string {
	return strings.Fields(o.Roles)
}
// SetChannelMuted flips the channel's mark-unread notify prop between "all"
// (unmuted) and "mention" (muted).
//
// NOTE(review): the muted parameter is ignored — this toggles based on the
// member's current state instead of honoring the requested value. Confirm
// whether callers rely on the toggle semantics before changing it.
func (o *ChannelMember) SetChannelMuted(muted bool) {
	if o.IsChannelMuted() {
		o.NotifyProps[MarkUnreadNotifyProp] = ChannelMarkUnreadAll
	} else {
		o.NotifyProps[MarkUnreadNotifyProp] = ChannelMarkUnreadMention
	}
}

// IsChannelMuted reports whether the channel is muted for this member, i.e.
// the mark-unread notify prop is "mention".
func (o *ChannelMember) IsChannelMuted() bool {
	return o.NotifyProps[MarkUnreadNotifyProp] == ChannelMarkUnreadMention
}
// IsChannelNotifyLevelValid reports whether the value is a valid
// desktop/push notification level.
func IsChannelNotifyLevelValid(notifyLevel string) bool {
	return notifyLevel == ChannelNotifyDefault ||
		notifyLevel == ChannelNotifyAll ||
		notifyLevel == ChannelNotifyMention ||
		notifyLevel == ChannelNotifyNone
}

// IsChannelMarkUnreadLevelValid reports whether the value is a valid
// mark-unread level.
func IsChannelMarkUnreadLevelValid(markUnreadLevel string) bool {
	return markUnreadLevel == ChannelMarkUnreadAll || markUnreadLevel == ChannelMarkUnreadMention
}

// IsSendEmailValid reports whether the value is a valid email notify prop
// ("default", "true" or "false").
func IsSendEmailValid(sendEmail string) bool {
	return sendEmail == ChannelNotifyDefault || sendEmail == "true" || sendEmail == "false"
}

// IsIgnoreChannelMentionsValid reports whether the value is a valid
// ignore-channel-mentions setting.
func IsIgnoreChannelMentionsValid(ignoreChannelMentions string) bool {
	return ignoreChannelMentions == IgnoreChannelMentionsOn || ignoreChannelMentions == IgnoreChannelMentionsOff || ignoreChannelMentions == IgnoreChannelMentionsDefault
}

// IsChannelAutoFollowThreadsValid reports whether the value is a valid
// auto-follow-threads setting.
func IsChannelAutoFollowThreadsValid(channelAutoFollowThreads string) bool {
	return channelAutoFollowThreads == ChannelAutoFollowThreadsOn || channelAutoFollowThreads == ChannelAutoFollowThreadsOff
}
// GetDefaultChannelNotifyProps returns the notify props a member starts
// with: every level defers to the user default, the channel is unmuted, and
// thread auto-follow is off.
func GetDefaultChannelNotifyProps() StringMap {
	defaults := make(StringMap)
	defaults[DesktopNotifyProp] = ChannelNotifyDefault
	defaults[MarkUnreadNotifyProp] = ChannelMarkUnreadAll
	defaults[PushNotifyProp] = ChannelNotifyDefault
	defaults[EmailNotifyProp] = ChannelNotifyDefault
	defaults[IgnoreChannelMentionsNotifyProp] = IgnoreChannelMentionsDefault
	defaults[ChannelAutoFollowThreads] = ChannelAutoFollowThreadsOff
	return defaults
}
// ChannelMemberIdentifier uniquely identifies a channel membership by the
// (channel, user) pair.
type ChannelMemberIdentifier struct {
	ChannelId string `json:"channel_id"`
	UserId    string `json:"user_id"`
}

View File

@ -16,6 +16,7 @@ type ChannelSearch struct {
Public bool `json:"public"`
Private bool `json:"private"`
IncludeDeleted bool `json:"include_deleted"`
IncludeSearchById bool `json:"include_search_by_id"`
Deleted bool `json:"deleted"`
Page *int `json:"page,omitempty"`
PerPage *int `json:"per_page,omitempty"`

View File

@ -5,7 +5,6 @@ package model
import (
"encoding/json"
"errors"
"regexp"
)
@ -89,38 +88,10 @@ func IsValidCategoryId(s string) bool {
return categoryIdPattern.MatchString(s)
}
// ImplementsGraphQLType reports whether SidebarCategoryType backs the GraphQL
// "SidebarCategoryType" scalar.
func (SidebarCategoryType) ImplementsGraphQLType(name string) bool {
	return name == "SidebarCategoryType"
}

// MarshalJSON encodes the category type as a plain JSON string.
func (t SidebarCategoryType) MarshalJSON() ([]byte, error) {
	return json.Marshal(string(t))
}

// UnmarshalGraphQL decodes a GraphQL string input into a SidebarCategoryType.
// Any string value is accepted; validation is left to the caller.
func (t *SidebarCategoryType) UnmarshalGraphQL(input interface{}) error {
	chType, ok := input.(string)
	if !ok {
		return errors.New("wrong type")
	}

	*t = SidebarCategoryType(chType)
	return nil
}

// ImplementsGraphQLType reports whether SidebarCategorySorting backs the
// GraphQL "SidebarCategorySorting" scalar.
func (SidebarCategorySorting) ImplementsGraphQLType(name string) bool {
	return name == "SidebarCategorySorting"
}

// MarshalJSON encodes the sorting mode as a plain JSON string.
func (t SidebarCategorySorting) MarshalJSON() ([]byte, error) {
	return json.Marshal(string(t))
}

// UnmarshalGraphQL decodes a GraphQL string input into a
// SidebarCategorySorting. Any string value is accepted; validation is left
// to the caller.
func (t *SidebarCategorySorting) UnmarshalGraphQL(input interface{}) error {
	chType, ok := input.(string)
	if !ok {
		return errors.New("wrong type")
	}

	*t = SidebarCategorySorting(chType)
	return nil
}

View File

@ -0,0 +1,367 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
import (
"encoding/json"
"strings"
"time"
)
// Event types delivered via the CWS webhook (see CWSWebhookPayload.Event).
const (
	EventTypeFailedPayment                = "failed-payment"
	EventTypeFailedPaymentNoCard          = "failed-payment-no-card"
	EventTypeSendAdminWelcomeEmail        = "send-admin-welcome-email"
	EventTypeSendUpgradeConfirmationEmail = "send-upgrade-confirmation-email"
	EventTypeSubscriptionChanged          = "subscription-changed"
	EventTypeTriggerDelinquencyEmail      = "trigger-delinquency-email"
)

// UpcomingInvoice is the sentinel invoice identifier for the not-yet-issued
// upcoming invoice.
const UpcomingInvoice = "upcoming"

// MockCWS is a package-level override used to stub out CWS — presumably in
// tests; confirm against callers.
var MockCWS string
// BillingScheme is how a subscription is priced.
type BillingScheme string

const (
	BillingSchemePerSeat    = BillingScheme("per_seat")
	BillingSchemeFlatFee    = BillingScheme("flat_fee")
	BillingSchemeSalesServe = BillingScheme("sales_serve")
)

// BillingType distinguishes licensed billing from internal workspaces.
type BillingType string

const (
	BillingTypeLicensed = BillingType("licensed")
	BillingTypeInternal = BillingType("internal")
)

// RecurringInterval is the billing cadence of a product.
type RecurringInterval string

const (
	RecurringIntervalYearly  = RecurringInterval("year")
	RecurringIntervalMonthly = RecurringInterval("month")
)

// SubscriptionFamily separates cloud products from self-hosted ones.
type SubscriptionFamily string

const (
	SubscriptionFamilyCloud  = SubscriptionFamily("cloud")
	SubscriptionFamilyOnPrem = SubscriptionFamily("on-prem")
)

// ProductSku identifies a sellable product tier.
type ProductSku string

const (
	SkuStarterGov      = ProductSku("starter-gov")
	SkuProfessionalGov = ProductSku("professional-gov")
	SkuEnterpriseGov   = ProductSku("enterprise-gov")
	SkuStarter         = ProductSku("starter")
	SkuProfessional    = ProductSku("professional")
	SkuEnterprise      = ProductSku("enterprise")
	SkuCloudStarter    = ProductSku("cloud-starter")
	SkuCloudProfessional = ProductSku("cloud-professional")
	SkuCloudEnterprise   = ProductSku("cloud-enterprise")
)
// Product model represents a product on the cloud system.
type Product struct {
	ID                string             `json:"id"`
	Name              string             `json:"name"`
	Description       string             `json:"description"`
	PricePerSeat      float64            `json:"price_per_seat"`
	AddOns            []*AddOn           `json:"add_ons"`
	SKU               string             `json:"sku"`
	PriceID           string             `json:"price_id"`
	Family            SubscriptionFamily `json:"product_family"`
	RecurringInterval RecurringInterval  `json:"recurring_interval"`
	BillingScheme     BillingScheme      `json:"billing_scheme"`
	// CrossSellsTo references the counterpart product (e.g. the other billing
	// interval) this product cross-sells to.
	CrossSellsTo string `json:"cross_sells_to"`
}

// UserFacingProduct is the subset of Product exposed to end users.
type UserFacingProduct struct {
	ID                string            `json:"id"`
	Name              string            `json:"name"`
	SKU               string            `json:"sku"`
	PricePerSeat      float64           `json:"price_per_seat"`
	RecurringInterval RecurringInterval `json:"recurring_interval"`
	CrossSellsTo      string            `json:"cross_sells_to"`
}

// AddOn represents an addon to a product.
type AddOn struct {
	ID           string  `json:"id"`
	Name         string  `json:"name"`
	DisplayName  string  `json:"display_name"`
	PricePerSeat float64 `json:"price_per_seat"`
}
// StripeSetupIntent represents the SetupIntent model from Stripe for updating payment methods.
type StripeSetupIntent struct {
	ID           string `json:"id"`
	ClientSecret string `json:"client_secret"`
}

// ConfirmPaymentMethodRequest contains the fields for the customer payment update API.
type ConfirmPaymentMethodRequest struct {
	StripeSetupIntentID string `json:"stripe_setup_intent_id"`
	SubscriptionID      string `json:"subscription_id"`
}

// CloudCustomer model represents a customer on the system, combining the
// editable CloudCustomerInfo with server-assigned identity, addresses and
// payment method.
type CloudCustomer struct {
	CloudCustomerInfo
	ID             string         `json:"id"`
	CreatorID      string         `json:"creator_id"`
	CreateAt       int64          `json:"create_at"`
	BillingAddress *Address       `json:"billing_address"`
	CompanyAddress *Address       `json:"company_address"`
	PaymentMethod  *PaymentMethod `json:"payment_method"`
}

// StartCloudTrialRequest is the payload to begin a cloud trial for a
// subscription.
type StartCloudTrialRequest struct {
	Email          string `json:"email"`
	SubscriptionID string `json:"subscription_id"`
}

// ValidateBusinessEmailRequest asks whether an email is a valid business email.
type ValidateBusinessEmailRequest struct {
	Email string `json:"email"`
}

// ValidateBusinessEmailResponse is the answer to ValidateBusinessEmailRequest.
type ValidateBusinessEmailResponse struct {
	IsValid bool `json:"is_valid"`
}

// SubscriptionLicenseSelfServeStatusResponse reports whether a license can be
// expanded or renewed through self-serve.
type SubscriptionLicenseSelfServeStatusResponse struct {
	IsExpandable bool `json:"is_expandable"`
	IsRenewable  bool `json:"is_renewable"`
}

// CloudCustomerInfo represents editable info of a customer.
type CloudCustomerInfo struct {
	Name                  string `json:"name"`
	Email                 string `json:"email,omitempty"`
	ContactFirstName      string `json:"contact_first_name,omitempty"`
	ContactLastName       string `json:"contact_last_name,omitempty"`
	NumEmployees          int    `json:"num_employees"`
	CloudAltPaymentMethod string `json:"monthly_subscription_alt_payment_method"`
}

// Address model represents a customer's address.
type Address struct {
	City       string `json:"city"`
	Country    string `json:"country"`
	Line1      string `json:"line1"`
	Line2      string `json:"line2"`
	PostalCode string `json:"postal_code"`
	State      string `json:"state"`
}

// PaymentMethod represents methods of payment for a customer.
type PaymentMethod struct {
	Type      string `json:"type"`
	LastFour  string `json:"last_four"`
	ExpMonth  int    `json:"exp_month"`
	ExpYear   int    `json:"exp_year"`
	CardBrand string `json:"card_brand"`
	Name      string `json:"name"`
}
// Subscription model represents a subscription on the system.
type Subscription struct {
	ID                      string   `json:"id"`
	CustomerID              string   `json:"customer_id"`
	ProductID               string   `json:"product_id"`
	AddOns                  []string `json:"add_ons"`
	StartAt                 int64    `json:"start_at"`
	EndAt                   int64    `json:"end_at"`
	CreateAt                int64    `json:"create_at"`
	Seats                   int      `json:"seats"`
	Status                  string   `json:"status"`
	DNS                     string   `json:"dns"` // workspace hostname, see GetWorkSpaceNameFromDNS
	LastInvoice             *Invoice `json:"last_invoice"`
	UpcomingInvoice         *Invoice `json:"upcoming_invoice"`
	IsFreeTrial             string   `json:"is_free_trial"`
	TrialEndAt              int64    `json:"trial_end_at"`
	DelinquentSince         *int64   `json:"delinquent_since"`
	OriginallyLicensedSeats int      `json:"originally_licensed_seats"`
	ComplianceBlocked       string   `json:"compliance_blocked"`
	BillingType             string   `json:"billing_type"`
	CancelAt                *int64   `json:"cancel_at"`
	WillRenew               string   `json:"will_renew"`
	// SimulatedCurrentTimeMs is a test clock (ms timestamp) that replaces the
	// real time in calculations when set; see DaysToExpiration.
	SimulatedCurrentTimeMs *int64 `json:"simulated_current_time_ms"`
}
// DaysToExpiration returns the number of whole days until the subscription's
// EndAt. In the test service environment, a simulated test clock
// (SimulatedCurrentTimeMs, a ms timestamp) replaces the real time when set.
func (s *Subscription) DaysToExpiration() int64 {
	now := time.Now().UnixMilli()
	if GetServiceEnvironment() == ServiceEnvironmentTest && s.SimulatedCurrentTimeMs != nil {
		now = *s.SimulatedCurrentTimeMs
	}

	const msPerDay = 1000 * 60 * 60 * 24
	return (s.EndAt - now) / msPerDay
}
// SubscriptionHistory model represents a true up event in a yearly subscription.
type SubscriptionHistory struct {
	ID             string `json:"id"`
	SubscriptionID string `json:"subscription_id"`
	Seats          int    `json:"seats"`
	CreateAt       int64  `json:"create_at"`
}

// SubscriptionHistoryChange is SubscriptionHistory without the server-assigned ID.
type SubscriptionHistoryChange struct {
	SubscriptionID string `json:"subscription_id"`
	Seats          int    `json:"seats"`
	CreateAt       int64  `json:"create_at"`
}
// GetWorkSpaceNameFromDNS returns the work space name, i.e. the first DNS
// label. For example from test.mattermost.cloud.com, it returns test.
func (s *Subscription) GetWorkSpaceNameFromDNS() string {
	name, _, _ := strings.Cut(s.DNS, ".")
	return name
}
// Invoice model represents a cloud invoice
type Invoice struct {
ID string `json:"id"`
Number string `json:"number"`
CreateAt int64 `json:"create_at"`
Total int64 `json:"total"`
Tax int64 `json:"tax"`
Status string `json:"status"`
Description string `json:"description"`
PeriodStart int64 `json:"period_start"`
PeriodEnd int64 `json:"period_end"`
SubscriptionID string `json:"subscription_id"`
Items []*InvoiceLineItem `json:"line_items"`
CurrentProductName string `json:"current_product_name"`
}
// InvoiceLineItem model represents a cloud invoice lineitem tied to an invoice.
type InvoiceLineItem struct {
PriceID string `json:"price_id"`
Total int64 `json:"total"`
Quantity float64 `json:"quantity"`
PricePerUnit int64 `json:"price_per_unit"`
Description string `json:"description"`
Type string `json:"type"`
Metadata map[string]any `json:"metadata"`
PeriodStart int64 `json:"period_start"`
PeriodEnd int64 `json:"period_end"`
}
// DelinquencyEmailTrigger identifies which delinquency email should be sent
// for a workspace (see the DelinquencyEmail constants).
type DelinquencyEmailTrigger struct {
	EmailToTrigger string `json:"email_to_send"`
}
// DelinquencyEmail enumerates the delinquency email variants. The values are
// numeric strings — presumably days of delinquency; confirm with the CWS side.
type DelinquencyEmail string

const (
	DelinquencyEmail7  DelinquencyEmail = "7"
	DelinquencyEmail14 DelinquencyEmail = "14"
	DelinquencyEmail30 DelinquencyEmail = "30"
	DelinquencyEmail45 DelinquencyEmail = "45"
	DelinquencyEmail60 DelinquencyEmail = "60"
	DelinquencyEmail75 DelinquencyEmail = "75"
	DelinquencyEmail90 DelinquencyEmail = "90"
)
// CWSWebhookPayload is the envelope delivered by CWS webhooks. Event selects
// which of the optional pointer fields is populated.
type CWSWebhookPayload struct {
	Event                             string                   `json:"event"`
	FailedPayment                     *FailedPayment           `json:"failed_payment"`
	CloudWorkspaceOwner               *CloudWorkspaceOwner     `json:"cloud_workspace_owner"`
	ProductLimits                     *ProductLimits           `json:"product_limits"`
	Subscription                      *Subscription            `json:"subscription"`
	SubscriptionTrialEndUnixTimeStamp int64                    `json:"trial_end_time_stamp"`
	DelinquencyEmail                  *DelinquencyEmailTrigger `json:"delinquency_email"`
}
// FailedPayment describes a declined card payment attempt.
type FailedPayment struct {
	CardBrand      string `json:"card_brand"`
	LastFour       string `json:"last_four"`
	FailureMessage string `json:"failure_message"`
}
// CloudWorkspaceOwner is part of the CWS Webhook payload that contains information about the user that created the workspace from the CWS
type CloudWorkspaceOwner struct {
	UserName string `json:"username"`
}
// SubscriptionChange is the request payload for changing a subscription's
// product/seat count; Feedback is only present on downgrades.
type SubscriptionChange struct {
	ProductID       string             `json:"product_id"`
	Seats           int                `json:"seats"`
	Feedback        *Feedback          `json:"downgrade_feedback"`
	ShippingAddress *Address           `json:"shipping_address"`
	Customer        *CloudCustomerInfo `json:"customer"`
}
// FilesLimits caps file storage; nil means no limit is imposed.
type FilesLimits struct {
	TotalStorage *int64 `json:"total_storage"`
}

// MessagesLimits caps the accessible message history; nil means unlimited.
type MessagesLimits struct {
	History *int `json:"history"`
}

// TeamsLimits caps the number of active teams; nil means unlimited.
type TeamsLimits struct {
	Active *int `json:"active"`
}

// ProductLimits aggregates the per-resource limits for a product; each
// section is omitted entirely when not applicable.
type ProductLimits struct {
	Files    *FilesLimits    `json:"files,omitempty"`
	Messages *MessagesLimits `json:"messages,omitempty"`
	Teams    *TeamsLimits    `json:"teams,omitempty"`
}
// CreateSubscriptionRequest is the parameters for the API request to create a subscription.
type CreateSubscriptionRequest struct {
	ProductID string   `json:"product_id"`
	AddOns    []string `json:"add_ons"`
	Seats     int      `json:"seats"`
	// Total is the expected charge — units presumably match the billing
	// backend's currency representation; confirm before relying on it.
	Total                 float64 `json:"total"`
	InternalPurchaseOrder string  `json:"internal_purchase_order"`
	DiscountID            string  `json:"discount_id"`
}
// Installation describes a cloud installation and its IP filtering rules.
type Installation struct {
	ID              string           `json:"id"`
	State           string           `json:"state"`
	AllowedIPRanges *AllowedIPRanges `json:"allowed_ip_ranges"`
}
// Feedback captures a user-supplied reason and free-form comments, used for
// downgrade and deletion flows.
type Feedback struct {
	Reason   string `json:"reason"`
	Comments string `json:"comments"`
}
// WorkspaceDeletionRequest is the payload sent when a workspace is deleted,
// including optional exit feedback.
type WorkspaceDeletionRequest struct {
	SubscriptionID string    `json:"subscription_id"`
	Feedback       *Feedback `json:"delete_feedback"`
}
// IsYearly reports whether the product bills on a yearly recurring interval.
func (p *Product) IsYearly() bool {
	isYearly := p.RecurringInterval == RecurringIntervalYearly
	return isYearly
}
// IsMonthly reports whether the product bills on a monthly recurring interval.
func (p *Product) IsMonthly() bool {
	switch p.RecurringInterval {
	case RecurringIntervalMonthly:
		return true
	default:
		return false
	}
}
// ToMap converts the feedback to a generic map via a JSON round trip.
// It is best-effort: on marshal failure a nil map is returned, and an
// unmarshal failure likewise yields whatever was decoded (nil). This matches
// the previous behavior, where every branch returned res; the redundant
// post-Unmarshal error branch (both paths returned res) has been removed.
func (df *Feedback) ToMap() map[string]any {
	var res map[string]any

	feedback, err := json.Marshal(df)
	if err != nil {
		return res
	}

	// Error deliberately ignored: res simply remains nil on failure.
	_ = json.Unmarshal(feedback, &res)
	return res
}

View File

@ -19,7 +19,7 @@ type ClusterDiscovery struct {
ClusterName string `json:"cluster_name"`
Hostname string `json:"hostname"`
GossipPort int32 `json:"gossip_port"`
Port int32 `json:"port"`
Port int32 `json:"port"` // Deprecated: Port is unused. It's only kept for backwards compatibility.
CreateAt int64 `json:"create_at"`
LastPingAt int64 `json:"last_ping_at"`
}
@ -76,14 +76,14 @@ func (o *ClusterDiscovery) IsEqual(in *ClusterDiscovery) bool {
}
func FilterClusterDiscovery(vs []*ClusterDiscovery, f func(*ClusterDiscovery) bool) []*ClusterDiscovery {
copy := make([]*ClusterDiscovery, 0)
cdCopy := make([]*ClusterDiscovery, 0)
for _, v := range vs {
if f(v) {
copy = append(copy, v)
cdCopy = append(cdCopy, v)
}
}
return copy
return cdCopy
}
func (o *ClusterDiscovery) IsValid() *AppError {

View File

@ -0,0 +1,13 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
// ClusterInfo describes one node of a Mattermost cluster as reported by the
// cluster status endpoints.
type ClusterInfo struct {
	Id            string `json:"id"`
	Version       string `json:"version"`
	SchemaVersion string `json:"schema_version"`
	// ConfigHash lets nodes detect configuration drift between peers.
	ConfigHash string `json:"config_hash"`
	IPAddress  string `json:"ipaddress"`
	Hostname   string `json:"hostname"`
}

View File

@ -20,6 +20,7 @@ const (
ClusterEventInvalidateCacheForRoles ClusterEvent = "inv_roles"
ClusterEventInvalidateCacheForRolePermissions ClusterEvent = "inv_role_permissions"
ClusterEventInvalidateCacheForProfileByIds ClusterEvent = "inv_profile_ids"
ClusterEventInvalidateCacheForAllProfiles ClusterEvent = "inv_all_profiles"
ClusterEventInvalidateCacheForProfileInChannel ClusterEvent = "inv_profile_in_channel"
ClusterEventInvalidateCacheForSchemes ClusterEvent = "inv_schemes"
ClusterEventInvalidateCacheForFileInfos ClusterEvent = "inv_file_infos"
@ -29,8 +30,10 @@ const (
ClusterEventInvalidateCacheForChannelFileCount ClusterEvent = "inv_channel_file_count"
ClusterEventInvalidateCacheForChannelPinnedpostsCounts ClusterEvent = "inv_channel_pinnedposts_counts"
ClusterEventInvalidateCacheForChannelMemberCounts ClusterEvent = "inv_channel_member_counts"
ClusterEventInvalidateCacheForChannelsMemberCount ClusterEvent = "inv_channels_member_count"
ClusterEventInvalidateCacheForLastPosts ClusterEvent = "inv_last_posts"
ClusterEventInvalidateCacheForLastPostTime ClusterEvent = "inv_last_post_time"
ClusterEventInvalidateCacheForPostsUsage ClusterEvent = "inv_posts_usage"
ClusterEventInvalidateCacheForTeams ClusterEvent = "inv_teams"
ClusterEventClearSessionCacheForAllUsers ClusterEvent = "inv_all_user_sessions"
ClusterEventInstallPlugin ClusterEvent = "install_plugin"
@ -48,6 +51,8 @@ const (
ClusterGossipEventResponseGetPluginStatuses = "gossip_response_plugin_statuses"
ClusterGossipEventRequestSaveConfig = "gossip_request_save_config"
ClusterGossipEventResponseSaveConfig = "gossip_response_save_config"
ClusterGossipEventRequestWebConnCount = "gossip_request_webconn_count"
ClusterGossipEventResponseWebConnCount = "gossip_response_webconn_count"
// SendTypes for ClusterMessage.
ClusterSendBestEffort = "best_effort"

View File

@ -41,6 +41,26 @@ type Command struct {
AutocompleteIconData string `db:"-" json:"autocomplete_icon_data,omitempty"`
}
// Auditable returns the command's identifying fields as a map for audit logging.
func (o *Command) Auditable() map[string]interface{} {
	audit := make(map[string]interface{}, 15)
	audit["id"] = o.Id
	audit["create_at"] = o.CreateAt
	audit["update_at"] = o.UpdateAt
	audit["delete_at"] = o.DeleteAt
	audit["creator_id"] = o.CreatorId
	audit["team_id"] = o.TeamId
	audit["trigger"] = o.Trigger
	audit["username"] = o.Username
	audit["icon_url"] = o.IconURL
	audit["auto_complete"] = o.AutoComplete
	audit["auto_complete_desc"] = o.AutoCompleteDesc
	audit["auto_complete_hint"] = o.AutoCompleteHint
	audit["display_name"] = o.DisplayName
	audit["description"] = o.Description
	audit["url"] = o.URL
	return audit
}
func (o *Command) IsValid() *AppError {
if !IsValidId(o.Id) {
return NewAppError("Command.IsValid", "model.command.is_valid.id.app_error", nil, "", http.StatusBadRequest)
@ -102,7 +122,7 @@ func (o *Command) IsValid() *AppError {
if o.AutocompleteData != nil {
if err := o.AutocompleteData.IsValid(); err != nil {
return NewAppError("Command.IsValid", "model.command.is_valid.autocomplete_data.app_error", nil, err.Error(), http.StatusBadRequest)
return NewAppError("Command.IsValid", "model.command.is_valid.autocomplete_data.app_error", nil, "", http.StatusBadRequest).Wrap(err)
}
}

View File

@ -4,7 +4,7 @@
package model
import (
"github.com/mattermost/mattermost-server/v6/shared/i18n"
"github.com/mattermost/mattermost/server/public/shared/i18n"
)
type CommandArgs struct {
@ -19,9 +19,19 @@ type CommandArgs struct {
T i18n.TranslateFunc `json:"-"`
UserMentions UserMentionMap `json:"-"`
ChannelMentions ChannelMentionMap `json:"-"`
}
// DO NOT USE Session field is deprecated. MM-26398
Session Session `json:"-"`
func (o *CommandArgs) Auditable() map[string]interface{} {
return map[string]interface{}{
"user_id": o.UserId,
"channel_id": o.ChannelId,
"team_id": o.TeamId,
"root_id": o.RootId,
"parent_id": o.ParentId,
"trigger_id": o.TriggerId,
"command": o.Command,
"site_url": o.SiteURL,
}
}
// AddUserMention adds or overrides an entry in UserMentions with name username

View File

@ -8,6 +8,7 @@ import (
"net/url"
"path"
"reflect"
"slices"
"strings"
"github.com/pkg/errors"
@ -54,7 +55,7 @@ type AutocompleteArg struct {
// Required determines if argument is optional or not.
Required bool
// Actual data of the argument (depends on the Type)
Data interface{}
Data any
}
// AutocompleteTextArg describes text user can input as an argument.
@ -211,7 +212,6 @@ func (ad *AutocompleteData) UpdateRelativeURLsForPluginCommands(baseURL *url.URL
absURL.Path = path.Join(absURL.Path, dynamicList.FetchURL)
dynamicList.FetchURL = absURL.String()
}
}
for _, command := range ad.SubCommands {
err := command.UpdateRelativeURLsForPluginCommands(baseURL)
@ -234,7 +234,7 @@ func (ad *AutocompleteData) IsValid() error {
return errors.New("Command should be lowercase")
}
roles := []string{SystemAdminRoleId, SystemUserRoleId, ""}
if stringNotInSlice(ad.RoleID, roles) {
if !slices.Contains(roles, ad.RoleID) {
return errors.New("Wrong role in the autocomplete data")
}
if len(ad.Arguments) > 0 && len(ad.SubCommands) > 0 {
@ -304,7 +304,7 @@ func (a *AutocompleteArg) Equals(arg *AutocompleteArg) bool {
// UnmarshalJSON will unmarshal argument
func (a *AutocompleteArg) UnmarshalJSON(b []byte) error {
var arg map[string]interface{}
var arg map[string]any
if err := json.Unmarshal(b, &arg); err != nil {
return errors.Wrapf(err, "Can't unmarshal argument %s", string(b))
}
@ -336,7 +336,7 @@ func (a *AutocompleteArg) UnmarshalJSON(b []byte) error {
}
if a.Type == AutocompleteArgTypeText {
m, ok := data.(map[string]interface{})
m, ok := data.(map[string]any)
if !ok {
return errors.Errorf("Wrong Data type in the TextInput argument %s", string(b))
}
@ -350,18 +350,18 @@ func (a *AutocompleteArg) UnmarshalJSON(b []byte) error {
}
a.Data = &AutocompleteTextArg{Hint: hint, Pattern: pattern}
} else if a.Type == AutocompleteArgTypeStaticList {
m, ok := data.(map[string]interface{})
m, ok := data.(map[string]any)
if !ok {
return errors.Errorf("Wrong Data type in the StaticList argument %s", string(b))
}
list, ok := m["PossibleArguments"].([]interface{})
list, ok := m["PossibleArguments"].([]any)
if !ok {
return errors.Errorf("No field PossibleArguments in the StaticList argument %s", string(b))
}
possibleArguments := []AutocompleteListItem{}
for i := range list {
args, ok := list[i].(map[string]interface{})
args, ok := list[i].(map[string]any)
if !ok {
return errors.Errorf("Wrong AutocompleteStaticListItem type in the StaticList argument %s", string(b))
}
@ -387,7 +387,7 @@ func (a *AutocompleteArg) UnmarshalJSON(b []byte) error {
}
a.Data = &AutocompleteStaticListArg{PossibleArguments: possibleArguments}
} else if a.Type == AutocompleteArgTypeDynamicList {
m, ok := data.(map[string]interface{})
m, ok := data.(map[string]any)
if !ok {
return errors.Errorf("Wrong type in the DynamicList argument %s", string(b))
}
@ -399,12 +399,3 @@ func (a *AutocompleteArg) UnmarshalJSON(b []byte) error {
}
return nil
}
func stringNotInSlice(a string, slice []string) bool {
for _, b := range slice {
if b == a {
return false
}
}
return true
}

View File

@ -6,10 +6,9 @@ package model
import (
"encoding/json"
"io"
"io/ioutil"
"strings"
"github.com/mattermost/mattermost-server/v6/utils/jsonutils"
"github.com/mattermost/mattermost/server/public/utils"
)
const (
@ -36,7 +35,7 @@ func CommandResponseFromHTTPBody(contentType string, body io.Reader) (*CommandRe
if strings.TrimSpace(strings.Split(contentType, ";")[0]) == "application/json" {
return CommandResponseFromJSON(body)
}
if b, err := ioutil.ReadAll(body); err == nil {
if b, err := io.ReadAll(body); err == nil {
return CommandResponseFromPlainText(string(b)), nil
}
return nil, nil
@ -49,7 +48,7 @@ func CommandResponseFromPlainText(text string) *CommandResponse {
}
func CommandResponseFromJSON(data io.Reader) (*CommandResponse, error) {
b, err := ioutil.ReadAll(data)
b, err := io.ReadAll(data)
if err != nil {
return nil, err
}
@ -57,7 +56,7 @@ func CommandResponseFromJSON(data io.Reader) (*CommandResponse, error) {
var o CommandResponse
err = json.Unmarshal(b, &o)
if err != nil {
return nil, jsonutils.HumanizeJSONError(err, b)
return nil, utils.HumanizeJSONError(err, b)
}
o.Attachments = StringifySlackFieldValue(o.Attachments)

View File

@ -6,6 +6,8 @@ package model
import (
"net/http"
"strings"
"github.com/mattermost/mattermost/server/public/shared/mlog"
)
const (
@ -33,6 +35,22 @@ type Compliance struct {
Emails string `json:"emails"`
}
// Auditable returns the compliance job's fields as a map for audit logging.
func (c *Compliance) Auditable() map[string]interface{} {
	audit := make(map[string]interface{}, 11)
	audit["id"] = c.Id
	audit["create_at"] = c.CreateAt
	audit["user_id"] = c.UserId
	audit["status"] = c.Status
	audit["count"] = c.Count
	audit["desc"] = c.Desc
	audit["type"] = c.Type
	audit["start_at"] = c.StartAt
	audit["end_at"] = c.EndAt
	audit["keywords"] = c.Keywords
	audit["emails"] = c.Emails
	return audit
}
// Compliances is a list of compliance jobs.
type Compliances []Compliance
// ComplianceExportCursor is used for paginated iteration of posts
@ -65,8 +83,8 @@ func (c *Compliance) PreSave() {
}
func (c *Compliance) DeepCopy() *Compliance {
copy := *c
return &copy
cCopy := *c
return &cCopy
}
func (c *Compliance) JobName() string {
@ -107,3 +125,17 @@ func (c *Compliance) IsValid() *AppError {
return nil
}
// LoggerFields returns the logger annotations reflecting the given compliance
// job metadata. It is nil-safe so a missing job yields no annotations.
func (c *Compliance) LoggerFields() []mlog.Field {
	if c == nil {
		return nil
	}

	fields := make([]mlog.Field, 0, 4)
	fields = append(fields,
		mlog.String("job_id", c.Id),
		mlog.String("job_type", c.Type),
		mlog.String("job_name", c.JobName()),
		mlog.Millis("job_create_at", c.CreateAt),
	)
	return fields
}

View File

@ -76,7 +76,6 @@ func cleanComplianceStrings(in string) string {
}
func (cp *CompliancePost) Row() []string {
postDeleteAt := ""
if cp.PostDeleteAt > 0 {
postDeleteAt = time.Unix(0, cp.PostDeleteAt*int64(1000*1000)).Format(time.RFC3339)

View File

@ -8,8 +8,6 @@ import (
"encoding/json"
"fmt"
"time"
"github.com/graph-gophers/graphql-go"
)
const (
@ -37,10 +35,6 @@ type CustomStatus struct {
}
func (cs *CustomStatus) PreSave() {
if cs.Emoji == "" {
cs.Emoji = DefaultCustomStatusEmoji
}
if cs.Duration == "" && !cs.ExpiresAt.Before(time.Now()) {
cs.Duration = "date_and_time"
}
@ -63,12 +57,6 @@ func (cs *CustomStatus) AreDurationAndExpirationTimeValid() bool {
return false
}
// ExpiresAt_ returns the time in a type that has the marshal/unmarshal methods
// attached to it — presumably needed by a GraphQL resolver that requires
// graphql.Time rather than time.Time; confirm against the resolver wiring.
func (cs *CustomStatus) ExpiresAt_() graphql.Time {
	return graphql.Time{Time: cs.ExpiresAt}
}
// RuneToHexadecimalString renders r as lowercase hexadecimal, zero-padded to
// at least four digits (e.g. 'a' -> "0061", U+1F600 -> "1f600").
func RuneToHexadecimalString(r rune) string {
	return string(fmt.Appendf(nil, "%04x", r))
}

View File

@ -6,10 +6,8 @@ package model
// GlobalRetentionPolicy holds the server-wide data retention settings:
// per-resource enable flags plus the cutoff timestamps before which data is
// eligible for deletion.
type GlobalRetentionPolicy struct {
	MessageDeletionEnabled bool  `json:"message_deletion_enabled"`
	FileDeletionEnabled    bool  `json:"file_deletion_enabled"`
	BoardsDeletionEnabled  bool  `json:"boards_deletion_enabled"`
	MessageRetentionCutoff int64 `json:"message_retention_cutoff"`
	FileRetentionCutoff    int64 `json:"file_retention_cutoff"`
	BoardsRetentionCutoff  int64 `json:"boards_retention_cutoff"`
}
type RetentionPolicy struct {
@ -24,12 +22,28 @@ type RetentionPolicyWithTeamAndChannelIDs struct {
ChannelIDs []string `json:"channel_ids"`
}
// Auditable returns the policy and its scoped team/channel IDs for audit logging.
func (o *RetentionPolicyWithTeamAndChannelIDs) Auditable() map[string]interface{} {
	audit := make(map[string]interface{}, 3)
	audit["retention_policy"] = o.RetentionPolicy
	audit["team_ids"] = o.TeamIDs
	audit["channel_ids"] = o.ChannelIDs
	return audit
}
// RetentionPolicyWithTeamAndChannelCounts is a retention policy annotated with
// how many channels and teams it applies to.
type RetentionPolicyWithTeamAndChannelCounts struct {
	RetentionPolicy
	ChannelCount int64 `json:"channel_count"`
	TeamCount    int64 `json:"team_count"`
}
// Auditable returns the policy and its team/channel counts for audit logging.
func (o *RetentionPolicyWithTeamAndChannelCounts) Auditable() map[string]interface{} {
	audit := make(map[string]interface{}, 3)
	audit["retention_policy"] = o.RetentionPolicy
	audit["channel_count"] = o.ChannelCount
	audit["team_count"] = o.TeamCount
	return audit
}
type RetentionPolicyChannel struct {
PolicyID string `db:"PolicyId"`
ChannelID string `db:"ChannelId"`
@ -70,3 +84,15 @@ type RetentionPolicyCursor struct {
TeamPoliciesDone bool
GlobalPoliciesDone bool
}
// RetentionIdsForDeletion batches row IDs from TableName — presumably rows a
// retention job has marked for deletion; confirm against the store layer.
type RetentionIdsForDeletion struct {
	Id        string
	TableName string
	Ids       []string
}
// PreSave assigns a fresh ID when one has not been set. Call before inserting.
func (r *RetentionIdsForDeletion) PreSave() {
	if r.Id == "" {
		r.Id = NewId()
	}
}

View File

@ -0,0 +1,104 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
import (
"net/http"
"sync"
"unicode/utf8"
)
// Draft is an unsent message a user has composed in a channel or thread.
// Drafts are keyed by (UserId, ChannelId, RootId).
type Draft struct {
	CreateAt int64 `json:"create_at"`
	UpdateAt int64 `json:"update_at"`
	DeleteAt int64 `json:"delete_at"` // Deprecated, we now just hard delete the rows
	UserId   string `json:"user_id"`
	ChannelId string `json:"channel_id"`
	// RootId is the thread root post; empty for a channel-level draft.
	RootId  string `json:"root_id"`
	Message string `json:"message"`

	propsMu  sync.RWMutex    `db:"-"`       // Unexported mutex used to guard Draft.Props.
	Props    StringInterface `json:"props"` // Deprecated: use GetProps()
	FileIds  StringArray     `json:"file_ids,omitempty"`
	Metadata *PostMetadata   `json:"metadata,omitempty"`
	Priority StringInterface `json:"priority,omitempty"`
}
// IsValid checks that the draft's required fields are populated and that all
// size-bounded fields fit their limits. maxDraftSize caps Message length in
// runes. Returns nil when the draft is valid.
func (o *Draft) IsValid(maxDraftSize int) *AppError {
	if o.CreateAt == 0 {
		return NewAppError("Drafts.IsValid", "model.draft.is_valid.create_at.app_error", nil, "channelid="+o.ChannelId, http.StatusBadRequest)
	}

	if o.UpdateAt == 0 {
		return NewAppError("Drafts.IsValid", "model.draft.is_valid.update_at.app_error", nil, "channelid="+o.ChannelId, http.StatusBadRequest)
	}

	if !IsValidId(o.UserId) {
		return NewAppError("Drafts.IsValid", "model.draft.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
	}

	if !IsValidId(o.ChannelId) {
		return NewAppError("Drafts.IsValid", "model.draft.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest)
	}

	// RootId may be empty (channel draft) or a valid post ID (thread draft).
	if !(IsValidId(o.RootId) || o.RootId == "") {
		return NewAppError("Drafts.IsValid", "model.draft.is_valid.root_id.app_error", nil, "", http.StatusBadRequest)
	}

	if utf8.RuneCountInString(o.Message) > maxDraftSize {
		return NewAppError("Drafts.IsValid", "model.draft.is_valid.msg.app_error", nil, "channelid="+o.ChannelId, http.StatusBadRequest)
	}

	// The serialized forms of FileIds, Props, and Priority share the post size limits.
	if utf8.RuneCountInString(ArrayToJSON(o.FileIds)) > PostFileidsMaxRunes {
		return NewAppError("Drafts.IsValid", "model.draft.is_valid.file_ids.app_error", nil, "channelid="+o.ChannelId, http.StatusBadRequest)
	}

	if utf8.RuneCountInString(StringInterfaceToJSON(o.GetProps())) > PostPropsMaxRunes {
		return NewAppError("Drafts.IsValid", "model.draft.is_valid.props.app_error", nil, "channelid="+o.ChannelId, http.StatusBadRequest)
	}

	if utf8.RuneCountInString(StringInterfaceToJSON(o.Priority)) > PostPropsMaxRunes {
		return NewAppError("Drafts.IsValid", "model.draft.is_valid.priority.app_error", nil, "channelid="+o.ChannelId, http.StatusBadRequest)
	}

	return nil
}
// SetProps replaces the draft's props while holding the write lock.
func (o *Draft) SetProps(newProps StringInterface) {
	o.propsMu.Lock()
	o.Props = newProps
	o.propsMu.Unlock()
}
// GetProps returns the draft's props under the read lock. Prefer this over
// reading the deprecated Props field directly.
func (o *Draft) GetProps() StringInterface {
	o.propsMu.RLock()
	props := o.Props
	o.propsMu.RUnlock()
	return props
}
// PreSave stamps the create/update timestamps (CreateAt only on first save),
// clears the deprecated DeleteAt, and normalizes the draft via PreCommit.
// Call immediately before writing the draft to the store.
func (o *Draft) PreSave() {
	now := GetMillis()
	if o.CreateAt == 0 {
		o.CreateAt = now
	}
	o.UpdateAt = now
	o.DeleteAt = 0

	o.PreCommit()
}
// PreCommit normalizes the draft right before it is persisted: it ensures
// Props and FileIds are non-nil and removes duplicate file IDs.
func (o *Draft) PreCommit() {
	if o.GetProps() == nil {
		// Use the named StringInterface type to match the Props field,
		// instead of spelling out its underlying map type.
		o.SetProps(make(StringInterface))
	}

	if o.FileIds == nil {
		o.FileIds = []string{}
	}

	// There's a rare bug where the client sends up duplicate FileIds so protect against that
	o.FileIds = RemoveDuplicateStrings(o.FileIds)
}

View File

@ -25,7 +25,18 @@ type Emoji struct {
Name string `json:"name"`
}
func inSystemEmoji(emojiName string) bool {
// Auditable returns the emoji's fields as a map for audit logging.
func (emoji *Emoji) Auditable() map[string]interface{} {
	return map[string]interface{}{
		"id":        emoji.Id,
		"create_at": emoji.CreateAt,
		"update_at": emoji.UpdateAt,
		// Bug fix: this previously logged emoji.CreateAt under "delete_at"
		// (copy/paste error), making deletions unauditable.
		"delete_at":  emoji.DeleteAt,
		"creator_id": emoji.CreatorId,
		"name":       emoji.Name,
	}
}
// IsSystemEmojiName reports whether emojiName refers to a built-in system emoji.
func IsSystemEmojiName(emojiName string) bool {
	if _, found := SystemEmojis[emojiName]; found {
		return true
	}
	return false
}
@ -81,7 +92,7 @@ func IsValidEmojiName(name string) *AppError {
if name == "" || len(name) > EmojiNameMaxLength || !IsValidAlphaNumHyphenUnderscorePlus(name) {
return NewAppError("Emoji.IsValid", "model.emoji.name.app_error", nil, "", http.StatusBadRequest)
}
if inSystemEmoji(name) {
if IsSystemEmojiName(name) {
return NewAppError("Emoji.IsValid", "model.emoji.system_emoji_name.app_error", nil, "", http.StatusBadRequest)
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,110 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
import (
"reflect"
"strconv"
)
// FeatureFlags holds every server feature flag. Boolean flags interpret
// "on"/"true" as true; string flags carry their raw value. See SetDefaults
// for the shipped defaults and ToMap for the client-facing representation.
type FeatureFlags struct {
	// Exists only for unit and manual testing.
	// When set to a value, will be returned by the ping endpoint.
	TestFeature string
	// Exists only for testing bool functionality. Boolean feature flags interpret "on" or "true" as true and
	// all other values as false.
	TestBoolFeature bool

	// Enable the remote cluster service for shared channels.
	EnableRemoteClusterService bool

	// AppsEnabled toggles the Apps framework functionalities both in server and client side
	AppsEnabled bool

	PermalinkPreviews bool

	// CallsEnabled controls whether or not the Calls plugin should be enabled
	CallsEnabled bool

	NormalizeLdapDNs bool

	// Enable WYSIWYG text editor
	WysiwygEditor bool

	OnboardingTourTips bool
	DeprecateCloudFree bool
	CloudReverseTrial  bool

	EnableExportDirectDownload bool

	MoveThreadsEnabled bool

	StreamlinedMarketplace bool

	CloudIPFiltering bool
	ConsumePostHook  bool

	CloudAnnualRenewals    bool
	CloudDedicatedExportUI bool

	ChannelBookmarks bool

	WebSocketEventScope bool

	NotificationMonitoring bool

	ExperimentalAuditSettingsSystemConsoleUI bool

	ClientMetrics bool
}
// SetDefaults resets every feature flag to its shipped default value.
// NOTE(review): PermalinkPreviews is never assigned here, so it keeps its
// current value rather than being reset — confirm whether that is intentional.
func (f *FeatureFlags) SetDefaults() {
	f.TestFeature = "off"
	f.TestBoolFeature = false
	f.EnableRemoteClusterService = false
	f.AppsEnabled = true
	f.NormalizeLdapDNs = false
	f.CallsEnabled = true
	f.DeprecateCloudFree = false
	f.WysiwygEditor = false
	f.OnboardingTourTips = true
	f.CloudReverseTrial = false
	f.EnableExportDirectDownload = false
	f.MoveThreadsEnabled = false
	f.StreamlinedMarketplace = true
	f.CloudIPFiltering = false
	f.ConsumePostHook = false
	f.CloudAnnualRenewals = false
	f.CloudDedicatedExportUI = false
	f.ChannelBookmarks = false
	f.WebSocketEventScope = false
	f.NotificationMonitoring = true
	f.ExperimentalAuditSettingsSystemConsoleUI = false
	f.ClientMetrics = false
}
// ToMap returns the feature flags as a map[string]string.
// Supports boolean and string feature flags: booleans are formatted as
// "true"/"false", everything else via reflect's String().
func (f *FeatureFlags) ToMap() map[string]string {
	structVal := reflect.ValueOf(*f)
	structType := structVal.Type()

	flags := make(map[string]string, structVal.NumField())
	for i := 0; i < structVal.NumField(); i++ {
		fieldVal := structVal.Field(i)
		if !fieldVal.IsValid() {
			continue
		}

		fieldType := structType.Field(i)
		if fieldType.Type.Kind() == reflect.Bool {
			flags[fieldType.Name] = strconv.FormatBool(fieldVal.Bool())
		} else {
			flags[fieldType.Name] = fieldVal.String()
		}
	}

	return flags
}

View File

@ -3,6 +3,8 @@
package model
import "time"
const (
	// MaxImageSize is the maximum accepted pixel count when decoding images.
	MaxImageSize = int64(6048 * 4032) // 24 megapixels, roughly 36MB as a raw image
)
@ -11,3 +13,8 @@ type FileUploadResponse struct {
FileInfos []*FileInfo `json:"file_infos"`
ClientIds []string `json:"client_ids"`
}
// PresignURLResponse pairs a pre-signed download URL with how long it stays valid.
type PresignURLResponse struct {
	URL        string        `json:"url"`
	Expiration time.Duration `json:"expiration"`
}

View File

@ -4,9 +4,6 @@
package model
import (
"image"
"image/gif"
"io"
"mime"
"net/http"
"path/filepath"
@ -35,10 +32,14 @@ type GetFileInfosOptions struct {
}
type FileInfo struct {
Id string `json:"id"`
CreatorId string `json:"user_id"`
PostId string `json:"post_id,omitempty"`
ChannelId string `db:"-" json:"channel_id"`
Id string `json:"id"`
CreatorId string `json:"user_id"`
PostId string `json:"post_id,omitempty"`
// ChannelId is the denormalized value from the corresponding post. Note that this value is
// potentially distinct from the ChannelId provided when the file is first uploaded and
// used to organize the directories in the file store, since in theory that same file
// could be attached to a post from a different channel (or not attached to a post at all).
ChannelId string `json:"channel_id"`
CreateAt int64 `json:"create_at"`
UpdateAt int64 `json:"update_at"`
DeleteAt int64 `json:"delete_at"`
@ -55,6 +56,22 @@ type FileInfo struct {
MiniPreview *[]byte `json:"mini_preview"` // declared as *[]byte to avoid postgres/mysql differences in deserialization
Content string `json:"-"`
RemoteId *string `json:"remote_id"`
Archived bool `json:"archived"`
}
// Auditable returns the file info's non-content fields as a map for audit logging.
func (fi *FileInfo) Auditable() map[string]interface{} {
	audit := make(map[string]interface{}, 10)
	audit["id"] = fi.Id
	audit["creator_id"] = fi.CreatorId
	audit["post_id"] = fi.PostId
	audit["channel_id"] = fi.ChannelId
	audit["create_at"] = fi.CreateAt
	audit["update_at"] = fi.UpdateAt
	audit["delete_at"] = fi.DeleteAt
	audit["name"] = fi.Name
	audit["extension"] = fi.Extension
	audit["size"] = fi.Size
	return audit
}
func (fi *FileInfo) PreSave() {
@ -80,7 +97,7 @@ func (fi *FileInfo) IsValid() *AppError {
return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.id.app_error", nil, "", http.StatusBadRequest)
}
if !IsValidId(fi.CreatorId) && fi.CreatorId != "nouser" {
if !IsValidId(fi.CreatorId) && (fi.CreatorId != "nouser" && fi.CreatorId != BookmarkFileOwner) {
return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.user_id.app_error", nil, "id="+fi.Id, http.StatusBadRequest)
}
@ -107,6 +124,10 @@ func (fi *FileInfo) IsImage() bool {
return strings.HasPrefix(fi.MimeType, "image")
}
// IsSvg reports whether the file is an SVG image, judged by its MIME type.
func (fi *FileInfo) IsSvg() bool {
	const svgMimeType = "image/svg+xml"
	return fi.MimeType == svgMimeType
}
func NewInfo(name string) *FileInfo {
info := &FileInfo{
Name: name,
@ -125,48 +146,6 @@ func NewInfo(name string) *FileInfo {
return info
}
// GetInfoForBytes builds a FileInfo for a file with the given name and
// contents. It derives the MIME type and extension from the name and, for
// images the stdlib can decode, records width/height and whether a static
// preview image should be used (animated gifs are shown directly instead).
// The returned *AppError is non-nil only when gif decoding fails; the
// original's dead `var err *AppError` (always returned nil) is removed in
// favor of an explicit `return info, nil`.
func GetInfoForBytes(name string, data io.ReadSeeker, size int) (*FileInfo, *AppError) {
	info := &FileInfo{
		Name: name,
		Size: int64(size),
	}

	extension := strings.ToLower(filepath.Ext(name))
	info.MimeType = mime.TypeByExtension(extension)

	if extension != "" && extension[0] == '.' {
		// The client expects a file extension without the leading period
		info.Extension = extension[1:]
	} else {
		info.Extension = extension
	}

	if info.IsImage() {
		// Only set the width and height if it's actually an image that we can understand
		if config, _, err := image.DecodeConfig(data); err == nil {
			info.Width = config.Width
			info.Height = config.Height

			if info.MimeType == "image/gif" {
				// Just show the gif itself instead of a preview image for animated gifs.
				// A failed Seek is deliberately ignored; DecodeAll below surfaces it.
				_, _ = data.Seek(0, io.SeekStart)
				gifConfig, gifErr := gif.DecodeAll(data)
				if gifErr != nil {
					// Still return the rest of the info even though it doesn't appear to be an actual gif
					info.HasPreviewImage = true
					return info, NewAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, gifErr.Error(), http.StatusBadRequest)
				}
				info.HasPreviewImage = len(gifConfig.Image) == 1
			} else {
				info.HasPreviewImage = true
			}
		}
	}

	return info, nil
}
func GetEtagForFileInfos(infos []*FileInfo) string {
if len(infos) == 0 {
return Etag()
@ -182,3 +161,17 @@ func GetEtagForFileInfos(infos []*FileInfo) string {
return Etag(infos[0].PostId, maxUpdateAt)
}
// MakeContentInaccessible marks the file as archived and clears every
// content-bearing field (extracted text, previews, and storage paths) so the
// underlying content can no longer be reached through this record. Nil-safe.
func (fi *FileInfo) MakeContentInaccessible() {
	if fi == nil {
		return
	}

	fi.Archived = true
	fi.Content = ""
	fi.HasPreviewImage = false
	fi.MiniPreview = nil
	fi.Path = ""
	fi.PreviewPath = ""
	fi.ThumbnailPath = ""
}

View File

@ -12,6 +12,8 @@ type FileInfoList struct {
FileInfos map[string]*FileInfo `json:"file_infos"`
NextFileInfoId string `json:"next_file_info_id"`
PrevFileInfoId string `json:"prev_file_info_id"`
// If there are inaccessible files, FirstInaccessibleFileTime is the time of the latest inaccessible file
FirstInaccessibleFileTime int64 `json:"first_inaccessible_file_time"`
}
func NewFileInfoList() *FileInfoList {
@ -90,7 +92,7 @@ func (o *FileInfoList) SortByCreateAt() {
func (o *FileInfoList) Etag() string {
id := "0"
var t int64 = 0
var t int64
for _, v := range o.FileInfos {
if v.UpdateAt > t {

View File

@ -19,7 +19,7 @@ type GithubReleaseInfo struct {
func (g *GithubReleaseInfo) IsValid() *AppError {
if g.Id == 0 {
return NewAppError("GithubReleaseInfo.IsValid", "model.github_release_info.is_valid.id.app_error", nil, "", http.StatusInternalServerError)
return NewAppError("GithubReleaseInfo.IsValid", NoTranslation, nil, "empty ID", http.StatusInternalServerError)
}
return nil

View File

@ -31,18 +31,39 @@ var groupSourcesRequiringRemoteID = []GroupSource{
}
type Group struct {
Id string `json:"id"`
Name *string `json:"name,omitempty"`
DisplayName string `json:"display_name"`
Description string `json:"description"`
Source GroupSource `json:"source"`
RemoteId *string `json:"remote_id"`
CreateAt int64 `json:"create_at"`
UpdateAt int64 `json:"update_at"`
DeleteAt int64 `json:"delete_at"`
HasSyncables bool `db:"-" json:"has_syncables"`
MemberCount *int `db:"-" json:"member_count,omitempty"`
AllowReference bool `json:"allow_reference"`
Id string `json:"id"`
Name *string `json:"name,omitempty"`
DisplayName string `json:"display_name"`
Description string `json:"description"`
Source GroupSource `json:"source"`
RemoteId *string `json:"remote_id"`
CreateAt int64 `json:"create_at"`
UpdateAt int64 `json:"update_at"`
DeleteAt int64 `json:"delete_at"`
HasSyncables bool `db:"-" json:"has_syncables"`
MemberCount *int `db:"-" json:"member_count,omitempty"`
AllowReference bool `json:"allow_reference"`
ChannelMemberCount *int `db:"-" json:"channel_member_count,omitempty"`
ChannelMemberTimezonesCount *int `db:"-" json:"channel_member_timezones_count,omitempty"`
MemberIDs []string `db:"-" json:"member_ids"`
}
// Auditable returns the group's fields as a map for audit logging.
// Name, DisplayName, and Description are intentionally not included.
func (group *Group) Auditable() map[string]interface{} {
	audit := make(map[string]interface{}, 9)
	audit["id"] = group.Id
	audit["source"] = group.Source
	audit["remote_id"] = group.RemoteId
	audit["create_at"] = group.CreateAt
	audit["update_at"] = group.UpdateAt
	audit["delete_at"] = group.DeleteAt
	audit["has_syncables"] = group.HasSyncables
	audit["member_count"] = group.MemberCount
	audit["allow_reference"] = group.AllowReference
	return audit
}
// LogClone returns a loggable representation of the group; it reuses the
// audit map so logs and audit records stay consistent.
func (group *Group) LogClone() any {
	return group.Auditable()
}
type GroupWithUserIds struct {
@ -50,6 +71,21 @@ type GroupWithUserIds struct {
UserIds []string `json:"user_ids"`
}
// Auditable returns the group's fields plus its member user IDs for audit logging.
func (group *GroupWithUserIds) Auditable() map[string]interface{} {
	audit := make(map[string]interface{}, 10)
	audit["id"] = group.Id
	audit["source"] = group.Source
	audit["remote_id"] = group.RemoteId
	audit["create_at"] = group.CreateAt
	audit["update_at"] = group.UpdateAt
	audit["delete_at"] = group.DeleteAt
	audit["has_syncables"] = group.HasSyncables
	audit["member_count"] = group.MemberCount
	audit["allow_reference"] = group.AllowReference
	audit["user_ids"] = group.UserIds
	return audit
}
type GroupWithSchemeAdmin struct {
Group
SchemeAdmin *bool `db:"SyncableSchemeAdmin" json:"scheme_admin,omitempty"`
@ -99,10 +135,21 @@ type GroupSearchOpts struct {
// FilterHasMember filters the groups to the intersect of the
// set returned by the query and those that have the given user as a member.
FilterHasMember string
IncludeChannelMemberCount string
IncludeTimezones bool
IncludeMemberIDs bool
// Include archived groups
IncludeArchived bool
// Only return archived groups
FilterArchived bool
}
// GetGroupOpts controls which optional, computed fields are populated when
// fetching a single group.
type GetGroupOpts struct {
	IncludeMemberCount bool
	IncludeMemberIDs   bool
}
type PageOpts struct {
@ -119,6 +166,12 @@ type GroupModifyMembers struct {
UserIds []string `json:"user_ids"`
}
// Auditable returns the affected user IDs for audit logging.
func (group *GroupModifyMembers) Auditable() map[string]interface{} {
	audit := make(map[string]interface{}, 1)
	audit["user_ids"] = group.UserIds
	return audit
}
func (group *Group) Patch(patch *GroupPatch) {
if patch.Name != nil {
group.Name = patch.Name
@ -135,17 +188,17 @@ func (group *Group) Patch(patch *GroupPatch) {
}
func (group *Group) IsValidForCreate() *AppError {
err := group.IsValidName()
if err != nil {
return err
appErr := group.IsValidName()
if appErr != nil {
return appErr
}
if l := len(group.DisplayName); l == 0 || l > GroupDisplayNameMaxLength {
return NewAppError("Group.IsValidForCreate", "model.group.display_name.app_error", map[string]interface{}{"GroupDisplayNameMaxLength": GroupDisplayNameMaxLength}, "", http.StatusBadRequest)
return NewAppError("Group.IsValidForCreate", "model.group.display_name.app_error", map[string]any{"GroupDisplayNameMaxLength": GroupDisplayNameMaxLength}, "", http.StatusBadRequest)
}
if len(group.Description) > GroupDescriptionMaxLength {
return NewAppError("Group.IsValidForCreate", "model.group.description.app_error", map[string]interface{}{"GroupDescriptionMaxLength": GroupDescriptionMaxLength}, "", http.StatusBadRequest)
return NewAppError("Group.IsValidForCreate", "model.group.description.app_error", map[string]any{"GroupDescriptionMaxLength": GroupDescriptionMaxLength}, "", http.StatusBadRequest)
}
isValidSource := false
@ -185,8 +238,8 @@ func (group *Group) IsValidForUpdate() *AppError {
if group.UpdateAt == 0 {
return NewAppError("Group.IsValidForUpdate", "model.group.update_at.app_error", nil, "", http.StatusBadRequest)
}
if err := group.IsValidForCreate(); err != nil {
return err
if appErr := group.IsValidForCreate(); appErr != nil {
return appErr
}
return nil
}
@ -194,14 +247,17 @@ func (group *Group) IsValidForUpdate() *AppError {
var validGroupnameChars = regexp.MustCompile(`^[a-z0-9\.\-_]+$`)
func (group *Group) IsValidName() *AppError {
if group.Name == nil {
if group.AllowReference {
return NewAppError("Group.IsValidName", "model.group.name.app_error", map[string]interface{}{"GroupNameMaxLength": GroupNameMaxLength}, "", http.StatusBadRequest)
return NewAppError("Group.IsValidName", "model.group.name.app_error", map[string]any{"GroupNameMaxLength": GroupNameMaxLength}, "", http.StatusBadRequest)
}
} else {
if l := len(*group.Name); l == 0 || l > GroupNameMaxLength {
return NewAppError("Group.IsValidName", "model.group.name.invalid_length.app_error", map[string]interface{}{"GroupNameMaxLength": GroupNameMaxLength}, "", http.StatusBadRequest)
return NewAppError("Group.IsValidName", "model.group.name.invalid_length.app_error", map[string]any{"GroupNameMaxLength": GroupNameMaxLength}, "", http.StatusBadRequest)
}
if *group.Name == UserNotifyAll || *group.Name == ChannelMentionsNotifyProp || *group.Name == UserNotifyHere {
return NewAppError("IsValidName", "model.group.name.reserved_name.app_error", nil, "", http.StatusBadRequest)
}
if !validGroupnameChars.MatchString(*group.Name) {
@ -229,3 +285,11 @@ type GroupsWithCount struct {
Groups []*Group `json:"groups"`
TotalCount int64 `json:"total_count"`
}
type CreateDefaultMembershipParams struct {
Since int64
ReAddRemovedMembers bool
ScopedUserID *string
ScopedTeamID *string
ScopedChannelID *string
}

View File

@ -21,3 +21,8 @@ func (gm *GroupMember) IsValid() *AppError {
}
return nil
}
type GroupMemberList struct {
Members []*User `json:"members"`
Count int `json:"total_member_count"`
}

View File

@ -42,6 +42,24 @@ type GroupSyncable struct {
TeamID string `db:"-" json:"-"`
}
func (syncable *GroupSyncable) Auditable() map[string]interface{} {
return map[string]interface{}{
"group_id": syncable.GroupId,
"syncable_id": syncable.SyncableId,
"auto_add": syncable.AutoAdd,
"scheme_admin": syncable.SchemeAdmin,
"create_at": syncable.CreateAt,
"delete_at": syncable.DeleteAt,
"update_at": syncable.UpdateAt,
"type": syncable.Type,
"channel_display_name": syncable.ChannelDisplayName,
"team_display_name": syncable.TeamDisplayName,
"team_type": syncable.TeamType,
"channel_type": syncable.ChannelType,
"team_id": syncable.TeamID,
}
}
func (syncable *GroupSyncable) IsValid() *AppError {
if !IsValidId(syncable.GroupId) {
return NewAppError("GroupSyncable.SyncableIsValid", "model.group_syncable.group_id.app_error", nil, "", http.StatusBadRequest)
@ -53,7 +71,7 @@ func (syncable *GroupSyncable) IsValid() *AppError {
}
func (syncable *GroupSyncable) UnmarshalJSON(b []byte) error {
var kvp map[string]interface{}
var kvp map[string]any
err := json.Unmarshal(b, &kvp)
if err != nil {
return err
@ -126,9 +144,7 @@ func (syncable *GroupSyncable) MarshalJSON() ([]byte, error) {
Alias: (*Alias)(syncable),
})
default:
return nil, &json.MarshalerError{
Err: fmt.Errorf("unknown syncable type: %s", syncable.Type),
}
return nil, fmt.Errorf("unknown syncable type: %s", syncable.Type)
}
}
@ -137,6 +153,13 @@ type GroupSyncablePatch struct {
SchemeAdmin *bool `json:"scheme_admin"`
}
func (syncable *GroupSyncablePatch) Auditable() map[string]interface{} {
return map[string]interface{}{
"auto_add": syncable.AutoAdd,
"scheme_admin": syncable.SchemeAdmin,
}
}
func (syncable *GroupSyncable) Patch(patch *GroupSyncablePatch) {
if patch.AutoAdd != nil {
syncable.AutoAdd = *patch.AutoAdd

View File

@ -13,6 +13,13 @@ type GuestsInvite struct {
Message string `json:"message"`
}
func (i *GuestsInvite) Auditable() map[string]interface{} {
return map[string]interface{}{
"emails": i.Emails,
"channels": i.Channels,
}
}
// IsValid validates the user and returns an error if it isn't configured
// correctly.
func (i *GuestsInvite) IsValid() *AppError {

View File

@ -0,0 +1,10 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package model
type SubscribeNewsletterRequest struct {
Email string `json:"email"`
ServerID string `json:"server_id"`
SubscribedContent string `json:"subscribed_content"`
}

View File

@ -30,6 +30,23 @@ type IncomingWebhook struct {
ChannelLocked bool `json:"channel_locked"`
}
func (o *IncomingWebhook) Auditable() map[string]interface{} {
return map[string]interface{}{
"id": o.Id,
"create_at": o.CreateAt,
"update_at": o.UpdateAt,
"delete_at": o.DeleteAt,
"user_id": o.UserId,
"channel_id": o.ChannelId,
"team_id": o.TeamId,
"display_name": o.DisplayName,
"description": o.Description,
"username": o.Username,
"icon_url:": o.IconURL,
"channel_locked": o.ChannelLocked,
}
}
type IncomingWebhookRequest struct {
Text string `json:"text"`
Username string `json:"username"`
@ -39,12 +56,12 @@ type IncomingWebhookRequest struct {
Attachments []*SlackAttachment `json:"attachments"`
Type string `json:"type"`
IconEmoji string `json:"icon_emoji"`
Priority *PostPriority `json:"priority"`
}
func (o *IncomingWebhook) IsValid() *AppError {
if !IsValidId(o.Id) {
return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.id.app_error", nil, "", http.StatusBadRequest)
}
if o.CreateAt == 0 {
@ -106,22 +123,24 @@ func (o *IncomingWebhook) PreUpdate() {
// try to handle that. An example invalid JSON string from an incoming webhook
// might look like this (strings for both "text" and "fallback" attributes are
// invalid JSON strings because they contain unescaped newlines and tabs):
// `{
// "text": "this is a test
// that contains a newline and tabs",
// "attachments": [
// {
// "fallback": "Required plain-text summary of the attachment
// that contains a newline and tabs",
// "color": "#36a64f",
// ...
// "text": "Optional text that appears within the attachment
// that contains a newline and tabs",
// ...
// "thumb_url": "http://example.com/path/to/thumb.png"
// }
// ]
// }`
//
// `{
// "text": "this is a test
// that contains a newline and tabs",
// "attachments": [
// {
// "fallback": "Required plain-text summary of the attachment
// that contains a newline and tabs",
// "color": "#36a64f",
// ...
// "text": "Optional text that appears within the attachment
// that contains a newline and tabs",
// ...
// "thumb_url": "http://example.com/path/to/thumb.png"
// }
// ]
// }`
//
// This function will search for `"key": "value"` pairs, and escape \n, \t
// from the value.
func escapeControlCharsFromPayload(by []byte) []byte {
@ -174,7 +193,7 @@ func IncomingWebhookRequestFromJSON(data io.Reader) (*IncomingWebhookRequest, *A
if err != nil {
o, err = decodeIncomingWebhookRequest(escapeControlCharsFromPayload(by))
if err != nil {
return nil, NewAppError("IncomingWebhookRequestFromJSON", "model.incoming_hook.parse_data.app_error", nil, err.Error(), http.StatusBadRequest)
return nil, NewAppError("IncomingWebhookRequestFromJSON", "model.incoming_hook.parse_data.app_error", nil, "", http.StatusBadRequest).Wrap(err)
}
}

View File

@ -19,12 +19,23 @@ import (
"reflect"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
)
const (
PostActionTypeButton = "button"
PostActionTypeSelect = "select"
InteractiveDialogTriggerTimeoutMilliseconds = 3000
PostActionTypeButton = "button"
PostActionTypeSelect = "select"
DialogTitleMaxLength = 24
DialogElementDisplayNameMaxLength = 24
DialogElementNameMaxLength = 300
DialogElementHelpTextMaxLength = 150
DialogElementTextMaxLength = 150
DialogElementTextareaMaxLength = 3000
DialogElementSelectMaxLength = 3000
DialogElementBoolMaxLength = 150
)
var PostActionRetainPropKeys = []string{"from_webhook", "override_username", "override_icon_url"}
@ -122,6 +133,11 @@ func (p *PostAction) Equals(input *PostAction) bool {
return p.Integration == nil
}
// At this point, input is not nil, so return false if original is.
if p.Integration == nil {
return false
}
// Both are unequal and not nil.
if p.Integration.URL != input.Integration.URL {
return false
@ -164,7 +180,7 @@ type PostActionCookie struct {
ChannelId string `json:"channel_id,omitempty"`
DataSource string `json:"data_source,omitempty"`
Integration *PostActionIntegration `json:"integration,omitempty"`
RetainProps map[string]interface{} `json:"retain_props,omitempty"`
RetainProps map[string]any `json:"retain_props,omitempty"`
RemoveProps []string `json:"remove_props,omitempty"`
}
@ -174,22 +190,22 @@ type PostActionOptions struct {
}
type PostActionIntegration struct {
URL string `json:"url,omitempty"`
Context map[string]interface{} `json:"context,omitempty"`
URL string `json:"url,omitempty"`
Context map[string]any `json:"context,omitempty"`
}
type PostActionIntegrationRequest struct {
UserId string `json:"user_id"`
UserName string `json:"user_name"`
ChannelId string `json:"channel_id"`
ChannelName string `json:"channel_name"`
TeamId string `json:"team_id"`
TeamName string `json:"team_domain"`
PostId string `json:"post_id"`
TriggerId string `json:"trigger_id"`
Type string `json:"type"`
DataSource string `json:"data_source"`
Context map[string]interface{} `json:"context,omitempty"`
UserId string `json:"user_id"`
UserName string `json:"user_name"`
ChannelId string `json:"channel_id"`
ChannelName string `json:"channel_name"`
TeamId string `json:"team_id"`
TeamName string `json:"team_domain"`
PostId string `json:"post_id"`
TriggerId string `json:"trigger_id"`
Type string `json:"type"`
DataSource string `json:"data_source"`
Context map[string]any `json:"context,omitempty"`
}
type PostActionIntegrationResponse struct {
@ -236,15 +252,15 @@ type OpenDialogRequest struct {
}
type SubmitDialogRequest struct {
Type string `json:"type"`
URL string `json:"url,omitempty"`
CallbackId string `json:"callback_id"`
State string `json:"state"`
UserId string `json:"user_id"`
ChannelId string `json:"channel_id"`
TeamId string `json:"team_id"`
Submission map[string]interface{} `json:"submission"`
Cancelled bool `json:"cancelled"`
Type string `json:"type"`
URL string `json:"url,omitempty"`
CallbackId string `json:"callback_id"`
State string `json:"state"`
UserId string `json:"user_id"`
ChannelId string `json:"channel_id"`
TeamId string `json:"team_id"`
Submission map[string]any `json:"submission"`
Cancelled bool `json:"cancelled"`
}
type SubmitDialogResponse struct {
@ -261,7 +277,7 @@ func GenerateTriggerId(userId string, s crypto.Signer) (string, string, *AppErro
sum.Write([]byte(triggerData))
signature, err := s.Sign(rand.Reader, sum.Sum(nil), h)
if err != nil {
return "", "", NewAppError("GenerateTriggerId", "interactive_message.generate_trigger_id.signing_failed", nil, err.Error(), http.StatusInternalServerError)
return "", "", NewAppError("GenerateTriggerId", "interactive_message.generate_trigger_id.signing_failed", nil, "", http.StatusInternalServerError).Wrap(err)
}
base64Sig := base64.StdEncoding.EncodeToString(signature)
@ -271,19 +287,19 @@ func GenerateTriggerId(userId string, s crypto.Signer) (string, string, *AppErro
}
func (r *PostActionIntegrationRequest) GenerateTriggerId(s crypto.Signer) (string, string, *AppError) {
clientTriggerId, triggerId, err := GenerateTriggerId(r.UserId, s)
if err != nil {
return "", "", err
clientTriggerId, triggerId, appErr := GenerateTriggerId(r.UserId, s)
if appErr != nil {
return "", "", appErr
}
r.TriggerId = triggerId
return clientTriggerId, triggerId, nil
}
func DecodeAndVerifyTriggerId(triggerId string, s *ecdsa.PrivateKey) (string, string, *AppError) {
func DecodeAndVerifyTriggerId(triggerId string, s *ecdsa.PrivateKey, timeout time.Duration) (string, string, *AppError) {
triggerIdBytes, err := base64.StdEncoding.DecodeString(triggerId)
if err != nil {
return "", "", NewAppError("DecodeAndVerifyTriggerId", "interactive_message.decode_trigger_id.base64_decode_failed", nil, err.Error(), http.StatusBadRequest)
return "", "", NewAppError("DecodeAndVerifyTriggerId", "interactive_message.decode_trigger_id.base64_decode_failed", nil, "", http.StatusBadRequest).Wrap(err)
}
split := strings.Split(string(triggerIdBytes), ":")
@ -296,14 +312,13 @@ func DecodeAndVerifyTriggerId(triggerId string, s *ecdsa.PrivateKey) (string, st
timestampStr := split[2]
timestamp, _ := strconv.ParseInt(timestampStr, 10, 64)
now := GetMillis()
if now-timestamp > InteractiveDialogTriggerTimeoutMilliseconds {
return "", "", NewAppError("DecodeAndVerifyTriggerId", "interactive_message.decode_trigger_id.expired", map[string]interface{}{"Seconds": InteractiveDialogTriggerTimeoutMilliseconds / 1000}, "", http.StatusBadRequest)
if time.Since(time.UnixMilli(timestamp)) > timeout {
return "", "", NewAppError("DecodeAndVerifyTriggerId", "interactive_message.decode_trigger_id.expired", map[string]any{"Duration": timeout.String()}, "", http.StatusBadRequest)
}
signature, err := base64.StdEncoding.DecodeString(split[3])
if err != nil {
return "", "", NewAppError("DecodeAndVerifyTriggerId", "interactive_message.decode_trigger_id.base64_decode_failed_signature", nil, err.Error(), http.StatusBadRequest)
return "", "", NewAppError("DecodeAndVerifyTriggerId", "interactive_message.decode_trigger_id.base64_decode_failed_signature", nil, "", http.StatusBadRequest).Wrap(err)
}
var esig struct {
@ -311,7 +326,7 @@ func DecodeAndVerifyTriggerId(triggerId string, s *ecdsa.PrivateKey) (string, st
}
if _, err := asn1.Unmarshal(signature, &esig); err != nil {
return "", "", NewAppError("DecodeAndVerifyTriggerId", "interactive_message.decode_trigger_id.signature_decode_failed", nil, err.Error(), http.StatusBadRequest)
return "", "", NewAppError("DecodeAndVerifyTriggerId", "interactive_message.decode_trigger_id.signature_decode_failed", nil, "", http.StatusBadRequest).Wrap(err)
}
triggerData := strings.Join([]string{clientTriggerId, userId, timestampStr}, ":") + ":"
@ -327,8 +342,150 @@ func DecodeAndVerifyTriggerId(triggerId string, s *ecdsa.PrivateKey) (string, st
return clientTriggerId, userId, nil
}
func (r *OpenDialogRequest) DecodeAndVerifyTriggerId(s *ecdsa.PrivateKey) (string, string, *AppError) {
return DecodeAndVerifyTriggerId(r.TriggerId, s)
func (r *OpenDialogRequest) DecodeAndVerifyTriggerId(s *ecdsa.PrivateKey, timeout time.Duration) (string, string, *AppError) {
return DecodeAndVerifyTriggerId(r.TriggerId, s, timeout)
}
func (r *OpenDialogRequest) IsValid() error {
var multiErr *multierror.Error
if r.URL == "" {
multiErr = multierror.Append(multiErr, errors.New("empty URL"))
}
if r.TriggerId == "" {
multiErr = multierror.Append(multiErr, errors.New("empty trigger id"))
}
err := r.Dialog.IsValid()
if err != nil {
multiErr = multierror.Append(multiErr, err)
}
return multiErr.ErrorOrNil()
}
func (d *Dialog) IsValid() error {
var multiErr *multierror.Error
if d.Title == "" || len(d.Title) > DialogTitleMaxLength {
multiErr = multierror.Append(multiErr, errors.Errorf("invalid dialog title %q", d.Title))
}
if d.IconURL != "" && !IsValidHTTPURL(d.IconURL) {
multiErr = multierror.Append(multiErr, errors.New("invalid icon url"))
}
if len(d.Elements) != 0 {
elementMap := make(map[string]bool)
for _, element := range d.Elements {
if elementMap[element.Name] {
multiErr = multierror.Append(multiErr, errors.Errorf("duplicate dialog element %q", element.Name))
}
elementMap[element.Name] = true
err := element.IsValid()
if err != nil {
multiErr = multierror.Append(multiErr, errors.Wrapf(err, "%q field is not valid", element.Name))
}
}
}
return multiErr.ErrorOrNil()
}
func (e *DialogElement) IsValid() error {
var multiErr *multierror.Error
textSubTypes := map[string]bool{
"": true,
"text": true,
"email": true,
"number": true,
"tel": true,
"url": true,
"password": true,
}
if e.MinLength < 0 {
multiErr = multierror.Append(multiErr, errors.Errorf("min length cannot be a negative number, got %d", e.MinLength))
}
if e.MinLength > e.MaxLength {
multiErr = multierror.Append(multiErr, errors.Errorf("min length should be less then max length, got %d > %d", e.MinLength, e.MaxLength))
}
multiErr = multierror.Append(multiErr, checkMaxLength("DisplayName", e.DisplayName, DialogElementDisplayNameMaxLength))
multiErr = multierror.Append(multiErr, checkMaxLength("Name", e.Name, DialogElementNameMaxLength))
multiErr = multierror.Append(multiErr, checkMaxLength("HelpText", e.HelpText, DialogElementHelpTextMaxLength))
switch e.Type {
case "text":
multiErr = multierror.Append(multiErr, checkMaxLength("Default", e.Default, DialogElementTextMaxLength))
multiErr = multierror.Append(multiErr, checkMaxLength("Placeholder", e.Placeholder, DialogElementTextMaxLength))
if _, ok := textSubTypes[e.SubType]; !ok {
multiErr = multierror.Append(multiErr, errors.Errorf("invalid subtype %q", e.Type))
}
case "textarea":
multiErr = multierror.Append(multiErr, checkMaxLength("Default", e.Default, DialogElementTextareaMaxLength))
multiErr = multierror.Append(multiErr, checkMaxLength("Placeholder", e.Placeholder, DialogElementTextareaMaxLength))
if _, ok := textSubTypes[e.SubType]; !ok {
multiErr = multierror.Append(multiErr, errors.Errorf("invalid subtype %q", e.Type))
}
case "select":
multiErr = multierror.Append(multiErr, checkMaxLength("Default", e.Default, DialogElementSelectMaxLength))
multiErr = multierror.Append(multiErr, checkMaxLength("Placeholder", e.Placeholder, DialogElementSelectMaxLength))
if e.DataSource != "" && e.DataSource != "users" && e.DataSource != "channels" {
multiErr = multierror.Append(multiErr, errors.Errorf("invalid data source %q, allowed are 'users' or 'channels'", e.DataSource))
}
if e.DataSource == "" && !isDefaultInOptions(e.Default, e.Options) {
multiErr = multierror.Append(multiErr, errors.Errorf("default value %q doesn't exist in options ", e.Default))
}
case "bool":
if e.Default != "" && e.Default != "true" && e.Default != "false" {
multiErr = multierror.Append(multiErr, errors.New("invalid default of bool"))
}
multiErr = multierror.Append(multiErr, checkMaxLength("Placeholder", e.Placeholder, DialogElementBoolMaxLength))
case "radio":
if !isDefaultInOptions(e.Default, e.Options) {
multiErr = multierror.Append(multiErr, errors.Errorf("default value %q doesn't exist in options ", e.Default))
}
default:
multiErr = multierror.Append(multiErr, errors.Errorf("invalid element type: %q", e.Type))
}
return multiErr.ErrorOrNil()
}
func isDefaultInOptions(defaultValue string, options []*PostActionOptions) bool {
if defaultValue == "" {
return true
}
for _, option := range options {
if defaultValue == option.Value {
return true
}
}
return false
}
func checkMaxLength(fieldName string, field string, length int) error {
var valid bool
// DisplayName and Name are required fields
if fieldName == "DisplayName" || fieldName == "Name" {
valid = len(field) > 0 && len(field) > length
} else {
valid = len(field) > length
}
if valid {
return errors.Errorf("%v cannot be longer than %d characters", fieldName, length)
}
return nil
}
func (o *Post) StripActionIntegrations() {
@ -373,7 +530,7 @@ func AddPostActionCookies(o *Post, secret []byte) *Post {
p := o.Clone()
// retainedProps carry over their value from the old post, including no value
retainProps := map[string]interface{}{}
retainProps := map[string]any{}
removeProps := []string{}
for _, key := range PostActionRetainPropKeys {
value, ok := p.GetProps()[key]
@ -434,7 +591,7 @@ func encryptPostActionCookie(plain string, secret []byte) (string, error) {
sealed := aesgcm.Seal(nil, nonce, []byte(plain), nil)
combined := append(nonce, sealed...)
combined := append(nonce, sealed...) //nolint:makezero
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(combined)))
base64.StdEncoding.Encode(encoded, combined)

View File

@ -22,25 +22,25 @@ type RelationalIntegrityCheckData struct {
}
type IntegrityCheckResult struct {
Data interface{} `json:"data"`
Err error `json:"err"`
Data any `json:"data"`
Err error `json:"err"`
}
func (r *IntegrityCheckResult) UnmarshalJSON(b []byte) error {
var data map[string]interface{}
var data map[string]any
if err := json.Unmarshal(b, &data); err != nil {
return err
}
if d, ok := data["data"]; ok && d != nil {
var rdata RelationalIntegrityCheckData
m := d.(map[string]interface{})
m := d.(map[string]any)
rdata.ParentName = m["parent_name"].(string)
rdata.ChildName = m["child_name"].(string)
rdata.ParentIdAttr = m["parent_id_attr"].(string)
rdata.ChildIdAttr = m["child_id_attr"].(string)
for _, recData := range m["records"].([]interface{}) {
for _, recData := range m["records"].([]any) {
var record OrphanedRecord
m := recData.(map[string]interface{})
m := recData.(map[string]any)
if val := m["parent_id"]; val != nil {
record.ParentId = NewString(val.(string))
}

View File

@ -0,0 +1,20 @@
package model
type AllowedIPRanges []AllowedIPRange
type AllowedIPRange struct {
CIDRBlock string `json:"cidr_block"`
Description string `json:"description"`
Enabled bool `json:"enabled"`
OwnerID string `json:"owner_id"`
}
func (air *AllowedIPRanges) Auditable() map[string]interface{} {
return map[string]interface{}{
"AllowedIPRanges": air,
}
}
type GetIPAddressResponse struct {
IP string `json:"ip"`
}

View File

@ -5,7 +5,6 @@ package model
import (
"net/http"
"time"
)
const (
@ -27,6 +26,19 @@ const (
JobTypeCloud = "cloud"
JobTypeResendInvitationEmail = "resend_invitation_email"
JobTypeExtractContent = "extract_content"
JobTypeLastAccessiblePost = "last_accessible_post"
JobTypeLastAccessibleFile = "last_accessible_file"
JobTypeUpgradeNotifyAdmin = "upgrade_notify_admin"
JobTypeTrialNotifyAdmin = "trial_notify_admin"
JobTypePostPersistentNotifications = "post_persistent_notifications"
JobTypeInstallPluginNotifyAdmin = "install_plugin_notify_admin"
JobTypeHostedPurchaseScreening = "hosted_purchase_screening"
JobTypeS3PathMigration = "s3_path_migration"
JobTypeCleanupDesktopTokens = "cleanup_desktop_tokens"
JobTypeDeleteEmptyDraftsMigration = "delete_empty_drafts_migration"
JobTypeRefreshPostStats = "refresh_post_stats"
JobTypeDeleteOrphanDraftsMigration = "delete_orphan_drafts_migration"
JobTypeExportUsersToCSV = "export_users_to_csv"
JobStatusPending = "pending"
JobStatusInProgress = "in_progress"
@ -55,6 +67,10 @@ var AllJobTypes = [...]string{
JobTypeExportDelete,
JobTypeCloud,
JobTypeExtractContent,
JobTypeLastAccessiblePost,
JobTypeLastAccessibleFile,
JobTypeCleanupDesktopTokens,
JobTypeRefreshPostStats,
}
type Job struct {
@ -69,6 +85,20 @@ type Job struct {
Data StringMap `json:"data"`
}
func (j *Job) Auditable() map[string]interface{} {
return map[string]interface{}{
"id": j.Id,
"type": j.Type,
"priority": j.Priority,
"create_at": j.CreateAt,
"start_at": j.StartAt,
"last_activity_at": j.LastActivityAt,
"status": j.Status,
"progress": j.Progress,
"data": j.Data, // TODO do we want this here
}
}
func (j *Job) IsValid() *AppError {
if !IsValidId(j.Id) {
return NewAppError("Job.IsValid", "model.job.is_valid.id.app_error", nil, "id="+j.Id, http.StatusBadRequest)
@ -79,12 +109,13 @@ func (j *Job) IsValid() *AppError {
}
switch j.Status {
case JobStatusPending:
case JobStatusInProgress:
case JobStatusSuccess:
case JobStatusError:
case JobStatusCancelRequested:
case JobStatusCanceled:
case JobStatusPending,
JobStatusInProgress,
JobStatusSuccess,
JobStatusError,
JobStatusWarning,
JobStatusCancelRequested,
JobStatusCanceled:
default:
return NewAppError("Job.IsValid", "model.job.is_valid.status.app_error", nil, "id="+j.Id, http.StatusBadRequest)
}
@ -92,15 +123,13 @@ func (j *Job) IsValid() *AppError {
return nil
}
func (j *Job) LogClone() any {
return j.Auditable()
}
type Worker interface {
Run()
Stop()
JobChannel() chan<- Job
IsEnabled(cfg *Config) bool
}
type Scheduler interface {
Enabled(cfg *Config) bool
NextScheduleTime(cfg *Config, now time.Time, pendingJobs bool, lastSuccessfulJob *Job) *time.Time
ScheduleJob(cfg *Config, pendingJobs bool, lastSuccessfulJob *Job) (*Job, *AppError)
}

Some files were not shown because too many files have changed in this diff Show More