mirror of
https://github.com/42wim/matterbridge.git
synced 2024-12-22 11:12:44 +01:00
Update dependencies and remove old matterclient lib (#2067)
This commit is contained in:
parent
9459495484
commit
56e7bd01ca
72
go.mod
72
go.mod
@ -6,56 +6,54 @@ require (
|
||||
github.com/Philipp15b/go-steam v1.0.1-0.20200727090957-6ae9b3c0a560
|
||||
github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c
|
||||
github.com/SevereCloud/vksdk/v2 v2.16.0
|
||||
github.com/bwmarrin/discordgo v0.27.0
|
||||
github.com/d5/tengo/v2 v2.13.0
|
||||
github.com/bwmarrin/discordgo v0.27.1
|
||||
github.com/d5/tengo/v2 v2.16.1
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/fsnotify/fsnotify v1.6.0
|
||||
github.com/gomarkdown/markdown v0.0.0-20221013030248-663e2500819c
|
||||
github.com/gomarkdown/markdown v0.0.0-20230716120725-531d2d74bc12
|
||||
github.com/google/gops v0.3.27
|
||||
github.com/gorilla/schema v1.2.0
|
||||
github.com/gorilla/websocket v1.5.0
|
||||
github.com/harmony-development/shibshib v0.0.0-20220101224523-c98059d09cfa
|
||||
github.com/hashicorp/golang-lru v0.6.0
|
||||
github.com/jpillora/backoff v1.0.0
|
||||
github.com/keybase/go-keybase-chat-bot v0.0.0-20221220212439-e48d9abd2c20
|
||||
github.com/kyokomi/emoji/v2 v2.2.12
|
||||
github.com/labstack/echo/v4 v4.10.2
|
||||
github.com/lrstanley/girc v0.0.0-20221222153823-a92667a5c9b4
|
||||
github.com/labstack/echo/v4 v4.11.1
|
||||
github.com/lrstanley/girc v0.0.0-20230729130341-dd5853a5f1a6
|
||||
github.com/matterbridge/Rocket.Chat.Go.SDK v0.0.0-20211016222428-79310a412696
|
||||
github.com/matterbridge/go-xmpp v0.0.0-20211030125215-791a06c5f1be
|
||||
github.com/matterbridge/gomatrix v0.0.0-20220411225302-271e5088ea27
|
||||
github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75
|
||||
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba
|
||||
github.com/matterbridge/matterclient v0.0.0-20221106190440-8bcf49695e0d
|
||||
github.com/matterbridge/matterclient v0.0.0-20230329213635-bc6e42a4a84a
|
||||
github.com/matterbridge/telegram-bot-api/v6 v6.5.0
|
||||
github.com/mattermost/mattermost-server/v5 v5.39.3
|
||||
github.com/mattermost/mattermost-server/v6 v6.7.2
|
||||
github.com/mattn/godown v0.0.1
|
||||
github.com/mdp/qrterminal v1.0.1
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/nelsonken/gomf v0.0.0-20190423072027-c65cc0469e94
|
||||
github.com/olahol/melody v1.1.2
|
||||
github.com/olahol/melody v1.1.4
|
||||
github.com/paulrosania/go-charset v0.0.0-20190326053356-55c9d7a5834c
|
||||
github.com/rs/xid v1.4.0
|
||||
github.com/rs/xid v1.5.0
|
||||
github.com/russross/blackfriday v1.6.0
|
||||
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
|
||||
github.com/shazow/ssh-chat v1.10.1
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/slack-go/slack v0.12.1
|
||||
github.com/spf13/viper v1.15.0
|
||||
github.com/stretchr/testify v1.8.1
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/slack-go/slack v0.12.2
|
||||
github.com/spf13/viper v1.16.0
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/vincent-petithory/dataurl v1.0.0
|
||||
github.com/writeas/go-strip-markdown v2.0.1+incompatible
|
||||
github.com/yaegashi/msgraph.go v0.1.4
|
||||
github.com/zfjagann/golang-ring v0.0.0-20220330170733-19bcea1b6289
|
||||
go.mau.fi/whatsmeow v0.0.0-20230306190159-5caded34a872
|
||||
golang.org/x/image v0.6.0
|
||||
golang.org/x/oauth2 v0.6.0
|
||||
golang.org/x/text v0.8.0
|
||||
go.mau.fi/whatsmeow v0.0.0-20230805111647-405414b9b5c0
|
||||
golang.org/x/image v0.11.0
|
||||
golang.org/x/oauth2 v0.11.0
|
||||
golang.org/x/text v0.12.0
|
||||
gomod.garykim.dev/nc-talk v0.3.0
|
||||
google.golang.org/protobuf v1.29.1
|
||||
google.golang.org/protobuf v1.31.0
|
||||
layeh.com/gumble v0.0.0-20221205141517-d1df60a3cc14
|
||||
modernc.org/sqlite v1.21.0
|
||||
modernc.org/sqlite v1.25.0
|
||||
)
|
||||
|
||||
require (
|
||||
@ -65,17 +63,16 @@ require (
|
||||
github.com/apex/log v1.9.0 // indirect
|
||||
github.com/av-elier/go-decimal-to-rational v0.0.0-20191127152832-89e6aad02ecf // indirect
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/dyatlov/go-opengraph v0.0.0-20210112100619-dae8665a5b09 // indirect
|
||||
github.com/francoispqt/gojay v1.2.13 // indirect
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.3 // indirect
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gopackage/ddp v0.0.3 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/graph-gophers/graphql-go v1.3.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
@ -86,10 +83,9 @@ require (
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 // indirect
|
||||
github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d // indirect
|
||||
github.com/mattermost/logr v1.0.13 // indirect
|
||||
github.com/mattermost/logr/v2 v2.0.15 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.17 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
@ -102,7 +98,7 @@ require (
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/pborman/uuid v1.2.1 // indirect
|
||||
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
|
||||
github.com/philhofer/fwd v1.1.1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
@ -113,8 +109,8 @@ require (
|
||||
github.com/shazow/rateio v0.0.0-20200113175441-4461efc8bdc4 // indirect
|
||||
github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882 // indirect
|
||||
github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9 // indirect
|
||||
github.com/spf13/afero v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/afero v1.9.5 // indirect
|
||||
github.com/spf13/cast v1.5.1 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/subosito/gotenv v1.4.2 // indirect
|
||||
@ -123,18 +119,14 @@ require (
|
||||
github.com/valyala/fasttemplate v1.2.2 // indirect
|
||||
github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
|
||||
github.com/wiggin77/cfg v1.0.2 // indirect
|
||||
github.com/wiggin77/merror v1.0.3 // indirect
|
||||
github.com/wiggin77/srslog v1.0.1 // indirect
|
||||
go.mau.fi/libsignal v0.1.0 // indirect
|
||||
go.uber.org/atomic v1.9.0 // indirect
|
||||
go.uber.org/multierr v1.8.0 // indirect
|
||||
go.uber.org/zap v1.21.0 // indirect
|
||||
golang.org/x/crypto v0.6.0 // indirect
|
||||
golang.org/x/crypto v0.12.0 // indirect
|
||||
golang.org/x/mod v0.8.0 // indirect
|
||||
golang.org/x/net v0.8.0 // indirect
|
||||
golang.org/x/sys v0.6.0 // indirect
|
||||
golang.org/x/term v0.6.0 // indirect
|
||||
golang.org/x/net v0.14.0 // indirect
|
||||
golang.org/x/sys v0.11.0 // indirect
|
||||
golang.org/x/term v0.11.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.6.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
@ -145,9 +137,9 @@ require (
|
||||
lukechampine.com/uint128 v1.2.0 // indirect
|
||||
modernc.org/cc/v3 v3.40.0 // indirect
|
||||
modernc.org/ccgo/v3 v3.16.13 // indirect
|
||||
modernc.org/libc v1.22.3 // indirect
|
||||
modernc.org/libc v1.24.1 // indirect
|
||||
modernc.org/mathutil v1.5.0 // indirect
|
||||
modernc.org/memory v1.5.0 // indirect
|
||||
modernc.org/memory v1.6.0 // indirect
|
||||
modernc.org/opt v0.1.3 // indirect
|
||||
modernc.org/strutil v1.1.3 // indirect
|
||||
modernc.org/token v1.0.1 // indirect
|
||||
@ -156,4 +148,4 @@ require (
|
||||
|
||||
//replace github.com/matrix-org/gomatrix => github.com/matterbridge/gomatrix v0.0.0-20220205235239-607eb9ee6419
|
||||
|
||||
go 1.18
|
||||
go 1.19
|
||||
|
1
matterclient/README.md
Normal file
1
matterclient/README.md
Normal file
@ -0,0 +1 @@
|
||||
Find matterclient on https://github.com/matterbridge/matterclient
|
@ -1,226 +0,0 @@
|
||||
package matterclient
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/mattermost/mattermost-server/v5/model"
|
||||
)
|
||||
|
||||
// GetChannels returns all channels we're members off
|
||||
func (m *MMClient) GetChannels() []*model.Channel {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
var channels []*model.Channel
|
||||
// our primary team channels first
|
||||
channels = append(channels, m.Team.Channels...)
|
||||
for _, t := range m.OtherTeams {
|
||||
if t.Id != m.Team.Id {
|
||||
channels = append(channels, t.Channels...)
|
||||
}
|
||||
}
|
||||
return channels
|
||||
}
|
||||
|
||||
func (m *MMClient) GetChannelHeader(channelId string) string { //nolint:golint
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
for _, t := range m.OtherTeams {
|
||||
for _, channel := range append(t.Channels, t.MoreChannels...) {
|
||||
if channel.Id == channelId {
|
||||
return channel.Header
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func getNormalisedName(channel *model.Channel) string {
|
||||
if channel.Type == model.CHANNEL_GROUP {
|
||||
// (deprecated in favor of ReplaceAll in go 1.12)
|
||||
res := strings.Replace(channel.DisplayName, ", ", "-", -1) //nolint: gocritic
|
||||
res = strings.Replace(res, " ", "_", -1) //nolint: gocritic
|
||||
return res
|
||||
}
|
||||
return channel.Name
|
||||
}
|
||||
|
||||
func (m *MMClient) GetChannelId(name string, teamId string) string { //nolint:golint
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
if teamId != "" {
|
||||
return m.getChannelIdTeam(name, teamId)
|
||||
}
|
||||
|
||||
for _, t := range m.OtherTeams {
|
||||
for _, channel := range append(t.Channels, t.MoreChannels...) {
|
||||
if getNormalisedName(channel) == name {
|
||||
return channel.Id
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *MMClient) getChannelIdTeam(name string, teamId string) string { //nolint:golint
|
||||
for _, t := range m.OtherTeams {
|
||||
if t.Id == teamId {
|
||||
for _, channel := range append(t.Channels, t.MoreChannels...) {
|
||||
if getNormalisedName(channel) == name {
|
||||
return channel.Id
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *MMClient) GetChannelName(channelId string) string { //nolint:golint
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
for _, t := range m.OtherTeams {
|
||||
if t == nil {
|
||||
continue
|
||||
}
|
||||
for _, channel := range append(t.Channels, t.MoreChannels...) {
|
||||
if channel.Id == channelId {
|
||||
return getNormalisedName(channel)
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *MMClient) GetChannelTeamId(id string) string { //nolint:golint
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
for _, t := range append(m.OtherTeams, m.Team) {
|
||||
for _, channel := range append(t.Channels, t.MoreChannels...) {
|
||||
if channel.Id == id {
|
||||
return channel.TeamId
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *MMClient) GetLastViewedAt(channelId string) int64 { //nolint:golint
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
res, resp := m.Client.GetChannelMember(channelId, m.User.Id, "")
|
||||
if resp.Error != nil {
|
||||
return model.GetMillis()
|
||||
}
|
||||
return res.LastViewedAt
|
||||
}
|
||||
|
||||
// GetMoreChannels returns existing channels where we're not a member off.
|
||||
func (m *MMClient) GetMoreChannels() []*model.Channel {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
var channels []*model.Channel
|
||||
for _, t := range m.OtherTeams {
|
||||
channels = append(channels, t.MoreChannels...)
|
||||
}
|
||||
return channels
|
||||
}
|
||||
|
||||
// GetTeamFromChannel returns teamId belonging to channel (DM channels have no teamId).
|
||||
func (m *MMClient) GetTeamFromChannel(channelId string) string { //nolint:golint
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
var channels []*model.Channel
|
||||
for _, t := range m.OtherTeams {
|
||||
channels = append(channels, t.Channels...)
|
||||
if t.MoreChannels != nil {
|
||||
channels = append(channels, t.MoreChannels...)
|
||||
}
|
||||
for _, c := range channels {
|
||||
if c.Id == channelId {
|
||||
if c.Type == model.CHANNEL_GROUP {
|
||||
return "G"
|
||||
}
|
||||
return t.Id
|
||||
}
|
||||
}
|
||||
channels = nil
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *MMClient) JoinChannel(channelId string) error { //nolint:golint
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
for _, c := range m.Team.Channels {
|
||||
if c.Id == channelId {
|
||||
m.logger.Debug("Not joining ", channelId, " already joined.")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
m.logger.Debug("Joining ", channelId)
|
||||
_, resp := m.Client.AddChannelMember(channelId, m.User.Id)
|
||||
if resp.Error != nil {
|
||||
return resp.Error
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MMClient) UpdateChannelsTeam(teamID string) error {
|
||||
mmchannels, resp := m.Client.GetChannelsForTeamForUser(teamID, m.User.Id, false, "")
|
||||
if resp.Error != nil {
|
||||
return errors.New(resp.Error.DetailedError)
|
||||
}
|
||||
for idx, t := range m.OtherTeams {
|
||||
if t.Id == teamID {
|
||||
m.Lock()
|
||||
m.OtherTeams[idx].Channels = mmchannels
|
||||
m.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
mmchannels, resp = m.Client.GetPublicChannelsForTeam(teamID, 0, 5000, "")
|
||||
if resp.Error != nil {
|
||||
return errors.New(resp.Error.DetailedError)
|
||||
}
|
||||
for idx, t := range m.OtherTeams {
|
||||
if t.Id == teamID {
|
||||
m.Lock()
|
||||
m.OtherTeams[idx].MoreChannels = mmchannels
|
||||
m.Unlock()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MMClient) UpdateChannels() error {
|
||||
if err := m.UpdateChannelsTeam(m.Team.Id); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, t := range m.OtherTeams {
|
||||
if err := m.UpdateChannelsTeam(t.Id); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MMClient) UpdateChannelHeader(channelId string, header string) { //nolint:golint
|
||||
channel := &model.Channel{Id: channelId, Header: header}
|
||||
m.logger.Debugf("updating channelheader %#v, %#v", channelId, header)
|
||||
_, resp := m.Client.UpdateChannel(channel)
|
||||
if resp.Error != nil {
|
||||
m.logger.Error(resp.Error)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MMClient) UpdateLastViewed(channelId string) error { //nolint:golint
|
||||
m.logger.Debugf("posting lastview %#v", channelId)
|
||||
view := &model.ChannelView{ChannelId: channelId}
|
||||
_, resp := m.Client.ViewChannel(m.User.Id, view)
|
||||
if resp.Error != nil {
|
||||
m.logger.Errorf("ChannelView update for %s failed: %s", channelId, resp.Error)
|
||||
return resp.Error
|
||||
}
|
||||
return nil
|
||||
}
|
@ -1,297 +0,0 @@
|
||||
package matterclient
|
||||
|
||||
import (
|
||||
"crypto/md5" //nolint:gosec
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/jpillora/backoff"
|
||||
"github.com/mattermost/mattermost-server/v5/model"
|
||||
)
|
||||
|
||||
func (m *MMClient) doLogin(firstConnection bool, b *backoff.Backoff) error {
|
||||
var resp *model.Response
|
||||
var appErr *model.AppError
|
||||
var logmsg = "trying login"
|
||||
var err error
|
||||
for {
|
||||
m.logger.Debugf("%s %s %s %s", logmsg, m.Credentials.Team, m.Credentials.Login, m.Credentials.Server)
|
||||
if m.Credentials.Token != "" {
|
||||
resp, err = m.doLoginToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
m.User, resp = m.Client.Login(m.Credentials.Login, m.Credentials.Pass)
|
||||
}
|
||||
appErr = resp.Error
|
||||
if appErr != nil {
|
||||
d := b.Duration()
|
||||
m.logger.Debug(appErr.DetailedError)
|
||||
if firstConnection {
|
||||
if appErr.Message == "" {
|
||||
return errors.New(appErr.DetailedError)
|
||||
}
|
||||
return errors.New(appErr.Message)
|
||||
}
|
||||
m.logger.Debugf("LOGIN: %s, reconnecting in %s", appErr, d)
|
||||
time.Sleep(d)
|
||||
logmsg = "retrying login"
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
// reset timer
|
||||
b.Reset()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MMClient) doLoginToken() (*model.Response, error) {
|
||||
var resp *model.Response
|
||||
var logmsg = "trying login"
|
||||
m.Client.AuthType = model.HEADER_BEARER
|
||||
m.Client.AuthToken = m.Credentials.Token
|
||||
if m.Credentials.CookieToken {
|
||||
m.logger.Debugf(logmsg + " with cookie (MMAUTH) token")
|
||||
m.Client.HttpClient.Jar = m.createCookieJar(m.Credentials.Token)
|
||||
} else {
|
||||
m.logger.Debugf(logmsg + " with personal token")
|
||||
}
|
||||
m.User, resp = m.Client.GetMe("")
|
||||
if resp.Error != nil {
|
||||
return resp, resp.Error
|
||||
}
|
||||
if m.User == nil {
|
||||
m.logger.Errorf("LOGIN TOKEN: %s is invalid", m.Credentials.Pass)
|
||||
return resp, errors.New("invalid token")
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (m *MMClient) handleLoginToken() error {
|
||||
switch {
|
||||
case strings.Contains(m.Credentials.Pass, model.SESSION_COOKIE_TOKEN):
|
||||
token := strings.Split(m.Credentials.Pass, model.SESSION_COOKIE_TOKEN+"=")
|
||||
if len(token) != 2 {
|
||||
return errors.New("incorrect MMAUTHTOKEN. valid input is MMAUTHTOKEN=yourtoken")
|
||||
}
|
||||
m.Credentials.Token = token[1]
|
||||
m.Credentials.CookieToken = true
|
||||
case strings.Contains(m.Credentials.Pass, "token="):
|
||||
token := strings.Split(m.Credentials.Pass, "token=")
|
||||
if len(token) != 2 {
|
||||
return errors.New("incorrect personal token. valid input is token=yourtoken")
|
||||
}
|
||||
m.Credentials.Token = token[1]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MMClient) initClient(firstConnection bool, b *backoff.Backoff) error {
|
||||
uriScheme := "https://"
|
||||
if m.NoTLS {
|
||||
uriScheme = "http://"
|
||||
}
|
||||
// login to mattermost
|
||||
m.Client = model.NewAPIv4Client(uriScheme + m.Credentials.Server)
|
||||
m.Client.HttpClient.Transport = &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: m.SkipTLSVerify}, //nolint:gosec
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
}
|
||||
m.Client.HttpClient.Timeout = time.Second * 10
|
||||
|
||||
// handle MMAUTHTOKEN and personal token
|
||||
if err := m.handleLoginToken(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check if server alive, retry until
|
||||
if err := m.serverAlive(firstConnection, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// initialize user and teams
|
||||
func (m *MMClient) initUser() error {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
// we only load all team data on initial login.
|
||||
// all other updates are for channels from our (primary) team only.
|
||||
//m.logger.Debug("initUser(): loading all team data")
|
||||
teams, resp := m.Client.GetTeamsForUser(m.User.Id, "")
|
||||
if resp.Error != nil {
|
||||
return resp.Error
|
||||
}
|
||||
for _, team := range teams {
|
||||
idx := 0
|
||||
max := 200
|
||||
usermap := make(map[string]*model.User)
|
||||
mmusers, resp := m.Client.GetUsersInTeam(team.Id, idx, max, "")
|
||||
if resp.Error != nil {
|
||||
return errors.New(resp.Error.DetailedError)
|
||||
}
|
||||
for len(mmusers) > 0 {
|
||||
for _, user := range mmusers {
|
||||
usermap[user.Id] = user
|
||||
}
|
||||
mmusers, resp = m.Client.GetUsersInTeam(team.Id, idx, max, "")
|
||||
if resp.Error != nil {
|
||||
return errors.New(resp.Error.DetailedError)
|
||||
}
|
||||
idx++
|
||||
time.Sleep(time.Millisecond * 200)
|
||||
}
|
||||
m.logger.Infof("found %d users in team %s", len(usermap), team.Name)
|
||||
|
||||
t := &Team{Team: team, Users: usermap, Id: team.Id}
|
||||
|
||||
mmchannels, resp := m.Client.GetChannelsForTeamForUser(team.Id, m.User.Id, false, "")
|
||||
if resp.Error != nil {
|
||||
return resp.Error
|
||||
}
|
||||
t.Channels = mmchannels
|
||||
mmchannels, resp = m.Client.GetPublicChannelsForTeam(team.Id, 0, 5000, "")
|
||||
if resp.Error != nil {
|
||||
return resp.Error
|
||||
}
|
||||
t.MoreChannels = mmchannels
|
||||
m.OtherTeams = append(m.OtherTeams, t)
|
||||
if team.Name == m.Credentials.Team {
|
||||
m.Team = t
|
||||
m.logger.Debugf("initUser(): found our team %s (id: %s)", team.Name, team.Id)
|
||||
}
|
||||
// add all users
|
||||
for k, v := range t.Users {
|
||||
m.Users[k] = v
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MMClient) serverAlive(firstConnection bool, b *backoff.Backoff) error {
|
||||
defer b.Reset()
|
||||
for {
|
||||
d := b.Duration()
|
||||
// bogus call to get the serverversion
|
||||
_, resp := m.Client.Logout()
|
||||
if resp.Error != nil {
|
||||
return fmt.Errorf("%#v", resp.Error.Error())
|
||||
}
|
||||
if firstConnection && !m.SkipVersionCheck && !supportedVersion(resp.ServerVersion) {
|
||||
return fmt.Errorf("unsupported mattermost version: %s", resp.ServerVersion)
|
||||
}
|
||||
if !m.SkipVersionCheck {
|
||||
m.ServerVersion = resp.ServerVersion
|
||||
if m.ServerVersion == "" {
|
||||
m.logger.Debugf("Server not up yet, reconnecting in %s", d)
|
||||
time.Sleep(d)
|
||||
} else {
|
||||
m.logger.Infof("Found version %s", m.ServerVersion)
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MMClient) wsConnect() {
|
||||
b := &backoff.Backoff{
|
||||
Min: time.Second,
|
||||
Max: 5 * time.Minute,
|
||||
Jitter: true,
|
||||
}
|
||||
|
||||
m.WsConnected = false
|
||||
wsScheme := "wss://"
|
||||
if m.NoTLS {
|
||||
wsScheme = "ws://"
|
||||
}
|
||||
|
||||
// setup websocket connection
|
||||
wsurl := wsScheme + m.Credentials.Server + model.API_URL_SUFFIX_V4 + "/websocket"
|
||||
header := http.Header{}
|
||||
header.Set(model.HEADER_AUTH, "BEARER "+m.Client.AuthToken)
|
||||
|
||||
m.logger.Debugf("WsClient: making connection: %s", wsurl)
|
||||
for {
|
||||
wsDialer := &websocket.Dialer{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: m.SkipTLSVerify}, //nolint:gosec
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
}
|
||||
var err error
|
||||
m.WsClient, _, err = wsDialer.Dial(wsurl, header)
|
||||
if err != nil {
|
||||
d := b.Duration()
|
||||
m.logger.Debugf("WSS: %s, reconnecting in %s", err, d)
|
||||
time.Sleep(d)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
m.logger.Debug("WsClient: connected")
|
||||
m.WsSequence = 1
|
||||
m.WsPingChan = make(chan *model.WebSocketResponse)
|
||||
// only start to parse WS messages when login is completely done
|
||||
m.WsConnected = true
|
||||
}
|
||||
|
||||
func (m *MMClient) createCookieJar(token string) *cookiejar.Jar {
|
||||
var cookies []*http.Cookie
|
||||
jar, _ := cookiejar.New(nil)
|
||||
firstCookie := &http.Cookie{
|
||||
Name: "MMAUTHTOKEN",
|
||||
Value: token,
|
||||
Path: "/",
|
||||
Domain: m.Credentials.Server,
|
||||
}
|
||||
cookies = append(cookies, firstCookie)
|
||||
cookieURL, _ := url.Parse("https://" + m.Credentials.Server)
|
||||
jar.SetCookies(cookieURL, cookies)
|
||||
return jar
|
||||
}
|
||||
|
||||
func (m *MMClient) checkAlive() error {
|
||||
// check if session still is valid
|
||||
_, resp := m.Client.GetMe("")
|
||||
if resp.Error != nil {
|
||||
return resp.Error
|
||||
}
|
||||
m.logger.Debug("WS PING")
|
||||
return m.sendWSRequest("ping", nil)
|
||||
}
|
||||
|
||||
func (m *MMClient) sendWSRequest(action string, data map[string]interface{}) error {
|
||||
req := &model.WebSocketRequest{}
|
||||
req.Seq = m.WsSequence
|
||||
req.Action = action
|
||||
req.Data = data
|
||||
m.WsSequence++
|
||||
m.logger.Debugf("sendWsRequest %#v", req)
|
||||
return m.WsClient.WriteJSON(req)
|
||||
}
|
||||
|
||||
func supportedVersion(version string) bool {
|
||||
if strings.HasPrefix(version, "3.8.0") ||
|
||||
strings.HasPrefix(version, "3.9.0") ||
|
||||
strings.HasPrefix(version, "3.10.0") ||
|
||||
strings.HasPrefix(version, "4.") ||
|
||||
strings.HasPrefix(version, "5.") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func digestString(s string) string {
|
||||
return fmt.Sprintf("%x", md5.Sum([]byte(s))) //nolint:gosec
|
||||
}
|
@ -1,294 +0,0 @@
|
||||
package matterclient
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/jpillora/backoff"
|
||||
prefixed "github.com/matterbridge/logrus-prefixed-formatter"
|
||||
"github.com/mattermost/mattermost-server/v5/model"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type Credentials struct {
|
||||
Login string
|
||||
Team string
|
||||
Pass string
|
||||
Token string
|
||||
CookieToken bool
|
||||
Server string
|
||||
NoTLS bool
|
||||
SkipTLSVerify bool
|
||||
SkipVersionCheck bool
|
||||
}
|
||||
|
||||
type Message struct {
|
||||
Raw *model.WebSocketEvent
|
||||
Post *model.Post
|
||||
Team string
|
||||
Channel string
|
||||
Username string
|
||||
Text string
|
||||
Type string
|
||||
UserID string
|
||||
}
|
||||
|
||||
//nolint:golint
|
||||
type Team struct {
|
||||
Team *model.Team
|
||||
Id string
|
||||
Channels []*model.Channel
|
||||
MoreChannels []*model.Channel
|
||||
Users map[string]*model.User
|
||||
}
|
||||
|
||||
type MMClient struct {
|
||||
sync.RWMutex
|
||||
*Credentials
|
||||
|
||||
Team *Team
|
||||
OtherTeams []*Team
|
||||
Client *model.Client4
|
||||
User *model.User
|
||||
Users map[string]*model.User
|
||||
MessageChan chan *Message
|
||||
WsClient *websocket.Conn
|
||||
WsQuit bool
|
||||
WsAway bool
|
||||
WsConnected bool
|
||||
WsSequence int64
|
||||
WsPingChan chan *model.WebSocketResponse
|
||||
ServerVersion string
|
||||
OnWsConnect func()
|
||||
|
||||
logger *logrus.Entry
|
||||
rootLogger *logrus.Logger
|
||||
lruCache *lru.Cache
|
||||
allevents bool
|
||||
}
|
||||
|
||||
// New will instantiate a new Matterclient with the specified login details without connecting.
|
||||
func New(login string, pass string, team string, server string) *MMClient {
|
||||
rootLogger := logrus.New()
|
||||
rootLogger.SetFormatter(&prefixed.TextFormatter{
|
||||
PrefixPadding: 13,
|
||||
DisableColors: true,
|
||||
})
|
||||
|
||||
cred := &Credentials{
|
||||
Login: login,
|
||||
Pass: pass,
|
||||
Team: team,
|
||||
Server: server,
|
||||
}
|
||||
|
||||
cache, _ := lru.New(500)
|
||||
return &MMClient{
|
||||
Credentials: cred,
|
||||
MessageChan: make(chan *Message, 100),
|
||||
Users: make(map[string]*model.User),
|
||||
rootLogger: rootLogger,
|
||||
lruCache: cache,
|
||||
logger: rootLogger.WithFields(logrus.Fields{"prefix": "matterclient"}),
|
||||
}
|
||||
}
|
||||
|
||||
// SetDebugLog activates debugging logging on all Matterclient log output.
|
||||
func (m *MMClient) SetDebugLog() {
|
||||
m.rootLogger.SetFormatter(&prefixed.TextFormatter{
|
||||
PrefixPadding: 13,
|
||||
DisableColors: true,
|
||||
FullTimestamp: false,
|
||||
ForceFormatting: true,
|
||||
})
|
||||
}
|
||||
|
||||
// SetLogLevel tries to parse the specified level and if successful sets
|
||||
// the log level accordingly. Accepted levels are: 'debug', 'info', 'warn',
|
||||
// 'error', 'fatal' and 'panic'.
|
||||
func (m *MMClient) SetLogLevel(level string) {
|
||||
l, err := logrus.ParseLevel(level)
|
||||
if err != nil {
|
||||
m.logger.Warnf("Failed to parse specified log-level '%s': %#v", level, err)
|
||||
} else {
|
||||
m.rootLogger.SetLevel(l)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MMClient) EnableAllEvents() {
|
||||
m.allevents = true
|
||||
}
|
||||
|
||||
// Login tries to connect the client with the loging details with which it was initialized.
|
||||
func (m *MMClient) Login() error {
|
||||
// check if this is a first connect or a reconnection
|
||||
firstConnection := true
|
||||
if m.WsConnected {
|
||||
firstConnection = false
|
||||
}
|
||||
m.WsConnected = false
|
||||
if m.WsQuit {
|
||||
return nil
|
||||
}
|
||||
b := &backoff.Backoff{
|
||||
Min: time.Second,
|
||||
Max: 5 * time.Minute,
|
||||
Jitter: true,
|
||||
}
|
||||
|
||||
// do initialization setup
|
||||
if err := m.initClient(firstConnection, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := m.doLogin(firstConnection, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := m.initUser(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if m.Team == nil {
|
||||
validTeamNames := make([]string, len(m.OtherTeams))
|
||||
for i, t := range m.OtherTeams {
|
||||
validTeamNames[i] = t.Team.Name
|
||||
}
|
||||
return fmt.Errorf("Team '%s' not found in %v", m.Credentials.Team, validTeamNames)
|
||||
}
|
||||
|
||||
m.wsConnect()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Logout disconnects the client from the chat server.
|
||||
func (m *MMClient) Logout() error {
|
||||
m.logger.Debugf("logout as %s (team: %s) on %s", m.Credentials.Login, m.Credentials.Team, m.Credentials.Server)
|
||||
m.WsQuit = true
|
||||
m.WsClient.Close()
|
||||
m.WsClient.UnderlyingConn().Close()
|
||||
if strings.Contains(m.Credentials.Pass, model.SESSION_COOKIE_TOKEN) {
|
||||
m.logger.Debug("Not invalidating session in logout, credential is a token")
|
||||
return nil
|
||||
}
|
||||
_, resp := m.Client.Logout()
|
||||
if resp.Error != nil {
|
||||
return resp.Error
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WsReceiver implements the core loop that manages the connection to the chat server. In
|
||||
// case of a disconnect it will try to reconnect. A call to this method is blocking until
|
||||
// the 'WsQuite' field of the MMClient object is set to 'true'.
|
||||
func (m *MMClient) WsReceiver() {
|
||||
for {
|
||||
var rawMsg json.RawMessage
|
||||
var err error
|
||||
|
||||
if m.WsQuit {
|
||||
m.logger.Debug("exiting WsReceiver")
|
||||
return
|
||||
}
|
||||
|
||||
if !m.WsConnected {
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
continue
|
||||
}
|
||||
|
||||
if _, rawMsg, err = m.WsClient.ReadMessage(); err != nil {
|
||||
m.logger.Error("error:", err)
|
||||
// reconnect
|
||||
m.wsConnect()
|
||||
}
|
||||
|
||||
var event model.WebSocketEvent
|
||||
if err := json.Unmarshal(rawMsg, &event); err == nil && event.IsValid() {
|
||||
m.logger.Debugf("WsReceiver event: %#v", event)
|
||||
msg := &Message{Raw: &event, Team: m.Credentials.Team}
|
||||
m.parseMessage(msg)
|
||||
// check if we didn't empty the message
|
||||
if msg.Text != "" {
|
||||
m.MessageChan <- msg
|
||||
continue
|
||||
}
|
||||
// if we have file attached but the message is empty, also send it
|
||||
if msg.Post != nil {
|
||||
if msg.Text != "" || len(msg.Post.FileIds) > 0 || msg.Post.Type == "slack_attachment" {
|
||||
m.MessageChan <- msg
|
||||
continue
|
||||
}
|
||||
}
|
||||
if m.allevents {
|
||||
m.MessageChan <- msg
|
||||
continue
|
||||
}
|
||||
switch msg.Raw.Event {
|
||||
case model.WEBSOCKET_EVENT_USER_ADDED,
|
||||
model.WEBSOCKET_EVENT_USER_REMOVED,
|
||||
model.WEBSOCKET_EVENT_CHANNEL_CREATED,
|
||||
model.WEBSOCKET_EVENT_CHANNEL_DELETED:
|
||||
m.MessageChan <- msg
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var response model.WebSocketResponse
|
||||
if err := json.Unmarshal(rawMsg, &response); err == nil && response.IsValid() {
|
||||
m.logger.Debugf("WsReceiver response: %#v", response)
|
||||
m.parseResponse(response)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StatusLoop implements a ping-cycle that ensures that the connection to the chat servers
|
||||
// remains alive. In case of a disconnect it will try to reconnect. A call to this method
|
||||
// is blocking until the 'WsQuite' field of the MMClient object is set to 'true'.
|
||||
func (m *MMClient) StatusLoop() {
|
||||
retries := 0
|
||||
backoff := time.Second * 60
|
||||
if m.OnWsConnect != nil {
|
||||
m.OnWsConnect()
|
||||
}
|
||||
m.logger.Debug("StatusLoop:", m.OnWsConnect != nil)
|
||||
for {
|
||||
if m.WsQuit {
|
||||
return
|
||||
}
|
||||
if m.WsConnected {
|
||||
if err := m.checkAlive(); err != nil {
|
||||
m.logger.Errorf("Connection is not alive: %#v", err)
|
||||
}
|
||||
select {
|
||||
case <-m.WsPingChan:
|
||||
m.logger.Debug("WS PONG received")
|
||||
backoff = time.Second * 60
|
||||
case <-time.After(time.Second * 5):
|
||||
if retries > 3 {
|
||||
m.logger.Debug("StatusLoop() timeout")
|
||||
m.Logout()
|
||||
m.WsQuit = false
|
||||
err := m.Login()
|
||||
if err != nil {
|
||||
m.logger.Errorf("Login failed: %#v", err)
|
||||
break
|
||||
}
|
||||
if m.OnWsConnect != nil {
|
||||
m.OnWsConnect()
|
||||
}
|
||||
go m.WsReceiver()
|
||||
} else {
|
||||
retries++
|
||||
backoff = time.Second * 5
|
||||
}
|
||||
}
|
||||
}
|
||||
time.Sleep(backoff)
|
||||
}
|
||||
}
|
@ -1,207 +0,0 @@
|
||||
package matterclient
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/mattermost/mattermost-server/v5/model"
|
||||
)
|
||||
|
||||
func (m *MMClient) parseActionPost(rmsg *Message) {
|
||||
// add post to cache, if it already exists don't relay this again.
|
||||
// this should fix reposts
|
||||
if ok, _ := m.lruCache.ContainsOrAdd(digestString(rmsg.Raw.Data["post"].(string)), true); ok && rmsg.Raw.Event != model.WEBSOCKET_EVENT_POST_DELETED {
|
||||
m.logger.Debugf("message %#v in cache, not processing again", rmsg.Raw.Data["post"].(string))
|
||||
rmsg.Text = ""
|
||||
return
|
||||
}
|
||||
data := model.PostFromJson(strings.NewReader(rmsg.Raw.Data["post"].(string)))
|
||||
// we don't have the user, refresh the userlist
|
||||
if m.GetUser(data.UserId) == nil {
|
||||
m.logger.Infof("User '%v' is not known, ignoring message '%#v'",
|
||||
data.UserId, data)
|
||||
return
|
||||
}
|
||||
rmsg.Username = m.GetUserName(data.UserId)
|
||||
rmsg.Channel = m.GetChannelName(data.ChannelId)
|
||||
rmsg.UserID = data.UserId
|
||||
rmsg.Type = data.Type
|
||||
teamid, _ := rmsg.Raw.Data["team_id"].(string)
|
||||
// edit messsages have no team_id for some reason
|
||||
if teamid == "" {
|
||||
// we can find the team_id from the channelid
|
||||
teamid = m.GetChannelTeamId(data.ChannelId)
|
||||
rmsg.Raw.Data["team_id"] = teamid
|
||||
}
|
||||
if teamid != "" {
|
||||
rmsg.Team = m.GetTeamName(teamid)
|
||||
}
|
||||
// direct message
|
||||
if rmsg.Raw.Data["channel_type"] == "D" {
|
||||
rmsg.Channel = m.GetUser(data.UserId).Username
|
||||
}
|
||||
rmsg.Text = data.Message
|
||||
rmsg.Post = data
|
||||
}
|
||||
|
||||
func (m *MMClient) parseMessage(rmsg *Message) {
|
||||
switch rmsg.Raw.Event {
|
||||
case model.WEBSOCKET_EVENT_POSTED, model.WEBSOCKET_EVENT_POST_EDITED, model.WEBSOCKET_EVENT_POST_DELETED:
|
||||
m.parseActionPost(rmsg)
|
||||
case "user_updated":
|
||||
user := rmsg.Raw.Data["user"].(map[string]interface{})
|
||||
if _, ok := user["id"].(string); ok {
|
||||
m.UpdateUser(user["id"].(string))
|
||||
}
|
||||
case "group_added":
|
||||
if err := m.UpdateChannels(); err != nil {
|
||||
m.logger.Errorf("failed to update channels: %#v", err)
|
||||
}
|
||||
/*
|
||||
case model.ACTION_USER_REMOVED:
|
||||
m.handleWsActionUserRemoved(&rmsg)
|
||||
case model.ACTION_USER_ADDED:
|
||||
m.handleWsActionUserAdded(&rmsg)
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MMClient) parseResponse(rmsg model.WebSocketResponse) {
|
||||
if rmsg.Data != nil {
|
||||
// ping reply
|
||||
if rmsg.Data["text"].(string) == "pong" {
|
||||
m.WsPingChan <- &rmsg
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MMClient) DeleteMessage(postId string) error { //nolint:golint
|
||||
_, resp := m.Client.DeletePost(postId)
|
||||
if resp.Error != nil {
|
||||
return resp.Error
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MMClient) EditMessage(postId string, text string) (string, error) { //nolint:golint
|
||||
post := &model.Post{Message: text, Id: postId}
|
||||
res, resp := m.Client.UpdatePost(postId, post)
|
||||
if resp.Error != nil {
|
||||
return "", resp.Error
|
||||
}
|
||||
return res.Id, nil
|
||||
}
|
||||
|
||||
func (m *MMClient) GetFileLinks(filenames []string) []string {
|
||||
uriScheme := "https://"
|
||||
if m.NoTLS {
|
||||
uriScheme = "http://"
|
||||
}
|
||||
|
||||
var output []string
|
||||
for _, f := range filenames {
|
||||
res, resp := m.Client.GetFileLink(f)
|
||||
if resp.Error != nil {
|
||||
// public links is probably disabled, create the link ourselves
|
||||
output = append(output, uriScheme+m.Credentials.Server+model.API_URL_SUFFIX_V4+"/files/"+f)
|
||||
continue
|
||||
}
|
||||
output = append(output, res)
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
func (m *MMClient) GetPosts(channelId string, limit int) *model.PostList { //nolint:golint
|
||||
res, resp := m.Client.GetPostsForChannel(channelId, 0, limit, "", true)
|
||||
if resp.Error != nil {
|
||||
return nil
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (m *MMClient) GetPostsSince(channelId string, time int64) *model.PostList { //nolint:golint
|
||||
res, resp := m.Client.GetPostsSince(channelId, time, true)
|
||||
if resp.Error != nil {
|
||||
return nil
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (m *MMClient) GetPublicLink(filename string) string {
|
||||
res, resp := m.Client.GetFileLink(filename)
|
||||
if resp.Error != nil {
|
||||
return ""
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (m *MMClient) GetPublicLinks(filenames []string) []string {
|
||||
var output []string
|
||||
for _, f := range filenames {
|
||||
res, resp := m.Client.GetFileLink(f)
|
||||
if resp.Error != nil {
|
||||
continue
|
||||
}
|
||||
output = append(output, res)
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
func (m *MMClient) PostMessage(channelId string, text string, rootId string) (string, error) { //nolint:golint
|
||||
post := &model.Post{ChannelId: channelId, Message: text, RootId: rootId}
|
||||
res, resp := m.Client.CreatePost(post)
|
||||
if resp.Error != nil {
|
||||
return "", resp.Error
|
||||
}
|
||||
return res.Id, nil
|
||||
}
|
||||
|
||||
func (m *MMClient) PostMessageWithFiles(channelId string, text string, rootId string, fileIds []string) (string, error) { //nolint:golint
|
||||
post := &model.Post{ChannelId: channelId, Message: text, RootId: rootId, FileIds: fileIds}
|
||||
res, resp := m.Client.CreatePost(post)
|
||||
if resp.Error != nil {
|
||||
return "", resp.Error
|
||||
}
|
||||
return res.Id, nil
|
||||
}
|
||||
|
||||
func (m *MMClient) SearchPosts(query string) *model.PostList {
|
||||
res, resp := m.Client.SearchPosts(m.Team.Id, query, false)
|
||||
if resp.Error != nil {
|
||||
return nil
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// SendDirectMessage sends a direct message to specified user
|
||||
func (m *MMClient) SendDirectMessage(toUserId string, msg string, rootId string) { //nolint:golint
|
||||
m.SendDirectMessageProps(toUserId, msg, rootId, nil)
|
||||
}
|
||||
|
||||
func (m *MMClient) SendDirectMessageProps(toUserId string, msg string, rootId string, props map[string]interface{}) { //nolint:golint
|
||||
m.logger.Debugf("SendDirectMessage to %s, msg %s", toUserId, msg)
|
||||
// create DM channel (only happens on first message)
|
||||
_, resp := m.Client.CreateDirectChannel(m.User.Id, toUserId)
|
||||
if resp.Error != nil {
|
||||
m.logger.Debugf("SendDirectMessage to %#v failed: %s", toUserId, resp.Error)
|
||||
return
|
||||
}
|
||||
channelName := model.GetDMNameFromIds(toUserId, m.User.Id)
|
||||
|
||||
// update our channels
|
||||
if err := m.UpdateChannels(); err != nil {
|
||||
m.logger.Errorf("failed to update channels: %#v", err)
|
||||
}
|
||||
|
||||
// build & send the message
|
||||
msg = strings.Replace(msg, "\r", "", -1)
|
||||
post := &model.Post{ChannelId: m.GetChannelId(channelName, m.Team.Id), Message: msg, RootId: rootId, Props: props}
|
||||
m.Client.CreatePost(post)
|
||||
}
|
||||
|
||||
func (m *MMClient) UploadFile(data []byte, channelId string, filename string) (string, error) { //nolint:golint
|
||||
f, resp := m.Client.UploadFile(data, channelId, filename)
|
||||
if resp.Error != nil {
|
||||
return "", resp.Error
|
||||
}
|
||||
return f.FileInfos[0].Id, nil
|
||||
}
|
@ -1,165 +0,0 @@
|
||||
package matterclient
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/mattermost/mattermost-server/v5/model"
|
||||
)
|
||||
|
||||
func (m *MMClient) GetNickName(userId string) string { //nolint:golint
|
||||
user := m.GetUser(userId)
|
||||
if user != nil {
|
||||
return user.Nickname
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *MMClient) GetStatus(userId string) string { //nolint:golint
|
||||
res, resp := m.Client.GetUserStatus(userId, "")
|
||||
if resp.Error != nil {
|
||||
return ""
|
||||
}
|
||||
if res.Status == model.STATUS_AWAY {
|
||||
return "away"
|
||||
}
|
||||
if res.Status == model.STATUS_ONLINE {
|
||||
return "online"
|
||||
}
|
||||
return "offline"
|
||||
}
|
||||
|
||||
func (m *MMClient) GetStatuses() map[string]string {
|
||||
var ids []string
|
||||
statuses := make(map[string]string)
|
||||
for id := range m.Users {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
res, resp := m.Client.GetUsersStatusesByIds(ids)
|
||||
if resp.Error != nil {
|
||||
return statuses
|
||||
}
|
||||
for _, status := range res {
|
||||
statuses[status.UserId] = "offline"
|
||||
if status.Status == model.STATUS_AWAY {
|
||||
statuses[status.UserId] = "away"
|
||||
}
|
||||
if status.Status == model.STATUS_ONLINE {
|
||||
statuses[status.UserId] = "online"
|
||||
}
|
||||
}
|
||||
return statuses
|
||||
}
|
||||
|
||||
func (m *MMClient) GetTeamId() string { //nolint:golint
|
||||
return m.Team.Id
|
||||
}
|
||||
|
||||
// GetTeamName returns the name of the specified teamId
|
||||
func (m *MMClient) GetTeamName(teamId string) string { //nolint:golint
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
for _, t := range m.OtherTeams {
|
||||
if t.Id == teamId {
|
||||
return t.Team.Name
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *MMClient) GetUser(userId string) *model.User { //nolint:golint
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
_, ok := m.Users[userId]
|
||||
if !ok {
|
||||
res, resp := m.Client.GetUser(userId, "")
|
||||
if resp.Error != nil {
|
||||
return nil
|
||||
}
|
||||
m.Users[userId] = res
|
||||
}
|
||||
return m.Users[userId]
|
||||
}
|
||||
|
||||
func (m *MMClient) GetUserName(userId string) string { //nolint:golint
|
||||
user := m.GetUser(userId)
|
||||
if user != nil {
|
||||
return user.Username
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *MMClient) GetUsers() map[string]*model.User {
|
||||
users := make(map[string]*model.User)
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
for k, v := range m.Users {
|
||||
users[k] = v
|
||||
}
|
||||
return users
|
||||
}
|
||||
|
||||
func (m *MMClient) UpdateUsers() error {
|
||||
idx := 0
|
||||
max := 200
|
||||
mmusers, resp := m.Client.GetUsers(idx, max, "")
|
||||
if resp.Error != nil {
|
||||
return errors.New(resp.Error.DetailedError)
|
||||
}
|
||||
for len(mmusers) > 0 {
|
||||
m.Lock()
|
||||
for _, user := range mmusers {
|
||||
m.Users[user.Id] = user
|
||||
}
|
||||
m.Unlock()
|
||||
mmusers, resp = m.Client.GetUsers(idx, max, "")
|
||||
time.Sleep(time.Millisecond * 300)
|
||||
if resp.Error != nil {
|
||||
return errors.New(resp.Error.DetailedError)
|
||||
}
|
||||
idx++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MMClient) UpdateUserNick(nick string) error {
|
||||
user := m.User
|
||||
user.Nickname = nick
|
||||
_, resp := m.Client.UpdateUser(user)
|
||||
if resp.Error != nil {
|
||||
return resp.Error
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MMClient) UsernamesInChannel(channelId string) []string { //nolint:golint
|
||||
res, resp := m.Client.GetChannelMembers(channelId, 0, 50000, "")
|
||||
if resp.Error != nil {
|
||||
m.logger.Errorf("UsernamesInChannel(%s) failed: %s", channelId, resp.Error)
|
||||
return []string{}
|
||||
}
|
||||
allusers := m.GetUsers()
|
||||
result := []string{}
|
||||
for _, member := range *res {
|
||||
result = append(result, allusers[member.UserId].Nickname)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (m *MMClient) UpdateStatus(userId string, status string) error { //nolint:golint
|
||||
_, resp := m.Client.UpdateUserStatus(userId, &model.Status{Status: status})
|
||||
if resp.Error != nil {
|
||||
return resp.Error
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MMClient) UpdateUser(userId string) { //nolint:golint
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
res, resp := m.Client.GetUser(userId, "")
|
||||
if resp.Error != nil {
|
||||
return
|
||||
}
|
||||
m.Users[userId] = res
|
||||
}
|
2
vendor/github.com/bwmarrin/discordgo/discord.go
generated
vendored
2
vendor/github.com/bwmarrin/discordgo/discord.go
generated
vendored
@ -22,7 +22,7 @@ import (
|
||||
)
|
||||
|
||||
// VERSION of DiscordGo, follows Semantic Versioning. (http://semver.org/)
|
||||
const VERSION = "0.27.0"
|
||||
const VERSION = "0.27.1"
|
||||
|
||||
// New creates a new Discord session with provided token.
|
||||
// If the token is for a bot, it must be prefixed with "Bot "
|
||||
|
1
vendor/github.com/bwmarrin/discordgo/restapi.go
generated
vendored
1
vendor/github.com/bwmarrin/discordgo/restapi.go
generated
vendored
@ -223,6 +223,7 @@ func (s *Session) RequestWithLockedBucket(method, urlStr, contentType string, b
|
||||
for _, opt := range options {
|
||||
opt(cfg)
|
||||
}
|
||||
req = cfg.Request
|
||||
|
||||
if s.Debug {
|
||||
for k, v := range req.Header {
|
||||
|
3
vendor/github.com/bwmarrin/discordgo/structs.go
generated
vendored
3
vendor/github.com/bwmarrin/discordgo/structs.go
generated
vendored
@ -527,7 +527,7 @@ type ThreadMember struct {
|
||||
// The time the current user last joined the thread
|
||||
JoinTimestamp time.Time `json:"join_timestamp"`
|
||||
// Any user-thread settings, currently only used for notifications
|
||||
Flags int
|
||||
Flags int `json:"flags"`
|
||||
}
|
||||
|
||||
// ThreadsList represents a list of threads alongisde with thread member objects for the current user.
|
||||
@ -622,6 +622,7 @@ const (
|
||||
StickerFormatTypePNG StickerFormat = 1
|
||||
StickerFormatTypeAPNG StickerFormat = 2
|
||||
StickerFormatTypeLottie StickerFormat = 3
|
||||
StickerFormatTypeGIF StickerFormat = 4
|
||||
)
|
||||
|
||||
// StickerType is the type of sticker.
|
||||
|
26
vendor/github.com/bwmarrin/discordgo/voice.go
generated
vendored
26
vendor/github.com/bwmarrin/discordgo/voice.go
generated
vendored
@ -599,44 +599,46 @@ func (v *VoiceConnection) udpOpen() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// Create a 70 byte array and put the SSRC code from the Op 2 VoiceConnection event
|
||||
// into it. Then send that over the UDP connection to Discord
|
||||
sb := make([]byte, 70)
|
||||
binary.BigEndian.PutUint32(sb, v.op2.SSRC)
|
||||
// Create a 74 byte array to store the packet data
|
||||
sb := make([]byte, 74)
|
||||
binary.BigEndian.PutUint16(sb, 1) // Packet type (0x1 is request, 0x2 is response)
|
||||
binary.BigEndian.PutUint16(sb[2:], 70) // Packet length (excluding type and length fields)
|
||||
binary.BigEndian.PutUint32(sb[4:], v.op2.SSRC) // The SSRC code from the Op 2 VoiceConnection event
|
||||
|
||||
// And send that data over the UDP connection to Discord.
|
||||
_, err = v.udpConn.Write(sb)
|
||||
if err != nil {
|
||||
v.log(LogWarning, "udp write error to %s, %s", addr.String(), err)
|
||||
return
|
||||
}
|
||||
|
||||
// Create a 70 byte array and listen for the initial handshake response
|
||||
// Create a 74 byte array and listen for the initial handshake response
|
||||
// from Discord. Once we get it parse the IP and PORT information out
|
||||
// of the response. This should be our public IP and PORT as Discord
|
||||
// saw us.
|
||||
rb := make([]byte, 70)
|
||||
rb := make([]byte, 74)
|
||||
rlen, _, err := v.udpConn.ReadFromUDP(rb)
|
||||
if err != nil {
|
||||
v.log(LogWarning, "udp read error, %s, %s", addr.String(), err)
|
||||
return
|
||||
}
|
||||
|
||||
if rlen < 70 {
|
||||
if rlen < 74 {
|
||||
v.log(LogWarning, "received udp packet too small")
|
||||
return fmt.Errorf("received udp packet too small")
|
||||
}
|
||||
|
||||
// Loop over position 4 through 20 to grab the IP address
|
||||
// Should never be beyond position 20.
|
||||
// Loop over position 8 through 71 to grab the IP address.
|
||||
var ip string
|
||||
for i := 4; i < 20; i++ {
|
||||
for i := 8; i < len(rb)-2; i++ {
|
||||
if rb[i] == 0 {
|
||||
break
|
||||
}
|
||||
ip += string(rb[i])
|
||||
}
|
||||
|
||||
// Grab port from position 68 and 69
|
||||
port := binary.BigEndian.Uint16(rb[68:70])
|
||||
// Grab port from position 72 and 73
|
||||
port := binary.BigEndian.Uint16(rb[len(rb)-2:])
|
||||
|
||||
// Take the data from above and send it back to Discord to finalize
|
||||
// the UDP connection handshake.
|
||||
|
1
vendor/github.com/d5/tengo/v2/parser/parser.go
generated
vendored
1
vendor/github.com/d5/tengo/v2/parser/parser.go
generated
vendored
@ -143,6 +143,7 @@ func (p *Parser) ParseFile() (file *File, err error) {
|
||||
}
|
||||
|
||||
stmts := p.parseStmtList()
|
||||
p.expect(token.EOF)
|
||||
if p.errors.Len() > 0 {
|
||||
return nil, p.errors.Err()
|
||||
}
|
||||
|
4
vendor/github.com/d5/tengo/v2/script.go
generated
vendored
4
vendor/github.com/d5/tengo/v2/script.go
generated
vendored
@ -247,8 +247,8 @@ func (c *Compiled) RunContext(ctx context.Context) (err error) {
|
||||
// Clone creates a new copy of Compiled. Cloned copies are safe for concurrent
|
||||
// use by multiple goroutines.
|
||||
func (c *Compiled) Clone() *Compiled {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
clone := &Compiled{
|
||||
globalIndexes: c.globalIndexes,
|
||||
|
16
vendor/github.com/d5/tengo/v2/stdlib/json/decode.go
generated
vendored
16
vendor/github.com/d5/tengo/v2/stdlib/json/decode.go
generated
vendored
@ -62,9 +62,12 @@ func (d *decodeState) scanNext() {
|
||||
|
||||
// scanWhile processes bytes in d.data[d.off:] until it
|
||||
// receives a scan code not equal to op.
|
||||
func (d *decodeState) scanWhile(op int) {
|
||||
func (d *decodeState) scanWhile(op int) (isFloat bool) {
|
||||
s, data, i := &d.scan, d.data, d.off
|
||||
for i < len(data) {
|
||||
if data[i] == '.' || data[i] == 'e' || data[i] == 'E' {
|
||||
isFloat = true
|
||||
}
|
||||
newOp := s.step(s, data[i])
|
||||
i++
|
||||
if newOp != op {
|
||||
@ -76,6 +79,7 @@ func (d *decodeState) scanWhile(op int) {
|
||||
|
||||
d.off = len(data) + 1 // mark processed EOF with len+1
|
||||
d.opcode = d.scan.eof()
|
||||
return
|
||||
}
|
||||
|
||||
func (d *decodeState) value() (tengo.Object, error) {
|
||||
@ -185,7 +189,7 @@ func (d *decodeState) object() (tengo.Object, error) {
|
||||
func (d *decodeState) literal() (tengo.Object, error) {
|
||||
// All bytes inside literal return scanContinue op code.
|
||||
start := d.readIndex()
|
||||
d.scanWhile(scanContinue)
|
||||
isFloat := d.scanWhile(scanContinue)
|
||||
|
||||
item := d.data[start:d.readIndex()]
|
||||
|
||||
@ -210,8 +214,12 @@ func (d *decodeState) literal() (tengo.Object, error) {
|
||||
if c != '-' && (c < '0' || c > '9') {
|
||||
panic(phasePanicMsg)
|
||||
}
|
||||
n, _ := strconv.ParseFloat(string(item), 10)
|
||||
return &tengo.Float{Value: n}, nil
|
||||
if isFloat {
|
||||
n, _ := strconv.ParseFloat(string(item), 10)
|
||||
return &tengo.Float{Value: n}, nil
|
||||
}
|
||||
n, _ := strconv.ParseInt(string(item), 10, 64)
|
||||
return &tengo.Int{Value: n}, nil
|
||||
}
|
||||
}
|
||||
|
||||
|
36
vendor/github.com/d5/tengo/v2/stdlib/math.go
generated
vendored
36
vendor/github.com/d5/tengo/v2/stdlib/math.go
generated
vendored
@ -7,17 +7,31 @@ import (
|
||||
)
|
||||
|
||||
var mathModule = map[string]tengo.Object{
|
||||
"e": &tengo.Float{Value: math.E},
|
||||
"pi": &tengo.Float{Value: math.Pi},
|
||||
"phi": &tengo.Float{Value: math.Phi},
|
||||
"sqrt2": &tengo.Float{Value: math.Sqrt2},
|
||||
"sqrtE": &tengo.Float{Value: math.SqrtE},
|
||||
"sqrtPi": &tengo.Float{Value: math.SqrtPi},
|
||||
"sqrtPhi": &tengo.Float{Value: math.SqrtPhi},
|
||||
"ln2": &tengo.Float{Value: math.Ln2},
|
||||
"log2E": &tengo.Float{Value: math.Log2E},
|
||||
"ln10": &tengo.Float{Value: math.Ln10},
|
||||
"log10E": &tengo.Float{Value: math.Log10E},
|
||||
"e": &tengo.Float{Value: math.E},
|
||||
"pi": &tengo.Float{Value: math.Pi},
|
||||
"phi": &tengo.Float{Value: math.Phi},
|
||||
"sqrt2": &tengo.Float{Value: math.Sqrt2},
|
||||
"sqrtE": &tengo.Float{Value: math.SqrtE},
|
||||
"sqrtPi": &tengo.Float{Value: math.SqrtPi},
|
||||
"sqrtPhi": &tengo.Float{Value: math.SqrtPhi},
|
||||
"ln2": &tengo.Float{Value: math.Ln2},
|
||||
"log2E": &tengo.Float{Value: math.Log2E},
|
||||
"ln10": &tengo.Float{Value: math.Ln10},
|
||||
"log10E": &tengo.Float{Value: math.Log10E},
|
||||
"maxFloat32": &tengo.Float{Value: math.MaxFloat32},
|
||||
"smallestNonzeroFloat32": &tengo.Float{Value: math.SmallestNonzeroFloat32},
|
||||
"maxFloat64": &tengo.Float{Value: math.MaxFloat64},
|
||||
"smallestNonzeroFloat64": &tengo.Float{Value: math.SmallestNonzeroFloat64},
|
||||
"maxInt": &tengo.Int{Value: math.MaxInt},
|
||||
"minInt": &tengo.Int{Value: math.MinInt},
|
||||
"maxInt8": &tengo.Int{Value: math.MaxInt8},
|
||||
"minInt8": &tengo.Int{Value: math.MinInt8},
|
||||
"maxInt16": &tengo.Int{Value: math.MaxInt16},
|
||||
"minInt16": &tengo.Int{Value: math.MinInt16},
|
||||
"maxInt32": &tengo.Int{Value: math.MaxInt32},
|
||||
"minInt32": &tengo.Int{Value: math.MinInt32},
|
||||
"maxInt64": &tengo.Int{Value: math.MaxInt64},
|
||||
"minInt64": &tengo.Int{Value: math.MinInt64},
|
||||
"abs": &tengo.UserFunction{
|
||||
Name: "abs",
|
||||
Value: FuncAFRF(math.Abs),
|
||||
|
68 vendor/github.com/d5/tengo/v2/stdlib/times.go generated vendored
@@ -180,6 +180,10 @@ var timesModule = map[string]tengo.Object{
 		Name:  "to_utc",
 		Value: timesToUTC,
 	}, // to_utc(time) => time
+	"in_location": &tengo.UserFunction{
+		Name:  "in_location",
+		Value: timesInLocation,
+	}, // in_location(time, location) => time
 }
 
 func timesSleep(args ...tengo.Object) (ret tengo.Object, err error) {
@@ -430,7 +434,7 @@ func timesDate(args ...tengo.Object) (
 	ret tengo.Object,
 	err error,
 ) {
-	if len(args) != 7 {
+	if len(args) < 7 || len(args) > 8 {
 		err = tengo.ErrWrongNumArguments
 		return
 	}
@@ -499,9 +503,29 @@ func timesDate(args ...tengo.Object) (
 		return
 	}
 
+	var loc *time.Location
+	if len(args) == 8 {
+		i8, ok := tengo.ToString(args[7])
+		if !ok {
+			err = tengo.ErrInvalidArgumentType{
+				Name:     "eighth",
+				Expected: "string(compatible)",
+				Found:    args[7].TypeName(),
+			}
+			return
+		}
+		loc, err = time.LoadLocation(i8)
+		if err != nil {
+			ret = wrapError(err)
+			return
+		}
+	} else {
+		loc = time.Now().Location()
+	}
+
 	ret = &tengo.Time{
 		Value: time.Date(i1,
-			time.Month(i2), i3, i4, i5, i6, i7, time.Now().Location()),
+			time.Month(i2), i3, i4, i5, i6, i7, loc),
 	}
 
 	return
@@ -1113,6 +1137,46 @@ func timesTimeLocation(args ...tengo.Object) (
 	return
 }
 
+func timesInLocation(args ...tengo.Object) (
+	ret tengo.Object,
+	err error,
+) {
+	if len(args) != 2 {
+		err = tengo.ErrWrongNumArguments
+		return
+	}
+
+	t1, ok := tengo.ToTime(args[0])
+	if !ok {
+		err = tengo.ErrInvalidArgumentType{
+			Name:     "first",
+			Expected: "time(compatible)",
+			Found:    args[0].TypeName(),
+		}
+		return
+	}
+
+	s2, ok := tengo.ToString(args[1])
+	if !ok {
+		err = tengo.ErrInvalidArgumentType{
+			Name:     "second",
+			Expected: "string(compatible)",
+			Found:    args[1].TypeName(),
+		}
+		return
+	}
+
+	location, err := time.LoadLocation(s2)
+	if err != nil {
+		ret = wrapError(err)
+		return
+	}
+
+	ret = &tengo.Time{Value: t1.In(location)}
+
+	return
+}
+
 func timesTimeString(args ...tengo.Object) (ret tengo.Object, err error) {
 	if len(args) != 1 {
 		err = tengo.ErrWrongNumArguments
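Taken together, the hunks above let a tengo script pass an optional IANA zone name as the eighth argument of `times.date` and convert an existing time with the new `times.in_location` builtin. A minimal sketch of driving that from Go; the embedding harness and the zone/date values are illustrative, not code from this repository:

```go
package main

import (
	"context"
	"fmt"

	"github.com/d5/tengo/v2"
	"github.com/d5/tengo/v2/stdlib"
)

func main() {
	// Script exercising the updated times module: an 8th (location)
	// argument to date(), and the new in_location() builtin.
	src := []byte(`
times := import("times")
t := times.date(2023, 8, 5, 12, 0, 0, 0, "Europe/Amsterdam")
u := times.in_location(t, "UTC")
out := times.time_format(u, "2006-01-02T15:04:05Z07:00")
`)

	script := tengo.NewScript(src)
	script.SetImports(stdlib.GetModuleMap("times"))

	compiled, err := script.RunContext(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(compiled.Get("out").String())
}
```

Per the wrapError path in the diff, a zone name the host cannot load is returned to the script as an error value rather than aborting the run.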
16 vendor/github.com/dustin/go-humanize/.travis.yml generated vendored
@@ -1,12 +1,12 @@
sudo: false
language: go
go_import_path: github.com/dustin/go-humanize
go:
- 1.3.x
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.13.x
- 1.14.x
- 1.15.x
- 1.16.x
- stable
- master
matrix:
allow_failures:
@@ -15,7 +15,7 @@ matrix:
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d -s .)
- go tool vet .
- go vet .
- go install -v -race ./...
- go test -v -race ./...
2 vendor/github.com/dustin/go-humanize/README.markdown generated vendored
@@ -5,7 +5,7 @@ Just a few functions for helping humanize times and sizes.
 `go get` it as `github.com/dustin/go-humanize`, import it as
 `"github.com/dustin/go-humanize"`, use it as `humanize`.
 
-See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
+See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for
 complete documentation.
 
 ## Sizes
20 vendor/github.com/dustin/go-humanize/bigbytes.go generated vendored
@@ -28,6 +28,10 @@ var (
 	BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
 	// BigYiByte is 1,024 z bytes in bit.Ints
 	BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
+	// BigRiByte is 1,024 y bytes in bit.Ints
+	BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp)
+	// BigQiByte is 1,024 r bytes in bit.Ints
+	BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp)
 )
 
 var (
@@ -51,6 +55,10 @@ var (
 	BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
 	// BigYByte is 1,000 SI z bytes in big.Ints
 	BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
+	// BigRByte is 1,000 SI y bytes in big.Ints
+	BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp)
+	// BigQByte is 1,000 SI r bytes in big.Ints
+	BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp)
 )
 
 var bigBytesSizeTable = map[string]*big.Int{
@@ -71,6 +79,10 @@ var bigBytesSizeTable = map[string]*big.Int{
 	"zb": BigZByte,
 	"yib": BigYiByte,
 	"yb": BigYByte,
+	"rib": BigRiByte,
+	"rb": BigRByte,
+	"qib": BigQiByte,
+	"qb": BigQByte,
 	// Without suffix
 	"": BigByte,
 	"ki": BigKiByte,
@@ -89,6 +101,10 @@ var bigBytesSizeTable = map[string]*big.Int{
 	"zi": BigZiByte,
 	"y": BigYByte,
 	"yi": BigYiByte,
+	"r": BigRByte,
+	"ri": BigRiByte,
+	"q": BigQByte,
+	"qi": BigQiByte,
 }
 
 var ten = big.NewInt(10)
@@ -115,7 +131,7 @@ func humanateBigBytes(s, base *big.Int, sizes []string) string {
 //
 // BigBytes(82854982) -> 83 MB
 func BigBytes(s *big.Int) string {
-	sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+	sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"}
 	return humanateBigBytes(s, bigSIExp, sizes)
 }
 
@@ -125,7 +141,7 @@ func BigBytes(s *big.Int) string {
 //
 // BigIBytes(82854982) -> 79 MiB
 func BigIBytes(s *big.Int) string {
-	sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+	sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"}
 	return humanateBigBytes(s, bigIECExp, sizes)
 }
 
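The bigbytes hunks extend both size tables with the 2022 SI prefixes ronna (R) and quetta (Q). A small, hypothetical usage sketch; the values are chosen only to hit the new suffixes:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/dustin/go-humanize"
)

func main() {
	// 2 * 10^27 bytes, i.e. two ronnabytes in SI terms.
	rb := new(big.Int).Mul(big.NewInt(2), new(big.Int).Exp(big.NewInt(10), big.NewInt(27), nil))

	fmt.Println(humanize.BigBytes(rb))  // SI sizes now go up to "RB"/"QB"
	fmt.Println(humanize.BigIBytes(rb)) // IEC sizes now go up to "RiB"/"QiB"
}
```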
1 vendor/github.com/dustin/go-humanize/commaf.go generated vendored
@@ -1,3 +1,4 @@
+//go:build go1.6
 // +build go1.6
 
 package humanize
3 vendor/github.com/dustin/go-humanize/ftoa.go generated vendored
@@ -6,6 +6,9 @@ import (
 )
 
 func stripTrailingZeros(s string) string {
+	if !strings.ContainsRune(s, '.') {
+		return s
+	}
 	offset := len(s) - 1
 	for offset > 0 {
 		if s[offset] == '.' {
2 vendor/github.com/dustin/go-humanize/number.go generated vendored
@@ -73,7 +73,7 @@ func FormatFloat(format string, n float64) string {
 	if n > math.MaxFloat64 {
 		return "Infinity"
 	}
-	if n < -math.MaxFloat64 {
+	if n < (0.0 - math.MaxFloat64) {
 		return "-Infinity"
 	}
 
4 vendor/github.com/dustin/go-humanize/si.go generated vendored
@@ -8,6 +8,8 @@ import (
 )
 
 var siPrefixTable = map[float64]string{
+	-30: "q", // quecto
+	-27: "r", // ronto
 	-24: "y", // yocto
 	-21: "z", // zepto
 	-18: "a", // atto
@@ -25,6 +27,8 @@ var siPrefixTable = map[float64]string{
 	18: "E", // exa
 	21: "Z", // zetta
 	24: "Y", // yotta
+	27: "R", // ronna
+	30: "Q", // quetta
 }
 
 var revSIPrefixTable = revfmap(siPrefixTable)
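si.go gets the matching prefix-table entries (quecto/ronto on the small end, ronna/quetta on the large end), so humanize.SI can format values in that range. A hedged sketch; the quantities are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// A 10^27-scale value exercises the new "R" (ronna) entry
	// in siPrefixTable above.
	fmt.Println(humanize.SI(3.2e27, "F"))

	// And the small end: 4e-30 picks up the new "q" (quecto) prefix.
	fmt.Println(humanize.SI(4e-30, "F"))
}
```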
9 vendor/github.com/gomarkdown/markdown/.gitpod.yml generated vendored
@@ -1,9 +0,0 @@
-# This configuration file was automatically generated by Gitpod.
-# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file)
-# and commit this file to your remote git repository to share the goodness with others.
-
-tasks:
-  - init: go get && go build ./... && go test ./...
-    command: go run
-
149 vendor/github.com/gomarkdown/markdown/README.md generated vendored
@ -6,7 +6,16 @@ Package `github.com/gomarkdown/markdown` is a Go library for parsing Markdown te
|
||||
|
||||
It's very fast and supports common extensions.
|
||||
|
||||
Try code examples online: https://replit.com/@kjk1?path=folder/gomarkdown
|
||||
Tutorial: https://blog.kowalczyk.info/article/cxn3/advanced-markdown-processing-in-go.html
|
||||
|
||||
Code examples:
|
||||
* https://onlinetool.io/goplayground/#txO7hJ-ibeU : basic markdown => HTML
|
||||
* https://onlinetool.io/goplayground/#yFRIWRiu-KL : customize HTML renderer
|
||||
* https://onlinetool.io/goplayground/#2yV5-HDKBUV : modify AST
|
||||
* https://onlinetool.io/goplayground/#9fqKwRbuJ04 : customize parser
|
||||
* https://onlinetool.io/goplayground/#Bk0zTvrzUDR : syntax highlight
|
||||
|
||||
Those examples are also in [examples](./examples) directory.
|
||||
|
||||
## API Docs:
|
||||
|
||||
@ -15,101 +24,58 @@ Try code examples online: https://replit.com/@kjk1?path=folder/gomarkdown
|
||||
- https://pkg.go.dev/github.com/gomarkdown/markdown/parser : parser
|
||||
- https://pkg.go.dev/github.com/gomarkdown/markdown/html : html renderer
|
||||
|
||||
## Users
|
||||
|
||||
Some tools using this package: https://pkg.go.dev/github.com/gomarkdown/markdown?tab=importedby
|
||||
|
||||
## Usage
|
||||
|
||||
To convert markdown text to HTML using reasonable defaults:
|
||||
|
||||
```go
|
||||
md := []byte("## markdown document")
|
||||
output := markdown.ToHTML(md, nil, nil)
|
||||
```
|
||||
package main
|
||||
|
||||
Try it online: https://replit.com/@kjk1/gomarkdown-basic
|
||||
|
||||
## Customizing markdown parser
|
||||
|
||||
Markdown format is loosely specified and there are multiple extensions invented after original specification was created.
|
||||
|
||||
The parser supports several [extensions](https://pkg.go.dev/github.com/gomarkdown/markdown/parser#Extensions).
|
||||
|
||||
Default parser uses most common `parser.CommonExtensions` but you can easily use parser with custom extension:
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/gomarkdown/markdown"
|
||||
"github.com/gomarkdown/markdown/parser"
|
||||
"os"
|
||||
|
||||
"github.com/gomarkdown/markdown"
|
||||
"github.com/gomarkdown/markdown/ast"
|
||||
"github.com/gomarkdown/markdown/html"
|
||||
"github.com/gomarkdown/markdown/parser"
|
||||
|
||||
"fmt"
|
||||
)
|
||||
|
||||
extensions := parser.CommonExtensions | parser.AutoHeadingIDs
|
||||
parser := parser.NewWithExtensions(extensions)
|
||||
var mds = `# header
|
||||
|
||||
md := []byte("markdown text")
|
||||
html := markdown.ToHTML(md, parser, nil)
|
||||
```
|
||||
Sample text.
|
||||
|
||||
Try it online: https://replit.com/@kjk1/gomarkdown-customized-html-renderer
|
||||
[link](http://example.com)
|
||||
`
|
||||
|
||||
## Customizing HTML renderer
|
||||
func mdToHTML(md []byte) []byte {
|
||||
// create markdown parser with extensions
|
||||
extensions := parser.CommonExtensions | parser.AutoHeadingIDs | parser.NoEmptyLineBeforeBlock
|
||||
p := parser.NewWithExtensions(extensions)
|
||||
doc := p.Parse(md)
|
||||
|
||||
Similarly, HTML renderer can be configured with different [options](https://pkg.go.dev/github.com/gomarkdown/markdown/html#RendererOptions)
|
||||
// create HTML renderer with extensions
|
||||
htmlFlags := html.CommonFlags | html.HrefTargetBlank
|
||||
opts := html.RendererOptions{Flags: htmlFlags}
|
||||
renderer := html.NewRenderer(opts)
|
||||
|
||||
Here's how to use a custom renderer:
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/gomarkdown/markdown"
|
||||
"github.com/gomarkdown/markdown/html"
|
||||
)
|
||||
|
||||
htmlFlags := html.CommonFlags | html.HrefTargetBlank
|
||||
opts := html.RendererOptions{Flags: htmlFlags}
|
||||
renderer := html.NewRenderer(opts)
|
||||
|
||||
md := []byte("markdown text")
|
||||
html := markdown.ToHTML(md, nil, renderer)
|
||||
```
|
||||
|
||||
Try it online: https://replit.com/@kjk1/gomarkdown-customized-html-renderer
|
||||
|
||||
HTML renderer also supports reusing most of the logic and overriding rendering of only specific nodes.
|
||||
|
||||
You can provide [RenderNodeFunc](https://pkg.go.dev/github.com/gomarkdown/markdown/html#RenderNodeFunc) in [RendererOptions](https://pkg.go.dev/github.com/gomarkdown/markdown/html#RendererOptions).
|
||||
|
||||
The function is called for each node in AST, you can implement custom rendering logic and tell HTML renderer to skip rendering this node.
|
||||
|
||||
Here's the simplest example that drops all code blocks from the output:
|
||||
|
||||
````go
|
||||
import (
|
||||
"github.com/gomarkdown/markdown"
|
||||
"github.com/gomarkdown/markdown/ast"
|
||||
"github.com/gomarkdown/markdown/html"
|
||||
)
|
||||
|
||||
// return (ast.GoToNext, true) to tell html renderer to skip rendering this node
|
||||
// (because you've rendered it)
|
||||
func renderHookDropCodeBlock(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) {
|
||||
// skip all nodes that are not CodeBlock nodes
|
||||
if _, ok := node.(*ast.CodeBlock); !ok {
|
||||
return ast.GoToNext, false
|
||||
}
|
||||
// custom rendering logic for ast.CodeBlock. By doing nothing it won't be
|
||||
// present in the output
|
||||
return ast.GoToNext, true
|
||||
return markdown.Render(doc, renderer)
|
||||
}
|
||||
|
||||
opts := html.RendererOptions{
|
||||
Flags: html.CommonFlags,
|
||||
RenderNodeHook: renderHookDropCodeBlock,
|
||||
func main() {
|
||||
md := []byte(mds)
|
||||
html := mdToHTML(md)
|
||||
|
||||
fmt.Printf("--- Markdown:\n%s\n\n--- HTML:\n%s\n", md, html)
|
||||
}
|
||||
renderer := html.NewRenderer(opts)
|
||||
md := "test\n```\nthis code block will be dropped from output\n```\ntext"
|
||||
html := markdown.ToHTML([]byte(md), nil, renderer)
|
||||
````
|
||||
```
|
||||
|
||||
Try it online: https://onlinetool.io/goplayground/#txO7hJ-ibeU
|
||||
|
||||
For more documentation read [this guide](https://blog.kowalczyk.info/article/cxn3/advanced-markdown-processing-in-go.html)
|
||||
|
||||
Comparing to other markdown parsers: https://babelmark.github.io/
|
||||
|
||||
## Sanitize untrusted content
|
||||
|
||||
@ -129,12 +95,6 @@ maybeUnsafeHTML := markdown.ToHTML(md, nil, nil)
|
||||
html := bluemonday.UGCPolicy().SanitizeBytes(maybeUnsafeHTML)
|
||||
```
|
||||
|
||||
## Windows / Mac newlines
|
||||
|
||||
The library only supports Unix newlines. If you have markdown text with possibly
|
||||
Windows / Mac newlines, normalize newlines before calling this library using
|
||||
`d = markdown.NormalizeNewlines(d)`
|
||||
|
||||
## mdtohtml command-line tool
|
||||
|
||||
https://github.com/gomarkdown/mdtohtml is a command-line markdown to html
|
||||
@ -323,26 +283,15 @@ implements the following extensions:
|
||||
|
||||
- **Mmark support**, see <https://mmark.miek.nl/post/syntax/> for all new syntax elements this adds.
|
||||
|
||||
## Todo
|
||||
## Users
|
||||
|
||||
- port https://github.com/russross/blackfriday/issues/348
|
||||
- port [LaTeX output](https://github.com/Ambrevar/Blackfriday-LaTeX):
|
||||
renders output as LaTeX.
|
||||
- port https://github.com/shurcooL/github_flavored_markdown to markdown
|
||||
- port [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
|
||||
but for markdown.
|
||||
- More unit testing
|
||||
- Improve unicode support. It does not understand all unicode
|
||||
rules (about what constitutes a letter, a punctuation symbol,
|
||||
etc.), so it may fail to detect word boundaries correctly in
|
||||
some instances. It is safe on all utf-8 input.
|
||||
Some tools using this package: https://pkg.go.dev/github.com/gomarkdown/markdown?tab=importedby
|
||||
|
||||
## History
|
||||
|
||||
markdown is a fork of v2 of https://github.com/russross/blackfriday that is:
|
||||
markdown is a fork of v2 of https://github.com/russross/blackfriday.
|
||||
|
||||
- actively maintained (sadly in Feb 2018 blackfriday was inactive for 5 months with many bugs and pull requests accumulated)
|
||||
- refactored API (split into ast/parser/html sub-packages)
|
||||
I refactored the API (split into ast/parser/html sub-packages).
|
||||
|
||||
Blackfriday itself was based on C implementation [sundown](https://github.com/vmg/sundown) which in turn was based on [libsoldout](http://fossil.instinctive.eu/libsoldout/home).
|
||||
|
||||
|
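The README rewrite above is hard to follow through the diff noise; the complete example it now opens with is reproduced here, lightly cleaned up (same code as in the vendored README, with the local variable renamed so it does not shadow the html package):

```go
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/html"
	"github.com/gomarkdown/markdown/parser"
)

var mds = `# header

Sample text.

[link](http://example.com)
`

func mdToHTML(md []byte) []byte {
	// create markdown parser with extensions
	extensions := parser.CommonExtensions | parser.AutoHeadingIDs | parser.NoEmptyLineBeforeBlock
	p := parser.NewWithExtensions(extensions)
	doc := p.Parse(md)

	// create HTML renderer with extensions
	htmlFlags := html.CommonFlags | html.HrefTargetBlank
	opts := html.RendererOptions{Flags: htmlFlags}
	renderer := html.NewRenderer(opts)

	return markdown.Render(doc, renderer)
}

func main() {
	md := []byte(mds)
	out := mdToHTML(md)
	fmt.Printf("--- Markdown:\n%s\n\n--- HTML:\n%s\n", md, out)
}
```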
6 vendor/github.com/gomarkdown/markdown/ast/node.go generated vendored
@@ -92,6 +92,12 @@ type Container struct {
 	*Attribute // Block level attribute
 }
 
+// return true if can contain children of a given node type
+// used by custom nodes to over-ride logic in canNodeContain
+type CanContain interface {
+	CanContain(Node) bool
+}
+
 // AsContainer returns itself as *Container
 func (c *Container) AsContainer() *Container {
 	return c
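The new CanContain interface lets a custom node type decide which children it may hold, overriding the internal canNodeContain logic. A minimal, hypothetical node; the AdmonitionBlock name is made up for illustration:

```go
package main

import "github.com/gomarkdown/markdown/ast"

// AdmonitionBlock is a hypothetical custom container node.
type AdmonitionBlock struct {
	ast.Container
}

// CanContain restricts children to paragraphs and list structures.
func (b *AdmonitionBlock) CanContain(n ast.Node) bool {
	switch n.(type) {
	case *ast.Paragraph, *ast.List, *ast.ListItem:
		return true
	}
	return false
}

// Compile-time check against the interface added in the hunk above.
var _ ast.CanContain = (*AdmonitionBlock)(nil)

func main() {}
```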
2 vendor/github.com/gomarkdown/markdown/ast/print.go generated vendored
@@ -157,6 +157,8 @@ func printRecur(w io.Writer, node Node, prefix string, depth int) {
 			content += "flags=" + flags + " "
 		}
 		printDefault(w, indent, typeName, content)
+	case *CodeBlock:
+		printDefault(w, indent, typeName + ":" + string(v.Info), content)
 	default:
 		printDefault(w, indent, typeName, content)
 	}
8 vendor/github.com/gomarkdown/markdown/codecov.yml generated vendored
@@ -1,8 +0,0 @@
-coverage:
-  status:
-    project:
-      default:
-        # basic
-        target: 60%
-        threshold: 2%
-        base: auto
267 vendor/github.com/gomarkdown/markdown/html/renderer.go generated vendored
@ -88,13 +88,15 @@ type RendererOptions struct {
|
||||
// FootnoteReturnLinks flag is enabled. If blank, the string
|
||||
// <sup>[return]</sup> is used.
|
||||
FootnoteReturnLinkContents string
|
||||
// CitationFormatString defines how a citation is rendered. If blnck, the string
|
||||
// CitationFormatString defines how a citation is rendered. If blank, the string
|
||||
// <sup>[%s]</sup> is used. Where %s will be substituted with the citation target.
|
||||
CitationFormatString string
|
||||
// If set, add this text to the front of each Heading ID, to ensure uniqueness.
|
||||
HeadingIDPrefix string
|
||||
// If set, add this text to the back of each Heading ID, to ensure uniqueness.
|
||||
HeadingIDSuffix string
|
||||
// can over-write <p> for paragraph tag
|
||||
ParagraphTag string
|
||||
|
||||
Title string // Document title (used if CompletePage is set)
|
||||
CSS string // Optional CSS file URL (used if CompletePage is set)
|
||||
@ -120,7 +122,7 @@ type RendererOptions struct {
|
||||
//
|
||||
// Do not create this directly, instead use the NewRenderer function.
|
||||
type Renderer struct {
|
||||
opts RendererOptions
|
||||
Opts RendererOptions
|
||||
|
||||
closeTag string // how to end singleton tags: either " />" or ">"
|
||||
|
||||
@ -168,7 +170,7 @@ func EscapeHTML(w io.Writer, d []byte) {
|
||||
}
|
||||
}
|
||||
|
||||
func escLink(w io.Writer, text []byte) {
|
||||
func EscLink(w io.Writer, text []byte) {
|
||||
unesc := html.UnescapeString(string(text))
|
||||
EscapeHTML(w, []byte(unesc))
|
||||
}
|
||||
@ -207,7 +209,7 @@ func NewRenderer(opts RendererOptions) *Renderer {
|
||||
}
|
||||
|
||||
return &Renderer{
|
||||
opts: opts,
|
||||
Opts: opts,
|
||||
|
||||
closeTag: closeTag,
|
||||
headingIDs: make(map[string]int),
|
||||
@ -250,12 +252,12 @@ func isRelativeLink(link []byte) (yes bool) {
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *Renderer) addAbsPrefix(link []byte) []byte {
|
||||
if len(link) == 0 {
|
||||
func AddAbsPrefix(link []byte, prefix string) []byte {
|
||||
if len(link) == 0 || len(prefix) == 0 {
|
||||
return link
|
||||
}
|
||||
if r.opts.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
|
||||
newDest := r.opts.AbsolutePrefix
|
||||
if isRelativeLink(link) && link[0] != '.' {
|
||||
newDest := prefix
|
||||
if link[0] != '/' {
|
||||
newDest += "/"
|
||||
}
|
||||
@ -294,7 +296,7 @@ func isMailto(link []byte) bool {
|
||||
}
|
||||
|
||||
func needSkipLink(r *Renderer, dest []byte) bool {
|
||||
flags := r.opts.Flags
|
||||
flags := r.Opts.Flags
|
||||
if flags&SkipLinks != 0 {
|
||||
return true
|
||||
}
|
||||
@ -317,7 +319,7 @@ func appendLanguageAttr(attrs []string, info []byte) []string {
|
||||
return append(attrs, s)
|
||||
}
|
||||
|
||||
func (r *Renderer) outTag(w io.Writer, name string, attrs []string) {
|
||||
func (r *Renderer) OutTag(w io.Writer, name string, attrs []string) {
|
||||
s := name
|
||||
if len(attrs) > 0 {
|
||||
s += " " + strings.Join(attrs, " ")
|
||||
@ -326,22 +328,22 @@ func (r *Renderer) outTag(w io.Writer, name string, attrs []string) {
|
||||
r.lastOutputLen = 1
|
||||
}
|
||||
|
||||
func footnoteRef(prefix string, node *ast.Link) string {
|
||||
urlFrag := prefix + string(slugify(node.Destination))
|
||||
func FootnoteRef(prefix string, node *ast.Link) string {
|
||||
urlFrag := prefix + string(Slugify(node.Destination))
|
||||
nStr := strconv.Itoa(node.NoteID)
|
||||
anchor := `<a href="#fn:` + urlFrag + `">` + nStr + `</a>`
|
||||
return `<sup class="footnote-ref" id="fnref:` + urlFrag + `">` + anchor + `</sup>`
|
||||
}
|
||||
|
||||
func footnoteItem(prefix string, slug []byte) string {
|
||||
func FootnoteItem(prefix string, slug []byte) string {
|
||||
return `<li id="fn:` + prefix + string(slug) + `">`
|
||||
}
|
||||
|
||||
func footnoteReturnLink(prefix, returnLink string, slug []byte) string {
|
||||
func FootnoteReturnLink(prefix, returnLink string, slug []byte) string {
|
||||
return ` <a class="footnote-return" href="#fnref:` + prefix + string(slug) + `">` + returnLink + `</a>`
|
||||
}
|
||||
|
||||
func listItemOpenCR(listItem *ast.ListItem) bool {
|
||||
func ListItemOpenCR(listItem *ast.ListItem) bool {
|
||||
if ast.GetPrevNode(listItem) == nil {
|
||||
return false
|
||||
}
|
||||
@ -349,13 +351,13 @@ func listItemOpenCR(listItem *ast.ListItem) bool {
|
||||
return !ld.Tight && ld.ListFlags&ast.ListTypeDefinition == 0
|
||||
}
|
||||
|
||||
func skipParagraphTags(para *ast.Paragraph) bool {
|
||||
func SkipParagraphTags(para *ast.Paragraph) bool {
|
||||
parent := para.Parent
|
||||
grandparent := parent.GetParent()
|
||||
if grandparent == nil || !isList(grandparent) {
|
||||
if grandparent == nil || !IsList(grandparent) {
|
||||
return false
|
||||
}
|
||||
isParentTerm := isListItemTerm(parent)
|
||||
isParentTerm := IsListItemTerm(parent)
|
||||
grandparentListData := grandparent.(*ast.List)
|
||||
tightOrTerm := grandparentListData.Tight || isParentTerm
|
||||
return tightOrTerm
|
||||
@ -391,35 +393,35 @@ var (
|
||||
closeHTags = []string{"</h1>", "</h2>", "</h3>", "</h4>", "</h5>"}
|
||||
)
|
||||
|
||||
func headingOpenTagFromLevel(level int) string {
|
||||
func HeadingOpenTagFromLevel(level int) string {
|
||||
if level < 1 || level > 5 {
|
||||
return "<h6"
|
||||
}
|
||||
return openHTags[level-1]
|
||||
}
|
||||
|
||||
func headingCloseTagFromLevel(level int) string {
|
||||
func HeadingCloseTagFromLevel(level int) string {
|
||||
if level < 1 || level > 5 {
|
||||
return "</h6>"
|
||||
}
|
||||
return closeHTags[level-1]
|
||||
}
|
||||
|
||||
func (r *Renderer) outHRTag(w io.Writer, attrs []string) {
|
||||
func (r *Renderer) OutHRTag(w io.Writer, attrs []string) {
|
||||
hr := TagWithAttributes("<hr", attrs)
|
||||
r.OutOneOf(w, r.opts.Flags&UseXHTML == 0, hr, "<hr />")
|
||||
r.OutOneOf(w, r.Opts.Flags&UseXHTML == 0, hr, "<hr />")
|
||||
}
|
||||
|
||||
// Text writes ast.Text node
|
||||
func (r *Renderer) Text(w io.Writer, text *ast.Text) {
|
||||
if r.opts.Flags&Smartypants != 0 {
|
||||
if r.Opts.Flags&Smartypants != 0 {
|
||||
var tmp bytes.Buffer
|
||||
EscapeHTML(&tmp, text.Literal)
|
||||
r.sr.Process(w, tmp.Bytes())
|
||||
} else {
|
||||
_, parentIsLink := text.Parent.(*ast.Link)
|
||||
if parentIsLink {
|
||||
escLink(w, text.Literal)
|
||||
EscLink(w, text.Literal)
|
||||
} else {
|
||||
EscapeHTML(w, text.Literal)
|
||||
}
|
||||
@ -428,7 +430,7 @@ func (r *Renderer) Text(w io.Writer, text *ast.Text) {
|
||||
|
||||
// HardBreak writes ast.Hardbreak node
|
||||
func (r *Renderer) HardBreak(w io.Writer, node *ast.Hardbreak) {
|
||||
r.OutOneOf(w, r.opts.Flags&UseXHTML == 0, "<br>", "<br />")
|
||||
r.OutOneOf(w, r.Opts.Flags&UseXHTML == 0, "<br>", "<br />")
|
||||
r.CR(w)
|
||||
}
|
||||
|
||||
@ -459,7 +461,7 @@ func (r *Renderer) OutOneOfCr(w io.Writer, outFirst bool, first string, second s
|
||||
|
||||
// HTMLSpan writes ast.HTMLSpan node
|
||||
func (r *Renderer) HTMLSpan(w io.Writer, span *ast.HTMLSpan) {
|
||||
if r.opts.Flags&SkipHTML == 0 {
|
||||
if r.Opts.Flags&SkipHTML == 0 {
|
||||
r.Out(w, span.Literal)
|
||||
}
|
||||
}
|
||||
@ -467,18 +469,18 @@ func (r *Renderer) HTMLSpan(w io.Writer, span *ast.HTMLSpan) {
|
||||
func (r *Renderer) linkEnter(w io.Writer, link *ast.Link) {
|
||||
attrs := link.AdditionalAttributes
|
||||
dest := link.Destination
|
||||
dest = r.addAbsPrefix(dest)
|
||||
dest = AddAbsPrefix(dest, r.Opts.AbsolutePrefix)
|
||||
var hrefBuf bytes.Buffer
|
||||
hrefBuf.WriteString("href=\"")
|
||||
escLink(&hrefBuf, dest)
|
||||
EscLink(&hrefBuf, dest)
|
||||
hrefBuf.WriteByte('"')
|
||||
attrs = append(attrs, hrefBuf.String())
|
||||
if link.NoteID != 0 {
|
||||
r.Outs(w, footnoteRef(r.opts.FootnoteAnchorPrefix, link))
|
||||
r.Outs(w, FootnoteRef(r.Opts.FootnoteAnchorPrefix, link))
|
||||
return
|
||||
}
|
||||
|
||||
attrs = appendLinkAttrs(attrs, r.opts.Flags, dest)
|
||||
attrs = appendLinkAttrs(attrs, r.Opts.Flags, dest)
|
||||
if len(link.Title) > 0 {
|
||||
var titleBuff bytes.Buffer
|
||||
titleBuff.WriteString("title=\"")
|
||||
@ -486,7 +488,7 @@ func (r *Renderer) linkEnter(w io.Writer, link *ast.Link) {
|
||||
titleBuff.WriteByte('"')
|
||||
attrs = append(attrs, titleBuff.String())
|
||||
}
|
||||
r.outTag(w, "<a", attrs)
|
||||
r.OutTag(w, "<a", attrs)
|
||||
}
|
||||
|
||||
func (r *Renderer) linkExit(w io.Writer, link *ast.Link) {
|
||||
@ -511,33 +513,34 @@ func (r *Renderer) Link(w io.Writer, link *ast.Link, entering bool) {
|
||||
}
|
||||
|
||||
func (r *Renderer) imageEnter(w io.Writer, image *ast.Image) {
|
||||
dest := image.Destination
|
||||
dest = r.addAbsPrefix(dest)
|
||||
if r.DisableTags == 0 {
|
||||
//if options.safe && potentiallyUnsafe(dest) {
|
||||
//out(w, `<img src="" alt="`)
|
||||
//} else {
|
||||
if r.opts.Flags&LazyLoadImages != 0 {
|
||||
r.Outs(w, `<img loading="lazy" src="`)
|
||||
} else {
|
||||
r.Outs(w, `<img src="`)
|
||||
}
|
||||
escLink(w, dest)
|
||||
r.Outs(w, `" alt="`)
|
||||
//}
|
||||
}
|
||||
r.DisableTags++
|
||||
if r.DisableTags > 1 {
|
||||
return
|
||||
}
|
||||
src := image.Destination
|
||||
src = AddAbsPrefix(src, r.Opts.AbsolutePrefix)
|
||||
attrs := BlockAttrs(image)
|
||||
if r.Opts.Flags&LazyLoadImages != 0 {
|
||||
attrs = append(attrs, `loading="lazy"`)
|
||||
}
|
||||
|
||||
s := TagWithAttributes("<img", attrs)
|
||||
s = s[:len(s)-1] // hackish: strip off ">" from end
|
||||
r.Outs(w, s+` src="`)
|
||||
EscLink(w, src)
|
||||
r.Outs(w, `" alt="`)
|
||||
}
|
||||
|
||||
func (r *Renderer) imageExit(w io.Writer, image *ast.Image) {
|
||||
r.DisableTags--
|
||||
if r.DisableTags == 0 {
|
||||
if image.Title != nil {
|
||||
r.Outs(w, `" title="`)
|
||||
EscapeHTML(w, image.Title)
|
||||
}
|
||||
r.Outs(w, `" />`)
|
||||
if r.DisableTags > 0 {
|
||||
return
|
||||
}
|
||||
if image.Title != nil {
|
||||
r.Outs(w, `" title="`)
|
||||
EscapeHTML(w, image.Title)
|
||||
}
|
||||
r.Outs(w, `" />`)
|
||||
}
|
||||
|
||||
// Image writes ast.Image node
|
||||
@ -571,20 +574,28 @@ func (r *Renderer) paragraphEnter(w io.Writer, para *ast.Paragraph) {
|
||||
}
|
||||
}
|
||||
|
||||
tag := TagWithAttributes("<p", BlockAttrs(para))
|
||||
ptag := "<p"
|
||||
if r.Opts.ParagraphTag != "" {
|
||||
ptag = "<" + r.Opts.ParagraphTag
|
||||
}
|
||||
tag := TagWithAttributes(ptag, BlockAttrs(para))
|
||||
r.Outs(w, tag)
|
||||
}
|
||||
|
||||
func (r *Renderer) paragraphExit(w io.Writer, para *ast.Paragraph) {
|
||||
r.Outs(w, "</p>")
|
||||
if !(isListItem(para.Parent) && ast.GetNextNode(para) == nil) {
|
||||
ptag := "</p>"
|
||||
if r.Opts.ParagraphTag != "" {
|
||||
ptag = "</" + r.Opts.ParagraphTag + ">"
|
||||
}
|
||||
r.Outs(w, ptag)
|
||||
if !(IsListItem(para.Parent) && ast.GetNextNode(para) == nil) {
|
||||
r.CR(w)
|
||||
}
|
||||
}
|
||||
|
||||
// Paragraph writes ast.Paragraph node
|
||||
func (r *Renderer) Paragraph(w io.Writer, para *ast.Paragraph, entering bool) {
|
||||
if skipParagraphTags(para) {
|
||||
if SkipParagraphTags(para) {
|
||||
return
|
||||
}
|
||||
if entering {
|
||||
@ -603,7 +614,7 @@ func (r *Renderer) Code(w io.Writer, node *ast.Code) {
|
||||
|
||||
// HTMLBlock write ast.HTMLBlock node
|
||||
func (r *Renderer) HTMLBlock(w io.Writer, node *ast.HTMLBlock) {
|
||||
if r.opts.Flags&SkipHTML != 0 {
|
||||
if r.Opts.Flags&SkipHTML != 0 {
|
||||
return
|
||||
}
|
||||
r.CR(w)
|
||||
@ -611,6 +622,25 @@ func (r *Renderer) HTMLBlock(w io.Writer, node *ast.HTMLBlock) {
|
||||
r.CR(w)
|
||||
}
|
||||
|
||||
func (r *Renderer) EnsureUniqueHeadingID(id string) string {
|
||||
for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
|
||||
tmp := fmt.Sprintf("%s-%d", id, count+1)
|
||||
|
||||
if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
|
||||
r.headingIDs[id] = count + 1
|
||||
id = tmp
|
||||
} else {
|
||||
id = id + "-1"
|
||||
}
|
||||
}
|
||||
|
||||
if _, found := r.headingIDs[id]; !found {
|
||||
r.headingIDs[id] = 0
|
||||
}
|
||||
|
||||
return id
|
||||
}
|
||||
|
||||
func (r *Renderer) headingEnter(w io.Writer, nodeData *ast.Heading) {
|
||||
var attrs []string
|
||||
var class string
|
||||
@ -629,44 +659,25 @@ func (r *Renderer) headingEnter(w io.Writer, nodeData *ast.Heading) {
|
||||
attrs = []string{`class="` + class + `"`}
|
||||
}
|
||||
|
||||
ensureUniqueHeadingID := func(id string) string {
|
||||
for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
|
||||
tmp := fmt.Sprintf("%s-%d", id, count+1)
|
||||
|
||||
if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
|
||||
r.headingIDs[id] = count + 1
|
||||
id = tmp
|
||||
} else {
|
||||
id = id + "-1"
|
||||
}
|
||||
}
|
||||
|
||||
if _, found := r.headingIDs[id]; !found {
|
||||
r.headingIDs[id] = 0
|
||||
}
|
||||
|
||||
return id
|
||||
}
|
||||
|
||||
if nodeData.HeadingID != "" {
|
||||
id := ensureUniqueHeadingID(nodeData.HeadingID)
|
||||
if r.opts.HeadingIDPrefix != "" {
|
||||
id = r.opts.HeadingIDPrefix + id
|
||||
id := r.EnsureUniqueHeadingID(nodeData.HeadingID)
|
||||
if r.Opts.HeadingIDPrefix != "" {
|
||||
id = r.Opts.HeadingIDPrefix + id
|
||||
}
|
||||
if r.opts.HeadingIDSuffix != "" {
|
||||
id = id + r.opts.HeadingIDSuffix
|
||||
if r.Opts.HeadingIDSuffix != "" {
|
||||
id = id + r.Opts.HeadingIDSuffix
|
||||
}
|
||||
attrID := `id="` + id + `"`
|
||||
attrs = append(attrs, attrID)
|
||||
}
|
||||
attrs = append(attrs, BlockAttrs(nodeData)...)
|
||||
r.CR(w)
|
||||
r.outTag(w, headingOpenTagFromLevel(nodeData.Level), attrs)
|
||||
r.OutTag(w, HeadingOpenTagFromLevel(nodeData.Level), attrs)
|
||||
}
|
||||
|
||||
func (r *Renderer) headingExit(w io.Writer, heading *ast.Heading) {
|
||||
r.Outs(w, headingCloseTagFromLevel(heading.Level))
|
||||
if !(isListItem(heading.Parent) && ast.GetNextNode(heading) == nil) {
|
||||
r.Outs(w, HeadingCloseTagFromLevel(heading.Level))
|
||||
if !(IsListItem(heading.Parent) && ast.GetNextNode(heading) == nil) {
|
||||
r.CR(w)
|
||||
}
|
||||
}
|
||||
@ -683,7 +694,7 @@ func (r *Renderer) Heading(w io.Writer, node *ast.Heading, entering bool) {
|
||||
// HorizontalRule writes ast.HorizontalRule node
|
||||
func (r *Renderer) HorizontalRule(w io.Writer, node *ast.HorizontalRule) {
|
||||
r.CR(w)
|
||||
r.outHRTag(w, BlockAttrs(node))
|
||||
r.OutHRTag(w, BlockAttrs(node))
|
||||
r.CR(w)
|
||||
}
|
||||
|
||||
@ -693,15 +704,15 @@ func (r *Renderer) listEnter(w io.Writer, nodeData *ast.List) {
|
||||
|
||||
if nodeData.IsFootnotesList {
|
||||
r.Outs(w, "\n<div class=\"footnotes\">\n\n")
|
||||
if r.opts.Flags&FootnoteNoHRTag == 0 {
|
||||
r.outHRTag(w, nil)
|
||||
if r.Opts.Flags&FootnoteNoHRTag == 0 {
|
||||
r.OutHRTag(w, nil)
|
||||
r.CR(w)
|
||||
}
|
||||
}
|
||||
r.CR(w)
|
||||
if isListItem(nodeData.Parent) {
|
||||
if IsListItem(nodeData.Parent) {
|
||||
grand := nodeData.Parent.GetParent()
|
||||
if isListTight(grand) {
|
||||
if IsListTight(grand) {
|
||||
r.CR(w)
|
||||
}
|
||||
}
|
||||
@ -717,7 +728,7 @@ func (r *Renderer) listEnter(w io.Writer, nodeData *ast.List) {
|
||||
openTag = "<dl"
|
||||
}
|
||||
attrs = append(attrs, BlockAttrs(nodeData)...)
|
||||
r.outTag(w, openTag, attrs)
|
||||
r.OutTag(w, openTag, attrs)
|
||||
r.CR(w)
|
||||
}
|
||||
|
||||
@ -760,12 +771,12 @@ func (r *Renderer) List(w io.Writer, list *ast.List, entering bool) {
|
||||
}
|
||||
|
||||
func (r *Renderer) listItemEnter(w io.Writer, listItem *ast.ListItem) {
|
||||
if listItemOpenCR(listItem) {
|
||||
if ListItemOpenCR(listItem) {
|
||||
r.CR(w)
|
||||
}
|
||||
if listItem.RefLink != nil {
|
||||
slug := slugify(listItem.RefLink)
|
||||
r.Outs(w, footnoteItem(r.opts.FootnoteAnchorPrefix, slug))
|
||||
slug := Slugify(listItem.RefLink)
|
||||
r.Outs(w, FootnoteItem(r.Opts.FootnoteAnchorPrefix, slug))
|
||||
return
|
||||
}
|
||||
|
||||
@ -780,11 +791,11 @@ func (r *Renderer) listItemEnter(w io.Writer, listItem *ast.ListItem) {
|
||||
}
|
||||
|
||||
func (r *Renderer) listItemExit(w io.Writer, listItem *ast.ListItem) {
|
||||
if listItem.RefLink != nil && r.opts.Flags&FootnoteReturnLinks != 0 {
|
||||
slug := slugify(listItem.RefLink)
|
||||
prefix := r.opts.FootnoteAnchorPrefix
|
||||
link := r.opts.FootnoteReturnLinkContents
|
||||
s := footnoteReturnLink(prefix, link, slug)
|
||||
if listItem.RefLink != nil && r.Opts.Flags&FootnoteReturnLinks != 0 {
|
||||
slug := Slugify(listItem.RefLink)
|
||||
prefix := r.Opts.FootnoteAnchorPrefix
|
||||
link := r.Opts.FootnoteReturnLinkContents
|
||||
s := FootnoteReturnLink(prefix, link, slug)
|
||||
r.Outs(w, s)
|
||||
}
|
||||
|
||||
@ -815,7 +826,7 @@ func (r *Renderer) EscapeHTMLCallouts(w io.Writer, d []byte) {
|
||||
ld := len(d)
|
||||
Parse:
|
||||
for i := 0; i < ld; i++ {
|
||||
for _, comment := range r.opts.Comments {
|
||||
for _, comment := range r.Opts.Comments {
|
||||
if !bytes.HasPrefix(d[i:], comment) {
|
||||
break
|
||||
}
|
||||
@ -853,14 +864,14 @@ func (r *Renderer) CodeBlock(w io.Writer, codeBlock *ast.CodeBlock) {
|
||||
r.Outs(w, "<pre>")
|
||||
code := TagWithAttributes("<code", attrs)
|
||||
r.Outs(w, code)
|
||||
if r.opts.Comments != nil {
|
||||
if r.Opts.Comments != nil {
|
||||
r.EscapeHTMLCallouts(w, codeBlock.Literal)
|
||||
} else {
|
||||
EscapeHTML(w, codeBlock.Literal)
|
||||
}
|
||||
r.Outs(w, "</code>")
|
||||
r.Outs(w, "</pre>")
|
||||
if !isListItem(codeBlock.Parent) {
|
||||
if !IsListItem(codeBlock.Parent) {
|
||||
r.CR(w)
|
||||
}
|
||||
}
|
||||
@ -910,7 +921,7 @@ func (r *Renderer) TableCell(w io.Writer, tableCell *ast.TableCell, entering boo
|
||||
if ast.GetPrevNode(tableCell) == nil {
|
||||
r.CR(w)
|
||||
}
|
||||
r.outTag(w, openTag, attrs)
|
||||
r.OutTag(w, openTag, attrs)
|
||||
}
|
||||
|
||||
// TableBody writes ast.TableBody node
|
||||
@ -959,8 +970,8 @@ func (r *Renderer) Citation(w io.Writer, node *ast.Citation) {
|
||||
case ast.CitationTypeSuppressed:
|
||||
attr[0] = `class="suppressed"`
|
||||
}
|
||||
r.outTag(w, "<cite", attr)
|
||||
r.Outs(w, fmt.Sprintf(`<a href="#%s">`+r.opts.CitationFormatString+`</a>`, c, c))
|
||||
r.OutTag(w, "<cite", attr)
|
||||
r.Outs(w, fmt.Sprintf(`<a href="#%s">`+r.Opts.CitationFormatString+`</a>`, c, c))
|
||||
r.Outs(w, "</cite>")
|
||||
}
|
||||
}
|
||||
@ -968,7 +979,7 @@ func (r *Renderer) Citation(w io.Writer, node *ast.Citation) {
|
||||
// Callout writes ast.Callout node
|
||||
func (r *Renderer) Callout(w io.Writer, node *ast.Callout) {
|
||||
attr := []string{`class="callout"`}
|
||||
r.outTag(w, "<span", attr)
|
||||
r.OutTag(w, "<span", attr)
|
||||
r.Out(w, node.ID)
|
||||
r.Outs(w, "</span>")
|
||||
}
|
||||
@ -977,14 +988,14 @@ func (r *Renderer) Callout(w io.Writer, node *ast.Callout) {
|
||||
func (r *Renderer) Index(w io.Writer, node *ast.Index) {
|
||||
// there is no in-text representation.
|
||||
attr := []string{`class="index"`, fmt.Sprintf(`id="%s"`, node.ID)}
|
||||
r.outTag(w, "<span", attr)
|
||||
r.OutTag(w, "<span", attr)
|
||||
r.Outs(w, "</span>")
|
||||
}
|
||||
|
||||
// RenderNode renders a markdown node to HTML
|
||||
func (r *Renderer) RenderNode(w io.Writer, node ast.Node, entering bool) ast.WalkStatus {
|
||||
if r.opts.RenderNodeHook != nil {
|
||||
status, didHandle := r.opts.RenderNodeHook(w, node, entering)
|
||||
if r.Opts.RenderNodeHook != nil {
|
||||
status, didHandle := r.Opts.RenderNodeHook(w, node, entering)
|
||||
if didHandle {
|
||||
return status
|
||||
}
|
||||
@ -1019,7 +1030,7 @@ func (r *Renderer) RenderNode(w io.Writer, node ast.Node, entering bool) ast.Wal
|
||||
case *ast.Citation:
|
||||
r.Citation(w, node)
|
||||
case *ast.Image:
|
||||
if r.opts.Flags&SkipImages != 0 {
|
||||
if r.Opts.Flags&SkipImages != 0 {
|
||||
return ast.SkipChildren
|
||||
}
|
||||
r.Image(w, node, entering)
|
||||
@ -1098,7 +1109,7 @@ func (r *Renderer) RenderNode(w io.Writer, node ast.Node, entering bool) ast.Wal
|
||||
// RenderHeader writes HTML document preamble and TOC if requested.
|
||||
func (r *Renderer) RenderHeader(w io.Writer, ast ast.Node) {
|
||||
r.writeDocumentHeader(w)
|
||||
if r.opts.Flags&TOC != 0 {
|
||||
if r.Opts.Flags&TOC != 0 {
|
||||
r.writeTOC(w, ast)
|
||||
}
|
||||
}
|
||||
@ -1109,18 +1120,18 @@ func (r *Renderer) RenderFooter(w io.Writer, _ ast.Node) {
|
||||
r.Outs(w, "</section>\n")
|
||||
}
|
||||
|
||||
if r.opts.Flags&CompletePage == 0 {
|
||||
if r.Opts.Flags&CompletePage == 0 {
|
||||
return
|
||||
}
|
||||
io.WriteString(w, "\n</body>\n</html>\n")
|
||||
}
|
||||
|
||||
func (r *Renderer) writeDocumentHeader(w io.Writer) {
|
||||
if r.opts.Flags&CompletePage == 0 {
|
||||
if r.Opts.Flags&CompletePage == 0 {
|
||||
return
|
||||
}
|
||||
ending := ""
|
||||
if r.opts.Flags&UseXHTML != 0 {
|
||||
if r.Opts.Flags&UseXHTML != 0 {
|
||||
io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
|
||||
io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
|
||||
io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
|
||||
@ -1131,35 +1142,35 @@ func (r *Renderer) writeDocumentHeader(w io.Writer) {
|
||||
}
|
||||
io.WriteString(w, "<head>\n")
|
||||
io.WriteString(w, " <title>")
|
||||
if r.opts.Flags&Smartypants != 0 {
|
||||
r.sr.Process(w, []byte(r.opts.Title))
|
||||
if r.Opts.Flags&Smartypants != 0 {
|
||||
r.sr.Process(w, []byte(r.Opts.Title))
|
||||
} else {
|
||||
EscapeHTML(w, []byte(r.opts.Title))
|
||||
EscapeHTML(w, []byte(r.Opts.Title))
|
||||
}
|
||||
io.WriteString(w, "</title>\n")
|
||||
io.WriteString(w, r.opts.Generator)
|
||||
io.WriteString(w, r.Opts.Generator)
|
||||
io.WriteString(w, "\"")
|
||||
io.WriteString(w, ending)
|
||||
io.WriteString(w, ">\n")
|
||||
io.WriteString(w, " <meta charset=\"utf-8\"")
|
||||
io.WriteString(w, ending)
|
||||
io.WriteString(w, ">\n")
|
||||
if r.opts.CSS != "" {
|
||||
if r.Opts.CSS != "" {
|
||||
io.WriteString(w, " <link rel=\"stylesheet\" type=\"text/css\" href=\"")
|
||||
EscapeHTML(w, []byte(r.opts.CSS))
|
||||
EscapeHTML(w, []byte(r.Opts.CSS))
|
||||
io.WriteString(w, "\"")
|
||||
io.WriteString(w, ending)
|
||||
io.WriteString(w, ">\n")
|
||||
}
|
||||
if r.opts.Icon != "" {
|
||||
if r.Opts.Icon != "" {
|
||||
io.WriteString(w, " <link rel=\"icon\" type=\"image/x-icon\" href=\"")
|
||||
EscapeHTML(w, []byte(r.opts.Icon))
|
||||
EscapeHTML(w, []byte(r.Opts.Icon))
|
||||
io.WriteString(w, "\"")
|
||||
io.WriteString(w, ending)
|
||||
io.WriteString(w, ">\n")
|
||||
}
|
||||
if r.opts.Head != nil {
|
||||
w.Write(r.opts.Head)
|
||||
if r.Opts.Head != nil {
|
||||
w.Write(r.Opts.Head)
|
||||
}
|
||||
io.WriteString(w, "</head>\n")
|
||||
io.WriteString(w, "<body>\n\n")
|
||||
@ -1221,31 +1232,31 @@ func (r *Renderer) writeTOC(w io.Writer, doc ast.Node) {
|
||||
r.lastOutputLen = buf.Len()
|
||||
}
|
||||
|
||||
func isList(node ast.Node) bool {
|
||||
func IsList(node ast.Node) bool {
|
||||
_, ok := node.(*ast.List)
|
||||
return ok
|
||||
}
|
||||
|
||||
func isListTight(node ast.Node) bool {
|
||||
func IsListTight(node ast.Node) bool {
|
||||
if list, ok := node.(*ast.List); ok {
|
||||
return list.Tight
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isListItem(node ast.Node) bool {
|
||||
func IsListItem(node ast.Node) bool {
|
||||
_, ok := node.(*ast.ListItem)
|
||||
return ok
|
||||
}
|
||||
|
||||
func isListItemTerm(node ast.Node) bool {
|
||||
func IsListItemTerm(node ast.Node) bool {
|
||||
data, ok := node.(*ast.ListItem)
|
||||
return ok && data.ListFlags&ast.ListTypeTerm != 0
|
||||
}
|
||||
|
||||
// TODO: move to internal package
|
||||
// Create a url-safe slug for fragments
|
||||
func slugify(in []byte) []byte {
|
||||
func Slugify(in []byte) []byte {
|
||||
if len(in) == 0 {
|
||||
return in
|
||||
}
|
||||
|
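Two changes in the renderer diff above are worth noting: RendererOptions gains a ParagraphTag override, and previously private pieces (opts, outTag, slugify, and others) are exported as Opts, OutTag, Slugify, and friends so custom renderers can reuse them. A small sketch of the ParagraphTag knob, based only on the vendored code shown here:

```go
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/html"
)

func main() {
	// Render paragraphs as <div>...</div> instead of <p>...</p> via
	// the new ParagraphTag option.
	opts := html.RendererOptions{
		Flags:        html.CommonFlags,
		ParagraphTag: "div",
	}
	renderer := html.NewRenderer(opts)

	out := markdown.ToHTML([]byte("hello *world*"), nil, renderer)
	fmt.Println(string(out))
}
```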
27 vendor/github.com/gomarkdown/markdown/markdown.go generated vendored
@@ -84,28 +84,7 @@ func ToHTML(markdown []byte, p *parser.Parser, renderer Renderer) []byte {
 	return Render(doc, renderer)
 }
 
-// NormalizeNewlines converts Windows and Mac newlines to Unix newlines
-// The parser only supports Unix newlines. If your mardown content
+// NormalizeNewlines converts Windows and Mac newlines to Unix newlines.
+// The parser only supports Unix newlines. If your markdown content
 // might contain Windows or Mac newlines, use this function to convert to Unix newlines
-func NormalizeNewlines(d []byte) []byte {
-	wi := 0
-	n := len(d)
-	for i := 0; i < n; i++ {
-		c := d[i]
-		// 13 is CR
-		if c != 13 {
-			d[wi] = c
-			wi++
-			continue
-		}
-		// replace CR (mac / win) with LF (unix)
-		d[wi] = 10
-		wi++
-		if i < n-1 && d[i+1] == 10 {
-			// this was CRLF, so skip the LF
-			i++
-		}
-
-	}
-	return d[:wi]
-}
+var NormalizeNewlines = parser.NormalizeNewlines
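With the rewrite above, markdown.NormalizeNewlines is simply an alias for parser.NormalizeNewlines, so existing callers keep working. A short usage sketch:

```go
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
)

func main() {
	// CRLF and bare CR input is normalized to LF before parsing.
	d := markdown.NormalizeNewlines([]byte("line one\r\nline two\rline three\n"))
	fmt.Printf("%s", markdown.ToHTML(d, nil, nil))
}
```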
10 vendor/github.com/gomarkdown/markdown/parser/aside.go generated vendored
@@ -25,13 +25,13 @@ func (p *Parser) asidePrefix(data []byte) int {
 // aside ends with at least one blank line
 // followed by something without a aside prefix
 func (p *Parser) terminateAside(data []byte, beg, end int) bool {
-	if p.isEmpty(data[beg:]) <= 0 {
+	if IsEmpty(data[beg:]) <= 0 {
 		return false
 	}
 	if end >= len(data) {
 		return true
 	}
-	return p.asidePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
+	return p.asidePrefix(data[end:]) == 0 && IsEmpty(data[end:]) == 0
 }
 
 // parse a aside fragment
@@ -66,8 +66,8 @@ func (p *Parser) aside(data []byte) int {
 		beg = end
 	}
 
-	block := p.addBlock(&ast.Aside{})
-	p.block(raw.Bytes())
-	p.finalize(block)
+	block := p.AddBlock(&ast.Aside{})
+	p.Block(raw.Bytes())
+	p.Finalize(block)
 	return end
 }
134 vendor/github.com/gomarkdown/markdown/parser/block.go generated vendored
@ -103,10 +103,10 @@ func sanitizeHeadingID(text string) string {
|
||||
return string(anchorName)
|
||||
}
|
||||
|
||||
// Parse block-level data.
|
||||
// Parse Block-level data.
|
||||
// Note: this function and many that it calls assume that
|
||||
// the input buffer ends with a newline.
|
||||
func (p *Parser) block(data []byte) {
|
||||
func (p *Parser) Block(data []byte) {
|
||||
// this is called recursively: enforce a maximum depth
|
||||
if p.nesting >= p.maxNesting {
|
||||
return
|
||||
@ -142,7 +142,7 @@ func (p *Parser) block(data []byte) {
|
||||
}
|
||||
}
|
||||
p.includeStack.Push(path)
|
||||
p.block(included)
|
||||
p.Block(included)
|
||||
p.includeStack.Pop()
|
||||
data = data[consumed:]
|
||||
continue
|
||||
@ -156,10 +156,10 @@ func (p *Parser) block(data []byte) {
|
||||
data = data[consumed:]
|
||||
|
||||
if node != nil {
|
||||
p.addBlock(node)
|
||||
p.AddBlock(node)
|
||||
if blockdata != nil {
|
||||
p.block(blockdata)
|
||||
p.finalize(node)
|
||||
p.Block(blockdata)
|
||||
p.Finalize(node)
|
||||
}
|
||||
}
|
||||
continue
|
||||
@ -213,7 +213,7 @@ func (p *Parser) block(data []byte) {
|
||||
}
|
||||
|
||||
// blank lines. note: returns the # of bytes to skip
|
||||
if i := p.isEmpty(data); i > 0 {
|
||||
if i := IsEmpty(data); i > 0 {
|
||||
data = data[i:]
|
||||
continue
|
||||
}
|
||||
@ -255,11 +255,11 @@ func (p *Parser) block(data []byte) {
|
||||
// ******
|
||||
// or
|
||||
// ______
|
||||
if p.isHRule(data) {
|
||||
if isHRule(data) {
|
||||
i := skipUntilChar(data, 0, '\n')
|
||||
hr := ast.HorizontalRule{}
|
||||
hr.Literal = bytes.Trim(data[:i], " \n")
|
||||
p.addBlock(&hr)
|
||||
p.AddBlock(&hr)
|
||||
data = data[i:]
|
||||
continue
|
||||
}
|
||||
@ -377,7 +377,7 @@ func (p *Parser) block(data []byte) {
|
||||
p.nesting--
|
||||
}
|
||||
|
||||
func (p *Parser) addBlock(n ast.Node) ast.Node {
|
||||
func (p *Parser) AddBlock(n ast.Node) ast.Node {
|
||||
p.closeUnmatchedBlocks()
|
||||
|
||||
if p.attr != nil {
|
||||
@ -448,7 +448,7 @@ func (p *Parser) prefixHeading(data []byte) int {
|
||||
p.allHeadingsWithAutoID = append(p.allHeadingsWithAutoID, block)
|
||||
}
|
||||
block.Content = data[i:end]
|
||||
p.addBlock(block)
|
||||
p.AddBlock(block)
|
||||
}
|
||||
return skip
|
||||
}
|
||||
@ -521,7 +521,7 @@ func (p *Parser) prefixSpecialHeading(data []byte) int {
|
||||
}
|
||||
block.Literal = data[i:end]
|
||||
block.Content = data[i:end]
|
||||
p.addBlock(block)
|
||||
p.AddBlock(block)
|
||||
}
|
||||
return skip
|
||||
}
|
||||
@ -572,7 +572,7 @@ func (p *Parser) titleBlock(data []byte, doRender bool) int {
|
||||
IsTitleblock: true,
|
||||
}
|
||||
block.Content = data
|
||||
p.addBlock(block)
|
||||
p.AddBlock(block)
|
||||
|
||||
return consumed
|
||||
}
|
||||
@ -617,14 +617,14 @@ func (p *Parser) html(data []byte, doRender bool) int {
|
||||
}
|
||||
|
||||
// see if it is the only thing on the line
|
||||
if skip := p.isEmpty(data[j:]); skip > 0 {
|
||||
if skip := IsEmpty(data[j:]); skip > 0 {
|
||||
// see if it is followed by a blank line/eof
|
||||
j += skip
|
||||
if j >= len(data) {
|
||||
found = true
|
||||
i = j
|
||||
} else {
|
||||
if skip := p.isEmpty(data[j:]); skip > 0 {
|
||||
if skip := IsEmpty(data[j:]); skip > 0 {
|
||||
j += skip
|
||||
found = true
|
||||
i = j
|
||||
@ -667,7 +667,7 @@ func (p *Parser) html(data []byte, doRender bool) int {
|
||||
// trim newlines
|
||||
end := backChar(data, i, '\n')
|
||||
htmlBLock := &ast.HTMLBlock{Leaf: ast.Leaf{Content: data[:end]}}
|
||||
p.addBlock(htmlBLock)
|
||||
p.AddBlock(htmlBLock)
|
||||
finalizeHTMLBlock(htmlBLock)
|
||||
}
|
||||
|
||||
@ -683,13 +683,13 @@ func finalizeHTMLBlock(block *ast.HTMLBlock) {
|
||||
func (p *Parser) htmlComment(data []byte, doRender bool) int {
|
||||
i := p.inlineHTMLComment(data)
|
||||
// needs to end with a blank line
|
||||
if j := p.isEmpty(data[i:]); j > 0 {
|
||||
if j := IsEmpty(data[i:]); j > 0 {
|
||||
size := i + j
|
||||
if doRender {
|
||||
// trim trailing newlines
|
||||
end := backChar(data, size, '\n')
|
||||
htmlBLock := &ast.HTMLBlock{Leaf: ast.Leaf{Content: data[:end]}}
|
||||
p.addBlock(htmlBLock)
|
||||
p.AddBlock(htmlBLock)
|
||||
finalizeHTMLBlock(htmlBLock)
|
||||
}
|
||||
return size
|
||||
@ -715,13 +715,13 @@ func (p *Parser) htmlHr(data []byte, doRender bool) int {
|
||||
}
|
||||
if i < len(data) && data[i] == '>' {
|
||||
i++
|
||||
if j := p.isEmpty(data[i:]); j > 0 {
|
||||
if j := IsEmpty(data[i:]); j > 0 {
|
||||
size := i + j
|
||||
if doRender {
|
||||
// trim newlines
|
||||
end := backChar(data, size, '\n')
|
||||
htmlBlock := &ast.HTMLBlock{Leaf: ast.Leaf{Content: data[:end]}}
|
||||
p.addBlock(htmlBlock)
|
||||
p.AddBlock(htmlBlock)
|
||||
finalizeHTMLBlock(htmlBlock)
|
||||
}
|
||||
return size
|
||||
@ -753,7 +753,7 @@ func (p *Parser) htmlFindEnd(tag string, data []byte) int {
|
||||
|
||||
// check that the rest of the line is blank
|
||||
skip := 0
|
||||
if skip = p.isEmpty(data[i:]); skip == 0 {
|
||||
if skip = IsEmpty(data[i:]); skip == 0 {
|
||||
return 0
|
||||
}
|
||||
i += skip
|
||||
@ -766,7 +766,7 @@ func (p *Parser) htmlFindEnd(tag string, data []byte) int {
|
||||
if p.extensions&LaxHTMLBlocks != 0 {
|
||||
return i
|
||||
}
|
||||
if skip = p.isEmpty(data[i:]); skip == 0 {
|
||||
if skip = IsEmpty(data[i:]); skip == 0 {
|
||||
// following line must be blank
|
||||
return 0
|
||||
}
|
||||
@ -774,7 +774,7 @@ func (p *Parser) htmlFindEnd(tag string, data []byte) int {
|
||||
return i + skip
|
||||
}
|
||||
|
||||
func (*Parser) isEmpty(data []byte) int {
|
||||
func IsEmpty(data []byte) int {
|
||||
// it is okay to call isEmpty on an empty buffer
|
||||
if len(data) == 0 {
|
||||
return 0
|
||||
@ -790,7 +790,7 @@ func (*Parser) isEmpty(data []byte) int {
|
||||
return i
|
||||
}
|
||||
|
||||
func (*Parser) isHRule(data []byte) bool {
|
||||
func isHRule(data []byte) bool {
|
||||
i := 0
|
||||
|
||||
// skip up to three spaces
|
||||
@ -976,7 +976,7 @@ func (p *Parser) fencedCodeBlock(data []byte, doRender bool) int {
|
||||
codeBlock.Content = work.Bytes() // TODO: get rid of temp buffer
|
||||
|
||||
if p.extensions&Mmark == 0 {
|
||||
p.addBlock(codeBlock)
|
||||
p.AddBlock(codeBlock)
|
||||
finalizeCodeBlock(codeBlock)
|
||||
return beg
|
||||
}
|
||||
@ -988,12 +988,12 @@ func (p *Parser) fencedCodeBlock(data []byte, doRender bool) int {
|
||||
figure.HeadingID = id
|
||||
p.Inline(caption, captionContent)
|
||||
|
||||
p.addBlock(figure)
|
||||
p.AddBlock(figure)
|
||||
codeBlock.AsLeaf().Attribute = figure.AsContainer().Attribute
|
||||
p.addChild(codeBlock)
|
||||
finalizeCodeBlock(codeBlock)
|
||||
p.addChild(caption)
|
||||
p.finalize(figure)
|
||||
p.Finalize(figure)
|
||||
|
||||
beg += consumed
|
||||
|
||||
@ -1001,7 +1001,7 @@ func (p *Parser) fencedCodeBlock(data []byte, doRender bool) int {
|
||||
}
|
||||
|
||||
// Still here, normal block
|
||||
p.addBlock(codeBlock)
|
||||
p.AddBlock(codeBlock)
|
||||
finalizeCodeBlock(codeBlock)
|
||||
}
|
||||
|
||||
@ -1055,13 +1055,13 @@ func (p *Parser) quotePrefix(data []byte) int {
|
||||
// blockquote ends with at least one blank line
|
||||
// followed by something without a blockquote prefix
|
||||
func (p *Parser) terminateBlockquote(data []byte, beg, end int) bool {
|
||||
if p.isEmpty(data[beg:]) <= 0 {
|
||||
if IsEmpty(data[beg:]) <= 0 {
|
||||
return false
|
||||
}
|
||||
if end >= len(data) {
|
||||
return true
|
||||
}
|
||||
return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
|
||||
return p.quotePrefix(data[end:]) == 0 && IsEmpty(data[end:]) == 0
|
||||
}
|
||||
|
||||
// parse a blockquote fragment
|
||||
@ -1096,9 +1096,9 @@ func (p *Parser) quote(data []byte) int {
|
||||
}
|
||||
|
||||
if p.extensions&Mmark == 0 {
|
||||
block := p.addBlock(&ast.BlockQuote{})
|
||||
p.block(raw.Bytes())
|
||||
p.finalize(block)
|
||||
block := p.AddBlock(&ast.BlockQuote{})
|
||||
p.Block(raw.Bytes())
|
||||
p.Finalize(block)
|
||||
return end
|
||||
}
|
||||
|
||||
@ -1108,24 +1108,24 @@ func (p *Parser) quote(data []byte) int {
|
||||
figure.HeadingID = id
|
||||
p.Inline(caption, captionContent)
|
||||
|
||||
p.addBlock(figure) // this discard any attributes
|
||||
p.AddBlock(figure) // this discard any attributes
|
||||
block := &ast.BlockQuote{}
|
||||
block.AsContainer().Attribute = figure.AsContainer().Attribute
|
||||
p.addChild(block)
|
||||
p.block(raw.Bytes())
|
||||
p.finalize(block)
|
||||
p.Block(raw.Bytes())
|
||||
p.Finalize(block)
|
||||
|
||||
p.addChild(caption)
|
||||
p.finalize(figure)
|
||||
p.Finalize(figure)
|
||||
|
||||
end += consumed
|
||||
|
||||
return end
|
||||
}
|
||||
|
||||
block := p.addBlock(&ast.BlockQuote{})
|
||||
p.block(raw.Bytes())
|
||||
p.finalize(block)
|
||||
block := p.AddBlock(&ast.BlockQuote{})
|
||||
p.Block(raw.Bytes())
|
||||
p.Finalize(block)
|
||||
|
||||
return end
|
||||
}
|
||||
@ -1152,7 +1152,7 @@ func (p *Parser) code(data []byte) int {
|
||||
i = skipUntilChar(data, i, '\n')
|
||||
i = skipCharN(data, i, '\n', 1)
|
||||
|
||||
blankline := p.isEmpty(data[beg:i]) > 0
|
||||
blankline := IsEmpty(data[beg:i]) > 0
|
||||
if pre := p.codePrefix(data[beg:i]); pre > 0 {
|
||||
beg += pre
|
||||
} else if !blankline {
|
||||
@ -1185,7 +1185,7 @@ func (p *Parser) code(data []byte) int {
|
||||
}
|
||||
// TODO: get rid of temp buffer
|
||||
codeBlock.Content = work.Bytes()
|
||||
p.addBlock(codeBlock)
|
||||
p.AddBlock(codeBlock)
|
||||
finalizeCodeBlock(codeBlock)
|
||||
|
||||
return i
|
||||
@ -1237,10 +1237,29 @@ func (p *Parser) dliPrefix(data []byte) int {
|
||||
if data[0] != ':' || !(data[1] == ' ' || data[1] == '\t') {
|
||||
return 0
|
||||
}
|
||||
// TODO: this is a no-op (data[0] is ':' so not ' ').
|
||||
// Maybe the intent was to eat spaces before ':' ?
|
||||
// either way, no change in tests
|
||||
i := skipChar(data, 0, ' ')
|
||||
return i + 2
|
||||
}
|
||||
|
||||
// TODO: maybe it was meant to be like below
|
||||
// either way, no change in tests
|
||||
/*
|
||||
func (p *Parser) dliPrefix(data []byte) int {
|
||||
i := skipChar(data, 0, ' ')
|
||||
if i+len(data) < 2 {
|
||||
return 0
|
||||
}
|
||||
// need a ':' followed by a space or a tab
|
||||
if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') {
|
||||
return 0
|
||||
}
|
||||
return i + 2
|
||||
}
|
||||
*/
|
||||
|
||||
// parse ordered or unordered list block
func (p *Parser) list(data []byte, flags ast.ListType, start int, delim byte) int {
    i := 0
@ -1251,7 +1270,7 @@ func (p *Parser) list(data []byte, flags ast.ListType, start int, delim byte) in
        Start:     start,
        Delimiter: delim,
    }
    block := p.addBlock(list)
    block := p.AddBlock(list)

    for i < len(data) {
        skip := p.listItem(data[i:], &flags)
@ -1398,7 +1417,7 @@ gatherlines:

        // if it is an empty line, guess that it is part of this item
        // and move on to the next line
        if p.isEmpty(data[line:i]) > 0 {
        if IsEmpty(data[line:i]) > 0 {
            containsBlankLine = true
            line = i
            continue
@ -1432,7 +1451,7 @@ gatherlines:
        // evaluate how this line fits in
        switch {
        // is this a nested list item?
        case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || p.oliPrefix(chunk) > 0 || p.dliPrefix(chunk) > 0:
        case (p.uliPrefix(chunk) > 0 && !isHRule(chunk)) || p.oliPrefix(chunk) > 0 || p.dliPrefix(chunk) > 0:

            // if indent is 4 or more spaces on unordered or ordered lists
            // we need to add leadingWhiteSpaces + 1 spaces in the beginning of the chunk
@ -1484,10 +1503,7 @@ gatherlines:
        case containsBlankLine && indent < 4:
            if *flags&ast.ListTypeDefinition != 0 && i < len(data)-1 {
                // is the next item still a part of this list?
                next := i
                for next < len(data) && data[next] != '\n' {
                    next++
                }
                next := skipUntilChar(data, i, '\n')
                for next < len(data)-1 && data[next] == '\n' {
                    next++
                }
@ -1526,16 +1542,16 @@ gatherlines:
        BulletChar: bulletChar,
        Delimiter:  delimiter,
    }
    p.addBlock(listItem)
    p.AddBlock(listItem)

    // render the contents of the list item
    if *flags&ast.ListItemContainsBlock != 0 && *flags&ast.ListTypeTerm == 0 {
        // intermediate render of block item, except for definition term
        if sublist > 0 {
            p.block(rawBytes[:sublist])
            p.block(rawBytes[sublist:])
            p.Block(rawBytes[:sublist])
            p.Block(rawBytes[sublist:])
        } else {
            p.block(rawBytes)
            p.Block(rawBytes)
        }
    } else {
        // intermediate render of inline item
@ -1547,7 +1563,7 @@ gatherlines:
        }
        p.addChild(para)
        if sublist > 0 {
            p.block(rawBytes[sublist:])
            p.Block(rawBytes[sublist:])
        }
    }
    return line
@ -1574,7 +1590,7 @@ func (p *Parser) renderParagraph(data []byte) {
    }
    para := &ast.Paragraph{}
    para.Content = data[beg:end]
    p.addBlock(para)
    p.AddBlock(para)
}

// blockMath handle block surround with $$
@ -1596,7 +1612,7 @@ func (p *Parser) blockMath(data []byte) int {
    // render the display math
    mathBlock := &ast.MathBlock{}
    mathBlock.Literal = data[2:end]
    p.addBlock(mathBlock)
    p.AddBlock(mathBlock)

    return end + 2
}
@ -1626,7 +1642,7 @@ func (p *Parser) paragraph(data []byte) int {
    }

    // did we find a blank line marking the end of the paragraph?
    if n := p.isEmpty(current); n > 0 {
    if n := IsEmpty(current); n > 0 {
        // did this blank line followed by a definition list item?
        if p.extensions&DefinitionLists != 0 {
            if i < len(data)-1 && data[i+1] == ':' {
@ -1663,7 +1679,7 @@ func (p *Parser) paragraph(data []byte) int {
    }

    block.Content = data[prev:eol]
    p.addBlock(block)
    p.AddBlock(block)

    // find the end of the underline
    return skipUntilChar(data, i, '\n')
@ -1680,7 +1696,7 @@ func (p *Parser) paragraph(data []byte) int {
    }

    // if there's a prefixed heading or a horizontal rule after this, paragraph is over
    if p.isPrefixHeading(current) || p.isPrefixSpecialHeading(current) || p.isHRule(current) {
    if p.isPrefixHeading(current) || p.isPrefixSpecialHeading(current) || isHRule(current) {
        p.renderParagraph(data[:i])
        return i
    }
18 vendor/github.com/gomarkdown/markdown/parser/block_table.go generated vendored
@ -12,7 +12,7 @@ func isBackslashEscaped(data []byte, i int) bool {
}

func (p *Parser) tableRow(data []byte, columns []ast.CellAlignFlags, header bool) {
    p.addBlock(&ast.TableRow{})
    p.AddBlock(&ast.TableRow{})
    col := 0

    i := skipChar(data, 0, '|')
@ -61,7 +61,7 @@ func (p *Parser) tableRow(data []byte, columns []ast.CellAlignFlags, header bool
            // an empty cell that we should ignore, it exists because of colspan
            colspans--
        } else {
            p.addBlock(block)
            p.AddBlock(block)
        }

        if colspan > 0 {
@ -75,7 +75,7 @@ func (p *Parser) tableRow(data []byte, columns []ast.CellAlignFlags, header bool
            IsHeader: header,
            Align:    columns[col],
        }
        p.addBlock(block)
        p.AddBlock(block)
    }

    // silently ignore rows with too many cells
@ -109,7 +109,7 @@ func (p *Parser) tableFooter(data []byte) bool {
        return false
    }

    p.addBlock(&ast.TableFooter{})
    p.AddBlock(&ast.TableFooter{})

    return true
}
@ -217,7 +217,7 @@ func (p *Parser) tableHeader(data []byte, doRender bool) (size int, columns []as
        }
        // end of column test is messy
        switch {
        case dashes < 3:
        case dashes < 1:
            // not a valid column
            return

@ -253,9 +253,9 @@ func (p *Parser) tableHeader(data []byte, doRender bool) (size int, columns []as

    if doRender {
        table = &ast.Table{}
        p.addBlock(table)
        p.AddBlock(table)
        if header != nil {
            p.addBlock(&ast.TableHeader{})
            p.AddBlock(&ast.TableHeader{})
            p.tableRow(header, columns, true)
        }
    }
@ -277,7 +277,7 @@ func (p *Parser) table(data []byte) int {
        return 0
    }

    p.addBlock(&ast.TableBody{})
    p.AddBlock(&ast.TableBody{})

    for i < len(data) {
        pipes, rowStart := 0, i
@ -319,7 +319,7 @@ func (p *Parser) table(data []byte) int {
    ast.AppendChild(figure, caption)

    p.addChild(figure)
    p.finalize(figure)
    p.Finalize(figure)

    i += consumed
}
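One behavioural change rides along with the renames above: tableHeader now accepts delimiter columns with a single dash (`dashes < 1`) where at least three dashes used to be required. A minimal sketch of exercising that through gomarkdown's public API; the input string is illustrative and not taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/parser"
)

func main() {
	// The delimiter row uses a single dash per column; with the relaxed
	// check above this should now parse as a table.
	md := []byte("Name | Value\n-|-\nfoo | 1\n")

	p := parser.NewWithExtensions(parser.Tables)
	// A nil renderer makes ToHTML fall back to the default HTML renderer.
	fmt.Println(string(markdown.ToHTML(md, p, nil)))
}
```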
8 vendor/github.com/gomarkdown/markdown/parser/caption.go generated vendored
@ -11,7 +11,7 @@ func (p *Parser) caption(data, caption []byte) ([]byte, string, int) {
    }
    j := len(caption)
    data = data[j:]
    end := p.linesUntilEmpty(data)
    end := LinesUntilEmpty(data)

    data = data[:end]

@ -23,8 +23,8 @@ func (p *Parser) caption(data, caption []byte) ([]byte, string, int) {
    return data, "", end + j
}

// linesUntilEmpty scans lines up to the first empty line.
func (p *Parser) linesUntilEmpty(data []byte) int {
// LinesUntilEmpty scans lines up to the first empty line.
func LinesUntilEmpty(data []byte) int {
    line, i := 0, 0

    for line < len(data) {
@ -35,7 +35,7 @@ func (p *Parser) linesUntilEmpty(data []byte) int {
        i++
    }

    if p.isEmpty(data[line:i]) == 0 {
    if IsEmpty(data[line:i]) == 0 {
        line = i
        continue
    }
8 vendor/github.com/gomarkdown/markdown/parser/figures.go generated vendored
@ -98,10 +98,10 @@ func (p *Parser) figureBlock(data []byte, doRender bool) int {
    }

    figure := &ast.CaptionFigure{}
    p.addBlock(figure)
    p.block(raw.Bytes())
    p.AddBlock(figure)
    p.Block(raw.Bytes())

    defer p.finalize(figure)
    defer p.Finalize(figure)

    if captionContent, id, consumed := p.caption(data[beg:], []byte("Figure: ")); consumed > 0 {
        caption := &ast.Caption{}
@ -113,7 +113,5 @@ func (p *Parser) figureBlock(data []byte, doRender bool) int {

        beg += consumed
    }

    p.finalize(figure)
    return beg
}
4 vendor/github.com/gomarkdown/markdown/parser/matter.go generated vendored
@ -29,8 +29,8 @@ func (p *Parser) documentMatter(data []byte) int {
        return 0
    }
    node := &ast.DocumentMatter{Matter: matter}
    p.addBlock(node)
    p.finalize(node)
    p.AddBlock(node)
    p.Finalize(node)

    return consumed
}
59 vendor/github.com/gomarkdown/markdown/parser/parser.go generated vendored
@ -42,7 +42,7 @@ const (
    SuperSubscript      // Super- and subscript support: 2^10^, H~2~O.
    EmptyLinesBreakList // 2 empty lines break out of list
    Includes            // Support including other files.
    Mmark               // Support Mmark syntax, see https://mmark.nl/syntax
    Mmark               // Support Mmark syntax, see https://mmark.miek.nl/post/syntax/

    CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode |
        Autolink | Strikethrough | SpaceHeadings | HeadingIDs |
@ -206,13 +206,13 @@ func (p *Parser) isFootnote(ref *reference) bool {
    return ok
}

func (p *Parser) finalize(block ast.Node) {
func (p *Parser) Finalize(block ast.Node) {
    p.tip = block.GetParent()
}

func (p *Parser) addChild(node ast.Node) ast.Node {
    for !canNodeContain(p.tip, node) {
        p.finalize(p.tip)
        p.Finalize(p.tip)
    }
    ast.AppendChild(p.tip, node)
    p.tip = node
@ -239,6 +239,18 @@ func canNodeContain(n ast.Node, v ast.Node) bool {
        _, ok := v.(*ast.TableCell)
        return ok
    }
    // for nodes implemented outside of ast package, allow them
    // to implement this logic via CanContain interface
    if o, ok := n.(ast.CanContain); ok {
        return o.CanContain(v)
    }
    // for container nodes outside of ast package default to true
    // because false is a bad default
    typ := fmt.Sprintf("%T", n)
    customNode := !strings.HasPrefix(typ, "*ast.")
    if customNode {
        return n.AsLeaf() == nil
    }
    return false
}
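The new branch in canNodeContain lets node types defined outside the ast package decide their own nesting through the ast.CanContain interface. A hedged sketch of a custom container node using that hook; the Admonition type here is hypothetical and not part of this diff:

```go
package custom

import "github.com/gomarkdown/markdown/ast"

// Admonition is a hypothetical block node defined outside the ast package.
// Embedding ast.Container supplies the ast.Node plumbing.
type Admonition struct {
	ast.Container
}

// CanContain implements the hook checked in the hunk above: the parser will
// nest only paragraphs and lists inside an Admonition and close it otherwise.
func (a *Admonition) CanContain(v ast.Node) bool {
	switch v.(type) {
	case *ast.Paragraph, *ast.List, *ast.ListItem:
		return true
	default:
		return false
	}
}
```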
@ -248,7 +260,7 @@ func (p *Parser) closeUnmatchedBlocks() {
    }
    for p.oldTip != p.lastMatchedContainer {
        parent := p.oldTip.GetParent()
        p.finalize(p.oldTip)
        p.Finalize(p.oldTip)
        p.oldTip = parent
    }
    p.allClosed = true
@ -273,10 +285,14 @@ type Reference struct {
// You can then convert AST to html using html.Renderer, to some other format
// using a custom renderer or transform the tree.
func (p *Parser) Parse(input []byte) ast.Node {
    p.block(input)
    // the code only works with Unix CR newlines so to make life easy for
    // callers normalize newlines
    input = NormalizeNewlines(input)

    p.Block(input)
    // Walk the tree and finish up some of unfinished blocks
    for p.tip != nil {
        p.finalize(p.tip)
        p.Finalize(p.tip)
    }
    // Walk the tree again and process inline markdown in each block
    ast.WalkFunc(p.Doc, func(node ast.Node, entering bool) ast.WalkStatus {
@ -322,8 +338,8 @@ func (p *Parser) parseRefsToAST() {
        IsFootnotesList: true,
        ListFlags:       ast.ListTypeOrdered,
    }
    p.addBlock(&ast.Footnotes{})
    block := p.addBlock(list)
    p.AddBlock(&ast.Footnotes{})
    block := p.AddBlock(list)
    flags := ast.ListItemBeginningOfList
    // Note: this loop is intentionally explicit, not range-form. This is
    // because the body of the loop will append nested footnotes to p.notes and
@ -338,7 +354,7 @@ func (p *Parser) parseRefsToAST() {
        listItem.RefLink = ref.link
        if ref.hasBlock {
            flags |= ast.ListItemContainsBlock
            p.block(ref.title)
            p.Block(ref.title)
        } else {
            p.Inline(block, ref.title)
        }
@ -660,7 +676,7 @@ gatherLines:

        // if it is an empty line, guess that it is part of this item
        // and move on to the next line
        if p.isEmpty(data[blockEnd:i]) > 0 {
        if IsEmpty(data[blockEnd:i]) > 0 {
            containsBlankLine = true
            blockEnd = i
            continue
@ -883,3 +899,26 @@ func isListItem(d ast.Node) bool {
    _, ok := d.(*ast.ListItem)
    return ok
}

func NormalizeNewlines(d []byte) []byte {
    wi := 0
    n := len(d)
    for i := 0; i < n; i++ {
        c := d[i]
        // 13 is CR
        if c != 13 {
            d[wi] = c
            wi++
            continue
        }
        // replace CR (mac / win) with LF (unix)
        d[wi] = 10
        wi++
        if i < n-1 && d[i+1] == 10 {
            // this was CRLF, so skip the LF
            i++
        }
    }
    return d[:wi]
}
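NormalizeNewlines is newly exported and Parse now calls it on every input, rewriting CR and CRLF to LF in place. A small usage sketch of the helper on its own, assuming only the behaviour shown above:

```go
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown/parser"
)

func main() {
	// Mixed Windows (CRLF) and old-Mac (CR) line endings.
	in := []byte("alpha\r\nbeta\rgamma\n")

	out := parser.NormalizeNewlines(in)

	// Each CR and CRLF collapses to a single LF: "alpha\nbeta\ngamma\n".
	// The input slice is modified in place; out aliases it.
	fmt.Printf("%q\n", out)
}
```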
7 vendor/github.com/gomarkdown/markdown/todo.md generated vendored
@ -1,7 +0,0 @@
# Things to do

[ ] docs: add examples like https://godoc.org/github.com/dgrijalva/jwt-go (put in foo_example_test.go). Or see https://github.com/garyburd/redigo/blob/master/redis/zpop_example_test.go#L5 / https://godoc.org/github.com/garyburd/redigo/redis or https://godoc.org/github.com/go-redis/redis

[ ] figure out expandTabs and parser.TabSizeEight. Are those used?

[ ] SoftbreakData is not used
354 vendor/github.com/hashicorp/errwrap/LICENSE generated vendored
@ -1,354 +0,0 @@
Mozilla Public License, version 2.0
89 vendor/github.com/hashicorp/errwrap/README.md generated vendored
@ -1,89 +0,0 @@
# errwrap

`errwrap` is a package for Go that formalizes the pattern of wrapping errors
and checking if an error contains another error.

There is a common pattern in Go of taking a returned `error` value and
then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
with this pattern is that you completely lose the original `error` structure.

Arguably the _correct_ approach is that you should make a custom structure
implementing the `error` interface, and have the original error as a field
on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
This is a good approach, but you have to know the entire chain of possible
rewrapping that happens, when you might just care about one.

`errwrap` formalizes this pattern (it doesn't matter what approach you use
above) by giving a single interface for wrapping errors, checking if a specific
error is wrapped, and extracting that error.

## Installation and Docs

Install using `go get github.com/hashicorp/errwrap`.

Full documentation is available at
http://godoc.org/github.com/hashicorp/errwrap

## Usage

#### Basic Usage

Below is a very basic example of its usage:

```go
// A function that always returns an error, but wraps it, like a real
// function might.
func tryOpen() error {
    _, err := os.Open("/i/dont/exist")
    if err != nil {
        return errwrap.Wrapf("Doesn't exist: {{err}}", err)
    }

    return nil
}

func main() {
    err := tryOpen()

    // We can use the Contains helpers to check if an error contains
    // another error. It is safe to do this with a nil error, or with
    // an error that doesn't even use the errwrap package.
    if errwrap.Contains(err, "does not exist") {
        // Do something
    }
    if errwrap.ContainsType(err, new(os.PathError)) {
        // Do something
    }

    // Or we can use the associated `Get` functions to just extract
    // a specific error. This would return nil if that specific error doesn't
    // exist.
    perr := errwrap.GetType(err, new(os.PathError))
}
```

#### Custom Types

If you're already making custom types that properly wrap errors, then
you can get all the functionality of `errwraps.Contains` and such by
implementing the `Wrapper` interface with just one function. Example:

```go
type AppError {
    Code ErrorCode
    Err  error
}

func (e *AppError) WrappedErrors() []error {
    return []error{e.Err}
}
```

Now this works:

```go
err := &AppError{Err: fmt.Errorf("an error")}
if errwrap.ContainsType(err, fmt.Errorf("")) {
    // This will work!
}
```
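This commit drops the vendored errwrap package, and the deleted source below marks Wrapf as deprecated in favour of fmt.Errorf. A hedged sketch of the equivalent checks using only the standard library; it is a suggested replacement pattern, not code from this repository:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// tryOpen mirrors the README example above, but wraps with %w so the
// original error stays inspectable through errors.Is / errors.As.
func tryOpen() error {
	_, err := os.Open("/i/dont/exist")
	if err != nil {
		return fmt.Errorf("doesn't exist: %w", err)
	}
	return nil
}

func main() {
	err := tryOpen()

	// errors.Is replaces errwrap.Contains for sentinel errors.
	if errors.Is(err, fs.ErrNotExist) {
		fmt.Println("file is missing")
	}

	// errors.As replaces errwrap.ContainsType / errwrap.GetType.
	var perr *fs.PathError
	if errors.As(err, &perr) {
		fmt.Println("failing path:", perr.Path)
	}
}
```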
178 vendor/github.com/hashicorp/errwrap/errwrap.go generated vendored
@ -1,178 +0,0 @@
// Package errwrap implements methods to formalize error wrapping in Go.
//
// All of the top-level functions that take an `error` are built to be able
// to take any error, not just wrapped errors. This allows you to use errwrap
// without having to type-check and type-cast everywhere.
package errwrap

import (
    "errors"
    "reflect"
    "strings"
)

// WalkFunc is the callback called for Walk.
type WalkFunc func(error)

// Wrapper is an interface that can be implemented by custom types to
// have all the Contains, Get, etc. functions in errwrap work.
//
// When Walk reaches a Wrapper, it will call the callback for every
// wrapped error in addition to the wrapper itself. Since all the top-level
// functions in errwrap use Walk, this means that all those functions work
// with your custom type.
type Wrapper interface {
    WrappedErrors() []error
}

// Wrap defines that outer wraps inner, returning an error type that
// can be cleanly used with the other methods in this package, such as
// Contains, GetAll, etc.
//
// This function won't modify the error message at all (the outer message
// will be used).
func Wrap(outer, inner error) error {
    return &wrappedError{
        Outer: outer,
        Inner: inner,
    }
}

// Wrapf wraps an error with a formatting message. This is similar to using
// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
// errors, you should replace it with this.
//
// format is the format of the error message. The string '{{err}}' will
// be replaced with the original error message.
//
// Deprecated: Use fmt.Errorf()
func Wrapf(format string, err error) error {
    outerMsg := "<nil>"
    if err != nil {
        outerMsg = err.Error()
    }

    outer := errors.New(strings.Replace(
        format, "{{err}}", outerMsg, -1))

    return Wrap(outer, err)
}

// Contains checks if the given error contains an error with the
// message msg. If err is not a wrapped error, this will always return
// false unless the error itself happens to match this msg.
func Contains(err error, msg string) bool {
    return len(GetAll(err, msg)) > 0
}

// ContainsType checks if the given error contains an error with
// the same concrete type as v. If err is not a wrapped error, this will
// check the err itself.
func ContainsType(err error, v interface{}) bool {
    return len(GetAllType(err, v)) > 0
}

// Get is the same as GetAll but returns the deepest matching error.
func Get(err error, msg string) error {
    es := GetAll(err, msg)
    if len(es) > 0 {
        return es[len(es)-1]
    }

    return nil
}

// GetType is the same as GetAllType but returns the deepest matching error.
func GetType(err error, v interface{}) error {
    es := GetAllType(err, v)
    if len(es) > 0 {
        return es[len(es)-1]
    }

    return nil
}

// GetAll gets all the errors that might be wrapped in err with the
// given message. The order of the errors is such that the outermost
// matching error (the most recent wrap) is index zero, and so on.
func GetAll(err error, msg string) []error {
    var result []error

    Walk(err, func(err error) {
        if err.Error() == msg {
            result = append(result, err)
        }
    })

    return result
}

// GetAllType gets all the errors that are the same type as v.
//
// The order of the return value is the same as described in GetAll.
func GetAllType(err error, v interface{}) []error {
    var result []error

    var search string
    if v != nil {
        search = reflect.TypeOf(v).String()
    }
    Walk(err, func(err error) {
        var needle string
        if err != nil {
            needle = reflect.TypeOf(err).String()
        }

        if needle == search {
            result = append(result, err)
        }
    })

    return result
}

// Walk walks all the wrapped errors in err and calls the callback. If
// err isn't a wrapped error, this will be called once for err. If err
// is a wrapped error, the callback will be called for both the wrapper
// that implements error as well as the wrapped error itself.
func Walk(err error, cb WalkFunc) {
    if err == nil {
        return
    }

    switch e := err.(type) {
    case *wrappedError:
        cb(e.Outer)
        Walk(e.Inner, cb)
    case Wrapper:
        cb(err)

        for _, err := range e.WrappedErrors() {
            Walk(err, cb)
        }
    case interface{ Unwrap() error }:
        cb(err)
        Walk(e.Unwrap(), cb)
    default:
        cb(err)
    }
}

// wrappedError is an implementation of error that has both the
// outer and inner errors.
type wrappedError struct {
    Outer error
    Inner error
}

func (w *wrappedError) Error() string {
    return w.Outer.Error()
}

func (w *wrappedError) WrappedErrors() []error {
    return []error{w.Outer, w.Inner}
}

func (w *wrappedError) Unwrap() error {
    return w.Inner
}
353 vendor/github.com/hashicorp/go-multierror/LICENSE generated vendored
@ -1,353 +0,0 @@
Mozilla Public License, version 2.0
31 vendor/github.com/hashicorp/go-multierror/Makefile generated vendored
@ -1,31 +0,0 @@
TEST?=./...

default: test

# test runs the test suite and vets the code.
test: generate
    @echo "==> Running tests..."
    @go list $(TEST) \
        | grep -v "/vendor/" \
        | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS}

# testrace runs the race checker
testrace: generate
    @echo "==> Running tests (race)..."
    @go list $(TEST) \
        | grep -v "/vendor/" \
        | xargs -n1 go test -timeout=60s -race ${TESTARGS}

# updatedeps installs all the dependencies needed to run and build.
updatedeps:
    @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'"

# generate runs `go generate` to build the dynamically generated source files.
generate:
    @echo "==> Generating..."
    @find . -type f -name '.DS_Store' -delete
    @go list ./... \
        | grep -v "/vendor/" \
        | xargs -n1 go generate

.PHONY: default test testrace updatedeps generate
150 vendor/github.com/hashicorp/go-multierror/README.md generated vendored
@ -1,150 +0,0 @@
# go-multierror

[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror)
[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror)
![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror)

[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror
[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror

`go-multierror` is a package for Go that provides a mechanism for
representing a list of `error` values as a single `error`.

This allows a function in Go to return an `error` that might actually
be a list of errors. If the caller knows this, they can unwrap the
list and access the errors. If the caller doesn't know, the error
formats to a nice human-readable format.

`go-multierror` is fully compatible with the Go standard library
[errors](https://golang.org/pkg/errors/) package, including the
functions `As`, `Is`, and `Unwrap`. This provides a standardized approach
for introspecting on error values.

## Installation and Docs

Install using `go get github.com/hashicorp/go-multierror`.

Full documentation is available at
https://pkg.go.dev/github.com/hashicorp/go-multierror

### Requires go version 1.13 or newer

`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced
[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which
this library takes advantage of.

If you need to use an earlier version of go, you can use the
[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0)
tag, which doesn't rely on features in go 1.13.

If you see compile errors that look like the below, it's likely that
you're on an older version of go:

```
/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As
/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is
```

## Usage

go-multierror is easy to use and purposely built to be unobtrusive in
existing Go applications/libraries that may not be aware of it.

**Building a list of errors**

The `Append` function is used to create a list of errors. This function
behaves a lot like the Go built-in `append` function: it doesn't matter
if the first argument is nil, a `multierror.Error`, or any other `error`,
the function behaves as you would expect.

```go
var result error

if err := step1(); err != nil {
    result = multierror.Append(result, err)
}
if err := step2(); err != nil {
    result = multierror.Append(result, err)
}

return result
```
**Customizing the formatting of the errors**
|
||||
|
||||
By specifying a custom `ErrorFormat`, you can customize the format
|
||||
of the `Error() string` function:
|
||||
|
||||
```go
|
||||
var result *multierror.Error
|
||||
|
||||
// ... accumulate errors here, maybe using Append
|
||||
|
||||
if result != nil {
|
||||
result.ErrorFormat = func([]error) string {
|
||||
return "errors!"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Accessing the list of errors**
|
||||
|
||||
`multierror.Error` implements `error` so if the caller doesn't know about
|
||||
multierror, it will work just fine. But if you're aware a multierror might
|
||||
be returned, you can use type switches to access the list of errors:
|
||||
|
||||
```go
|
||||
if err := something(); err != nil {
|
||||
if merr, ok := err.(*multierror.Error); ok {
|
||||
// Use merr.Errors
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap)
|
||||
function. This will continue to unwrap into subsequent errors until none exist.
|
||||
|
||||
**Extracting an error**
|
||||
|
||||
The standard library [`errors.As`](https://golang.org/pkg/errors/#As)
|
||||
function can be used directly with a multierror to extract a specific error:
|
||||
|
||||
```go
|
||||
// Assume err is a multierror value
|
||||
err := somefunc()
|
||||
|
||||
// We want to know if "err" has a "RichErrorType" in it and extract it.
|
||||
var errRich RichErrorType
|
||||
if errors.As(err, &errRich) {
|
||||
// It has it, and now errRich is populated.
|
||||
}
|
||||
```
|
||||
|
||||
**Checking for an exact error value**
|
||||
|
||||
Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables)
|
||||
error in the `os` package. You can check if this error is present by using
|
||||
the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function.
|
||||
|
||||
```go
|
||||
// Assume err is a multierror value
|
||||
err := somefunc()
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
// err contains os.ErrNotExist
|
||||
}
|
||||
```
|
||||
|
||||
**Returning a multierror only if there are errors**
|
||||
|
||||
If you build a `multierror.Error`, you can use the `ErrorOrNil` function
|
||||
to return an `error` implementation only if there are errors to return:
|
||||
|
||||
```go
|
||||
var result *multierror.Error
|
||||
|
||||
// ... accumulate errors here
|
||||
|
||||
// Return the `error` only if errors were added to the multierror, otherwise
|
||||
// return nil since there are no errors.
|
||||
return result.ErrorOrNil()
|
||||
```
|
43
vendor/github.com/hashicorp/go-multierror/append.go
generated
vendored
43
vendor/github.com/hashicorp/go-multierror/append.go
generated
vendored
@ -1,43 +0,0 @@
|
||||
package multierror
|
||||
|
||||
// Append is a helper function that will append more errors
|
||||
// onto an Error in order to create a larger multi-error.
|
||||
//
|
||||
// If err is not a multierror.Error, then it will be turned into
|
||||
// one. If any of the errs are multierr.Error, they will be flattened
|
||||
// one level into err.
|
||||
// Any nil errors within errs will be ignored. If err is nil, a new
|
||||
// *Error will be returned.
|
||||
func Append(err error, errs ...error) *Error {
|
||||
switch err := err.(type) {
|
||||
case *Error:
|
||||
// Typed nils can reach here, so initialize if we are nil
|
||||
if err == nil {
|
||||
err = new(Error)
|
||||
}
|
||||
|
||||
// Go through each error and flatten
|
||||
for _, e := range errs {
|
||||
switch e := e.(type) {
|
||||
case *Error:
|
||||
if e != nil {
|
||||
err.Errors = append(err.Errors, e.Errors...)
|
||||
}
|
||||
default:
|
||||
if e != nil {
|
||||
err.Errors = append(err.Errors, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
default:
|
||||
newErrs := make([]error, 0, len(errs)+1)
|
||||
if err != nil {
|
||||
newErrs = append(newErrs, err)
|
||||
}
|
||||
newErrs = append(newErrs, errs...)
|
||||
|
||||
return Append(&Error{}, newErrs...)
|
||||
}
|
||||
}
|
26
vendor/github.com/hashicorp/go-multierror/flatten.go
generated
vendored
26
vendor/github.com/hashicorp/go-multierror/flatten.go
generated
vendored
@ -1,26 +0,0 @@
|
||||
package multierror
|
||||
|
||||
// Flatten flattens the given error, merging any *Errors together into
|
||||
// a single *Error.
|
||||
func Flatten(err error) error {
|
||||
// If it isn't an *Error, just return the error as-is
|
||||
if _, ok := err.(*Error); !ok {
|
||||
return err
|
||||
}
|
||||
|
||||
// Otherwise, make the result and flatten away!
|
||||
flatErr := new(Error)
|
||||
flatten(err, flatErr)
|
||||
return flatErr
|
||||
}
|
||||
|
||||
func flatten(err error, flatErr *Error) {
|
||||
switch err := err.(type) {
|
||||
case *Error:
|
||||
for _, e := range err.Errors {
|
||||
flatten(e, flatErr)
|
||||
}
|
||||
default:
|
||||
flatErr.Errors = append(flatErr.Errors, err)
|
||||
}
|
||||
}
|
27
vendor/github.com/hashicorp/go-multierror/format.go
generated
vendored
27
vendor/github.com/hashicorp/go-multierror/format.go
generated
vendored
@ -1,27 +0,0 @@
|
||||
package multierror
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrorFormatFunc is a function callback that is called by Error to
|
||||
// turn the list of errors into a string.
|
||||
type ErrorFormatFunc func([]error) string
|
||||
|
||||
// ListFormatFunc is a basic formatter that outputs the number of errors
|
||||
// that occurred along with a bullet point list of the errors.
|
||||
func ListFormatFunc(es []error) string {
|
||||
if len(es) == 1 {
|
||||
return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
|
||||
}
|
||||
|
||||
points := make([]string, len(es))
|
||||
for i, err := range es {
|
||||
points[i] = fmt.Sprintf("* %s", err)
|
||||
}
|
||||
|
||||
return fmt.Sprintf(
|
||||
"%d errors occurred:\n\t%s\n\n",
|
||||
len(es), strings.Join(points, "\n\t"))
|
||||
}
|
38
vendor/github.com/hashicorp/go-multierror/group.go
generated
vendored
38
vendor/github.com/hashicorp/go-multierror/group.go
generated
vendored
@ -1,38 +0,0 @@
|
||||
package multierror
|
||||
|
||||
import "sync"
|
||||
|
||||
// Group is a collection of goroutines which return errors that need to be
|
||||
// coalesced.
|
||||
type Group struct {
|
||||
mutex sync.Mutex
|
||||
err *Error
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// Go calls the given function in a new goroutine.
|
||||
//
|
||||
// If the function returns an error it is added to the group multierror which
|
||||
// is returned by Wait.
|
||||
func (g *Group) Go(f func() error) {
|
||||
g.wg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer g.wg.Done()
|
||||
|
||||
if err := f(); err != nil {
|
||||
g.mutex.Lock()
|
||||
g.err = Append(g.err, err)
|
||||
g.mutex.Unlock()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Wait blocks until all function calls from the Go method have returned, then
|
||||
// returns the multierror.
|
||||
func (g *Group) Wait() *Error {
|
||||
g.wg.Wait()
|
||||
g.mutex.Lock()
|
||||
defer g.mutex.Unlock()
|
||||
return g.err
|
||||
}
|
121
vendor/github.com/hashicorp/go-multierror/multierror.go
generated
vendored
121
vendor/github.com/hashicorp/go-multierror/multierror.go
generated
vendored
@ -1,121 +0,0 @@
|
||||
package multierror
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Error is an error type to track multiple errors. This is used to
|
||||
// accumulate errors in cases and return them as a single "error".
|
||||
type Error struct {
|
||||
Errors []error
|
||||
ErrorFormat ErrorFormatFunc
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
fn := e.ErrorFormat
|
||||
if fn == nil {
|
||||
fn = ListFormatFunc
|
||||
}
|
||||
|
||||
return fn(e.Errors)
|
||||
}
|
||||
|
||||
// ErrorOrNil returns an error interface if this Error represents
|
||||
// a list of errors, or returns nil if the list of errors is empty. This
|
||||
// function is useful at the end of accumulation to make sure that the value
|
||||
// returned represents the existence of errors.
|
||||
func (e *Error) ErrorOrNil() error {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
if len(e.Errors) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
func (e *Error) GoString() string {
|
||||
return fmt.Sprintf("*%#v", *e)
|
||||
}
|
||||
|
||||
// WrappedErrors returns the list of errors that this Error is wrapping. It is
|
||||
// an implementation of the errwrap.Wrapper interface so that multierror.Error
|
||||
// can be used with that library.
|
||||
//
|
||||
// This method is not safe to be called concurrently. Unlike accessing the
|
||||
// Errors field directly, this function also checks if the multierror is nil to
|
||||
// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface.
|
||||
func (e *Error) WrappedErrors() []error {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
return e.Errors
|
||||
}
|
||||
|
||||
// Unwrap returns an error from Error (or nil if there are no errors).
|
||||
// This error returned will further support Unwrap to get the next error,
|
||||
// etc. The order will match the order of Errors in the multierror.Error
|
||||
// at the time of calling.
|
||||
//
|
||||
// The resulting error supports errors.As/Is/Unwrap so you can continue
|
||||
// to use the stdlib errors package to introspect further.
|
||||
//
|
||||
// This will perform a shallow copy of the errors slice. Any errors appended
|
||||
// to this error after calling Unwrap will not be available until a new
|
||||
// Unwrap is called on the multierror.Error.
|
||||
func (e *Error) Unwrap() error {
|
||||
// If we have no errors then we do nothing
|
||||
if e == nil || len(e.Errors) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If we have exactly one error, we can just return that directly.
|
||||
if len(e.Errors) == 1 {
|
||||
return e.Errors[0]
|
||||
}
|
||||
|
||||
// Shallow copy the slice
|
||||
errs := make([]error, len(e.Errors))
|
||||
copy(errs, e.Errors)
|
||||
return chain(errs)
|
||||
}
|
||||
|
||||
// chain implements the interfaces necessary for errors.Is/As/Unwrap to
|
||||
// work in a deterministic way with multierror. A chain tracks a list of
|
||||
// errors while accounting for the current represented error. This lets
|
||||
// Is/As be meaningful.
|
||||
//
|
||||
// Unwrap returns the next error. In the cleanest form, Unwrap would return
|
||||
// the wrapped error here but we can't do that if we want to properly
|
||||
// get access to all the errors. Instead, users are recommended to use
|
||||
// Is/As to get the correct error type out.
|
||||
//
|
||||
// Precondition: []error is non-empty (len > 0)
|
||||
type chain []error
|
||||
|
||||
// Error implements the error interface
|
||||
func (e chain) Error() string {
|
||||
return e[0].Error()
|
||||
}
|
||||
|
||||
// Unwrap implements errors.Unwrap by returning the next error in the
|
||||
// chain or nil if there are no more errors.
|
||||
func (e chain) Unwrap() error {
|
||||
if len(e) == 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return e[1:]
|
||||
}
|
||||
|
||||
// As implements errors.As by attempting to map to the current value.
|
||||
func (e chain) As(target interface{}) bool {
|
||||
return errors.As(e[0], target)
|
||||
}
|
||||
|
||||
// Is implements errors.Is by comparing the current value directly.
|
||||
func (e chain) Is(target error) bool {
|
||||
return errors.Is(e[0], target)
|
||||
}
|
37
vendor/github.com/hashicorp/go-multierror/prefix.go
generated
vendored
37
vendor/github.com/hashicorp/go-multierror/prefix.go
generated
vendored
@ -1,37 +0,0 @@
|
||||
package multierror
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/errwrap"
|
||||
)
|
||||
|
||||
// Prefix is a helper function that will prefix some text
|
||||
// to the given error. If the error is a multierror.Error, then
|
||||
// it will be prefixed to each wrapped error.
|
||||
//
|
||||
// This is useful to use when appending multiple multierrors
|
||||
// together in order to give better scoping.
|
||||
func Prefix(err error, prefix string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
format := fmt.Sprintf("%s {{err}}", prefix)
|
||||
switch err := err.(type) {
|
||||
case *Error:
|
||||
// Typed nils can reach here, so initialize if we are nil
|
||||
if err == nil {
|
||||
err = new(Error)
|
||||
}
|
||||
|
||||
// Wrap each of the errors
|
||||
for i, e := range err.Errors {
|
||||
err.Errors[i] = errwrap.Wrapf(format, e)
|
||||
}
|
||||
|
||||
return err
|
||||
default:
|
||||
return errwrap.Wrapf(format, err)
|
||||
}
|
||||
}
|
16
vendor/github.com/hashicorp/go-multierror/sort.go
generated
vendored
16
vendor/github.com/hashicorp/go-multierror/sort.go
generated
vendored
@ -1,16 +0,0 @@
|
||||
package multierror
|
||||
|
||||
// Len implements sort.Interface function for length
|
||||
func (err Error) Len() int {
|
||||
return len(err.Errors)
|
||||
}
|
||||
|
||||
// Swap implements sort.Interface function for swapping elements
|
||||
func (err Error) Swap(i, j int) {
|
||||
err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i]
|
||||
}
|
||||
|
||||
// Less implements sort.Interface function for determining order
|
||||
func (err Error) Less(i, j int) bool {
|
||||
return err.Errors[i].Error() < err.Errors[j].Error()
|
||||
}
|
33
vendor/github.com/labstack/echo/v4/CHANGELOG.md
generated
vendored
33
vendor/github.com/labstack/echo/v4/CHANGELOG.md
generated
vendored
@ -1,5 +1,38 @@
|
||||
# Changelog
|
||||
|
||||
## v4.11.1 - 2023-07-16
|
||||
|
||||
**Fixes**
|
||||
|
||||
* Fix `Gzip` middleware not sending response code for no content responses (404, 301/302 redirects etc) [#2481](https://github.com/labstack/echo/pull/2481)
|
||||
|
||||
|
||||
## v4.11.0 - 2023-07-14
|
||||
|
||||
|
||||
**Fixes**
|
||||
|
||||
* Fixes the proxy middleware concurrency issue of calling the Next() proxy target on Round Robin Balancer [#2409](https://github.com/labstack/echo/pull/2409)
|
||||
* Fix `group.RouteNotFound` not working when group has attached middlewares [#2411](https://github.com/labstack/echo/pull/2411)
|
||||
* Fix global error handler return error message when message is an error [#2456](https://github.com/labstack/echo/pull/2456)
|
||||
* Do not use global timeNow variables [#2477](https://github.com/labstack/echo/pull/2477)
|
||||
|
||||
|
||||
**Enhancements**
|
||||
|
||||
* Added a optional config variable to disable centralized error handler in recovery middleware [#2410](https://github.com/labstack/echo/pull/2410)
|
||||
* refactor: use `strings.ReplaceAll` directly [#2424](https://github.com/labstack/echo/pull/2424)
|
||||
* Add support for Go1.20 `http.rwUnwrapper` to Response struct [#2425](https://github.com/labstack/echo/pull/2425)
|
||||
* Check whether is nil before invoking centralized error handling [#2429](https://github.com/labstack/echo/pull/2429)
|
||||
* Proper colon support in `echo.Reverse` method [#2416](https://github.com/labstack/echo/pull/2416)
|
||||
* Fix misuses of a vs an in documentation comments [#2436](https://github.com/labstack/echo/pull/2436)
|
||||
* Add link to slog.Handler library for Echo logging into README.md [#2444](https://github.com/labstack/echo/pull/2444)
|
||||
* In proxy middleware Support retries of failed proxy requests [#2414](https://github.com/labstack/echo/pull/2414)
|
||||
* gofmt fixes to comments [#2452](https://github.com/labstack/echo/pull/2452)
|
||||
* gzip response only if it exceeds a minimal length [#2267](https://github.com/labstack/echo/pull/2267)
|
||||
* Upgrade packages [#2475](https://github.com/labstack/echo/pull/2475)
|
||||
|
||||
|
||||
## v4.10.2 - 2023-02-22
|
||||
|
||||
**Security**
|
||||
|
1
vendor/github.com/labstack/echo/v4/README.md
generated
vendored
1
vendor/github.com/labstack/echo/v4/README.md
generated
vendored
@ -110,6 +110,7 @@ of middlewares in this list.
|
||||
| [github.com/swaggo/echo-swagger](https://github.com/swaggo/echo-swagger) | Automatically generate RESTful API documentation with [Swagger](https://swagger.io/) 2.0. |
|
||||
| [github.com/ziflex/lecho](https://github.com/ziflex/lecho) | [Zerolog](https://github.com/rs/zerolog) logging library wrapper for Echo logger interface. |
|
||||
| [github.com/brpaz/echozap](https://github.com/brpaz/echozap) | Uber´s [Zap](https://github.com/uber-go/zap) logging library wrapper for Echo logger interface. |
|
||||
| [github.com/samber/slog-echo](https://github.com/samber/slog-echo) | Go [slog](https://pkg.go.dev/golang.org/x/exp/slog) logging library wrapper for Echo logger interface. |
|
||||
| [github.com/darkweak/souin/plugins/echo](https://github.com/darkweak/souin/tree/master/plugins/echo) | HTTP cache system based on [Souin](https://github.com/darkweak/souin) to automatically get your endpoints cached. It supports some distributed and non-distributed storage systems depending your needs. |
|
||||
| [github.com/mikestefanello/pagoda](https://github.com/mikestefanello/pagoda) | Rapid, easy full-stack web development starter kit built with Echo. |
|
||||
| [github.com/go-woo/protoc-gen-echo](https://github.com/go-woo/protoc-gen-echo) | ProtoBuf generate Echo server side code |
|
||||
|
2
vendor/github.com/labstack/echo/v4/bind.go
generated
vendored
2
vendor/github.com/labstack/echo/v4/bind.go
generated
vendored
@ -114,7 +114,7 @@ func (b *DefaultBinder) Bind(i interface{}, c Context) (err error) {
|
||||
// Only bind query parameters for GET/DELETE/HEAD to avoid unexpected behavior with destination struct binding from body.
|
||||
// For example a request URL `&id=1&lang=en` with body `{"id":100,"lang":"de"}` would lead to precedence issues.
|
||||
// The HTTP method check restores pre-v4.1.11 behavior to avoid these problems (see issue #1670)
|
||||
method := c.Request().Method
|
||||
method := c.Request().Method
|
||||
if method == http.MethodGet || method == http.MethodDelete || method == http.MethodHead {
|
||||
if err = b.BindQueryParams(c, i); err != nil {
|
||||
return err
|
||||
|
16
vendor/github.com/labstack/echo/v4/binder.go
generated
vendored
16
vendor/github.com/labstack/echo/v4/binder.go
generated
vendored
@ -1236,7 +1236,7 @@ func (b *ValueBinder) durations(sourceParam string, values []string, dest *[]tim
|
||||
// Example: 1609180603 bind to 2020-12-28T18:36:43.000000000+00:00
|
||||
//
|
||||
// Note:
|
||||
// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
|
||||
// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
|
||||
func (b *ValueBinder) UnixTime(sourceParam string, dest *time.Time) *ValueBinder {
|
||||
return b.unixTime(sourceParam, dest, false, time.Second)
|
||||
}
|
||||
@ -1247,7 +1247,7 @@ func (b *ValueBinder) UnixTime(sourceParam string, dest *time.Time) *ValueBinder
|
||||
// Example: 1609180603 bind to 2020-12-28T18:36:43.000000000+00:00
|
||||
//
|
||||
// Note:
|
||||
// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
|
||||
// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
|
||||
func (b *ValueBinder) MustUnixTime(sourceParam string, dest *time.Time) *ValueBinder {
|
||||
return b.unixTime(sourceParam, dest, true, time.Second)
|
||||
}
|
||||
@ -1257,7 +1257,7 @@ func (b *ValueBinder) MustUnixTime(sourceParam string, dest *time.Time) *ValueBi
|
||||
// Example: 1647184410140 bind to 2022-03-13T15:13:30.140000000+00:00
|
||||
//
|
||||
// Note:
|
||||
// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
|
||||
// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
|
||||
func (b *ValueBinder) UnixTimeMilli(sourceParam string, dest *time.Time) *ValueBinder {
|
||||
return b.unixTime(sourceParam, dest, false, time.Millisecond)
|
||||
}
|
||||
@ -1268,7 +1268,7 @@ func (b *ValueBinder) UnixTimeMilli(sourceParam string, dest *time.Time) *ValueB
|
||||
// Example: 1647184410140 bind to 2022-03-13T15:13:30.140000000+00:00
|
||||
//
|
||||
// Note:
|
||||
// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
|
||||
// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
|
||||
func (b *ValueBinder) MustUnixTimeMilli(sourceParam string, dest *time.Time) *ValueBinder {
|
||||
return b.unixTime(sourceParam, dest, true, time.Millisecond)
|
||||
}
|
||||
@ -1280,8 +1280,8 @@ func (b *ValueBinder) MustUnixTimeMilli(sourceParam string, dest *time.Time) *Va
|
||||
// Example: 999999999 binds to 1970-01-01T00:00:00.999999999+00:00
|
||||
//
|
||||
// Note:
|
||||
// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
|
||||
// * Javascript's Number type only has about 53 bits of precision (Number.MAX_SAFE_INTEGER = 9007199254740991). Compare it to 1609180603123456789 in example.
|
||||
// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
|
||||
// - Javascript's Number type only has about 53 bits of precision (Number.MAX_SAFE_INTEGER = 9007199254740991). Compare it to 1609180603123456789 in example.
|
||||
func (b *ValueBinder) UnixTimeNano(sourceParam string, dest *time.Time) *ValueBinder {
|
||||
return b.unixTime(sourceParam, dest, false, time.Nanosecond)
|
||||
}
|
||||
@ -1294,8 +1294,8 @@ func (b *ValueBinder) UnixTimeNano(sourceParam string, dest *time.Time) *ValueBi
|
||||
// Example: 999999999 binds to 1970-01-01T00:00:00.999999999+00:00
|
||||
//
|
||||
// Note:
|
||||
// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
|
||||
// * Javascript's Number type only has about 53 bits of precision (Number.MAX_SAFE_INTEGER = 9007199254740991). Compare it to 1609180603123456789 in example.
|
||||
// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
|
||||
// - Javascript's Number type only has about 53 bits of precision (Number.MAX_SAFE_INTEGER = 9007199254740991). Compare it to 1609180603123456789 in example.
|
||||
func (b *ValueBinder) MustUnixTimeNano(sourceParam string, dest *time.Time) *ValueBinder {
|
||||
return b.unixTime(sourceParam, dest, true, time.Nanosecond)
|
||||
}
|
||||
|
4
vendor/github.com/labstack/echo/v4/context.go
generated
vendored
4
vendor/github.com/labstack/echo/v4/context.go
generated
vendored
@ -100,8 +100,8 @@ type (
|
||||
// Set saves data in the context.
|
||||
Set(key string, val interface{})
|
||||
|
||||
// Bind binds the request body into provided type `i`. The default binder
|
||||
// does it based on Content-Type header.
|
||||
// Bind binds path params, query params and the request body into provided type `i`. The default binder
|
||||
// binds body based on Content-Type header.
|
||||
Bind(i interface{}) error
|
||||
|
||||
// Validate validates provided `i`. It is usually called after `Context#Bind()`.
|
||||
|
13
vendor/github.com/labstack/echo/v4/echo.go
generated
vendored
13
vendor/github.com/labstack/echo/v4/echo.go
generated
vendored
@ -39,6 +39,7 @@ package echo
|
||||
import (
|
||||
stdContext "context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -258,7 +259,7 @@ const (
|
||||
|
||||
const (
|
||||
// Version of Echo
|
||||
Version = "4.10.2"
|
||||
Version = "4.11.1"
|
||||
website = "https://echo.labstack.com"
|
||||
// http://patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Echo
|
||||
banner = `
|
||||
@ -438,12 +439,18 @@ func (e *Echo) DefaultHTTPErrorHandler(err error, c Context) {
|
||||
// Issue #1426
|
||||
code := he.Code
|
||||
message := he.Message
|
||||
if m, ok := he.Message.(string); ok {
|
||||
|
||||
switch m := he.Message.(type) {
|
||||
case string:
|
||||
if e.Debug {
|
||||
message = Map{"message": m, "error": err.Error()}
|
||||
} else {
|
||||
message = Map{"message": m}
|
||||
}
|
||||
case json.Marshaler:
|
||||
// do nothing - this type knows how to format itself to JSON
|
||||
case error:
|
||||
message = Map{"message": m.Error()}
|
||||
}
|
||||
|
||||
// Send response
|
||||
@ -614,7 +621,7 @@ func (e *Echo) URL(h HandlerFunc, params ...interface{}) string {
|
||||
return e.URI(h, params...)
|
||||
}
|
||||
|
||||
// Reverse generates an URL from route name and provided parameters.
|
||||
// Reverse generates a URL from route name and provided parameters.
|
||||
func (e *Echo) Reverse(name string, params ...interface{}) string {
|
||||
return e.router.Reverse(name, params...)
|
||||
}
|
||||
|
10
vendor/github.com/labstack/echo/v4/group.go
generated
vendored
10
vendor/github.com/labstack/echo/v4/group.go
generated
vendored
@ -23,10 +23,12 @@ func (g *Group) Use(middleware ...MiddlewareFunc) {
|
||||
if len(g.middleware) == 0 {
|
||||
return
|
||||
}
|
||||
// Allow all requests to reach the group as they might get dropped if router
|
||||
// doesn't find a match, making none of the group middleware process.
|
||||
g.Any("", NotFoundHandler)
|
||||
g.Any("/*", NotFoundHandler)
|
||||
// group level middlewares are different from Echo `Pre` and `Use` middlewares (those are global). Group level middlewares
|
||||
// are only executed if they are added to the Router with route.
|
||||
// So we register catch all route (404 is a safe way to emulate route match) for this group and now during routing the
|
||||
// Router would find route to match our request path and therefore guarantee the middleware(s) will get executed.
|
||||
g.RouteNotFound("", NotFoundHandler)
|
||||
g.RouteNotFound("/*", NotFoundHandler)
|
||||
}
|
||||
|
||||
// CONNECT implements `Echo#CONNECT()` for sub-routes within the Group.
|
||||
|
2
vendor/github.com/labstack/echo/v4/middleware/basic_auth.go
generated
vendored
2
vendor/github.com/labstack/echo/v4/middleware/basic_auth.go
generated
vendored
@ -2,9 +2,9 @@ package middleware
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"net/http"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
)
|
||||
|
97
vendor/github.com/labstack/echo/v4/middleware/compress.go
generated
vendored
97
vendor/github.com/labstack/echo/v4/middleware/compress.go
generated
vendored
@ -2,6 +2,7 @@ package middleware
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"net"
|
||||
@ -21,12 +22,30 @@ type (
|
||||
// Gzip compression level.
|
||||
// Optional. Default value -1.
|
||||
Level int `yaml:"level"`
|
||||
|
||||
// Length threshold before gzip compression is applied.
|
||||
// Optional. Default value 0.
|
||||
//
|
||||
// Most of the time you will not need to change the default. Compressing
|
||||
// a short response might increase the transmitted data because of the
|
||||
// gzip format overhead. Compressing the response will also consume CPU
|
||||
// and time on the server and the client (for decompressing). Depending on
|
||||
// your use case such a threshold might be useful.
|
||||
//
|
||||
// See also:
|
||||
// https://webmasters.stackexchange.com/questions/31750/what-is-recommended-minimum-object-size-for-gzip-performance-benefits
|
||||
MinLength int
|
||||
}
|
||||
|
||||
gzipResponseWriter struct {
|
||||
io.Writer
|
||||
http.ResponseWriter
|
||||
wroteBody bool
|
||||
wroteHeader bool
|
||||
wroteBody bool
|
||||
minLength int
|
||||
minLengthExceeded bool
|
||||
buffer *bytes.Buffer
|
||||
code int
|
||||
}
|
||||
)
|
||||
|
||||
@ -37,8 +56,9 @@ const (
|
||||
var (
|
||||
// DefaultGzipConfig is the default Gzip middleware config.
|
||||
DefaultGzipConfig = GzipConfig{
|
||||
Skipper: DefaultSkipper,
|
||||
Level: -1,
|
||||
Skipper: DefaultSkipper,
|
||||
Level: -1,
|
||||
MinLength: 0,
|
||||
}
|
||||
)
|
||||
|
||||
@ -58,8 +78,12 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc {
|
||||
if config.Level == 0 {
|
||||
config.Level = DefaultGzipConfig.Level
|
||||
}
|
||||
if config.MinLength < 0 {
|
||||
config.MinLength = DefaultGzipConfig.MinLength
|
||||
}
|
||||
|
||||
pool := gzipCompressPool(config)
|
||||
bpool := bufferPool()
|
||||
|
||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
@ -70,7 +94,6 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc {
|
||||
res := c.Response()
|
||||
res.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
|
||||
if strings.Contains(c.Request().Header.Get(echo.HeaderAcceptEncoding), gzipScheme) {
|
||||
res.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
|
||||
i := pool.Get()
|
||||
w, ok := i.(*gzip.Writer)
|
||||
if !ok {
|
||||
@ -78,19 +101,38 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc {
|
||||
}
|
||||
rw := res.Writer
|
||||
w.Reset(rw)
|
||||
grw := &gzipResponseWriter{Writer: w, ResponseWriter: rw}
|
||||
|
||||
buf := bpool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
|
||||
grw := &gzipResponseWriter{Writer: w, ResponseWriter: rw, minLength: config.MinLength, buffer: buf}
|
||||
defer func() {
|
||||
// There are different reasons for cases when we have not yet written response to the client and now need to do so.
|
||||
// a) handler response had only response code and no response body (ala 404 or redirects etc). Response code need to be written now.
|
||||
// b) body is shorter than our minimum length threshold and being buffered currently and needs to be written
|
||||
if !grw.wroteBody {
|
||||
if res.Header().Get(echo.HeaderContentEncoding) == gzipScheme {
|
||||
res.Header().Del(echo.HeaderContentEncoding)
|
||||
}
|
||||
if grw.wroteHeader {
|
||||
rw.WriteHeader(grw.code)
|
||||
}
|
||||
// We have to reset response to it's pristine state when
|
||||
// nothing is written to body or error is returned.
|
||||
// See issue #424, #407.
|
||||
res.Writer = rw
|
||||
w.Reset(io.Discard)
|
||||
} else if !grw.minLengthExceeded {
|
||||
// Write uncompressed response
|
||||
res.Writer = rw
|
||||
if grw.wroteHeader {
|
||||
grw.ResponseWriter.WriteHeader(grw.code)
|
||||
}
|
||||
grw.buffer.WriteTo(rw)
|
||||
w.Reset(io.Discard)
|
||||
}
|
||||
w.Close()
|
||||
bpool.Put(buf)
|
||||
pool.Put(w)
|
||||
}()
|
||||
res.Writer = grw
|
||||
@ -102,7 +144,11 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc {
|
||||
|
||||
func (w *gzipResponseWriter) WriteHeader(code int) {
|
||||
w.Header().Del(echo.HeaderContentLength) // Issue #444
|
||||
w.ResponseWriter.WriteHeader(code)
|
||||
|
||||
w.wroteHeader = true
|
||||
|
||||
// Delay writing of the header until we know if we'll actually compress the response
|
||||
w.code = code
|
||||
}
|
||||
|
||||
func (w *gzipResponseWriter) Write(b []byte) (int, error) {
|
||||
@ -110,10 +156,40 @@ func (w *gzipResponseWriter) Write(b []byte) (int, error) {
|
||||
w.Header().Set(echo.HeaderContentType, http.DetectContentType(b))
|
||||
}
|
||||
w.wroteBody = true
|
||||
|
||||
if !w.minLengthExceeded {
|
||||
n, err := w.buffer.Write(b)
|
||||
|
||||
if w.buffer.Len() >= w.minLength {
|
||||
w.minLengthExceeded = true
|
||||
|
||||
// The minimum length is exceeded, add Content-Encoding header and write the header
|
||||
w.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
|
||||
if w.wroteHeader {
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
}
|
||||
|
||||
return w.Writer.Write(w.buffer.Bytes())
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
return w.Writer.Write(b)
|
||||
}
|
||||
|
||||
func (w *gzipResponseWriter) Flush() {
|
||||
if !w.minLengthExceeded {
|
||||
// Enforce compression because we will not know how much more data will come
|
||||
w.minLengthExceeded = true
|
||||
w.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
|
||||
if w.wroteHeader {
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
}
|
||||
|
||||
w.Writer.Write(w.buffer.Bytes())
|
||||
}
|
||||
|
||||
w.Writer.(*gzip.Writer).Flush()
|
||||
if flusher, ok := w.ResponseWriter.(http.Flusher); ok {
|
||||
flusher.Flush()
|
||||
@ -142,3 +218,12 @@ func gzipCompressPool(config GzipConfig) sync.Pool {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func bufferPool() sync.Pool {
|
||||
return sync.Pool{
|
||||
New: func() interface{} {
|
||||
b := &bytes.Buffer{}
|
||||
return b
|
||||
},
|
||||
}
|
||||
}
|
||||
|
4
vendor/github.com/labstack/echo/v4/middleware/cors.go
generated
vendored
4
vendor/github.com/labstack/echo/v4/middleware/cors.go
generated
vendored
@ -150,8 +150,8 @@ func CORSWithConfig(config CORSConfig) echo.MiddlewareFunc {
|
||||
allowOriginPatterns := []string{}
|
||||
for _, origin := range config.AllowOrigins {
|
||||
pattern := regexp.QuoteMeta(origin)
|
||||
pattern = strings.Replace(pattern, "\\*", ".*", -1)
|
||||
pattern = strings.Replace(pattern, "\\?", ".", -1)
|
||||
pattern = strings.ReplaceAll(pattern, "\\*", ".*")
|
||||
pattern = strings.ReplaceAll(pattern, "\\?", ".")
|
||||
pattern = "^" + pattern + "$"
|
||||
allowOriginPatterns = append(allowOriginPatterns, pattern)
|
||||
}
|
||||
|
6
vendor/github.com/labstack/echo/v4/middleware/decompress.go
generated
vendored
6
vendor/github.com/labstack/echo/v4/middleware/decompress.go
generated
vendored
@ -20,7 +20,7 @@ type (
|
||||
}
|
||||
)
|
||||
|
||||
//GZIPEncoding content-encoding header if set to "gzip", decompress body contents.
|
||||
// GZIPEncoding content-encoding header if set to "gzip", decompress body contents.
|
||||
const GZIPEncoding string = "gzip"
|
||||
|
||||
// Decompressor is used to get the sync.Pool used by the middleware to get Gzip readers
|
||||
@ -44,12 +44,12 @@ func (d *DefaultGzipDecompressPool) gzipDecompressPool() sync.Pool {
|
||||
return sync.Pool{New: func() interface{} { return new(gzip.Reader) }}
|
||||
}
|
||||
|
||||
//Decompress decompresses request body based if content encoding type is set to "gzip" with default config
|
||||
// Decompress decompresses request body based if content encoding type is set to "gzip" with default config
|
||||
func Decompress() echo.MiddlewareFunc {
|
||||
return DecompressWithConfig(DefaultDecompressConfig)
|
||||
}
|
||||
|
||||
//DecompressWithConfig decompresses request body based if content encoding type is set to "gzip" with config
|
||||
// DecompressWithConfig decompresses request body based if content encoding type is set to "gzip" with config
|
||||
func DecompressWithConfig(config DecompressConfig) echo.MiddlewareFunc {
|
||||
// Defaults
|
||||
if config.Skipper == nil {
|
||||
|
4
vendor/github.com/labstack/echo/v4/middleware/middleware.go
generated
vendored
4
vendor/github.com/labstack/echo/v4/middleware/middleware.go
generated
vendored
@ -38,9 +38,9 @@ func rewriteRulesRegex(rewrite map[string]string) map[*regexp.Regexp]string {
|
||||
rulesRegex := map[*regexp.Regexp]string{}
|
||||
for k, v := range rewrite {
|
||||
k = regexp.QuoteMeta(k)
|
||||
k = strings.Replace(k, `\*`, "(.*?)", -1)
|
||||
k = strings.ReplaceAll(k, `\*`, "(.*?)")
|
||||
if strings.HasPrefix(k, `\^`) {
|
||||
k = strings.Replace(k, `\^`, "^", -1)
|
||||
k = strings.ReplaceAll(k, `\^`, "^")
|
||||
}
|
||||
k = k + "$"
|
||||
rulesRegex[regexp.MustCompile(k)] = v
|
||||
|
213
vendor/github.com/labstack/echo/v4/middleware/proxy.go
generated
vendored
213
vendor/github.com/labstack/echo/v4/middleware/proxy.go
generated
vendored
@ -12,7 +12,6 @@ import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
@ -30,6 +29,33 @@ type (
|
||||
// Required.
|
||||
Balancer ProxyBalancer
|
||||
|
||||
// RetryCount defines the number of times a failed proxied request should be retried
|
||||
// using the next available ProxyTarget. Defaults to 0, meaning requests are never retried.
|
||||
RetryCount int
|
||||
|
||||
// RetryFilter defines a function used to determine if a failed request to a
|
||||
// ProxyTarget should be retried. The RetryFilter will only be called when the number
|
||||
// of previous retries is less than RetryCount. If the function returns true, the
|
||||
// request will be retried. The provided error indicates the reason for the request
|
||||
// failure. When the ProxyTarget is unavailable, the error will be an instance of
|
||||
// echo.HTTPError with a Code of http.StatusBadGateway. In all other cases, the error
|
||||
// will indicate an internal error in the Proxy middleware. When a RetryFilter is not
|
||||
// specified, all requests that fail with http.StatusBadGateway will be retried. A custom
|
||||
// RetryFilter can be provided to only retry specific requests. Note that RetryFilter is
|
||||
// only called when the request to the target fails, or an internal error in the Proxy
|
||||
// middleware has occurred. Successful requests that return a non-200 response code cannot
|
||||
// be retried.
|
||||
RetryFilter func(c echo.Context, e error) bool
|
||||
|
||||
// ErrorHandler defines a function which can be used to return custom errors from
|
||||
// the Proxy middleware. ErrorHandler is only invoked when there has been
|
||||
// either an internal error in the Proxy middleware or the ProxyTarget is
|
||||
// unavailable. Due to the way requests are proxied, ErrorHandler is not invoked
|
||||
// when a ProxyTarget returns a non-200 response. In these cases, the response
|
||||
// is already written so errors cannot be modified. ErrorHandler is only
|
||||
// invoked after all retry attempts have been exhausted.
|
||||
ErrorHandler func(c echo.Context, err error) error
|
||||
|
||||
// Rewrite defines URL path rewrite rules. The values captured in asterisk can be
|
||||
// retrieved by index e.g. $1, $2 and so on.
|
||||
// Examples:
|
||||
@ -72,26 +98,28 @@ type (
|
||||
Next(echo.Context) *ProxyTarget
|
||||
}
|
||||
|
||||
// TargetProvider defines an interface that gives the opportunity for balancer to return custom errors when selecting target.
|
||||
// TargetProvider defines an interface that gives the opportunity for balancer
|
||||
// to return custom errors when selecting target.
|
||||
TargetProvider interface {
|
||||
NextTarget(echo.Context) (*ProxyTarget, error)
|
||||
}
|
||||
|
||||
commonBalancer struct {
|
||||
targets []*ProxyTarget
|
||||
mutex sync.RWMutex
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// RandomBalancer implements a random load balancing technique.
|
||||
randomBalancer struct {
|
||||
*commonBalancer
|
||||
commonBalancer
|
||||
random *rand.Rand
|
||||
}
|
||||
|
||||
// RoundRobinBalancer implements a round-robin load balancing technique.
|
||||
roundRobinBalancer struct {
|
||||
*commonBalancer
|
||||
i uint32
|
||||
commonBalancer
|
||||
// tracking the index on `targets` slice for the next `*ProxyTarget` to be used
|
||||
i int
|
||||
}
|
||||
)
|
||||
|
||||
@ -107,14 +135,14 @@ func proxyRaw(t *ProxyTarget, c echo.Context) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
in, _, err := c.Response().Hijack()
|
||||
if err != nil {
|
||||
c.Set("_error", fmt.Sprintf("proxy raw, hijack error=%v, url=%s", t.URL, err))
|
||||
c.Set("_error", fmt.Errorf("proxy raw, hijack error=%w, url=%s", err, t.URL))
|
||||
return
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
out, err := net.Dial("tcp", t.URL.Host)
|
||||
if err != nil {
|
||||
c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, dial error=%v, url=%s", t.URL, err)))
|
||||
c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, dial error=%v, url=%s", err, t.URL)))
|
||||
return
|
||||
}
|
||||
defer out.Close()
|
||||
@ -122,7 +150,7 @@ func proxyRaw(t *ProxyTarget, c echo.Context) http.Handler {
|
||||
// Write header
|
||||
err = r.Write(out)
|
||||
if err != nil {
|
||||
c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, request header copy error=%v, url=%s", t.URL, err)))
|
||||
c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, request header copy error=%v, url=%s", err, t.URL)))
|
||||
return
|
||||
}
|
||||
|
||||
@ -136,39 +164,44 @@ func proxyRaw(t *ProxyTarget, c echo.Context) http.Handler {
|
||||
go cp(in, out)
|
||||
err = <-errCh
|
||||
if err != nil && err != io.EOF {
|
||||
c.Set("_error", fmt.Errorf("proxy raw, copy body error=%v, url=%s", t.URL, err))
|
||||
c.Set("_error", fmt.Errorf("proxy raw, copy body error=%w, url=%s", err, t.URL))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// NewRandomBalancer returns a random proxy balancer.
|
||||
func NewRandomBalancer(targets []*ProxyTarget) ProxyBalancer {
|
||||
b := &randomBalancer{commonBalancer: new(commonBalancer)}
|
||||
b := randomBalancer{}
|
||||
b.targets = targets
|
||||
return b
|
||||
b.random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
|
||||
return &b
|
||||
}
|
||||
|
||||
// NewRoundRobinBalancer returns a round-robin proxy balancer.
|
||||
func NewRoundRobinBalancer(targets []*ProxyTarget) ProxyBalancer {
|
||||
b := &roundRobinBalancer{commonBalancer: new(commonBalancer)}
|
||||
b := roundRobinBalancer{}
|
||||
b.targets = targets
|
||||
return b
|
||||
return &b
|
||||
}
|
||||
|
||||
// AddTarget adds an upstream target to the list.
|
||||
// AddTarget adds an upstream target to the list and returns `true`.
|
||||
//
|
||||
// However, if a target with the same name already exists then the operation is aborted returning `false`.
|
||||
func (b *commonBalancer) AddTarget(target *ProxyTarget) bool {
|
||||
b.mutex.Lock()
|
||||
defer b.mutex.Unlock()
|
||||
for _, t := range b.targets {
|
||||
if t.Name == target.Name {
|
||||
return false
|
||||
}
|
||||
}
|
||||
b.mutex.Lock()
|
||||
defer b.mutex.Unlock()
|
||||
b.targets = append(b.targets, target)
|
||||
return true
|
||||
}
|
||||
|
||||
// RemoveTarget removes an upstream target from the list.
|
||||
// RemoveTarget removes an upstream target from the list by name.
|
||||
//
|
||||
// Returns `true` on success, `false` if no target with the name is found.
|
||||
func (b *commonBalancer) RemoveTarget(name string) bool {
|
||||
b.mutex.Lock()
|
||||
defer b.mutex.Unlock()
|
||||
@ -182,21 +215,58 @@ func (b *commonBalancer) RemoveTarget(name string) bool {
|
||||
}
|
||||
|
||||
// Next randomly returns an upstream target.
|
||||
//
|
||||
// Note: `nil` is returned in case upstream target list is empty.
|
||||
func (b *randomBalancer) Next(c echo.Context) *ProxyTarget {
|
||||
if b.random == nil {
|
||||
b.random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
|
||||
b.mutex.Lock()
|
||||
defer b.mutex.Unlock()
|
||||
if len(b.targets) == 0 {
|
||||
return nil
|
||||
} else if len(b.targets) == 1 {
|
||||
return b.targets[0]
|
||||
}
|
||||
b.mutex.RLock()
|
||||
defer b.mutex.RUnlock()
|
||||
return b.targets[b.random.Intn(len(b.targets))]
|
||||
}
|
||||
|
||||
// Next returns an upstream target using round-robin technique.
|
||||
// Next returns an upstream target using round-robin technique. In the case
|
||||
// where a previously failed request is being retried, the round-robin
|
||||
// balancer will attempt to use the next target relative to the original
|
||||
// request. If the list of targets held by the balancer is modified while a
|
||||
// failed request is being retried, it is possible that the balancer will
|
||||
// return the original failed target.
|
||||
//
|
||||
// Note: `nil` is returned in case upstream target list is empty.
|
||||
func (b *roundRobinBalancer) Next(c echo.Context) *ProxyTarget {
|
||||
b.i = b.i % uint32(len(b.targets))
|
||||
t := b.targets[b.i]
|
||||
atomic.AddUint32(&b.i, 1)
|
||||
return t
|
||||
b.mutex.Lock()
|
||||
defer b.mutex.Unlock()
|
||||
if len(b.targets) == 0 {
|
||||
return nil
|
||||
} else if len(b.targets) == 1 {
|
||||
return b.targets[0]
|
||||
}
|
||||
|
||||
var i int
|
||||
const lastIdxKey = "_round_robin_last_index"
|
||||
// This request is a retry, start from the index of the previous
|
||||
// target to ensure we don't attempt to retry the request with
|
||||
// the same failed target
|
||||
if c.Get(lastIdxKey) != nil {
|
||||
i = c.Get(lastIdxKey).(int)
|
||||
i++
|
||||
if i >= len(b.targets) {
|
||||
i = 0
|
||||
}
|
||||
} else {
|
||||
// This is a first time request, use the global index
|
||||
if b.i >= len(b.targets) {
|
||||
b.i = 0
|
||||
}
|
||||
i = b.i
|
||||
b.i++
|
||||
}
|
||||
|
||||
c.Set(lastIdxKey, i)
|
||||
return b.targets[i]
|
||||
}
|
||||
|
||||
// Proxy returns a Proxy middleware.
|
||||
@ -211,14 +281,26 @@ func Proxy(balancer ProxyBalancer) echo.MiddlewareFunc {
|
||||
// ProxyWithConfig returns a Proxy middleware with config.
|
||||
// See: `Proxy()`
|
||||
func ProxyWithConfig(config ProxyConfig) echo.MiddlewareFunc {
|
||||
if config.Balancer == nil {
|
||||
panic("echo: proxy middleware requires balancer")
|
||||
}
|
||||
// Defaults
|
||||
if config.Skipper == nil {
|
||||
config.Skipper = DefaultProxyConfig.Skipper
|
||||
}
|
||||
if config.Balancer == nil {
|
||||
panic("echo: proxy middleware requires balancer")
|
||||
if config.RetryFilter == nil {
|
||||
config.RetryFilter = func(c echo.Context, e error) bool {
|
||||
if httpErr, ok := e.(*echo.HTTPError); ok {
|
||||
return httpErr.Code == http.StatusBadGateway
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
if config.ErrorHandler == nil {
|
||||
config.ErrorHandler = func(c echo.Context, err error) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if config.Rewrite != nil {
|
||||
if config.RegexRewrite == nil {
|
||||
config.RegexRewrite = make(map[*regexp.Regexp]string)
|
||||
@ -229,28 +311,17 @@ func ProxyWithConfig(config ProxyConfig) echo.MiddlewareFunc {
|
||||
}
|
||||
|
||||
provider, isTargetProvider := config.Balancer.(TargetProvider)
|
||||
|
||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) (err error) {
|
||||
return func(c echo.Context) error {
|
||||
if config.Skipper(c) {
|
||||
return next(c)
|
||||
}
|
||||
|
||||
req := c.Request()
|
||||
res := c.Response()
|
||||
|
||||
var tgt *ProxyTarget
|
||||
if isTargetProvider {
|
||||
tgt, err = provider.NextTarget(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
tgt = config.Balancer.Next(c)
|
||||
}
|
||||
c.Set(config.ContextKey, tgt)
|
||||
|
||||
if err := rewriteURL(config.RegexRewrite, req); err != nil {
|
||||
return err
|
||||
return config.ErrorHandler(c, err)
|
||||
}
|
||||
|
||||
// Fix header
|
||||
@ -266,19 +337,49 @@ func ProxyWithConfig(config ProxyConfig) echo.MiddlewareFunc {
|
||||
req.Header.Set(echo.HeaderXForwardedFor, c.RealIP())
|
||||
}
|
||||
|
||||
// Proxy
|
||||
switch {
|
||||
case c.IsWebSocket():
|
||||
proxyRaw(tgt, c).ServeHTTP(res, req)
|
||||
case req.Header.Get(echo.HeaderAccept) == "text/event-stream":
|
||||
default:
|
||||
proxyHTTP(tgt, c, config).ServeHTTP(res, req)
|
||||
}
|
||||
if e, ok := c.Get("_error").(error); ok {
|
||||
err = e
|
||||
}
|
||||
retries := config.RetryCount
|
||||
for {
|
||||
var tgt *ProxyTarget
|
||||
var err error
|
||||
if isTargetProvider {
|
||||
tgt, err = provider.NextTarget(c)
|
||||
if err != nil {
|
||||
return config.ErrorHandler(c, err)
|
||||
}
|
||||
} else {
|
||||
tgt = config.Balancer.Next(c)
|
||||
}
|
||||
|
||||
return
|
||||
c.Set(config.ContextKey, tgt)
|
||||
|
||||
//If retrying a failed request, clear any previous errors from
|
||||
//context here so that balancers have the option to check for
|
||||
//errors that occurred using previous target
|
||||
if retries < config.RetryCount {
|
||||
c.Set("_error", nil)
|
||||
}
|
||||
|
||||
// Proxy
|
||||
switch {
|
||||
case c.IsWebSocket():
|
||||
proxyRaw(tgt, c).ServeHTTP(res, req)
|
||||
case req.Header.Get(echo.HeaderAccept) == "text/event-stream":
|
||||
default:
|
||||
proxyHTTP(tgt, c, config).ServeHTTP(res, req)
|
||||
}
|
||||
|
||||
err, hasError := c.Get("_error").(error)
|
||||
if !hasError {
|
||||
return nil
|
||||
}
|
||||
|
||||
retry := retries > 0 && config.RetryFilter(c, err)
|
||||
if !retry {
|
||||
return config.ErrorHandler(c, err)
|
||||
}
|
||||
|
||||
retries--
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
21
vendor/github.com/labstack/echo/v4/middleware/rate_limiter.go
generated
vendored
21
vendor/github.com/labstack/echo/v4/middleware/rate_limiter.go
generated
vendored
@ -160,6 +160,8 @@ type (
|
||||
burst int
|
||||
expiresIn time.Duration
|
||||
lastCleanup time.Time
|
||||
|
||||
timeNow func() time.Time
|
||||
}
|
||||
// Visitor signifies a unique user's limiter details
|
||||
Visitor struct {
|
||||
@ -219,7 +221,8 @@ func NewRateLimiterMemoryStoreWithConfig(config RateLimiterMemoryStoreConfig) (s
|
||||
store.burst = int(config.Rate)
|
||||
}
|
||||
store.visitors = make(map[string]*Visitor)
|
||||
store.lastCleanup = now()
|
||||
store.timeNow = time.Now
|
||||
store.lastCleanup = store.timeNow()
|
||||
return
|
||||
}
|
||||
|
||||
@ -244,12 +247,13 @@ func (store *RateLimiterMemoryStore) Allow(identifier string) (bool, error) {
|
||||
limiter.Limiter = rate.NewLimiter(store.rate, store.burst)
|
||||
store.visitors[identifier] = limiter
|
||||
}
|
||||
limiter.lastSeen = now()
|
||||
if now().Sub(store.lastCleanup) > store.expiresIn {
|
||||
now := store.timeNow()
|
||||
limiter.lastSeen = now
|
||||
if now.Sub(store.lastCleanup) > store.expiresIn {
|
||||
store.cleanupStaleVisitors()
|
||||
}
|
||||
store.mutex.Unlock()
|
||||
return limiter.AllowN(now(), 1), nil
|
||||
return limiter.AllowN(store.timeNow(), 1), nil
|
||||
}
|
||||
|
||||
/*
|
||||
@ -258,14 +262,9 @@ of users who haven't visited again after the configured expiry time has elapsed
|
||||
*/
|
||||
func (store *RateLimiterMemoryStore) cleanupStaleVisitors() {
|
||||
for id, visitor := range store.visitors {
|
||||
if now().Sub(visitor.lastSeen) > store.expiresIn {
|
||||
if store.timeNow().Sub(visitor.lastSeen) > store.expiresIn {
|
||||
delete(store.visitors, id)
|
||||
}
|
||||
}
|
||||
store.lastCleanup = now()
|
||||
store.lastCleanup = store.timeNow()
|
||||
}
|
||||
|
||||
/*
|
||||
actual time method which is mocked in test file
|
||||
*/
|
||||
var now = time.Now
|
||||
|
28
vendor/github.com/labstack/echo/v4/middleware/recover.go
generated
vendored
28
vendor/github.com/labstack/echo/v4/middleware/recover.go
generated
vendored
@ -37,19 +37,26 @@ type (
|
||||
|
||||
// LogErrorFunc defines a function for custom logging in the middleware.
|
||||
// If it's set you don't need to provide LogLevel for config.
|
||||
// If this function returns nil, the centralized HTTPErrorHandler will not be called.
|
||||
LogErrorFunc LogErrorFunc
|
||||
|
||||
// DisableErrorHandler disables the call to centralized HTTPErrorHandler.
|
||||
// The recovered error is then passed back to upstream middleware, instead of swallowing the error.
|
||||
// Optional. Default value false.
|
||||
DisableErrorHandler bool `yaml:"disable_error_handler"`
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultRecoverConfig is the default Recover middleware config.
|
||||
DefaultRecoverConfig = RecoverConfig{
|
||||
Skipper: DefaultSkipper,
|
||||
StackSize: 4 << 10, // 4 KB
|
||||
DisableStackAll: false,
|
||||
DisablePrintStack: false,
|
||||
LogLevel: 0,
|
||||
LogErrorFunc: nil,
|
||||
Skipper: DefaultSkipper,
|
||||
StackSize: 4 << 10, // 4 KB
|
||||
DisableStackAll: false,
|
||||
DisablePrintStack: false,
|
||||
LogLevel: 0,
|
||||
LogErrorFunc: nil,
|
||||
DisableErrorHandler: false,
|
||||
}
|
||||
)
|
||||
|
||||
@ -71,7 +78,7 @@ func RecoverWithConfig(config RecoverConfig) echo.MiddlewareFunc {
|
||||
}
|
||||
|
||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
return func(c echo.Context) (returnErr error) {
|
||||
if config.Skipper(c) {
|
||||
return next(c)
|
||||
}
|
||||
@ -113,7 +120,12 @@ func RecoverWithConfig(config RecoverConfig) echo.MiddlewareFunc {
|
||||
c.Logger().Print(msg)
|
||||
}
|
||||
}
|
||||
c.Error(err)
|
||||
|
||||
if err != nil && !config.DisableErrorHandler {
|
||||
c.Error(err)
|
||||
} else {
|
||||
returnErr = err
|
||||
}
|
||||
}
|
||||
}()
|
||||
return next(c)
|
||||
|
4
vendor/github.com/labstack/echo/v4/middleware/request_logger.go
generated
vendored
4
vendor/github.com/labstack/echo/v4/middleware/request_logger.go
generated
vendored
@ -225,7 +225,7 @@ func (config RequestLoggerConfig) ToMiddleware() (echo.MiddlewareFunc, error) {
|
||||
if config.Skipper == nil {
|
||||
config.Skipper = DefaultSkipper
|
||||
}
|
||||
now = time.Now
|
||||
now := time.Now
|
||||
if config.timeNow != nil {
|
||||
now = config.timeNow
|
||||
}
|
||||
@ -257,7 +257,7 @@ func (config RequestLoggerConfig) ToMiddleware() (echo.MiddlewareFunc, error) {
|
||||
config.BeforeNextFunc(c)
|
||||
}
|
||||
err := next(c)
|
||||
if config.HandleError {
|
||||
if err != nil && config.HandleError {
|
||||
c.Error(err)
|
||||
}
|
||||
|
||||
|
7
vendor/github.com/labstack/echo/v4/response.go
generated
vendored
7
vendor/github.com/labstack/echo/v4/response.go
generated
vendored
@ -94,6 +94,13 @@ func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
return r.Writer.(http.Hijacker).Hijack()
|
||||
}
|
||||
|
||||
// Unwrap returns the original http.ResponseWriter.
|
||||
// ResponseController can be used to access the original http.ResponseWriter.
|
||||
// See [https://go.dev/blog/go1.20]
|
||||
func (r *Response) Unwrap() http.ResponseWriter {
|
||||
return r.Writer
|
||||
}
|
||||
|
||||
func (r *Response) reset(w http.ResponseWriter) {
|
||||
r.beforeFuncs = nil
|
||||
r.afterFuncs = nil
|
||||
|
9
vendor/github.com/labstack/echo/v4/router.go
generated
vendored
9
vendor/github.com/labstack/echo/v4/router.go
generated
vendored
@ -151,7 +151,7 @@ func (r *Router) Routes() []*Route {
|
||||
return routes
|
||||
}
|
||||
|
||||
// Reverse generates an URL from route name and provided parameters.
|
||||
// Reverse generates a URL from route name and provided parameters.
|
||||
func (r *Router) Reverse(name string, params ...interface{}) string {
|
||||
uri := new(bytes.Buffer)
|
||||
ln := len(params)
|
||||
@ -159,7 +159,12 @@ func (r *Router) Reverse(name string, params ...interface{}) string {
|
||||
for _, route := range r.routes {
|
||||
if route.Name == name {
|
||||
for i, l := 0, len(route.Path); i < l; i++ {
|
||||
if (route.Path[i] == ':' || route.Path[i] == '*') && n < ln {
|
||||
hasBackslash := route.Path[i] == '\\'
|
||||
if hasBackslash && i+1 < l && route.Path[i+1] == ':' {
|
||||
i++ // backslash before colon escapes that colon. in that case skip backslash
|
||||
}
|
||||
if n < ln && (route.Path[i] == '*' || (!hasBackslash && route.Path[i] == ':')) {
|
||||
// in case of `*` wildcard or `:` (unescaped colon) param we replace everything till next slash or end of path
|
||||
for ; i < l && route.Path[i] != '/'; i++ {
|
||||
}
|
||||
uri.WriteString(fmt.Sprintf("%v", params[n]))
|
||||
|
5
vendor/github.com/lrstanley/girc/.editorconfig
generated
vendored
5
vendor/github.com/lrstanley/girc/.editorconfig
generated
vendored
@ -1,6 +1,9 @@
# THIS FILE IS GENERATED! DO NOT EDIT! Maintained by Terraform.
#
# editorconfig.org
# editorconfig: https://editorconfig.org/
# actual source: https://github.com/lrstanley/.github/blob/master/terraform/github-common-files/templates/.editorconfig
#

root = true

[*]
vendor/github.com/lrstanley/girc/.golangci.yml (generated, vendored, 191 changed lines)
@ -1,11 +1,33 @@
|
||||
# THIS FILE IS GENERATED! DO NOT EDIT! Maintained by Terraform.
|
||||
#
|
||||
# golangci-lint: https://golangci-lint.run/
|
||||
# false-positives: https://golangci-lint.run/usage/false-positives/
|
||||
# actual source: https://github.com/lrstanley/.github/blob/master/terraform/github-common-files/templates/.golangci.yml
|
||||
# modified variant of: https://gist.github.com/maratori/47a4d00457a92aa426dbd48a18776322
|
||||
#
|
||||
|
||||
run:
|
||||
tests: False
|
||||
timeout: 3m
|
||||
|
||||
issues:
|
||||
max-per-linter: 0
|
||||
max-same-issues: 0
|
||||
# max-same-issues: 0
|
||||
max-same-issues: 50
|
||||
|
||||
exclude-rules:
|
||||
- source: "(noinspection|TODO)"
|
||||
linters: [godot]
|
||||
- source: "//noinspection"
|
||||
linters: [gocritic]
|
||||
- path: "_test\\.go"
|
||||
linters:
|
||||
- bodyclose
|
||||
- dupl
|
||||
- funlen
|
||||
- goconst
|
||||
- gosec
|
||||
- noctx
|
||||
- wrapcheck
|
||||
|
||||
severity:
|
||||
default-severity: error
|
||||
@ -16,17 +38,102 @@ severity:
|
||||
severity: warning
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- asciicheck
|
||||
- exportloopref
|
||||
- gci
|
||||
- gocritic
|
||||
- gofmt
|
||||
- misspell
|
||||
- asasalint # checks for pass []any as any in variadic func(...any)
|
||||
- asciicheck # checks that your code does not contain non-ASCII identifiers
|
||||
- bidichk # checks for dangerous unicode character sequences
|
||||
- bodyclose # checks whether HTTP response body is closed successfully
|
||||
- cyclop # checks function and package cyclomatic complexity
|
||||
- dupl # tool for code clone detection
|
||||
- durationcheck # checks for two durations multiplied together
|
||||
- errcheck # checking for unchecked errors, these unchecked errors can be critical bugs in some cases
|
||||
- errorlint # finds code that will cause problems with the error wrapping scheme introduced in Go 1.13
|
||||
- execinquery # checks query string in Query function which reads your Go src files and warning it finds
|
||||
- exportloopref # checks for pointers to enclosing loop variables
|
||||
- forbidigo # forbids identifiers
|
||||
- funlen # tool for detection of long functions
|
||||
- gci # controls golang package import order and makes it always deterministic
|
||||
- gocheckcompilerdirectives # validates go compiler directive comments (//go:)
|
||||
- gochecknoinits # checks that no init functions are present in Go code
|
||||
- goconst # finds repeated strings that could be replaced by a constant
|
||||
- gocritic # provides diagnostics that check for bugs, performance and style issues
|
||||
- gocyclo # computes and checks the cyclomatic complexity of functions
|
||||
- godot # checks if comments end in a period
|
||||
- godox # detects FIXME, TODO and other comment keywords
|
||||
- goimports # in addition to fixing imports, goimports also formats your code in the same style as gofmt
|
||||
- gomnd # detects magic numbers
|
||||
- gomoddirectives # manages the use of 'replace', 'retract', and 'excludes' directives in go.mod
|
||||
- gomodguard # allow and block lists linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations
|
||||
- goprintffuncname # checks that printf-like functions are named with f at the end
|
||||
- gosec # inspects source code for security problems
|
||||
- gosimple # specializes in simplifying a code
|
||||
- govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
|
||||
- ineffassign # detects when assignments to existing variables are not used
|
||||
- loggercheck # checks key value pairs for common logger libraries (kitlog,klog,logr,zap)
|
||||
- makezero # finds slice declarations with non-zero initial length
|
||||
- misspell # finds commonly misspelled words
|
||||
- musttag # enforces field tags in (un)marshaled structs
|
||||
- nakedret # finds naked returns in functions greater than a specified function length
|
||||
- nilerr # finds the code that returns nil even if it checks that the error is not nil
|
||||
- nilnil # checks that there is no simultaneous return of nil error and an invalid value
|
||||
- noctx # finds sending http request without context.Context
|
||||
- nosprintfhostport # checks for misuse of Sprintf to construct a host with port in a URL
|
||||
- predeclared # finds code that shadows one of Go's predeclared identifiers
|
||||
- promlinter # checks Prometheus metrics naming via promlint
|
||||
- reassign # checks that package variables are not reassigned
|
||||
- revive # fast, configurable, extensible, flexible, and beautiful linter for Go, drop-in replacement of golint
|
||||
- rowserrcheck # checks whether Err of rows is checked successfully
|
||||
- sqlclosecheck # checks that sql.Rows and sql.Stmt are closed
|
||||
- staticcheck # is a go vet on steroids, applying a ton of static analysis checks
|
||||
- stylecheck # is a replacement for golint
|
||||
- tenv # detects using os.Setenv instead of t.Setenv since Go1.17
|
||||
- testableexamples # checks if examples are testable (have an expected output)
|
||||
- tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes
|
||||
- typecheck # like the front-end of a Go compiler, parses and type-checks Go code
|
||||
- unconvert # removes unnecessary type conversions
|
||||
- unparam # reports unused function parameters
|
||||
- unused # checks for unused constants, variables, functions and types
|
||||
- usestdlibvars # detects the possibility to use variables/constants from the Go standard library
|
||||
- wastedassign # finds wasted assignment statements
|
||||
- whitespace # detects leading and trailing whitespace
|
||||
|
||||
# disabled for now:
|
||||
# - errname # checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error
|
||||
# - gochecknoglobals # checks that no global variables exist
|
||||
# - gocognit # computes and checks the cognitive complexity of functions
|
||||
# - nestif # reports deeply nested if statements
|
||||
# - nonamedreturns # reports all named returns
|
||||
# - testpackage # makes you use a separate _test package
|
||||
|
||||
linters-settings:
|
||||
cyclop:
|
||||
# The maximal code complexity to report.
|
||||
max-complexity: 30
|
||||
# The maximal average package complexity.
|
||||
# If it's higher than 0.0 (float) the check is enabled
|
||||
package-average: 10.0
|
||||
|
||||
errcheck:
|
||||
# Report about not checking of errors in type assertions: `a := b.(MyStruct)`.
|
||||
# Such cases aren't reported by default.
|
||||
check-type-assertions: true
|
||||
|
||||
funlen:
|
||||
# Checks the number of lines in a function.
|
||||
# If lower than 0, disable the check.
|
||||
lines: 150
|
||||
# Checks the number of statements in a function.
|
||||
# If lower than 0, disable the check.
|
||||
statements: 75
|
||||
|
||||
# gocognit:
|
||||
# # Minimal code complexity to report.
|
||||
# min-complexity: 25
|
||||
|
||||
gocritic:
|
||||
disabled-checks:
|
||||
- whyNoLint
|
||||
- hugeParam
|
||||
- ifElseChain
|
||||
enabled-tags:
|
||||
@ -34,5 +141,71 @@ linters-settings:
|
||||
- opinionated
|
||||
- performance
|
||||
- style
|
||||
# https://go-critic.github.io/overview.
|
||||
settings:
|
||||
captLocal:
|
||||
# Whether to restrict checker to params only.
|
||||
paramsOnly: false
|
||||
underef:
|
||||
# Whether to skip (*x).method() calls where x is a pointer receiver.
|
||||
skipRecvDeref: false
|
||||
|
||||
gomnd:
|
||||
# Values always ignored: `time.Date`,
|
||||
# `strconv.FormatInt`, `strconv.FormatUint`, `strconv.FormatFloat`,
|
||||
# `strconv.ParseInt`, `strconv.ParseUint`, `strconv.ParseFloat`.
|
||||
ignored-functions:
|
||||
- os.Chmod
|
||||
- os.Mkdir
|
||||
- os.MkdirAll
|
||||
- os.OpenFile
|
||||
- os.WriteFile
|
||||
- prometheus.ExponentialBuckets
|
||||
- prometheus.ExponentialBucketsRange
|
||||
- prometheus.LinearBuckets
|
||||
|
||||
gomodguard:
|
||||
blocked:
|
||||
# List of blocked modules.
|
||||
modules:
|
||||
- github.com/golang/protobuf:
|
||||
recommendations:
|
||||
- google.golang.org/protobuf
|
||||
reason: "see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules"
|
||||
- github.com/satori/go.uuid:
|
||||
recommendations:
|
||||
- github.com/google/uuid
|
||||
reason: "satori's package is not maintained"
|
||||
- github.com/gofrs/uuid:
|
||||
recommendations:
|
||||
- github.com/google/uuid
|
||||
reason: "gofrs' package is not go module"
|
||||
|
||||
govet:
|
||||
check-shadowing: true
|
||||
enable-all: true
|
||||
# Run `go tool vet help` to see all analyzers.
|
||||
disable:
|
||||
- fieldalignment # too strict
|
||||
settings:
|
||||
shadow:
|
||||
# Whether to be strict about shadowing; can be noisy.
|
||||
strict: true
|
||||
|
||||
nakedret:
|
||||
# Make an issue if func has more lines of code than this setting, and it has naked returns.
|
||||
max-func-lines: 0
|
||||
|
||||
rowserrcheck:
|
||||
# database/sql is always checked
|
||||
packages:
|
||||
- github.com/jmoiron/sqlx
|
||||
|
||||
stylecheck:
|
||||
checks:
|
||||
- all
|
||||
- -ST1008 # handled by revive already.
|
||||
|
||||
tenv:
|
||||
# The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures.
|
||||
# Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked.
|
||||
all: true
|
||||
|
vendor/github.com/lrstanley/girc/builtin.go (generated, vendored, 42 changed lines)
@ -408,6 +408,48 @@ func handleISUPPORT(c *Client, e Event) {
|
||||
c.state.serverOptions[name] = val
|
||||
}
|
||||
c.state.Unlock()
|
||||
|
||||
// Check for max line/nick/user/host lengths here.
|
||||
c.state.RLock()
|
||||
maxLineLength := c.state.maxLineLength
|
||||
c.state.RUnlock()
|
||||
maxNickLength := defaultNickLength
|
||||
maxUserLength := defaultUserLength
|
||||
maxHostLength := defaultHostLength
|
||||
|
||||
var ok bool
|
||||
var tmp int
|
||||
|
||||
if tmp, ok = c.GetServerOptionInt("LINELEN"); ok {
|
||||
maxLineLength = tmp
|
||||
c.state.Lock()
|
||||
c.state.maxLineLength = maxTagLength - 2 // -2 for CR-LF.
|
||||
c.state.Unlock()
|
||||
}
|
||||
|
||||
if tmp, ok = c.GetServerOptionInt("NICKLEN"); ok {
|
||||
maxNickLength = tmp
|
||||
}
|
||||
if tmp, ok = c.GetServerOptionInt("MAXNICKLEN"); ok && tmp > maxNickLength {
|
||||
maxNickLength = tmp
|
||||
}
|
||||
if tmp, ok = c.GetServerOptionInt("USERLEN"); ok && tmp > maxUserLength {
|
||||
maxUserLength = tmp
|
||||
}
|
||||
if tmp, ok = c.GetServerOptionInt("HOSTLEN"); ok && tmp > maxHostLength {
|
||||
maxHostLength = tmp
|
||||
}
|
||||
|
||||
prefixLen := defaultPrefixPadding + maxNickLength + maxUserLength + maxHostLength
|
||||
if prefixLen >= maxLineLength {
|
||||
// Give up and go with defaults.
|
||||
c.state.notify(c, UPDATE_GENERAL)
|
||||
return
|
||||
}
|
||||
c.state.Lock()
|
||||
c.state.maxPrefixLength = prefixLen
|
||||
c.state.Unlock()
|
||||
|
||||
c.state.notify(c, UPDATE_GENERAL)
|
||||
}
|
||||
|
||||
|
vendor/github.com/lrstanley/girc/cap.go (generated, vendored, 4 changed lines)
@ -267,9 +267,9 @@ func handleCAP(c *Client, e Event) {
}

if isError {
c.rx <- &Event{Command: ERROR, Params: []string{
c.receive(&Event{Command: ERROR, Params: []string{
fmt.Sprintf("closing connection: strict transport policy provided by server is invalid; possible MITM? config: %#v", sts),
}}
}})
return
}
vendor/github.com/lrstanley/girc/cap_sasl.go (generated, vendored, 6 changed lines)
@ -95,9 +95,9 @@ func handleSASL(c *Client, e Event) {
// some reason. The SASL spec and IRCv3 spec do not define a clear
// way to abort a SASL exchange, other than to disconnect, or proceed
// with CAP END.
c.rx <- &Event{Command: ERROR, Params: []string{
c.receive(&Event{Command: ERROR, Params: []string{
fmt.Sprintf("closing connection: SASL %s failed: %s", c.Config.SASL.Method(), e.Last()),
}}
}})
return
}

@ -131,5 +131,5 @@ func handleSASLError(c *Client, e Event) {
// Authentication failed. The SASL spec and IRCv3 spec do not define a
// clear way to abort a SASL exchange, other than to disconnect, or
// proceed with CAP END.
c.rx <- &Event{Command: ERROR, Params: []string{"closing connection: " + e.Last()}}
c.receive(&Event{Command: ERROR, Params: []string{"closing connection: " + e.Last()}})
}
vendor/github.com/lrstanley/girc/cap_tags.go (generated, vendored, 7 changed lines)
@ -52,9 +52,12 @@ type Tags map[string]string

// ParseTags parses out the key-value map of tags. raw should only be the tag
// data, not a full message. For example:
// @aaa=bbb;ccc;example.com/ddd=eee
//
// @aaa=bbb;ccc;example.com/ddd=eee
//
// NOT:
// @aaa=bbb;ccc;example.com/ddd=eee :nick!ident@host.com PRIVMSG me :Hello
//
// @aaa=bbb;ccc;example.com/ddd=eee :nick!ident@host.com PRIVMSG me :Hello
//
// Technically, there is a length limit of 4096, but the server should reject
// tag messages longer than this.
vendor/github.com/lrstanley/girc/client.go (generated, vendored, 89 changed lines)
@ -155,6 +155,10 @@ type Config struct {
|
||||
// and the client. If this is set to -1, the client will not attempt to
|
||||
// send client -> server PING requests.
|
||||
PingDelay time.Duration
|
||||
// PingTimeout specifies the duration at which girc will assume
|
||||
// that the connection to the server has been lost if no PONG
|
||||
// message has been received in reply to an outstanding PING.
|
||||
PingTimeout time.Duration
|
||||
|
||||
// disableTracking disables all channel and user-level tracking. Useful
|
||||
// for highly embedded scripts with single purposes. This has an exported
|
||||
@ -179,13 +183,13 @@ type Config struct {
|
||||
// server.
|
||||
//
|
||||
// Client expectations:
|
||||
// - Perform any proxy resolution.
|
||||
// - Check the reverse DNS and forward DNS match.
|
||||
// - Check the IP against suitable access controls (ipaccess, dnsbl, etc).
|
||||
// - Perform any proxy resolution.
|
||||
// - Check the reverse DNS and forward DNS match.
|
||||
// - Check the IP against suitable access controls (ipaccess, dnsbl, etc).
|
||||
//
|
||||
// More information:
|
||||
// - https://ircv3.net/specs/extensions/webirc.html
|
||||
// - https://kiwiirc.com/docs/webirc
|
||||
// - https://ircv3.net/specs/extensions/webirc.html
|
||||
// - https://kiwiirc.com/docs/webirc
|
||||
type WebIRC struct {
|
||||
// Password that authenticates the WEBIRC command from this client.
|
||||
Password string
|
||||
@ -262,6 +266,10 @@ func New(config Config) *Client {
|
||||
c.Config.PingDelay = 600 * time.Second
|
||||
}
|
||||
|
||||
if c.Config.PingTimeout == 0 {
|
||||
c.Config.PingTimeout = 60 * time.Second
|
||||
}
|
||||
|
||||
envDebug, _ := strconv.ParseBool(os.Getenv("GIRC_DEBUG"))
|
||||
if c.Config.Debug == nil {
|
||||
if envDebug {
|
||||
@ -300,6 +308,23 @@ func New(config Config) *Client {
|
||||
return c
|
||||
}
|
||||
|
||||
// receive is a wrapper for sending to the Client.rx channel. It will timeout if
|
||||
// the event can't be sent within 30s.
|
||||
func (c *Client) receive(e *Event) {
|
||||
t := time.NewTimer(30 * time.Second)
|
||||
defer func() {
|
||||
if !t.Stop() {
|
||||
<-t.C
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case c.rx <- e:
|
||||
case <-t.C:
|
||||
c.debugLogEvent(e, true)
|
||||
}
|
||||
}
|
||||
|
||||
// String returns a brief description of the current client state.
|
||||
func (c *Client) String() string {
|
||||
connected := c.IsConnected()
|
||||
@ -380,7 +405,7 @@ func (e *ErrEvent) Error() string {
|
||||
return e.Event.Last()
|
||||
}
|
||||
|
||||
func (c *Client) execLoop(ctx context.Context, errs chan error, wg *sync.WaitGroup) {
|
||||
func (c *Client) execLoop(ctx context.Context) error {
|
||||
c.debug.Print("starting execLoop")
|
||||
defer c.debug.Print("closing execLoop")
|
||||
|
||||
@ -403,9 +428,10 @@ func (c *Client) execLoop(ctx context.Context, errs chan error, wg *sync.WaitGro
|
||||
}
|
||||
|
||||
done:
|
||||
wg.Done()
|
||||
return
|
||||
return nil
|
||||
case event = <-c.rx:
|
||||
c.RunHandlers(event)
|
||||
|
||||
if event != nil && event.Command == ERROR {
|
||||
// Handles incoming ERROR responses. These are only ever sent
|
||||
// by the server (with the exception that this library may use
|
||||
@ -415,13 +441,9 @@ func (c *Client) execLoop(ctx context.Context, errs chan error, wg *sync.WaitGro
|
||||
// some reason the server doesn't disconnect the client, or
|
||||
// if this library is the source of the error, this should
|
||||
// signal back up to the main connect loop, to disconnect.
|
||||
errs <- &ErrEvent{Event: event}
|
||||
|
||||
// Make sure to not actually exit, so we can let any handlers
|
||||
// actually handle the ERROR event.
|
||||
return &ErrEvent{Event: event}
|
||||
}
|
||||
|
||||
c.RunHandlers(event)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -669,8 +691,7 @@ func (c *Client) IsInChannel(channel string) (in bool) {
|
||||
// during client connection. This is also known as ISUPPORT (or RPL_PROTOCTL).
|
||||
// Will panic if used when tracking has been disabled. Examples of usage:
|
||||
//
|
||||
// nickLen, success := GetServerOption("MAXNICKLEN")
|
||||
//
|
||||
// nickLen, success := GetServerOption("MAXNICKLEN")
|
||||
func (c *Client) GetServerOption(key string) (result string, ok bool) {
|
||||
c.panicIfNotTracking()
|
||||
|
||||
@ -680,6 +701,42 @@ func (c *Client) GetServerOption(key string) (result string, ok bool) {
|
||||
return result, ok
|
||||
}
|
||||
|
||||
// GetServerOptionInt retrieves a server capability setting (as an integer) that was
|
||||
// retrieved during client connection. This is also known as ISUPPORT (or RPL_PROTOCTL).
|
||||
// Will panic if used when tracking has been disabled. Examples of usage:
|
||||
//
|
||||
// nickLen, success := GetServerOption("MAXNICKLEN")
|
||||
func (c *Client) GetServerOptionInt(key string) (result int, ok bool) {
|
||||
var data string
|
||||
var err error
|
||||
|
||||
data, ok = c.GetServerOption(key)
|
||||
if !ok {
|
||||
return result, ok
|
||||
}
|
||||
result, err = strconv.Atoi(data)
|
||||
if err != nil {
|
||||
ok = false
|
||||
}
|
||||
|
||||
return result, ok
|
||||
}
|
||||
|
||||
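A hedged sketch of how the new integer helper and MaxEventLength (just below) are likely to be used from application code; the network, nick and channel names are placeholders:

package main

import (
	"log"

	"github.com/lrstanley/girc"
)

func main() {
	client := girc.New(girc.Config{
		Server: "irc.example.org", // placeholder network
		Port:   6667,
		Nick:   "girc-demo",
		User:   "girc-demo",
	})

	// Once registered, ISUPPORT values are available; the helpers read them
	// as integers and derive the maximum event length used for splitting.
	client.Handlers.Add(girc.CONNECTED, func(c *girc.Client, e girc.Event) {
		if nickLen, ok := c.GetServerOptionInt("NICKLEN"); ok {
			log.Printf("server allows nicks up to %d characters", nickLen)
		}
		log.Printf("outgoing events may be at most %d bytes", c.MaxEventLength())
		c.Cmd.Join("#girc-demo")
	})

	if err := client.Connect(); err != nil {
		log.Fatal(err)
	}
}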
// MaxEventLength returns the maximum supported server length of an event. This is the
|
||||
// maximum length of the command and arguments, excluding the source/prefix supported
|
||||
// by the protocol. If state tracking is enabled, this will utilize ISUPPORT/IRCv3
|
||||
// information to more accurately calculate the maximum supported length (i.e. extended
|
||||
// length events).
|
||||
func (c *Client) MaxEventLength() (max int) {
|
||||
if !c.Config.disableTracking {
|
||||
c.state.RLock()
|
||||
max = c.state.maxLineLength - c.state.maxPrefixLength
|
||||
c.state.RUnlock()
|
||||
return max
|
||||
}
|
||||
return DefaultMaxLineLength - DefaultMaxPrefixLength
|
||||
}
|
||||
|
||||
// NetworkName returns the network identifier. E.g. "EsperNet", "ByteIRC".
|
||||
// May be empty if the server does not support RPL_ISUPPORT (or RPL_PROTOCTL).
|
||||
// Will panic if used when tracking has been disabled.
|
||||
@ -773,7 +830,7 @@ func (c *Client) debugLogEvent(e *Event, dropped bool) {
|
||||
var prefix string
|
||||
|
||||
if dropped {
|
||||
prefix = "dropping event (disconnected):"
|
||||
prefix = "dropping event (disconnected or timeout):"
|
||||
} else {
|
||||
prefix = ">"
|
||||
}
|
||||
|
vendor/github.com/lrstanley/girc/commands.go (generated, vendored, 8 changed lines)
@ -25,8 +25,8 @@ func (cmd *Commands) Nick(name string) {
// prevent sending extensive JOIN commands.
func (cmd *Commands) Join(channels ...string) {
// We can join multiple channels at once, however we need to ensure that
// we are not exceeding the line length. (see maxLength)
max := maxLength - len(JOIN) - 1
// we are not exceeding the line length (see Client.MaxEventLength()).
max := cmd.c.MaxEventLength() - len(JOIN) - 1

var buffer string

@ -329,8 +329,8 @@ func (cmd *Commands) List(channels ...string) {
}

// We can LIST multiple channels at once, however we need to ensure that
// we are not exceeding the line length. (see maxLength)
max := maxLength - len(JOIN) - 1
// we are not exceeding the line length (see Client.MaxEventLength()).
max := cmd.c.MaxEventLength() - len(JOIN) - 1

var buffer string
vendor/github.com/lrstanley/girc/conn.go (generated, vendored, 208 changed lines)
@ -12,6 +12,8 @@ import (
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/lrstanley/girc/internal/ctxgroup"
|
||||
)
|
||||
|
||||
// Messages are delimited with CR and LF line endings, we're using the last
|
||||
@ -142,17 +144,44 @@ type ErrParseEvent struct {
|
||||
|
||||
func (e ErrParseEvent) Error() string { return "unable to parse event: " + e.Line }
|
||||
|
||||
func (c *ircConn) decode() (event *Event, err error) {
|
||||
line, err := c.io.ReadString(delim)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
type decodedEvent struct {
|
||||
event *Event
|
||||
err error
|
||||
}
|
||||
|
||||
if event = ParseEvent(line); event == nil {
|
||||
return nil, ErrParseEvent{line}
|
||||
}
|
||||
func (c *ircConn) decode() <-chan decodedEvent {
|
||||
ch := make(chan decodedEvent)
|
||||
|
||||
return event, nil
|
||||
go func() {
|
||||
defer close(ch)
|
||||
|
||||
line, err := c.io.ReadString(delim)
|
||||
if err != nil {
|
||||
select {
|
||||
case ch <- decodedEvent{err: err}:
|
||||
default:
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
event := ParseEvent(line)
|
||||
if event == nil {
|
||||
select {
|
||||
case ch <- decodedEvent{err: ErrParseEvent{Line: line}}:
|
||||
default:
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case ch <- decodedEvent{event: event}:
|
||||
default:
|
||||
}
|
||||
}()
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
func (c *ircConn) encode(event *Event) error {
|
||||
@ -291,20 +320,17 @@ startConn:
|
||||
} else {
|
||||
c.conn = newMockConn(mock)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
|
||||
var ctx context.Context
|
||||
ctx, c.stop = context.WithCancel(context.Background())
|
||||
c.mu.Unlock()
|
||||
|
||||
errs := make(chan error, 4)
|
||||
var wg sync.WaitGroup
|
||||
// 4 being the number of goroutines we need to finish when this function
|
||||
// returns.
|
||||
wg.Add(4)
|
||||
go c.execLoop(ctx, errs, &wg)
|
||||
go c.readLoop(ctx, errs, &wg)
|
||||
go c.sendLoop(ctx, errs, &wg)
|
||||
go c.pingLoop(ctx, errs, &wg)
|
||||
group := ctxgroup.New(ctx)
|
||||
|
||||
group.Go(c.execLoop)
|
||||
group.Go(c.readLoop)
|
||||
group.Go(c.sendLoop)
|
||||
group.Go(c.pingLoop)
|
||||
|
||||
// Passwords first.
|
||||
|
||||
@ -338,16 +364,15 @@ startConn:
|
||||
c.RunHandlers(&Event{Command: INITIALIZED, Params: []string{addr}})
|
||||
|
||||
// Wait for the first error.
|
||||
var result error
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err := group.Wait()
|
||||
if err != nil {
|
||||
c.debug.Printf("received error, beginning cleanup: %v", err)
|
||||
} else {
|
||||
if !c.state.sts.beginUpgrade {
|
||||
c.debug.Print("received request to close, beginning clean up")
|
||||
}
|
||||
|
||||
c.RunHandlers(&Event{Command: CLOSED, Params: []string{addr}})
|
||||
case err := <-errs:
|
||||
c.debug.Printf("received error, beginning cleanup: %v", err)
|
||||
result = err
|
||||
}
|
||||
|
||||
// Make sure that the connection is closed if not already.
|
||||
@ -363,20 +388,13 @@ startConn:
|
||||
|
||||
c.RunHandlers(&Event{Command: DISCONNECTED, Params: []string{addr}})
|
||||
|
||||
// Once we have our error/result, let all other functions know we're done.
|
||||
c.debug.Print("waiting for all routines to finish")
|
||||
|
||||
// Wait for all goroutines to finish.
|
||||
wg.Wait()
|
||||
close(errs)
|
||||
|
||||
// This helps ensure that the end user isn't improperly using the client
|
||||
// more than once. If they want to do this, they should be using multiple
|
||||
// clients, not multiple instances of Connect().
|
||||
c.mu.Lock()
|
||||
c.conn = nil
|
||||
|
||||
if result == nil {
|
||||
if err == nil {
|
||||
if c.state.sts.beginUpgrade {
|
||||
c.state.sts.beginUpgrade = false
|
||||
c.mu.Unlock()
|
||||
@ -389,76 +407,85 @@ startConn:
|
||||
}
|
||||
c.mu.Unlock()
|
||||
|
||||
return result
|
||||
return err
|
||||
}
|
||||
|
||||
// readLoop sets a timeout of 300 seconds, and then attempts to read from the
|
||||
// IRC server. If there is an error, it calls Reconnect.
|
||||
func (c *Client) readLoop(ctx context.Context, errs chan error, wg *sync.WaitGroup) {
|
||||
func (c *Client) readLoop(ctx context.Context) error {
|
||||
c.debug.Print("starting readLoop")
|
||||
defer c.debug.Print("closing readLoop")
|
||||
|
||||
var event *Event
|
||||
var err error
|
||||
var de decodedEvent
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
wg.Done()
|
||||
return
|
||||
return nil
|
||||
default:
|
||||
_ = c.conn.sock.SetReadDeadline(time.Now().Add(300 * time.Second))
|
||||
event, err = c.conn.decode()
|
||||
if err != nil {
|
||||
errs <- err
|
||||
wg.Done()
|
||||
return
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case de = <-c.conn.decode():
|
||||
}
|
||||
|
||||
if de.err != nil {
|
||||
return de.err
|
||||
}
|
||||
|
||||
// Check if it's an echo-message.
|
||||
if !c.Config.disableTracking {
|
||||
event.Echo = (event.Command == PRIVMSG || event.Command == NOTICE) &&
|
||||
event.Source != nil && event.Source.ID() == c.GetID()
|
||||
de.event.Echo = (de.event.Command == PRIVMSG || de.event.Command == NOTICE) &&
|
||||
de.event.Source != nil && de.event.Source.ID() == c.GetID()
|
||||
}
|
||||
|
||||
c.rx <- event
|
||||
c.receive(de.event)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Send sends an event to the server. Use Client.RunHandlers() if you are
|
||||
// simply looking to trigger handlers with an event.
|
||||
// Send sends an event to the server. Send will split events if the event is longer
|
||||
// than what the server supports, and is an event that supports splitting. Use
|
||||
// Client.RunHandlers() if you are simply looking to trigger handlers with an event.
|
||||
func (c *Client) Send(event *Event) {
|
||||
var delay time.Duration
|
||||
|
||||
if !c.Config.AllowFlood {
|
||||
c.mu.RLock()
|
||||
|
||||
// Drop the event early as we're disconnected, this way we don't have to wait
|
||||
// the (potentially long) rate limit delay before dropping.
|
||||
if c.conn == nil {
|
||||
c.debugLogEvent(event, true)
|
||||
c.mu.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
c.conn.mu.Lock()
|
||||
delay = c.conn.rate(event.Len())
|
||||
c.conn.mu.Unlock()
|
||||
c.mu.RUnlock()
|
||||
}
|
||||
|
||||
if c.Config.GlobalFormat && len(event.Params) > 0 && event.Params[len(event.Params)-1] != "" &&
|
||||
(event.Command == PRIVMSG || event.Command == TOPIC || event.Command == NOTICE) {
|
||||
event.Params[len(event.Params)-1] = Fmt(event.Params[len(event.Params)-1])
|
||||
}
|
||||
|
||||
<-time.After(delay)
|
||||
c.write(event)
|
||||
var events []*Event
|
||||
events = event.split(c.MaxEventLength())
|
||||
|
||||
for _, e := range events {
|
||||
if !c.Config.AllowFlood {
|
||||
c.mu.RLock()
|
||||
|
||||
// Drop the event early as we're disconnected, this way we don't have to wait
|
||||
// the (potentially long) rate limit delay before dropping.
|
||||
if c.conn == nil {
|
||||
c.debugLogEvent(e, true)
|
||||
c.mu.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
c.conn.mu.Lock()
|
||||
delay = c.conn.rate(e.Len())
|
||||
c.conn.mu.Unlock()
|
||||
c.mu.RUnlock()
|
||||
}
|
||||
|
||||
<-time.After(delay)
|
||||
c.write(e)
|
||||
}
|
||||
}
|
||||
|
||||
// write is the lower level function to write an event. It does not have a
|
||||
// write-delay when sending events.
|
||||
// write-delay when sending events. write will timeout after 30s if the event
|
||||
// can't be sent.
|
||||
func (c *Client) write(event *Event) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
@ -468,7 +495,19 @@ func (c *Client) write(event *Event) {
|
||||
c.debugLogEvent(event, true)
|
||||
return
|
||||
}
|
||||
c.tx <- event
|
||||
|
||||
t := time.NewTimer(30 * time.Second)
|
||||
defer func() {
|
||||
if !t.Stop() {
|
||||
<-t.C
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case c.tx <- event:
|
||||
case <-t.C:
|
||||
c.debugLogEvent(event, true)
|
||||
}
|
||||
}
|
||||
|
||||
// rate allows limiting events based on how frequent the event is being sent,
|
||||
@ -487,7 +526,7 @@ func (c *ircConn) rate(chars int) time.Duration {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (c *Client) sendLoop(ctx context.Context, errs chan error, wg *sync.WaitGroup) {
|
||||
func (c *Client) sendLoop(ctx context.Context) error {
|
||||
c.debug.Print("starting sendLoop")
|
||||
defer c.debug.Print("closing sendLoop")
|
||||
|
||||
@ -537,18 +576,14 @@ func (c *Client) sendLoop(ctx context.Context, errs chan error, wg *sync.WaitGro
|
||||
|
||||
if event.Command == QUIT {
|
||||
c.Close()
|
||||
wg.Done()
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
errs <- err
|
||||
wg.Done()
|
||||
return
|
||||
return err
|
||||
}
|
||||
case <-ctx.Done():
|
||||
wg.Done()
|
||||
return
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -568,11 +603,10 @@ type ErrTimedOut struct {
|
||||
|
||||
func (ErrTimedOut) Error() string { return "timed out waiting for a requested PING response" }
|
||||
|
||||
func (c *Client) pingLoop(ctx context.Context, errs chan error, wg *sync.WaitGroup) {
|
||||
func (c *Client) pingLoop(ctx context.Context) error {
|
||||
// Don't run the pingLoop if they want to disable it.
|
||||
if c.Config.PingDelay <= 0 {
|
||||
wg.Done()
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
c.debug.Print("starting pingLoop")
|
||||
@ -604,9 +638,8 @@ func (c *Client) pingLoop(ctx context.Context, errs chan error, wg *sync.WaitGro
|
||||
}
|
||||
|
||||
c.conn.mu.RLock()
|
||||
if pingSent && time.Since(c.conn.lastPong) > c.Config.PingDelay+(60*time.Second) {
|
||||
// It's 60 seconds over what out ping delay is, connection
|
||||
// has probably dropped.
|
||||
if pingSent && time.Since(c.conn.lastPong) > c.Config.PingDelay+c.Config.PingTimeout {
|
||||
// PingTimeout exceeded, connection has probably dropped.
|
||||
err := ErrTimedOut{
|
||||
TimeSinceSuccess: time.Since(c.conn.lastPong),
|
||||
LastPong: c.conn.lastPong,
|
||||
@ -615,9 +648,7 @@ func (c *Client) pingLoop(ctx context.Context, errs chan error, wg *sync.WaitGro
|
||||
}
|
||||
|
||||
c.conn.mu.RUnlock()
|
||||
errs <- err
|
||||
wg.Done()
|
||||
return
|
||||
return err
|
||||
}
|
||||
c.conn.mu.RUnlock()
|
||||
|
||||
@ -628,8 +659,7 @@ func (c *Client) pingLoop(ctx context.Context, errs chan error, wg *sync.WaitGro
|
||||
c.Cmd.Ping(fmt.Sprintf("%d", time.Now().UnixNano()))
|
||||
pingSent = true
|
||||
case <-ctx.Done():
|
||||
wg.Done()
|
||||
return
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
vendor/github.com/lrstanley/girc/event.go (generated, vendored, 146 changed lines)
@ -13,7 +13,41 @@ import (
|
||||
|
||||
const (
|
||||
eventSpace byte = ' ' // Separator.
|
||||
maxLength int = 510 // Maximum length is 510 (2 for line endings).
|
||||
|
||||
// TODO: if state tracking is enabled, we SHOULD be able to use it's known length.
|
||||
|
||||
// Can be overridden by the NICKLEN (or MAXNICKLEN) ISUPPORT parameter. 30 or 31
|
||||
// are typical values for this parameter advertised by servers today.
|
||||
defaultNickLength = 30
|
||||
// The maximum length of <username> may be specified by the USERLEN RPL_ISUPPORT
|
||||
// parameter. If this length is advertised, the username MUST be silently truncated
|
||||
// to the given length before being used.
|
||||
defaultUserLength = 18
|
||||
// If a looked-up domain name is longer than this length (or overridden by the
|
||||
// HOSTLEN ISUPPORT parameter), the server SHOULD opt to use the IP address instead,
|
||||
// so that the hostname is underneath this length.
|
||||
defaultHostLength = 63
|
||||
|
||||
// defaultPrefixPadding defaults the estimated prefix padding length of a given
|
||||
// event. See also:
|
||||
// [ ":" ( servername / ( nickname [ [ "!" user ] "@" host ] ) ) SPACE ]
|
||||
defaultPrefixPadding = 4
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultMaxLineLength is the default maximum length for an event. 510 (+2 for line endings)
|
||||
// is used as a default as this is used by many older implementations.
|
||||
//
|
||||
// See also: RFC 2812
|
||||
// IRC messages are always lines of characters terminated with a CR-LF
|
||||
// (Carriage Return - Line Feed) pair, and these messages SHALL NOT
|
||||
// exceed 512 characters in length, counting all characters including
|
||||
// the trailing CR-LF.
|
||||
DefaultMaxLineLength = 510
|
||||
|
||||
// DefaultMaxPrefixLength defines the default max ":nickname!user@host " length
|
||||
// that's used to calculate line splitting.
|
||||
DefaultMaxPrefixLength = defaultPrefixPadding + defaultNickLength + defaultUserLength + defaultHostLength
|
||||
)
|
||||
|
||||
// cutCRFunc is used to trim CR characters from prefixes/messages.
|
||||
@ -125,16 +159,16 @@ func ParseEvent(raw string) (e *Event) {
|
||||
|
||||
// Event represents an IRC protocol message, see RFC1459 section 2.3.1
|
||||
//
|
||||
// <message> :: [':' <prefix> <SPACE>] <command> <params> <crlf>
|
||||
// <prefix> :: <servername> | <nick> ['!' <user>] ['@' <host>]
|
||||
// <command> :: <letter>{<letter>} | <number> <number> <number>
|
||||
// <SPACE> :: ' '{' '}
|
||||
// <params> :: <SPACE> [':' <trailing> | <middle> <params>]
|
||||
// <middle> :: <Any *non-empty* sequence of octets not including SPACE or NUL
|
||||
// or CR or LF, the first of which may not be ':'>
|
||||
// <trailing> :: <Any, possibly empty, sequence of octets not including NUL or
|
||||
// CR or LF>
|
||||
// <crlf> :: CR LF
|
||||
// <message> :: [':' <prefix> <SPACE>] <command> <params> <crlf>
|
||||
// <prefix> :: <servername> | <nick> ['!' <user>] ['@' <host>]
|
||||
// <command> :: <letter>{<letter>} | <number> <number> <number>
|
||||
// <SPACE> :: ' '{' '}
|
||||
// <params> :: <SPACE> [':' <trailing> | <middle> <params>]
|
||||
// <middle> :: <Any *non-empty* sequence of octets not including SPACE or NUL
|
||||
// or CR or LF, the first of which may not be ':'>
|
||||
// <trailing> :: <Any, possibly empty, sequence of octets not including NUL or
|
||||
// CR or LF>
|
||||
// <crlf> :: CR LF
|
||||
type Event struct {
|
||||
// Source is the origin of the event.
|
||||
Source *Source `json:"source"`
|
||||
@ -223,11 +257,80 @@ func (e *Event) Equals(ev *Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Len calculates the length of the string representation of event. Note that
|
||||
// this will return the true length (even if longer than what IRC supports),
|
||||
// which may be useful if you are trying to check and see if a message is
|
||||
// too long, to trim it down yourself.
|
||||
// split will split a potentially large event that is larger than what the server
|
||||
// supports, into multiple events. split will ignore events that cannot be split, and
|
||||
// if the event isn't longer than what the server supports, it will just return an array
|
||||
// with 1 entry, the original event.
|
||||
func (e *Event) split(maxLength int) []*Event {
|
||||
if len(e.Params) < 1 || (e.Command != PRIVMSG && e.Command != NOTICE) {
|
||||
return []*Event{e}
|
||||
}
|
||||
|
||||
// Exclude source, even if it does exist, because the server will likely ignore the
|
||||
// sent source anyway.
|
||||
event := e.Copy()
|
||||
event.Source = nil
|
||||
|
||||
if event.LenOpts(false) < maxLength {
|
||||
return []*Event{e}
|
||||
}
|
||||
|
||||
results := []*Event{}
|
||||
|
||||
// Will force the length check to include " :". This will allow us to get the length
|
||||
// of the commands and necessary prefixes.
|
||||
text := event.Last()
|
||||
event.Params[len(event.Params)-1] = ""
|
||||
cmdLen := event.LenOpts(false)
|
||||
|
||||
var ok bool
|
||||
var ctcp *CTCPEvent
|
||||
if ok, ctcp = e.IsCTCP(); ok {
|
||||
if text == "" {
|
||||
return []*Event{e}
|
||||
}
|
||||
|
||||
text = ctcp.Text
|
||||
|
||||
// ctcpDelim's at start and end, and space between command and trailing text.
|
||||
maxLength -= len(ctcp.Command) + 4
|
||||
}
|
||||
|
||||
// If the command itself is longer than the limit, there is a problem. PRIVMSG should
|
||||
// be 1->1 per RFC. Just return the original message and let it be the user of the
|
||||
// libraries problem.
|
||||
if cmdLen > maxLength {
|
||||
return []*Event{e}
|
||||
}
|
||||
|
||||
// Split the text into correctly size segments, and make the necessary number of
|
||||
// events that duplicate the original event.
|
||||
for _, split := range splitMessage(text, maxLength-cmdLen) {
|
||||
if ctcp != nil {
|
||||
split = string(ctcpDelim) + ctcp.Command + string(eventSpace) + split + string(ctcpDelim)
|
||||
}
|
||||
clonedEvent := event.Copy()
|
||||
clonedEvent.Source = e.Source
|
||||
clonedEvent.Params[len(e.Params)-1] = split
|
||||
results = append(results, clonedEvent)
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
// Len calculates the length of the string representation of event (including tags).
|
||||
// Note that this will return the true length (even if longer than what IRC supports),
|
||||
// which may be useful if you are trying to check and see if a message is too long, to
|
||||
// trim it down yourself.
|
||||
func (e *Event) Len() (length int) {
|
||||
return e.LenOpts(true)
|
||||
}
|
||||
|
||||
// LenOpts calculates the length of the string representation of event (with a toggle
|
||||
// for tags). Note that this will return the true length (even if longer than what IRC
|
||||
// supports), which may be useful if you are trying to check and see if a message is
|
||||
// too long, to trim it down yourself.
|
||||
func (e *Event) LenOpts(includeTags bool) (length int) {
|
||||
if e.Tags != nil {
|
||||
// Include tags and trailing space.
|
||||
length = e.Tags.Len() + 1
|
||||
@ -248,7 +351,7 @@ func (e *Event) Len() (length int) {
|
||||
|
||||
// If param contains a space or it's empty, it's trailing, so it should be
|
||||
// prefixed with a colon (:).
|
||||
if i == len(e.Params)-1 && (strings.Contains(e.Params[i], " ") || strings.HasPrefix(e.Params[i], ":") || e.Params[i] == "") {
|
||||
if i == len(e.Params)-1 && (strings.Contains(e.Params[i], " ") || e.Params[i] == "" || strings.HasPrefix(e.Params[i], ":")) {
|
||||
length++
|
||||
}
|
||||
}
|
||||
@ -259,10 +362,6 @@ func (e *Event) Len() (length int) {
|
||||
|
||||
// Bytes returns a []byte representation of event. Strips all newlines and
|
||||
// carriage returns.
|
||||
//
|
||||
// Per RFC2812 section 2.3, messages should not exceed 512 characters in
|
||||
// length. This method forces that limit by discarding any characters
|
||||
// exceeding the length limit.
|
||||
func (e *Event) Bytes() []byte {
|
||||
buffer := new(bytes.Buffer)
|
||||
|
||||
@ -284,7 +383,7 @@ func (e *Event) Bytes() []byte {
|
||||
// Space separated list of arguments.
|
||||
if len(e.Params) > 0 {
|
||||
for i := 0; i < len(e.Params); i++ {
|
||||
if i == len(e.Params)-1 && (strings.Contains(e.Params[i], " ") || strings.HasPrefix(e.Params[i], ":") || e.Params[i] == "") {
|
||||
if i == len(e.Params)-1 && (strings.Contains(e.Params[i], " ") || e.Params[i] == "" || strings.HasPrefix(e.Params[i], ":")) {
|
||||
buffer.WriteString(string(eventSpace) + string(messagePrefix) + e.Params[i])
|
||||
continue
|
||||
}
|
||||
@ -292,11 +391,6 @@ func (e *Event) Bytes() []byte {
|
||||
}
|
||||
}
|
||||
|
||||
// We need the limit the buffer length.
|
||||
if buffer.Len() > (maxLength) {
|
||||
buffer.Truncate(maxLength)
|
||||
}
|
||||
|
||||
// If we truncated in the middle of a utf8 character, we need to remove
|
||||
// the other (now invalid) bytes.
|
||||
out := bytes.ToValidUTF8(buffer.Bytes(), nil)
|
||||
|
vendor/github.com/lrstanley/girc/format.go (generated, vendored, 216 changed lines)
@ -7,13 +7,21 @@ package girc
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
fmtOpenChar = '{'
|
||||
fmtCloseChar = '}'
|
||||
fmtOpenChar = '{'
|
||||
fmtCloseChar = '}'
|
||||
maxWordSplitLength = 30
|
||||
)
|
||||
|
||||
var (
|
||||
reCode = regexp.MustCompile(`(\x02|\x1d|\x0f|\x03|\x16|\x1f|\x01)`)
|
||||
reColor = regexp.MustCompile(`\x03([019]?\d(,[019]?\d)?)`)
|
||||
)
|
||||
|
||||
var fmtColors = map[string]int{
|
||||
@ -66,9 +74,9 @@ var fmtCodes = map[string]string{
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// client.Message("#channel", Fmt("{red}{b}Hello {red,blue}World{c}"))
|
||||
// client.Message("#channel", Fmt("{red}{b}Hello {red,blue}World{c}"))
|
||||
func Fmt(text string) string {
|
||||
var last = -1
|
||||
last := -1
|
||||
for i := 0; i < len(text); i++ {
|
||||
if text[i] == fmtOpenChar {
|
||||
last = i
|
||||
@ -136,16 +144,12 @@ func TrimFmt(text string) string {
|
||||
return text
|
||||
}
|
||||
|
||||
// This is really the only fastest way of doing this (marginally better than
|
||||
// actually trying to parse it manually.)
|
||||
var reStripColor = regexp.MustCompile(`\x03([019]?\d(,[019]?\d)?)?`)
|
||||
|
||||
// StripRaw tries to strip all ASCII format codes that are used for IRC.
|
||||
// Primarily, foreground/background colors, and other control bytes like
|
||||
// reset, bold, italic, reverse, etc. This also is done in a specific way
|
||||
// in order to ensure no truncation of other non-irc formatting.
|
||||
func StripRaw(text string) string {
|
||||
text = reStripColor.ReplaceAllString(text, "")
|
||||
text = reColor.ReplaceAllString(text, "")
|
||||
|
||||
for _, code := range fmtCodes {
|
||||
text = strings.ReplaceAll(text, code, "")
|
||||
@ -164,12 +168,12 @@ func StripRaw(text string) string {
|
||||
// all ASCII printable chars. This function will NOT do that for
|
||||
// compatibility reasons.
|
||||
//
|
||||
// channel = ( "#" / "+" / ( "!" channelid ) / "&" ) chanstring
|
||||
// [ ":" chanstring ]
|
||||
// chanstring = 0x01-0x07 / 0x08-0x09 / 0x0B-0x0C / 0x0E-0x1F / 0x21-0x2B
|
||||
// chanstring = / 0x2D-0x39 / 0x3B-0xFF
|
||||
// ; any octet except NUL, BELL, CR, LF, " ", "," and ":"
|
||||
// channelid = 5( 0x41-0x5A / digit ) ; 5( A-Z / 0-9 )
|
||||
// channel = ( "#" / "+" / ( "!" channelid ) / "&" ) chanstring
|
||||
// [ ":" chanstring ]
|
||||
// chanstring = 0x01-0x07 / 0x08-0x09 / 0x0B-0x0C / 0x0E-0x1F / 0x21-0x2B
|
||||
// chanstring = / 0x2D-0x39 / 0x3B-0xFF
|
||||
// ; any octet except NUL, BELL, CR, LF, " ", "," and ":"
|
||||
// channelid = 5( 0x41-0x5A / digit ) ; 5( A-Z / 0-9 )
|
||||
func IsValidChannel(channel string) bool {
|
||||
if len(channel) <= 1 || len(channel) > 50 {
|
||||
return false
|
||||
@ -214,10 +218,10 @@ func IsValidChannel(channel string) bool {
|
||||
// IsValidNick validates an IRC nickname. Note that this does not validate
|
||||
// IRC nickname length.
|
||||
//
|
||||
// nickname = ( letter / special ) *8( letter / digit / special / "-" )
|
||||
// letter = 0x41-0x5A / 0x61-0x7A
|
||||
// digit = 0x30-0x39
|
||||
// special = 0x5B-0x60 / 0x7B-0x7D
|
||||
// nickname = ( letter / special ) *8( letter / digit / special / "-" )
|
||||
// letter = 0x41-0x5A / 0x61-0x7A
|
||||
// digit = 0x30-0x39
|
||||
// special = 0x5B-0x60 / 0x7B-0x7D
|
||||
func IsValidNick(nick string) bool {
|
||||
if nick == "" {
|
||||
return false
|
||||
@ -253,8 +257,9 @@ func IsValidNick(nick string) bool {
|
||||
// not be supported on all networks. Some limit this to only a single period.
|
||||
//
|
||||
// Per RFC:
|
||||
// user = 1*( %x01-09 / %x0B-0C / %x0E-1F / %x21-3F / %x41-FF )
|
||||
// ; any octet except NUL, CR, LF, " " and "@"
|
||||
//
|
||||
// user = 1*( %x01-09 / %x0B-0C / %x0E-1F / %x21-3F / %x41-FF )
|
||||
// ; any octet except NUL, CR, LF, " " and "@"
|
||||
func IsValidUser(name string) bool {
|
||||
if name == "" {
|
||||
return false
|
||||
@ -350,3 +355,172 @@ func Glob(input, match string) bool {
|
||||
// Check suffix last.
|
||||
return trailingGlob || strings.HasSuffix(input, parts[last])
|
||||
}
|
||||
|
||||
// sliceInsert inserts a string into a slice at a specific index, while trying
|
||||
// to avoid as many allocations as possible.
|
||||
func sliceInsert(input []string, i int, v ...string) []string {
|
||||
total := len(input) + len(v)
|
||||
if total <= cap(input) {
|
||||
output := input[:total]
|
||||
copy(output[i+len(v):], input[i:])
|
||||
copy(output[i:], v)
|
||||
return output
|
||||
}
|
||||
output := make([]string, total)
|
||||
copy(output, input[:i])
|
||||
copy(output[i:], v)
|
||||
copy(output[i+len(v):], input[i:])
|
||||
return output
|
||||
}
|
||||
|
||||
// splitMessage is a text splitter that takes into consideration a few things:
|
||||
// - Ensuring the returned text is no longer than maxWidth.
|
||||
// - Attempting to split at the closest word boundary, while still staying inside
|
||||
// of the specific maxWidth.
|
||||
// - if there is no good word boundary for longer words (or e.g. links, raw data, etc)
|
||||
// that are above maxWordSplitLength characters, split the word into chunks to fit the
|
||||
//
|
||||
// maximum width.
|
||||
func splitMessage(input string, maxWidth int) (output []string) {
|
||||
input = strings.ToValidUTF8(input, "?")
|
||||
|
||||
words := strings.FieldsFunc(strings.TrimSpace(input), func(r rune) bool {
|
||||
switch r { // Same as unicode.IsSpace, but without ctrl/lf.
|
||||
case '\t', '\v', '\f', ' ', 0x85, 0xA0:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
output = []string{""}
|
||||
codes := []string{}
|
||||
|
||||
var lastColor string
|
||||
var match []string
|
||||
|
||||
for i := 0; i < len(words); i++ {
|
||||
j := strings.IndexAny(words[i], "\n\r")
|
||||
if j == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
word := words[i]
|
||||
words[i] = word[:j]
|
||||
|
||||
words = sliceInsert(words, i+1, "", strings.TrimLeft(word[j:], "\n\r"))
|
||||
}
|
||||
|
||||
for _, word := range words {
|
||||
// Used in place of a single newline.
|
||||
if word == "" {
|
||||
// Last line was already empty or already only had control characters.
|
||||
if output[len(output)-1] == "" || output[len(output)-1] == lastColor+word {
|
||||
continue
|
||||
}
|
||||
|
||||
output = append(output, strings.Join(codes, "")+lastColor+word)
|
||||
continue
|
||||
}
|
||||
|
||||
// Keep track of the last used color codes.
|
||||
match = reColor.FindAllString(word, -1)
|
||||
if len(match) > 0 {
|
||||
lastColor = match[len(match)-1]
|
||||
}
|
||||
|
||||
// Find all sequence codes -- this approach isn't perfect (ideally, a lexer
|
||||
// should be used to track each exact type of code), but it's good enough for
|
||||
// most cases.
|
||||
match = reCode.FindAllString(word, -1)
|
||||
if len(match) > 0 {
|
||||
for _, m := range match {
|
||||
// Reset was used, so clear all codes.
|
||||
if m == fmtCodes["reset"] {
|
||||
lastColor = ""
|
||||
codes = []string{}
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if we already have the code, and if so, remove it (closing).
|
||||
contains := false
|
||||
for i := 0; i < len(codes); i++ {
|
||||
if m == codes[i] {
|
||||
contains = true
|
||||
codes = append(codes[:i], codes[i+1:]...)
|
||||
|
||||
// If it's a closing color code, reset the last used color
|
||||
// as well.
|
||||
if m == fmtCodes["clear"] {
|
||||
lastColor = ""
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Track the new code, unless it's a color clear but we aren't
|
||||
// tracking a color right now.
|
||||
if !contains && (lastColor == "" || m != fmtCodes["clear"]) {
|
||||
codes = append(codes, m)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
checkappend:
|
||||
|
||||
// Check if we can append, otherwise we must split.
|
||||
if 1+utf8.RuneCountInString(word)+utf8.RuneCountInString(output[len(output)-1]) < maxWidth {
|
||||
if output[len(output)-1] != "" {
|
||||
output[len(output)-1] += " "
|
||||
}
|
||||
output[len(output)-1] += word
|
||||
continue
|
||||
}
|
||||
|
||||
// If the word can fit on a line by itself, check if it's a url. If it is,
|
||||
// put it on it's own line.
|
||||
if utf8.RuneCountInString(word+strings.Join(codes, "")+lastColor) < maxWidth {
|
||||
if _, err := url.Parse(word); err == nil {
|
||||
output = append(output, strings.Join(codes, "")+lastColor+word)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Check to see if we can split by misc symbols, but must be at least a few
|
||||
// characters long to be split by it.
|
||||
if j := strings.IndexAny(word, "-+_=|/~:;,."); j > 3 && 1+utf8.RuneCountInString(word[0:j])+utf8.RuneCountInString(output[len(output)-1]) < maxWidth {
|
||||
if output[len(output)-1] != "" {
|
||||
output[len(output)-1] += " "
|
||||
}
|
||||
output[len(output)-1] += word[0:j]
|
||||
word = word[j+1:]
|
||||
goto checkappend
|
||||
}
|
||||
|
||||
// If the word is longer than is acceptable to just put on the next line,
|
||||
// split it into chunks. Also don't split the word if only a few characters
|
||||
// left of the word would be on the next line.
|
||||
if 1+utf8.RuneCountInString(word) > maxWordSplitLength && maxWidth-utf8.RuneCountInString(output[len(output)-1]) > 5 {
|
||||
left := maxWidth - utf8.RuneCountInString(output[len(output)-1]) - 1 // -1 for the space
|
||||
|
||||
if output[len(output)-1] != "" {
|
||||
output[len(output)-1] += " "
|
||||
}
|
||||
output[len(output)-1] += word[0:left]
|
||||
word = word[left:]
|
||||
goto checkappend
|
||||
}
|
||||
|
||||
left := maxWidth - utf8.RuneCountInString(output[len(output)-1])
|
||||
output[len(output)-1] += word[0:left]
|
||||
|
||||
output = append(output, strings.Join(codes, "")+lastColor)
|
||||
word = word[left:]
|
||||
goto checkappend
|
||||
}
|
||||
|
||||
for i := 0; i < len(output); i++ {
|
||||
output[i] = strings.ToValidUTF8(output[i], "?")
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
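For reference, the exported formatting helpers touched in this file are used roughly like this (the input string is arbitrary; {c} is the library's color-clear code):

package main

import (
	"fmt"

	"github.com/lrstanley/girc"
)

func main() {
	raw := girc.Fmt("{red}{b}alert:{c} build failed") // expand {codes} into IRC control bytes

	fmt.Println(girc.TrimFmt("{red}{b}alert:{c} build failed")) // drop unexpanded {codes}
	fmt.Println(girc.StripRaw(raw))                             // strip control bytes again
}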
vendor/github.com/lrstanley/girc/internal/ctxgroup/ctxgroup.go (new file, generated, vendored, 67 lines)
@ -0,0 +1,67 @@
// Copyright (c) Liam Stanley <me@liamstanley.io>. All rights reserved. Use
// of this source code is governed by the MIT license that can be found in
// the LICENSE file.

package ctxgroup

import (
	"context"
	"sync"
)

// A Group is a collection of goroutines working on subtasks that are part of
// the same overall task.
type Group struct {
	ctx    context.Context
	cancel func()

	wg sync.WaitGroup

	errOnce sync.Once
	err     error
}

// New returns a new Group and an associated context derived from ctx.
// Obtain the derived context from calling Group.Context().
//
// The derived context is canceled the first time a function passed to Go
// returns a non-nil error or the first time Wait returns, whichever occurs
// first.
func New(ctx context.Context) *Group {
	nctx, cancel := context.WithCancel(ctx)
	return &Group{ctx: nctx, cancel: cancel}
}

// Context returns the context for this group. It may be canceled by the first
// function to return a non-nil error.
func (g *Group) Context() context.Context {
	return g.ctx
}

// Wait blocks until all function calls from the Go method have returned, then
// returns the first non-nil error (if any) from them.
func (g *Group) Wait() error {
	g.wg.Wait()
	if g.cancel != nil {
		g.cancel()
	}
	return g.err
}

// Go calls the given function in a new goroutine. The first call to return a
// non-nil error cancels the group; its error will be returned by Wait.
func (g *Group) Go(f func(ctx context.Context) error) {
	g.wg.Add(1)
	go func() {
		defer g.wg.Done()

		if err := f(g.ctx); err != nil {
			g.errOnce.Do(func() {
				g.err = err
				if g.cancel != nil {
					g.cancel()
				}
			})
		}
	}()
}
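The new internal package mirrors golang.org/x/sync/errgroup, but hands the shared group context to each function. Being an internal package it cannot be imported from outside girc; the sketch below only illustrates how the exec/read/send/ping loops use it (the two goroutine bodies are invented):

package girc_example // hypothetical; internal/ctxgroup is only importable from within girc

import (
	"context"
	"fmt"
	"time"

	"github.com/lrstanley/girc/internal/ctxgroup"
)

func run() error {
	group := ctxgroup.New(context.Background())

	// Each loop returns nil on clean shutdown, or an error that cancels the
	// shared context and unblocks the sibling goroutines.
	group.Go(func(ctx context.Context) error {
		select {
		case <-time.After(100 * time.Millisecond):
			return fmt.Errorf("read loop failed")
		case <-ctx.Done():
			return nil
		}
	})
	group.Go(func(ctx context.Context) error {
		<-ctx.Done() // unblocked once the failing goroutine cancels the group
		return nil
	})

	// Wait returns the first non-nil error, like errgroup.Group.Wait.
	return group.Wait()
}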
|
vendor/github.com/lrstanley/girc/modes.go (generated, vendored, 15 changed lines)
@ -118,13 +118,14 @@ func (c *CModes) Get(mode string) (args string, ok bool) {
}

// hasArg checks to see if the mode supports arguments. What ones support this?:
// A = Mode that adds or removes a nick or address to a list. Always has a parameter.
// B = Mode that changes a setting and always has a parameter.
// C = Mode that changes a setting and only has a parameter when set.
// D = Mode that changes a setting and never has a parameter.
// Note: Modes of type A return the list when there is no parameter present.
// Note: Some clients assumes that any mode not listed is of type D.
// Note: Modes in PREFIX are not listed but could be considered type B.
//
// A = Mode that adds or removes a nick or address to a list. Always has a parameter.
// B = Mode that changes a setting and always has a parameter.
// C = Mode that changes a setting and only has a parameter when set.
// D = Mode that changes a setting and never has a parameter.
// Note: Modes of type A return the list when there is no parameter present.
// Note: Some clients assumes that any mode not listed is of type D.
// Note: Modes in PREFIX are not listed but could be considered type B.
func (c *CModes) hasArg(set bool, mode byte) (hasArgs, isSetting bool) {
if len(c.raw) < 1 {
return false, true
vendor/github.com/lrstanley/girc/state.go (generated, vendored, 15 changed lines)
@@ -28,10 +28,21 @@ type state struct {
    // last capability check. These will get sent once we have received the
    // last capability list command from the server.
    tmpCap map[string]map[string]string

    // serverOptions are the standard capabilities and configurations
    // supported by the server at connection time. This also includes
    // RPL_ISUPPORT entries.
    serverOptions map[string]string

    // maxLineLength defines how long before we truncate (or split) messages.
    // DefaultMaxLineLength is what is used by default, as this is going to be a common
    // standard. However, protocols like IRCv3, or ISUPPORT can override this.
    maxLineLength int

    // maxPrefixLength defines the estimated prefix length (":nick!user@host ") that
    // we can use to calculate line splits.
    maxPrefixLength int

    // motd is the servers message of the day.
    motd string

@@ -51,9 +62,11 @@ func (s *state) reset(initial bool) {
    s.host = ""
    s.channels = make(map[string]*Channel)
    s.users = make(map[string]*User)
    s.serverOptions = make(map[string]string)
    s.enabledCap = make(map[string]map[string]string)
    s.tmpCap = make(map[string]map[string]string)
    s.serverOptions = make(map[string]string)
    s.maxLineLength = DefaultMaxLineLength
    s.maxPrefixLength = DefaultMaxPrefixLength
    s.motd = ""

    if initial {
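For context on the new maxLineLength and maxPrefixLength fields: the space left for message text on each outgoing line is roughly the line limit minus the estimated ":nick!user@host " prefix and command framing. The following is a hedged, self-contained sketch of that split calculation; splitMessage, its constants, and the overhead value are illustrative, not girc's implementation, and it splits on bytes rather than runes.

```go
package main

import "fmt"

const (
	defaultMaxLineLength   = 510 // illustrative: bytes available before CR-LF
	defaultMaxPrefixLength = 65  // illustrative: ":nick!user@host " estimate
)

// splitMessage chops a message into chunks that each fit on one line once
// the sender prefix and command framing are accounted for.
func splitMessage(msg string, maxLine, prefixLen, overhead int) []string {
	budget := maxLine - prefixLen - overhead
	if budget <= 0 {
		budget = 1
	}
	var parts []string
	for len(msg) > budget {
		parts = append(parts, msg[:budget])
		msg = msg[budget:]
	}
	return append(parts, msg)
}

func main() {
	// overhead covers "PRIVMSG #chan :" and similar framing (assumed value).
	parts := splitMessage("a fairly long message ...", defaultMaxLineLength, defaultMaxPrefixLength, 20)
	fmt.Println(len(parts), parts[0])
}
```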
vendor/github.com/matterbridge/matterclient/matterclient.go (10 changes, generated, vendored)
@@ -144,10 +144,6 @@ func (m *Client) Login() error {
        return err
    }

    if err := m.initUserChannels(); err != nil {
        return err
    }

    if m.Team == nil {
        validTeamNames := make([]string, len(m.OtherTeams))
        for i, t := range m.OtherTeams {
@@ -157,6 +153,10 @@ func (m *Client) Login() error {
        return fmt.Errorf("Team '%s' not found in %v", m.Credentials.Team, validTeamNames)
    }

    if err := m.initUserChannels(); err != nil {
        return err
    }

    // connect websocket
    m.wsConnect()

@@ -532,7 +532,7 @@ func (m *Client) wsConnect() {
}

func (m *Client) doCheckAlive() error {
    if _, _, err := m.Client.GetMe(""); err != nil {
    if _, _, err := m.Client.GetPing(); err != nil {
        return err
    }

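The last hunk swaps the liveness probe in doCheckAlive from fetching the current user (GetMe) to a lighter ping (GetPing). Here is a minimal sketch of how such a probe is typically driven in a keep-alive loop; the pinger interface, keepAlive function, and fakeClient are illustrative, not matterclient's code.

```go
package main

import (
	"fmt"
	"time"
)

// pinger abstracts whatever "is the server still there?" call the client
// exposes; in the diff above that role moved from GetMe to GetPing.
type pinger interface {
	Ping() error
}

// keepAlive probes the connection on every tick and invokes reconnect
// after the first failed probe.
func keepAlive(p pinger, interval time.Duration, reconnect func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		if err := p.Ping(); err != nil {
			fmt.Println("check alive failed:", err)
			reconnect()
			return
		}
	}
}

type fakeClient struct{ fail bool }

func (f *fakeClient) Ping() error {
	if f.fail {
		return fmt.Errorf("connection lost")
	}
	return nil
}

func main() {
	c := &fakeClient{fail: true}
	keepAlive(c, 10*time.Millisecond, func() { fmt.Println("reconnecting...") })
}
```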
vendor/github.com/mattermost/logr/.gitignore (36 changes, generated, vendored)
@@ -1,36 +0,0 @@
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib
debug
dynip

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Output of profiler
*.prof

# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/

# IntelliJ config
.idea

# log files
*.log

# transient directories
vendor
output
build
app
logs

# test apps
test/cmd/testapp1/testapp1
test/cmd/simple/simple
vendor/github.com/mattermost/logr/.travis.yml (4 changes, generated, vendored)
@@ -1,4 +0,0 @@
language: go
sudo: false
go:
  - 1.x
vendor/github.com/mattermost/logr/LICENSE (21 changes, generated, vendored)
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2019 wiggin77

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vendor/github.com/mattermost/logr/README.md (193 changes, generated, vendored)
@@ -1,193 +0,0 @@
|
||||
# logr
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/mattermost/logr?status.svg)](http://godoc.org/github.com/mattermost/logr)
|
||||
[![Report Card](https://goreportcard.com/badge/github.com/mattermost/logr)](https://goreportcard.com/report/github.com/mattermost/logr)
|
||||
|
||||
Logr is a fully asynchronous, contextual logger for Go.
|
||||
|
||||
It is very much inspired by [Logrus](https://github.com/sirupsen/logrus) but addresses two issues:
|
||||
|
||||
1. Logr is fully asynchronous, meaning that all formatting and writing is done in the background. Latency sensitive applications benefit from not waiting for logging to complete.
|
||||
|
||||
2. Logr provides custom filters which provide more flexibility than Trace, Debug, Info... levels. If you need to temporarily increase verbosity of logging while tracking down a problem you can avoid the fire-hose that typically comes from Debug or Trace by using custom filters.
|
||||
|
||||
## Concepts
|
||||
|
||||
<!-- markdownlint-disable MD033 -->
|
||||
| entity | description |
|
||||
| ------ | ----------- |
|
||||
| Logr | Engine instance typically instantiated once; used to configure logging.<br>```lgr := &Logr{}```|
|
||||
| Logger | Provides contextual logging via fields; lightweight, can be created once and accessed globally or create on demand.<br>```logger := lgr.NewLogger()```<br>```logger2 := logger.WithField("user", "Sam")```|
|
||||
| Target | A destination for log items such as console, file, database or just about anything that can be written to. Each target has its own filter/level and formatter, and any number of targets can be added to a Logr. Targets for syslog and any io.Writer are built-in and it is easy to create your own. You can also use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) via a simple [adapter](https://github.com/wiggin77/logrus4logr).|
|
||||
| Filter | Determines which logging calls get written versus filtered out. Also determines which logging calls generate a stack trace.<br>```filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Fatal}```|
|
||||
| Formatter | Formats the output. Logr includes built-in formatters for JSON and plain text with delimiters. It is easy to create your own formatters or you can also use any [Logrus formatters](https://github.com/sirupsen/logrus#formatters) via a simple [adapter](https://github.com/wiggin77/logrus4logr).<br>```formatter := &format.Plain{Delim: " \| "}```|
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
// Create Logr instance.
|
||||
lgr := &logr.Logr{}
|
||||
|
||||
// Create a filter and formatter. Both can be shared by multiple
|
||||
// targets.
|
||||
filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Error}
|
||||
formatter := &format.Plain{Delim: " | "}
|
||||
|
||||
// WriterTarget outputs to any io.Writer
|
||||
t := target.NewWriterTarget(filter, formatter, os.StdOut, 1000)
|
||||
lgr.AddTarget(t)
|
||||
|
||||
// One or more Loggers can be created, shared, used concurrently,
|
||||
// or created on demand.
|
||||
logger := lgr.NewLogger().WithField("user", "Sarah")
|
||||
|
||||
// Now we can log to the target(s).
|
||||
logger.Debug("login attempt")
|
||||
logger.Error("login failed")
|
||||
|
||||
// Ensure targets are drained before application exit.
|
||||
lgr.Shutdown()
|
||||
```
|
||||
|
||||
## Fields
|
||||
|
||||
Fields allow for contextual logging, meaning information can be added to log statements without changing the statements themselves. Information can be shared across multiple logging statements thus allowing log analysis tools to group them.
|
||||
|
||||
Fields are added via Loggers:
|
||||
|
||||
```go
|
||||
lgr := &Logr{}
|
||||
// ... add targets ...
|
||||
logger := lgr.NewLogger().WithFields(logr.Fields{
|
||||
"user": user,
|
||||
"role": role})
|
||||
logger.Info("login attempt")
|
||||
// ... later ...
|
||||
logger.Info("login successful")
|
||||
```
|
||||
|
||||
`Logger.WithFields` can be used to create additional Loggers that add more fields.
|
||||
|
||||
Logr fields are inspired by and work the same as [Logrus fields](https://github.com/sirupsen/logrus#fields).
|
||||
|
||||
## Filters
|
||||
|
||||
Logr supports the traditional seven log levels via `logr.StdFilter`: Panic, Fatal, Error, Warning, Info, Debug, and Trace.
|
||||
|
||||
```go
|
||||
// When added to a target, this filter will only allow
|
||||
// log statements with level severity Warn or higher.
|
||||
// It will also generate stack traces for Error or higher.
|
||||
filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Error}
|
||||
```
|
||||
|
||||
Logr also supports custom filters (logr.CustomFilter) which allow fine grained inclusion of log items without turning on the fire-hose.
|
||||
|
||||
```go
|
||||
// create custom levels; use IDs > 10.
|
||||
LoginLevel := logr.Level{ID: 100, Name: "login ", Stacktrace: false}
|
||||
LogoutLevel := logr.Level{ID: 101, Name: "logout", Stacktrace: false}
|
||||
|
||||
lgr := &logr.Logr{}
|
||||
|
||||
// create a custom filter with custom levels.
|
||||
filter := &logr.CustomFilter{}
|
||||
filter.Add(LoginLevel, LogoutLevel)
|
||||
|
||||
formatter := &format.Plain{Delim: " | "}
|
||||
tgr := target.NewWriterTarget(filter, formatter, os.StdOut, 1000)
|
||||
lgr.AddTarget(tgr)
|
||||
logger := lgr.NewLogger().WithFields(logr.Fields{"user": "Bob", "role": "admin"})
|
||||
|
||||
logger.Log(LoginLevel, "this item will get logged")
|
||||
logger.Debug("won't be logged since Debug wasn't added to custom filter")
|
||||
```
|
||||
|
||||
Both filter types allow you to determine which levels require a stack trace to be output. Note that generating stack traces cannot happen fully asynchronously and thus add latency to the calling goroutine.
|
||||
|
||||
## Targets
|
||||
|
||||
There are built-in targets for outputting to syslog, file, or any `io.Writer`. More will be added.
|
||||
|
||||
You can use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) via a simple [adapter](https://github.com/wiggin77/logrus4logr).
|
||||
|
||||
You can create your own target by implementing the [Target](./target.go) interface.
|
||||
|
||||
An easier method is to use the [logr.Basic](./target.go) type target and build your functionality on that. Basic handles all the queuing and other plumbing so you only need to implement two methods. Example target that outputs to `io.Writer`:
|
||||
|
||||
```go
|
||||
type Writer struct {
|
||||
logr.Basic
|
||||
out io.Writer
|
||||
}
|
||||
|
||||
func NewWriterTarget(filter logr.Filter, formatter logr.Formatter, out io.Writer, maxQueue int) *Writer {
|
||||
w := &Writer{out: out}
|
||||
w.Basic.Start(w, w, filter, formatter, maxQueue)
|
||||
return w
|
||||
}
|
||||
|
||||
// Write will always be called by a single goroutine, so no locking needed.
|
||||
// Just convert a log record to a []byte using the formatter and output the
|
||||
// bytes to your sink.
|
||||
func (w *Writer) Write(rec *logr.LogRec) error {
|
||||
_, stacktrace := w.IsLevelEnabled(rec.Level())
|
||||
|
||||
// take a buffer from the pool to avoid allocations or just allocate a new one.
|
||||
buf := rec.Logger().Logr().BorrowBuffer()
|
||||
defer rec.Logger().Logr().ReleaseBuffer(buf)
|
||||
|
||||
buf, err := w.Formatter().Format(rec, stacktrace, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.out.Write(buf.Bytes())
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
## Formatters
|
||||
|
||||
Logr has two built-in formatters, one for JSON and the other plain, delimited text.
|
||||
|
||||
You can use any [Logrus formatters](https://github.com/sirupsen/logrus#formatters) via a simple [adapter](https://github.com/wiggin77/logrus4logr).
|
||||
|
||||
You can create your own formatter by implementing the [Formatter](./formatter.go) interface:
|
||||
|
||||
```go
|
||||
Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error)
|
||||
```
|
||||
|
||||
## Handlers
|
||||
|
||||
When creating the Logr instance, you can add several handlers that get called when exceptional events occur:
|
||||
|
||||
### ```Logr.OnLoggerError(err error)```
|
||||
|
||||
Called any time an internal logging error occurs. For example, this can happen when a target cannot connect to its data sink.
|
||||
|
||||
It may be tempting to log this error, however there is a danger that logging this will simply generate another error and so on. If you must log it, use a target and custom level specifically for this event and ensure it cannot generate more errors.
|
||||
|
||||
### ```Logr.OnQueueFull func(rec *LogRec, maxQueueSize int) bool```
|
||||
|
||||
Called on an attempt to add a log record to a full Logr queue. This generally means the Logr maximum queue size is too small, or at least one target is very slow. Logr maximum queue size can be changed before adding any targets via:
|
||||
|
||||
```go
|
||||
lgr := logr.Logr{MaxQueueSize: 10000}
|
||||
```
|
||||
|
||||
Returning true will drop the log record. False will block until the log record can be added, which creates a natural throttle at the expense of latency for the calling goroutine. The default is to block.
|
||||
|
||||
### ```Logr.OnTargetQueueFull func(target Target, rec *LogRec, maxQueueSize int) bool```
|
||||
|
||||
Called on an attempt to add a log record to a full target queue. This generally means your target's max queue size is too small, or the target is very slow to output.
|
||||
|
||||
As with the Logr queue, returning true will drop the log record. False will block until the log record can be added, which creates a natural throttle at the expense of latency for the calling goroutine. The default is to block.
|
||||
|
||||
### ```Logr.OnExit func(code int) and Logr.OnPanic func(err interface{})```
|
||||
|
||||
OnExit and OnPanic are called when the Logger.FatalXXX and Logger.PanicXXX functions are called respectively.
|
||||
|
||||
In both cases the default behavior is to shut down gracefully, draining all targets, and calling `os.Exit` or `panic` respectively.
|
||||
|
||||
When adding your own handlers, be sure to call `Logr.Shutdown` before exiting the application to avoid losing log records.
|
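Pulling the removed README's pieces together: the sketch below wires the handler fields described in its Handlers section into a small program, using only names and signatures that appear in this README and in the logr.go, levelstd.go, and format/plain.go diffs further down. It targets the removed mattermost/logr API and is a sketch, not matterbridge code.

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattermost/logr"
	"github.com/mattermost/logr/format"
	"github.com/mattermost/logr/target"
)

func main() {
	lgr := &logr.Logr{
		MaxQueueSize: 10000,
		// Drop records instead of blocking when the main queue is full.
		OnQueueFull: func(rec *logr.LogRec, maxQueueSize int) bool { return true },
		// Internal errors must not be logged back through the same pipeline.
		OnLoggerError: func(err error) { fmt.Fprintln(os.Stderr, "logr error:", err) },
	}

	// Filter and formatter as shown in the README's Usage section.
	filter := &logr.StdFilter{Lvl: logr.Info, Stacktrace: logr.Error}
	formatter := &format.Plain{Delim: " | "}
	if err := lgr.AddTarget(target.NewWriterTarget(filter, formatter, os.Stdout, 1000)); err != nil {
		panic(err)
	}

	logger := lgr.NewLogger().WithField("user", "Sam")
	logger.Info("login attempt")

	// Drain all targets before exit so no records are lost.
	if err := lgr.Shutdown(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```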
vendor/github.com/mattermost/logr/config.go (11 changes, generated, vendored)
@@ -1,11 +0,0 @@
package logr

import (
    "fmt"

    "github.com/wiggin77/cfg"
)

func ConfigLogger(config *cfg.Config) error {
    return fmt.Errorf("Not implemented yet")
}
vendor/github.com/mattermost/logr/const.go (34 changes, generated, vendored)
@@ -1,34 +0,0 @@
package logr

import "time"

// Defaults.
const (
    // DefaultMaxQueueSize is the default maximum queue size for Logr instances.
    DefaultMaxQueueSize = 1000

    // DefaultMaxStackFrames is the default maximum max number of stack frames collected
    // when generating stack traces for logging.
    DefaultMaxStackFrames = 30

    // MaxLevelID is the maximum value of a level ID. Some level cache implementations will
    // allocate a cache of this size. Cannot exceed uint.
    MaxLevelID = 256

    // DefaultEnqueueTimeout is the default amount of time a log record can take to be queued.
    // This only applies to blocking enqueue which happen after `logr.OnQueueFull` is called
    // and returns false.
    DefaultEnqueueTimeout = time.Second * 30

    // DefaultShutdownTimeout is the default amount of time `logr.Shutdown` can execute before
    // timing out.
    DefaultShutdownTimeout = time.Second * 30

    // DefaultFlushTimeout is the default amount of time `logr.Flush` can execute before
    // timing out.
    DefaultFlushTimeout = time.Second * 30

    // DefaultMaxPooledBuffer is the maximum size a pooled buffer can be.
    // Buffers that grow beyond this size are garbage collected.
    DefaultMaxPooledBuffer = 1024 * 1024
)
vendor/github.com/mattermost/logr/filter.go (26 changes, generated, vendored)
@@ -1,26 +0,0 @@
package logr

// LevelID is the unique id of each level.
type LevelID uint

// Level provides a mechanism to enable/disable specific log lines.
type Level struct {
    ID         LevelID
    Name       string
    Stacktrace bool
}

// String returns the name of this level.
func (level Level) String() string {
    return level.Name
}

// Filter allows targets to determine which Level(s) are active
// for logging and which Level(s) require a stack trace to be output.
// A default implementation using "panic, fatal..." is provided, and
// a more flexible alternative implementation is also provided that
// allows any number of custom levels.
type Filter interface {
    IsEnabled(Level) bool
    IsStacktraceEnabled(Level) bool
}
vendor/github.com/mattermost/logr/format/json.go (273 changes, generated, vendored)
@@ -1,273 +0,0 @@
|
||||
package format
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/francoispqt/gojay"
|
||||
"github.com/mattermost/logr"
|
||||
)
|
||||
|
||||
// ContextField is a name/value pair within the context fields.
|
||||
type ContextField struct {
|
||||
Key string
|
||||
Val interface{}
|
||||
}
|
||||
|
||||
// JSON formats log records as JSON.
|
||||
type JSON struct {
|
||||
// DisableTimestamp disables output of timestamp field.
|
||||
DisableTimestamp bool
|
||||
// DisableLevel disables output of level field.
|
||||
DisableLevel bool
|
||||
// DisableMsg disables output of msg field.
|
||||
DisableMsg bool
|
||||
// DisableContext disables output of all context fields.
|
||||
DisableContext bool
|
||||
// DisableStacktrace disables output of stack trace.
|
||||
DisableStacktrace bool
|
||||
|
||||
// TimestampFormat is an optional format for timestamps. If empty
|
||||
// then DefTimestampFormat is used.
|
||||
TimestampFormat string
|
||||
|
||||
// Deprecated: this has no effect.
|
||||
Indent string
|
||||
|
||||
// EscapeHTML determines if certain characters (e.g. `<`, `>`, `&`)
|
||||
// are escaped.
|
||||
EscapeHTML bool
|
||||
|
||||
// KeyTimestamp overrides the timestamp field key name.
|
||||
KeyTimestamp string
|
||||
|
||||
// KeyLevel overrides the level field key name.
|
||||
KeyLevel string
|
||||
|
||||
// KeyMsg overrides the msg field key name.
|
||||
KeyMsg string
|
||||
|
||||
// KeyContextFields when not empty will group all context fields
|
||||
// under this key.
|
||||
KeyContextFields string
|
||||
|
||||
// KeyStacktrace overrides the stacktrace field key name.
|
||||
KeyStacktrace string
|
||||
|
||||
// ContextSorter allows custom sorting for the context fields.
|
||||
ContextSorter func(fields logr.Fields) []ContextField
|
||||
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// Format converts a log record to bytes in JSON format.
|
||||
func (j *JSON) Format(rec *logr.LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) {
|
||||
j.once.Do(j.applyDefaultKeyNames)
|
||||
|
||||
if buf == nil {
|
||||
buf = &bytes.Buffer{}
|
||||
}
|
||||
enc := gojay.BorrowEncoder(buf)
|
||||
defer func() {
|
||||
enc.Release()
|
||||
}()
|
||||
|
||||
sorter := j.ContextSorter
|
||||
if sorter == nil {
|
||||
sorter = j.defaultContextSorter
|
||||
}
|
||||
|
||||
jlr := JSONLogRec{
|
||||
LogRec: rec,
|
||||
JSON: j,
|
||||
stacktrace: stacktrace,
|
||||
sorter: sorter,
|
||||
}
|
||||
|
||||
err := enc.EncodeObject(jlr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf.WriteByte('\n')
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func (j *JSON) applyDefaultKeyNames() {
|
||||
if j.KeyTimestamp == "" {
|
||||
j.KeyTimestamp = "timestamp"
|
||||
}
|
||||
if j.KeyLevel == "" {
|
||||
j.KeyLevel = "level"
|
||||
}
|
||||
if j.KeyMsg == "" {
|
||||
j.KeyMsg = "msg"
|
||||
}
|
||||
if j.KeyStacktrace == "" {
|
||||
j.KeyStacktrace = "stacktrace"
|
||||
}
|
||||
}
|
||||
|
||||
// defaultContextSorter sorts the context fields alphabetically by key.
|
||||
func (j *JSON) defaultContextSorter(fields logr.Fields) []ContextField {
|
||||
keys := make([]string, 0, len(fields))
|
||||
for k := range fields {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
cf := make([]ContextField, 0, len(keys))
|
||||
for _, k := range keys {
|
||||
cf = append(cf, ContextField{Key: k, Val: fields[k]})
|
||||
}
|
||||
return cf
|
||||
}
|
||||
|
||||
// JSONLogRec decorates a LogRec adding JSON encoding.
|
||||
type JSONLogRec struct {
|
||||
*logr.LogRec
|
||||
*JSON
|
||||
stacktrace bool
|
||||
sorter func(fields logr.Fields) []ContextField
|
||||
}
|
||||
|
||||
// MarshalJSONObject encodes the LogRec as JSON.
|
||||
func (rec JSONLogRec) MarshalJSONObject(enc *gojay.Encoder) {
|
||||
if !rec.DisableTimestamp {
|
||||
timestampFmt := rec.TimestampFormat
|
||||
if timestampFmt == "" {
|
||||
timestampFmt = logr.DefTimestampFormat
|
||||
}
|
||||
time := rec.Time()
|
||||
enc.AddTimeKey(rec.KeyTimestamp, &time, timestampFmt)
|
||||
}
|
||||
if !rec.DisableLevel {
|
||||
enc.AddStringKey(rec.KeyLevel, rec.Level().Name)
|
||||
}
|
||||
if !rec.DisableMsg {
|
||||
enc.AddStringKey(rec.KeyMsg, rec.Msg())
|
||||
}
|
||||
if !rec.DisableContext {
|
||||
ctxFields := rec.sorter(rec.Fields())
|
||||
if rec.KeyContextFields != "" {
|
||||
enc.AddObjectKey(rec.KeyContextFields, jsonFields(ctxFields))
|
||||
} else {
|
||||
if len(ctxFields) > 0 {
|
||||
for _, cf := range ctxFields {
|
||||
key := rec.prefixCollision(cf.Key)
|
||||
encodeField(enc, key, cf.Val)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if rec.stacktrace && !rec.DisableStacktrace {
|
||||
frames := rec.StackFrames()
|
||||
if len(frames) > 0 {
|
||||
enc.AddArrayKey(rec.KeyStacktrace, stackFrames(frames))
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// IsNil returns true if the LogRec pointer is nil.
|
||||
func (rec JSONLogRec) IsNil() bool {
|
||||
return rec.LogRec == nil
|
||||
}
|
||||
|
||||
func (rec JSONLogRec) prefixCollision(key string) string {
|
||||
switch key {
|
||||
case rec.KeyTimestamp, rec.KeyLevel, rec.KeyMsg, rec.KeyStacktrace:
|
||||
return rec.prefixCollision("_" + key)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
type stackFrames []runtime.Frame
|
||||
|
||||
// MarshalJSONArray encodes stackFrames slice as JSON.
|
||||
func (s stackFrames) MarshalJSONArray(enc *gojay.Encoder) {
|
||||
for _, frame := range s {
|
||||
enc.AddObject(stackFrame(frame))
|
||||
}
|
||||
}
|
||||
|
||||
// IsNil returns true if stackFrames is empty slice.
|
||||
func (s stackFrames) IsNil() bool {
|
||||
return len(s) == 0
|
||||
}
|
||||
|
||||
type stackFrame runtime.Frame
|
||||
|
||||
// MarshalJSONArray encodes stackFrame as JSON.
|
||||
func (f stackFrame) MarshalJSONObject(enc *gojay.Encoder) {
|
||||
enc.AddStringKey("Function", f.Function)
|
||||
enc.AddStringKey("File", f.File)
|
||||
enc.AddIntKey("Line", f.Line)
|
||||
}
|
||||
|
||||
func (f stackFrame) IsNil() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type jsonFields []ContextField
|
||||
|
||||
// MarshalJSONObject encodes Fields map to JSON.
|
||||
func (f jsonFields) MarshalJSONObject(enc *gojay.Encoder) {
|
||||
for _, ctxField := range f {
|
||||
encodeField(enc, ctxField.Key, ctxField.Val)
|
||||
}
|
||||
}
|
||||
|
||||
// IsNil returns true if map is nil.
|
||||
func (f jsonFields) IsNil() bool {
|
||||
return f == nil
|
||||
}
|
||||
|
||||
func encodeField(enc *gojay.Encoder, key string, val interface{}) {
|
||||
switch vt := val.(type) {
|
||||
case gojay.MarshalerJSONObject:
|
||||
enc.AddObjectKey(key, vt)
|
||||
case gojay.MarshalerJSONArray:
|
||||
enc.AddArrayKey(key, vt)
|
||||
case string:
|
||||
enc.AddStringKey(key, vt)
|
||||
case error:
|
||||
enc.AddStringKey(key, vt.Error())
|
||||
case bool:
|
||||
enc.AddBoolKey(key, vt)
|
||||
case int:
|
||||
enc.AddIntKey(key, vt)
|
||||
case int64:
|
||||
enc.AddInt64Key(key, vt)
|
||||
case int32:
|
||||
enc.AddIntKey(key, int(vt))
|
||||
case int16:
|
||||
enc.AddIntKey(key, int(vt))
|
||||
case int8:
|
||||
enc.AddIntKey(key, int(vt))
|
||||
case uint64:
|
||||
enc.AddIntKey(key, int(vt))
|
||||
case uint32:
|
||||
enc.AddIntKey(key, int(vt))
|
||||
case uint16:
|
||||
enc.AddIntKey(key, int(vt))
|
||||
case uint8:
|
||||
enc.AddIntKey(key, int(vt))
|
||||
case float64:
|
||||
enc.AddFloatKey(key, vt)
|
||||
case float32:
|
||||
enc.AddFloat32Key(key, vt)
|
||||
case *gojay.EmbeddedJSON:
|
||||
enc.AddEmbeddedJSONKey(key, vt)
|
||||
case time.Time:
|
||||
enc.AddTimeKey(key, &vt, logr.DefTimestampFormat)
|
||||
case *time.Time:
|
||||
enc.AddTimeKey(key, vt, logr.DefTimestampFormat)
|
||||
default:
|
||||
s := fmt.Sprintf("%v", vt)
|
||||
enc.AddStringKey(key, s)
|
||||
}
|
||||
}
|
vendor/github.com/mattermost/logr/format/plain.go (75 changes, generated, vendored)
@@ -1,75 +0,0 @@
|
||||
package format
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/mattermost/logr"
|
||||
)
|
||||
|
||||
// Plain is the simplest formatter, outputting only text with
|
||||
// no colors.
|
||||
type Plain struct {
|
||||
// DisableTimestamp disables output of timestamp field.
|
||||
DisableTimestamp bool
|
||||
// DisableLevel disables output of level field.
|
||||
DisableLevel bool
|
||||
// DisableMsg disables output of msg field.
|
||||
DisableMsg bool
|
||||
// DisableContext disables output of all context fields.
|
||||
DisableContext bool
|
||||
// DisableStacktrace disables output of stack trace.
|
||||
DisableStacktrace bool
|
||||
|
||||
// Delim is an optional delimiter output between each log field.
|
||||
// Defaults to a single space.
|
||||
Delim string
|
||||
|
||||
// TimestampFormat is an optional format for timestamps. If empty
|
||||
// then DefTimestampFormat is used.
|
||||
TimestampFormat string
|
||||
}
|
||||
|
||||
// Format converts a log record to bytes.
|
||||
func (p *Plain) Format(rec *logr.LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) {
|
||||
delim := p.Delim
|
||||
if delim == "" {
|
||||
delim = " "
|
||||
}
|
||||
if buf == nil {
|
||||
buf = &bytes.Buffer{}
|
||||
}
|
||||
|
||||
timestampFmt := p.TimestampFormat
|
||||
if timestampFmt == "" {
|
||||
timestampFmt = logr.DefTimestampFormat
|
||||
}
|
||||
|
||||
if !p.DisableTimestamp {
|
||||
var arr [128]byte
|
||||
tbuf := rec.Time().AppendFormat(arr[:0], timestampFmt)
|
||||
buf.Write(tbuf)
|
||||
buf.WriteString(delim)
|
||||
}
|
||||
if !p.DisableLevel {
|
||||
fmt.Fprintf(buf, "%v%s", rec.Level().Name, delim)
|
||||
}
|
||||
if !p.DisableMsg {
|
||||
fmt.Fprint(buf, rec.Msg(), delim)
|
||||
}
|
||||
if !p.DisableContext {
|
||||
ctx := rec.Fields()
|
||||
if len(ctx) > 0 {
|
||||
logr.WriteFields(buf, ctx, " ")
|
||||
}
|
||||
}
|
||||
if stacktrace && !p.DisableStacktrace {
|
||||
frames := rec.StackFrames()
|
||||
if len(frames) > 0 {
|
||||
buf.WriteString("\n")
|
||||
logr.WriteStacktrace(buf, rec.StackFrames())
|
||||
}
|
||||
}
|
||||
buf.WriteString("\n")
|
||||
return buf, nil
|
||||
}
|
vendor/github.com/mattermost/logr/formatter.go (119 changes, generated, vendored)
@@ -1,119 +0,0 @@
|
||||
package logr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Formatter turns a LogRec into a formatted string.
|
||||
type Formatter interface {
|
||||
// Format converts a log record to bytes. If buf is not nil then it will be
|
||||
// be filled with the formatted results, otherwise a new buffer will be allocated.
|
||||
Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error)
|
||||
}
|
||||
|
||||
const (
|
||||
// DefTimestampFormat is the default time stamp format used by
|
||||
// Plain formatter and others.
|
||||
DefTimestampFormat = "2006-01-02 15:04:05.000 Z07:00"
|
||||
)
|
||||
|
||||
// DefaultFormatter is the default formatter, outputting only text with
|
||||
// no colors and a space delimiter. Use `format.Plain` instead.
|
||||
type DefaultFormatter struct {
|
||||
}
|
||||
|
||||
// Format converts a log record to bytes.
|
||||
func (p *DefaultFormatter) Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) {
|
||||
if buf == nil {
|
||||
buf = &bytes.Buffer{}
|
||||
}
|
||||
delim := " "
|
||||
timestampFmt := DefTimestampFormat
|
||||
|
||||
fmt.Fprintf(buf, "%s%s", rec.Time().Format(timestampFmt), delim)
|
||||
fmt.Fprintf(buf, "%v%s", rec.Level(), delim)
|
||||
fmt.Fprint(buf, rec.Msg(), delim)
|
||||
|
||||
ctx := rec.Fields()
|
||||
if len(ctx) > 0 {
|
||||
WriteFields(buf, ctx, " ")
|
||||
}
|
||||
|
||||
if stacktrace {
|
||||
frames := rec.StackFrames()
|
||||
if len(frames) > 0 {
|
||||
buf.WriteString("\n")
|
||||
WriteStacktrace(buf, rec.StackFrames())
|
||||
}
|
||||
}
|
||||
buf.WriteString("\n")
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// WriteFields writes zero or more name value pairs to the io.Writer.
|
||||
// The pairs are sorted by key name and output in key=value format
|
||||
// with optional separator between fields.
|
||||
func WriteFields(w io.Writer, flds Fields, separator string) {
|
||||
keys := make([]string, 0, len(flds))
|
||||
for k := range flds {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
sep := ""
|
||||
for _, key := range keys {
|
||||
writeField(w, key, flds[key], sep)
|
||||
sep = separator
|
||||
}
|
||||
}
|
||||
|
||||
func writeField(w io.Writer, key string, val interface{}, sep string) {
|
||||
var template string
|
||||
switch v := val.(type) {
|
||||
case error:
|
||||
val := v.Error()
|
||||
if shouldQuote(val) {
|
||||
template = "%s%s=%q"
|
||||
} else {
|
||||
template = "%s%s=%s"
|
||||
}
|
||||
case string:
|
||||
if shouldQuote(v) {
|
||||
template = "%s%s=%q"
|
||||
} else {
|
||||
template = "%s%s=%s"
|
||||
}
|
||||
default:
|
||||
template = "%s%s=%v"
|
||||
}
|
||||
fmt.Fprintf(w, template, sep, key, val)
|
||||
}
|
||||
|
||||
// shouldQuote returns true if val contains any characters that might be unsafe
|
||||
// when injecting log output into an aggregator, viewer or report.
|
||||
func shouldQuote(val string) bool {
|
||||
for _, c := range val {
|
||||
if !((c >= '0' && c <= '9') ||
|
||||
(c >= 'a' && c <= 'z') ||
|
||||
(c >= 'A' && c <= 'Z')) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// WriteStacktrace formats and outputs a stack trace to an io.Writer.
|
||||
func WriteStacktrace(w io.Writer, frames []runtime.Frame) {
|
||||
for _, frame := range frames {
|
||||
if frame.Function != "" {
|
||||
fmt.Fprintf(w, " %s\n", frame.Function)
|
||||
}
|
||||
if frame.File != "" {
|
||||
fmt.Fprintf(w, " %s:%d\n", frame.File, frame.Line)
|
||||
}
|
||||
}
|
||||
}
|
vendor/github.com/mattermost/logr/levelcache.go (98 changes, generated, vendored)
@@ -1,98 +0,0 @@
|
||||
package logr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// LevelStatus represents whether a level is enabled and
|
||||
// requires a stack trace.
|
||||
type LevelStatus struct {
|
||||
Enabled bool
|
||||
Stacktrace bool
|
||||
empty bool
|
||||
}
|
||||
|
||||
type levelCache interface {
|
||||
setup()
|
||||
get(id LevelID) (LevelStatus, bool)
|
||||
put(id LevelID, status LevelStatus) error
|
||||
clear()
|
||||
}
|
||||
|
||||
// syncMapLevelCache uses sync.Map which may better handle large concurrency
|
||||
// scenarios.
|
||||
type syncMapLevelCache struct {
|
||||
m sync.Map
|
||||
}
|
||||
|
||||
func (c *syncMapLevelCache) setup() {
|
||||
c.clear()
|
||||
}
|
||||
|
||||
func (c *syncMapLevelCache) get(id LevelID) (LevelStatus, bool) {
|
||||
if id > MaxLevelID {
|
||||
return LevelStatus{}, false
|
||||
}
|
||||
s, _ := c.m.Load(id)
|
||||
status := s.(LevelStatus)
|
||||
return status, !status.empty
|
||||
}
|
||||
|
||||
func (c *syncMapLevelCache) put(id LevelID, status LevelStatus) error {
|
||||
if id > MaxLevelID {
|
||||
return fmt.Errorf("level id cannot exceed MaxLevelID (%d)", MaxLevelID)
|
||||
}
|
||||
c.m.Store(id, status)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *syncMapLevelCache) clear() {
|
||||
var i LevelID
|
||||
for i = 0; i < MaxLevelID; i++ {
|
||||
c.m.Store(i, LevelStatus{empty: true})
|
||||
}
|
||||
}
|
||||
|
||||
// arrayLevelCache using array and a mutex.
|
||||
type arrayLevelCache struct {
|
||||
arr [MaxLevelID + 1]LevelStatus
|
||||
mux sync.RWMutex
|
||||
}
|
||||
|
||||
func (c *arrayLevelCache) setup() {
|
||||
c.clear()
|
||||
}
|
||||
|
||||
//var dummy = LevelStatus{}
|
||||
|
||||
func (c *arrayLevelCache) get(id LevelID) (LevelStatus, bool) {
|
||||
if id > MaxLevelID {
|
||||
return LevelStatus{}, false
|
||||
}
|
||||
c.mux.RLock()
|
||||
status := c.arr[id]
|
||||
ok := !status.empty
|
||||
c.mux.RUnlock()
|
||||
return status, ok
|
||||
}
|
||||
|
||||
func (c *arrayLevelCache) put(id LevelID, status LevelStatus) error {
|
||||
if id > MaxLevelID {
|
||||
return fmt.Errorf("level id cannot exceed MaxLevelID (%d)", MaxLevelID)
|
||||
}
|
||||
c.mux.Lock()
|
||||
defer c.mux.Unlock()
|
||||
|
||||
c.arr[id] = status
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *arrayLevelCache) clear() {
|
||||
c.mux.Lock()
|
||||
defer c.mux.Unlock()
|
||||
|
||||
for i := range c.arr {
|
||||
c.arr[i] = LevelStatus{empty: true}
|
||||
}
|
||||
}
|
vendor/github.com/mattermost/logr/levelcustom.go (45 changes, generated, vendored)
@@ -1,45 +0,0 @@
|
||||
package logr
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// CustomFilter allows targets to enable logging via a list of levels.
|
||||
type CustomFilter struct {
|
||||
mux sync.RWMutex
|
||||
levels map[LevelID]Level
|
||||
}
|
||||
|
||||
// IsEnabled returns true if the specified Level exists in this list.
|
||||
func (st *CustomFilter) IsEnabled(level Level) bool {
|
||||
st.mux.RLock()
|
||||
defer st.mux.RUnlock()
|
||||
_, ok := st.levels[level.ID]
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsStacktraceEnabled returns true if the specified Level requires a stack trace.
|
||||
func (st *CustomFilter) IsStacktraceEnabled(level Level) bool {
|
||||
st.mux.RLock()
|
||||
defer st.mux.RUnlock()
|
||||
lvl, ok := st.levels[level.ID]
|
||||
if ok {
|
||||
return lvl.Stacktrace
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Add adds one or more levels to the list. Adding a level enables logging for
|
||||
// that level on any targets using this CustomFilter.
|
||||
func (st *CustomFilter) Add(levels ...Level) {
|
||||
st.mux.Lock()
|
||||
defer st.mux.Unlock()
|
||||
|
||||
if st.levels == nil {
|
||||
st.levels = make(map[LevelID]Level)
|
||||
}
|
||||
|
||||
for _, s := range levels {
|
||||
st.levels[s.ID] = s
|
||||
}
|
||||
}
|
vendor/github.com/mattermost/logr/levelstd.go (37 changes, generated, vendored)
@@ -1,37 +0,0 @@
|
||||
package logr
|
||||
|
||||
// StdFilter allows targets to filter via classic log levels where any level
|
||||
// beyond a certain verbosity/severity is enabled.
|
||||
type StdFilter struct {
|
||||
Lvl Level
|
||||
Stacktrace Level
|
||||
}
|
||||
|
||||
// IsEnabled returns true if the specified Level is at or above this verbosity. Also
|
||||
// determines if a stack trace is required.
|
||||
func (lt StdFilter) IsEnabled(level Level) bool {
|
||||
return level.ID <= lt.Lvl.ID
|
||||
}
|
||||
|
||||
// IsStacktraceEnabled returns true if the specified Level requires a stack trace.
|
||||
func (lt StdFilter) IsStacktraceEnabled(level Level) bool {
|
||||
return level.ID <= lt.Stacktrace.ID
|
||||
}
|
||||
|
||||
var (
|
||||
// Panic is the highest level of severity. Logs the message and then panics.
|
||||
Panic = Level{ID: 0, Name: "panic"}
|
||||
// Fatal designates a catastrophic error. Logs the message and then calls
|
||||
// `logr.Exit(1)`.
|
||||
Fatal = Level{ID: 1, Name: "fatal"}
|
||||
// Error designates a serious but possibly recoverable error.
|
||||
Error = Level{ID: 2, Name: "error"}
|
||||
// Warn designates non-critical error.
|
||||
Warn = Level{ID: 3, Name: "warn"}
|
||||
// Info designates information regarding application events.
|
||||
Info = Level{ID: 4, Name: "info"}
|
||||
// Debug designates verbose information typically used for debugging.
|
||||
Debug = Level{ID: 5, Name: "debug"}
|
||||
// Trace designates the highest verbosity of log output.
|
||||
Trace = Level{ID: 6, Name: "trace"}
|
||||
)
|
vendor/github.com/mattermost/logr/logger.go (218 changes, generated, vendored)
@@ -1,218 +0,0 @@
|
||||
package logr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Fields type, used to pass to `WithFields`.
|
||||
type Fields map[string]interface{}
|
||||
|
||||
// Logger provides context for logging via fields.
|
||||
type Logger struct {
|
||||
logr *Logr
|
||||
fields Fields
|
||||
}
|
||||
|
||||
// Logr returns the `Logr` instance that created this `Logger`.
|
||||
func (logger Logger) Logr() *Logr {
|
||||
return logger.logr
|
||||
}
|
||||
|
||||
// WithField creates a new `Logger` with any existing fields
|
||||
// plus the new one.
|
||||
func (logger Logger) WithField(key string, value interface{}) Logger {
|
||||
return logger.WithFields(Fields{key: value})
|
||||
}
|
||||
|
||||
// WithFields creates a new `Logger` with any existing fields
|
||||
// plus the new ones.
|
||||
func (logger Logger) WithFields(fields Fields) Logger {
|
||||
l := Logger{logr: logger.logr}
|
||||
// if parent has no fields then avoid creating a new map.
|
||||
oldLen := len(logger.fields)
|
||||
if oldLen == 0 {
|
||||
l.fields = fields
|
||||
return l
|
||||
}
|
||||
|
||||
l.fields = make(Fields, len(fields)+oldLen)
|
||||
for k, v := range logger.fields {
|
||||
l.fields[k] = v
|
||||
}
|
||||
for k, v := range fields {
|
||||
l.fields[k] = v
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// Log checks that the level matches one or more targets, and
|
||||
// if so, generates a log record that is added to the Logr queue.
|
||||
// Arguments are handled in the manner of fmt.Print.
|
||||
func (logger Logger) Log(lvl Level, args ...interface{}) {
|
||||
status := logger.logr.IsLevelEnabled(lvl)
|
||||
if status.Enabled {
|
||||
rec := NewLogRec(lvl, logger, "", args, status.Stacktrace)
|
||||
logger.logr.enqueue(rec)
|
||||
}
|
||||
}
|
||||
|
||||
// Trace is a convenience method equivalent to `Log(TraceLevel, args...)`.
|
||||
func (logger Logger) Trace(args ...interface{}) {
|
||||
logger.Log(Trace, args...)
|
||||
}
|
||||
|
||||
// Debug is a convenience method equivalent to `Log(DebugLevel, args...)`.
|
||||
func (logger Logger) Debug(args ...interface{}) {
|
||||
logger.Log(Debug, args...)
|
||||
}
|
||||
|
||||
// Print ensures compatibility with std lib logger.
|
||||
func (logger Logger) Print(args ...interface{}) {
|
||||
logger.Info(args...)
|
||||
}
|
||||
|
||||
// Info is a convenience method equivalent to `Log(InfoLevel, args...)`.
|
||||
func (logger Logger) Info(args ...interface{}) {
|
||||
logger.Log(Info, args...)
|
||||
}
|
||||
|
||||
// Warn is a convenience method equivalent to `Log(WarnLevel, args...)`.
|
||||
func (logger Logger) Warn(args ...interface{}) {
|
||||
logger.Log(Warn, args...)
|
||||
}
|
||||
|
||||
// Error is a convenience method equivalent to `Log(ErrorLevel, args...)`.
|
||||
func (logger Logger) Error(args ...interface{}) {
|
||||
logger.Log(Error, args...)
|
||||
}
|
||||
|
||||
// Fatal is a convenience method equivalent to `Log(FatalLevel, args...)`
|
||||
// followed by a call to os.Exit(1).
|
||||
func (logger Logger) Fatal(args ...interface{}) {
|
||||
logger.Log(Fatal, args...)
|
||||
logger.logr.exit(1)
|
||||
}
|
||||
|
||||
// Panic is a convenience method equivalent to `Log(PanicLevel, args...)`
|
||||
// followed by a call to panic().
|
||||
func (logger Logger) Panic(args ...interface{}) {
|
||||
logger.Log(Panic, args...)
|
||||
panic(fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
//
|
||||
// Printf style
|
||||
//
|
||||
|
||||
// Logf checks that the level matches one or more targets, and
|
||||
// if so, generates a log record that is added to the main
|
||||
// queue (channel). Arguments are handled in the manner of fmt.Printf.
|
||||
func (logger Logger) Logf(lvl Level, format string, args ...interface{}) {
|
||||
status := logger.logr.IsLevelEnabled(lvl)
|
||||
if status.Enabled {
|
||||
rec := NewLogRec(lvl, logger, format, args, status.Stacktrace)
|
||||
logger.logr.enqueue(rec)
|
||||
}
|
||||
}
|
||||
|
||||
// Tracef is a convenience method equivalent to `Logf(TraceLevel, args...)`.
|
||||
func (logger Logger) Tracef(format string, args ...interface{}) {
|
||||
logger.Logf(Trace, format, args...)
|
||||
}
|
||||
|
||||
// Debugf is a convenience method equivalent to `Logf(DebugLevel, args...)`.
|
||||
func (logger Logger) Debugf(format string, args ...interface{}) {
|
||||
logger.Logf(Debug, format, args...)
|
||||
}
|
||||
|
||||
// Infof is a convenience method equivalent to `Logf(InfoLevel, args...)`.
|
||||
func (logger Logger) Infof(format string, args ...interface{}) {
|
||||
logger.Logf(Info, format, args...)
|
||||
}
|
||||
|
||||
// Printf ensures compatibility with std lib logger.
|
||||
func (logger Logger) Printf(format string, args ...interface{}) {
|
||||
logger.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Warnf is a convenience method equivalent to `Logf(WarnLevel, args...)`.
|
||||
func (logger Logger) Warnf(format string, args ...interface{}) {
|
||||
logger.Logf(Warn, format, args...)
|
||||
}
|
||||
|
||||
// Errorf is a convenience method equivalent to `Logf(ErrorLevel, args...)`.
|
||||
func (logger Logger) Errorf(format string, args ...interface{}) {
|
||||
logger.Logf(Error, format, args...)
|
||||
}
|
||||
|
||||
// Fatalf is a convenience method equivalent to `Logf(FatalLevel, args...)`
|
||||
// followed by a call to os.Exit(1).
|
||||
func (logger Logger) Fatalf(format string, args ...interface{}) {
|
||||
logger.Logf(Fatal, format, args...)
|
||||
logger.logr.exit(1)
|
||||
}
|
||||
|
||||
// Panicf is a convenience method equivalent to `Logf(PanicLevel, args...)`
|
||||
// followed by a call to panic().
|
||||
func (logger Logger) Panicf(format string, args ...interface{}) {
|
||||
logger.Logf(Panic, format, args...)
|
||||
}
|
||||
|
||||
//
|
||||
// Println style
|
||||
//
|
||||
|
||||
// Logln checks that the level matches one or more targets, and
|
||||
// if so, generates a log record that is added to the main
|
||||
// queue (channel). Arguments are handled in the manner of fmt.Println.
|
||||
func (logger Logger) Logln(lvl Level, args ...interface{}) {
|
||||
status := logger.logr.IsLevelEnabled(lvl)
|
||||
if status.Enabled {
|
||||
rec := NewLogRec(lvl, logger, "", args, status.Stacktrace)
|
||||
rec.newline = true
|
||||
logger.logr.enqueue(rec)
|
||||
}
|
||||
}
|
||||
|
||||
// Traceln is a convenience method equivalent to `Logln(TraceLevel, args...)`.
|
||||
func (logger Logger) Traceln(args ...interface{}) {
|
||||
logger.Logln(Trace, args...)
|
||||
}
|
||||
|
||||
// Debugln is a convenience method equivalent to `Logln(DebugLevel, args...)`.
|
||||
func (logger Logger) Debugln(args ...interface{}) {
|
||||
logger.Logln(Debug, args...)
|
||||
}
|
||||
|
||||
// Infoln is a convenience method equivalent to `Logln(InfoLevel, args...)`.
|
||||
func (logger Logger) Infoln(args ...interface{}) {
|
||||
logger.Logln(Info, args...)
|
||||
}
|
||||
|
||||
// Println ensures compatibility with std lib logger.
|
||||
func (logger Logger) Println(args ...interface{}) {
|
||||
logger.Infoln(args...)
|
||||
}
|
||||
|
||||
// Warnln is a convenience method equivalent to `Logln(WarnLevel, args...)`.
|
||||
func (logger Logger) Warnln(args ...interface{}) {
|
||||
logger.Logln(Warn, args...)
|
||||
}
|
||||
|
||||
// Errorln is a convenience method equivalent to `Logln(ErrorLevel, args...)`.
|
||||
func (logger Logger) Errorln(args ...interface{}) {
|
||||
logger.Logln(Error, args...)
|
||||
}
|
||||
|
||||
// Fatalln is a convenience method equivalent to `Logln(FatalLevel, args...)`
|
||||
// followed by a call to os.Exit(1).
|
||||
func (logger Logger) Fatalln(args ...interface{}) {
|
||||
logger.Logln(Fatal, args...)
|
||||
logger.logr.exit(1)
|
||||
}
|
||||
|
||||
// Panicln is a convenience method equivalent to `Logln(PanicLevel, args...)`
|
||||
// followed by a call to panic().
|
||||
func (logger Logger) Panicln(args ...interface{}) {
|
||||
logger.Logln(Panic, args...)
|
||||
}
|
vendor/github.com/mattermost/logr/logr.go (664 changes, generated, vendored)
@@ -1,664 +0,0 @@
|
||||
package logr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/wiggin77/cfg"
|
||||
"github.com/wiggin77/merror"
|
||||
)
|
||||
|
||||
// Logr maintains a list of log targets and accepts incoming
|
||||
// log records.
|
||||
type Logr struct {
|
||||
tmux sync.RWMutex // target mutex
|
||||
targets []Target
|
||||
|
||||
mux sync.RWMutex
|
||||
maxQueueSizeActual int
|
||||
in chan *LogRec
|
||||
done chan struct{}
|
||||
once sync.Once
|
||||
shutdown bool
|
||||
lvlCache levelCache
|
||||
|
||||
metricsInitOnce sync.Once
|
||||
metricsCloseOnce sync.Once
|
||||
metricsDone chan struct{}
|
||||
metrics MetricsCollector
|
||||
queueSizeGauge Gauge
|
||||
loggedCounter Counter
|
||||
errorCounter Counter
|
||||
|
||||
bufferPool sync.Pool
|
||||
|
||||
// MaxQueueSize is the maximum number of log records that can be queued.
|
||||
// If exceeded, `OnQueueFull` is called which determines if the log
|
||||
// record will be dropped or block until add is successful.
|
||||
// If this is modified, it must be done before `Configure` or
|
||||
// `AddTarget`. Defaults to DefaultMaxQueueSize.
|
||||
MaxQueueSize int
|
||||
|
||||
// OnLoggerError, when not nil, is called any time an internal
|
||||
// logging error occurs. For example, this can happen when a
|
||||
// target cannot connect to its data sink.
|
||||
OnLoggerError func(error)
|
||||
|
||||
// OnQueueFull, when not nil, is called on an attempt to add
|
||||
// a log record to a full Logr queue.
|
||||
// `MaxQueueSize` can be used to modify the maximum queue size.
|
||||
// This function should return quickly, with a bool indicating whether
|
||||
// the log record should be dropped (true) or block until the log record
|
||||
// is successfully added (false). If nil then blocking (false) is assumed.
|
||||
OnQueueFull func(rec *LogRec, maxQueueSize int) bool
|
||||
|
||||
// OnTargetQueueFull, when not nil, is called on an attempt to add
|
||||
// a log record to a full target queue provided the target supports reporting
|
||||
// this condition.
|
||||
// This function should return quickly, with a bool indicating whether
|
||||
// the log record should be dropped (true) or block until the log record
|
||||
// is successfully added (false). If nil then blocking (false) is assumed.
|
||||
OnTargetQueueFull func(target Target, rec *LogRec, maxQueueSize int) bool
|
||||
|
||||
// OnExit, when not nil, is called when a FatalXXX style log API is called.
|
||||
// When nil, then the default behavior is to cleanly shut down this Logr and
|
||||
// call `os.Exit(code)`.
|
||||
OnExit func(code int)
|
||||
|
||||
// OnPanic, when not nil, is called when a PanicXXX style log API is called.
|
||||
// When nil, then the default behavior is to cleanly shut down this Logr and
|
||||
// call `panic(err)`.
|
||||
OnPanic func(err interface{})
|
||||
|
||||
// EnqueueTimeout is the amount of time a log record can take to be queued.
|
||||
// This only applies to blocking enqueue which happen after `logr.OnQueueFull`
|
||||
// is called and returns false.
|
||||
EnqueueTimeout time.Duration
|
||||
|
||||
// ShutdownTimeout is the amount of time `logr.Shutdown` can execute before
|
||||
// timing out.
|
||||
ShutdownTimeout time.Duration
|
||||
|
||||
// FlushTimeout is the amount of time `logr.Flush` can execute before
|
||||
// timing out.
|
||||
FlushTimeout time.Duration
|
||||
|
||||
// UseSyncMapLevelCache can be set to true before the first target is added
|
||||
// when high concurrency (e.g. >32 cores) is expected. This may improve
|
||||
// performance with large numbers of cores - benchmark for your use case.
|
||||
UseSyncMapLevelCache bool
|
||||
|
||||
// MaxPooledFormatBuffer determines the maximum size of a buffer that can be
|
||||
// pooled. To reduce allocations, the buffers needed during formatting (etc)
|
||||
// are pooled. A very large log item will grow a buffer that could stay in
|
||||
// memory indefinitely. This settings lets you control how big a pooled buffer
|
||||
// can be - anything larger will be garbage collected after use.
|
||||
// Defaults to 1MB.
|
||||
MaxPooledBuffer int
|
||||
|
||||
// DisableBufferPool when true disables the buffer pool. See MaxPooledBuffer.
|
||||
DisableBufferPool bool
|
||||
|
||||
// MetricsUpdateFreqMillis determines how often polled metrics are updated
|
||||
// when metrics are enabled.
|
||||
MetricsUpdateFreqMillis int64
|
||||
}
|
||||
|
||||
// Configure adds/removes targets via the supplied `Config`.
|
||||
func (logr *Logr) Configure(config *cfg.Config) error {
|
||||
// TODO
|
||||
return fmt.Errorf("not implemented yet")
|
||||
}
|
||||
|
||||
func (logr *Logr) ensureInit() {
|
||||
logr.once.Do(func() {
|
||||
defer func() {
|
||||
go logr.start()
|
||||
}()
|
||||
|
||||
logr.mux.Lock()
|
||||
defer logr.mux.Unlock()
|
||||
|
||||
logr.maxQueueSizeActual = logr.MaxQueueSize
|
||||
if logr.maxQueueSizeActual == 0 {
|
||||
logr.maxQueueSizeActual = DefaultMaxQueueSize
|
||||
}
|
||||
|
||||
if logr.maxQueueSizeActual < 0 {
|
||||
logr.maxQueueSizeActual = 0
|
||||
}
|
||||
|
||||
logr.in = make(chan *LogRec, logr.maxQueueSizeActual)
|
||||
logr.done = make(chan struct{})
|
||||
|
||||
if logr.UseSyncMapLevelCache {
|
||||
logr.lvlCache = &syncMapLevelCache{}
|
||||
} else {
|
||||
logr.lvlCache = &arrayLevelCache{}
|
||||
}
|
||||
|
||||
if logr.MaxPooledBuffer == 0 {
|
||||
logr.MaxPooledBuffer = DefaultMaxPooledBuffer
|
||||
}
|
||||
logr.bufferPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
}
|
||||
|
||||
logr.lvlCache.setup()
|
||||
})
|
||||
}
|
||||
|
||||
// AddTarget adds one or more targets to the logger which will receive
|
||||
// log records for outputting.
|
||||
func (logr *Logr) AddTarget(targets ...Target) error {
|
||||
if logr.IsShutdown() {
|
||||
return fmt.Errorf("AddTarget called after Logr shut down")
|
||||
}
|
||||
|
||||
logr.ensureInit()
|
||||
metrics := logr.getMetricsCollector()
|
||||
defer logr.ResetLevelCache() // call this after tmux is released
|
||||
|
||||
logr.tmux.Lock()
|
||||
defer logr.tmux.Unlock()
|
||||
|
||||
errs := merror.New()
|
||||
for _, t := range targets {
|
||||
if t == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
logr.targets = append(logr.targets, t)
|
||||
if metrics != nil {
|
||||
if tm, ok := t.(TargetWithMetrics); ok {
|
||||
if err := tm.EnableMetrics(metrics, logr.MetricsUpdateFreqMillis); err != nil {
|
||||
errs.Append(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return errs.ErrorOrNil()
|
||||
}
|
||||
|
||||
// NewLogger creates a Logger using defaults. A `Logger` is light-weight
|
||||
// enough to create on-demand, but typically one or more Loggers are
|
||||
// created and re-used.
|
||||
func (logr *Logr) NewLogger() Logger {
|
||||
logger := Logger{logr: logr}
|
||||
return logger
|
||||
}
|
||||
|
||||
var levelStatusDisabled = LevelStatus{}
|
||||
|
||||
// IsLevelEnabled returns true if at least one target has the specified
|
||||
// level enabled. The result is cached so that subsequent checks are fast.
|
||||
func (logr *Logr) IsLevelEnabled(lvl Level) LevelStatus {
|
||||
status, ok := logr.isLevelEnabledFromCache(lvl)
|
||||
if ok {
|
||||
return status
|
||||
}
|
||||
|
||||
// Check each target.
|
||||
logr.tmux.RLock()
|
||||
for _, t := range logr.targets {
|
||||
e, s := t.IsLevelEnabled(lvl)
|
||||
if e {
|
||||
status.Enabled = true
|
||||
if s {
|
||||
status.Stacktrace = true
|
||||
break // if both enabled then no sense checking more targets
|
||||
}
|
||||
}
|
||||
}
|
||||
logr.tmux.RUnlock()
|
||||
|
||||
// Cache and return the result.
|
||||
if err := logr.updateLevelCache(lvl.ID, status); err != nil {
|
||||
logr.ReportError(err)
|
||||
return LevelStatus{}
|
||||
}
|
||||
return status
|
||||
}
|
||||
|
||||
func (logr *Logr) isLevelEnabledFromCache(lvl Level) (LevelStatus, bool) {
|
||||
logr.mux.RLock()
|
||||
defer logr.mux.RUnlock()
|
||||
|
||||
// Don't accept new log records after shutdown.
|
||||
if logr.shutdown {
|
||||
return levelStatusDisabled, true
|
||||
}
|
||||
|
||||
// Check cache. lvlCache may still be nil if no targets added.
|
||||
if logr.lvlCache == nil {
|
||||
return levelStatusDisabled, true
|
||||
}
|
||||
status, ok := logr.lvlCache.get(lvl.ID)
|
||||
if ok {
|
||||
return status, true
|
||||
}
|
||||
return LevelStatus{}, false
|
||||
}
|
||||
|
||||
func (logr *Logr) updateLevelCache(id LevelID, status LevelStatus) error {
|
||||
logr.mux.RLock()
|
||||
defer logr.mux.RUnlock()
|
||||
if logr.lvlCache != nil {
|
||||
return logr.lvlCache.put(id, status)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasTargets returns true only if at least one target exists within the Logr.
|
||||
func (logr *Logr) HasTargets() bool {
|
||||
logr.tmux.RLock()
|
||||
defer logr.tmux.RUnlock()
|
||||
return len(logr.targets) > 0
|
||||
}
|
||||
|
||||
// TargetInfo provides name and type for a Target.
|
||||
type TargetInfo struct {
|
||||
Name string
|
||||
Type string
|
||||
}
|
||||
|
||||
// TargetInfos enumerates all the targets added to this Logr.
|
||||
// The resulting slice represents a snapshot at time of calling.
|
||||
func (logr *Logr) TargetInfos() []TargetInfo {
|
||||
logr.tmux.RLock()
|
||||
defer logr.tmux.RUnlock()
|
||||
|
||||
infos := make([]TargetInfo, 0)
|
||||
|
||||
for _, t := range logr.targets {
|
||||
inf := TargetInfo{
|
||||
Name: fmt.Sprintf("%v", t),
|
||||
Type: fmt.Sprintf("%T", t),
|
||||
}
|
||||
infos = append(infos, inf)
|
||||
}
|
||||
return infos
|
||||
}
|
||||
|
||||
// RemoveTargets safely removes one or more targets based on the filtering method.
|
||||
// f should return true to delete the target, false to keep it.
|
||||
// When removing a target, best effort is made to write any queued log records before
|
||||
// closing, with cxt determining how much time can be spent in total.
|
||||
// Note, keep the timeout short since this method blocks certain logging operations.
|
||||
func (logr *Logr) RemoveTargets(cxt context.Context, f func(ti TargetInfo) bool) error {
|
||||
var removed bool
|
||||
defer func() {
|
||||
if removed {
|
||||
// call this after tmux is released since
|
||||
// it will lock mux and we don't want to
|
||||
// introduce possible deadlock.
|
||||
logr.ResetLevelCache()
|
||||
}
|
||||
}()
|
||||
|
||||
errs := merror.New()
|
||||
|
||||
logr.tmux.Lock()
|
||||
defer logr.tmux.Unlock()
|
||||
|
||||
cp := make([]Target, 0)
|
||||
|
||||
for _, t := range logr.targets {
|
||||
inf := TargetInfo{
|
||||
Name: fmt.Sprintf("%v", t),
|
||||
Type: fmt.Sprintf("%T", t),
|
||||
}
|
||||
if f(inf) {
|
||||
if err := t.Shutdown(cxt); err != nil {
|
||||
errs.Append(err)
|
||||
}
|
||||
removed = true
|
||||
} else {
|
||||
cp = append(cp, t)
|
||||
}
|
||||
}
|
||||
logr.targets = cp
|
||||
return errs.ErrorOrNil()
|
||||
}

// ResetLevelCache resets the cached results of `IsLevelEnabled`. This is
// called any time a Target is added or a target's level is changed.
func (logr *Logr) ResetLevelCache() {
    // Write lock so that new cache entries cannot be stored while we
    // clear the cache.
    logr.mux.Lock()
    defer logr.mux.Unlock()
    logr.resetLevelCache()
}

// resetLevelCache empties the level cache without locking.
// mux.Lock must be held before calling this function.
func (logr *Logr) resetLevelCache() {
    // lvlCache may still be nil if no targets added.
    if logr.lvlCache != nil {
        logr.lvlCache.clear()
    }
}

// enqueue adds a log record to the logr queue. If the queue is full then
// this function either blocks or the log record is dropped, depending on
// the result of calling `OnQueueFull`.
func (logr *Logr) enqueue(rec *LogRec) {
    if logr.in == nil {
        logr.ReportError(fmt.Errorf("AddTarget or Configure must be called before enqueue"))
    }

    select {
    case logr.in <- rec:
    default:
        if logr.OnQueueFull != nil && logr.OnQueueFull(rec, logr.maxQueueSizeActual) {
            return // drop the record
        }
        select {
        case <-time.After(logr.enqueueTimeout()):
            logr.ReportError(fmt.Errorf("enqueue timed out for log rec [%v]", rec))
        case logr.in <- rec: // block until success or timeout
        }
    }
}
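
// Editor's sketch (not part of the upstream file): one way a host application
// might set the OnQueueFull callback used by enqueue above, dropping records
// instead of blocking and counting the overflow. The callback's parameter
// types are assumed from the call site above (the queue-size argument is
// assumed to be an int), droppedRecords is a hypothetical counter, and the
// sync/atomic import would be needed.
var droppedRecords int64

func installDropOnFull(lgr *Logr) {
    lgr.OnQueueFull = func(rec *LogRec, maxQueueSize int) bool {
        atomic.AddInt64(&droppedRecords, 1)
        return true // true tells enqueue to drop the record rather than block
    }
}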

// exit is called by one of the FatalXXX style APIs. If `logr.OnExit` is not nil
// then that method is called, otherwise the default behavior is to shut down this
// Logr cleanly then call `os.Exit(code)`.
func (logr *Logr) exit(code int) {
    if logr.OnExit != nil {
        logr.OnExit(code)
        return
    }

    if err := logr.Shutdown(); err != nil {
        logr.ReportError(err)
    }
    os.Exit(code)
}

// panic is called by one of the PanicXXX style APIs. If `logr.OnPanic` is not nil
// then that method is called, otherwise the default behavior is to shut down this
// Logr cleanly then call `panic(err)`.
func (logr *Logr) panic(err interface{}) {
    if logr.OnPanic != nil {
        logr.OnPanic(err)
        return
    }

    if err := logr.Shutdown(); err != nil {
        logr.ReportError(err)
    }
    panic(err)
}

// Flush blocks while flushing the logr queue and all target queues, by
// writing existing log records to valid targets.
// Any attempts to add new log records will block until flush is complete.
// `logr.FlushTimeout` determines how long flush can execute before
// timing out. Use `IsTimeoutError` to determine if the returned error is
// due to a timeout.
func (logr *Logr) Flush() error {
    ctx, cancel := context.WithTimeout(context.Background(), logr.flushTimeout())
    defer cancel()
    return logr.FlushWithTimeout(ctx)
}

// FlushWithTimeout blocks while flushing the logr queue and all target queues, by
// writing existing log records to valid targets.
// Any attempts to add new log records will block until flush is complete.
// Use `IsTimeoutError` to determine if the returned error is
// due to a timeout.
func (logr *Logr) FlushWithTimeout(ctx context.Context) error {
    if !logr.HasTargets() {
        return nil
    }

    if logr.IsShutdown() {
        return errors.New("Flush called on shut down Logr")
    }

    rec := newFlushLogRec(logr.NewLogger())
    logr.enqueue(rec)

    select {
    case <-ctx.Done():
        return newTimeoutError("logr queue shutdown timeout")
    case <-rec.flush:
    }
    return nil
}
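
// Editor's sketch (not part of the upstream file): flush with a caller-chosen
// deadline instead of logr.FlushTimeout. The helper name flushWithDeadline is
// hypothetical, and IsTimeoutError is assumed to have the signature
// func(error) bool implied by the doc comments above.
func flushWithDeadline(lgr *Logr, d time.Duration) error {
    ctx, cancel := context.WithTimeout(context.Background(), d)
    defer cancel()
    err := lgr.FlushWithTimeout(ctx)
    if err != nil && IsTimeoutError(err) {
        return fmt.Errorf("flush timed out after %v: %w", d, err)
    }
    return err
}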

// IsShutdown returns true if this Logr instance has been shut down.
// No further log records can be enqueued and no targets added after
// shutdown.
func (logr *Logr) IsShutdown() bool {
    logr.mux.Lock()
    defer logr.mux.Unlock()
    return logr.shutdown
}

// Shutdown cleanly stops the logging engine after making best efforts
// to flush all targets. Call this function right before application
// exit - logr cannot be restarted once shut down.
// `logr.ShutdownTimeout` determines how long shutdown can execute before
// timing out. Use `IsTimeoutError` to determine if the returned error is
// due to a timeout.
func (logr *Logr) Shutdown() error {
    ctx, cancel := context.WithTimeout(context.Background(), logr.shutdownTimeout())
    defer cancel()
    return logr.ShutdownWithTimeout(ctx)
}

// ShutdownWithTimeout cleanly stops the logging engine after making best efforts
// to flush all targets. Call this function right before application
// exit - logr cannot be restarted once shut down.
// Use `IsTimeoutError` to determine if the returned error is due to a
// timeout.
func (logr *Logr) ShutdownWithTimeout(ctx context.Context) error {
    logr.mux.Lock()
    if logr.shutdown {
        logr.mux.Unlock()
        return errors.New("Shutdown called again after shut down")
    }
    logr.shutdown = true
    logr.resetLevelCache()
    logr.mux.Unlock()

    logr.metricsCloseOnce.Do(func() {
        if logr.metricsDone != nil {
            close(logr.metricsDone)
        }
    })

    errs := merror.New()

    // close the incoming channel and wait for read loop to exit.
    if logr.in != nil {
        close(logr.in)
        select {
        case <-ctx.Done():
            errs.Append(newTimeoutError("logr queue shutdown timeout"))
        case <-logr.done:
        }
    }

    // logr.in channel should now be drained to targets and no more log records
    // can be added.
    logr.tmux.RLock()
    defer logr.tmux.RUnlock()
    for _, t := range logr.targets {
        err := t.Shutdown(ctx)
        if err != nil {
            errs.Append(err)
        }
    }
    return errs.ErrorOrNil()
}
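
// Editor's sketch (not part of the upstream file): a typical end-of-program
// teardown that flushes before shutting down and reports failures to stderr.
// The helper name shutdownQuietly is hypothetical.
func shutdownQuietly(lgr *Logr) {
    if lgr.IsShutdown() {
        return
    }
    if err := lgr.Flush(); err != nil {
        fmt.Fprintln(os.Stderr, "logr flush failed:", err)
    }
    if err := lgr.Shutdown(); err != nil {
        fmt.Fprintln(os.Stderr, "logr shutdown failed:", err)
    }
}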

// ReportError is used to notify the host application of any internal logging errors.
// If `OnLoggerError` is not nil, it is called with the error, otherwise the error is
// output to `os.Stderr`.
func (logr *Logr) ReportError(err interface{}) {
    logr.incErrorCounter()

    if logr.OnLoggerError == nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    logr.OnLoggerError(fmt.Errorf("%v", err))
}

// BorrowBuffer borrows a buffer from the pool. Release the buffer to reduce garbage collection.
func (logr *Logr) BorrowBuffer() *bytes.Buffer {
    if logr.DisableBufferPool {
        return &bytes.Buffer{}
    }
    return logr.bufferPool.Get().(*bytes.Buffer)
}

// ReleaseBuffer returns a buffer to the pool to reduce garbage collection. The buffer is only
// retained if its capacity is less than MaxPooledBuffer.
func (logr *Logr) ReleaseBuffer(buf *bytes.Buffer) {
    if !logr.DisableBufferPool && buf.Cap() < logr.MaxPooledBuffer {
        buf.Reset()
        logr.bufferPool.Put(buf)
    }
}
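
// Editor's sketch (not part of the upstream file): the intended borrow/release
// pattern, bracketing a single formatting operation so the buffer can be
// pooled. The helper name formatWithPool is hypothetical.
func formatWithPool(lgr *Logr, s fmt.Stringer) string {
    buf := lgr.BorrowBuffer()
    defer lgr.ReleaseBuffer(buf)
    buf.WriteString(s.String())
    return buf.String()
}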

// enqueueTimeout returns the amount of time a log record can take to be queued.
// This only applies to blocking enqueues, which happen after `logr.OnQueueFull`
// is called and returns false.
func (logr *Logr) enqueueTimeout() time.Duration {
    if logr.EnqueueTimeout == 0 {
        return DefaultEnqueueTimeout
    }
    return logr.EnqueueTimeout
}

// shutdownTimeout returns the timeout duration for `logr.Shutdown`.
func (logr *Logr) shutdownTimeout() time.Duration {
    if logr.ShutdownTimeout == 0 {
        return DefaultShutdownTimeout
    }
    return logr.ShutdownTimeout
}

// flushTimeout returns the timeout duration for `logr.Flush`.
func (logr *Logr) flushTimeout() time.Duration {
    if logr.FlushTimeout == 0 {
        return DefaultFlushTimeout
    }
    return logr.FlushTimeout
}

// start selects on incoming log records until done channel signals.
// Incoming log records are fanned out to all log targets.
func (logr *Logr) start() {
    defer func() {
        if r := recover(); r != nil {
            logr.ReportError(r)
            go logr.start()
        }
    }()

    for rec := range logr.in {
        if rec.flush != nil {
            logr.flush(rec.flush)
        } else {
            rec.prep()
            logr.fanout(rec)
        }
    }
    close(logr.done)
}

// startMetricsUpdater updates the metrics for any polled values every `MetricsUpdateFreqMillis`
// milliseconds until logr is closed.
func (logr *Logr) startMetricsUpdater() {
    for {
        updateFreq := logr.getMetricsUpdateFreqMillis()
        if updateFreq == 0 {
            updateFreq = DefMetricsUpdateFreqMillis
        }
        if updateFreq < 250 {
            updateFreq = 250 // don't peg the CPU
        }

        select {
        case <-logr.metricsDone:
            return
        case <-time.After(time.Duration(updateFreq) * time.Millisecond):
            logr.setQueueSizeGauge(float64(len(logr.in)))
        }
    }
}

func (logr *Logr) getMetricsUpdateFreqMillis() int64 {
    logr.mux.RLock()
    defer logr.mux.RUnlock()
    return logr.MetricsUpdateFreqMillis
}

// fanout pushes a LogRec to all targets.
func (logr *Logr) fanout(rec *LogRec) {
    var target Target
    defer func() {
        if r := recover(); r != nil {
            logr.ReportError(fmt.Errorf("fanout failed for target %s, %v", target, r))
        }
    }()

    var logged bool
    defer func() {
        if logged {
            logr.incLoggedCounter() // call this after tmux is released
        }
    }()

    logr.tmux.RLock()
    defer logr.tmux.RUnlock()
    for _, target = range logr.targets {
        if enabled, _ := target.IsLevelEnabled(rec.Level()); enabled {
            target.Log(rec)
            logged = true
        }
    }
}

// flush drains the queue and notifies when done.
func (logr *Logr) flush(done chan<- struct{}) {
    // first drain the logr queue.
loop:
    for {
        var rec *LogRec
        select {
        case rec = <-logr.in:
            if rec.flush == nil {
                rec.prep()
                logr.fanout(rec)
            }
        default:
            break loop
        }
    }

    logger := logr.NewLogger()

    // drain all the targets; block until finished.
    logr.tmux.RLock()
    defer logr.tmux.RUnlock()
    for _, target := range logr.targets {
        rec := newFlushLogRec(logger)
        target.Log(rec)
        <-rec.flush
    }
    done <- struct{}{}
}