
Compare commits


No commits in common. "master" and "v2.14.0-rc1" have entirely different histories.

292 changed files with 10295 additions and 16461 deletions

View File

@ -12,14 +12,14 @@ on:
jobs: jobs:
build: build:
runs-on: "ubuntu-24.04" runs-on: "ubuntu-22.04"
steps: steps:
- name: "checkout repository" - name: "checkout repository"
uses: "actions/checkout@v3" uses: "actions/checkout@v3"
- name: "setup go" - name: "setup go"
uses: "actions/setup-go@v3" uses: "actions/setup-go@v3"
with: with:
go-version: "1.24" go-version: "1.22"
- name: "install python3-pytest" - name: "install python3-pytest"
run: "sudo apt install -y python3-pytest" run: "sudo apt install -y python3-pytest"
- name: "make install" - name: "make install"

View File

@ -1,6 +1,5 @@
# .goreleaser.yml # .goreleaser.yml
# Build customization # Build customization
version: 2
project_name: ergo project_name: ergo
builds: builds:
- main: ergo.go - main: ergo.go
@ -18,7 +17,6 @@ builds:
- amd64 - amd64
- arm - arm
- arm64 - arm64
- riscv64
goarm: goarm:
- 6 - 6
ignore: ignore:
@ -26,41 +24,30 @@ builds:
goarch: arm goarch: arm
- goos: windows - goos: windows
goarch: arm64 goarch: arm64
- goos: windows
goarch: riscv64
- goos: darwin - goos: darwin
goarch: arm goarch: arm
- goos: darwin
goarch: riscv64
- goos: freebsd - goos: freebsd
goarch: arm goarch: arm
- goos: freebsd - goos: freebsd
goarch: arm64 goarch: arm64
- goos: freebsd
goarch: riscv64
- goos: openbsd - goos: openbsd
goarch: arm goarch: arm
- goos: openbsd - goos: openbsd
goarch: arm64 goarch: arm64
- goos: openbsd
goarch: riscv64
- goos: plan9 - goos: plan9
goarch: arm goarch: arm
- goos: plan9 - goos: plan9
goarch: arm64 goarch: arm64
- goos: plan9
goarch: riscv64
flags: flags:
- -trimpath - -trimpath
archives: archives:
- -
name_template: >- name_template: "{{ .ProjectName }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
{{ .ProjectName }}-{{ .Version }}-
{{- if eq .Os "darwin" }}macos{{- else }}{{ .Os }}{{ end -}}-
{{- if eq .Arch "amd64" }}x86_64{{- else }}{{ .Arch }}{{ end -}}
{{ if .Arm }}v{{ .Arm }}{{ end -}}
format: tar.gz format: tar.gz
replacements:
amd64: x86_64
darwin: macos
format_overrides: format_overrides:
- goos: windows - goos: windows
format: zip format: zip
@ -71,7 +58,6 @@ archives:
- ergo.motd - ergo.motd
- default.yaml - default.yaml
- traditional.yaml - traditional.yaml
- docs/API.md
- docs/MANUAL.md - docs/MANUAL.md
- docs/USERGUIDE.md - docs/USERGUIDE.md
- languages/*.yaml - languages/*.yaml

View File

@ -1,41 +1,13 @@
# Changelog # Changelog
All notable changes to Ergo will be documented in this file. All notable changes to Ergo will be documented in this file.
## [2.15.0] - 2025-01-26 ## [2.14.0-rc1] - 2024-06-09
We're pleased to be publishing v2.15.0, a new stable release. This release adds support for mobile push notifications, via the [draft/webpush](https://github.com/ircv3/ircv3-specifications/pull/471) specification. More information on this is available in the [manual](https://github.com/ergochat/ergo/blob/ab2d842b270d9df217c779df9c7a5c594d85fdd5/docs/MANUAL.md#push-notifications) and [user guide](https://github.com/ergochat/ergo/blob/ab2d842b270d9df217c779df9c7a5c594d85fdd5/docs/USERGUIDE.md#push-notifications). This feature is still considered to be in an experimental state; `default.yaml` ships with it disabled, and its configuration may have backwards-incompatible changes in the future. We're pleased to be publishing the release candidate for v2.14.0 (the official release should follow within two weeks or so). This release contains primarily bug fixes, with the addition of some new authentication mechanisms for integrating with web clients.
This release includes changes to the config file format, all of which are fully backwards-compatible and do not require updating the file before upgrading.
This release includes a database change. If you have `datastore.autoupgrade` set to `true` in your configuration, it will be automatically applied when you restart Ergo. Otherwise, you can update the database manually by running `ergo upgradedb` (see the manual for complete instructions).
Many thanks to [@delthas](https://github.com/delthas), [@donatj](https://github.com/donatj), donio, [@emersion](https://github.com/emersion), and [@eskimo](https://github.com/eskimo) for contributing patches and helping test.
### Config changes
* Added `webpush` block to the config file to configure push notifications. See `default.yaml` for an example. Note that at this time, `default.yaml` ships with support for push notifications disabled; operators can enable them by setting `webpush.enabled: true`. In the absence of such a block, push notifications are disabled.
* We recommend the addition of `"WEBPUSH": 1` to `fakelag.command-budgets`, to speed up mobile reattach when web push is enabled. See `default.yaml` for an example.
### Added
* Added support for the [draft/webpush](https://github.com/ircv3/ircv3-specifications/pull/471) specification (#2205, thanks [@emersion](https://github.com/emersion), [@eskimo](https://github.com/eskimo)!)
* Added support for the [draft/extended-isupport](https://github.com/ircv3/ircv3-specifications/pull/543) specification (#2184, thanks [@emersion](https://github.com/emersion)!)
* `UBAN ADD` now accepts `REQUIRE-SASL` with NUH masks, i.e. k-lines (#2198, #2199)
* Ergo now publishes the `SAFELIST` ISUPPORT parameter (#2196, thanks [@delthas](https://github.com/delthas)!)
### Fixed
* Fixed incorrect parameters when pushing `005` (ISUPPORT) updates to clients on rehash (#2177, #2184)
### Internal
* Official release builds use Go 1.23.5
* Added a unique identifier to identify connections in debug logs. This has no privacy implications in a standard, non-debug configuration of Ergo. (#2206, thanks donio!)
* Added support for Solaris on amd64 CPUs (#2183)
## [2.14.0] - 2024-06-30
We're pleased to be publishing v2.14.0, a new stable release. This release contains primarily bug fixes, with the addition of some new authentication mechanisms for integrating with web clients.
This release includes changes to the config file format, all of which are fully backwards-compatible and do not require updating the file before upgrading. It includes no changes to the database file format. This release includes changes to the config file format, all of which are fully backwards-compatible and do not require updating the file before upgrading. It includes no changes to the database file format.
Many thanks to [@al3xandros](https://github.com/al3xandros), donio, [@eeeeeta](https://github.com/eeeeeta), [@emersion](https://github.com/emersion), [@Eriner](https://github.com/Eriner), [@eskimo](https://github.com/eskimo), [@Herringway](https://github.com/Herringway), [@jwheare](https://github.com/jwheare), [@knolley](https://github.com/knolley), [@mengzhuo](https://github.com/mengzhuo), pathof, [@poVoq](https://github.com/poVoq), [@progval](https://github.com/progval), [@RNDpacman](https://github.com/RNDpacman), and [@xnaas](https://github.com/xnaas) for contributing patches, reporting issues, and helping test. Many thanks to [@al3xandros](https://github.com/al3xandros), donio, [@eeeeeta](https://github.com/eeeeeta), [@emersion](https://github.com/emersion), [@Eriner](https://github.com/Eriner), [@eskimo](https://github.com/eskimo), [@Herringway](https://github.com/Herringway), [@jwheare](https://github.com/jwheare), [@knolley](https://github.com/knolley), pathof, [@poVoq](https://github.com/poVoq), [@progval](https://github.com/progval), [@RNDpacman](https://github.com/RNDpacman), and [@xnaas](https://github.com/xnaas) for contributing patches, reporting issues, and helping test.
### Config changes ### Config changes
* Added `accounts.oauth2` and `accounts.jwt-auth` blocks for configuring OAuth2 and JWT authentication (#2004) * Added `accounts.oauth2` and `accounts.jwt-auth` blocks for configuring OAuth2 and JWT authentication (#2004)
@ -51,7 +23,6 @@ Many thanks to [@al3xandros](https://github.com/al3xandros), donio, [@eeeeeta](h
* The new `ergo defaultconfig` subcommand prints a copy of the default config file to standard output (#2157, #2160, thanks [@al3xandros](https://github.com/al3xandros)!) * The new `ergo defaultconfig` subcommand prints a copy of the default config file to standard output (#2157, #2160, thanks [@al3xandros](https://github.com/al3xandros)!)
### Fixed ### Fixed
* Even with `allow-truncation: false` (the recommended default), some oversized messages were being accepted and relayed with truncation. These messages will now be rejected with `417 ERR_INPUTTOOLONG` as expected (#2170)
* NICK and QUIT from invisible members of auditorium channels are no longer recorded in history (#2133, #2137, thanks [@knolley](https://github.com/knolley) and [@poVoq](https://github.com/poVoq)!) * NICK and QUIT from invisible members of auditorium channels are no longer recorded in history (#2133, #2137, thanks [@knolley](https://github.com/knolley) and [@poVoq](https://github.com/poVoq)!)
* If channel registration was disabled, registered channels could become inaccessible after rehash; this has been fixed (#2130, thanks [@eeeeeta](https://github.com/eeeeeta)!) * If channel registration was disabled, registered channels could become inaccessible after rehash; this has been fixed (#2130, thanks [@eeeeeta](https://github.com/eeeeeta)!)
* Attempts to use unrecognized SASL mechanisms no longer count against the login throttle, improving compatibility with Pidgin (#2156, thanks donio and pathof!) * Attempts to use unrecognized SASL mechanisms no longer count against the login throttle, improving compatibility with Pidgin (#2156, thanks donio and pathof!)
@ -69,7 +40,6 @@ Many thanks to [@al3xandros](https://github.com/al3xandros), donio, [@eeeeeta](h
### Internal ### Internal
* Official release builds use Go 1.22.4 * Official release builds use Go 1.22.4
* Added a linux/riscv64 release (#2172, #2173, thanks [@mengzhuo](https://github.com/mengzhuo)!)
## [2.13.1] - 2024-05-06 ## [2.13.1] - 2024-05-06

View File

@ -1,5 +1,5 @@
## build ergo binary ## build ergo binary
FROM docker.io/golang:1.24-alpine AS build-env FROM docker.io/golang:1.22-alpine AS build-env
RUN apk upgrade -U --force-refresh --no-cache && apk add --no-cache --purge --clean-protected -l -u make git RUN apk upgrade -U --force-refresh --no-cache && apk add --no-cache --purge --clean-protected -l -u make git

View File

@ -18,7 +18,7 @@ build:
go build -v -ldflags "-X main.commit=$(GIT_COMMIT) -X main.version=$(GIT_TAG)" go build -v -ldflags "-X main.commit=$(GIT_COMMIT) -X main.version=$(GIT_TAG)"
release: release:
goreleaser --skip=publish --clean goreleaser --skip-publish --rm-dist
capdefs: capdefs:
python3 ./gencapdefs.py > ${capdef_file} python3 ./gencapdefs.py > ${capdef_file}

View File

@ -100,7 +100,6 @@ server:
max-connections-per-duration: 64 max-connections-per-duration: 64
# strict transport security, to get clients to automagically use TLS # strict transport security, to get clients to automagically use TLS
# (irrelevant in the recommended configuration, with no public plaintext listener)
sts: sts:
# whether to advertise STS # whether to advertise STS
# #
@ -376,17 +375,6 @@ server:
# if you don't want to publicize how popular the server is # if you don't want to publicize how popular the server is
suppress-lusers: false suppress-lusers: false
# publish additional key-value pairs in ISUPPORT (the 005 numeric).
# keys that collide with a key published by Ergo will be silently ignored.
additional-isupport:
#"draft/FILEHOST": "https://example.com/filehost"
#"draft/bazbat": "" # empty string means no value
# optionally map command alias names to existing ergo commands. most deployments
# should ignore this.
#command-aliases:
#"UMGEBUNG": "AMBIANCE"
# account options # account options
accounts: accounts:
# is account authentication enabled, i.e., can users log into existing accounts? # is account authentication enabled, i.e., can users log into existing accounts?
@ -788,7 +776,7 @@ logging:
# be logged, even if you explicitly include it # be logged, even if you explicitly include it
# #
# useful types include: # useful types include:
# * everything (usually used with excluding some types below) # * everything (usually used with exclusing some types below)
# server server startup, rehash, and shutdown events # server server startup, rehash, and shutdown events
# accounts account registration and authentication # accounts account registration and authentication
# channels channel creation and operations # channels channel creation and operations
@ -832,7 +820,7 @@ lock-file: "ircd.lock"
# datastore configuration # datastore configuration
datastore: datastore:
# path to the database file (used to store account and channel registrations): # path to the datastore
path: ircd.db path: ircd.db
# if the database schema requires an upgrade, `autoupgrade` will attempt to # if the database schema requires an upgrade, `autoupgrade` will attempt to
@ -934,7 +922,6 @@ fakelag:
"MARKREAD": 16 "MARKREAD": 16
"MONITOR": 1 "MONITOR": 1
"WHO": 4 "WHO": 4
"WEBPUSH": 1
# the roleplay commands are semi-standardized extensions to IRC that allow # the roleplay commands are semi-standardized extensions to IRC that allow
# sending and receiving messages from pseudo-nicknames. this can be used either # sending and receiving messages from pseudo-nicknames. this can be used either
@ -953,12 +940,6 @@ roleplay:
# add the real nickname, in parentheses, to the end of every roleplay message? # add the real nickname, in parentheses, to the end of every roleplay message?
add-suffix: true add-suffix: true
# allow customizing the NUH's sent for NPC and SCENE commands
# NPC: the first %s is the NPC name, the second is the user's real nick
#npc-nick-mask: "*%s*!%s@npc.fakeuser.invalid"
# SCENE: the %s is the client's real nick
#scene-nick-mask: "=Scene=!%s@npc.fakeuser.invalid"
# external services can integrate with the ircd using JSON Web Tokens (https://jwt.io). # external services can integrate with the ircd using JSON Web Tokens (https://jwt.io).
# in effect, the server can sign a token attesting that the client is present on # in effect, the server can sign a token attesting that the client is present on
# the server, is a member of a particular channel, etc. # the server, is a member of a particular channel, etc.
@ -1086,42 +1067,3 @@ history:
# whether to allow customization of the config at runtime using environment variables, # whether to allow customization of the config at runtime using environment variables,
# e.g., ERGO__SERVER__MAX_SENDQ=128k. see the manual for more details. # e.g., ERGO__SERVER__MAX_SENDQ=128k. see the manual for more details.
allow-environment-overrides: true allow-environment-overrides: true
# experimental support for mobile push notifications
# see the manual for potential security, privacy, and performance implications.
# DO NOT enable if you are running a Tor or I2P hidden service (i.e. one
# with no public IP listeners, only Tor/I2P listeners).
webpush:
# are push notifications enabled at all?
enabled: false
# request timeout for POST'ing the http notification
timeout: 10s
# delay sending the notification for this amount of time, then suppress it
# if the client sent MARKREAD to indicate that it was read on another device
delay: 0s
# subscriber field for the VAPID JWT authorization:
#subscriber: "https://your-website.com/"
# maximum number of push subscriptions per user
max-subscriptions: 4
# expiration time for a push subscription; it must be renewed within this time
# by the client reconnecting to IRC. we also detect whether the client is no longer
# successfully receiving push messages.
expiration: 14d
# HTTP API. we strongly recommend leaving this disabled unless you have a specific
# need for it.
api:
# is the API enabled at all?
enabled: false
# listen address:
listener: "127.0.0.1:8089"
# serve over TLS (strongly recommended if the listener is public):
#tls:
#cert: fullchain.pem
#key: privkey.pem
# one or more static bearer tokens accepted for HTTP bearer authentication.
# these must be strong, unique, high-entropy printable ASCII strings.
# to generate a new token, use `ergo gentoken` or:
# python3 -c "import secrets; print(secrets.token_urlsafe(32))"
bearer-tokens:
- "example"

View File

@ -53,14 +53,14 @@ For example, to create a new docker volume and then mount it:
```shell ```shell
docker volume create ergo-data docker volume create ergo-data
docker run --init --name ergo -d -v ergo-data:/ircd -p 6667:6667 -p 6697:6697 ghcr.io/ergochat/ergo:stable docker run --init -d -v ergo-data:/ircd -p 6667:6667 -p 6697:6697 ghcr.io/ergochat/ergo:stable
``` ```
Or to mount a folder from your host machine: Or to mount a folder from your host machine:
```shell ```shell
mkdir ergo-data mkdir ergo-data
docker run --init --name ergo -d -v $(pwd)/ergo-data:/ircd -p 6667:6667 -p 6697:6697 ghcr.io/ergochat/ergo:stable docker run --init -d -v $(PWD)/ergo-data:/ircd -p 6667:6667 -p 6697:6697 ghcr.io/ergochat/ergo:stable
``` ```
## Customising the config ## Customising the config
@ -85,8 +85,8 @@ docker kill -s SIGHUP ergo
## Using custom TLS certificates ## Using custom TLS certificates
TLS certs will by default be read from /ircd/fullchain.pem, with a private key TLS certs will by default be read from /ircd/tls.crt, with a private key
in /ircd/privkey.pem. You can customise this path in the ircd.yaml file if in /ircd/tls.key. You can customise this path in the ircd.yaml file if
you wish to mount the certificates from another volume. For information you wish to mount the certificates from another volume. For information
on using Let's Encrypt certificates, see on using Let's Encrypt certificates, see
[this manual entry](https://github.com/ergochat/ergo/blob/master/docs/MANUAL.md#using-valid-tls-certificates). [this manual entry](https://github.com/ergochat/ergo/blob/master/docs/MANUAL.md#using-valid-tls-certificates).
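For instance, certificates obtained on the host (e.g. via Let's Encrypt) could be mounted into the container as an additional volume; the hostname and paths below are hypothetical, and the cert/key locations must match whatever paths are configured in ircd.yaml:

```shell
# hypothetical example: mount host certificates into the container read-only,
# then point the TLS cert/key paths in ircd.yaml at the mounted /certs directory
docker run --init --name ergo -d \
  -v ergo-data:/ircd \
  -v /etc/letsencrypt/live/irc.example.com:/certs:ro \
  -p 6667:6667 -p 6697:6697 ghcr.io/ergochat/ergo:stable
```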

View File

@ -1,88 +0,0 @@
__ __ ______ ___ ______ ___
__/ // /_/ ____/ __ \/ ____/ __ \
/_ // __/ __/ / /_/ / / __/ / / /
/_ // __/ /___/ _, _/ /_/ / /_/ /
/_//_/ /_____/_/ |_|\____/\____/
Ergo IRCd API Documentation
https://ergo.chat/
_Copyright © Daniel Oaks <daniel@danieloaks.net>, Shivaram Lingamneni <slingamn@cs.stanford.edu>_
--------------------------------------------------------------------------------------------
Ergo has an experimental HTTP API. Some general information about the API:
1. All requests to the API are via POST.
1. All requests to the API are authenticated via bearer authentication. This is a header named `Authorization` with the value `Bearer <token>`. A list of valid tokens is hardcoded in the Ergo config. Future versions of Ergo may allow additional validation schemes for tokens.
1. The request parameters are sent as JSON in the POST body.
1. Any status code other than 200 is an error response; the response body is undefined in this case (likely human-readable text for debugging).
1. A 200 status code indicates successful execution of the request. The response body will be JSON and may indicate application-level success or failure (typically via the `success` field, which takes a boolean value).
API endpoints are versioned (currently all endpoints have a `/v1/` path prefix). Backwards-incompatible updates will most likely take the form of endpoints with new names, or an increased version prefix. Any exceptions to this will be specifically documented in the changelog.
All API endpoints should be considered highly privileged. Bearer tokens should be kept secret. Access to the API should be either over a trusted link (like loopback) or secured via verified TLS. See the `api` section of `default.yaml` for examples of how to configure this.
Here's an example of how to test an API configured to run over loopback TCP in plaintext:
```bash
curl -d '{"accountName": "invalidaccountname", "passphrase": "invalidpassphrase"}' -H 'Authorization: Bearer EYBbXVilnumTtfn4A9HE8_TiKLGWEGylre7FG6gEww0' -v http://127.0.0.1:8089/v1/check_auth
```
This returns:
```json
{"success":false}
```
Endpoints
=========
`/v1/account_details`
----------------
This endpoint fetches account details and returns them as JSON. The request is a JSON object with fields:
* `accountName`: string, name of the account
The response is a JSON object with fields:
* `success`: whether the account exists or not
* `accountName`: canonical, case-unfolded version of the account name
* `email`: email address of the account provided
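A hypothetical request, following the same pattern as the `check_auth` example above (the bearer token and account name are placeholders):

```bash
# hypothetical request; substitute a real bearer token and account name
curl -d '{"accountName": "alice"}' \
  -H 'Authorization: Bearer <token>' \
  http://127.0.0.1:8089/v1/account_details
# a successful response has the shape:
# {"success":true,"accountName":"alice","email":"alice@example.com"}
```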
`/v1/check_auth`
----------------
This endpoint verifies the credentials of a NickServ account; this allows Ergo to be used as the source of truth for authentication by another system. The request is a JSON object with fields:
* `accountName`: string, name of the account
* `passphrase`: string, alleged passphrase of the account
The response is a JSON object with fields:
* `success`: whether the credentials provided were valid
* `accountName`: canonical, case-unfolded version of the account name
`/v1/rehash`
------------
This endpoint rehashes the server (i.e. reloads the configuration file, TLS certificates, and other associated data). The body is ignored. The response is a JSON object with fields:
* `success`: boolean, indicates whether the rehash was successful
* `error`: string, optional, human-readable description of the failure
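A hypothetical invocation (the bearer token is a placeholder); since the request body is ignored, an empty POST suffices:

```bash
# hypothetical request; the request body is ignored
curl -X POST -H 'Authorization: Bearer <token>' http://127.0.0.1:8089/v1/rehash
# on success this returns: {"success":true}
```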
`/v1/saregister`
----------------
This endpoint registers an account in NickServ, with the same semantics as `NS SAREGISTER`. The request is a JSON object with fields:
* `accountName`: string, name of the account
* `passphrase`: string, passphrase of the account
The response is a JSON object with fields:
* `success`: whether the account creation succeeded
* `errorCode`: string, optional, machine-readable description of the error. Possible values include: `ACCOUNT_EXISTS`, `INVALID_PASSPHRASE`, `UNKNOWN_ERROR`.
* `error`: string, optional, human-readable description of the failure.
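A hypothetical invocation (all values are placeholders); note that a successful call registers a real account, and a failure sets `errorCode` to one of the values listed above:

```bash
# hypothetical request; account name and passphrase are placeholders
curl -d '{"accountName": "newuser", "passphrase": "correct horse battery staple"}' \
  -H 'Authorization: Bearer <token>' \
  http://127.0.0.1:8089/v1/saregister
# on success this returns: {"success":true}
```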

View File

@ -44,7 +44,6 @@ _Copyright © Daniel Oaks <daniel@danieloaks.net>, Shivaram Lingamneni <slingamn
- [Persistent history with MySQL](#persistent-history-with-mysql) - [Persistent history with MySQL](#persistent-history-with-mysql)
- [IP cloaking](#ip-cloaking) - [IP cloaking](#ip-cloaking)
- [Moderation](#moderation) - [Moderation](#moderation)
- [Push notifications](#push-notifications)
- [Frequently Asked Questions](#frequently-asked-questions) - [Frequently Asked Questions](#frequently-asked-questions)
- [IRC over TLS](#irc-over-tls) - [IRC over TLS](#irc-over-tls)
- [Redirect from plaintext to TLS](#how-can-i-redirect-users-from-plaintext-to-tls) - [Redirect from plaintext to TLS](#how-can-i-redirect-users-from-plaintext-to-tls)
@ -63,7 +62,6 @@ _Copyright © Daniel Oaks <daniel@danieloaks.net>, Shivaram Lingamneni <slingamn
- [Tor](#tor) - [Tor](#tor)
- [I2P](#i2p) - [I2P](#i2p)
- [ZNC](#znc) - [ZNC](#znc)
- [API](#api)
- [External authentication systems](#external-authentication-systems) - [External authentication systems](#external-authentication-systems)
- [DNSBLs and other IP checking systems](#dnsbls-and-other-ip-checking-systems) - [DNSBLs and other IP checking systems](#dnsbls-and-other-ip-checking-systems)
- [Acknowledgements](#acknowledgements) - [Acknowledgements](#acknowledgements)
@ -485,19 +483,6 @@ These techniques require operator privileges: `UBAN` requires the `ban` operator
For channel operators, `/msg ChanServ HOWTOBAN #channel nickname` will provide similar information about the best way to ban a user from a channel. For channel operators, `/msg ChanServ HOWTOBAN #channel nickname` will provide similar information about the best way to ban a user from a channel.
## Push notifications
Ergo now has experimental support for push notifications via the [draft/webpush](https://github.com/ircv3/ircv3-specifications/pull/471) IRCv3 specification. Support for push notifications is disabled by default; operators can enable it by setting `webpush.enabled` to `true` in the configuration file. This has security, privacy, and performance implications:
* If push notifications are enabled, Ergo will send HTTP POST requests to HTTP endpoints of the user's choosing. Although the user has limited control over the POST body (since it is encrypted with random key material), and Ergo disallows requests to local or internal IP addresses, this may potentially impact the IP reputation of the Ergo host, or allow an attacker to probe endpoints that whitelist the Ergo host's IP address.
* Push notifications result in the disclosure of metadata (that the user received a message, and the approximate time of the message) to third-party messaging infrastructure. In the typical case, this will include a push endpoint controlled by the application vendor, plus the push infrastructure controlled by Apple or Google.
* The message contents (including the sender's identity) are protected by [encryption](https://datatracker.ietf.org/doc/html/rfc8291) between the server and the user's endpoint device. However, the encryption algorithm is not forward-secret (a long-term private key is stored on the user's device) or post-quantum (the server retains a copy of the corresponding elliptic curve public key).
* Push notifications are relatively expensive to process, and may increase the impact of spam or denial-of-service attacks on the Ergo server.
* Push notifications negate the anonymization provided by Tor and I2P; an Ergo instance intended to run as a Tor onion service ("hidden service") or exclusively behind an I2P address must disable them in the Ergo configuration file.
Operators and end users are invited to share feedback about push notifications, either via the project issue tracker or the support channel. Note that in order to receive push notifications, the user must be logged in with always-on enabled, and must be using a client (e.g. Goguma) that supports them.
------------------------------------------------------------------------------------------- -------------------------------------------------------------------------------------------
@ -1136,7 +1121,6 @@ Tor provides end-to-end encryption for onion services, so there's no need to ena
The second way is to run Ergo as a true hidden service, where the server's actual IP address is a secret. This requires hardening measures on the Ergo side: The second way is to run Ergo as a true hidden service, where the server's actual IP address is a secret. This requires hardening measures on the Ergo side:
* Ergo should not accept any connections on its public interfaces. You should remove any listener that starts with the address of a public interface, or with `:`, which means "listen on all available interfaces". You should listen only on `127.0.0.1:6667` and a Unix domain socket such as `/hidden_service_sockets/ergo_tor_sock`. * Ergo should not accept any connections on its public interfaces. You should remove any listener that starts with the address of a public interface, or with `:`, which means "listen on all available interfaces". You should listen only on `127.0.0.1:6667` and a Unix domain socket such as `/hidden_service_sockets/ergo_tor_sock`.
* Push notifications will reveal the server's true IP address, so they must be disabled; set `webpush.enabled` to `false`.
* In this mode, it is especially important that all operator passwords are strong and all operators are trusted (operators have a larger attack surface to deanonymize the server). * In this mode, it is especially important that all operator passwords are strong and all operators are trusted (operators have a larger attack surface to deanonymize the server).
* Onion services are at risk of being deanonymized if a client can trick the server into performing a non-Tor network request. Ergo should not perform any such requests (such as hostname resolution or ident lookups) in response to input received over a correctly configured Tor listener. However, Ergo has not been thoroughly audited against such deanonymization attacks --- therefore, Ergo should be deployed with additional sandboxing to protect against this: * Onion services are at risk of being deanonymized if a client can trick the server into performing a non-Tor network request. Ergo should not perform any such requests (such as hostname resolution or ident lookups) in response to input received over a correctly configured Tor listener. However, Ergo has not been thoroughly audited against such deanonymization attacks --- therefore, Ergo should be deployed with additional sandboxing to protect against this:
* Ergo should run with no direct network connectivity, e.g., by running in its own Linux network namespace. systemd implements this with the [PrivateNetwork](https://www.freedesktop.org/software/systemd/man/systemd.exec.html) configuration option: add `PrivateNetwork=true` to Ergo's systemd unit file. * Ergo should run with no direct network connectivity, e.g., by running in its own Linux network namespace. systemd implements this with the [PrivateNetwork](https://www.freedesktop.org/software/systemd/man/systemd.exec.html) configuration option: add `PrivateNetwork=true` to Ergo's systemd unit file.
@ -1176,10 +1160,6 @@ ZNC 1.6.x (still pretty common in distros that package old versions of IRC softw
Ergo can emulate certain capabilities of the ZNC bouncer for the benefit of clients, in particular the third-party [playback](https://wiki.znc.in/Playback) module. This enables clients with specific support for ZNC to receive selective history playback automatically. To configure this in [Textual](https://www.codeux.com/textual/), go to "Server properties", select "Vendor specific", uncheck "Do not automatically join channels on connect", and check "Only play back messages you missed". Other clients with support are listed on ZNC's wiki page. Ergo can emulate certain capabilities of the ZNC bouncer for the benefit of clients, in particular the third-party [playback](https://wiki.znc.in/Playback) module. This enables clients with specific support for ZNC to receive selective history playback automatically. To configure this in [Textual](https://www.codeux.com/textual/), go to "Server properties", select "Vendor specific", uncheck "Do not automatically join channels on connect", and check "Only play back messages you missed". Other clients with support are listed on ZNC's wiki page.
## API
Ergo offers an HTTP API that can be used to control Ergo, or to allow other applications to use Ergo as a source of truth for authentication. The API is documented separately; see [API.md](https://github.com/ergochat/ergo/blob/stable/docs/API.md) on the website, or the `API.md` file that was bundled with your release.
## External authentication systems ## External authentication systems
Ergo can be configured to call arbitrary scripts to authenticate users; see the `auth-script` section of the config. The API for these scripts is as follows: Ergo will invoke the script with a configurable set of arguments, then send it the authentication data as JSON on the first line (`\n`-terminated) of stdin. The input is a JSON dictionary with the following keys: Ergo can be configured to call arbitrary scripts to authenticate users; see the `auth-script` section of the config. The API for these scripts is as follows: Ergo will invoke the script with a configurable set of arguments, then send it the authentication data as JSON on the first line (`\n`-terminated) of stdin. The input is a JSON dictionary with the following keys:

View File

@ -23,7 +23,6 @@ _Copyright © Daniel Oaks <daniel@danieloaks.net>, Shivaram Lingamneni <slingamn
- [Always-on](#always-on) - [Always-on](#always-on)
- [Multiclient](#multiclient) - [Multiclient](#multiclient)
- [History](#history) - [History](#history)
- [Push notifications](#push-notifications)
-------------------------------------------------------------------------------------------- --------------------------------------------------------------------------------------------
@ -122,7 +121,3 @@ If you have registered a channel, you can make it private. The best way to do th
1. Identify the users you want to be able to access the channel. Ensure that they have registered their accounts (you should be able to see their registration status if you `/WHOIS` their nicknames). 1. Identify the users you want to be able to access the channel. Ensure that they have registered their accounts (you should be able to see their registration status if you `/WHOIS` their nicknames).
1. Add the desired nick/account names to the invite exception list (`/mode #example +I alice`) or give them persistent voice (`/msg ChanServ AMODE #example +v alice`) 1. Add the desired nick/account names to the invite exception list (`/mode #example +I alice`) or give them persistent voice (`/msg ChanServ AMODE #example +v alice`)
1. If you want to grant a persistent channel privilege to a user, you can do it with `CS AMODE` (`/msg ChanServ AMODE #example +o bob`) 1. If you want to grant a persistent channel privilege to a user, you can do it with `CS AMODE` (`/msg ChanServ AMODE #example +o bob`)
# Push notifications
Ergo has experimental support for mobile push notifications. The server operator must enable this functionality; to check whether this is the case, you can send `/msg NickServ push list`. You must additionally be using a client (e.g. Goguma) that supports the functionality, and your account must be set to always-on (`/msg NickServ set always-on true`, as described above).

View File

@ -21,7 +21,6 @@ import (
"github.com/ergochat/ergo/irc" "github.com/ergochat/ergo/irc"
"github.com/ergochat/ergo/irc/logger" "github.com/ergochat/ergo/irc/logger"
"github.com/ergochat/ergo/irc/mkcerts" "github.com/ergochat/ergo/irc/mkcerts"
"github.com/ergochat/ergo/irc/utils"
) )
// set via linker flags, either by make or by goreleaser: // set via linker flags, either by make or by goreleaser:
@ -100,7 +99,6 @@ Usage:
ergo genpasswd [--conf <filename>] [--quiet] ergo genpasswd [--conf <filename>] [--quiet]
ergo mkcerts [--conf <filename>] [--quiet] ergo mkcerts [--conf <filename>] [--quiet]
ergo defaultconfig ergo defaultconfig
ergo gentoken
ergo run [--conf <filename>] [--quiet] [--smoke] ergo run [--conf <filename>] [--quiet] [--smoke]
ergo -h | --help ergo -h | --help
ergo --version ergo --version
@ -143,9 +141,6 @@ Options:
} else if arguments["defaultconfig"].(bool) { } else if arguments["defaultconfig"].(bool) {
fmt.Print(defaultConfig) fmt.Print(defaultConfig)
return return
} else if arguments["gentoken"].(bool) {
fmt.Println(utils.GenerateSecretKey())
return
} else if arguments["mkcerts"].(bool) { } else if arguments["mkcerts"].(bool) {
doMkcerts(arguments["--conf"].(string), arguments["--quiet"].(bool)) doMkcerts(arguments["--conf"].(string), arguments["--quiet"].(bool))
return return
@ -193,7 +188,7 @@ Options:
// warning if running a non-final version // warning if running a non-final version
if strings.Contains(irc.Ver, "unreleased") { if strings.Contains(irc.Ver, "unreleased") {
logman.Warning("server", "You are currently running an unreleased beta version of Ergo that may be unstable and could corrupt your database.\nIf you are running a production network, please download the latest build from https://ergo.chat/about and run that instead.") logman.Warning("server", "You are currently running an unreleased beta version of Ergo that may be unstable and could corrupt your database.\nIf you are running a production network, please download the latest build from https://ergo.chat/downloads.html and run that instead.")
} }
server, err := irc.NewServer(config, logman) server, err := irc.NewServer(config, logman)

View File

@ -219,24 +219,6 @@ CAPDEFS = [
url="https://github.com/ircv3/ircv3-specifications/pull/527", url="https://github.com/ircv3/ircv3-specifications/pull/527",
standard="proposed IRCv3", standard="proposed IRCv3",
), ),
CapDef(
identifier="ExtendedISupport",
name="draft/extended-isupport",
url="https://github.com/ircv3/ircv3-specifications/pull/543",
standard="proposed IRCv3",
),
CapDef(
identifier="WebPush",
name="draft/webpush",
url="https://github.com/ircv3/ircv3-specifications/pull/471",
standard="proposed IRCv3",
),
CapDef(
identifier="SojuWebPush",
name="soju.im/webpush",
url="https://github.com/ircv3/ircv3-specifications/pull/471",
standard="Soju/Goguma vendor",
),
] ]
def validate_defs(): def validate_defs():

go.mod
View File

@ -1,6 +1,6 @@
module github.com/ergochat/ergo module github.com/ergochat/ergo
go 1.24 go 1.22
require ( require (
code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48 code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48
@ -8,27 +8,25 @@ require (
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815
github.com/ergochat/confusables v0.0.0-20201108231250-4ab98ab61fb1 github.com/ergochat/confusables v0.0.0-20201108231250-4ab98ab61fb1
github.com/ergochat/go-ident v0.0.0-20230911071154-8c30606d6881 github.com/ergochat/go-ident v0.0.0-20230911071154-8c30606d6881
github.com/ergochat/irc-go v0.5.0-rc2 github.com/ergochat/irc-go v0.5.0-rc1
github.com/go-sql-driver/mysql v1.7.0 github.com/go-sql-driver/mysql v1.7.0
github.com/go-test/deep v1.0.6 // indirect
github.com/gofrs/flock v0.8.1 github.com/gofrs/flock v0.8.1
github.com/gorilla/websocket v1.4.2 github.com/gorilla/websocket v1.4.2
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
github.com/onsi/ginkgo v1.12.0 // indirect github.com/onsi/ginkgo v1.12.0 // indirect
github.com/onsi/gomega v1.9.0 // indirect github.com/onsi/gomega v1.9.0 // indirect
github.com/stretchr/testify v1.4.0 // indirect github.com/stretchr/testify v1.4.0 // indirect
github.com/tidwall/buntdb v1.3.2 github.com/tidwall/buntdb v1.2.10
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208
github.com/xdg-go/scram v1.0.2 github.com/xdg-go/scram v1.0.2
golang.org/x/crypto v0.32.0 golang.org/x/crypto v0.17.0
golang.org/x/term v0.28.0 golang.org/x/term v0.15.0
golang.org/x/text v0.21.0 golang.org/x/text v0.14.0
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
) )
require ( require github.com/golang-jwt/jwt/v5 v5.2.0
github.com/emersion/go-msgauth v0.6.8
github.com/ergochat/webpush-go/v2 v2.0.0
github.com/golang-jwt/jwt/v5 v5.2.2
)
require ( require (
github.com/tidwall/btree v1.4.2 // indirect github.com/tidwall/btree v1.4.2 // indirect
@ -39,7 +37,7 @@ require (
github.com/tidwall/rtred v0.1.2 // indirect github.com/tidwall/rtred v0.1.2 // indirect
github.com/tidwall/tinyqueue v0.1.1 // indirect github.com/tidwall/tinyqueue v0.1.1 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect
golang.org/x/sys v0.29.0 // indirect golang.org/x/sys v0.15.0 // indirect
) )
replace github.com/gorilla/websocket => github.com/ergochat/websocket v1.4.2-oragono1 replace github.com/gorilla/websocket => github.com/ergochat/websocket v1.4.2-oragono1

go.sum
View File

@ -6,27 +6,27 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/emersion/go-msgauth v0.6.8 h1:kW/0E9E8Zx5CdKsERC/WnAvnXvX7q9wTHia1OA4944A=
github.com/emersion/go-msgauth v0.6.8/go.mod h1:YDwuyTCUHu9xxmAeVj0eW4INnwB6NNZoPdLerpSxRrc=
github.com/ergochat/confusables v0.0.0-20201108231250-4ab98ab61fb1 h1:WLHTOodthVyv5NvYLIvWl112kSFv5IInKKrRN2qpons= github.com/ergochat/confusables v0.0.0-20201108231250-4ab98ab61fb1 h1:WLHTOodthVyv5NvYLIvWl112kSFv5IInKKrRN2qpons=
github.com/ergochat/confusables v0.0.0-20201108231250-4ab98ab61fb1/go.mod h1:mov+uh1DPWsltdQnOdzn08UO9GsJ3MEvhtu0Ci37fdk= github.com/ergochat/confusables v0.0.0-20201108231250-4ab98ab61fb1/go.mod h1:mov+uh1DPWsltdQnOdzn08UO9GsJ3MEvhtu0Ci37fdk=
github.com/ergochat/go-ident v0.0.0-20230911071154-8c30606d6881 h1:+J5m88nvybxB5AnBVGzTXM/yHVytt48rXBGcJGzSbms= github.com/ergochat/go-ident v0.0.0-20230911071154-8c30606d6881 h1:+J5m88nvybxB5AnBVGzTXM/yHVytt48rXBGcJGzSbms=
github.com/ergochat/go-ident v0.0.0-20230911071154-8c30606d6881/go.mod h1:ASYJtQujNitna6cVHsNQTGrfWvMPJ5Sa2lZlmsH65uM= github.com/ergochat/go-ident v0.0.0-20230911071154-8c30606d6881/go.mod h1:ASYJtQujNitna6cVHsNQTGrfWvMPJ5Sa2lZlmsH65uM=
github.com/ergochat/irc-go v0.5.0-rc2 h1:VuSQJF5K4hWvYSzGa4b8vgL6kzw8HF6LSOejE+RWpAo= github.com/ergochat/irc-go v0.4.0 h1:0YibCKfAAtwxQdNjLQd9xpIEPisLcJ45f8FNsMHAuZc=
github.com/ergochat/irc-go v0.5.0-rc2/go.mod h1:2vi7KNpIPWnReB5hmLpl92eMywQvuIeIIGdt/FQCph0= github.com/ergochat/irc-go v0.4.0/go.mod h1:2vi7KNpIPWnReB5hmLpl92eMywQvuIeIIGdt/FQCph0=
github.com/ergochat/irc-go v0.5.0-rc1 h1:kFoIHExoNFQ2CV+iShAVna/H4xrXQB4t4jK5Sep2j9k=
github.com/ergochat/irc-go v0.5.0-rc1/go.mod h1:2vi7KNpIPWnReB5hmLpl92eMywQvuIeIIGdt/FQCph0=
github.com/ergochat/scram v1.0.2-ergo1 h1:2bYXiRFQH636pT0msOG39fmEYl4Eq+OuutcyDsCix/g= github.com/ergochat/scram v1.0.2-ergo1 h1:2bYXiRFQH636pT0msOG39fmEYl4Eq+OuutcyDsCix/g=
github.com/ergochat/scram v1.0.2-ergo1/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/ergochat/scram v1.0.2-ergo1/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/ergochat/webpush-go/v2 v2.0.0 h1:n6eoJk8RpzJFeBJ6gxvqo/dngnVEmJbzJwzKtCZbByo=
github.com/ergochat/webpush-go/v2 v2.0.0/go.mod h1:OQlhnq8JeHDzRzAy6bdDObr19uqbHliOV+z7mHbYr4c=
github.com/ergochat/websocket v1.4.2-oragono1 h1:plMUunFBM6UoSCIYCKKclTdy/TkkHfUslhOfJQzfueM= github.com/ergochat/websocket v1.4.2-oragono1 h1:plMUunFBM6UoSCIYCKKclTdy/TkkHfUslhOfJQzfueM=
github.com/ergochat/websocket v1.4.2-oragono1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/ergochat/websocket v1.4.2-oragono1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-test/deep v1.0.6 h1:UHSEyLZUwX9Qoi99vVwvewiMC8mM2bf7XEM2nqvzEn8=
github.com/go-test/deep v1.0.6/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@ -47,8 +47,8 @@ github.com/tidwall/assert v0.1.0 h1:aWcKyRBUAdLoVebxo95N7+YZVTFF/ASTr7BN4sLP6XI=
github.com/tidwall/assert v0.1.0/go.mod h1:QLYtGyeqse53vuELQheYl9dngGCJQ+mTtlxcktb+Kj8= github.com/tidwall/assert v0.1.0/go.mod h1:QLYtGyeqse53vuELQheYl9dngGCJQ+mTtlxcktb+Kj8=
github.com/tidwall/btree v1.4.2 h1:PpkaieETJMUxYNADsjgtNRcERX7mGc/GP2zp/r5FM3g= github.com/tidwall/btree v1.4.2 h1:PpkaieETJMUxYNADsjgtNRcERX7mGc/GP2zp/r5FM3g=
github.com/tidwall/btree v1.4.2/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE= github.com/tidwall/btree v1.4.2/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE=
github.com/tidwall/buntdb v1.3.2 h1:qd+IpdEGs0pZci37G4jF51+fSKlkuUTMXuHhXL1AkKg= github.com/tidwall/buntdb v1.2.10 h1:U/ebfkmYPBnyiNZIirUiWFcxA/mgzjbKlyPynFsPtyM=
github.com/tidwall/buntdb v1.3.2/go.mod h1:lZZrZUWzlyDJKlLQ6DKAy53LnG7m5kHyrEHvvcDmBpU= github.com/tidwall/buntdb v1.2.10/go.mod h1:lZZrZUWzlyDJKlLQ6DKAy53LnG7m5kHyrEHvvcDmBpU=
github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw= github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
@ -64,26 +64,28 @@ github.com/tidwall/rtred v0.1.2 h1:exmoQtOLvDoO8ud++6LwVsAMTu0KPzLTUrMln8u1yu8=
github.com/tidwall/rtred v0.1.2/go.mod h1:hd69WNXQ5RP9vHd7dqekAz+RIdtfBogmglkZSRxCHFQ= github.com/tidwall/rtred v0.1.2/go.mod h1:hd69WNXQ5RP9vHd7dqekAz+RIdtfBogmglkZSRxCHFQ=
github.com/tidwall/tinyqueue v0.1.1 h1:SpNEvEggbpyN5DIReaJ2/1ndroY8iyEGxPYxoSaymYE= github.com/tidwall/tinyqueue v0.1.1 h1:SpNEvEggbpyN5DIReaJ2/1ndroY8iyEGxPYxoSaymYE=
github.com/tidwall/tinyqueue v0.1.1/go.mod h1:O/QNHwrnjqr6IHItYrzoHAKYhBkLI67Q096fQP5zMYw= github.com/tidwall/tinyqueue v0.1.1/go.mod h1:O/QNHwrnjqr6IHItYrzoHAKYhBkLI67Q096fQP5zMYw=
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 h1:PM5hJF7HVfNWmCjMdEfbuOBNXSVF2cMFGgQTPdKCbwM=
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208/go.mod h1:BzWtXXrXzZUvMacR0oF/fbDDgUPO8L36tDMmRAf14ns=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -51,7 +51,6 @@ const (
// for an always-on client, a map of channel names they're in to their current modes // for an always-on client, a map of channel names they're in to their current modes
// (not to be confused with their amodes, which a non-always-on client can have): // (not to be confused with their amodes, which a non-always-on client can have):
keyAccountChannelToModes = "account.channeltomodes %s" keyAccountChannelToModes = "account.channeltomodes %s"
keyAccountPushSubscriptions = "account.pushsubscriptions %s"
maxCertfpsPerAccount = 5 maxCertfpsPerAccount = 5
) )
@ -136,7 +135,6 @@ func (am *AccountManager) createAlwaysOnClients(config *Config) {
am.loadTimeMap(keyAccountReadMarkers, accountName), am.loadTimeMap(keyAccountReadMarkers, accountName),
am.loadModes(accountName), am.loadModes(accountName),
am.loadRealname(accountName), am.loadRealname(accountName),
am.loadPushSubscriptions(accountName),
) )
} }
} }
@ -717,40 +715,6 @@ func (am *AccountManager) loadRealname(account string) (realname string) {
return return
} }
func (am *AccountManager) savePushSubscriptions(account string, subs []storedPushSubscription) {
j, err := json.Marshal(subs)
if err != nil {
am.server.logger.Error("internal", "error storing push subscriptions", err.Error())
return
}
val := string(j)
key := fmt.Sprintf(keyAccountPushSubscriptions, account)
am.server.store.Update(func(tx *buntdb.Tx) error {
tx.Set(key, val, nil)
return nil
})
return
}
func (am *AccountManager) loadPushSubscriptions(account string) (result []storedPushSubscription) {
key := fmt.Sprintf(keyAccountPushSubscriptions, account)
var val string
am.server.store.View(func(tx *buntdb.Tx) error {
val, _ = tx.Get(key)
return nil
})
if val == "" {
return nil
}
if err := json.Unmarshal([]byte(val), &result); err == nil {
return result
} else {
am.server.logger.Error("internal", "error loading push subscriptions", err.Error())
return nil
}
}
func (am *AccountManager) addRemoveCertfp(account, certfp string, add bool, hasPrivs bool) (err error) { func (am *AccountManager) addRemoveCertfp(account, certfp string, add bool, hasPrivs bool) (err error) {
certfp, err = utils.NormalizeCertfp(certfp) certfp, err = utils.NormalizeCertfp(certfp)
if err != nil { if err != nil {
@ -1157,7 +1121,7 @@ func (am *AccountManager) NsSendpass(client *Client, accountName string) (err er
message := email.ComposeMail(config.Accounts.Registration.EmailVerification, account.Settings.Email, subject) message := email.ComposeMail(config.Accounts.Registration.EmailVerification, account.Settings.Email, subject)
fmt.Fprintf(&message, client.t("We received a request to reset your password on %[1]s for account: %[2]s"), am.server.name, account.Name) fmt.Fprintf(&message, client.t("We received a request to reset your password on %[1]s for account: %[2]s"), am.server.name, account.Name)
message.WriteString("\r\n") message.WriteString("\r\n")
message.WriteString(client.t("If you did not initiate this request, you can safely ignore this message.")) fmt.Fprintf(&message, client.t("If you did not initiate this request, you can safely ignore this message."))
message.WriteString("\r\n") message.WriteString("\r\n")
message.WriteString("\r\n") message.WriteString("\r\n")
message.WriteString(client.t("Otherwise, to reset your password, issue the following command (replace `new_password` with your desired password):")) message.WriteString(client.t("Otherwise, to reset your password, issue the following command (replace `new_password` with your desired password):"))

View File

@ -1,224 +0,0 @@
package irc
import (
"crypto/subtle"
"encoding/json"
"fmt"
"net/http"
"strings"
)
func newAPIHandler(server *Server) http.Handler {
api := &ergoAPI{
server: server,
mux: http.NewServeMux(),
}
api.mux.HandleFunc("POST /v1/rehash", api.handleRehash)
api.mux.HandleFunc("POST /v1/check_auth", api.handleCheckAuth)
api.mux.HandleFunc("POST /v1/saregister", api.handleSaregister)
api.mux.HandleFunc("POST /v1/account_details", api.handleAccountDetails)
return api
}
type ergoAPI struct {
server *Server
mux *http.ServeMux
}
func (a *ergoAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer a.server.HandlePanic(nil)
defer a.server.logger.Debug("api", r.URL.Path)
if a.checkBearerAuth(r.Header.Get("Authorization")) {
a.mux.ServeHTTP(w, r)
} else {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
}
}
func (a *ergoAPI) checkBearerAuth(authHeader string) (authorized bool) {
if authHeader == "" {
return false
}
c := a.server.Config()
if !c.API.Enabled {
return false
}
spaceIdx := strings.IndexByte(authHeader, ' ')
if spaceIdx < 0 {
return false
}
if !strings.EqualFold("Bearer", authHeader[:spaceIdx]) {
return false
}
providedTokenBytes := []byte(authHeader[spaceIdx+1:])
for _, tokenBytes := range c.API.bearerTokenBytes {
if subtle.ConstantTimeCompare(tokenBytes, providedTokenBytes) == 1 {
return true
}
}
return false
}
func (a *ergoAPI) decodeJSONRequest(request any, w http.ResponseWriter, r *http.Request) (err error) {
err = json.NewDecoder(r.Body).Decode(request)
if err != nil {
http.Error(w, fmt.Sprintf("failed to deserialize json request: %v", err), http.StatusBadRequest)
}
return err
}
func (a *ergoAPI) writeJSONResponse(response any, w http.ResponseWriter, r *http.Request) {
j, err := json.Marshal(response)
if err == nil {
j = append(j, '\n') // less annoying in curl output
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(j)
} else {
a.server.logger.Error("internal", "failed to serialize API response", r.URL.Path, err.Error())
http.Error(w, fmt.Sprintf("failed to serialize json response: %v", err), http.StatusInternalServerError)
}
}
type apiGenericResponse struct {
Success bool `json:"success"`
Error string `json:"error,omitempty"`
ErrorCode string `json:"errorCode,omitempty"`
}
func (a *ergoAPI) handleRehash(w http.ResponseWriter, r *http.Request) {
var response apiGenericResponse
err := a.server.rehash()
if err == nil {
response.Success = true
} else {
response.Success = false
response.Error = err.Error()
}
a.writeJSONResponse(response, w, r)
}
type apiCheckAuthResponse struct {
apiGenericResponse
AccountName string `json:"accountName,omitempty"`
}
func (a *ergoAPI) handleCheckAuth(w http.ResponseWriter, r *http.Request) {
var request AuthScriptInput
if err := a.decodeJSONRequest(&request, w, r); err != nil {
return
}
var response apiCheckAuthResponse
// try passphrase if present
if request.AccountName != "" && request.Passphrase != "" {
// TODO this only checks the internal database, not auth-script;
// it's a little weird to use both auth-script and the API but we should probably handle it
account, err := a.server.accounts.checkPassphrase(request.AccountName, request.Passphrase)
switch err {
case nil:
// success, no error
response.Success = true
response.AccountName = account.Name
case errAccountDoesNotExist, errAccountInvalidCredentials, errAccountUnverified, errAccountSuspended:
// fail, no error
response.Success = false
default:
response.Success = false
response.Error = err.Error()
}
}
// try certfp if present
if !response.Success && request.Certfp != "" {
// TODO support certfp
}
a.writeJSONResponse(response, w, r)
}
type apiSaregisterRequest struct {
AccountName string `json:"accountName"`
Passphrase string `json:"passphrase"`
}
func (a *ergoAPI) handleSaregister(w http.ResponseWriter, r *http.Request) {
var request apiSaregisterRequest
if err := a.decodeJSONRequest(&request, w, r); err != nil {
return
}
var response apiGenericResponse
err := a.server.accounts.SARegister(request.AccountName, request.Passphrase)
if err == nil {
response.Success = true
} else {
response.Success = false
response.Error = err.Error()
switch err {
case errAccountAlreadyRegistered, errAccountAlreadyVerified, errNameReserved:
response.ErrorCode = "ACCOUNT_EXISTS"
case errAccountBadPassphrase:
response.ErrorCode = "INVALID_PASSPHRASE"
default:
response.ErrorCode = "UNKNOWN_ERROR"
}
}
a.writeJSONResponse(response, w, r)
}
type apiAccountDetailsResponse struct {
apiGenericResponse
AccountName string `json:"accountName,omitempty"`
Email string `json:"email,omitempty"`
}
type apiAccountDetailsRequest struct {
AccountName string `json:"accountName"`
}
func (a *ergoAPI) handleAccountDetails(w http.ResponseWriter, r *http.Request) {
var request apiAccountDetailsRequest
if err := a.decodeJSONRequest(&request, w, r); err != nil {
return
}
var response apiAccountDetailsResponse
// TODO could probably use better error handling and more details
if request.AccountName != "" {
accountData, err := a.server.accounts.LoadAccount(request.AccountName)
if err == nil {
if !accountData.Verified {
err = errAccountUnverified
} else if accountData.Suspended != nil {
err = errAccountSuspended
}
}
switch err {
case nil:
response.AccountName = accountData.Name
response.Email = accountData.Settings.Email
response.Success = true
case errAccountDoesNotExist, errAccountUnverified, errAccountSuspended:
response.Success = false
default:
response.Success = false
response.ErrorCode = "UNKNOWN_ERROR"
response.Error = err.Error()
}
} else {
response.Success = false
response.ErrorCode = "INVALID_REQUEST"
}
a.writeJSONResponse(response, w, r)
}
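
The handler above only does routing, bearer-token checking, and JSON (de)serialization; it is driven over HTTP by some external service. Below is a minimal sketch of a `/v1/check_auth` call. The listener address and bearer token are hypothetical, and the request body's field names (`accountName`, `passphrase`) are assumptions, since `AuthScriptInput` is defined elsewhere; the response field names come from `apiCheckAuthResponse`/`apiGenericResponse` above.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// hypothetical values: must match the api listener and one of api.bearer-tokens
	const baseURL = "http://127.0.0.1:8089"
	const token = "EXAMPLE_BEARER_TOKEN"

	body, _ := json.Marshal(map[string]string{
		"accountName": "alice",   // assumed field name
		"passphrase":  "hunter2", // assumed field name
	})
	req, err := http.NewRequest("POST", baseURL+"/v1/check_auth", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// field names taken from the response structs above
	var result struct {
		Success     bool   `json:"success"`
		AccountName string `json:"accountName"`
		Error       string `json:"error"`
		ErrorCode   string `json:"errorCode"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		panic(err)
	}
	fmt.Printf("success=%v account=%q error=%q\n", result.Success, result.AccountName, result.Error)
}
```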


@ -64,11 +64,10 @@ const (
BotTagName = "bot" BotTagName = "bot"
// https://ircv3.net/specs/extensions/chathistory // https://ircv3.net/specs/extensions/chathistory
ChathistoryTargetsBatchType = "draft/chathistory-targets" ChathistoryTargetsBatchType = "draft/chathistory-targets"
ExtendedISupportBatchType = "draft/extended-isupport"
) )
func init() { func init() {
nameToCapability = make(map[string]Capability, numCapabs) nameToCapability = make(map[string]Capability)
for capab, name := range capabilityNames { for capab, name := range capabilityNames {
nameToCapability[name] = Capability(capab) nameToCapability[name] = Capability(capab)
} }


@ -7,7 +7,7 @@ package caps
const ( const (
// number of recognized capabilities: // number of recognized capabilities:
numCapabs = 37 numCapabs = 34
// length of the uint32 array that represents the bitset: // length of the uint32 array that represents the bitset:
bitsetLen = 2 bitsetLen = 2
) )
@ -53,10 +53,6 @@ const (
// https://github.com/ircv3/ircv3-specifications/pull/362 // https://github.com/ircv3/ircv3-specifications/pull/362
EventPlayback Capability = iota EventPlayback Capability = iota
// ExtendedISupport is the proposed IRCv3 capability named "draft/extended-isupport":
// https://github.com/ircv3/ircv3-specifications/pull/543
ExtendedISupport Capability = iota
// Languages is the proposed IRCv3 capability named "draft/languages": // Languages is the proposed IRCv3 capability named "draft/languages":
// https://gist.github.com/DanielOaks/8126122f74b26012a3de37db80e4e0c6 // https://gist.github.com/DanielOaks/8126122f74b26012a3de37db80e4e0c6
Languages Capability = iota Languages Capability = iota
@ -89,10 +85,6 @@ const (
// https://github.com/ircv3/ircv3-specifications/pull/417 // https://github.com/ircv3/ircv3-specifications/pull/417
Relaymsg Capability = iota Relaymsg Capability = iota
// WebPush is the proposed IRCv3 capability named "draft/webpush":
// https://github.com/ircv3/ircv3-specifications/pull/471
WebPush Capability = iota
// EchoMessage is the IRCv3 capability named "echo-message": // EchoMessage is the IRCv3 capability named "echo-message":
// https://ircv3.net/specs/extensions/echo-message-3.2.html // https://ircv3.net/specs/extensions/echo-message-3.2.html
EchoMessage Capability = iota EchoMessage Capability = iota
@ -137,10 +129,6 @@ const (
// https://ircv3.net/specs/extensions/setname.html // https://ircv3.net/specs/extensions/setname.html
SetName Capability = iota SetName Capability = iota
// SojuWebPush is the Soju/Goguma vendor capability named "soju.im/webpush":
// https://github.com/ircv3/ircv3-specifications/pull/471
SojuWebPush Capability = iota
// StandardReplies is the IRCv3 capability named "standard-replies": // StandardReplies is the IRCv3 capability named "standard-replies":
// https://github.com/ircv3/ircv3-specifications/pull/506 // https://github.com/ircv3/ircv3-specifications/pull/506
StandardReplies Capability = iota StandardReplies Capability = iota
@ -175,7 +163,6 @@ var (
"draft/channel-rename", "draft/channel-rename",
"draft/chathistory", "draft/chathistory",
"draft/event-playback", "draft/event-playback",
"draft/extended-isupport",
"draft/languages", "draft/languages",
"draft/message-redaction", "draft/message-redaction",
"draft/multiline", "draft/multiline",
@ -184,7 +171,6 @@ var (
"draft/pre-away", "draft/pre-away",
"draft/read-marker", "draft/read-marker",
"draft/relaymsg", "draft/relaymsg",
"draft/webpush",
"echo-message", "echo-message",
"ergo.chat/nope", "ergo.chat/nope",
"extended-join", "extended-join",
@ -196,7 +182,6 @@ var (
"sasl", "sasl",
"server-time", "server-time",
"setname", "setname",
"soju.im/webpush",
"standard-replies", "standard-replies",
"sts", "sts",
"userhost-in-names", "userhost-in-names",


@ -21,7 +21,6 @@ import (
"github.com/ergochat/ergo/irc/history" "github.com/ergochat/ergo/irc/history"
"github.com/ergochat/ergo/irc/modes" "github.com/ergochat/ergo/irc/modes"
"github.com/ergochat/ergo/irc/utils" "github.com/ergochat/ergo/irc/utils"
"github.com/ergochat/ergo/irc/webpush"
) )
type ChannelSettings struct { type ChannelSettings struct {
@ -223,7 +222,7 @@ func (channel *Channel) wakeWriter() {
// equivalent of Socket.send() // equivalent of Socket.send()
func (channel *Channel) writeLoop() { func (channel *Channel) writeLoop() {
defer channel.server.HandlePanic(nil) defer channel.server.HandlePanic()
for { for {
// TODO(#357) check the error value of this and implement timed backoff // TODO(#357) check the error value of this and implement timed backoff
@ -1321,21 +1320,18 @@ func (channel *Channel) SendSplitMessage(command string, minPrefixMode modes.Mod
isBot := client.HasMode(modes.Bot) isBot := client.HasMode(modes.Bot)
chname := channel.Name() chname := channel.Name()
// STATUSMSG targets are prefixed with the supplied min-prefix, e.g., @#channel if !client.server.Config().Server.Compatibility.allowTruncation {
if minPrefixMode != modes.Mode(0) {
chname = fmt.Sprintf("%s%s", modes.ChannelModePrefixes[minPrefixMode], chname)
}
config := client.server.Config()
dispatchWebPush := false
if !config.Server.Compatibility.allowTruncation {
if !validateSplitMessageLen(histType, details.nickMask, chname, message) { if !validateSplitMessageLen(histType, details.nickMask, chname, message) {
rb.Add(nil, client.server.name, ERR_INPUTTOOLONG, details.nick, client.t("Line too long to be relayed without truncation")) rb.Add(nil, client.server.name, ERR_INPUTTOOLONG, details.nick, client.t("Line too long to be relayed without truncation"))
return return
} }
} }
// STATUSMSG targets are prefixed with the supplied min-prefix, e.g., @#channel
if minPrefixMode != modes.Mode(0) {
chname = fmt.Sprintf("%s%s", modes.ChannelModePrefixes[minPrefixMode], chname)
}
if channel.flags.HasMode(modes.OpModerated) { if channel.flags.HasMode(modes.OpModerated) {
channel.stateMutex.RLock() channel.stateMutex.RLock()
cuData, ok := channel.members[client] cuData, ok := channel.members[client]
@ -1359,9 +1355,6 @@ func (channel *Channel) SendSplitMessage(command string, minPrefixMode modes.Mod
continue continue
} }
// TODO consider when we might want to push TAGMSG
dispatchWebPush = dispatchWebPush || (config.WebPush.Enabled && histType != history.Tagmsg && member.hasPushSubscriptions())
for _, session := range member.Sessions() { for _, session := range member.Sessions() {
if session == rb.session { if session == rb.session {
continue // we already sent echo-message, if applicable continue // we already sent echo-message, if applicable
@ -1385,42 +1378,6 @@ func (channel *Channel) SendSplitMessage(command string, minPrefixMode modes.Mod
Tags: clientOnlyTags, Tags: clientOnlyTags,
IsBot: isBot, IsBot: isBot,
}, details.account) }, details.account)
if dispatchWebPush {
channel.dispatchWebPush(client, command, details.nickMask, details.accountName, chname, message)
}
}
}
func (channel *Channel) dispatchWebPush(client *Client, command, nuh, accountName, chname string, msg utils.SplitMessage) {
msgBytes, err := webpush.MakePushMessage(command, nuh, accountName, chname, msg)
if err != nil {
channel.server.logger.Error("internal", "can't serialize push message", err.Error())
return
}
messageText := strings.ToLower(msg.CombinedValue())
for _, member := range channel.Members() {
if member == client {
continue // don't push to the client's own devices even if they mentioned themself
}
if !member.hasPushSubscriptions() {
continue
}
// this is the casefolded account name for comparison to the casefolded message text:
account := member.Account()
if account == "" {
continue
}
if !webpush.IsHighlight(messageText, account) {
continue
}
member.dispatchPushMessage(pushMessage{
msg: msgBytes,
urgency: webpush.UrgencyHigh,
cftarget: channel.NameCasefolded(),
time: msg.Time,
})
} }
} }
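
In the fan-out above, a channel message becomes a push notification only for members whose casefolded account name appears in the lowercased message text, as decided by `webpush.IsHighlight`. A rough standalone illustration of that gating; this naive token match is a stand-in for the idea, not the real `IsHighlight` implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// mentions is a simplistic stand-in for webpush.IsHighlight: it reports whether
// the already-lowercased message contains the casefolded account name as a
// standalone token (after trimming common trailing punctuation).
func mentions(lowercasedMsg, cfAccount string) bool {
	for _, tok := range strings.Fields(lowercasedMsg) {
		if strings.Trim(tok, ",.:;!?") == cfAccount {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(mentions("hey alice: are you around?", "alice")) // true
	fmt.Println(mentions("alicebot has joined", "alice"))        // false
}
```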


@ -6,7 +6,6 @@
package irc package irc
import ( import (
"context"
"crypto/x509" "crypto/x509"
"fmt" "fmt"
"maps" "maps"
@ -33,7 +32,6 @@ import (
"github.com/ergochat/ergo/irc/oauth2" "github.com/ergochat/ergo/irc/oauth2"
"github.com/ergochat/ergo/irc/sno" "github.com/ergochat/ergo/irc/sno"
"github.com/ergochat/ergo/irc/utils" "github.com/ergochat/ergo/irc/utils"
"github.com/ergochat/ergo/irc/webpush"
) )
const ( const (
@ -48,10 +46,6 @@ const (
// maximum total read markers that can be stored // maximum total read markers that can be stored
// (writeback of read markers is controlled by lastSeen logic) // (writeback of read markers is controlled by lastSeen logic)
maxReadMarkers = 256 maxReadMarkers = 256
// should be long enough to handle multiple notifications in rapid succession,
// short enough that it doesn't waste a lot of RAM per client
pushQueueLengthPerClient = 16
) )
const ( const (
@ -71,10 +65,6 @@ const (
PingCoalesceThreshold = time.Second PingCoalesceThreshold = time.Second
) )
const (
utf8BOM = "\xef\xbb\xbf"
)
var ( var (
MaxLineLen = DefaultMaxLineLen MaxLineLen = DefaultMaxLineLen
) )
@ -127,11 +117,6 @@ type Client struct {
history history.Buffer history history.Buffer
dirtyBits uint dirtyBits uint
writebackLock sync.Mutex // tier 1.5 writebackLock sync.Mutex // tier 1.5
pushSubscriptions map[string]*pushSubscription
cachedPushSubscriptions []storedPushSubscription
clearablePushMessages map[string]time.Time
pushSubscriptionsExist atomic.Uint32 // this is a cache on len(pushSubscriptions) != 0
pushQueue pushQueue
} }
type saslStatus struct { type saslStatus struct {
@ -167,8 +152,6 @@ const (
type Session struct { type Session struct {
client *Client client *Client
connID string // identifies the connection in debug logs
deviceID string deviceID string
ctime time.Time ctime time.Time
@ -196,8 +179,6 @@ type Session struct {
batchCounter atomic.Uint32 batchCounter atomic.Uint32
isupportSentPrereg bool
quitMessage string quitMessage string
awayMessage string awayMessage string
@ -213,8 +194,6 @@ type Session struct {
autoreplayMissedSince time.Time autoreplayMissedSince time.Time
batch MultilineBatch batch MultilineBatch
webPushEndpoint string // goroutine-local: web push endpoint registered by the current session
} }
// MultilineBatch tracks the state of a client-to-server multiline batch. // MultilineBatch tracks the state of a client-to-server multiline batch.
@ -353,8 +332,7 @@ func (server *Server) RunClient(conn IRCConn) {
return return
} }
connID := server.generateConnectionID() server.logger.Info("connect-ip", fmt.Sprintf("Client connecting: real IP %v, proxied IP %v", realIP, proxiedIP))
server.logger.Info("connect-ip", connID, fmt.Sprintf("Client connecting: real IP %v, proxied IP %v", realIP, proxiedIP))
now := time.Now().UTC() now := time.Now().UTC()
// give them 1k of grace over the limit: // give them 1k of grace over the limit:
@ -394,7 +372,6 @@ func (server *Server) RunClient(conn IRCConn) {
proxiedIP: proxiedIP, proxiedIP: proxiedIP,
isTor: wConn.Tor, isTor: wConn.Tor,
hideSTS: wConn.Tor || wConn.HideSTS, hideSTS: wConn.Tor || wConn.HideSTS,
connID: connID,
} }
session.sasl.Initialize() session.sasl.Initialize()
client.sessions = []*Session{session} client.sessions = []*Session{session}
@ -424,7 +401,7 @@ func (server *Server) RunClient(conn IRCConn) {
client.run(session) client.run(session)
} }
func (server *Server) AddAlwaysOnClient(account ClientAccount, channelToStatus map[string]alwaysOnChannelStatus, lastSeen, readMarkers map[string]time.Time, uModes modes.Modes, realname string, pushSubscriptions []storedPushSubscription) { func (server *Server) AddAlwaysOnClient(account ClientAccount, channelToStatus map[string]alwaysOnChannelStatus, lastSeen, readMarkers map[string]time.Time, uModes modes.Modes, realname string) {
now := time.Now().UTC() now := time.Now().UTC()
config := server.Config() config := server.Config()
if lastSeen == nil && account.Settings.AutoreplayMissed { if lastSeen == nil && account.Settings.AutoreplayMissed {
@ -501,14 +478,6 @@ func (server *Server) AddAlwaysOnClient(account ClientAccount, channelToStatus m
if persistenceEnabled(config.Accounts.Multiclient.AutoAway, client.accountSettings.AutoAway) { if persistenceEnabled(config.Accounts.Multiclient.AutoAway, client.accountSettings.AutoAway) {
client.setAutoAwayNoMutex(config) client.setAutoAwayNoMutex(config)
} }
if len(pushSubscriptions) != 0 {
client.pushSubscriptions = make(map[string]*pushSubscription, len(pushSubscriptions))
for _, sub := range pushSubscriptions {
client.pushSubscriptions[sub.Endpoint] = newPushSubscription(sub)
}
}
client.rebuildPushSubscriptionCache()
} }
func (client *Client) resizeHistory(config *Config) { func (client *Client) resizeHistory(config *Config) {
@ -686,7 +655,7 @@ func (client *Client) run(session *Session) {
if err == errInvalidUtf8 { if err == errInvalidUtf8 {
invalidUtf8 = true // handle as normal, including labeling invalidUtf8 = true // handle as normal, including labeling
} else if err != nil { } else if err != nil {
client.server.logger.Debug("connect-ip", session.connID, "read error from client", err.Error()) client.server.logger.Debug("connect-ip", "read error from client", err.Error())
var quitMessage string var quitMessage string
switch err { switch err {
case ircreader.ErrReadQ: case ircreader.ErrReadQ:
@ -699,7 +668,7 @@ func (client *Client) run(session *Session) {
} }
if client.server.logger.IsLoggingRawIO() { if client.server.logger.IsLoggingRawIO() {
client.server.logger.Debug("userinput", session.connID, client.nick, "<-", line) client.server.logger.Debug("userinput", client.nick, "<- ", line)
} }
// special-cased handling of PROXY protocol, see `handleProxyCommand` for details: // special-cased handling of PROXY protocol, see `handleProxyCommand` for details:
@ -731,12 +700,8 @@ func (client *Client) run(session *Session) {
} }
session.fakelag.Touch(command) session.fakelag.Touch(command)
} else { } else {
if session.registrationMessages == 0 && httpVerbs.Has(msg.Command) {
client.Send(nil, client.server.name, ERR_UNKNOWNERROR, msg.Command, "This is not an HTTP server")
break
}
session.registrationMessages++
// DoS hardening, #505 // DoS hardening, #505
session.registrationMessages++
if client.server.Config().Limits.RegistrationMessages < session.registrationMessages { if client.server.Config().Limits.RegistrationMessages < session.registrationMessages {
client.Send(nil, client.server.name, ERR_UNKNOWNERROR, "*", client.t("You have sent too many registration messages")) client.Send(nil, client.server.name, ERR_UNKNOWNERROR, "*", client.t("You have sent too many registration messages"))
break break
@ -754,16 +719,17 @@ func (client *Client) run(session *Session) {
continue continue
} // else: proceed with the truncated line } // else: proceed with the truncated line
} else if err != nil { } else if err != nil {
message := "Received malformed line" client.Quit(client.t("Received malformed line"), session)
if strings.HasPrefix(line, utf8BOM) {
message = "Received UTF-8 byte-order mark, which is invalid at the start of an IRC protocol message"
}
client.Quit(message, session)
break break
} }
var cmd Command cmd, exists := Commands[msg.Command]
msg.Command, cmd = client.server.resolveCommand(msg.Command, invalidUtf8) if !exists {
cmd = unknownCommand
} else if invalidUtf8 {
cmd = invalidUtf8Command
}
isExiting := cmd.Run(client.server, client, session, msg) isExiting := cmd.Run(client.server, client, session, msg)
if isExiting { if isExiting {
break break
@ -1194,18 +1160,12 @@ func (client *Client) LoggedIntoAccount() bool {
// (You must ensure separately that destroy() is called, e.g., by returning `true` from // (You must ensure separately that destroy() is called, e.g., by returning `true` from
// the command handler or calling it yourself.) // the command handler or calling it yourself.)
func (client *Client) Quit(message string, session *Session) { func (client *Client) Quit(message string, session *Session) {
nuh := client.NickMaskString()
now := time.Now().UTC()
setFinalData := func(sess *Session) { setFinalData := func(sess *Session) {
message := sess.quitMessage message := sess.quitMessage
var finalData []byte var finalData []byte
// #364: don't send QUIT lines to unregistered clients // #364: don't send QUIT lines to unregistered clients
if client.registered { if client.registered {
quitMsg := ircmsg.MakeMessage(nil, nuh, "QUIT", message) quitMsg := ircmsg.MakeMessage(nil, client.nickMaskString, "QUIT", message)
if sess.capabilities.Has(caps.ServerTime) {
quitMsg.SetTag("time", now.Format(utils.IRCv3TimestampFormat))
}
finalData, _ = quitMsg.LineBytesStrict(false, MaxLineLen) finalData, _ = quitMsg.LineBytesStrict(false, MaxLineLen)
} }
@ -1325,7 +1285,7 @@ func (client *Client) destroy(session *Session) {
if !shouldDestroy { if !shouldDestroy {
client.server.snomasks.Send(sno.LocalDisconnects, fmt.Sprintf(ircfmt.Unescape("Client session disconnected for [a:%s] [h:%s] [ip:%s]"), details.accountName, session.rawHostname, source)) client.server.snomasks.Send(sno.LocalDisconnects, fmt.Sprintf(ircfmt.Unescape("Client session disconnected for [a:%s] [h:%s] [ip:%s]"), details.accountName, session.rawHostname, source))
} }
client.server.logger.Info("connect-ip", session.connID, fmt.Sprintf("Disconnecting session of %s from %s", details.nick, source)) client.server.logger.Info("connect-ip", fmt.Sprintf("disconnecting session of %s from %s", details.nick, source))
} }
// decrement stats if we have no more sessions, even if the client will not be destroyed // decrement stats if we have no more sessions, even if the client will not be destroyed
@ -1535,7 +1495,7 @@ func (session *Session) SendRawMessage(message ircmsg.Message, blocking bool) er
func (session *Session) sendBytes(line []byte, blocking bool) (err error) { func (session *Session) sendBytes(line []byte, blocking bool) (err error) {
if session.client.server.logger.IsLoggingRawIO() { if session.client.server.logger.IsLoggingRawIO() {
logline := string(line[:len(line)-2]) // strip "\r\n" logline := string(line[:len(line)-2]) // strip "\r\n"
session.client.server.logger.Debug("useroutput", session.connID, session.client.Nick(), "->", logline) session.client.server.logger.Debug("useroutput", session.client.Nick(), " ->", logline)
} }
if blocking { if blocking {
@ -1544,7 +1504,7 @@ func (session *Session) sendBytes(line []byte, blocking bool) (err error) {
err = session.socket.Write(line) err = session.socket.Write(line)
} }
if err != nil { if err != nil {
session.client.server.logger.Info("quit", session.connID, "send error to client", session.client.Nick(), err.Error()) session.client.server.logger.Info("quit", "send error to client", fmt.Sprintf("%s [%d]", session.client.Nick(), session.sessionID), err.Error())
} }
return err return err
} }
@ -1814,7 +1774,6 @@ const (
IncludeChannels uint = 1 << iota IncludeChannels uint = 1 << iota
IncludeUserModes IncludeUserModes
IncludeRealname IncludeRealname
IncludePushSubscriptions
) )
func (client *Client) markDirty(dirtyBits uint) { func (client *Client) markDirty(dirtyBits uint) {
@ -1835,7 +1794,7 @@ func (client *Client) wakeWriter() {
} }
func (client *Client) writeLoop() { func (client *Client) writeLoop() {
defer client.server.HandlePanic(nil) defer client.server.HandlePanic()
for { for {
client.performWrite(0) client.performWrite(0)
@ -1893,9 +1852,6 @@ func (client *Client) performWrite(additionalDirtyBits uint) {
if (dirtyBits & IncludeRealname) != 0 { if (dirtyBits & IncludeRealname) != 0 {
client.server.accounts.saveRealname(account, client.realname) client.server.accounts.saveRealname(account, client.realname)
} }
if (dirtyBits & IncludePushSubscriptions) != 0 {
client.server.accounts.savePushSubscriptions(account, client.getPushSubscriptions(true))
}
} }
// Blocking store; see Channel.Store and Socket.BlockingWrite // Blocking store; see Channel.Store and Socket.BlockingWrite
@ -1915,134 +1871,3 @@ func (client *Client) Store(dirtyBits uint) (err error) {
client.performWrite(dirtyBits) client.performWrite(dirtyBits)
return nil return nil
} }
// pushSubscription represents all the data we track about the state of a push subscription;
// right now every field is persisted, but we may want to persist only a subset in future
type pushSubscription struct {
storedPushSubscription
}
// storedPushSubscription represents a subscription as stored in the database
type storedPushSubscription struct {
Endpoint string
Keys webpush.Keys
LastRefresh time.Time // last time the client sent WEBPUSH REGISTER for this endpoint
LastSuccess time.Time // last time we successfully pushed to this endpoint
}
func newPushSubscription(sub storedPushSubscription) *pushSubscription {
return &pushSubscription{
storedPushSubscription: sub,
// TODO any other initialization here, like rate limiting
}
}
type pushMessage struct {
msg []byte
urgency webpush.Urgency
originatingEndpoint string
cftarget string
time time.Time
}
type pushQueue struct {
workerLock sync.Mutex
queue chan pushMessage
once sync.Once
dropped atomic.Uint64
}
func (c *Client) ensurePushInitialized() {
c.pushQueue.once.Do(c.initializePush)
}
func (c *Client) initializePush() {
// allocate the queue
c.pushQueue.queue = make(chan pushMessage, pushQueueLengthPerClient)
}
func (client *Client) dispatchPushMessage(msg pushMessage) {
client.ensurePushInitialized()
select {
case client.pushQueue.queue <- msg:
if client.pushQueue.workerLock.TryLock() {
go client.pushWorker()
}
default:
client.pushQueue.dropped.Add(1)
}
}
func (client *Client) pushWorker() {
defer client.server.HandlePanic(nil)
defer client.pushQueue.workerLock.Unlock()
for {
select {
case msg := <-client.pushQueue.queue:
for _, sub := range client.getPushSubscriptions(false) {
if !client.skipPushMessage(msg) {
client.sendAndTrackPush(sub.Endpoint, sub.Keys, msg, true)
}
}
default:
// no more messages, end the goroutine and release the trylock
return
}
}
}
// skipPushMessage waits up to the configured delay for the client to send MARKREAD;
// it returns whether the message has been read
func (client *Client) skipPushMessage(msg pushMessage) bool {
if msg.cftarget == "" || msg.time.IsZero() {
return false
}
config := client.server.Config()
if config.WebPush.Delay == 0 {
return false
}
deadline := msg.time.Add(config.WebPush.Delay)
pause := time.Until(deadline)
if pause > 0 {
time.Sleep(pause)
}
readTimestamp, ok := client.getMarkreadTime(msg.cftarget)
return ok && utils.ReadMarkerLessThanOrEqual(msg.time, readTimestamp)
}
func (client *Client) sendAndTrackPush(endpoint string, keys webpush.Keys, msg pushMessage, updateDB bool) {
if endpoint == msg.originatingEndpoint {
return
}
if msg.cftarget != "" && !msg.time.IsZero() {
client.addClearablePushMessage(msg.cftarget, msg.time)
}
switch client.sendPush(endpoint, keys, msg.urgency, msg.msg) {
case nil:
client.recordPush(endpoint, true)
case webpush.Err404:
client.deletePushSubscription(endpoint, updateDB)
default:
client.recordPush(endpoint, false)
}
}
func (client *Client) sendPush(endpoint string, keys webpush.Keys, urgency webpush.Urgency, msg []byte) error {
config := client.server.Config()
// final sanity check
if !config.WebPush.Enabled {
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), config.WebPush.Timeout)
defer cancel()
err := webpush.SendWebPush(ctx, endpoint, keys, config.WebPush.vapidKeys, urgency, config.WebPush.Subscriber, msg)
if err == nil {
client.server.logger.Debug("webpush", "dispatched push to client", client.Nick(), endpoint)
} else {
client.server.logger.Debug("webpush", "failed to dispatch push to client", client.Nick(), endpoint, err.Error())
}
return err
}
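
The per-client push queue above is a reusable pattern: a small buffered channel absorbs bursts, a worker goroutine is spawned lazily via `TryLock` only when there is work, and overflow increments a drop counter instead of blocking, so a slow push endpoint can never stall message delivery. A standalone sketch of the same scheme (the `deliver` callback stands in for the actual web push request):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// pushQueue mirrors the scheme above: bounded channel, lazily spawned worker
// guarded by TryLock, and a drop counter instead of blocking when full.
type pushQueue struct {
	workerLock sync.Mutex
	queue      chan string
	dropped    atomic.Uint64
	deliver    func(string) // stands in for the web push HTTP request
}

func (q *pushQueue) dispatch(msg string) {
	select {
	case q.queue <- msg:
		if q.workerLock.TryLock() {
			go q.worker()
		}
	default:
		q.dropped.Add(1) // queue full: drop rather than block the hot path
	}
}

func (q *pushQueue) worker() {
	defer q.workerLock.Unlock()
	for {
		select {
		case msg := <-q.queue:
			q.deliver(msg)
		default:
			return // drained: exit and release the trylock
		}
	}
}

func main() {
	q := &pushQueue{
		queue:   make(chan string, 16),
		deliver: func(m string) { fmt.Println("pushed:", m) },
	}
	for i := 0; i < 4; i++ {
		q.dispatch(fmt.Sprintf("notification %d", i))
	}
	time.Sleep(100 * time.Millisecond) // let the worker drain before main exits
	fmt.Println("dropped:", q.dropped.Load())
}
```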


@ -253,14 +253,15 @@ func (clients *ClientManager) AllClients() (result []*Client) {
return return
} }
// AllWithCapsNotify returns all sessions that support cap-notify. // AllWithCapsNotify returns all clients with the given capabilities, and that support cap-notify.
func (clients *ClientManager) AllWithCapsNotify() (sessions []*Session) { func (clients *ClientManager) AllWithCapsNotify(capabs ...caps.Capability) (sessions []*Session) {
capabs = append(capabs, caps.CapNotify)
clients.RLock() clients.RLock()
defer clients.RUnlock() defer clients.RUnlock()
for _, client := range clients.byNick { for _, client := range clients.byNick {
for _, session := range client.Sessions() { for _, session := range client.Sessions() {
// cap-notify is implicit in cap version 302 and above // cap-notify is implicit in cap version 302 and above
if session.capabilities.Has(caps.CapNotify) || 302 <= session.capVersion { if session.capabilities.HasAll(capabs...) || 302 <= session.capVersion {
sessions = append(sessions, session) sessions = append(sessions, session)
} }
} }
@ -269,18 +270,6 @@ func (clients *ClientManager) AllWithCapsNotify() (sessions []*Session) {
return return
} }
// AllWithPushSubscriptions returns all clients that are always-on with an active push subscription.
func (clients *ClientManager) AllWithPushSubscriptions() (result []*Client) {
clients.RLock()
defer clients.RUnlock()
for _, client := range clients.byNick {
if client.hasPushSubscriptions() && client.AlwaysOn() {
result = append(result, client)
}
}
return result
}
// FindAll returns all clients that match the given userhost mask. // FindAll returns all clients that match the given userhost mask.
func (clients *ClientManager) FindAll(userhost string) (set ClientSet) { func (clients *ClientManager) FindAll(userhost string) (set ClientSet) {
set = make(ClientSet) set = make(ClientSet)


@ -6,7 +6,7 @@ import (
"fmt" "fmt"
"net" "net"
"crypto/sha3" "golang.org/x/crypto/sha3"
"github.com/ergochat/ergo/irc/utils" "github.com/ergochat/ergo/irc/utils"
) )


@ -18,24 +18,6 @@ type Command struct {
capabs []string capabs []string
} }
// resolveCommand returns the command to execute in response to a user input line.
// some invalid commands (unknown command verb, invalid UTF8) get a fake handler
// to ensure that labeled-response still works as expected.
func (server *Server) resolveCommand(command string, invalidUTF8 bool) (canonicalName string, result Command) {
if invalidUTF8 {
return command, invalidUtf8Command
}
if cmd, ok := Commands[command]; ok {
return command, cmd
}
if target, ok := server.Config().Server.CommandAliases[command]; ok {
if cmd, ok := Commands[target]; ok {
return target, cmd
}
}
return command, unknownCommand
}
// Run runs this command with the given client/message. // Run runs this command with the given client/message.
func (cmd *Command) Run(server *Server, client *Client, session *Session, msg ircmsg.Message) (exiting bool) { func (cmd *Command) Run(server *Server, client *Client, session *Session, msg ircmsg.Message) (exiting bool) {
rb := NewResponseBuffer(session) rb := NewResponseBuffer(session)
@ -170,10 +152,6 @@ func init() {
handler: isonHandler, handler: isonHandler,
minParams: 1, minParams: 1,
}, },
"ISUPPORT": {
handler: isupportHandler,
usablePreReg: true,
},
"JOIN": { "JOIN": {
handler: joinHandler, handler: joinHandler,
minParams: 1, minParams: 1,
@ -385,10 +363,6 @@ func init() {
usablePreReg: true, usablePreReg: true,
minParams: 4, minParams: 4,
}, },
"WEBPUSH": {
handler: webpushHandler,
minParams: 2,
},
"WHO": { "WHO": {
handler: whoHandler, handler: whoHandler,
minParams: 1, minParams: 1,
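
`resolveCommand` above gives configured aliases the lowest precedence: a real command always wins, an alias is honored only if it maps to a real command, and anything else falls through to `unknownCommand` so labeled-response still gets a reply. A standalone sketch of that precedence with stubbed tables; the alias shown is hypothetical:

```go
package main

import "fmt"

var commands = map[string]bool{"WHOIS": true, "JOIN": true} // stub for the real Commands map
var aliases = map[string]string{"WH": "WHOIS"}              // hypothetical command-aliases entry

func resolve(verb string) string {
	if commands[verb] {
		return verb // real commands always win
	}
	if target, ok := aliases[verb]; ok && commands[target] {
		return target // alias resolves to a real command
	}
	return verb // left as-is; the real code then dispatches unknownCommand
}

func main() {
	fmt.Println(resolve("JOIN"), resolve("WH"), resolve("BOGUS")) // JOIN WHOIS BOGUS
}
```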


@ -22,7 +22,6 @@ import (
"strconv" "strconv"
"strings" "strings"
"time" "time"
"unicode/utf8"
"code.cloudfoundry.org/bytefmt" "code.cloudfoundry.org/bytefmt"
"github.com/ergochat/irc-go/ircfmt" "github.com/ergochat/irc-go/ircfmt"
@ -42,7 +41,6 @@ import (
"github.com/ergochat/ergo/irc/oauth2" "github.com/ergochat/ergo/irc/oauth2"
"github.com/ergochat/ergo/irc/passwd" "github.com/ergochat/ergo/irc/passwd"
"github.com/ergochat/ergo/irc/utils" "github.com/ergochat/ergo/irc/utils"
"github.com/ergochat/ergo/irc/webpush"
) )
// here's how this works: exported (capitalized) members of the config structs // here's how this works: exported (capitalized) members of the config structs
@ -609,27 +607,14 @@ type Config struct {
OverrideServicesHostname string `yaml:"override-services-hostname"` OverrideServicesHostname string `yaml:"override-services-hostname"`
MaxLineLen int `yaml:"max-line-len"` MaxLineLen int `yaml:"max-line-len"`
SuppressLusers bool `yaml:"suppress-lusers"` SuppressLusers bool `yaml:"suppress-lusers"`
AdditionalISupport map[string]string `yaml:"additional-isupport"`
CommandAliases map[string]string `yaml:"command-aliases"`
} }
API struct {
Enabled bool
Listener string
TLS TLSListenConfig
tlsConfig *tls.Config
BearerTokens []string `yaml:"bearer-tokens"`
bearerTokenBytes [][]byte
} `yaml:"api"`
Roleplay struct { Roleplay struct {
Enabled bool Enabled bool
RequireChanops bool `yaml:"require-chanops"` RequireChanops bool `yaml:"require-chanops"`
RequireOper bool `yaml:"require-oper"` RequireOper bool `yaml:"require-oper"`
AddSuffix *bool `yaml:"add-suffix"` AddSuffix *bool `yaml:"add-suffix"`
addSuffix bool addSuffix bool
NPCNickMask string `yaml:"npc-nick-mask"`
SceneNickMask string `yaml:"scene-nick-mask"`
} }
Extjwt struct { Extjwt struct {
@ -723,16 +708,6 @@ type Config struct {
} `yaml:"tagmsg-storage"` } `yaml:"tagmsg-storage"`
} }
WebPush struct {
Enabled bool
Timeout time.Duration
Delay time.Duration
Subscriber string
MaxSubscriptions int `yaml:"max-subscriptions"`
Expiration custime.Duration
vapidKeys *webpush.VAPIDKeys
} `yaml:"webpush"`
Filename string Filename string
} }
@ -1023,40 +998,6 @@ func (config *Config) processExtjwt() (err error) {
return nil return nil
} }
func (config *Config) processAPI() (err error) {
if !config.API.Enabled {
return nil
}
if config.API.Listener == "" {
return errors.New("config.api.enabled is true, but listener address is empty")
}
config.API.bearerTokenBytes = make([][]byte, len(config.API.BearerTokens))
for i, tok := range config.API.BearerTokens {
if tok == "" || tok == "example" {
continue
}
config.API.bearerTokenBytes[i] = []byte(tok)
}
var tlsConfig *tls.Config
if config.API.TLS.Cert != "" {
cert, err := loadCertWithLeaf(config.API.TLS.Cert, config.API.TLS.Key)
if err != nil {
return err
}
tlsConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
MinVersion: tls.VersionTLS12,
// TODO consider supporting client certificates
}
}
config.API.tlsConfig = tlsConfig
return nil
}
// LoadRawConfig loads the config without doing any consistency checks or postprocessing // LoadRawConfig loads the config without doing any consistency checks or postprocessing
func LoadRawConfig(filename string) (config *Config, err error) { func LoadRawConfig(filename string) (config *Config, err error) {
data, err := os.ReadFile(filename) data, err := os.ReadFile(filename)
@ -1578,7 +1519,6 @@ func LoadConfig(filename string) (config *Config, err error) {
config.Server.supportedCaps.Disable(caps.Chathistory) config.Server.supportedCaps.Disable(caps.Chathistory)
config.Server.supportedCaps.Disable(caps.EventPlayback) config.Server.supportedCaps.Disable(caps.EventPlayback)
config.Server.supportedCaps.Disable(caps.ZNCPlayback) config.Server.supportedCaps.Disable(caps.ZNCPlayback)
config.Server.supportedCaps.Disable(caps.MessageRedaction)
} }
if !config.History.Enabled || !config.History.Persistent.Enabled { if !config.History.Enabled || !config.History.Persistent.Enabled {
@ -1609,17 +1549,7 @@ func LoadConfig(filename string) (config *Config, err error) {
} }
} }
if !config.History.Retention.AllowIndividualDelete {
config.Server.supportedCaps.Disable(caps.MessageRedaction) // #2215
}
config.Roleplay.addSuffix = utils.BoolDefaultTrue(config.Roleplay.AddSuffix) config.Roleplay.addSuffix = utils.BoolDefaultTrue(config.Roleplay.AddSuffix)
if config.Roleplay.NPCNickMask == "" {
config.Roleplay.NPCNickMask = defaultNPCNickMask
}
if config.Roleplay.SceneNickMask == "" {
config.Roleplay.SceneNickMask = defaultSceneNickMask
}
config.Datastore.MySQL.ExpireTime = time.Duration(config.History.Restrictions.ExpireTime) config.Datastore.MySQL.ExpireTime = time.Duration(config.History.Restrictions.ExpireTime)
config.Datastore.MySQL.TrackAccountMessages = config.History.Retention.EnableAccountIndexing config.Datastore.MySQL.TrackAccountMessages = config.History.Retention.EnableAccountIndexing
@ -1642,39 +1572,6 @@ func LoadConfig(filename string) (config *Config, err error) {
return nil, err return nil, err
} }
if config.WebPush.Enabled {
if config.Accounts.Multiclient.AlwaysOn == PersistentDisabled {
return nil, fmt.Errorf("Cannot enable webpush if always-on is disabled")
}
if config.WebPush.Timeout == 0 {
config.WebPush.Timeout = 10 * time.Second
}
if config.WebPush.Subscriber == "" {
config.WebPush.Subscriber = "https://ergo.chat/about"
}
if config.WebPush.MaxSubscriptions <= 0 {
config.WebPush.MaxSubscriptions = 1
}
if config.WebPush.Expiration == 0 {
config.WebPush.Expiration = custime.Duration(14 * 24 * time.Hour)
} else if config.WebPush.Expiration < custime.Duration(3*24*time.Hour) {
return nil, fmt.Errorf("webpush.expiration is too short (should be several days)")
}
} else {
config.Server.supportedCaps.Disable(caps.WebPush)
config.Server.supportedCaps.Disable(caps.SojuWebPush)
}
err = config.processAPI()
if err != nil {
return nil, err
}
config.Server.CommandAliases, err = normalizeCommandAliases(config.Server.CommandAliases)
if err != nil {
return nil, err
}
// now that all postprocessing is complete, regenerate ISUPPORT: // now that all postprocessing is complete, regenerate ISUPPORT:
err = config.generateISupport() err = config.generateISupport()
if err != nil { if err != nil {
@ -1759,8 +1656,6 @@ func (config *Config) generateISupport() (err error) {
isupport.Add("RPCHAN", "E") isupport.Add("RPCHAN", "E")
isupport.Add("RPUSER", "E") isupport.Add("RPUSER", "E")
} }
isupport.Add("SAFELIST", "")
isupport.Add("SAFERATE", "")
isupport.Add("STATUSMSG", "~&@%+") isupport.Add("STATUSMSG", "~&@%+")
isupport.Add("TARGMAX", fmt.Sprintf("NAMES:1,LIST:1,KICK:,WHOIS:1,USERHOST:10,PRIVMSG:%s,TAGMSG:%s,NOTICE:%s,MONITOR:%d", maxTargetsString, maxTargetsString, maxTargetsString, config.Limits.MonitorEntries)) isupport.Add("TARGMAX", fmt.Sprintf("NAMES:1,LIST:1,KICK:,WHOIS:1,USERHOST:10,PRIVMSG:%s,TAGMSG:%s,NOTICE:%s,MONITOR:%d", maxTargetsString, maxTargetsString, maxTargetsString, config.Limits.MonitorEntries))
isupport.Add("TOPICLEN", strconv.Itoa(config.Limits.TopicLen)) isupport.Add("TOPICLEN", strconv.Itoa(config.Limits.TopicLen))
@ -1770,21 +1665,8 @@ func (config *Config) generateISupport() (err error) {
if config.Server.EnforceUtf8 { if config.Server.EnforceUtf8 {
isupport.Add("UTF8ONLY", "") isupport.Add("UTF8ONLY", "")
} }
if config.WebPush.Enabled {
// XXX we typically don't have this at config parse time, so we'll have to regenerate
// the cached reply later
if config.WebPush.vapidKeys != nil {
isupport.Add("VAPID", config.WebPush.vapidKeys.PublicKeyString())
}
}
isupport.Add("WHOX", "") isupport.Add("WHOX", "")
for key, value := range config.Server.AdditionalISupport {
if !isupport.Contains(key) {
isupport.Add(key, value)
}
}
err = isupport.RegenerateCachedReply() err = isupport.RegenerateCachedReply()
return return
} }
@ -1886,9 +1768,6 @@ func (config *Config) loadMOTD() error {
if config.Server.MOTDFormatting { if config.Server.MOTDFormatting {
lineToSend = ircfmt.Unescape(lineToSend) lineToSend = ircfmt.Unescape(lineToSend)
} }
if config.Server.EnforceUtf8 && !utf8.ValidString(lineToSend) {
return fmt.Errorf("Line %d of MOTD contains invalid UTF8", i+1)
}
// "- " is the required prefix for MOTD // "- " is the required prefix for MOTD
lineToSend = fmt.Sprintf("- %s", lineToSend) lineToSend = fmt.Sprintf("- %s", lineToSend)
config.Server.motdLines = append(config.Server.motdLines, lineToSend) config.Server.motdLines = append(config.Server.motdLines, lineToSend)
@ -1896,22 +1775,3 @@ func (config *Config) loadMOTD() error {
} }
return nil return nil
} }
func normalizeCommandAliases(aliases map[string]string) (normalizedAliases map[string]string, err error) {
if len(aliases) == 0 {
return nil, nil
}
normalizedAliases = make(map[string]string, len(aliases))
for alias, command := range aliases {
alias = strings.ToUpper(alias)
command = strings.ToUpper(command)
if _, found := Commands[alias]; found {
return nil, fmt.Errorf("Command alias `%s` collides with a real Ergo command", alias)
}
if _, found := Commands[command]; !found {
return nil, fmt.Errorf("Command alias `%s` mapped to non-existent Ergo command `%s`", alias, command)
}
normalizedAliases[alias] = command
}
return normalizedAliases, nil
}


@ -18,7 +18,6 @@ import (
"github.com/ergochat/ergo/irc/datastore" "github.com/ergochat/ergo/irc/datastore"
"github.com/ergochat/ergo/irc/modes" "github.com/ergochat/ergo/irc/modes"
"github.com/ergochat/ergo/irc/utils" "github.com/ergochat/ergo/irc/utils"
"github.com/ergochat/ergo/irc/webpush"
"github.com/tidwall/buntdb" "github.com/tidwall/buntdb"
) )
@ -28,17 +27,15 @@ const (
// 'version' of the database schema // 'version' of the database schema
// latest schema of the db // latest schema of the db
latestDbSchema = 24 latestDbSchema = 23
) )
var ( var (
schemaVersionUUID = utils.UUID{0, 255, 85, 13, 212, 10, 191, 121, 245, 152, 142, 89, 97, 141, 219, 87} // AP9VDdQKv3n1mI5ZYY3bVw schemaVersionUUID = utils.UUID{0, 255, 85, 13, 212, 10, 191, 121, 245, 152, 142, 89, 97, 141, 219, 87} // AP9VDdQKv3n1mI5ZYY3bVw
cloakSecretUUID = utils.UUID{170, 214, 184, 208, 116, 181, 67, 75, 161, 23, 233, 16, 113, 251, 94, 229} // qta40HS1Q0uhF-kQcfte5Q cloakSecretUUID = utils.UUID{170, 214, 184, 208, 116, 181, 67, 75, 161, 23, 233, 16, 113, 251, 94, 229} // qta40HS1Q0uhF-kQcfte5Q
vapidKeysUUID = utils.UUID{87, 215, 189, 5, 65, 105, 249, 44, 65, 96, 170, 56, 187, 110, 12, 235} // V9e9BUFp-SxBYKo4u24M6w
keySchemaVersion = bunt.BuntKey(datastore.TableMetadata, schemaVersionUUID) keySchemaVersion = bunt.BuntKey(datastore.TableMetadata, schemaVersionUUID)
keyCloakSecret = bunt.BuntKey(datastore.TableMetadata, cloakSecretUUID) keyCloakSecret = bunt.BuntKey(datastore.TableMetadata, cloakSecretUUID)
keyVAPIDKeys = bunt.BuntKey(datastore.TableMetadata, vapidKeysUUID)
) )
type SchemaChanger func(*Config, *buntdb.Tx) error type SchemaChanger func(*Config, *buntdb.Tx) error
@ -83,15 +80,6 @@ func initializeDB(path string) error {
// set schema version // set schema version
tx.Set(keySchemaVersion, strconv.Itoa(latestDbSchema), nil) tx.Set(keySchemaVersion, strconv.Itoa(latestDbSchema), nil)
tx.Set(keyCloakSecret, utils.GenerateSecretKey(), nil) tx.Set(keyCloakSecret, utils.GenerateSecretKey(), nil)
vapidKeys, err := webpush.GenerateVAPIDKeys()
if err != nil {
return err
}
j, err := json.Marshal(vapidKeys)
if err != nil {
return err
}
tx.Set(keyVAPIDKeys, string(j), nil)
return nil return nil
}) })
@ -245,16 +233,6 @@ func StoreCloakSecret(dstore datastore.Datastore, secret string) {
dstore.Set(datastore.TableMetadata, cloakSecretUUID, []byte(secret), time.Time{}) dstore.Set(datastore.TableMetadata, cloakSecretUUID, []byte(secret), time.Time{})
} }
func LoadVAPIDKeys(dstore datastore.Datastore) (*webpush.VAPIDKeys, error) {
val, err := dstore.Get(datastore.TableMetadata, vapidKeysUUID)
if err != nil {
return nil, err
}
result := new(webpush.VAPIDKeys)
err = json.Unmarshal([]byte(val), result)
return result, err
}
func schemaChangeV1toV2(config *Config, tx *buntdb.Tx) error { func schemaChangeV1toV2(config *Config, tx *buntdb.Tx) error {
// == version 1 -> 2 == // == version 1 -> 2 ==
// account key changes and account.verified key bugfix. // account key changes and account.verified key bugfix.
@ -1240,20 +1218,6 @@ func schemaChangeV22ToV23(config *Config, tx *buntdb.Tx) error {
return nil return nil
} }
// webpush signing key
func schemaChangeV23ToV24(config *Config, tx *buntdb.Tx) error {
keys, err := webpush.GenerateVAPIDKeys()
if err != nil {
return err
}
j, err := json.Marshal(keys)
if err != nil {
return err
}
tx.Set(keyVAPIDKeys, string(j), nil)
return nil
}
func getSchemaChange(initialVersion int) (result SchemaChange, ok bool) { func getSchemaChange(initialVersion int) (result SchemaChange, ok bool) {
for _, change := range allChanges { for _, change := range allChanges {
if initialVersion == change.InitialVersion { if initialVersion == change.InitialVersion {
@ -1374,9 +1338,4 @@ var allChanges = []SchemaChange{
TargetVersion: 23, TargetVersion: 23,
Changer: schemaChangeV22ToV23, Changer: schemaChangeV22ToV23,
}, },
{
InitialVersion: 23,
TargetVersion: 24,
Changer: schemaChangeV23ToV24,
},
} }
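
Schema bumps such as the v23-to-v24 change above all follow the same shape: a `Changer` function that rewrites keys inside the already-open buntdb transaction, registered in `allChanges` with consecutive version numbers. A hypothetical sketch of what the next entry would look like (an in-package fragment rather than standalone code; the version numbers and body are invented for illustration):

```go
// hypothetical future migration, following the pattern of schemaChangeV23ToV24 above
func schemaChangeV24ToV25(config *Config, tx *buntdb.Tx) error {
	// rewrite or add whatever keys the new schema requires, inside the
	// same transaction the migration framework already holds open
	return nil
}

// ...and its registration at the end of allChanges:
//	{
//		InitialVersion: 24,
//		TargetVersion:  25,
//		Changer:        schemaChangeV24ToV25,
//	},
```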


@ -4,18 +4,9 @@
package email package email
import ( import (
"bytes"
"crypto"
"crypto/ed25519"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors" "errors"
"fmt" dkim "github.com/toorop/go-dkim"
"os" "os"
dkim "github.com/emersion/go-msgauth/dkim"
) )
var ( var (
@ -26,77 +17,38 @@ type DKIMConfig struct {
Domain string Domain string
Selector string Selector string
KeyFile string `yaml:"key-file"` KeyFile string `yaml:"key-file"`
privKey crypto.Signer keyBytes []byte
}
func (dkim *DKIMConfig) Enabled() bool {
return dkim.Domain != ""
} }
func (dkim *DKIMConfig) Postprocess() (err error) { func (dkim *DKIMConfig) Postprocess() (err error) {
if !dkim.Enabled() { if dkim.Domain != "" {
return nil
}
if dkim.Selector == "" || dkim.KeyFile == "" { if dkim.Selector == "" || dkim.KeyFile == "" {
return ErrMissingFields return ErrMissingFields
} }
dkim.keyBytes, err = os.ReadFile(dkim.KeyFile)
keyBytes, err := os.ReadFile(dkim.KeyFile)
if err != nil { if err != nil {
return fmt.Errorf("Could not read DKIM key file: %w", err) return err
} }
dkim.privKey, err = parseDKIMPrivKey(keyBytes)
if err != nil {
return fmt.Errorf("Could not parse DKIM key file: %w", err)
} }
return nil return nil
} }
func parseDKIMPrivKey(input []byte) (crypto.Signer, error) { var defaultOptions = dkim.SigOptions{
if len(input) == 0 { Version: 1,
return nil, errors.New("DKIM private key is empty") Canonicalization: "relaxed/relaxed",
} Algo: "rsa-sha256",
Headers: []string{"from", "to", "subject", "message-id", "date"},
// raw ed25519 private key format BodyLength: 0,
if len(input) == ed25519.PrivateKeySize { QueryMethods: []string{"dns/txt"},
return ed25519.PrivateKey(input), nil AddSignatureTimestamp: true,
} SignatureExpireIn: 0,
d, _ := pem.Decode(input)
if d == nil {
return nil, errors.New("Invalid PEM data for DKIM private key")
}
if rsaKey, err := x509.ParsePKCS1PrivateKey(d.Bytes); err == nil {
return rsaKey, nil
}
if k, err := x509.ParsePKCS8PrivateKey(d.Bytes); err == nil {
switch key := k.(type) {
case *rsa.PrivateKey:
return key, nil
case ed25519.PrivateKey:
return key, nil
default:
return nil, fmt.Errorf("Unacceptable type for DKIM private key: %T", k)
}
}
return nil, errors.New("No acceptable format for DKIM private key")
} }
func DKIMSign(message []byte, dkimConfig DKIMConfig) (result []byte, err error) { func DKIMSign(message []byte, dkimConfig DKIMConfig) (result []byte, err error) {
options := dkim.SignOptions{ options := defaultOptions
Domain: dkimConfig.Domain, options.PrivateKey = dkimConfig.keyBytes
Selector: dkimConfig.Selector, options.Domain = dkimConfig.Domain
Signer: dkimConfig.privKey, options.Selector = dkimConfig.Selector
HeaderCanonicalization: dkim.CanonicalizationRelaxed, err = dkim.Sign(&message, options)
BodyCanonicalization: dkim.CanonicalizationRelaxed, return message, err
}
input := bytes.NewBuffer(message)
output := bytes.NewBuffer(make([]byte, 0, len(message)+1024))
err = dkim.Sign(output, input, &options)
return output.Bytes(), err
} }
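
`parseDKIMPrivKey` above accepts three encodings: a raw Ed25519 private key, PKCS#1 RSA, and PKCS#8, which can wrap either algorithm and therefore needs the type switch. A standalone sketch of why that switch is required; the key generation here is purely for illustration:

```go
package main

import (
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	_, edKey, _ := ed25519.GenerateKey(rand.Reader)
	rsaKey, _ := rsa.GenerateKey(rand.Reader, 2048)

	for _, key := range []any{edKey, rsaKey} {
		// wrap in PKCS#8 + PEM, the format a DKIM key file would typically use
		der, err := x509.MarshalPKCS8PrivateKey(key)
		if err != nil {
			panic(err)
		}
		pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: der})

		// parse it back: PKCS#8 yields either *rsa.PrivateKey or ed25519.PrivateKey,
		// both of which satisfy crypto.Signer
		block, _ := pem.Decode(pemBytes)
		parsed, err := x509.ParsePKCS8PrivateKey(block.Bytes)
		if err != nil {
			panic(err)
		}
		signer := parsed.(crypto.Signer)
		fmt.Printf("parsed a %T\n", signer)
	}
}
```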


@ -233,7 +233,7 @@ func SendMail(config MailtoConfig, recipient string, msg []byte) (err error) {
} }
} }
if config.DKIM.Enabled() { if config.DKIM.Domain != "" {
msg, err = DKIMSign(msg, config.DKIM) msg, err = DKIMSign(msg, config.DKIM)
if err != nil { if err != nil {
return return


@ -1,4 +1,4 @@
//go:build !(plan9 || solaris) //go:build !plan9
package flock package flock


@ -1,4 +1,4 @@
//go:build plan9 || solaris //go:build plan9
package flock package flock


@ -92,7 +92,7 @@ func (client *Client) ApplyProxiedIP(session *Session, proxiedIP net.IP, tls boo
client.server.connectionLimiter.RemoveClient(flatip.FromNetIP(session.realIP)) client.server.connectionLimiter.RemoveClient(flatip.FromNetIP(session.realIP))
// given IP is sane! override the client's current IP // given IP is sane! override the client's current IP
client.server.logger.Info("connect-ip", session.connID, "Accepted proxy IP for client", proxiedIP.String()) client.server.logger.Info("connect-ip", "Accepted proxy IP for client", proxiedIP.String())
client.stateMutex.Lock() client.stateMutex.Lock()
defer client.stateMutex.Unlock() defer client.stateMutex.Unlock()


@ -13,7 +13,6 @@ import (
"github.com/ergochat/ergo/irc/languages" "github.com/ergochat/ergo/irc/languages"
"github.com/ergochat/ergo/irc/modes" "github.com/ergochat/ergo/irc/modes"
"github.com/ergochat/ergo/irc/utils" "github.com/ergochat/ergo/irc/utils"
"github.com/ergochat/ergo/irc/webpush"
) )
func (server *Server) Config() (config *Config) { func (server *Server) Config() (config *Config) {
@ -55,7 +54,6 @@ type SessionData struct {
certfp string certfp string
deviceID string deviceID string
connInfo string connInfo string
connID string
sessionID int64 sessionID int64
caps []string caps []string
} }
@ -76,7 +74,6 @@ func (client *Client) AllSessionData(currentSession *Session, hasPrivs bool) (da
hostname: session.rawHostname, hostname: session.rawHostname,
certfp: session.certfp, certfp: session.certfp,
deviceID: session.deviceID, deviceID: session.deviceID,
connID: session.connID,
sessionID: session.sessionID, sessionID: session.sessionID,
} }
if session.proxiedIP != nil { if session.proxiedIP != nil {
@ -223,13 +220,6 @@ func (session *Session) SetAway(awayMessage string) (wasAway, nowAway string) {
return return
} }
func (session *Session) ConnID() string {
if session == nil {
return "*"
}
return session.connID
}
func (client *Client) autoAwayEnabledNoMutex(config *Config) bool { func (client *Client) autoAwayEnabledNoMutex(config *Config) bool {
return client.registered && client.alwaysOn && return client.registered && client.alwaysOn &&
persistenceEnabled(config.Accounts.Multiclient.AutoAway, client.accountSettings.AutoAway) persistenceEnabled(config.Accounts.Multiclient.AutoAway, client.accountSettings.AutoAway)
@ -519,13 +509,6 @@ func (client *Client) GetReadMarker(cfname string) (result string) {
return "*" return "*"
} }
func (client *Client) getMarkreadTime(cfname string) (timestamp time.Time, ok bool) {
client.stateMutex.RLock()
timestamp, ok = client.readMarkers[cfname]
client.stateMutex.RUnlock()
return
}
func (client *Client) copyReadMarkers() (result map[string]time.Time) { func (client *Client) copyReadMarkers() (result map[string]time.Time) {
client.stateMutex.RLock() client.stateMutex.RLock()
defer client.stateMutex.RUnlock() defer client.stateMutex.RUnlock()
@ -564,28 +547,6 @@ func updateLRUMap(lru map[string]time.Time, key string, val time.Time, maxItems
return val return val
} }
func (client *Client) addClearablePushMessage(cftarget string, messageTime time.Time) {
client.stateMutex.Lock()
defer client.stateMutex.Unlock()
if client.clearablePushMessages == nil {
client.clearablePushMessages = make(map[string]time.Time)
}
updateLRUMap(client.clearablePushMessages, cftarget, messageTime, maxReadMarkers)
}
func (client *Client) clearClearablePushMessage(cftarget string, readTimestamp time.Time) (ok bool) {
client.stateMutex.Lock()
defer client.stateMutex.Unlock()
pushMessageTime, ok := client.clearablePushMessages[cftarget]
if ok && utils.ReadMarkerLessThanOrEqual(pushMessageTime, readTimestamp) {
delete(client.clearablePushMessages, cftarget)
return true
}
return false
}
func (client *Client) shouldFlushTimestamps() (result bool) { func (client *Client) shouldFlushTimestamps() (result bool) {
client.stateMutex.Lock() client.stateMutex.Lock()
defer client.stateMutex.Unlock() defer client.stateMutex.Unlock()
@ -601,134 +562,6 @@ func (client *Client) setKlined() {
client.stateMutex.Unlock() client.stateMutex.Unlock()
} }
func (client *Client) refreshPushSubscription(endpoint string, keys webpush.Keys) bool {
// do not mark dirty --- defer the write to periodic maintenance
now := time.Now().UTC()
client.stateMutex.Lock()
defer client.stateMutex.Unlock()
sub, ok := client.pushSubscriptions[endpoint]
if ok && sub.Keys.Equal(keys) {
sub.LastRefresh = now
return true
}
return false // subscription doesn't exist, we need to send a test message
}
func (client *Client) addPushSubscription(endpoint string, keys webpush.Keys) error {
changed := false
defer func() {
if changed {
client.markDirty(IncludeAllAttrs)
}
}()
config := client.server.Config()
now := time.Now().UTC()
client.stateMutex.Lock()
defer client.stateMutex.Unlock()
if client.pushSubscriptions == nil {
client.pushSubscriptions = make(map[string]*pushSubscription)
}
sub, ok := client.pushSubscriptions[endpoint]
if ok {
changed = !sub.Keys.Equal(keys)
sub.Keys = keys
sub.LastRefresh = now
} else {
if len(client.pushSubscriptions) >= config.WebPush.MaxSubscriptions {
return errLimitExceeded
}
changed = true
sub = newPushSubscription(storedPushSubscription{
Endpoint: endpoint,
Keys: keys,
LastRefresh: now,
LastSuccess: now, // assume we just sent a successful message to confirm the sub
})
client.pushSubscriptions[endpoint] = sub
}
if changed {
client.rebuildPushSubscriptionCache()
}
return nil
}
func (client *Client) hasPushSubscriptions() bool {
return client.pushSubscriptionsExist.Load() != 0
}
func (client *Client) getPushSubscriptions(refresh bool) []storedPushSubscription {
if refresh {
func() {
client.stateMutex.Lock()
defer client.stateMutex.Unlock()
client.rebuildPushSubscriptionCache()
}()
}
client.stateMutex.RLock()
defer client.stateMutex.RUnlock()
return client.cachedPushSubscriptions
}
func (client *Client) rebuildPushSubscriptionCache() {
// must hold write lock
if len(client.pushSubscriptions) == 0 {
client.cachedPushSubscriptions = nil
client.pushSubscriptionsExist.Store(0)
return
}
client.cachedPushSubscriptions = make([]storedPushSubscription, 0, len(client.pushSubscriptions))
for _, subscription := range client.pushSubscriptions {
client.cachedPushSubscriptions = append(client.cachedPushSubscriptions, subscription.storedPushSubscription)
}
client.pushSubscriptionsExist.Store(1)
}
func (client *Client) deletePushSubscription(endpoint string, writeback bool) (changed bool) {
defer func() {
if writeback && changed {
client.markDirty(IncludeAllAttrs)
}
}()
client.stateMutex.Lock()
defer client.stateMutex.Unlock()
_, ok := client.pushSubscriptions[endpoint]
if ok {
changed = true
delete(client.pushSubscriptions, endpoint)
client.rebuildPushSubscriptionCache()
}
return
}
func (client *Client) recordPush(endpoint string, success bool) {
now := time.Now().UTC()
client.stateMutex.Lock()
defer client.stateMutex.Unlock()
subscription, ok := client.pushSubscriptions[endpoint]
if !ok {
return
}
if success {
subscription.LastSuccess = now
}
// TODO we may want to track failures in some way in the future
}
func (channel *Channel) Name() string { func (channel *Channel) Name() string {
channel.stateMutex.RLock() channel.stateMutex.RLock()
defer channel.stateMutex.RUnlock() defer channel.stateMutex.RUnlock()
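
The subscription bookkeeping above layers three things: the authoritative map guarded by the client's state mutex, a snapshot slice rebuilt on every change for readers, and an atomic flag so hot paths can ask whether any subscriptions exist without taking a lock. A standalone sketch of that layering with illustrative names:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// subStore keeps an authoritative map under a mutex, plus a rebuilt snapshot
// slice and an atomic existence flag for lock-free fast-path checks.
type subStore struct {
	mu       sync.RWMutex
	byKey    map[string]string
	snapshot []string
	exists   atomic.Uint32
}

func (s *subStore) add(endpoint, key string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.byKey == nil {
		s.byKey = make(map[string]string)
	}
	s.byKey[endpoint] = key
	s.rebuild()
}

// rebuild must be called with the write lock held.
func (s *subStore) rebuild() {
	if len(s.byKey) == 0 {
		s.snapshot = nil
		s.exists.Store(0)
		return
	}
	snap := make([]string, 0, len(s.byKey)) // fresh slice: old snapshots stay valid for readers
	for endpoint := range s.byKey {
		snap = append(snap, endpoint)
	}
	s.snapshot = snap
	s.exists.Store(1)
}

func (s *subStore) any() bool { return s.exists.Load() != 0 } // no lock on the hot path

func (s *subStore) all() []string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.snapshot
}

func main() {
	var s subStore
	fmt.Println(s.any()) // false
	s.add("https://push.example.com/endpoint-1", "key material")
	fmt.Println(s.any(), s.all()) // true [https://push.example.com/endpoint-1]
}
```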


@ -33,7 +33,6 @@ import (
"github.com/ergochat/ergo/irc/oauth2" "github.com/ergochat/ergo/irc/oauth2"
"github.com/ergochat/ergo/irc/sno" "github.com/ergochat/ergo/irc/sno"
"github.com/ergochat/ergo/irc/utils" "github.com/ergochat/ergo/irc/utils"
"github.com/ergochat/ergo/irc/webpush"
) )
// helper function to parse ACC callbacks, e.g., mailto:person@example.com, tel:16505551234 // helper function to parse ACC callbacks, e.g., mailto:person@example.com, tel:16505551234
@ -137,7 +136,7 @@ func sendSuccessfulAccountAuth(service *ircService, client *Client, rb *Response
} }
} }
client.server.logger.Info("accounts", rb.session.ConnID(), details.nick, "logged into account", details.accountName) client.server.logger.Info("accounts", "client", details.nick, "logged into account", details.accountName)
} }
func (server *Server) sendLoginSnomask(nickMask, accountName string) { func (server *Server) sendLoginSnomask(nickMask, accountName string) {
@ -856,6 +855,7 @@ func debugHandler(server *Server, client *Client, msg ircmsg.Message, rb *Respon
switch param { switch param {
case "GCSTATS": case "GCSTATS":
stats := debug.GCStats{ stats := debug.GCStats{
Pause: make([]time.Duration, 10),
PauseQuantiles: make([]time.Duration, 5), PauseQuantiles: make([]time.Duration, 5),
} }
debug.ReadGCStats(&stats) debug.ReadGCStats(&stats)
@ -1321,15 +1321,6 @@ func isonHandler(server *Server, client *Client, msg ircmsg.Message, rb *Respons
return false return false
} }
// ISUPPORT
func isupportHandler(server *Server, client *Client, msg ircmsg.Message, rb *ResponseBuffer) bool {
server.RplISupport(client, rb)
if !client.registered {
rb.session.isupportSentPrereg = true
}
return false
}
// JOIN <channel>{,<channel>} [<key>{,<key>}] // JOIN <channel>{,<channel>} [<key>{,<key>}]
func joinHandler(server *Server, client *Client, msg ircmsg.Message, rb *ResponseBuffer) bool { func joinHandler(server *Server, client *Client, msg ircmsg.Message, rb *ResponseBuffer) bool {
// #1417: allow `JOIN 0` with a confirmation code // #1417: allow `JOIN 0` with a confirmation code
@ -1637,7 +1628,7 @@ func klineHandler(server *Server, client *Client, msg ircmsg.Message, rb *Respon
// get comment(s) // get comment(s)
reason, operReason := getReasonsFromParams(msg.Params, currentArg) reason, operReason := getReasonsFromParams(msg.Params, currentArg)
err = server.klines.AddMask(mask, duration, false, reason, operReason, operName) err = server.klines.AddMask(mask, duration, reason, operReason, operName)
if err != nil { if err != nil {
rb.Notice(fmt.Sprintf(client.t("Could not successfully save new K-LINE: %s"), err.Error())) rb.Notice(fmt.Sprintf(client.t("Could not successfully save new K-LINE: %s"), err.Error()))
return false return false
@ -1852,14 +1843,14 @@ func cmodeHandler(server *Server, client *Client, msg ircmsg.Message, rb *Respon
if 1 < len(msg.Params) { if 1 < len(msg.Params) {
// parse out real mode changes // parse out real mode changes
params := msg.Params[1:] params := msg.Params[1:]
var unknown []rune var unknown map[rune]bool
changes, unknown = modes.ParseChannelModeChanges(params...) changes, unknown = modes.ParseChannelModeChanges(params...)
// alert for unknown mode changes // alert for unknown mode changes
for _, char := range unknown { for char := range unknown {
rb.Add(nil, server.name, ERR_UNKNOWNMODE, client.nick, string(char), client.t("is an unknown mode character to me")) rb.Add(nil, server.name, ERR_UNKNOWNMODE, client.nick, string(char), client.t("is an unknown mode character to me"))
} }
if len(unknown) != 0 && len(changes) == 0 { if len(unknown) == 1 && len(changes) == 0 {
return false return false
} }
} }
@ -1943,10 +1934,10 @@ func umodeHandler(server *Server, client *Client, msg ircmsg.Message, rb *Respon
changes, unknown := modes.ParseUserModeChanges(params...) changes, unknown := modes.ParseUserModeChanges(params...)
// alert for unknown mode changes // alert for unknown mode changes
for _, char := range unknown { for char := range unknown {
rb.Add(nil, server.name, ERR_UNKNOWNMODE, cDetails.nick, string(char), client.t("is an unknown mode character to me")) rb.Add(nil, server.name, ERR_UNKNOWNMODE, cDetails.nick, string(char), client.t("is an unknown mode character to me"))
} }
if len(unknown) != 0 && len(changes) == 0 { if len(unknown) == 1 && len(changes) == 0 {
return false return false
} }
@ -2193,7 +2184,6 @@ func validateLineLen(msgType history.ItemType, source, target, payload string) (
default: default:
return true return true
} }
limit -= len(target)
limit -= len(payload) limit -= len(payload)
return limit >= 0 return limit >= 0
} }
@ -2466,20 +2456,6 @@ func dispatchMessageToTarget(client *Client, tags map[string]string, histType hi
Tags: tags, Tags: tags,
} }
client.addHistoryItem(user, item, &details, &tDetails, config) client.addHistoryItem(user, item, &details, &tDetails, config)
if config.WebPush.Enabled && histType != history.Tagmsg && user.hasPushSubscriptions() && client != user {
pushMsgBytes, err := webpush.MakePushMessage(command, nickMaskString, accountName, tnick, message)
if err == nil {
user.dispatchPushMessage(pushMessage{
msg: pushMsgBytes,
urgency: webpush.UrgencyHigh,
cftarget: details.nickCasefolded,
time: message.Time,
})
} else {
server.logger.Error("internal", "can't serialize push message", err.Error())
}
}
} }
} }
@ -3064,18 +3040,6 @@ func markReadHandler(server *Server, client *Client, msg ircmsg.Message, rb *Res
session.Send(nil, server.name, "MARKREAD", unfoldedTarget, readTimestamp) session.Send(nil, server.name, "MARKREAD", unfoldedTarget, readTimestamp)
} }
} }
if client.clearClearablePushMessage(cftarget, readTime) {
line, err := webpush.MakePushLine(time.Now().UTC(), "*", server.name, "MARKREAD", unfoldedTarget, readTimestamp)
if err == nil {
client.dispatchPushMessage(pushMessage{
msg: line,
originatingEndpoint: rb.session.webPushEndpoint,
urgency: webpush.UrgencyNormal, // copied from soju
})
} else {
server.logger.Error("internal", "couldn't serialize MARKREAD push message", err.Error())
}
}
} }
return return
} }
@ -3617,88 +3581,6 @@ func webircHandler(server *Server, client *Client, msg ircmsg.Message, rb *Respo
return true return true
} }
// WEBPUSH <subcommand> <endpoint> [key]
func webpushHandler(server *Server, client *Client, msg ircmsg.Message, rb *ResponseBuffer) bool {
subcommand := strings.ToUpper(msg.Params[0])
config := server.Config()
if !config.WebPush.Enabled {
rb.Add(nil, server.name, "FAIL", "WEBPUSH", "FORBIDDEN", subcommand, client.t("Web push is disabled"))
return false
}
if client.Account() == "" {
rb.Add(nil, server.name, "FAIL", "WEBPUSH", "FORBIDDEN", subcommand, client.t("You must be logged in to receive push messages"))
return false
}
// XXX web push can be used to deanonymize a Tor hidden service, but we do not know
// whether an Ergo deployment with a Tor listener is intended to run as a hidden
// service, or as a single onion service where Tor is optional. Hidden service operators
// should disable web push. However, as a sanity check, disallow enabling it over a Tor
// connection:
if rb.session.isTor {
rb.Add(nil, server.name, "FAIL", "WEBPUSH", "FORBIDDEN", subcommand, client.t("Web push cannot be enabled over Tor"))
return false
}
endpoint := msg.Params[1]
if err := webpush.SanityCheckWebPushEndpoint(endpoint); err != nil {
rb.Add(nil, server.name, "FAIL", "WEBPUSH", "INVALID_PARAMS", subcommand, client.t("Invalid web push URL"))
}
switch subcommand {
case "REGISTER":
// allow web push enable even if they are not always-on (they just won't get push messages)
if len(msg.Params) < 3 {
rb.Add(nil, server.name, "FAIL", "WEBPUSH", "INVALID_PARAMS", subcommand, client.t("Insufficient parameters for WEBPUSH REGISTER"))
return false
}
keys, err := webpush.DecodeSubscriptionKeys(msg.Params[2])
if err != nil {
rb.Add(nil, server.name, "FAIL", "WEBPUSH", "INVALID_PARAMS", subcommand, client.t("Invalid subscription keys for WEBPUSH REGISTER"))
return false
}
if client.refreshPushSubscription(endpoint, keys) {
// success, don't send a test message
rb.Add(nil, server.name, "WEBPUSH", "REGISTER", msg.Params[1], msg.Params[2])
rb.session.webPushEndpoint = endpoint
return false
}
// send a test message
if err := client.sendPush(
endpoint,
keys,
webpush.UrgencyHigh,
webpush.PingMessage,
); err == nil {
if err := client.addPushSubscription(endpoint, keys); err == nil {
rb.Add(nil, server.name, "WEBPUSH", "REGISTER", msg.Params[1], msg.Params[2])
rb.session.webPushEndpoint = endpoint
if !client.AlwaysOn() {
rb.Add(nil, server.name, "WARN", "WEBPUSH", "PERSISTENCE_REQUIRED", client.t("You have enabled push notifications, but you will not receive them unless you become always-on. Try: /msg nickserv set always-on true"))
}
} else if err == errLimitExceeded {
rb.Add(nil, server.name, "FAIL", "WEBPUSH", "FORBIDDEN", "REGISTER", client.t("You have too many push subscriptions already"))
} else {
server.logger.Error("webpush", "Failed to add webpush subscription", err.Error())
rb.Add(nil, server.name, "FAIL", "WEBPUSH", "INTERNAL_ERROR", "REGISTER", client.t("An error occurred"))
}
} else {
server.logger.Debug("webpush", "WEBPUSH REGISTER failed validation", endpoint, err.Error())
rb.Add(nil, server.name, "FAIL", "WEBPUSH", "INVALID_PARAMS", "REGISTER", client.t("Test push message failed to send"))
}
case "UNREGISTER":
client.deletePushSubscription(endpoint, true)
rb.session.webPushEndpoint = ""
// this always succeeds
rb.Add(nil, server.name, "WEBPUSH", "UNREGISTER", endpoint)
}
return false
}
type whoxFields uint32 // bitset to hold the WHOX field values, 'a' through 'z' type whoxFields uint32 // bitset to hold the WHOX field values, 'a' through 'z'
func (fields whoxFields) Add(field rune) (result whoxFields) { func (fields whoxFields) Add(field rune) (result whoxFields) {


@ -259,11 +259,6 @@ appropriate channel privs.`,
text: `ISON <nickname>{ <nickname>} text: `ISON <nickname>{ <nickname>}
Returns whether the given nicks exist on the network.`, Returns whether the given nicks exist on the network.`,
},
"isupport": {
text: `ISUPPORT
Returns RPL_ISUPPORT lines describing the server's capabilities.`,
}, },
"join": { "join": {
text: `JOIN <channel>{,<channel>} [<key>{,<key>}] text: `JOIN <channel>{,<channel>} [<key>{,<key>}]
@ -610,11 +605,6 @@ ircv3.net/specs/extensions/webirc.html
the connection from the client to the gateway, such as: the connection from the client to the gateway, such as:
- tls: this flag indicates that the client->gateway connection is secure`, - tls: this flag indicates that the client->gateway connection is secure`,
},
"webpush": {
text: `WEBPUSH <subcommand> [arguments]
Configures web push settings. Not for direct use by end users.`,
}, },
"who": { "who": {
text: `WHO <name> [o] text: `WHO <name> [o]


@ -177,7 +177,7 @@ func histservExportHandler(service *ircService, server *Server, client *Client,
} }
func histservExportAndNotify(service *ircService, server *Server, cfAccount string, outfile *os.File, filename, alertNick string) { func histservExportAndNotify(service *ircService, server *Server, cfAccount string, outfile *os.File, filename, alertNick string) {
defer server.HandlePanic(nil) defer server.HandlePanic()
defer outfile.Close() defer outfile.Close()
writer := bufio.NewWriter(outfile) writer := bufio.NewWriter(outfile)


@ -17,7 +17,6 @@ import (
"github.com/ergochat/ergo/irc/datastore" "github.com/ergochat/ergo/irc/datastore"
"github.com/ergochat/ergo/irc/modes" "github.com/ergochat/ergo/irc/modes"
"github.com/ergochat/ergo/irc/utils" "github.com/ergochat/ergo/irc/utils"
"github.com/ergochat/ergo/irc/webpush"
) )
const ( const (
@ -25,7 +24,7 @@ const (
// XXX instead of referencing, e.g., keyAccountExists, we should write in the string literal // XXX instead of referencing, e.g., keyAccountExists, we should write in the string literal
// (to ensure that no matter what code changes happen elsewhere, we're still producing a // (to ensure that no matter what code changes happen elsewhere, we're still producing a
// db of the hardcoded version) // db of the hardcoded version)
importDBSchemaVersion = 24 importDBSchemaVersion = 23
) )
type userImport struct { type userImport struct {
@ -83,15 +82,6 @@ func doImportDBGeneric(config *Config, dbImport databaseImport, credsType Creden
tx.Set(keySchemaVersion, strconv.Itoa(importDBSchemaVersion), nil) tx.Set(keySchemaVersion, strconv.Itoa(importDBSchemaVersion), nil)
tx.Set(keyCloakSecret, utils.GenerateSecretKey(), nil) tx.Set(keyCloakSecret, utils.GenerateSecretKey(), nil)
vapidKeys, err := webpush.GenerateVAPIDKeys()
if err != nil {
return err
}
vapidKeysJSON, err := json.Marshal(vapidKeys)
if err != nil {
return err
}
tx.Set(keyVAPIDKeys, string(vapidKeysJSON), nil)
cfUsernames := make(utils.HashSet[string]) cfUsernames := make(utils.HashSet[string])
skeletonToUsername := make(map[string]string) skeletonToUsername := make(map[string]string)


@ -5,12 +5,12 @@ package isupport
import ( import (
"fmt" "fmt"
"slices" "sort"
"strings" "strings"
) )
const ( const (
maxPayloadLength = 380 maxLastArgLength = 400
/* Modern: "As the maximum number of message parameters to any reply is 15, /* Modern: "As the maximum number of message parameters to any reply is 15,
the maximum number of RPL_ISUPPORT tokens that can be advertised is 13." the maximum number of RPL_ISUPPORT tokens that can be advertised is 13."
@ -47,12 +47,6 @@ func (il *List) AddNoValue(name string) {
il.Tokens[name] = "" il.Tokens[name] = ""
} }
// Contains returns whether the list already contains a token
func (il *List) Contains(name string) bool {
_, ok := il.Tokens[name]
return ok
}
// getTokenString gets the appropriate string for a token+value. // getTokenString gets the appropriate string for a token+value.
func getTokenString(name string, value string) string { func getTokenString(name string, value string) string {
if len(value) == 0 { if len(value) == 0 {
@ -64,7 +58,7 @@ func getTokenString(name string, value string) string {
// GetDifference returns the difference between two token lists. // GetDifference returns the difference between two token lists.
func (il *List) GetDifference(newil *List) [][]string { func (il *List) GetDifference(newil *List) [][]string {
var outTokens []string var outTokens sort.StringSlice
// append removed tokens // append removed tokens
for name := range il.Tokens { for name := range il.Tokens {
@ -90,7 +84,7 @@ func (il *List) GetDifference(newil *List) [][]string {
outTokens = append(outTokens, token) outTokens = append(outTokens, token)
} }
slices.Sort(outTokens) sort.Sort(outTokens)
// create output list // create output list
replies := make([][]string, 0) replies := make([][]string, 0)
@ -98,7 +92,7 @@ func (il *List) GetDifference(newil *List) [][]string {
var cache []string // Token list cache var cache []string // Token list cache
for _, token := range outTokens { for _, token := range outTokens {
if len(token)+length <= maxPayloadLength { if len(token)+length <= maxLastArgLength {
// account for the space separating tokens // account for the space separating tokens
if len(cache) > 0 { if len(cache) > 0 {
length++ length++
@ -107,7 +101,7 @@ func (il *List) GetDifference(newil *List) [][]string {
length += len(token) length += len(token)
} }
if len(cache) == maxParameters || len(token)+length >= maxPayloadLength { if len(cache) == maxParameters || len(token)+length >= maxLastArgLength {
replies = append(replies, cache) replies = append(replies, cache)
cache = make([]string, 0) cache = make([]string, 0)
length = 0 length = 0
@ -121,54 +115,40 @@ func (il *List) GetDifference(newil *List) [][]string {
return replies return replies
} }
func validateToken(token string) error {
if len(token) == 0 || token[0] == ':' || strings.Contains(token, " ") {
return fmt.Errorf("bad isupport token (cannot be sent as IRC parameter): `%s`", token)
}
if strings.ContainsAny(token, "\n\r\x00") {
return fmt.Errorf("bad isupport token (contains forbidden octets)")
}
// technically a token can be maxPayloadLength if it occurs alone,
// but fail it just to be safe
if len(token) >= maxPayloadLength {
return fmt.Errorf("bad isupport token (too long): `%s`", token)
}
return nil
}
// RegenerateCachedReply regenerates the cached RPL_ISUPPORT reply // RegenerateCachedReply regenerates the cached RPL_ISUPPORT reply
func (il *List) RegenerateCachedReply() (err error) { func (il *List) RegenerateCachedReply() (err error) {
var tokens []string il.CachedReply = make([][]string, 0)
for name, value := range il.Tokens { var length int // Length of the current cache
token := getTokenString(name, value) var cache []string // Token list cache
if tokenErr := validateToken(token); tokenErr == nil {
tokens = append(tokens, token)
} else {
err = tokenErr
}
}
// make sure we get a sorted list of tokens, needed for tests and looks nice // make sure we get a sorted list of tokens, needed for tests and looks nice
slices.Sort(tokens) var tokens sort.StringSlice
for name := range il.Tokens {
tokens = append(tokens, name)
}
sort.Sort(tokens)
var cache []string // Tokens in current line for _, name := range tokens {
var length int // Length of the current line token := getTokenString(name, il.Tokens[name])
if token[0] == ':' || strings.Contains(token, " ") {
for _, token := range tokens { err = fmt.Errorf("bad isupport token (cannot contain spaces or start with :): %s", token)
// account for the space separating tokens continue
if len(cache) == maxParameters || (len(token)+1)+length > maxPayloadLength {
il.CachedReply = append(il.CachedReply, cache)
cache = nil
length = 0
} }
if len(token)+length <= maxLastArgLength {
// account for the space separating tokens
if len(cache) > 0 { if len(cache) > 0 {
length++ length++
} }
length += len(token)
cache = append(cache, token) cache = append(cache, token)
length += len(token)
}
if len(cache) == maxParameters || len(token)+length >= maxLastArgLength {
il.CachedReply = append(il.CachedReply, cache)
cache = make([]string, 0)
length = 0
}
} }
if len(cache) > 0 { if len(cache) > 0 {
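
The interleaved old/new hunks above make the rewritten packing logic hard to follow, so here is a compact, self-contained restatement of it. This is a sketch rather than the repository's exact code; it assumes maxParameters is 13 (as the Modern excerpt quoted above implies) and uses the new 380-byte payload limit that replaces the old maxLastArgLength of 400.

package main

import (
	"fmt"
	"slices"
)

const (
	maxPayloadLength = 380 // new limit; previously maxLastArgLength = 400
	maxParameters    = 13  // assumption, per the Modern excerpt quoted above
)

// packTokens greedily packs sorted ISUPPORT tokens into RPL_ISUPPORT lines,
// starting a new line whenever the next token would exceed either limit.
func packTokens(tokens []string) (lines [][]string) {
	slices.Sort(tokens)
	var cache []string // tokens in the current line
	var length int     // length of the current line
	for _, token := range tokens {
		// the +1 accounts for the space separating tokens
		if len(cache) == maxParameters || (len(token)+1)+length > maxPayloadLength {
			lines = append(lines, cache)
			cache = nil
			length = 0
		}
		if len(cache) > 0 {
			length++
		}
		length += len(token)
		cache = append(cache, token)
	}
	if len(cache) > 0 {
		lines = append(lines, cache)
	}
	return lines
}

func main() {
	fmt.Println(packTokens([]string{"UTF8ONLY", "NETWORK=ErgoTest", "CHANTYPES=#"}))
	// [[CHANTYPES=# NETWORK=ErgoTest UTF8ONLY]]
}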


@ -37,7 +37,7 @@ func TestISUPPORT(t *testing.T) {
} }
if !reflect.DeepEqual(tListLong.CachedReply, longReplies) { if !reflect.DeepEqual(tListLong.CachedReply, longReplies) {
t.Errorf("Multiple output replies did not match, got [%v]", tListLong.CachedReply) t.Errorf("Multiple output replies did not match, got [%v]", longReplies)
} }
// create first list // create first list


@ -66,12 +66,11 @@ func (km *KLineManager) AllBans() map[string]IPBanInfo {
} }
// AddMask adds to the blocked list. // AddMask adds to the blocked list.
func (km *KLineManager) AddMask(mask string, duration time.Duration, requireSASL bool, reason, operReason, operName string) error { func (km *KLineManager) AddMask(mask string, duration time.Duration, reason, operReason, operName string) error {
km.persistenceMutex.Lock() km.persistenceMutex.Lock()
defer km.persistenceMutex.Unlock() defer km.persistenceMutex.Unlock()
info := IPBanInfo{ info := IPBanInfo{
RequireSASL: requireSASL,
Reason: reason, Reason: reason,
OperReason: operReason, OperReason: operReason,
OperName: operName, OperName: operName,
@ -209,14 +208,13 @@ func (km *KLineManager) CheckMasks(masks ...string) (isBanned bool, info IPBanIn
for _, entryInfo := range km.entries { for _, entryInfo := range km.entries {
for _, mask := range masks { for _, mask := range masks {
if entryInfo.Matcher.MatchString(mask) { if entryInfo.Matcher.MatchString(mask) {
// apply the most stringent ban (unconditional bans override require-sasl) return true, entryInfo.Info
if !isBanned || info.RequireSASL {
isBanned, info = true, entryInfo.Info
}
} }
} }
} }
// no matches!
isBanned = false
return return
} }
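
A minimal sketch of how the new RequireSASL field is meant to interact with registration, mirroring the tryRegister change further down: an unconditional K-line always rejects, while a require-SASL K-line lets clients who completed SASL through. The types here are trimmed stand-ins, not the repository's definitions.

package main

import "fmt"

// trimmed stand-in for the real IPBanInfo
type IPBanInfo struct {
	RequireSASL bool
	Reason      string
}

// shouldReject mirrors the check in tryRegister below
func shouldReject(isBanned bool, info IPBanInfo, account string) bool {
	return isBanned && !(info.RequireSASL && account != "")
}

func main() {
	kline := IPBanInfo{RequireSASL: true, Reason: "spambots from this range"}
	fmt.Println(shouldReject(true, kline, ""))                              // true: anonymous client is rejected
	fmt.Println(shouldReject(true, kline, "shivaram"))                      // false: SASL-authenticated client may connect
	fmt.Println(shouldReject(true, IPBanInfo{Reason: "abuse"}, "shivaram")) // true: unconditional ban still applies
}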


@ -116,7 +116,7 @@ func ApplyUserModeChanges(client *Client, changes modes.ModeChanges, force bool,
} }
// parseDefaultModes uses the provided mode change parser to parse the rawModes. // parseDefaultModes uses the provided mode change parser to parse the rawModes.
func parseDefaultModes(rawModes string, parser func(params ...string) (modes.ModeChanges, []rune)) modes.Modes { func parseDefaultModes(rawModes string, parser func(params ...string) (modes.ModeChanges, map[rune]bool)) modes.Modes {
modeChangeStrings := strings.Fields(rawModes) modeChangeStrings := strings.Fields(rawModes)
modeChanges, _ := parser(modeChangeStrings...) modeChanges, _ := parser(modeChangeStrings...)
defaultModes := make(modes.Modes, 0) defaultModes := make(modes.Modes, 0)
@ -266,9 +266,9 @@ func (channel *Channel) ApplyChannelModeChanges(client *Client, isSamode bool, c
case modes.Add: case modes.Add:
ch := client.server.channels.Get(change.Arg) ch := client.server.channels.Get(change.Arg)
if ch == nil { if ch == nil {
rb.Add(nil, client.server.name, ERR_INVALIDMODEPARAM, details.nick, chname, string(change.Mode), utils.SafeErrorParam(change.Arg), client.t("No such channel")) rb.Add(nil, client.server.name, ERR_INVALIDMODEPARAM, details.nick, chname, string(change.Mode), utils.SafeErrorParam(change.Arg), fmt.Sprintf(client.t("No such channel")))
} else if ch == channel { } else if ch == channel {
rb.Add(nil, client.server.name, ERR_INVALIDMODEPARAM, details.nick, chname, string(change.Mode), utils.SafeErrorParam(change.Arg), client.t("You can't forward a channel to itself")) rb.Add(nil, client.server.name, ERR_INVALIDMODEPARAM, details.nick, chname, string(change.Mode), utils.SafeErrorParam(change.Arg), fmt.Sprintf(client.t("You can't forward a channel to itself")))
} else { } else {
if isSamode || ch.ClientIsAtLeast(client, modes.ChannelOperator) { if isSamode || ch.ClientIsAtLeast(client, modes.ChannelOperator) {
change.Arg = ch.Name() change.Arg = ch.Name()


@ -7,7 +7,7 @@ package modes
import ( import (
"fmt" "fmt"
"slices" "sort"
"strings" "strings"
"github.com/ergochat/ergo/irc/utils" "github.com/ergochat/ergo/irc/utils"
@ -189,7 +189,10 @@ func GetLowestChannelModePrefix(prefixes string) (lowest Mode) {
// //
// ParseUserModeChanges returns the valid changes, and the list of unknown chars. // ParseUserModeChanges returns the valid changes, and the list of unknown chars.
func ParseUserModeChanges(params ...string) (changes ModeChanges, unknown []rune) { func ParseUserModeChanges(params ...string) (ModeChanges, map[rune]bool) {
changes := make(ModeChanges, 0)
unknown := make(map[rune]bool)
op := List op := List
if 0 < len(params) { if 0 < len(params) {
@ -216,19 +219,30 @@ func ParseUserModeChanges(params ...string) (changes ModeChanges, unknown []rune
} }
} }
if slices.Contains(SupportedUserModes, Mode(mode)) { var isKnown bool
changes = append(changes, change) for _, supportedMode := range SupportedUserModes {
} else { if rune(supportedMode) == mode {
unknown = append(unknown, mode) isKnown = true
break
} }
} }
if !isKnown {
unknown[mode] = true
continue
}
changes = append(changes, change)
}
} }
return changes, unknown return changes, unknown
} }
// ParseChannelModeChanges returns the valid changes, and the list of unknown chars. // ParseChannelModeChanges returns the valid changes, and the list of unknown chars.
func ParseChannelModeChanges(params ...string) (changes ModeChanges, unknown []rune) { func ParseChannelModeChanges(params ...string) (ModeChanges, map[rune]bool) {
changes := make(ModeChanges, 0)
unknown := make(map[rune]bool)
op := List op := List
if 0 < len(params) { if 0 < len(params) {
@ -290,12 +304,26 @@ func ParseChannelModeChanges(params ...string) (changes ModeChanges, unknown []r
} }
} }
if slices.Contains(SupportedChannelModes, Mode(mode)) || slices.Contains(ChannelUserModes, Mode(mode)) { var isKnown bool
changes = append(changes, change) for _, supportedMode := range SupportedChannelModes {
} else { if rune(supportedMode) == mode {
unknown = append(unknown, mode) isKnown = true
break
} }
} }
for _, supportedMode := range ChannelUserModes {
if rune(supportedMode) == mode {
isKnown = true
break
}
}
if !isKnown {
unknown[mode] = true
continue
}
changes = append(changes, change)
}
} }
return changes, unknown return changes, unknown
@ -400,37 +428,33 @@ func (set *ModeSet) HighestChannelUserMode() (result Mode) {
return return
} }
var ( type ByCodepoint Modes
rplMyInfo1, rplMyInfo2, rplMyInfo3, chanmodesToken string
)
func init() { func (a ByCodepoint) Len() int { return len(a) }
initRplMyInfo() func (a ByCodepoint) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
initChanmodesToken() func (a ByCodepoint) Less(i, j int) bool { return a[i] < a[j] }
}
func initRplMyInfo() { func RplMyInfo() (param1, param2, param3 string) {
// initialize constant strings published in initial numerics
userModes := make(Modes, len(SupportedUserModes), len(SupportedUserModes)+1) userModes := make(Modes, len(SupportedUserModes), len(SupportedUserModes)+1)
copy(userModes, SupportedUserModes) copy(userModes, SupportedUserModes)
// TLS is not in SupportedUserModes because it can't be modified // TLS is not in SupportedUserModes because it can't be modified
userModes = append(userModes, TLS) userModes = append(userModes, TLS)
slices.Sort(userModes) sort.Sort(ByCodepoint(userModes))
channelModes := make(Modes, len(SupportedChannelModes)+len(ChannelUserModes)) channelModes := make(Modes, len(SupportedChannelModes)+len(ChannelUserModes))
copy(channelModes, SupportedChannelModes) copy(channelModes, SupportedChannelModes)
copy(channelModes[len(SupportedChannelModes):], ChannelUserModes) copy(channelModes[len(SupportedChannelModes):], ChannelUserModes)
slices.Sort(channelModes) sort.Sort(ByCodepoint(channelModes))
// XXX enumerate these by hand, i can't see any way to DRY this // XXX enumerate these by hand, i can't see any way to DRY this
channelParametrizedModes := Modes{BanMask, ExceptMask, InviteMask, Key, UserLimit, Forward} channelParametrizedModes := Modes{BanMask, ExceptMask, InviteMask, Key, UserLimit, Forward}
channelParametrizedModes = append(channelParametrizedModes, ChannelUserModes...) channelParametrizedModes = append(channelParametrizedModes, ChannelUserModes...)
slices.Sort(channelParametrizedModes) sort.Sort(ByCodepoint(channelParametrizedModes))
rplMyInfo1, rplMyInfo2, rplMyInfo3 = userModes.String(), channelModes.String(), channelParametrizedModes.String() return userModes.String(), channelModes.String(), channelParametrizedModes.String()
} }
func initChanmodesToken() { func ChanmodesToken() (result string) {
// https://modern.ircdocs.horse#chanmodes-parameter // https://modern.ircdocs.horse#chanmodes-parameter
// type A: listable modes with parameters // type A: listable modes with parameters
A := Modes{BanMask, ExceptMask, InviteMask} A := Modes{BanMask, ExceptMask, InviteMask}
@ -441,18 +465,10 @@ func initChanmodesToken() {
// type D: modes without parameters // type D: modes without parameters
D := Modes{InviteOnly, Moderated, NoOutside, OpOnlyTopic, ChanRoleplaying, Secret, NoCTCP, RegisteredOnly, RegisteredOnlySpeak, Auditorium, OpModerated} D := Modes{InviteOnly, Moderated, NoOutside, OpOnlyTopic, ChanRoleplaying, Secret, NoCTCP, RegisteredOnly, RegisteredOnlySpeak, Auditorium, OpModerated}
slices.Sort(A) sort.Sort(ByCodepoint(A))
slices.Sort(B) sort.Sort(ByCodepoint(B))
slices.Sort(C) sort.Sort(ByCodepoint(C))
slices.Sort(D) sort.Sort(ByCodepoint(D))
chanmodesToken = fmt.Sprintf("%s,%s,%s,%s", A.String(), B.String(), C.String(), D.String()) return fmt.Sprintf("%s,%s,%s,%s", A.String(), B.String(), C.String(), D.String())
}
func RplMyInfo() (param1, param2, param3 string) {
return rplMyInfo1, rplMyInfo2, rplMyInfo3
}
func ChanmodesToken() (result string) {
return chanmodesToken
} }
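
One reason the ByCodepoint adapter could be dropped above: since Go 1.21, slices.Sort accepts any slice whose element type is ordered, which includes a defined type with an integer underlying type. A small illustration follows; the Mode and Modes definitions here are assumptions about the real ones in this package.

package main

import (
	"fmt"
	"slices"
)

// assumed to match the modes package: a rune-backed Mode type
type Mode rune
type Modes []Mode

func main() {
	channelUserModes := Modes{'q', 'a', 'o', 'h', 'v'}
	// sorts by codepoint without a sort.Interface implementation
	slices.Sort(channelUserModes)
	for _, m := range channelUserModes {
		fmt.Printf("%c", m)
	}
	fmt.Println() // ahoqv
}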


@ -5,7 +5,6 @@ package modes
import ( import (
"reflect" "reflect"
"slices"
"strings" "strings"
"testing" "testing"
) )
@ -17,7 +16,7 @@ func assertEqual(supplied, expected interface{}, t *testing.T) {
} }
func TestParseUserModeChanges(t *testing.T) { func TestParseUserModeChanges(t *testing.T) {
var emptyUnknown []rune emptyUnknown := make(map[rune]bool)
changes, unknown := ParseUserModeChanges("+i") changes, unknown := ParseUserModeChanges("+i")
assertEqual(unknown, emptyUnknown, t) assertEqual(unknown, emptyUnknown, t)
assertEqual(changes, ModeChanges{ModeChange{Op: Add, Mode: Invisible}}, t) assertEqual(changes, ModeChanges{ModeChange{Op: Add, Mode: Invisible}}, t)
@ -49,11 +48,10 @@ func TestParseUserModeChanges(t *testing.T) {
} }
func TestIssue874(t *testing.T) { func TestIssue874(t *testing.T) {
var emptyModeChanges ModeChanges emptyUnknown := make(map[rune]bool)
var emptyUnknown []rune
modes, unknown := ParseChannelModeChanges("+k") modes, unknown := ParseChannelModeChanges("+k")
assertEqual(unknown, emptyUnknown, t) assertEqual(unknown, emptyUnknown, t)
assertEqual(modes, emptyModeChanges, t) assertEqual(modes, ModeChanges{}, t)
modes, unknown = ParseChannelModeChanges("+k", "beer") modes, unknown = ParseChannelModeChanges("+k", "beer")
assertEqual(unknown, emptyUnknown, t) assertEqual(unknown, emptyUnknown, t)
@ -153,7 +151,7 @@ func TestParseChannelModeChanges(t *testing.T) {
} }
modes, unknown = ParseChannelModeChanges("+tx") modes, unknown = ParseChannelModeChanges("+tx")
if len(unknown) != 1 || !slices.Contains(unknown, 'x') { if len(unknown) != 1 || !unknown['x'] {
t.Errorf("expected that x is an unknown mode, instead: %v", unknown) t.Errorf("expected that x is an unknown mode, instead: %v", unknown)
} }
expected = ModeChange{ expected = ModeChange{


@ -961,7 +961,7 @@ func (mysql *MySQL) listCorrespondentsInternal(ctx context.Context, target strin
} }
results = append(results, history.TargetListing{ results = append(results, history.TargetListing{
CfName: correspondent, CfName: correspondent,
Time: time.Unix(0, nanotime).UTC(), Time: time.Unix(0, nanotime),
}) })
} }
@ -1014,7 +1014,7 @@ func (mysql *MySQL) ListChannels(cfchannels []string) (results []history.TargetL
} }
results = append(results, history.TargetListing{ results = append(results, history.TargetListing{
CfName: target, CfName: target,
Time: time.Unix(0, nanotime).UTC(), Time: time.Unix(0, nanotime),
}) })
} }
return return
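
The change in these hunks appends .UTC() when converting stored nanosecond timestamps back into time.Time values. A tiny illustration of why that matters: time.Unix interprets the instant in the server's local zone, so without the call the local zone leaks into anything that formats the listing timestamps.

package main

import (
	"fmt"
	"time"
)

func main() {
	nanotime := int64(1_718_000_000_000_000_000) // an example stored value
	local := time.Unix(0, nanotime)              // same instant, but in the local zone
	fmt.Println(local.Location() == time.Local)  // true
	fmt.Println(local.UTC())                     // normalized for presentation and serialization
}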


@ -241,18 +241,6 @@ indicate an empty password, use * instead.`,
"password": { "password": {
aliasOf: "passwd", aliasOf: "passwd",
}, },
"push": {
handler: nsPushHandler,
help: `Syntax: $bPUSH LIST$b
Or: $bPUSH DELETE <endpoint>$b
PUSH lets you view or modify the state of your push subscriptions.`,
helpShort: `$bPUSH$b lets you view or modify your push subscriptions.`,
enabled: func(config *Config) bool {
return config.WebPush.Enabled
},
minParams: 1,
},
"get": { "get": {
handler: nsGetHandler, handler: nsGetHandler,
help: `Syntax: $bGET <setting>$b help: `Syntax: $bGET <setting>$b
@ -1324,9 +1312,6 @@ func nsClientsListHandler(service *ircService, server *Server, client *Client, p
if session.deviceID != "" { if session.deviceID != "" {
service.Notice(rb, fmt.Sprintf(client.t("Device ID: %s"), session.deviceID)) service.Notice(rb, fmt.Sprintf(client.t("Device ID: %s"), session.deviceID))
} }
if hasPrivs {
service.Notice(rb, fmt.Sprintf(client.t("Debug log ID: %s"), session.connID))
}
service.Notice(rb, fmt.Sprintf(client.t("IP address: %s"), session.ip.String())) service.Notice(rb, fmt.Sprintf(client.t("IP address: %s"), session.ip.String()))
service.Notice(rb, fmt.Sprintf(client.t("Hostname: %s"), session.hostname)) service.Notice(rb, fmt.Sprintf(client.t("Hostname: %s"), session.hostname))
if hasPrivs { if hasPrivs {
@ -1671,48 +1656,3 @@ func nsRenameHandler(service *ircService, server *Server, client *Client, comman
} }
} }
} }
func nsPushHandler(service *ircService, server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
switch strings.ToUpper(params[0]) {
case "LIST":
target := client
if len(params) > 1 && client.HasRoleCapabs("accreg") {
target = server.clients.Get(params[1])
if target == nil {
service.Notice(rb, client.t("No such nick"))
return
}
}
subscriptions := target.getPushSubscriptions(true)
service.Notice(rb, fmt.Sprintf(client.t("Nickname %[1]s has %[2]d push subscription(s)"), target.Nick(), len(subscriptions)))
for i, subscription := range subscriptions {
service.Notice(rb, fmt.Sprintf(client.t("Subscription %d:"), i+1))
service.Notice(rb, fmt.Sprintf(client.t("Endpoint: %s"), subscription.Endpoint))
service.Notice(rb, fmt.Sprintf(client.t("Last renewal: %s"), subscription.LastRefresh.Format(time.RFC1123)))
service.Notice(rb, fmt.Sprintf(client.t("Last push: %s"), subscription.LastSuccess.Format(time.RFC1123)))
}
case "DELETE":
if len(params) < 2 {
service.Notice(rb, client.t("Invalid parameters"))
return
}
target := client
endpoint := params[1]
if len(params) > 2 && client.HasRoleCapabs("accreg") {
target = server.clients.Get(params[1])
if target == nil {
service.Notice(rb, client.t("No such nick"))
return
}
endpoint = params[2]
}
changed := target.deletePushSubscription(endpoint, true)
if changed {
service.Notice(rb, client.t("Successfully deleted push subscription"))
} else {
service.Notice(rb, client.t("Push subscription not found"))
}
default:
service.Notice(rb, client.t("Invalid parameters"))
}
}


@ -6,19 +6,14 @@ package irc
import ( import (
"fmt" "fmt"
"runtime/debug" "runtime/debug"
"time"
) )
// HandlePanic is a general-purpose panic handler for ad-hoc goroutines. // HandlePanic is a general-purpose panic handler for ad-hoc goroutines.
// Because of the semantics of `recover`, it must be called directly // Because of the semantics of `recover`, it must be called directly
// from the routine on whose call stack the panic would occur, with `defer`, // from the routine on whose call stack the panic would occur, with `defer`,
// e.g. `defer server.HandlePanic()` // e.g. `defer server.HandlePanic()`
func (server *Server) HandlePanic(restartable func()) { func (server *Server) HandlePanic() {
if r := recover(); r != nil { if r := recover(); r != nil {
server.logger.Error("internal", fmt.Sprintf("Panic encountered: %v\n%s", r, debug.Stack())) server.logger.Error("internal", fmt.Sprintf("Panic encountered: %v\n%s", r, debug.Stack()))
if restartable != nil {
time.Sleep(time.Second)
go restartable()
}
} }
} }
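
A self-contained sketch of how the new restartable parameter is intended to be used: a long-running goroutine passes its own entry point, so that after the panic is logged it is relaunched about a second later (callers that do not want that behavior pass nil, as the other diffs here show). The worker and its queue are hypothetical, not Ergo code.

package main

import (
	"fmt"
	"runtime/debug"
	"time"
)

// trimmed stand-in for the real Server type
type Server struct{ workQueue chan int }

// HandlePanic mirrors the new signature above: log the panic, then optionally
// restart the supplied function after a short delay.
func (server *Server) HandlePanic(restartable func()) {
	if r := recover(); r != nil {
		fmt.Printf("Panic encountered: %v\n%s", r, debug.Stack())
		if restartable != nil {
			time.Sleep(time.Second)
			go restartable()
		}
	}
}

// runWorker is a hypothetical long-running goroutine: a panic while processing
// is logged and the worker is restarted, resuming with the remaining items.
func (server *Server) runWorker() {
	defer server.HandlePanic(server.runWorker)
	for item := range server.workQueue {
		if item < 0 {
			panic("bad work item")
		}
		fmt.Println("processed", item)
	}
}

func main() {
	server := &Server{workQueue: make(chan int, 3)}
	server.workQueue <- 1
	server.workQueue <- -1 // triggers a panic; the worker is relaunched and keeps draining
	server.workQueue <- 2
	go server.runWorker()
	time.Sleep(2 * time.Second)
}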


@ -3,11 +3,8 @@
package passwd package passwd
import ( import "golang.org/x/crypto/bcrypt"
"crypto/sha3" import "golang.org/x/crypto/sha3"
"golang.org/x/crypto/bcrypt"
)
const ( const (
MinCost = bcrypt.MinCost MinCost = bcrypt.MinCost


@ -13,8 +13,8 @@ import (
) )
const ( const (
defaultNPCNickMask = "*%s*!%s@npc.fakeuser.invalid" npcNickMask = "*%s*!%s@npc.fakeuser.invalid"
defaultSceneNickMask = "=Scene=!%s@npc.fakeuser.invalid" sceneNickMask = "=Scene=!%s@npc.fakeuser.invalid"
) )
func sendRoleplayMessage(server *Server, client *Client, source string, targetString string, isScene, isAction bool, messageParts []string, rb *ResponseBuffer) { func sendRoleplayMessage(server *Server, client *Client, source string, targetString string, isScene, isAction bool, messageParts []string, rb *ResponseBuffer) {
@ -30,7 +30,7 @@ func sendRoleplayMessage(server *Server, client *Client, source string, targetSt
var sourceMask string var sourceMask string
if isScene { if isScene {
sourceMask = fmt.Sprintf(server.Config().Roleplay.SceneNickMask, client.Nick()) sourceMask = fmt.Sprintf(sceneNickMask, client.Nick())
} else { } else {
cfSource, cfSourceErr := CasefoldName(source) cfSource, cfSourceErr := CasefoldName(source)
skelSource, skelErr := Skeleton(source) skelSource, skelErr := Skeleton(source)
@ -39,7 +39,7 @@ func sendRoleplayMessage(server *Server, client *Client, source string, targetSt
rb.Add(nil, client.server.name, ERR_CANNOTSENDRP, targetString, client.t("Invalid roleplay name")) rb.Add(nil, client.server.name, ERR_CANNOTSENDRP, targetString, client.t("Invalid roleplay name"))
return return
} }
sourceMask = fmt.Sprintf(server.Config().Roleplay.NPCNickMask, source, client.Nick()) sourceMask = fmt.Sprintf(npcNickMask, source, client.Nick())
} }
// block attempts to send CTCP messages to Tor clients // block attempts to send CTCP messages to Tor clients


@ -36,12 +36,10 @@ import (
"github.com/ergochat/ergo/irc/mysql" "github.com/ergochat/ergo/irc/mysql"
"github.com/ergochat/ergo/irc/sno" "github.com/ergochat/ergo/irc/sno"
"github.com/ergochat/ergo/irc/utils" "github.com/ergochat/ergo/irc/utils"
"github.com/ergochat/ergo/irc/webpush"
) )
const ( const (
alwaysOnMaintenanceInterval = 30 * time.Minute alwaysOnMaintenanceInterval = 30 * time.Minute
pushMaintenanceInterval = 24 * time.Hour
) )
var ( var (
@ -63,8 +61,6 @@ var (
chanTypes = "#" chanTypes = "#"
throttleMessage = "You have attempted to connect too many times within a short duration. Wait a while, and you will be able to connect." throttleMessage = "You have attempted to connect too many times within a short duration. Wait a while, and you will be able to connect."
httpVerbs = utils.SetLiteral("CONNECT", "DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT", "TRACE")
) )
// Server is the main Oragono server. // Server is the main Oragono server.
@ -99,13 +95,7 @@ type Server struct {
stats Stats stats Stats
semaphores ServerSemaphores semaphores ServerSemaphores
flock flock.Flocker flock flock.Flocker
connIDCounter atomic.Uint64
defcon atomic.Uint32 defcon atomic.Uint32
// API stuff
apiHandler http.Handler // always initialized
apiListener *utils.ReloadableListener
apiServer *http.Server // nil if API is not enabled
} }
// NewServer returns a new Oragono server. // NewServer returns a new Oragono server.
@ -132,8 +122,6 @@ func NewServer(config *Config, logger *logger.Manager) (*Server, error) {
server.monitorManager.Initialize() server.monitorManager.Initialize()
server.snomasks.Initialize() server.snomasks.Initialize()
server.apiHandler = newAPIHandler(server)
if err := server.applyConfig(config); err != nil { if err := server.applyConfig(config); err != nil {
return nil, err return nil, err
} }
@ -146,7 +134,6 @@ func NewServer(config *Config, logger *logger.Manager) (*Server, error) {
} }
time.AfterFunc(alwaysOnMaintenanceInterval, server.periodicAlwaysOnMaintenance) time.AfterFunc(alwaysOnMaintenanceInterval, server.periodicAlwaysOnMaintenance)
time.AfterFunc(pushMaintenanceInterval, server.periodicPushMaintenance)
return server, nil return server, nil
} }
@ -279,7 +266,7 @@ func (server *Server) periodicAlwaysOnMaintenance() {
time.AfterFunc(alwaysOnMaintenanceInterval, server.periodicAlwaysOnMaintenance) time.AfterFunc(alwaysOnMaintenanceInterval, server.periodicAlwaysOnMaintenance)
}() }()
defer server.HandlePanic(nil) defer server.HandlePanic()
server.logger.Info("accounts", "Performing periodic always-on client checks") server.logger.Info("accounts", "Performing periodic always-on client checks")
server.performAlwaysOnMaintenance(true, true) server.performAlwaysOnMaintenance(true, true)
@ -303,47 +290,6 @@ func (server *Server) performAlwaysOnMaintenance(checkExpiration, flushTimestamp
} }
} }
func (server *Server) periodicPushMaintenance() {
defer func() {
// reschedule whether or not there was a panic
time.AfterFunc(pushMaintenanceInterval, server.periodicPushMaintenance)
}()
defer server.HandlePanic(nil)
if server.Config().WebPush.Enabled {
server.logger.Info("webpush", "Performing periodic push subscription maintenance")
server.performPushMaintenance()
} // else: reschedule and check again later, the operator may enable it via rehash
}
func (server *Server) performPushMaintenance() {
expiration := time.Duration(server.Config().WebPush.Expiration)
for _, client := range server.clients.AllWithPushSubscriptions() {
for _, sub := range client.getPushSubscriptions(true) {
now := time.Now()
// require both periodic successful push messages and renewal of the subscription via WEBPUSH REGISTER
if now.Sub(sub.LastSuccess) > expiration || now.Sub(sub.LastRefresh) > expiration {
server.logger.Debug("webpush", "expiring push subscription for client", client.Nick(), sub.Endpoint)
client.deletePushSubscription(sub.Endpoint, false)
} else if now.Sub(sub.LastSuccess) > expiration/2 {
// we haven't pushed to them recently, make an attempt
server.logger.Debug("webpush", "pinging push subscription for client", client.Nick(), sub.Endpoint)
client.sendAndTrackPush(
sub.Endpoint, sub.Keys,
pushMessage{
msg: webpush.PingMessage,
urgency: webpush.UrgencyNormal,
},
false,
)
}
}
// persist all push subscriptions on the assumption that the timestamps have changed
client.Store(IncludePushSubscriptions)
}
}
// handles server.ip-check-script.exempt-sasl: // handles server.ip-check-script.exempt-sasl:
// run the ip check script at the end of the handshake, only for anonymous connections // run the ip check script at the end of the handshake, only for anonymous connections
func (server *Server) checkBanScriptExemptSASL(config *Config, session *Session) (outcome AuthOutcome) { func (server *Server) checkBanScriptExemptSASL(config *Config, session *Session) (outcome AuthOutcome) {
@ -356,7 +302,7 @@ func (server *Server) checkBanScriptExemptSASL(config *Config, session *Session)
return authSuccess return authSuccess
} }
if output.Result == IPBanned || output.Result == IPRequireSASL { if output.Result == IPBanned || output.Result == IPRequireSASL {
server.logger.Info("connect-ip", session.connID, "Rejecting unauthenticated client due to ip-check-script", ipaddr.String()) server.logger.Info("connect-ip", "Rejecting unauthenticated client due to ip-check-script", ipaddr.String())
if output.BanMessage != "" { if output.BanMessage != "" {
session.client.requireSASLMessage = output.BanMessage session.client.requireSASLMessage = output.BanMessage
} }
@ -437,10 +383,10 @@ func (server *Server) tryRegister(c *Client, session *Session) (exiting bool) {
// check KLINEs (#671: ignore KLINEs for loopback connections) // check KLINEs (#671: ignore KLINEs for loopback connections)
if !session.IP().IsLoopback() || session.isTor { if !session.IP().IsLoopback() || session.isTor {
isBanned, info := server.klines.CheckMasks(c.AllNickmasks()...) isBanned, info := server.klines.CheckMasks(c.AllNickmasks()...)
if isBanned && !(info.RequireSASL && session.client.Account() != "") { if isBanned {
c.setKlined() c.setKlined()
c.Quit(info.BanMessage(c.t("You are banned from this server (%s)")), nil) c.Quit(info.BanMessage(c.t("You are banned from this server (%s)")), nil)
server.logger.Info("connect", session.connID, "Client rejected by k-line", c.NickMaskString()) server.logger.Info("connect", "Client rejected by k-line", c.NickMaskString())
return true return true
} }
} }
@ -472,7 +418,7 @@ func (server *Server) playRegistrationBurst(session *Session) {
c := session.client c := session.client
// continue registration // continue registration
d := c.Details() d := c.Details()
server.logger.Info("connect", session.connID, fmt.Sprintf("Client connected [%s] [u:%s] [r:%s]", d.nick, d.username, d.realname)) server.logger.Info("connect", fmt.Sprintf("Client connected [%s] [u:%s] [r:%s]", d.nick, d.username, d.realname))
server.snomasks.Send(sno.LocalConnects, fmt.Sprintf("Client connected [%s] [u:%s] [h:%s] [ip:%s] [r:%s]", d.nick, d.username, session.rawHostname, session.IP().String(), d.realname)) server.snomasks.Send(sno.LocalConnects, fmt.Sprintf("Client connected [%s] [u:%s] [h:%s] [ip:%s] [r:%s]", d.nick, d.username, session.rawHostname, session.IP().String(), d.realname))
if d.account != "" { if d.account != "" {
server.sendLoginSnomask(d.nickMask, d.accountName) server.sendLoginSnomask(d.nickMask, d.accountName)
@ -488,9 +434,7 @@ func (server *Server) playRegistrationBurst(session *Session) {
session.Send(nil, server.name, RPL_MYINFO, d.nick, server.name, Ver, rplMyInfo1, rplMyInfo2, rplMyInfo3) session.Send(nil, server.name, RPL_MYINFO, d.nick, server.name, Ver, rplMyInfo1, rplMyInfo2, rplMyInfo3)
rb := NewResponseBuffer(session) rb := NewResponseBuffer(session)
if !(rb.session.capabilities.Has(caps.ExtendedISupport) && rb.session.isupportSentPrereg) {
server.RplISupport(c, rb) server.RplISupport(c, rb)
}
if d.account != "" && session.capabilities.Has(caps.Persistence) { if d.account != "" && session.capabilities.Has(caps.Persistence) {
reportPersistenceStatus(c, rb, false) reportPersistenceStatus(c, rb, false)
} }
@ -512,22 +456,15 @@ func (server *Server) playRegistrationBurst(session *Session) {
// RplISupport outputs our ISUPPORT lines to the client. This is used on connection and in VERSION responses. // RplISupport outputs our ISUPPORT lines to the client. This is used on connection and in VERSION responses.
func (server *Server) RplISupport(client *Client, rb *ResponseBuffer) { func (server *Server) RplISupport(client *Client, rb *ResponseBuffer) {
server.sendRplISupportLines(client, rb, server.Config().Server.isupport.CachedReply) translatedISupport := client.t("are supported by this server")
}
func (server *Server) sendRplISupportLines(client *Client, rb *ResponseBuffer, lines [][]string) {
if rb.session.capabilities.Has(caps.ExtendedISupport) {
batchID := rb.StartNestedBatch(caps.ExtendedISupportBatchType)
defer rb.EndNestedBatch(batchID)
}
finalText := "are supported by this server"
nick := client.Nick() nick := client.Nick()
for _, cachedTokenLine := range lines { config := server.Config()
for _, cachedTokenLine := range config.Server.isupport.CachedReply {
length := len(cachedTokenLine) + 2 length := len(cachedTokenLine) + 2
tokenline := make([]string, length) tokenline := make([]string, length)
tokenline[0] = nick tokenline[0] = nick
copy(tokenline[1:], cachedTokenLine) copy(tokenline[1:], cachedTokenLine)
tokenline[length-1] = finalText tokenline[length-1] = translatedISupport
rb.Add(nil, server.name, RPL_ISUPPORT, tokenline...) rb.Add(nil, server.name, RPL_ISUPPORT, tokenline...)
} }
} }
@ -642,7 +579,7 @@ func (client *Client) getWhoisOf(target *Client, hasPrivs bool, rb *ResponseBuff
// rehash reloads the config and applies the changes from the config file. // rehash reloads the config and applies the changes from the config file.
func (server *Server) rehash() error { func (server *Server) rehash() error {
// #1570; this needs its own panic handling because it can be invoked via SIGHUP // #1570; this needs its own panic handling because it can be invoked via SIGHUP
defer server.HandlePanic(nil) defer server.HandlePanic()
server.logger.Info("server", "Attempting rehash") server.logger.Info("server", "Attempting rehash")
@ -796,16 +733,6 @@ func (server *Server) applyConfig(config *Config) (err error) {
return fmt.Errorf("Could not load cloak secret: %w", err) return fmt.Errorf("Could not load cloak secret: %w", err)
} }
config.Server.Cloaks.SetSecret(cloakSecret) config.Server.Cloaks.SetSecret(cloakSecret)
// similarly bring the VAPID keys into the config, which requires regenerating the 005
if config.WebPush.Enabled {
config.WebPush.vapidKeys, err = LoadVAPIDKeys(server.dstore)
if err != nil {
return fmt.Errorf("Could not load VAPID keys: %w", err)
}
if err = config.generateISupport(); err != nil {
return fmt.Errorf("Could not regenerate cached 005 for VAPID: %w", err)
}
}
// activate the new config // activate the new config
server.config.Store(config) server.config.Store(config)
@ -848,8 +775,6 @@ func (server *Server) applyConfig(config *Config) (err error) {
server.setupPprofListener(config) server.setupPprofListener(config)
server.setupAPIListener(config)
// set RPL_ISUPPORT // set RPL_ISUPPORT
var newISupportReplies [][]string var newISupportReplies [][]string
if oldConfig != nil { if oldConfig != nil {
@ -869,19 +794,13 @@ func (server *Server) applyConfig(config *Config) (err error) {
} }
if !initial { if !initial {
// send 005 updates (somewhat rare) // push new info to all of our clients
if len(newISupportReplies) != 0 {
for _, sClient := range server.clients.AllClients() { for _, sClient := range server.clients.AllClients() {
for _, session := range sClient.Sessions() { for _, tokenline := range newISupportReplies {
rb := NewResponseBuffer(session) sClient.Send(nil, server.name, RPL_ISUPPORT, append([]string{sClient.nick}, tokenline...)...)
server.sendRplISupportLines(sClient, rb, newISupportReplies)
rb.Send(false)
}
}
} }
if sendRawOutputNotice { if sendRawOutputNotice {
for _, sClient := range server.clients.AllClients() {
sClient.Notice(sClient.t("This server is in debug mode and is logging all user I/O. If you do not wish for everything you send to be readable by the server owner(s), please disconnect.")) sClient.Notice(sClient.t("This server is in debug mode and is logging all user I/O. If you do not wish for everything you send to be readable by the server owner(s), please disconnect."))
} }
} }
@ -918,46 +837,6 @@ func (server *Server) setupPprofListener(config *Config) {
} }
} }
func (server *Server) setupAPIListener(config *Config) {
if server.apiServer != nil {
if !config.API.Enabled || (config.API.Listener != server.apiServer.Addr) {
server.logger.Info("server", "Stopping API listener", server.apiServer.Addr)
server.apiServer.Close()
server.apiListener = nil
server.apiServer = nil
}
}
if !config.API.Enabled {
return
}
listenerConfig := utils.ListenerConfig{
TLSConfig: config.API.tlsConfig,
}
if server.apiListener != nil {
server.apiListener.Reload(listenerConfig)
return
}
listener, err := net.Listen("tcp", config.API.Listener)
if err != nil {
server.logger.Error("server", "Couldn't create API listener", config.API.Listener, err.Error())
return
}
server.apiListener = utils.NewReloadableListener(listener, listenerConfig)
server.apiServer = &http.Server{
Addr: config.API.Listener, // just informational since we created the listener ourselves
Handler: server.apiHandler,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
MaxHeaderBytes: 16384,
}
go func(hs *http.Server, listener net.Listener) {
if err := hs.Serve(listener); err != nil {
server.logger.Error("server", "API listener failed", err.Error())
}
}(server.apiServer, server.apiListener)
server.logger.Info("server", "Started API listener", server.apiServer.Addr)
}
func (server *Server) loadDatastore(config *Config) error { func (server *Server) loadDatastore(config *Config) error {
// open the datastore and load server state for which it (rather than config) // open the datastore and load server state for which it (rather than config)
// is the source of truth // is the source of truth
@ -1230,16 +1109,6 @@ func (server *Server) UnfoldName(cfname string) (name string) {
return server.clients.UnfoldNick(cfname) return server.clients.UnfoldNick(cfname)
} }
// generateConnectionID generates a unique string identifier for an incoming connection.
// this identifier is only used for debug logging.
func (server *Server) generateConnectionID() string {
id := server.connIDCounter.Add(1)
// pad with leading zeroes to a minimum length of 5 hex digits. this enhances greppability;
// the identifier length will be 6 for the first 1048576 connections, which is less important
// but makes the log slightly easier to read
return fmt.Sprintf("s%05x", id)
}
// elistMatcher takes and matches ELIST conditions // elistMatcher takes and matches ELIST conditions
type elistMatcher struct { type elistMatcher struct {
MinClientsActive bool MinClientsActive bool


@ -7,7 +7,7 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"log" "log"
"slices" "sort"
"strings" "strings"
"time" "time"
@ -223,6 +223,7 @@ func serviceRunCommand(service *ircService, server *Server, client *Client, cmd
return return
} }
server.logger.Debug("services", fmt.Sprintf("Client %s ran %s command %s", client.Nick(), service.Name, commandName))
if commandName == "help" { if commandName == "help" {
serviceHelpHandler(service, server, client, params, rb) serviceHelpHandler(service, server, client, params, rb)
} else { } else {
@ -250,7 +251,7 @@ func serviceHelpHandler(service *ircService, server *Server, client *Client, par
client.t("Here are the commands you can use:"), client.t("Here are the commands you can use:"),
}...) }...)
// show general help // show general help
var shownHelpLines []string var shownHelpLines sort.StringSlice
var disabledCommands bool var disabledCommands bool
for _, commandInfo := range service.Commands { for _, commandInfo := range service.Commands {
// skip commands user can't access // skip commands user can't access
@ -268,13 +269,13 @@ func serviceHelpHandler(service *ircService, server *Server, client *Client, par
shownHelpLines = append(shownHelpLines, " "+ircfmt.Unescape(client.t(commandInfo.helpShort))) shownHelpLines = append(shownHelpLines, " "+ircfmt.Unescape(client.t(commandInfo.helpShort)))
} }
// sort help lines
slices.Sort(shownHelpLines)
if disabledCommands { if disabledCommands {
shownHelpLines = append(shownHelpLines, " "+client.t("... and other commands which have been disabled")) shownHelpLines = append(shownHelpLines, " "+client.t("... and other commands which have been disabled"))
} }
// sort help lines
sort.Sort(shownHelpLines)
// push out help text // push out help text
for _, line := range helpBannerLines { for _, line := range helpBannerLines {
sendNotice(line) sendNotice(line)


@ -233,7 +233,7 @@ func (c *Client) Auth(a Auth) error {
} }
resp64 := make([]byte, encoding.EncodedLen(len(resp))) resp64 := make([]byte, encoding.EncodedLen(len(resp)))
encoding.Encode(resp64, resp) encoding.Encode(resp64, resp)
code, msg64, err := c.cmd(0, "%s", strings.TrimSpace(fmt.Sprintf("AUTH %s %s", mech, resp64))) code, msg64, err := c.cmd(0, strings.TrimSpace(fmt.Sprintf("AUTH %s %s", mech, resp64)))
for err == nil { for err == nil {
var msg []byte var msg []byte
switch code { switch code {
@ -259,7 +259,7 @@ func (c *Client) Auth(a Auth) error {
} }
resp64 = make([]byte, encoding.EncodedLen(len(resp))) resp64 = make([]byte, encoding.EncodedLen(len(resp)))
encoding.Encode(resp64, resp) encoding.Encode(resp64, resp)
code, msg64, err = c.cmd(0, "%s", resp64) code, msg64, err = c.cmd(0, string(resp64))
} }
return err return err
} }
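
The change here threads the already-composed AUTH string through an explicit "%s" format instead of letting c.cmd interpret it as a format string. A tiny illustration of the bug class this avoids (the payload is made up; real base64 cannot contain '%', so in practice this is mostly a go vet cleanliness fix):

package main

import "fmt"

func main() {
	payload := "AUTH CRAM-MD5 challenge%3Dresponse" // hypothetical string containing '%'
	fmt.Println(fmt.Sprintf(payload))               // "%3D" is misread as a formatting verb and mangled
	fmt.Println(fmt.Sprintf("%s", payload))         // passed through verbatim
}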


@ -163,7 +163,7 @@ func ubanAddHandler(client *Client, target ubanTarget, params []string, rb *Resp
case ubanCIDR: case ubanCIDR:
err = ubanAddCIDR(client, target, duration, requireSASL, operReason, rb) err = ubanAddCIDR(client, target, duration, requireSASL, operReason, rb)
case ubanNickmask: case ubanNickmask:
err = ubanAddNickmask(client, target, duration, requireSASL, operReason, rb) err = ubanAddNickmask(client, target, duration, operReason, rb)
case ubanNick: case ubanNick:
err = ubanAddAccount(client, target, duration, operReason, rb) err = ubanAddAccount(client, target, duration, operReason, rb)
} }
@ -242,8 +242,8 @@ func ubanAddCIDR(client *Client, target ubanTarget, duration time.Duration, requ
return return
} }
func ubanAddNickmask(client *Client, target ubanTarget, duration time.Duration, requireSASL bool, operReason string, rb *ResponseBuffer) (err error) { func ubanAddNickmask(client *Client, target ubanTarget, duration time.Duration, operReason string, rb *ResponseBuffer) (err error) {
err = client.server.klines.AddMask(target.nickOrMask, duration, requireSASL, "", operReason, client.Oper().Name) err = client.server.klines.AddMask(target.nickOrMask, duration, "", operReason, client.Oper().Name)
if err == nil { if err == nil {
rb.Notice(fmt.Sprintf(client.t("Successfully added UBAN for %s"), target.nickOrMask)) rb.Notice(fmt.Sprintf(client.t("Successfully added UBAN for %s"), target.nickOrMask))
} else { } else {
@ -455,7 +455,7 @@ func ubanInfoNick(client *Client, target ubanTarget, rb *ResponseBuffer) {
rb.Notice(client.t("Warning: banning this IP or a network that contains it may affect other users. Use /UBAN INFO on the candidate IP or network for more information.")) rb.Notice(client.t("Warning: banning this IP or a network that contains it may affect other users. Use /UBAN INFO on the candidate IP or network for more information."))
} }
} else { } else {
rb.Notice(client.t("No client is currently using that nickname")) rb.Notice(fmt.Sprintf(client.t("No client is currently using that nickname")))
} }
account, err := client.server.accounts.LoadAccount(target.nickOrMask) account, err := client.server.accounts.LoadAccount(target.nickOrMask)

irc/utils/sync.go Normal file

@ -0,0 +1,35 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package utils
import (
"sync"
"sync/atomic"
)
// Once is a fork of sync.Once to expose a Done() method.
type Once struct {
done uint32
m sync.Mutex
}
func (o *Once) Do(f func()) {
if atomic.LoadUint32(&o.done) == 0 {
o.doSlow(f)
}
}
func (o *Once) doSlow(f func()) {
o.m.Lock()
defer o.m.Unlock()
if o.done == 0 {
defer atomic.StoreUint32(&o.done, 1)
f()
}
}
func (o *Once) Done() bool {
return atomic.LoadUint32(&o.done) == 1
}
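
A short usage sketch for the forked Once above; it assumes the type is importable as github.com/ergochat/ergo/irc/utils (the package is internal to this repository). The point of the fork is the non-blocking Done check.

package main

import (
	"fmt"

	"github.com/ergochat/ergo/irc/utils"
)

var shutdownOnce utils.Once

func shutdown() {
	fmt.Println("shutting down")
}

func main() {
	fmt.Println(shutdownOnce.Done()) // false: Do has not run yet
	shutdownOnce.Do(shutdown)
	shutdownOnce.Do(shutdown)        // no-op: the function runs at most once
	fmt.Println(shutdownOnce.Done()) // true: cheap check that shutdown already happened
}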


@ -95,20 +95,6 @@ func (sm *SplitMessage) Is512() bool {
return sm.Split == nil return sm.Split == nil
} }
func (sm *SplitMessage) CombinedValue() string {
if sm.Split == nil {
return sm.Message
}
var buf strings.Builder
for i := range sm.Split {
if i != 0 && !sm.Split[i].Concat {
buf.WriteRune('\n')
}
buf.WriteString(sm.Split[i].Message)
}
return buf.String()
}
// TokenLineBuilder is a helper for building IRC lines composed of delimited tokens, // TokenLineBuilder is a helper for building IRC lines composed of delimited tokens,
// with a maximum line length. // with a maximum line length.
type TokenLineBuilder struct { type TokenLineBuilder struct {


@ -66,15 +66,3 @@ func BenchmarkTokenLines(b *testing.B) {
tl.Lines() tl.Lines()
} }
} }
func TestCombinedValue(t *testing.T) {
var split = SplitMessage{
Split: []MessagePair{
{"hi", false},
{"hi", false},
{" again", true},
{"you", false},
},
}
assertEqual(split.CombinedValue(), "hi\nhi again\nyou", t)
}


@ -1,15 +0,0 @@
package utils
import (
"time"
)
// ReadMarkerLessThanOrEqual compares times from the standpoint of
// draft/read-marker (the presentation format of which truncates the time
// to the millisecond). In future we might want to consider proactively rounding,
// instead of truncating, the time, but this has complex implications.
func ReadMarkerLessThanOrEqual(t1, t2 time.Time) bool {
t1 = t1.Truncate(time.Millisecond)
t2 = t2.Truncate(time.Millisecond)
return t1.Before(t2) || t1.Equal(t2)
}
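
A small, self-contained illustration of why the truncation matters: a server-side timestamp with sub-millisecond precision should still compare as already-read against the same instant after a round trip through the millisecond-precision draft/read-marker presentation format. The helper is copied from above so the example runs on its own.

package main

import (
	"fmt"
	"time"
)

// copied from the function above so the sketch is self-contained
func ReadMarkerLessThanOrEqual(t1, t2 time.Time) bool {
	t1 = t1.Truncate(time.Millisecond)
	t2 = t2.Truncate(time.Millisecond)
	return t1.Before(t2) || t1.Equal(t2)
}

func main() {
	// a stored timestamp with nanosecond precision...
	stored := time.Date(2024, 6, 9, 12, 0, 0, 123_456_789, time.UTC)
	// ...compared against the same instant after a round trip through the
	// millisecond-precision presentation format
	echoed := time.Date(2024, 6, 9, 12, 0, 0, 123_000_000, time.UTC)

	fmt.Println(stored.Before(echoed) || stored.Equal(echoed)) // false: a naive comparison regresses
	fmt.Println(ReadMarkerLessThanOrEqual(stored, echoed))     // true: equal at millisecond precision
}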


@ -7,7 +7,7 @@ import "fmt"
const ( const (
// SemVer is the semantic version of Ergo. // SemVer is the semantic version of Ergo.
SemVer = "2.16.0-unreleased" SemVer = "2.14.0-rc1"
) )
var ( var (


@ -1,60 +0,0 @@
// Copyright (c) 2021-2024 Simon Ser <contact@emersion.fr>
// Originally released under the AGPLv3, relicensed to the Ergo project under the MIT license
package webpush
import (
"strings"
"unicode"
"unicode/utf8"
)
func isWordBoundary(r rune) bool {
switch r {
case '-', '_', '|': // inspired from weechat.look.highlight_regex
return false
default:
return !unicode.IsLetter(r) && !unicode.IsNumber(r)
}
}
func isURIPrefix(text string) bool {
if i := strings.LastIndexFunc(text, unicode.IsSpace); i >= 0 {
text = text[i:]
}
i := strings.Index(text, "://")
if i < 0 {
return false
}
// See RFC 3986 section 3
r, _ := utf8.DecodeLastRuneInString(text[:i])
switch r {
case '+', '-', '.':
return true
default:
return ('0' <= r && r <= '9') || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
}
}
func IsHighlight(text, nick string) bool {
if len(nick) == 0 {
return false
}
for {
i := strings.Index(text, nick)
if i < 0 {
return false
}
left, _ := utf8.DecodeLastRuneInString(text[:i])
right, _ := utf8.DecodeRuneInString(text[i+len(nick):])
if isWordBoundary(left) && isWordBoundary(right) && !isURIPrefix(text[:i]) {
return true
}
text = text[i+len(nick):]
}
}
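A hedged sketch of how the word-boundary and URI-prefix rules above play out in practice; the import path github.com/ergochat/ergo/irc/webpush is an assumption about where this package lives:

```go
package main

import (
	"fmt"

	"github.com/ergochat/ergo/irc/webpush"
)

func main() {
	// A bare mention surrounded by word boundaries is a highlight:
	fmt.Println(webpush.IsHighlight("hey alice, are you around?", "alice")) // true

	// The nick embedded inside a larger word is not:
	fmt.Println(webpush.IsHighlight("malice aforethought", "alice")) // false

	// A match inside a URI host is ignored because of the scheme prefix check:
	fmt.Println(webpush.IsHighlight("see https://alice.example.com/page", "alice")) // false
}
```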


@ -1,66 +0,0 @@
// Copyright (c) 2024 Shivaram Lingamneni <slingamn@cs.stanford.edu>
// Released under the MIT license
// Some portions of this code are:
// Copyright (c) 2024 Simon Ser <contact@emersion.fr>
// Originally released under the AGPLv3, relicensed to the Ergo project under the MIT license
package webpush
import (
"errors"
"fmt"
"net"
"net/http"
"net/netip"
"net/url"
"syscall"
)
var (
errInternalIP = errors.New("dialing an internal IP is forbidden")
)
func SanityCheckWebPushEndpoint(endpoint string) error {
u, err := url.Parse(endpoint)
if err != nil {
return err
}
if u.Scheme != "https" {
return fmt.Errorf("scheme must be HTTPS")
}
return nil
}
// makeExternalOnlyClient builds an http.Client that can only connect
// to external IP addresses.
func makeExternalOnlyClient() *http.Client {
dialer := &net.Dialer{
Control: func(network, address string, c syscall.RawConn) error {
ip, _, err := net.SplitHostPort(address)
if err != nil {
return err
}
parsedIP, err := netip.ParseAddr(ip)
if err != nil {
return err
}
if isInternalIP(parsedIP) {
return errInternalIP
}
return nil
},
}
return &http.Client{
Transport: &http.Transport{
DialContext: dialer.DialContext,
},
}
}
func isInternalIP(ip netip.Addr) bool {
return ip.IsLoopback() || ip.IsMulticast() || ip.IsPrivate()
}


@ -1,21 +0,0 @@
package webpush
import (
"errors"
"testing"
)
func TestExternalOnlyHTTPClient(t *testing.T) {
client := makeExternalOnlyClient()
for _, url := range []string{
"https://127.0.0.2/test",
"https://127.0.0.2:8201",
"https://127.0.0.2:8201/asdf",
} {
_, err := client.Get(url)
if err == nil || !errors.Is(err, errInternalIP) {
t.Errorf("%s was not forbidden as expected (got %v)", url, err)
}
}
}


@ -1,148 +0,0 @@
// Copyright (c) 2024 Shivaram Lingamneni <slingamn@cs.stanford.edu>
// Released under the MIT license
// Some portions of this code are:
// Copyright (c) 2021-2024 Simon Ser <contact@emersion.fr>
// Originally released under the AGPLv3, relicensed to the Ergo project under the MIT license
package webpush
import (
"context"
"errors"
"fmt"
"net/http"
"time"
"github.com/ergochat/irc-go/ircmsg"
webpush "github.com/ergochat/webpush-go/v2"
"github.com/ergochat/ergo/irc/utils"
)
// alias some public types and names from webpush-go
type VAPIDKeys = webpush.VAPIDKeys
type Keys = webpush.Keys
var (
GenerateVAPIDKeys = webpush.GenerateVAPIDKeys
)
// Urgency is a uint8 representation of urgency to save a few
// bytes on channel sizes.
type Urgency uint8
const (
// UrgencyVeryLow requires device state: on power and Wi-Fi
UrgencyVeryLow Urgency = iota // "very-low"
// UrgencyLow requires device state: on either power or Wi-Fi
UrgencyLow // "low"
// UrgencyNormal excludes device state: low battery
UrgencyNormal // "normal"
// UrgencyHigh admits device state: low battery
UrgencyHigh // "high"
)
var (
// PingMessage is a valid IRC message that we can send to test that the subscription
// is valid (i.e. responds to POSTs with a 20x). We do not expect that the client will
// actually connect to IRC and send PONG (although it might be nice to have a way to
// hint to a client that they should reconnect to renew their subscription?)
PingMessage = []byte("PING webpush")
)
func convertUrgency(u Urgency) webpush.Urgency {
switch u {
case UrgencyVeryLow:
return webpush.UrgencyVeryLow
case UrgencyLow:
return webpush.UrgencyLow
case UrgencyNormal:
return webpush.UrgencyNormal
case UrgencyHigh:
return webpush.UrgencyHigh
default:
return webpush.UrgencyNormal // shouldn't happen
}
}
var httpClient webpush.HTTPClient = makeExternalOnlyClient()
var (
Err404 = errors.New("endpoint returned a 404, indicating that the push subscription is no longer valid")
errInvalidKey = errors.New("invalid key format")
)
func DecodeSubscriptionKeys(keysParam string) (keys webpush.Keys, err error) {
// The keys parameter is tag-encoded, with each tag value being URL-safe base64 encoded:
// * One public key with the name p256dh set to the client's P-256 ECDH public key.
// * One shared key with the name auth set to a 16-byte client-generated authentication secret.
// since we don't have a separate tag parser implementation, wrap it in a fake IRC line for parsing:
fakeIRCLine := fmt.Sprintf("@%s PING", keysParam)
ircMsg, err := ircmsg.ParseLine(fakeIRCLine)
if err != nil {
return
}
_, auth := ircMsg.GetTag("auth")
_, p256 := ircMsg.GetTag("p256dh")
return webpush.DecodeSubscriptionKeys(auth, p256)
}
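As a sketch of the tag-encoded format described in the comment above (a p256dh public key and a 16-byte auth secret, both URL-safe base64), the following builds a syntactically valid keys parameter from freshly generated key material and decodes it. The import path and the acceptance of unpadded URL-safe base64 are assumptions:

```go
package main

import (
	"crypto/ecdh"
	"crypto/rand"
	"encoding/base64"
	"fmt"

	"github.com/ergochat/ergo/irc/webpush"
)

func main() {
	// The client's P-256 public key (p256dh) and 16-byte auth secret (auth),
	// tag-encoded as they would appear in a subscription request:
	priv, err := ecdh.P256().GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	auth := make([]byte, 16)
	if _, err := rand.Read(auth); err != nil {
		panic(err)
	}
	keysParam := fmt.Sprintf("p256dh=%s;auth=%s",
		base64.RawURLEncoding.EncodeToString(priv.PublicKey().Bytes()),
		base64.RawURLEncoding.EncodeToString(auth))

	keys, err := webpush.DecodeSubscriptionKeys(keysParam)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded: %+v\n", keys)
}
```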
// MakePushMessage serializes a utils.SplitMessage as a web push message (the args are in
// logical order)
func MakePushMessage(command, nuh, accountName, target string, msg utils.SplitMessage) ([]byte, error) {
var messageForPush string
if msg.Is512() {
messageForPush = msg.Message
} else {
messageForPush = msg.Split[0].Message
}
return MakePushLine(msg.Time, accountName, nuh, command, target, messageForPush)
}
// MakePushLine serializes an arbitrary IRC line as a web push message (the args are in
// IRC syntax order)
func MakePushLine(time time.Time, accountName, source, command string, params ...string) ([]byte, error) {
pushMessage := ircmsg.MakeMessage(nil, source, command, params...)
pushMessage.SetTag("time", time.Format(utils.IRCv3TimestampFormat))
// "*" is canonical for the unset form of the unfolded account name, but check both:
if accountName != "*" && accountName != "" {
pushMessage.SetTag("account", accountName)
}
if line, err := pushMessage.LineBytesStrict(false, 512); err == nil {
// strip final \r\n
return line[:len(line)-2], nil
} else {
return nil, err
}
}
func SendWebPush(ctx context.Context, endpoint string, keys Keys, vapidKeys *VAPIDKeys, urgency Urgency, subscriber string, msg []byte) error {
wpsub := webpush.Subscription{
Endpoint: endpoint,
Keys: keys,
}
options := webpush.Options{
HTTPClient: httpClient,
VAPIDKeys: vapidKeys,
Subscriber: subscriber,
TTL: 7 * 24 * 60 * 60, // seconds
Urgency: convertUrgency(urgency),
RecordSize: 2048,
}
resp, err := webpush.SendNotification(ctx, msg, &wpsub, &options)
if err != nil {
return err
}
resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return Err404
} else if 200 <= resp.StatusCode && resp.StatusCode < 300 {
return nil
} else {
return fmt.Errorf("HTTP error: %v", resp.Status)
}
}
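Putting the pieces of this file together, a hedged end-to-end sketch: decode subscription keys, generate VAPID keys, serialize a PRIVMSG with MakePushLine, and POST it with SendWebPush. The endpoint, subscriber URL, and import path are placeholders or assumptions, and the final send is expected to fail against the placeholder endpoint:

```go
package main

import (
	"context"
	"crypto/ecdh"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"time"

	"github.com/ergochat/ergo/irc/webpush"
)

func main() {
	// Stand-in for the key material a subscribing client would supply:
	priv, err := ecdh.P256().GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	auth := make([]byte, 16)
	if _, err := rand.Read(auth); err != nil {
		panic(err)
	}
	keys, err := webpush.DecodeSubscriptionKeys(fmt.Sprintf("p256dh=%s;auth=%s",
		base64.RawURLEncoding.EncodeToString(priv.PublicKey().Bytes()),
		base64.RawURLEncoding.EncodeToString(auth)))
	if err != nil {
		panic(err)
	}

	// Server-side VAPID keypair (normally generated once and persisted):
	vapidKeys, err := webpush.GenerateVAPIDKeys()
	if err != nil {
		panic(err)
	}

	// Serialize a PRIVMSG as the push payload, tagged with time and account:
	payload, err := webpush.MakePushLine(time.Now(), "alice", "alice!~u@example", "PRIVMSG", "#chan", "hello")
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// The endpoint and subscriber URL are placeholders; a real endpoint comes
	// from the client's push service, so this call should return an error.
	err = webpush.SendWebPush(ctx, "https://push.example.com/sub/xyz", keys, vapidKeys,
		webpush.UrgencyHigh, "https://admin.example.com/", payload)
	fmt.Println("send result:", err)
}
```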


@ -1,57 +0,0 @@
package webpush
import (
"strings"
"testing"
"time"
"github.com/ergochat/irc-go/ircmsg"
"github.com/ergochat/ergo/irc/utils"
)
func TestBuildPushLine(t *testing.T) {
now, err := time.Parse(utils.IRCv3TimestampFormat, "2025-01-12T00:55:44.403Z")
if err != nil {
panic(err)
}
line, err := MakePushLine(now, "*", "ergo.test", "MARKREAD", "#ergo", "timestamp=2025-01-12T00:07:57.972Z")
if err != nil {
t.Fatal(err)
}
if string(line) != "@time=2025-01-12T00:55:44.403Z :ergo.test MARKREAD #ergo timestamp=2025-01-12T00:07:57.972Z" {
t.Errorf("got wrong line output: %s", line)
}
}
func TestBuildPushMessage(t *testing.T) {
now, err := time.Parse(utils.IRCv3TimestampFormat, "2025-01-12T01:05:04.422Z")
if err != nil {
panic(err)
}
lineBytes, err := MakePushMessage("PRIVMSG", "shivaram!~u@kca7nfgniet7q.irc", "shivaram", "#redacted", utils.SplitMessage{
Message: "[redacted message contents]",
Msgid: "t8st5bb4b9qhed3zs3pwspinca",
Time: now,
})
if err != nil {
t.Fatal(err)
}
line := string(lineBytes)
parsed, err := ircmsg.ParseLineStrict(line, false, 512)
if err != nil {
t.Fatal(err)
}
if ok, account := parsed.GetTag("account"); !ok || account != "shivaram" {
t.Fatalf("bad account tag %s", account)
}
if ok, timestamp := parsed.GetTag("time"); !ok || timestamp != "2025-01-12T01:05:04.422Z" {
t.Fatal("bad time")
}
idx := strings.IndexByte(line, ' ')
if line[idx+1:] != ":shivaram!~u@kca7nfgniet7q.irc PRIVMSG #redacted :[redacted message contents]" {
t.Fatal("bad line")
}
}

@ -1 +1 @@
Subproject commit e9e37f5438bd5f02656b89dab0cd40ef113edac6 Subproject commit af980ed3b639b5e1b3749c74027872adb69922a7


@ -74,7 +74,6 @@ server:
max-connections-per-duration: 64
# strict transport security, to get clients to automagically use TLS
# (irrelevant in the recommended configuration, with no public plaintext listener)
sts:
# whether to advertise STS
#
@ -348,17 +347,6 @@ server:
# if you don't want to publicize how popular the server is
suppress-lusers: false
# publish additional key-value pairs in ISUPPORT (the 005 numeric).
# keys that collide with a key published by Ergo will be silently ignored.
additional-isupport:
#"draft/FILEHOST": "https://example.com/filehost"
#"draft/bazbat": "" # empty string means no value
# optionally map command alias names to existing ergo commands. most deployments
# should ignore this.
#command-aliases:
#"UMGEBUNG": "AMBIANCE"
# account options
accounts:
# is account authentication enabled, i.e., can users log into existing accounts?
@ -759,7 +747,7 @@ logging:
# be logged, even if you explicitly include it
#
# useful types include:
# * everything (usually used with excluding some types below)
# server server startup, rehash, and shutdown events
# accounts account registration and authentication
# channels channel creation and operations
@ -803,7 +791,7 @@ lock-file: "ircd.lock"
# datastore configuration
datastore:
# path to the database file (used to store account and channel registrations):
path: ircd.db
# if the database schema requires an upgrade, `autoupgrade` will attempt to
@ -905,7 +893,6 @@ fakelag:
"MARKREAD": 16 "MARKREAD": 16
"MONITOR": 1 "MONITOR": 1
"WHO": 4 "WHO": 4
"WEBPUSH": 1
# the roleplay commands are semi-standardized extensions to IRC that allow
# sending and receiving messages from pseudo-nicknames. this can be used either
@ -924,12 +911,6 @@ roleplay:
# add the real nickname, in parentheses, to the end of every roleplay message?
add-suffix: true
# allow customizing the NUH's sent for NPC and SCENE commands
# NPC: the first %s is the NPC name, the second is the user's real nick
#npc-nick-mask: "*%s*!%s@npc.fakeuser.invalid"
# SCENE: the %s is the client's real nick
#scene-nick-mask: "=Scene=!%s@npc.fakeuser.invalid"
# external services can integrate with the ircd using JSON Web Tokens (https://jwt.io).
# in effect, the server can sign a token attesting that the client is present on
# the server, is a member of a particular channel, etc.
@ -1057,42 +1038,3 @@ history:
# whether to allow customization of the config at runtime using environment variables,
# e.g., ERGO__SERVER__MAX_SENDQ=128k. see the manual for more details.
allow-environment-overrides: true
# experimental support for mobile push notifications
# see the manual for potential security, privacy, and performance implications.
# DO NOT enable if you are running a Tor or I2P hidden service (i.e. one
# with no public IP listeners, only Tor/I2P listeners).
webpush:
# are push notifications enabled at all?
enabled: false
# request timeout for POST'ing the http notification
timeout: 10s
# delay sending the notification for this amount of time, then suppress it
# if the client sent MARKREAD to indicate that it was read on another device
delay: 0s
# subscriber field for the VAPID JWT authorization:
#subscriber: "https://your-website.com/"
# maximum number of push subscriptions per user
max-subscriptions: 4
# expiration time for a push subscription; it must be renewed within this time
# by the client reconnecting to IRC. we also detect whether the client is no longer
# successfully receiving push messages.
expiration: 14d
# HTTP API. we strongly recommend leaving this disabled unless you have a specific
# need for it.
api:
# is the API enabled at all?
enabled: false
# listen address:
listener: "127.0.0.1:8089"
# serve over TLS (strongly recommended if the listener is public):
#tls:
#cert: fullchain.pem
#key: privkey.pem
# one or more static bearer tokens accepted for HTTP bearer authentication.
# these must be strong, unique, high-entropy printable ASCII strings.
# to generate a new token, use `ergo gentoken` or:
# python3 -c "import secrets; print(secrets.token_urlsafe(32))"
bearer-tokens:
- "example"


@ -1,21 +0,0 @@
MIT License
Copyright (c) 2017 emersion
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,199 +0,0 @@
package dkim
import (
"io"
"strings"
)
// Canonicalization is a canonicalization algorithm.
type Canonicalization string
const (
CanonicalizationSimple Canonicalization = "simple"
CanonicalizationRelaxed = "relaxed"
)
type canonicalizer interface {
CanonicalizeHeader(s string) string
CanonicalizeBody(w io.Writer) io.WriteCloser
}
var canonicalizers = map[Canonicalization]canonicalizer{
CanonicalizationSimple: new(simpleCanonicalizer),
CanonicalizationRelaxed: new(relaxedCanonicalizer),
}
// crlfFixer fixes any lone LF without a preceding CR.
type crlfFixer struct {
cr bool
}
func (cf *crlfFixer) Fix(b []byte) []byte {
res := make([]byte, 0, len(b))
for _, ch := range b {
prevCR := cf.cr
cf.cr = false
switch ch {
case '\r':
cf.cr = true
case '\n':
if !prevCR {
res = append(res, '\r')
}
}
res = append(res, ch)
}
return res
}
type simpleCanonicalizer struct{}
func (c *simpleCanonicalizer) CanonicalizeHeader(s string) string {
return s
}
type simpleBodyCanonicalizer struct {
w io.Writer
crlfBuf []byte
crlfFixer crlfFixer
}
func (c *simpleBodyCanonicalizer) Write(b []byte) (int, error) {
written := len(b)
b = append(c.crlfBuf, b...)
b = c.crlfFixer.Fix(b)
end := len(b)
// If it ends with \r, maybe the next write will begin with \n
if end > 0 && b[end-1] == '\r' {
end--
}
// Keep all \r\n sequences
for end >= 2 {
prev := b[end-2]
cur := b[end-1]
if prev != '\r' || cur != '\n' {
break
}
end -= 2
}
c.crlfBuf = b[end:]
var err error
if end > 0 {
_, err = c.w.Write(b[:end])
}
return written, err
}
func (c *simpleBodyCanonicalizer) Close() error {
// Flush crlfBuf if it ends with a single \r (without a matching \n)
if len(c.crlfBuf) > 0 && c.crlfBuf[len(c.crlfBuf)-1] == '\r' {
if _, err := c.w.Write(c.crlfBuf); err != nil {
return err
}
}
c.crlfBuf = nil
if _, err := c.w.Write([]byte(crlf)); err != nil {
return err
}
return nil
}
func (c *simpleCanonicalizer) CanonicalizeBody(w io.Writer) io.WriteCloser {
return &simpleBodyCanonicalizer{w: w}
}
type relaxedCanonicalizer struct{}
func (c *relaxedCanonicalizer) CanonicalizeHeader(s string) string {
k, v, ok := strings.Cut(s, ":")
if !ok {
return strings.TrimSpace(strings.ToLower(s)) + ":" + crlf
}
k = strings.TrimSpace(strings.ToLower(k))
v = strings.Join(strings.FieldsFunc(v, func(r rune) bool {
return r == ' ' || r == '\t' || r == '\n' || r == '\r'
}), " ")
return k + ":" + v + crlf
}
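A worked illustration of the relaxed header rule above (lowercase and trim the field name, collapse runs of whitespace in the value, including folded continuation lines, to single spaces); this standalone sketch simply mirrors the same logic outside the package:

```go
package main

import (
	"fmt"
	"strings"
)

// relaxedHeader mirrors the relaxed header canonicalization shown above.
func relaxedHeader(s string) string {
	k, v, ok := strings.Cut(s, ":")
	if !ok {
		return strings.TrimSpace(strings.ToLower(s)) + ":" + "\r\n"
	}
	k = strings.TrimSpace(strings.ToLower(k))
	v = strings.Join(strings.FieldsFunc(v, func(r rune) bool {
		return r == ' ' || r == '\t' || r == '\n' || r == '\r'
	}), " ")
	return k + ":" + v + "\r\n"
}

func main() {
	in := "Subject:  Hello\r\n\t  DKIM   world\r\n"
	fmt.Printf("%q\n", relaxedHeader(in)) // "subject:Hello DKIM world\r\n"
}
```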
type relaxedBodyCanonicalizer struct {
w io.Writer
crlfBuf []byte
wsp bool
written bool
crlfFixer crlfFixer
}
func (c *relaxedBodyCanonicalizer) Write(b []byte) (int, error) {
written := len(b)
b = c.crlfFixer.Fix(b)
canonical := make([]byte, 0, len(b))
for _, ch := range b {
if ch == ' ' || ch == '\t' {
c.wsp = true
} else if ch == '\r' || ch == '\n' {
c.wsp = false
c.crlfBuf = append(c.crlfBuf, ch)
} else {
if len(c.crlfBuf) > 0 {
canonical = append(canonical, c.crlfBuf...)
c.crlfBuf = c.crlfBuf[:0]
}
if c.wsp {
canonical = append(canonical, ' ')
c.wsp = false
}
canonical = append(canonical, ch)
}
}
if !c.written && len(canonical) > 0 {
c.written = true
}
_, err := c.w.Write(canonical)
return written, err
}
func (c *relaxedBodyCanonicalizer) Close() error {
if c.written {
if _, err := c.w.Write([]byte(crlf)); err != nil {
return err
}
}
return nil
}
func (c *relaxedCanonicalizer) CanonicalizeBody(w io.Writer) io.WriteCloser {
return &relaxedBodyCanonicalizer{w: w}
}
type limitedWriter struct {
W io.Writer
N int64
}
func (w *limitedWriter) Write(b []byte) (int, error) {
if w.N <= 0 {
return len(b), nil
}
skipped := 0
if int64(len(b)) > w.N {
b = b[:w.N]
skipped = int(int64(len(b)) - w.N)
}
n, err := w.W.Write(b)
w.N -= int64(n)
return n + skipped, err
}


@ -1,23 +0,0 @@
// Package dkim creates and verifies DKIM signatures, as specified in RFC 6376.
//
// # FAQ
//
// Why can't I verify a [net/mail.Message] directly? A [net/mail.Message]
// header is already parsed, and whitespace characters (especially continuation
// lines) are removed. Thus, the signature computed from the parsed header is
// not the same as the one computed from the raw header.
//
// How can I publish my public key? You have to add a TXT record to your DNS
// zone. See [RFC 6376 appendix C]. You can use the dkim-keygen tool included
// in go-msgauth to generate the key and the TXT record.
//
// [RFC 6376 appendix C]: https://tools.ietf.org/html/rfc6376#appendix-C
package dkim
import (
"time"
)
var now = time.Now
const headerFieldName = "DKIM-Signature"
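As a concrete illustration of the FAQ above, a sketch that prints the kind of TXT record a domain owner would publish at <selector>._domainkey.<domain>. The selector "mail" and domain "example.com" are placeholders, and the v/k/p tags match what parsePublicKey later in this diff understands (an RSA key in base64-encoded SubjectPublicKeyInfo form):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/base64"
	"fmt"
)

func main() {
	// Generate a signing key and print the TXT record that would publish
	// its public half for DKIM verification.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	pub, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Printf("mail._domainkey.example.com. IN TXT \"v=DKIM1; k=rsa; p=%s\"\n",
		base64.StdEncoding.EncodeToString(pub))
}
```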


@ -1,167 +0,0 @@
package dkim
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"net/textproto"
"sort"
"strings"
)
const crlf = "\r\n"
type header []string
func readHeader(r *bufio.Reader) (header, error) {
tr := textproto.NewReader(r)
var h header
for {
l, err := tr.ReadLine()
if err != nil {
return h, fmt.Errorf("failed to read header: %v", err)
}
if len(l) == 0 {
break
} else if len(h) > 0 && (l[0] == ' ' || l[0] == '\t') {
// This is a continuation line
h[len(h)-1] += l + crlf
} else {
h = append(h, l+crlf)
}
}
return h, nil
}
func writeHeader(w io.Writer, h header) error {
for _, kv := range h {
if _, err := w.Write([]byte(kv)); err != nil {
return err
}
}
_, err := w.Write([]byte(crlf))
return err
}
func foldHeaderField(kv string) string {
buf := bytes.NewBufferString(kv)
line := make([]byte, 75) // 78 - len("\r\n\s")
first := true
var fold strings.Builder
for len, err := buf.Read(line); err != io.EOF; len, err = buf.Read(line) {
if first {
first = false
} else {
fold.WriteString("\r\n ")
}
fold.Write(line[:len])
}
return fold.String() + crlf
}
func parseHeaderField(s string) (string, string) {
key, value, _ := strings.Cut(s, ":")
return strings.TrimSpace(key), strings.TrimSpace(value)
}
func parseHeaderParams(s string) (map[string]string, error) {
pairs := strings.Split(s, ";")
params := make(map[string]string)
for _, s := range pairs {
key, value, ok := strings.Cut(s, "=")
if !ok {
if strings.TrimSpace(s) == "" {
continue
}
return params, errors.New("dkim: malformed header params")
}
params[strings.TrimSpace(key)] = strings.TrimSpace(value)
}
return params, nil
}
func formatHeaderParams(headerFieldName string, params map[string]string) string {
keys, bvalue, bfound := sortParams(params)
s := headerFieldName + ":"
var line string
for _, k := range keys {
v := params[k]
nextLength := 3 + len(line) + len(v) + len(k)
if nextLength > 75 {
s += line + crlf
line = ""
}
line = fmt.Sprintf("%v %v=%v;", line, k, v)
}
if line != "" {
s += line
}
if bfound {
bfiled := foldHeaderField(" b=" + bvalue)
s += crlf + bfiled
}
return s
}
func sortParams(params map[string]string) ([]string, string, bool) {
keys := make([]string, 0, len(params))
bfound := false
var bvalue string
for k := range params {
if k == "b" {
bvalue = params["b"]
bfound = true
} else {
keys = append(keys, k)
}
}
sort.Strings(keys)
return keys, bvalue, bfound
}
type headerPicker struct {
h header
picked map[string]int
}
func newHeaderPicker(h header) *headerPicker {
return &headerPicker{
h: h,
picked: make(map[string]int),
}
}
func (p *headerPicker) Pick(key string) string {
key = strings.ToLower(key)
at := p.picked[key]
for i := len(p.h) - 1; i >= 0; i-- {
kv := p.h[i]
k, _ := parseHeaderField(kv)
if !strings.EqualFold(k, key) {
continue
}
if at == 0 {
p.picked[key]++
return kv
}
at--
}
return ""
}


@ -1,184 +0,0 @@
package dkim
import (
"crypto"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"net"
"strings"
"golang.org/x/crypto/ed25519"
)
type verifier interface {
Public() crypto.PublicKey
Verify(hash crypto.Hash, hashed []byte, sig []byte) error
}
type rsaVerifier struct {
*rsa.PublicKey
}
func (v rsaVerifier) Public() crypto.PublicKey {
return v.PublicKey
}
func (v rsaVerifier) Verify(hash crypto.Hash, hashed, sig []byte) error {
return rsa.VerifyPKCS1v15(v.PublicKey, hash, hashed, sig)
}
type ed25519Verifier struct {
ed25519.PublicKey
}
func (v ed25519Verifier) Public() crypto.PublicKey {
return v.PublicKey
}
func (v ed25519Verifier) Verify(hash crypto.Hash, hashed, sig []byte) error {
if !ed25519.Verify(v.PublicKey, hashed, sig) {
return errors.New("dkim: invalid Ed25519 signature")
}
return nil
}
type queryResult struct {
Verifier verifier
KeyAlgo string
HashAlgos []string
Notes string
Services []string
Flags []string
}
// QueryMethod is a DKIM query method.
type QueryMethod string
const (
// DNS TXT resource record (RR) lookup algorithm
QueryMethodDNSTXT QueryMethod = "dns/txt"
)
type txtLookupFunc func(domain string) ([]string, error)
type queryFunc func(domain, selector string, txtLookup txtLookupFunc) (*queryResult, error)
var queryMethods = map[QueryMethod]queryFunc{
QueryMethodDNSTXT: queryDNSTXT,
}
func queryDNSTXT(domain, selector string, txtLookup txtLookupFunc) (*queryResult, error) {
if txtLookup == nil {
txtLookup = net.LookupTXT
}
txts, err := txtLookup(selector + "._domainkey." + domain)
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
return nil, tempFailError("key unavailable: " + err.Error())
} else if err != nil {
return nil, permFailError("no key for signature: " + err.Error())
}
// net.LookupTXT will concatenate strings contained in a single TXT record.
// In other words, net.LookupTXT returns one entry per TXT record, even if
// a record contains multiple strings.
//
// RFC 6376 section 3.6.2.2 says multiple TXT records lead to undefined
// behavior, so reject that.
switch len(txts) {
case 0:
return nil, permFailError("no valid key found")
case 1:
return parsePublicKey(txts[0])
default:
return nil, permFailError("multiple TXT records found for key")
}
}
func parsePublicKey(s string) (*queryResult, error) {
params, err := parseHeaderParams(s)
if err != nil {
return nil, permFailError("key syntax error: " + err.Error())
}
res := new(queryResult)
if v, ok := params["v"]; ok && v != "DKIM1" {
return nil, permFailError("incompatible public key version")
}
p, ok := params["p"]
if !ok {
return nil, permFailError("key syntax error: missing public key data")
}
if p == "" {
return nil, permFailError("key revoked")
}
p = strings.ReplaceAll(p, " ", "")
b, err := base64.StdEncoding.DecodeString(p)
if err != nil {
return nil, permFailError("key syntax error: " + err.Error())
}
switch params["k"] {
case "rsa", "":
pub, err := x509.ParsePKIXPublicKey(b)
if err != nil {
// RFC 6376 is inconsistent about whether RSA public keys should
// be formatted as RSAPublicKey or SubjectPublicKeyInfo.
// Erratum 3017 (https://www.rfc-editor.org/errata/eid3017) proposes
// allowing both.
pub, err = x509.ParsePKCS1PublicKey(b)
if err != nil {
return nil, permFailError("key syntax error: " + err.Error())
}
}
rsaPub, ok := pub.(*rsa.PublicKey)
if !ok {
return nil, permFailError("key syntax error: not an RSA public key")
}
// RFC 8301 section 3.2: verifiers MUST NOT consider signatures using
// RSA keys of less than 1024 bits as valid signatures.
if rsaPub.Size()*8 < 1024 {
return nil, permFailError(fmt.Sprintf("key is too short: want 1024 bits, has %v bits", rsaPub.Size()*8))
}
res.Verifier = rsaVerifier{rsaPub}
res.KeyAlgo = "rsa"
case "ed25519":
if len(b) != ed25519.PublicKeySize {
return nil, permFailError(fmt.Sprintf("invalid Ed25519 public key size: %v bytes", len(b)))
}
ed25519Pub := ed25519.PublicKey(b)
res.Verifier = ed25519Verifier{ed25519Pub}
res.KeyAlgo = "ed25519"
default:
return nil, permFailError("unsupported key algorithm")
}
if hashesStr, ok := params["h"]; ok {
res.HashAlgos = parseTagList(hashesStr)
}
if notes, ok := params["n"]; ok {
res.Notes = notes
}
if servicesStr, ok := params["s"]; ok {
services := parseTagList(servicesStr)
hasWildcard := false
for _, s := range services {
if s == "*" {
hasWildcard = true
break
}
}
if !hasWildcard {
res.Services = services
}
}
if flagsStr, ok := params["t"]; ok {
res.Flags = parseTagList(flagsStr)
}
return res, nil
}


@ -1,346 +0,0 @@
package dkim
import (
"bufio"
"bytes"
"crypto"
"crypto/rand"
"crypto/rsa"
"encoding/base64"
"fmt"
"io"
"strconv"
"strings"
"time"
"golang.org/x/crypto/ed25519"
)
var randReader io.Reader = rand.Reader
// SignOptions is used to configure Sign. Domain, Selector and Signer are
// mandatory.
type SignOptions struct {
// The SDID claiming responsibility for an introduction of a message into the
// mail stream. Hence, the SDID value is used to form the query for the public
// key. The SDID MUST correspond to a valid DNS name under which the DKIM key
// record is published.
//
// This can't be empty.
Domain string
// The selector subdividing the namespace for the domain.
//
// This can't be empty.
Selector string
// The Agent or User Identifier (AUID) on behalf of which the SDID is taking
// responsibility.
//
// This is optional.
Identifier string
// The key used to sign the message.
//
// Supported Signer.Public() values are *rsa.PublicKey and
// ed25519.PublicKey.
Signer crypto.Signer
// The hash algorithm used to sign the message. If zero, a default hash will
// be chosen.
//
// The only supported hash algorithm is crypto.SHA256.
Hash crypto.Hash
// Header and body canonicalization algorithms.
//
// If empty, CanonicalizationSimple is used.
HeaderCanonicalization Canonicalization
BodyCanonicalization Canonicalization
// A list of header fields to include in the signature. If nil, all headers
// will be included. If not nil, "From" MUST be in the list.
//
// See RFC 6376 section 5.4.1 for recommended header fields.
HeaderKeys []string
// The expiration time. A zero value means no expiration.
Expiration time.Time
// A list of query methods used to retrieve the public key.
//
// If nil, it is implicitly defined as QueryMethodDNSTXT.
QueryMethods []QueryMethod
}
// Signer generates a DKIM signature.
//
// The whole message header and body must be written to the Signer. Close should
// always be called (either after the whole message has been written, or after
// an error occurred and the signer won't be used anymore). Close may return an
// error in case signing fails.
//
// After a successful Close, Signature can be called to retrieve the
// DKIM-Signature header field that the caller should prepend to the message.
type Signer struct {
pw *io.PipeWriter
done <-chan error
sigParams map[string]string // only valid after done received nil
}
// NewSigner creates a new signer. It returns an error if SignOptions is
// invalid.
func NewSigner(options *SignOptions) (*Signer, error) {
if options == nil {
return nil, fmt.Errorf("dkim: no options specified")
}
if options.Domain == "" {
return nil, fmt.Errorf("dkim: no domain specified")
}
if options.Selector == "" {
return nil, fmt.Errorf("dkim: no selector specified")
}
if options.Signer == nil {
return nil, fmt.Errorf("dkim: no signer specified")
}
headerCan := options.HeaderCanonicalization
if headerCan == "" {
headerCan = CanonicalizationSimple
}
if _, ok := canonicalizers[headerCan]; !ok {
return nil, fmt.Errorf("dkim: unknown header canonicalization %q", headerCan)
}
bodyCan := options.BodyCanonicalization
if bodyCan == "" {
bodyCan = CanonicalizationSimple
}
if _, ok := canonicalizers[bodyCan]; !ok {
return nil, fmt.Errorf("dkim: unknown body canonicalization %q", bodyCan)
}
var keyAlgo string
switch options.Signer.Public().(type) {
case *rsa.PublicKey:
keyAlgo = "rsa"
case ed25519.PublicKey:
keyAlgo = "ed25519"
default:
return nil, fmt.Errorf("dkim: unsupported key algorithm %T", options.Signer.Public())
}
hash := options.Hash
var hashAlgo string
switch options.Hash {
case 0: // sha256 is the default
hash = crypto.SHA256
fallthrough
case crypto.SHA256:
hashAlgo = "sha256"
case crypto.SHA1:
return nil, fmt.Errorf("dkim: hash algorithm too weak: sha1")
default:
return nil, fmt.Errorf("dkim: unsupported hash algorithm")
}
if options.HeaderKeys != nil {
ok := false
for _, k := range options.HeaderKeys {
if strings.EqualFold(k, "From") {
ok = true
break
}
}
if !ok {
return nil, fmt.Errorf("dkim: the From header field must be signed")
}
}
done := make(chan error, 1)
pr, pw := io.Pipe()
s := &Signer{
pw: pw,
done: done,
}
closeReadWithError := func(err error) {
pr.CloseWithError(err)
done <- err
}
go func() {
defer close(done)
// Read header
br := bufio.NewReader(pr)
h, err := readHeader(br)
if err != nil {
closeReadWithError(err)
return
}
// Hash body
hasher := hash.New()
can := canonicalizers[bodyCan].CanonicalizeBody(hasher)
if _, err := io.Copy(can, br); err != nil {
closeReadWithError(err)
return
}
if err := can.Close(); err != nil {
closeReadWithError(err)
return
}
bodyHashed := hasher.Sum(nil)
params := map[string]string{
"v": "1",
"a": keyAlgo + "-" + hashAlgo,
"bh": base64.StdEncoding.EncodeToString(bodyHashed),
"c": string(headerCan) + "/" + string(bodyCan),
"d": options.Domain,
//"l": "", // TODO
"s": options.Selector,
"t": formatTime(now()),
//"z": "", // TODO
}
var headerKeys []string
if options.HeaderKeys != nil {
headerKeys = options.HeaderKeys
} else {
for _, kv := range h {
k, _ := parseHeaderField(kv)
headerKeys = append(headerKeys, k)
}
}
params["h"] = formatTagList(headerKeys)
if options.Identifier != "" {
params["i"] = options.Identifier
}
if options.QueryMethods != nil {
methods := make([]string, len(options.QueryMethods))
for i, method := range options.QueryMethods {
methods[i] = string(method)
}
params["q"] = formatTagList(methods)
}
if !options.Expiration.IsZero() {
params["x"] = formatTime(options.Expiration)
}
// Hash and sign headers
hasher.Reset()
picker := newHeaderPicker(h)
for _, k := range headerKeys {
kv := picker.Pick(k)
if kv == "" {
// The Signer MAY include more instances of a header field name
// in "h=" than there are actual corresponding header fields so
// that the signature will not verify if additional header
// fields of that name are added.
continue
}
kv = canonicalizers[headerCan].CanonicalizeHeader(kv)
if _, err := io.WriteString(hasher, kv); err != nil {
closeReadWithError(err)
return
}
}
params["b"] = ""
sigField := formatSignature(params)
sigField = canonicalizers[headerCan].CanonicalizeHeader(sigField)
sigField = strings.TrimRight(sigField, crlf)
if _, err := io.WriteString(hasher, sigField); err != nil {
closeReadWithError(err)
return
}
hashed := hasher.Sum(nil)
// Don't pass Hash to Sign for ed25519 as it doesn't support it
// and will return an error ("ed25519: cannot sign hashed message").
if keyAlgo == "ed25519" {
hash = crypto.Hash(0)
}
sig, err := options.Signer.Sign(randReader, hashed, hash)
if err != nil {
closeReadWithError(err)
return
}
params["b"] = base64.StdEncoding.EncodeToString(sig)
s.sigParams = params
closeReadWithError(nil)
}()
return s, nil
}
// Write implements io.WriteCloser.
func (s *Signer) Write(b []byte) (n int, err error) {
return s.pw.Write(b)
}
// Close implements io.WriteCloser. The error return by Close must be checked.
func (s *Signer) Close() error {
if err := s.pw.Close(); err != nil {
return err
}
return <-s.done
}
// Signature returns the whole DKIM-Signature header field. It can only be
// called after a successful Signer.Close call.
//
// The returned value contains both the header field name, its value and the
// final CRLF.
func (s *Signer) Signature() string {
if s.sigParams == nil {
panic("dkim: Signer.Signature must only be called after a succesful Signer.Close")
}
return formatSignature(s.sigParams)
}
// Sign signs a message. It reads it from r and writes the signed version to w.
func Sign(w io.Writer, r io.Reader, options *SignOptions) error {
s, err := NewSigner(options)
if err != nil {
return err
}
defer s.Close()
// We need to keep the message in a buffer so we can write the new DKIM
// header field before the rest of the message
var b bytes.Buffer
mw := io.MultiWriter(&b, s)
if _, err := io.Copy(mw, r); err != nil {
return err
}
if err := s.Close(); err != nil {
return err
}
if _, err := io.WriteString(w, s.Signature()); err != nil {
return err
}
_, err = io.Copy(w, &b)
return err
}
func formatSignature(params map[string]string) string {
sig := formatHeaderParams(headerFieldName, params)
return sig
}
func formatTagList(l []string) string {
return strings.Join(l, ":")
}
func formatTime(t time.Time) string {
return strconv.FormatInt(t.Unix(), 10)
}
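A hedged usage sketch of the signing API above, using the Sign convenience wrapper with a freshly generated RSA key on a minimal CRLF-terminated message; the module path github.com/emersion/go-msgauth/dkim is assumed from the package's upstream origin:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"os"
	"strings"

	"github.com/emersion/go-msgauth/dkim"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	mail := "From: alice@example.org\r\n" +
		"To: bob@example.org\r\n" +
		"Subject: hello\r\n" +
		"\r\n" +
		"Hi Bob.\r\n"

	options := &dkim.SignOptions{
		Domain:   "example.org",
		Selector: "mail",
		Signer:   key,
	}

	// Sign writes the DKIM-Signature header field followed by the original message:
	if err := dkim.Sign(os.Stdout, strings.NewReader(mail), options); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```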


@ -1,462 +0,0 @@
package dkim
import (
"bufio"
"crypto"
"crypto/subtle"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"regexp"
"strconv"
"strings"
"time"
"unicode"
)
type permFailError string
func (err permFailError) Error() string {
return "dkim: " + string(err)
}
// IsPermFail returns true if the error returned by Verify is a permanent
// failure. A permanent failure is for instance a missing required field or a
// malformed header.
func IsPermFail(err error) bool {
_, ok := err.(permFailError)
return ok
}
type tempFailError string
func (err tempFailError) Error() string {
return "dkim: " + string(err)
}
// IsTempFail returns true if the error returned by Verify is a temporary
// failure.
func IsTempFail(err error) bool {
_, ok := err.(tempFailError)
return ok
}
type failError string
func (err failError) Error() string {
return "dkim: " + string(err)
}
// isFail returns true if the error returned by Verify is a signature error.
func isFail(err error) bool {
_, ok := err.(failError)
return ok
}
// ErrTooManySignatures is returned by Verify when the message exceeds the
// maximum number of signatures.
var ErrTooManySignatures = errors.New("dkim: too many signatures")
var requiredTags = []string{"v", "a", "b", "bh", "d", "h", "s"}
// A Verification is produced by Verify when it checks if one signature is
// valid. If the signature is valid, Err is nil.
type Verification struct {
// The SDID claiming responsibility for an introduction of a message into the
// mail stream.
Domain string
// The Agent or User Identifier (AUID) on behalf of which the SDID is taking
// responsibility.
Identifier string
// The list of signed header fields.
HeaderKeys []string
// The time that this signature was created. If unknown, it's set to zero.
Time time.Time
// The expiration time. If the signature doesn't expire, it's set to zero.
Expiration time.Time
// Err is nil if the signature is valid.
Err error
}
type signature struct {
i int
v string
}
// VerifyOptions allows to customize the default signature verification
// behavior.
type VerifyOptions struct {
// LookupTXT returns the DNS TXT records for the given domain name. If nil,
// net.LookupTXT is used.
LookupTXT func(domain string) ([]string, error)
// MaxVerifications controls the maximum number of signature verifications
// to perform. If more signatures are present, the first MaxVerifications
// signatures are verified, the rest are ignored and ErrTooManySignatures
// is returned. If zero, there is no maximum.
MaxVerifications int
}
// Verify checks if a message's signatures are valid. It returns one
// verification per signature.
//
// There is no guarantee that the reader will be completely consumed.
func Verify(r io.Reader) ([]*Verification, error) {
return VerifyWithOptions(r, nil)
}
// VerifyWithOptions performs the same task as Verify, but allows specifying
// verification options.
func VerifyWithOptions(r io.Reader, options *VerifyOptions) ([]*Verification, error) {
// Read header
bufr := bufio.NewReader(r)
h, err := readHeader(bufr)
if err != nil {
return nil, err
}
// Scan header fields for signatures
var signatures []*signature
for i, kv := range h {
k, v := parseHeaderField(kv)
if strings.EqualFold(k, headerFieldName) {
signatures = append(signatures, &signature{i, v})
}
}
tooManySignatures := false
if options != nil && options.MaxVerifications > 0 && len(signatures) > options.MaxVerifications {
tooManySignatures = true
signatures = signatures[:options.MaxVerifications]
}
var verifs []*Verification
if len(signatures) == 1 {
// If there is only one signature - just verify it.
v, err := verify(h, bufr, h[signatures[0].i], signatures[0].v, options)
if err != nil && !IsTempFail(err) && !IsPermFail(err) && !isFail(err) {
return nil, err
}
v.Err = err
verifs = []*Verification{v}
} else {
verifs, err = parallelVerify(bufr, h, signatures, options)
if err != nil {
return nil, err
}
}
if tooManySignatures {
return verifs, ErrTooManySignatures
}
return verifs, nil
}
func parallelVerify(r io.Reader, h header, signatures []*signature, options *VerifyOptions) ([]*Verification, error) {
pipeWriters := make([]*io.PipeWriter, len(signatures))
// We can't pass pipeWriter to io.MultiWriter directly,
// we need a slice of io.Writer, but we also need *io.PipeWriter
// to call Close on it.
writers := make([]io.Writer, len(signatures))
chans := make([]chan *Verification, len(signatures))
for i, sig := range signatures {
// Be careful with loop variables and goroutines.
i, sig := i, sig
chans[i] = make(chan *Verification, 1)
pr, pw := io.Pipe()
writers[i] = pw
pipeWriters[i] = pw
go func() {
v, err := verify(h, pr, h[sig.i], sig.v, options)
// Make sure we consume the whole reader, otherwise io.Copy on
// other side can block forever.
io.Copy(ioutil.Discard, pr)
v.Err = err
chans[i] <- v
}()
}
if _, err := io.Copy(io.MultiWriter(writers...), r); err != nil {
return nil, err
}
for _, wr := range pipeWriters {
wr.Close()
}
verifications := make([]*Verification, len(signatures))
for i, ch := range chans {
verifications[i] = <-ch
}
// Return unexpected failures as a separate error.
for _, v := range verifications {
err := v.Err
if err != nil && !IsTempFail(err) && !IsPermFail(err) && !isFail(err) {
v.Err = nil
return verifications, err
}
}
return verifications, nil
}
func verify(h header, r io.Reader, sigField, sigValue string, options *VerifyOptions) (*Verification, error) {
verif := new(Verification)
params, err := parseHeaderParams(sigValue)
if err != nil {
return verif, permFailError("malformed signature tags: " + err.Error())
}
if params["v"] != "1" {
return verif, permFailError("incompatible signature version")
}
verif.Domain = stripWhitespace(params["d"])
for _, tag := range requiredTags {
if _, ok := params[tag]; !ok {
return verif, permFailError("signature missing required tag")
}
}
if i, ok := params["i"]; ok {
verif.Identifier = stripWhitespace(i)
if !strings.HasSuffix(verif.Identifier, "@"+verif.Domain) && !strings.HasSuffix(verif.Identifier, "."+verif.Domain) {
return verif, permFailError("domain mismatch")
}
} else {
verif.Identifier = "@" + verif.Domain
}
headerKeys := parseTagList(params["h"])
ok := false
for _, k := range headerKeys {
if strings.EqualFold(k, "from") {
ok = true
break
}
}
if !ok {
return verif, permFailError("From field not signed")
}
verif.HeaderKeys = headerKeys
if timeStr, ok := params["t"]; ok {
t, err := parseTime(timeStr)
if err != nil {
return verif, permFailError("malformed time: " + err.Error())
}
verif.Time = t
}
if expiresStr, ok := params["x"]; ok {
t, err := parseTime(expiresStr)
if err != nil {
return verif, permFailError("malformed expiration time: " + err.Error())
}
verif.Expiration = t
if now().After(t) {
return verif, permFailError("signature has expired")
}
}
// Query public key
// TODO: compute hash in parallel
methods := []string{string(QueryMethodDNSTXT)}
if methodsStr, ok := params["q"]; ok {
methods = parseTagList(methodsStr)
}
var res *queryResult
for _, method := range methods {
if query, ok := queryMethods[QueryMethod(method)]; ok {
if options != nil {
res, err = query(verif.Domain, stripWhitespace(params["s"]), options.LookupTXT)
} else {
res, err = query(verif.Domain, stripWhitespace(params["s"]), nil)
}
break
}
}
if err != nil {
return verif, err
} else if res == nil {
return verif, permFailError("unsupported public key query method")
}
// Parse algos
keyAlgo, hashAlgo, ok := strings.Cut(stripWhitespace(params["a"]), "-")
if !ok {
return verif, permFailError("malformed algorithm name")
}
// Check hash algo
if res.HashAlgos != nil {
ok := false
for _, algo := range res.HashAlgos {
if algo == hashAlgo {
ok = true
break
}
}
if !ok {
return verif, permFailError("inappropriate hash algorithm")
}
}
var hash crypto.Hash
switch hashAlgo {
case "sha1":
// RFC 8301 section 3.1: rsa-sha1 MUST NOT be used for signing or
// verifying.
return verif, permFailError(fmt.Sprintf("hash algorithm too weak: %v", hashAlgo))
case "sha256":
hash = crypto.SHA256
default:
return verif, permFailError("unsupported hash algorithm")
}
// Check key algo
if res.KeyAlgo != keyAlgo {
return verif, permFailError("inappropriate key algorithm")
}
if res.Services != nil {
ok := false
for _, s := range res.Services {
if s == "email" {
ok = true
break
}
}
if !ok {
return verif, permFailError("inappropriate service")
}
}
headerCan, bodyCan := parseCanonicalization(params["c"])
if _, ok := canonicalizers[headerCan]; !ok {
return verif, permFailError("unsupported header canonicalization algorithm")
}
if _, ok := canonicalizers[bodyCan]; !ok {
return verif, permFailError("unsupported body canonicalization algorithm")
}
// The body length "l" parameter is insecure, because it allows parts of
// the message body to not be signed. Reject messages which have it set.
if _, ok := params["l"]; ok {
// TODO: technically should be policyError
return verif, failError("message contains an insecure body length tag")
}
// Parse body hash and signature
bodyHashed, err := decodeBase64String(params["bh"])
if err != nil {
return verif, permFailError("malformed body hash: " + err.Error())
}
sig, err := decodeBase64String(params["b"])
if err != nil {
return verif, permFailError("malformed signature: " + err.Error())
}
// Check body hash
hasher := hash.New()
wc := canonicalizers[bodyCan].CanonicalizeBody(hasher)
if _, err := io.Copy(wc, r); err != nil {
return verif, err
}
if err := wc.Close(); err != nil {
return verif, err
}
if subtle.ConstantTimeCompare(hasher.Sum(nil), bodyHashed) != 1 {
return verif, failError("body hash did not verify")
}
// Compute data hash
hasher.Reset()
picker := newHeaderPicker(h)
for _, key := range headerKeys {
kv := picker.Pick(key)
if kv == "" {
// The field MAY contain names of header fields that do not exist
// when signed; nonexistent header fields do not contribute to the
// signature computation
continue
}
kv = canonicalizers[headerCan].CanonicalizeHeader(kv)
if _, err := hasher.Write([]byte(kv)); err != nil {
return verif, err
}
}
canSigField := removeSignature(sigField)
canSigField = canonicalizers[headerCan].CanonicalizeHeader(canSigField)
canSigField = strings.TrimRight(canSigField, "\r\n")
if _, err := hasher.Write([]byte(canSigField)); err != nil {
return verif, err
}
hashed := hasher.Sum(nil)
// Check signature
if err := res.Verifier.Verify(hash, hashed, sig); err != nil {
return verif, failError("signature did not verify: " + err.Error())
}
return verif, nil
}
func parseTagList(s string) []string {
tags := strings.Split(s, ":")
for i, t := range tags {
tags[i] = stripWhitespace(t)
}
return tags
}
func parseCanonicalization(s string) (headerCan, bodyCan Canonicalization) {
headerCan = CanonicalizationSimple
bodyCan = CanonicalizationSimple
cans := strings.SplitN(stripWhitespace(s), "/", 2)
if cans[0] != "" {
headerCan = Canonicalization(cans[0])
}
if len(cans) > 1 {
bodyCan = Canonicalization(cans[1])
}
return
}
func parseTime(s string) (time.Time, error) {
sec, err := strconv.ParseInt(stripWhitespace(s), 10, 64)
if err != nil {
return time.Time{}, err
}
return time.Unix(sec, 0), nil
}
func decodeBase64String(s string) ([]byte, error) {
return base64.StdEncoding.DecodeString(stripWhitespace(s))
}
func stripWhitespace(s string) string {
return strings.Map(func(r rune) rune {
if unicode.IsSpace(r) {
return -1
}
return r
}, s)
}
var sigRegex = regexp.MustCompile(`(b\s*=)[^;]+`)
func removeSignature(s string) string {
return sigRegex.ReplaceAllString(s, "$1")
}
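And the corresponding verification side, as a sketch: message.eml is a placeholder for a raw RFC 5322 message read with its original header whitespace intact, as the package documentation requires, and the module path is assumed as above:

```go
package main

import (
	"fmt"
	"os"

	"github.com/emersion/go-msgauth/dkim"
)

func main() {
	f, err := os.Open("message.eml") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// One Verification is returned per DKIM-Signature header field.
	verifications, err := dkim.Verify(f)
	if err != nil {
		panic(err)
	}
	for _, v := range verifications {
		if v.Err == nil {
			fmt.Println("valid signature from", v.Domain)
		} else {
			fmt.Println("invalid signature from", v.Domain, ":", v.Err)
		}
	}
}
```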


@ -196,15 +196,6 @@ func trimInitialSpaces(str string) string {
return str[i:]
}
func isASCII(str string) bool {
for i := 0; i < len(str); i++ {
if str[i] > 127 {
return false
}
}
return true
}
func parseLine(line string, maxTagDataLength int, truncateLen int) (ircmsg Message, err error) {
// remove either \n or \r\n from the end of the line:
line = strings.TrimSuffix(line, "\n")
@ -274,16 +265,11 @@ func parseLine(line string, maxTagDataLength int, truncateLen int) (ircmsg Messa
commandEnd = len(line)
paramStart = len(line)
}
baseCommand := line[:commandEnd] // normalize command to uppercase:
if len(baseCommand) == 0 { ircmsg.Command = strings.ToUpper(line[:commandEnd])
if len(ircmsg.Command) == 0 {
return ircmsg, ErrorLineIsEmpty
}
// technically this must be either letters or a 3-digit numeric:
if !isASCII(baseCommand) {
return ircmsg, ErrorLineContainsBadChar
}
// normalize command to uppercase:
ircmsg.Command = strings.ToUpper(baseCommand)
line = line[paramStart:]
for {


@ -3,6 +3,7 @@ package ircutils
import (
"encoding/base64"
"errors"
"strings"
)
var (
@ -24,7 +25,6 @@ func EncodeSASLResponse(raw []byte) (result []string) {
}
response := base64.StdEncoding.EncodeToString(raw)
result = make([]string, 0, (len(response)/400)+1)
lastLen := 0
for len(response) > 0 {
// TODO once we require go 1.21, this can be: lastLen = min(len(response), 400)
@ -48,11 +48,11 @@ func EncodeSASLResponse(raw []byte) (result []string) {
// Do not copy a SASLBuffer after first use.
type SASLBuffer struct {
maxLength int
buf []byte buffer strings.Builder
}
// NewSASLBuffer returns a new SASLBuffer. maxLength is the maximum amount of
// data to buffer (0 for no limit). // base64'ed data to buffer (0 for no limit).
func NewSASLBuffer(maxLength int) *SASLBuffer {
result := new(SASLBuffer)
result.Initialize(maxLength)
@ -69,43 +69,37 @@ func (b *SASLBuffer) Initialize(maxLength int) {
// response along with any decoding or protocol errors detected.
func (b *SASLBuffer) Add(value string) (done bool, output []byte, err error) {
if value == "+" {
// total size is a multiple of 400 (possibly 0) output, err = b.getAndReset()
output = b.buf return true, output, err
b.Clear()
return true, output, nil
} }
if len(value) > 400 {
b.Clear() b.buffer.Reset()
return true, nil, ErrSASLTooLong
}
curLen := len(b.buf) if b.maxLength != 0 && (b.buffer.Len()+len(value)) > b.maxLength {
chunkDecodedLen := base64.StdEncoding.DecodedLen(len(value)) b.buffer.Reset()
if b.maxLength != 0 && (curLen+chunkDecodedLen) > b.maxLength {
b.Clear()
return true, nil, ErrSASLLimitExceeded return true, nil, ErrSASLLimitExceeded
} }
// "append-make pattern" as in the bytes.Buffer implementation: b.buffer.WriteString(value)
b.buf = append(b.buf, make([]byte, chunkDecodedLen)...)
n, err := base64.StdEncoding.Decode(b.buf[curLen:], []byte(value))
b.buf = b.buf[0 : curLen+n]
if err != nil {
b.Clear()
return true, nil, err
}
if len(value) < 400 {
output = b.buf output, err = b.getAndReset()
b.Clear() return true, output, err
return true, output, nil
} else {
// 400 bytes, wait for continuation line or +
return false, nil, nil
}
}
// Clear resets the buffer state.
func (b *SASLBuffer) Clear() {
// we can't reuse this buffer in general since we may have returned it b.buffer.Reset()
b.buf = nil }
func (b *SASLBuffer) getAndReset() (output []byte, err error) {
output, err = base64.StdEncoding.DecodeString(b.buffer.String())
b.buffer.Reset()
return
} }


@ -1,13 +0,0 @@
#!/bin/bash
SOURCES="."
if [ "$1" = "--fix" ]; then
exec gofmt -s -w $SOURCES
fi
if [ -n "$(gofmt -s -l $SOURCES)" ]; then
echo "Go code is not formatted correctly with \`gofmt -s\`:"
gofmt -s -d $SOURCES
exit 1
fi


@ -1,6 +0,0 @@
vendor/**
.DS_Store
*.out
*.swp


@ -1,14 +0,0 @@
# Changelog
All notable changes to webpush-go will be documented in this file.
## [2.0.0] - 2025-01-16
* Update the `Keys` struct definition to store `Auth` as `[16]byte` and `P256dh` as `*ecdh.PublicKey`
* `Keys` can no longer be compared with `==`; use `(*Keys.Equal)` instead
* The JSON representation has not changed and is backwards and forwards compatible with v1
* `DecodeSubscriptionKeys` is a helper to decode base64-encoded auth and p256dh parameters into a `Keys`, with validation
* Update the `VAPIDKeys` struct to contain a `(*ecdsa.PrivateKey)`
* `VAPIDKeys` can no longer be compared with `==`; use `(*VAPIDKeys).Equal` instead
* The JSON representation is now a JSON string containing the PEM of the PKCS8-encoded private key
* To parse the legacy representation (raw bytes of the private key encoded in base64), use `DecodeLegacyVAPIDPrivateKey`
* Renamed `SendNotificationWithContext` to `SendNotification`, removing the earlier `SendNotification` API. (Pass `context.Background()` as the context to restore the former behavior.)


@ -1,6 +0,0 @@
.PHONY: test
test:
go test .
go vet .
./.check-gofmt.sh


@ -1,65 +0,0 @@
# webpush-go
[![GoDoc](https://godoc.org/github.com/ergochat/webpush-go?status.svg)](https://godoc.org/github.com/ergochat/webpush-go)
Web Push API Encryption with VAPID support.
This library is a fork of [SherClockHolmes/webpush-go](https://github.com/SherClockHolmes/webpush-go). See CHANGELOG.md for details on migrating from the upstream library.
```bash
go get -u github.com/ergochat/webpush-go/v2
```
## Example
For a full example, refer to the code in the [example](example/) directory.
```go
package main
import (
"encoding/json"
webpush "github.com/ergochat/webpush-go/v2"
)
func main() {
// Decode subscription
s := &webpush.Subscription{}
json.Unmarshal([]byte("<YOUR_SUBSCRIPTION>"), s)
vapidKeys := new(webpush.VAPIDKeys)
json.Unmarshal([]byte("<YOUR_VAPID_KEYS">), vapidKeys)
// Send Notification
resp, err := webpush.SendNotification([]byte("Test"), s, &webpush.Options{
Subscriber: "example@example.com",
VAPIDKeys: vapidKeys,
TTL: 3600, // seconds
})
if err != nil {
// TODO: Handle error
}
defer resp.Body.Close()
}
```
### Generating VAPID Keys
Use the helper method `GenerateVAPIDKeys` to generate the VAPID key pair.
```golang
vapidKeys, err := webpush.GenerateVAPIDKeys()
if err != nil {
// TODO: Handle error
}
```
## Development
1. Install [Go 1.20+](https://golang.org/)
2. `go mod vendor`
3. `go test`
#### For other language implementations visit:
[WebPush Libs](https://github.com/web-push-libs)


@ -1,76 +0,0 @@
package webpush
import (
"crypto/ecdh"
"crypto/ecdsa"
"crypto/elliptic"
"encoding/base64"
"fmt"
"math/big"
)
// ecdhPublicKeyToECDSA converts an ECDH key to an ECDSA key.
// This is deprecated as per https://github.com/golang/go/issues/63963
// but we need to do it in order to parse the legacy private key format.
func ecdhPublicKeyToECDSA(key *ecdh.PublicKey) (*ecdsa.PublicKey, error) {
rawKey := key.Bytes()
switch key.Curve() {
case ecdh.P256():
return &ecdsa.PublicKey{
Curve: elliptic.P256(),
X: big.NewInt(0).SetBytes(rawKey[1:33]),
Y: big.NewInt(0).SetBytes(rawKey[33:]),
}, nil
case ecdh.P384():
return &ecdsa.PublicKey{
Curve: elliptic.P384(),
X: big.NewInt(0).SetBytes(rawKey[1:49]),
Y: big.NewInt(0).SetBytes(rawKey[49:]),
}, nil
case ecdh.P521():
return &ecdsa.PublicKey{
Curve: elliptic.P521(),
X: big.NewInt(0).SetBytes(rawKey[1:67]),
Y: big.NewInt(0).SetBytes(rawKey[67:]),
}, nil
default:
return nil, fmt.Errorf("cannot convert non-NIST *ecdh.PublicKey to *ecdsa.PublicKey")
}
}
func ecdhPrivateKeyToECDSA(key *ecdh.PrivateKey) (*ecdsa.PrivateKey, error) {
// see https://github.com/golang/go/issues/63963
pubKey, err := ecdhPublicKeyToECDSA(key.PublicKey())
if err != nil {
return nil, fmt.Errorf("converting PublicKey part of *ecdh.PrivateKey: %w", err)
}
return &ecdsa.PrivateKey{
PublicKey: *pubKey,
D: big.NewInt(0).SetBytes(key.Bytes()),
}, nil
}
// DecodeLegacyVAPIDPrivateKey decodes the legacy string private key format
// returned by GenerateVAPIDKeys in v1.
func DecodeLegacyVAPIDPrivateKey(key string) (*VAPIDKeys, error) {
bytes, err := decodeSubscriptionKey(key)
if err != nil {
return nil, err
}
ecdhPrivKey, err := ecdh.P256().NewPrivateKey(bytes)
if err != nil {
return nil, err
}
ecdsaPrivKey, err := ecdhPrivateKeyToECDSA(ecdhPrivKey)
if err != nil {
return nil, err
}
publicKey := base64.RawURLEncoding.EncodeToString(ecdhPrivKey.PublicKey().Bytes())
return &VAPIDKeys{
privateKey: ecdsaPrivKey,
publicKey: publicKey,
}, nil
}


@ -1,26 +0,0 @@
package webpush
// Urgency indicates to the push service how important a message is to the user.
// This can be used by the push service to help conserve the battery life of a user's device
// by only waking up for important messages when battery is low.
type Urgency string
const (
// UrgencyVeryLow requires device state: on power and Wi-Fi
UrgencyVeryLow Urgency = "very-low"
// UrgencyLow requires device state: on either power or Wi-Fi
UrgencyLow Urgency = "low"
// UrgencyNormal excludes device state: low battery
UrgencyNormal Urgency = "normal"
// UrgencyHigh admits device state: low battery
UrgencyHigh Urgency = "high"
)
// Checking allowable values for the urgency header
func isValidUrgency(urgency Urgency) bool {
switch urgency {
case UrgencyVeryLow, UrgencyLow, UrgencyNormal, UrgencyHigh:
return true
}
return false
}


@ -1,177 +0,0 @@
package webpush
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"net/url"
"strings"
"time"
jwt "github.com/golang-jwt/jwt/v5"
)
// VAPIDKeys is a public-private keypair for use in VAPID.
// It marshals to a JSON string containing the PEM of the PKCS8
// of the private key.
type VAPIDKeys struct {
privateKey *ecdsa.PrivateKey
publicKey string // raw bytes encoding in urlsafe base64, as per RFC
}
// PublicKeyString returns the base64url-encoded uncompressed public key of the keypair,
// as defined in RFC8292.
func (v *VAPIDKeys) PublicKeyString() string {
return v.publicKey
}
// PrivateKey returns the private key of the keypair.
func (v *VAPIDKeys) PrivateKey() *ecdsa.PrivateKey {
return v.privateKey
}
// Equal compares two VAPIDKeys for equality.
func (v *VAPIDKeys) Equal(o *VAPIDKeys) bool {
return v.privateKey.Equal(o.privateKey)
}
var _ json.Marshaler = (*VAPIDKeys)(nil)
var _ json.Unmarshaler = (*VAPIDKeys)(nil)
// MarshalJSON implements json.Marshaler, allowing serialization to JSON.
func (v *VAPIDKeys) MarshalJSON() ([]byte, error) {
pkcs8bytes, err := x509.MarshalPKCS8PrivateKey(v.privateKey)
if err != nil {
return nil, err
}
pemBlock := pem.Block{
Type: "PRIVATE KEY",
Bytes: pkcs8bytes,
}
pemBytes := pem.EncodeToMemory(&pemBlock)
if pemBytes == nil {
return nil, fmt.Errorf("could not encode VAPID keys as PEM")
}
return json.Marshal(string(pemBytes))
}
// UnmarshalJSON implements json.Unmarshaler, allowing deserialization from JSON.
func (v *VAPIDKeys) UnmarshalJSON(b []byte) error {
var pemKey string
if err := json.Unmarshal(b, &pemKey); err != nil {
return err
}
pemBlock, _ := pem.Decode([]byte(pemKey))
if pemBlock == nil {
return fmt.Errorf("could not decode PEM block with VAPID keys")
}
privKey, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes)
if err != nil {
return err
}
privateKey, ok := privKey.(*ecdsa.PrivateKey)
if !ok {
return fmt.Errorf("Invalid type of private key %T", privateKey)
}
if privateKey.Curve != elliptic.P256() {
return fmt.Errorf("Invalid curve for private key %v", privateKey.Curve)
}
publicKeyStr, err := makePublicKeyString(privateKey)
if err != nil {
return err // should not be possible since we confirmed P256 already
}
// success
v.privateKey = privateKey
v.publicKey = publicKeyStr
return nil
}
// GenerateVAPIDKeys generates a VAPID keypair (an ECDSA keypair on
// the P-256 curve).
func GenerateVAPIDKeys() (result *VAPIDKeys, err error) {
private, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return
}
pubKeyECDH, err := private.PublicKey.ECDH()
if err != nil {
return
}
publicKey := base64.RawURLEncoding.EncodeToString(pubKeyECDH.Bytes())
return &VAPIDKeys{
privateKey: private,
publicKey: publicKey,
}, nil
}
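Because `VAPIDKeys` marshals to a JSON string containing the PKCS#8 PEM of the private key (see `MarshalJSON`/`UnmarshalJSON` above), the usual pattern is to generate the keypair once and persist it. A minimal sketch, again assuming the package is imported as `webpush`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	webpush "github.com/SherClockHolmes/webpush-go" // assumed import path
)

func main() {
	keys, err := webpush.GenerateVAPIDKeys()
	if err != nil {
		log.Fatal(err)
	}
	// Serialize for storage: a JSON string wrapping the PKCS#8 PEM of the private key.
	blob, err := json.Marshal(keys)
	if err != nil {
		log.Fatal(err)
	}
	// Later, load the same keypair back.
	var restored webpush.VAPIDKeys
	if err := json.Unmarshal(blob, &restored); err != nil {
		log.Fatal(err)
	}
	fmt.Println(restored.PublicKeyString() == keys.PublicKeyString()) // true
}
```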
// ECDSAToVAPIDKeys wraps an existing ecdsa.PrivateKey in VAPIDKeys for use in
// VAPID header signing.
func ECDSAToVAPIDKeys(privKey *ecdsa.PrivateKey) (result *VAPIDKeys, err error) {
if privKey.Curve != elliptic.P256() {
return nil, fmt.Errorf("Invalid curve for private key %v", privKey.Curve)
}
publicKeyString, err := makePublicKeyString(privKey)
if err != nil {
return nil, err
}
return &VAPIDKeys{
privateKey: privKey,
publicKey: publicKeyString,
}, nil
}
func makePublicKeyString(privKey *ecdsa.PrivateKey) (result string, err error) {
// to get the raw bytes we have to convert the public key to *ecdh.PublicKey
// this type assertion (from the crypto.PublicKey returned by (*ecdsa.PrivateKey).Public()
// to *ecdsa.PublicKey) cannot fail:
publicKey, err := privKey.Public().(*ecdsa.PublicKey).ECDH()
if err != nil {
return // should not be possible if we confirmed P256 already
}
return base64.RawURLEncoding.EncodeToString(publicKey.Bytes()), nil
}
// getVAPIDAuthorizationHeader returns the value of the VAPID Authorization header (RFC 8292) for a request to the given push endpoint.
func getVAPIDAuthorizationHeader(
endpoint string,
subscriber string,
vapidKeys *VAPIDKeys,
expiration time.Time,
) (string, error) {
if expiration.IsZero() {
expiration = time.Now().Add(time.Hour * 12)
}
// Create the JWT token
subURL, err := url.Parse(endpoint)
if err != nil {
return "", err
}
// Unless subscriber is an HTTPS URL, assume an e-mail address
if !strings.HasPrefix(subscriber, "https:") && !strings.HasPrefix(subscriber, "mailto:") {
subscriber = "mailto:" + subscriber
}
token := jwt.NewWithClaims(jwt.SigningMethodES256, jwt.MapClaims{
"aud": subURL.Scheme + "://" + subURL.Host,
"exp": expiration.Unix(),
"sub": subscriber,
})
// Sign token with private key
jwtString, err := token.SignedString(vapidKeys.privateKey)
if err != nil {
return "", err
}
return "vapid t=" + jwtString + ", k=" + vapidKeys.publicKey, nil
}


@ -1,323 +0,0 @@
package webpush
import (
"bytes"
"context"
"crypto/aes"
"crypto/cipher"
"crypto/ecdh"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"golang.org/x/crypto/hkdf"
)
const MaxRecordSize uint32 = 4096
var (
ErrRecordSizeTooSmall = errors.New("record size too small for message")
invalidAuthKeyLength = errors.New("invalid auth key length (must be 16)")
defaultHTTPClient HTTPClient = &http.Client{}
)
// HTTPClient is an interface for sending the notification HTTP request / testing
type HTTPClient interface {
Do(*http.Request) (*http.Response, error)
}
// Options are config and extra params needed to send a notification
type Options struct {
HTTPClient HTTPClient // Defaults to a standard *http.Client if not provided
RecordSize uint32 // Limit the record size
Subscriber string // Sub in VAPID JWT token
Topic string // Set the Topic header to collapse pending messages (Optional)
TTL int // Set the TTL on the endpoint POST request, in seconds
Urgency Urgency // Set the Urgency header to change a message priority (Optional)
VAPIDKeys *VAPIDKeys // VAPID public-private keypair to generate the VAPID Authorization header
VapidExpiration time.Time // optional expiration for VAPID JWT token (defaults to now + 12 hours)
}
// Keys represents a subscription's keys (its ECDH public key on the P-256 curve
// and its 16-byte authentication secret).
type Keys struct {
Auth [16]byte
P256dh *ecdh.PublicKey
}
// Equal compares two Keys for equality.
func (k *Keys) Equal(o Keys) bool {
return k.Auth == o.Auth && k.P256dh.Equal(o.P256dh)
}
var _ json.Marshaler = (*Keys)(nil)
var _ json.Unmarshaler = (*Keys)(nil)
type marshaledKeys struct {
Auth string `json:"auth"`
P256dh string `json:"p256dh"`
}
// MarshalJSON implements json.Marshaler, allowing serialization to JSON.
func (k *Keys) MarshalJSON() ([]byte, error) {
m := marshaledKeys{
Auth: base64.RawStdEncoding.EncodeToString(k.Auth[:]),
P256dh: base64.RawStdEncoding.EncodeToString(k.P256dh.Bytes()),
}
return json.Marshal(&m)
}
// UnmarshalJSON implements json.Unmarshaler, allowing deserialization from JSON.
func (k *Keys) UnmarshalJSON(b []byte) (err error) {
var m marshaledKeys
if err := json.Unmarshal(b, &m); err != nil {
return err
}
authBytes, err := decodeSubscriptionKey(m.Auth)
if err != nil {
return err
}
if len(authBytes) != 16 {
return fmt.Errorf("invalid auth bytes length %d (must be 16)", len(authBytes))
}
copy(k.Auth[:], authBytes)
rawDHKey, err := decodeSubscriptionKey(m.P256dh)
if err != nil {
return err
}
k.P256dh, err = ecdh.P256().NewPublicKey(rawDHKey)
return err
}
// DecodeSubscriptionKeys decodes and validates a base64-encoded pair of subscription keys
// (the authentication secret and ECDH public key).
func DecodeSubscriptionKeys(auth, p256dh string) (keys Keys, err error) {
authBytes, err := decodeSubscriptionKey(auth)
if err != nil {
return
}
if len(authBytes) != 16 {
err = invalidAuthKeyLength
return
}
copy(keys.Auth[:], authBytes)
dhBytes, err := decodeSubscriptionKey(p256dh)
if err != nil {
return
}
keys.P256dh, err = ecdh.P256().NewPublicKey(dhBytes)
if err != nil {
return
}
return
}
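In practice the `auth` and `p256dh` arguments are the base64url strings a client takes from `PushSubscription.toJSON().keys`. A small sketch of turning them into a `Subscription`; the `webpush` import alias and the endpoint/auth/p256dh parameters are assumptions for illustration:

```go
package push

import (
	"fmt"

	webpush "github.com/SherClockHolmes/webpush-go" // assumed import path
)

// buildSubscription converts the endpoint URL plus the "auth" and "p256dh"
// strings sent by a client into a Subscription usable with SendNotification.
func buildSubscription(endpoint, auth, p256dh string) (*webpush.Subscription, error) {
	keys, err := webpush.DecodeSubscriptionKeys(auth, p256dh)
	if err != nil {
		return nil, fmt.Errorf("invalid subscription keys: %w", err)
	}
	return &webpush.Subscription{Endpoint: endpoint, Keys: keys}, nil
}
```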
// Subscription represents a PushSubscription object from the Push API
type Subscription struct {
Endpoint string `json:"endpoint"`
Keys Keys `json:"keys"`
}
// SendNotification sends a push notification to a subscription's endpoint,
// applying encryption (RFC 8291) and adding a VAPID header (RFC 8292).
func SendNotification(ctx context.Context, message []byte, s *Subscription, options *Options) (*http.Response, error) {
// Compose message body (RFC8291 encryption of the message)
body, err := EncryptNotification(message, s.Keys, options.RecordSize)
if err != nil {
return nil, err
}
// Get VAPID Authorization header
vapidAuthHeader, err := getVAPIDAuthorizationHeader(
s.Endpoint,
options.Subscriber,
options.VAPIDKeys,
options.VapidExpiration,
)
if err != nil {
return nil, err
}
// Compose and send the HTTP request
return sendNotification(ctx, s.Endpoint, options, vapidAuthHeader, body)
}
// EncryptNotification implements the encryption algorithm specified by RFC 8291 for web push
// (RFC 8188's aes128gcm content-encoding, with the key material derived from
// elliptic curve Diffie-Hellman over the P-256 curve).
func EncryptNotification(message []byte, keys Keys, recordSize uint32) ([]byte, error) {
// Get the record size
if recordSize == 0 {
recordSize = MaxRecordSize
} else if recordSize < 128 {
return nil, ErrRecordSizeTooSmall
}
// Allocate buffer to hold the eventual message
// [ header block ] [ ciphertext ] [ 16 byte AEAD tag ], totaling RecordSize bytes
// the ciphertext is the encryption of: [ message ] [ \x02 ] [ 0 or more \x00 as needed ]
recordBuf := make([]byte, recordSize)
// remainingBuf tracks our current writing position in recordBuf:
remainingBuf := recordBuf
// Application server key pairs (single use)
localPrivateKey, err := ecdh.P256().GenerateKey(rand.Reader)
if err != nil {
return nil, err
}
localPublicKey := localPrivateKey.PublicKey()
// Encryption Content-Coding Header
// +-----------+--------+-----------+---------------+
// | salt (16) | rs (4) | idlen (1) | keyid (idlen) |
// +-----------+--------+-----------+---------------+
// in our case the keyid is localPublicKey.Bytes(), so 65 bytes
// First, generate the salt
_, err = rand.Read(remainingBuf[:16])
if err != nil {
return nil, err
}
salt := remainingBuf[:16]
remainingBuf = remainingBuf[16:]
binary.BigEndian.PutUint32(remainingBuf[:], recordSize)
remainingBuf = remainingBuf[4:]
localPublicKeyBytes := localPublicKey.Bytes()
remainingBuf[0] = byte(len(localPublicKeyBytes))
remainingBuf = remainingBuf[1:]
copy(remainingBuf[:], localPublicKeyBytes)
remainingBuf = remainingBuf[len(localPublicKeyBytes):]
// Combine application keys with receiver's EC public key to derive ECDH shared secret
sharedECDHSecret, err := localPrivateKey.ECDH(keys.P256dh)
if err != nil {
return nil, fmt.Errorf("deriving shared secret: %w", err)
}
// ikm
prkInfoBuf := bytes.NewBuffer([]byte("WebPush: info\x00"))
prkInfoBuf.Write(keys.P256dh.Bytes())
prkInfoBuf.Write(localPublicKey.Bytes())
prkHKDF := hkdf.New(sha256.New, sharedECDHSecret, keys.Auth[:], prkInfoBuf.Bytes())
ikm, err := getHKDFKey(prkHKDF, 32)
if err != nil {
return nil, err
}
// Derive Content Encryption Key
contentEncryptionKeyInfo := []byte("Content-Encoding: aes128gcm\x00")
contentHKDF := hkdf.New(sha256.New, ikm, salt, contentEncryptionKeyInfo)
contentEncryptionKey, err := getHKDFKey(contentHKDF, 16)
if err != nil {
return nil, err
}
// Derive the Nonce
nonceInfo := []byte("Content-Encoding: nonce\x00")
nonceHKDF := hkdf.New(sha256.New, ikm, salt, nonceInfo)
nonce, err := getHKDFKey(nonceHKDF, 12)
if err != nil {
return nil, err
}
// Cipher
c, err := aes.NewCipher(contentEncryptionKey)
if err != nil {
return nil, err
}
gcm, err := cipher.NewGCM(c)
if err != nil {
return nil, err
}
// need 1 byte for the 0x02 delimiter, 16 bytes for the AEAD tag
if len(remainingBuf) < len(message)+17 {
return nil, ErrRecordSizeTooSmall
}
// Copy the message plaintext into the buffer
copy(remainingBuf[:], message[:])
// The plaintext to be encrypted will include the padding delimiter and the padding;
// cut off the final 16 bytes that are reserved for the AEAD tag
plaintext := remainingBuf[:len(remainingBuf)-16]
remainingBuf = remainingBuf[len(message):]
// Add padding delimiter
remainingBuf[0] = '\x02'
remainingBuf = remainingBuf[1:]
// The rest of the buffer is already zero-padded
// Encipher the plaintext in place, then add the AEAD tag at the end.
// "To reuse plaintext's storage for the encrypted output, use plaintext[:0]
// as dst. Otherwise, the remaining capacity of dst must not overlap plaintext."
gcm.Seal(plaintext[:0], nonce, plaintext, nil)
return recordBuf, nil
}
func sendNotification(ctx context.Context, endpoint string, options *Options, vapidAuthHeader string, body []byte) (*http.Response, error) {
// POST request
req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
if ctx != nil {
req = req.WithContext(ctx)
}
req.Header.Set("Content-Encoding", "aes128gcm")
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("TTL", strconv.Itoa(options.TTL))
// Check the optional headers
if len(options.Topic) > 0 {
req.Header.Set("Topic", options.Topic)
}
if isValidUrgency(options.Urgency) {
req.Header.Set("Urgency", string(options.Urgency))
}
req.Header.Set("Authorization", vapidAuthHeader)
// Send the request
var client HTTPClient
if options.HTTPClient != nil {
client = options.HTTPClient
} else {
client = defaultHTTPClient
}
return client.Do(req)
}
// decodeSubscriptionKey decodes a base64 subscription key.
func decodeSubscriptionKey(key string) ([]byte, error) {
key = strings.TrimRight(key, "=")
if strings.IndexByte(key, '+') != -1 || strings.IndexByte(key, '/') != -1 {
return base64.RawStdEncoding.DecodeString(key)
}
return base64.RawURLEncoding.DecodeString(key)
}
// Returns a key of length "length" given an hkdf function
func getHKDFKey(hkdf io.Reader, length int) ([]byte, error) {
key := make([]byte, length)
n, err := io.ReadFull(hkdf, key)
if n != len(key) || err != nil {
return key, err
}
return key, nil
}


@ -10,11 +10,11 @@ implementation of [JSON Web
Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0)
- this project adds Go module support, but maintains backward compatibility with
+ this project adds Go module support, but maintains backwards compatibility with
older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the
[`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version
v5.0.0 introduces major improvements to the validation of tokens, but is not
- entirely backward compatible.
+ entirely backwards compatible.
> After the original author of the library suggested migrating the maintenance
> of `jwt-go`, a dedicated team of open source maintainers decided to clone the
@ -24,7 +24,7 @@ entirely backward compatible.
**SECURITY NOTICE:** Some older versions of Go have a security issue in the
- crypto/elliptic. The recommendation is to upgrade to at least 1.15 See issue
+ crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue
[dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more
detail.
@ -32,7 +32,7 @@ detail.
what you
expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/).
This library attempts to make it easy to do the right thing by requiring key
- types to match the expected alg, but you should take the extra step to verify it in
+ types match the expected alg, but you should take the extra step to verify it in
your usage. See the examples provided.
### Supported Go versions
@ -41,7 +41,7 @@ Our support of Go versions is aligned with Go's [version release
policy](https://golang.org/doc/devel/release#policy). So we will support a major
version of Go until there are two newer major releases. We no longer support
building jwt-go with unsupported Go versions, as these contain security
- vulnerabilities that will not be fixed.
+ vulnerabilities which will not be fixed.
## What the heck is a JWT?
@ -117,7 +117,7 @@ notable differences:
This library is considered production ready. Feedback and feature requests are
appreciated. The API should be considered stable. There should be very few
- backward-incompatible changes outside of major version updates (and only with
+ backwards-incompatible changes outside of major version updates (and only with
good reason).
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull
@ -125,8 +125,8 @@ requests will land on `main`. Periodically, versions will be tagged from
`main`. You can find all the releases on [the project releases
page](https://github.com/golang-jwt/jwt/releases).
- **BREAKING CHANGES:** A full list of breaking changes is available in
- `VERSION_HISTORY.md`. See [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information on updating
+ **BREAKING CHANGES:*** A full list of breaking changes is available in
+ `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating
your code.
## Extensions
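The warning above about matching key types to the expected `alg` is the main sharp edge of this API. A minimal sketch (not taken from the README) of signing and then parsing a token with `golang-jwt/jwt/v5`, restricting the accepted signing methods so a caller cannot switch algorithms:

```go
package main

import (
	"fmt"
	"log"

	jwt "github.com/golang-jwt/jwt/v5"
)

func main() {
	secret := []byte("example-hmac-secret")

	// Create and sign a token with HS256.
	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "user1"})
	signed, err := tok.SignedString(secret)
	if err != nil {
		log.Fatal(err)
	}

	// Parse it back, accepting only HS256 and supplying a key of the matching type.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	}, jwt.WithValidMethods([]string{"HS256"}))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.Valid) // true
}
```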


@ -2,11 +2,11 @@
## Supported Versions
- As of November 2024 (and until this document is updated), the latest version `v5` is supported. In critical cases, we might supply back-ported patches for `v4`.
+ As of February 2022 (and until this document is updated), the latest version `v4` is supported.
## Reporting a Vulnerability
- If you think you found a vulnerability, and even if you are not sure, please report it a [GitHub Security Advisory](https://github.com/golang-jwt/jwt/security/advisories/new). Please try be explicit, describe steps to reproduce the security issue with code example(s).
+ If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try be explicit, describe steps to reproduce the security issue with code example(s).
You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.


@ -62,7 +62,7 @@ func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interf
case *ecdsa.PublicKey:
ecdsaKey = k
default:
- return newError("ECDSA verify expects *ecdsa.PublicKey", ErrInvalidKeyType)
+ return newError("ECDSA verify expects *ecsda.PublicKey", ErrInvalidKeyType)
}
if len(sig) != 2*m.KeySize {
@ -96,7 +96,7 @@ func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte
case *ecdsa.PrivateKey:
ecdsaKey = k
default:
- return nil, newError("ECDSA sign expects *ecdsa.PrivateKey", ErrInvalidKeyType)
+ return nil, newError("ECDSA sign expects *ecsda.PrivateKey", ErrInvalidKeyType)
}
// Create the hasher


@ -91,7 +91,7 @@ func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interfa
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) {
if keyBytes, ok := key.([]byte); ok {
if !m.Hash.Available() {
- return nil, ErrHashUnavailable
+ return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType)
}
hasher := hmac.New(m.Hash.New, keyBytes)
@ -100,5 +100,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte,
return hasher.Sum(nil), nil
}
- return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType)
+ return nil, ErrInvalidKeyType
}


@ -8,8 +8,6 @@ import (
"strings"
)
- const tokenDelimiter = "."
type Parser struct {
// If populated, only these methods will be considered valid.
validMethods []string
@ -138,10 +136,9 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
// It's only ever useful in cases where you know the signature is valid (since it has already
// been or will be checked elsewhere in the stack) and you want to extract values from it.
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
- var ok bool
- parts, ok = splitToken(tokenString)
- if !ok {
- return nil, nil, newError("token contains an invalid number of segments", ErrTokenMalformed)
+ parts = strings.Split(tokenString, ".")
+ if len(parts) != 3 {
+ return nil, parts, newError("token contains an invalid number of segments", ErrTokenMalformed)
}
token = &Token{Raw: tokenString}
@ -199,33 +196,6 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
return token, parts, nil
}
- // splitToken splits a token string into three parts: header, claims, and signature. It will only
- // return true if the token contains exactly two delimiters and three parts. In all other cases, it
- // will return nil parts and false.
- func splitToken(token string) ([]string, bool) {
- parts := make([]string, 3)
- header, remain, ok := strings.Cut(token, tokenDelimiter)
- if !ok {
- return nil, false
- }
- parts[0] = header
- claims, remain, ok := strings.Cut(remain, tokenDelimiter)
- if !ok {
- return nil, false
- }
- parts[1] = claims
- // One more cut to ensure the signature is the last part of the token and there are no more
- // delimiters. This avoids an issue where malicious input could contain additional delimiters
- // causing unecessary overhead parsing tokens.
- signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
- if unexpected {
- return nil, false
- }
- parts[2] = signature
- return parts, true
- }
// DecodeSegment decodes a JWT specific base64url encoding. This function will
// take into account whether the [Parser] is configured with additional options,
// such as [WithStrictDecoding] or [WithPaddingAllowed].


@ -10,7 +10,6 @@ import (
"fmt"
"io"
"os"
- "runtime"
"sort"
"strconv"
"strings"
@ -62,8 +61,6 @@ var (
ErrTxIterating = errors.New("tx is iterating")
)
- const useAbsEx = true
// DB represents a collection of key-value pairs that persist on disk.
// Transactions are used for all forms of data access to the DB.
type DB struct {
@ -754,7 +751,7 @@ func (db *DB) Shrink() error {
return err
}
// Any failures below here are really bad. So just panic.
- if err := renameFile(tmpname, fname); err != nil {
+ if err := os.Rename(tmpname, fname); err != nil {
panicErr(err)
}
db.file, err = os.OpenFile(fname, os.O_CREATE|os.O_RDWR, 0666)
@ -774,18 +771,6 @@ func panicErr(err error) error {
panic(fmt.Errorf("buntdb: %w", err))
}
- func renameFile(src, dest string) error {
- var err error
- if err = os.Rename(src, dest); err != nil {
- if runtime.GOOS == "windows" {
- if err = os.Remove(dest); err == nil {
- err = os.Rename(src, dest)
- }
- }
- }
- return err
- }
// readLoad reads from the reader and loads commands into the database.
// modTime is the modified time of the reader, should be no greater than
// the current time.Now().
@ -910,35 +895,24 @@ func (db *DB) readLoad(rd io.Reader, modTime time.Time) (n int64, err error) {
return totalSize, ErrInvalid
}
if len(parts) == 5 {
- arg := strings.ToLower(parts[3])
- if arg != "ex" && arg != "ae" {
+ if strings.ToLower(parts[3]) != "ex" {
return totalSize, ErrInvalid
}
- ex, err := strconv.ParseInt(parts[4], 10, 64)
+ ex, err := strconv.ParseUint(parts[4], 10, 64)
if err != nil {
return totalSize, err
}
- var exat time.Time
now := time.Now()
- if arg == "ex" {
dur := (time.Duration(ex) * time.Second) - now.Sub(modTime)
- exat = now.Add(dur)
- } else {
- exat = time.Unix(ex, 0)
- }
- if exat.After(now) {
+ if dur > 0 {
db.insertIntoDatabase(&dbItem{
key: parts[1],
val: parts[2],
opts: &dbItemOpts{
ex: true,
- exat: exat,
+ exat: now.Add(dur),
},
})
- } else {
- db.deleteFromDatabase(&dbItem{
- key: parts[1],
- })
}
} else {
db.insertIntoDatabase(&dbItem{key: parts[1], val: parts[2]})
@ -1356,19 +1330,13 @@ func appendBulkString(buf []byte, s string) []byte {
// writeSetTo writes an item as a single SET record to the a bufio Writer.
func (dbi *dbItem) writeSetTo(buf []byte, now time.Time) []byte {
if dbi.opts != nil && dbi.opts.ex {
+ ex := dbi.opts.exat.Sub(now) / time.Second
buf = appendArray(buf, 5)
buf = appendBulkString(buf, "set")
buf = appendBulkString(buf, dbi.key)
buf = appendBulkString(buf, dbi.val)
- if useAbsEx {
- ex := dbi.opts.exat.Unix()
- buf = appendBulkString(buf, "ae")
- buf = appendBulkString(buf, strconv.FormatUint(uint64(ex), 10))
- } else {
- ex := dbi.opts.exat.Sub(now) / time.Second
buf = appendBulkString(buf, "ex")
buf = appendBulkString(buf, strconv.FormatUint(uint64(ex), 10))
- }
} else {
buf = appendArray(buf, 3)
buf = appendBulkString(buf, "set")
@ -1654,9 +1622,6 @@ func (tx *Tx) scan(desc, gt, lt bool, index, start, stop string,
// wrap a btree specific iterator around the user-defined iterator.
iter := func(item interface{}) bool {
dbi := item.(*dbItem)
- if dbi.expired() {
- return true
- }
return iterator(dbi.key, dbi.val)
}
var tr *btree.BTree
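The buntdb hunks above all concern how TTLs are persisted and honored: the newer code can write an absolute expiration (an `ae` field holding a Unix timestamp) instead of the relative `ex` seconds, and skips expired items during iteration. A minimal sketch of the standard `tidwall/buntdb` usage that produces such expiring records (not taken from this diff):

```go
package main

import (
	"log"
	"time"

	"github.com/tidwall/buntdb"
)

func main() {
	db, err := buntdb.Open(":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Writing a key with a TTL is what produces the expiring SET records
	// that writeSetTo serializes and readLoad later re-loads.
	err = db.Update(func(tx *buntdb.Tx) error {
		_, _, err := tx.Set("session:123", "some value",
			&buntdb.SetOptions{Expires: true, TTL: 10 * time.Minute})
		return err
	})
	if err != nil {
		log.Fatal(err)
	}
}
```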

vendor/github.com/toorop/go-dkim/.gitignore

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof


@ -1,6 +1,6 @@
- MIT License
+ The MIT License (MIT)
- Copyright (c) 2016 Ethan Holmes
+ Copyright (c) 2015 Stéphane Depierrepont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -19,3 +19,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/toorop/go-dkim/README.md

@ -0,0 +1,56 @@
# go-dkim
DKIM package for Golang
[![GoDoc](https://godoc.org/github.com/toorop/go-dkim?status.svg)](https://godoc.org/github.com/toorop/go-dkim)
## Getting started
### Install
```
go get github.com/toorop/go-dkim
```
Warning: you need to use Go 1.4.2-master or 1.4.3 (when it will be available)
see https://github.com/golang/go/issues/10482 for more info.
### Sign email
```go
import (
dkim "github.com/toorop/go-dkim"
)
func main(){
// email is the email to sign (byte slice)
// privateKey the private key (pem encoded, byte slice )
options := dkim.NewSigOptions()
options.PrivateKey = privateKey
options.Domain = "mydomain.tld"
options.Selector = "myselector"
options.SignatureExpireIn = 3600
options.BodyLength = 50
options.Headers = []string{"from", "date", "mime-version", "received", "received"}
options.AddSignatureTimestamp = true
options.Canonicalization = "relaxed/relaxed"
err := dkim.Sign(&email, options)
// handle err..
// And... that's it, 'email' is signed ! Amazing© !!!
}
```
### Verify
```go
import (
dkim "github.com/toorop/go-dkim"
)
func main(){
// email is the email to verify (byte slice)
status, err := dkim.Verify(&email)
// handle status, err (see godoc for status)
}
```
## Todo
- [ ] handle z tag (copied header fields used for diagnostic use)

vendor/github.com/toorop/go-dkim/dkim.go

@ -0,0 +1,564 @@
// Package dkim provides tools for signing and verifying an email according to RFC 6376
package dkim
import (
"bytes"
"container/list"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"hash"
"regexp"
"strings"
"time"
)
const (
CRLF = "\r\n"
TAB = " "
FWS = CRLF + TAB
MaxHeaderLineLength = 70
)
type verifyOutput int
const (
SUCCESS verifyOutput = 1 + iota
PERMFAIL
TEMPFAIL
NOTSIGNED
TESTINGSUCCESS
TESTINGPERMFAIL
TESTINGTEMPFAIL
)
// SigOptions represents signing options
type SigOptions struct {
// DKIM version (default 1)
Version uint
// Private key used for signing (required)
PrivateKey []byte
// Domain (required)
Domain string
// Selector (required)
Selector string
// The Agent of User IDentifier
Auid string
// Message canonicalization (plain-text; OPTIONAL, default is
// "simple/simple"). This tag informs the Verifier of the type of
// canonicalization used to prepare the message for signing.
Canonicalization string
// The algorithm used to generate the signature
//"rsa-sha1" or "rsa-sha256"
Algo string
// Signed header fields
Headers []string
// Body length count (if set to 0 this tag is omitted in the DKIM header)
BodyLength uint
// Query Methods used to retrieve the public key
QueryMethods []string
// Add a signature timestamp
AddSignatureTimestamp bool
// Time validity of the signature (0=never)
SignatureExpireIn uint64
// CopiedHeaderFields
CopiedHeaderFields []string
}
// NewSigOptions returns a new SigOptions with some default values
func NewSigOptions() SigOptions {
return SigOptions{
Version: 1,
Canonicalization: "simple/simple",
Algo: "rsa-sha256",
Headers: []string{"from"},
BodyLength: 0,
QueryMethods: []string{"dns/txt"},
AddSignatureTimestamp: true,
SignatureExpireIn: 0,
}
}
// Sign signs an email
func Sign(email *[]byte, options SigOptions) error {
var privateKey *rsa.PrivateKey
var err error
// PrivateKey
if len(options.PrivateKey) == 0 {
return ErrSignPrivateKeyRequired
}
d, _ := pem.Decode(options.PrivateKey)
if d == nil {
return ErrCandNotParsePrivateKey
}
// try to parse it as PKCS1 otherwise try PKCS8
if key, err := x509.ParsePKCS1PrivateKey(d.Bytes); err != nil {
if key, err := x509.ParsePKCS8PrivateKey(d.Bytes); err != nil {
return ErrCandNotParsePrivateKey
} else {
privateKey = key.(*rsa.PrivateKey)
}
} else {
privateKey = key
}
// Domain required
if options.Domain == "" {
return ErrSignDomainRequired
}
// Selector required
if options.Selector == "" {
return ErrSignSelectorRequired
}
// Canonicalization
options.Canonicalization, err = validateCanonicalization(strings.ToLower(options.Canonicalization))
if err != nil {
return err
}
// Algo
options.Algo = strings.ToLower(options.Algo)
if options.Algo != "rsa-sha1" && options.Algo != "rsa-sha256" {
return ErrSignBadAlgo
}
// Header must contain "from"
hasFrom := false
for i, h := range options.Headers {
h = strings.ToLower(h)
options.Headers[i] = h
if h == "from" {
hasFrom = true
}
}
if !hasFrom {
return ErrSignHeaderShouldContainsFrom
}
// Normalize
headers, body, err := canonicalize(email, options.Canonicalization, options.Headers)
if err != nil {
return err
}
signHash := strings.Split(options.Algo, "-")
// hash body
bodyHash, err := getBodyHash(&body, signHash[1], options.BodyLength)
if err != nil {
return err
}
// Get dkim header base
dkimHeader := newDkimHeaderBySigOptions(options)
dHeader := dkimHeader.getHeaderBaseForSigning(bodyHash)
canonicalizations := strings.Split(options.Canonicalization, "/")
dHeaderCanonicalized, err := canonicalizeHeader(dHeader, canonicalizations[0])
if err != nil {
return err
}
headers = append(headers, []byte(dHeaderCanonicalized)...)
headers = bytes.TrimRight(headers, " \r\n")
// sign
sig, err := getSignature(&headers, privateKey, signHash[1])
// add to DKIM-Header
subh := ""
l := len(subh)
for _, c := range sig {
subh += string(c)
l++
if l >= MaxHeaderLineLength {
dHeader += subh + FWS
subh = ""
l = 0
}
}
dHeader += subh + CRLF
*email = append([]byte(dHeader), *email...)
return nil
}
// Verify verifies an email and returns
// state: SUCCESS or PERMFAIL or TEMPFAIL, TESTINGSUCCESS, TESTINGPERMFAIL
// TESTINGTEMPFAIL or NOTSIGNED
// error: if an error occurs during verification
func Verify(email *[]byte, opts ...DNSOpt) (verifyOutput, error) {
// parse email
dkimHeader, err := GetHeader(email)
if err != nil {
if err == ErrDkimHeaderNotFound {
return NOTSIGNED, ErrDkimHeaderNotFound
}
return PERMFAIL, err
}
// we do not check the query method here: if it were anything else, validation would already have failed
pubKey, verifyOutputOnError, err := NewPubKeyRespFromDNS(dkimHeader.Selector, dkimHeader.Domain, opts...)
if err != nil {
// fix https://github.com/toorop/go-dkim/issues/1
//return getVerifyOutput(verifyOutputOnError, err, pubKey.FlagTesting)
return verifyOutputOnError, err
}
// Normalize
headers, body, err := canonicalize(email, dkimHeader.MessageCanonicalization, dkimHeader.Headers)
if err != nil {
return getVerifyOutput(PERMFAIL, err, pubKey.FlagTesting)
}
sigHash := strings.Split(dkimHeader.Algorithm, "-")
// check if hash algo are compatible
compatible := false
for _, algo := range pubKey.HashAlgo {
if sigHash[1] == algo {
compatible = true
break
}
}
if !compatible {
return getVerifyOutput(PERMFAIL, ErrVerifyInappropriateHashAlgo, pubKey.FlagTesting)
}
// expired ?
if !dkimHeader.SignatureExpiration.IsZero() && dkimHeader.SignatureExpiration.Second() < time.Now().Second() {
return getVerifyOutput(PERMFAIL, ErrVerifySignatureHasExpired, pubKey.FlagTesting)
}
//println("|" + string(body) + "|")
// get body hash
bodyHash, err := getBodyHash(&body, sigHash[1], dkimHeader.BodyLength)
if err != nil {
return getVerifyOutput(PERMFAIL, err, pubKey.FlagTesting)
}
//println(bodyHash)
if bodyHash != dkimHeader.BodyHash {
return getVerifyOutput(PERMFAIL, ErrVerifyBodyHash, pubKey.FlagTesting)
}
// compute sig
dkimHeaderCano, err := canonicalizeHeader(dkimHeader.rawForSign, strings.Split(dkimHeader.MessageCanonicalization, "/")[0])
if err != nil {
return getVerifyOutput(TEMPFAIL, err, pubKey.FlagTesting)
}
toSignStr := string(headers) + dkimHeaderCano
toSign := bytes.TrimRight([]byte(toSignStr), " \r\n")
err = verifySignature(toSign, dkimHeader.SignatureData, &pubKey.PubKey, sigHash[1])
if err != nil {
return getVerifyOutput(PERMFAIL, err, pubKey.FlagTesting)
}
return SUCCESS, nil
}
// getVerifyOutput returns output of verify fct according to the testing flag
func getVerifyOutput(status verifyOutput, err error, flagTesting bool) (verifyOutput, error) {
if !flagTesting {
return status, err
}
switch status {
case SUCCESS:
return TESTINGSUCCESS, err
case PERMFAIL:
return TESTINGPERMFAIL, err
case TEMPFAIL:
return TESTINGTEMPFAIL, err
}
// should never happen, but the compiler complains without a return
return status, err
}
// canonicalize returns canonicalized version of header and body
func canonicalize(email *[]byte, cano string, h []string) (headers, body []byte, err error) {
body = []byte{}
rxReduceWS := regexp.MustCompile(`[ \t]+`)
rawHeaders, rawBody, err := getHeadersBody(email)
if err != nil {
return nil, nil, err
}
canonicalizations := strings.Split(cano, "/")
// canonicalize header
// If there are multiple instances of a field, we must keep them from the bottom to the top
// for each header to keep, we walk through all the available headers
// If multi instance of a field we must keep it from the bottom to the top
var match *list.Element
headersToKeepList := list.New()
for _, headerToKeep := range h {
match = nil
headerToKeepToLower := strings.ToLower(headerToKeep)
for e := headersList.Front(); e != nil; e = e.Next() {
//fmt.Printf("|%s|\n", e.Value.(string))
t := strings.Split(e.Value.(string), ":")
if strings.ToLower(t[0]) == headerToKeepToLower {
match = e
}
}
if match != nil {
headersToKeepList.PushBack(match.Value.(string) + "\r\n")
headersList.Remove(match)
}
}
//if canonicalizations[0] == "simple" {
for e := headersToKeepList.Front(); e != nil; e = e.Next() {
cHeader, err := canonicalizeHeader(e.Value.(string), canonicalizations[0])
if err != nil {
return headers, body, err
}
headers = append(headers, []byte(cHeader)...)
}
// canonicalize body
if canonicalizations[1] == "simple" {
// simple
// The "simple" body canonicalization algorithm ignores all empty lines
// at the end of the message body. An empty line is a line of zero
// length after removal of the line terminator. If there is no body or
// no trailing CRLF on the message body, a CRLF is added. It makes no
// other changes to the message body. In more formal terms, the
// "simple" body canonicalization algorithm converts "*CRLF" at the end
// of the body to a single "CRLF".
// Note that a completely empty or missing body is canonicalized as a
// single "CRLF"; that is, the canonicalized length will be 2 octets.
body = bytes.TrimRight(rawBody, "\r\n")
body = append(body, []byte{13, 10}...)
} else {
// relaxed
// Ignore all whitespace at the end of lines. Implementations
// MUST NOT remove the CRLF at the end of the line.
// Reduce all sequences of WSP within a line to a single SP
// character.
// Ignore all empty lines at the end of the message body. "Empty
// line" is defined in Section 3.4.3. If the body is non-empty but
// does not end with a CRLF, a CRLF is added. (For email, this is
// only possible when using extensions to SMTP or non-SMTP transport
// mechanisms.)
rawBody = rxReduceWS.ReplaceAll(rawBody, []byte(" "))
for _, line := range bytes.SplitAfter(rawBody, []byte{10}) {
line = bytes.TrimRight(line, " \r\n")
body = append(body, line...)
body = append(body, []byte{13, 10}...)
}
body = bytes.TrimRight(body, "\r\n")
body = append(body, []byte{13, 10}...)
}
return
}
// canonicalizeHeader returns canonicalized version of header
func canonicalizeHeader(header string, algo string) (string, error) {
//rxReduceWS := regexp.MustCompile(`[ \t]+`)
if algo == "simple" {
// The "simple" header canonicalization algorithm does not change header
// fields in any way. Header fields MUST be presented to the signing or
// verification algorithm exactly as they are in the message being
// signed or verified. In particular, header field names MUST NOT be
// case folded and whitespace MUST NOT be changed.
return header, nil
} else if algo == "relaxed" {
// The "relaxed" header canonicalization algorithm MUST apply the
// following steps in order:
// Convert all header field names (not the header field values) to
// lowercase. For example, convert "SUBJect: AbC" to "subject: AbC".
// Unfold all header field continuation lines as described in
// [RFC5322]; in particular, lines with terminators embedded in
// continued header field values (that is, CRLF sequences followed by
// WSP) MUST be interpreted without the CRLF. Implementations MUST
// NOT remove the CRLF at the end of the header field value.
// Convert all sequences of one or more WSP characters to a single SP
// character. WSP characters here include those before and after a
// line folding boundary.
// Delete all WSP characters at the end of each unfolded header field
// value.
// Delete any WSP characters remaining before and after the colon
// separating the header field name from the header field value. The
// colon separator MUST be retained.
kv := strings.SplitN(header, ":", 2)
if len(kv) != 2 {
return header, ErrBadMailFormatHeaders
}
k := strings.ToLower(kv[0])
k = strings.TrimSpace(k)
v := removeFWS(kv[1])
//v = rxReduceWS.ReplaceAllString(v, " ")
//v = strings.TrimSpace(v)
return k + ":" + v + CRLF, nil
}
return header, ErrSignBadCanonicalization
}
// getBodyHash returns the hash (base64-encoded) of the body
func getBodyHash(body *[]byte, algo string, bodyLength uint) (string, error) {
var h hash.Hash
if algo == "sha1" {
h = sha1.New()
} else {
h = sha256.New()
}
toH := *body
// if l tag (body length)
if bodyLength != 0 {
if uint(len(toH)) < bodyLength {
return "", ErrBadDKimTagLBodyTooShort
}
toH = toH[0:bodyLength]
}
h.Write(toH)
return base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}
// getSignature returns the signature of toSign using key
func getSignature(toSign *[]byte, key *rsa.PrivateKey, algo string) (string, error) {
var h1 hash.Hash
var h2 crypto.Hash
switch algo {
case "sha1":
h1 = sha1.New()
h2 = crypto.SHA1
break
case "sha256":
h1 = sha256.New()
h2 = crypto.SHA256
break
default:
return "", ErrVerifyInappropriateHashAlgo
}
// sign
h1.Write(*toSign)
sig, err := rsa.SignPKCS1v15(rand.Reader, key, h2, h1.Sum(nil))
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(sig), nil
}
// verifySignature verifies the signature using the public key
func verifySignature(toSign []byte, sig64 string, key *rsa.PublicKey, algo string) error {
var h1 hash.Hash
var h2 crypto.Hash
switch algo {
case "sha1":
h1 = sha1.New()
h2 = crypto.SHA1
break
case "sha256":
h1 = sha256.New()
h2 = crypto.SHA256
break
default:
return ErrVerifyInappropriateHashAlgo
}
h1.Write(toSign)
sig, err := base64.StdEncoding.DecodeString(sig64)
if err != nil {
return err
}
return rsa.VerifyPKCS1v15(key, h2, h1.Sum(nil), sig)
}
// removeFWS removes all FWS from string
func removeFWS(in string) string {
rxReduceWS := regexp.MustCompile(`[ \t]+`)
out := strings.Replace(in, "\n", "", -1)
out = strings.Replace(out, "\r", "", -1)
out = rxReduceWS.ReplaceAllString(out, " ")
return strings.TrimSpace(out)
}
// validateCanonicalization validates the canonicalization (c tag)
func validateCanonicalization(cano string) (string, error) {
p := strings.Split(cano, "/")
if len(p) > 2 {
return "", ErrSignBadCanonicalization
}
if len(p) == 1 {
cano = cano + "/simple"
}
for _, c := range p {
if c != "simple" && c != "relaxed" {
return "", ErrSignBadCanonicalization
}
}
return cano, nil
}
// getHeadersList returns headers as list
func getHeadersList(rawHeader *[]byte) (*list.List, error) {
headersList := list.New()
currentHeader := []byte{}
for _, line := range bytes.SplitAfter(*rawHeader, []byte{10}) {
if line[0] == 32 || line[0] == 9 {
if len(currentHeader) == 0 {
return headersList, ErrBadMailFormatHeaders
}
currentHeader = append(currentHeader, line...)
} else {
// New header, save current if exists
if len(currentHeader) != 0 {
headersList.PushBack(string(bytes.TrimRight(currentHeader, "\r\n")))
currentHeader = []byte{}
}
currentHeader = append(currentHeader, line...)
}
}
headersList.PushBack(string(currentHeader))
return headersList, nil
}
// getHeadersBody returns the headers and the body
func getHeadersBody(email *[]byte) ([]byte, []byte, error) {
substitutedEmail := *email
// only replace \n with \r\n when \r\n\r\n does not exist
if bytes.Index(*email, []byte{13, 10, 13, 10}) < 0 {
// \n -> \r\n
substitutedEmail = bytes.Replace(*email, []byte{10}, []byte{13, 10}, -1)
}
parts := bytes.SplitN(substitutedEmail, []byte{13, 10, 13, 10}, 2)
if len(parts) != 2 {
return []byte{}, []byte{}, ErrBadMailFormat
}
// Empty body
if len(parts[1]) == 0 {
parts[1] = []byte{13, 10}
}
return parts[0], parts[1], nil
}

vendor/github.com/toorop/go-dkim/dkimHeader.go

@ -0,0 +1,545 @@
package dkim
import (
"bytes"
"fmt"
"net/mail"
"net/textproto"
"strconv"
"strings"
"time"
)
type DKIMHeader struct {
// Version This tag defines the version of DKIM
// specification that applies to the signature record.
// tag v
Version string
// The algorithm used to generate the signature..
// Verifiers MUST support "rsa-sha1" and "rsa-sha256";
// Signers SHOULD sign using "rsa-sha256".
// tag a
Algorithm string
// The signature data (base64).
// Whitespace is ignored in this value and MUST be
// ignored when reassembling the original signature.
// In particular, the signing process can safely insert
// FWS in this value in arbitrary places to conform to line-length
// limits.
// tag b
SignatureData string
// The hash of the canonicalized body part of the message as
// limited by the "l=" tag (base64; REQUIRED).
// Whitespace is ignored in this value and MUST be ignored when reassembling the original
// signature. In particular, the signing process can safely insert
// FWS in this value in arbitrary places to conform to line-length
// limits.
// tag bh
BodyHash string
// Message canonicalization (plain-text; OPTIONAL, default is
//"simple/simple"). This tag informs the Verifier of the type of
// canonicalization used to prepare the message for signing. It
// consists of two names separated by a "slash" (%d47) character,
// corresponding to the header and body canonicalization algorithms,
// respectively. These algorithms are described in Section 3.4. If
// only one algorithm is named, that algorithm is used for the header
// and "simple" is used for the body. For example, "c=relaxed" is
// treated the same as "c=relaxed/simple".
// tag c
MessageCanonicalization string
// The SDID claiming responsibility for an introduction of a message
// into the mail stream (plain-text; REQUIRED). Hence, the SDID
// value is used to form the query for the public key. The SDID MUST
// correspond to a valid DNS name under which the DKIM key record is
// published. The conventions and semantics used by a Signer to
// create and use a specific SDID are outside the scope of this
// specification, as is any use of those conventions and semantics.
// When presented with a signature that does not meet these
// requirements, Verifiers MUST consider the signature invalid.
// Internationalized domain names MUST be encoded as A-labels, as
// described in Section 2.3 of [RFC5890].
// tag d
Domain string
// Signed header fields (plain-text, but see description; REQUIRED).
// A colon-separated list of header field names that identify the
// header fields presented to the signing algorithm. The field MUST
// contain the complete list of header fields in the order presented
// to the signing algorithm. The field MAY contain names of header
// fields that do not exist when signed; nonexistent header fields do
// not contribute to the signature computation (that is, they are
// treated as the null input, including the header field name, the
// separating colon, the header field value, and any CRLF
// terminator). The field MAY contain multiple instances of a header
// field name, meaning multiple occurrences of the corresponding
// header field are included in the header hash. The field MUST NOT
// include the DKIM-Signature header field that is being created or
// verified but may include others. Folding whitespace (FWS) MAY be
// included on either side of the colon separator. Header field
// names MUST be compared against actual header field names in a
// case-insensitive manner. This list MUST NOT be empty. See
// Section 5.4 for a discussion of choosing header fields to sign and
// Section 5.4.2 for requirements when signing multiple instances of
// a single field.
// tag h
Headers []string
// The Agent or User Identifier (AUID) on behalf of which the SDID is
// taking responsibility (dkim-quoted-printable; OPTIONAL, default is
// an empty local-part followed by an "@" followed by the domain from
// the "d=" tag).
// The syntax is a standard email address where the local-part MAY be
// omitted. The domain part of the address MUST be the same as, or a
// subdomain of, the value of the "d=" tag.
// Internationalized domain names MUST be encoded as A-labels, as
// described in Section 2.3 of [RFC5890].
// tag i
Auid string
// Body length count (plain-text unsigned decimal integer; OPTIONAL,
// default is entire body). This tag informs the Verifier of the
// number of octets in the body of the email after canonicalization
// included in the cryptographic hash, starting from 0 immediately
// following the CRLF preceding the body. This value MUST NOT be
// larger than the actual number of octets in the canonicalized
// message body. See further discussion in Section 8.2.
// tag l
BodyLength uint
// A colon-separated list of query methods used to retrieve the
// public key (plain-text; OPTIONAL, default is "dns/txt"). Each
// query method is of the form "type[/options]", where the syntax and
// semantics of the options depend on the type and specified options.
// If there are multiple query mechanisms listed, the choice of query
// mechanism MUST NOT change the interpretation of the signature.
// Implementations MUST use the recognized query mechanisms in the
// order presented. Unrecognized query mechanisms MUST be ignored.
// Currently, the only valid value is "dns/txt", which defines the
// DNS TXT resource record (RR) lookup algorithm described elsewhere
// in this document. The only option defined for the "dns" query
// type is "txt", which MUST be included. Verifiers and Signers MUST
// support "dns/txt".
// tag q
QueryMethods []string
// The selector subdividing the namespace for the "d=" (domain) tag
// (plain-text; REQUIRED).
// Internationalized selector names MUST be encoded as A-labels, as
// described in Section 2.3 of [RFC5890].
// tag s
Selector string
// Signature Timestamp (plain-text unsigned decimal integer;
// RECOMMENDED, default is an unknown creation time). The time that
// this signature was created. The format is the number of seconds
// since 00:00:00 on January 1, 1970 in the UTC time zone. The value
// is expressed as an unsigned integer in decimal ASCII. This value
// is not constrained to fit into a 31- or 32-bit integer.
// Implementations SHOULD be prepared to handle values up to at least
// 10^12 (until approximately AD 200,000; this fits into 40 bits).
// To avoid denial-of-service attacks, implementations MAY consider
// any value longer than 12 digits to be infinite. Leap seconds are
// not counted. Implementations MAY ignore signatures that have a
// timestamp in the future.
// tag t
SignatureTimestamp time.Time
// Signature Expiration (plain-text unsigned decimal integer;
// RECOMMENDED, default is no expiration). The format is the same as
// in the "t=" tag, represented as an absolute date, not as a time
// delta from the signing timestamp. The value is expressed as an
// unsigned integer in decimal ASCII, with the same constraints on
// the value in the "t=" tag. Signatures MAY be considered invalid
// if the verification time at the Verifier is past the expiration
// date. The verification time should be the time that the message
// was first received at the administrative domain of the Verifier if
// that time is reliably available; otherwise, the current time
// should be used. The value of the "x=" tag MUST be greater than
// the value of the "t=" tag if both are present.
//tag x
SignatureExpiration time.Time
// Copied header fields (dkim-quoted-printable, but see description;
// OPTIONAL, default is null). A vertical-bar-separated list of
// selected header fields present when the message was signed,
// including both the field name and value. It is not required to
// include all header fields present at the time of signing. This
// field need not contain the same header fields listed in the "h="
// tag. The header field text itself must encode the vertical bar
// ("|", %x7C) character (i.e., vertical bars in the "z=" text are
// meta-characters, and any actual vertical bar characters in a
// copied header field must be encoded). Note that all whitespace
// must be encoded, including whitespace between the colon and the
// header field value. After encoding, FWS MAY be added at arbitrary
// locations in order to avoid excessively long lines; such
// whitespace is NOT part of the value of the header field and MUST
// be removed before decoding.
// The header fields referenced by the "h=" tag refer to the fields
// in the [RFC5322] header of the message, not to any copied fields
// in the "z=" tag. Copied header field values are for diagnostic
// use.
// tag z
CopiedHeaderFields []string
// HeaderMailFromDomain store the raw email address of the header Mail From
// used for verifying in case of multiple DKIM header (we will prioritise
// header with d = mail from domain)
//HeaderMailFromDomain string
// rawForSign represents the raw part (without canonicalization) of the header
// used for computing the signature in the verify process
rawForSign string
}
// newDkimHeaderBySigOptions returns a new DKIMHeader initialized with the SigOptions values
func newDkimHeaderBySigOptions(options SigOptions) *DKIMHeader {
h := new(DKIMHeader)
h.Version = "1"
h.Algorithm = options.Algo
h.MessageCanonicalization = options.Canonicalization
h.Domain = options.Domain
h.Headers = options.Headers
h.Auid = options.Auid
h.BodyLength = options.BodyLength
h.QueryMethods = options.QueryMethods
h.Selector = options.Selector
if options.AddSignatureTimestamp {
h.SignatureTimestamp = time.Now()
}
if options.SignatureExpireIn > 0 {
h.SignatureExpiration = time.Now().Add(time.Duration(options.SignatureExpireIn) * time.Second)
}
h.CopiedHeaderFields = options.CopiedHeaderFields
return h
}
// GetHeader returns a new DKIMHeader by parsing an email.
// Note: according to RFC 6376 an email can have multiple DKIM-Signature headers;
// in this case we return the last inserted, or the last one whose d= tag equals the From domain
func GetHeader(email *[]byte) (*DKIMHeader, error) {
m, err := mail.ReadMessage(bytes.NewReader(*email))
if err != nil {
return nil, err
}
// DKIM header ?
if len(m.Header[textproto.CanonicalMIMEHeaderKey("DKIM-Signature")]) == 0 {
return nil, ErrDkimHeaderNotFound
}
// Get mail from domain
mailFromDomain := ""
mailfrom, err := mail.ParseAddress(m.Header.Get(textproto.CanonicalMIMEHeaderKey("From")))
if err != nil {
if err.Error() != "mail: no address" {
return nil, err
}
} else {
t := strings.SplitAfter(mailfrom.Address, "@")
if len(t) > 1 {
mailFromDomain = strings.ToLower(t[1])
}
}
// get raw dkim header
// we can't use m.Header because the header keys would be converted with textproto.CanonicalMIMEHeaderKey;
// i.e. if the key in the header is not DKIM-Signature but Dkim-Signature, DKIM-signature, or some other
// combination of case, verification would fail.
rawHeaders, _, err := getHeadersBody(email)
if err != nil {
return nil, ErrBadMailFormat
}
rawHeadersList, err := getHeadersList(&rawHeaders)
if err != nil {
return nil, err
}
dkHeaders := []string{}
for h := rawHeadersList.Front(); h != nil; h = h.Next() {
if strings.HasPrefix(strings.ToLower(h.Value.(string)), "dkim-signature") {
dkHeaders = append(dkHeaders, h.Value.(string))
}
}
var keep *DKIMHeader
var keepErr error
//for _, dk := range m.Header[textproto.CanonicalMIMEHeaderKey("DKIM-Signature")] {
for _, h := range dkHeaders {
parsed, err := parseDkHeader(h)
// if malformed dkim header try next
if err != nil {
keepErr = err
continue
}
// keep the first DKIM header
if keep == nil {
keep = parsed
}
// if the d= tag matches the From domain, keep this header and return
if mailFromDomain == parsed.Domain {
return parsed, nil
}
}
if keep == nil {
return nil, keepErr
}
return keep, nil
}
// parseDkHeader parses a raw DKIM header
func parseDkHeader(header string) (dkh *DKIMHeader, err error) {
dkh = new(DKIMHeader)
keyVal := strings.SplitN(header, ":", 2)
t := strings.LastIndex(header, "b=")
if t == -1 {
return nil, ErrDkimHeaderBTagNotFound
}
dkh.rawForSign = header[0 : t+2]
p := strings.IndexByte(header[t:], ';')
if p != -1 {
dkh.rawForSign = dkh.rawForSign + header[t+p:]
}
// Mandatory
mandatoryFlags := make(map[string]bool, 7) //(b'v', b'a', b'b', b'bh', b'd', b'h', b's')
mandatoryFlags["v"] = false
mandatoryFlags["a"] = false
mandatoryFlags["b"] = false
mandatoryFlags["bh"] = false
mandatoryFlags["d"] = false
mandatoryFlags["h"] = false
mandatoryFlags["s"] = false
// default values
dkh.MessageCanonicalization = "simple/simple"
dkh.QueryMethods = []string{"dns/txt"}
// unfold && clean
val := removeFWS(keyVal[1])
val = strings.Replace(val, " ", "", -1)
fs := strings.Split(val, ";")
for _, f := range fs {
if f == "" {
continue
}
flagData := strings.SplitN(f, "=", 2)
// https://github.com/toorop/go-dkim/issues/2
// if flag is not in the form key=value (eg doesn't have "=")
if len(flagData) != 2 {
return nil, ErrDkimHeaderBadFormat
}
flag := strings.ToLower(strings.TrimSpace(flagData[0]))
data := strings.TrimSpace(flagData[1])
switch flag {
case "v":
if data != "1" {
return nil, ErrDkimVersionNotsupported
}
dkh.Version = data
mandatoryFlags["v"] = true
case "a":
dkh.Algorithm = strings.ToLower(data)
if dkh.Algorithm != "rsa-sha1" && dkh.Algorithm != "rsa-sha256" {
return nil, ErrSignBadAlgo
}
mandatoryFlags["a"] = true
case "b":
//dkh.SignatureData = removeFWS(data)
// remove all space
dkh.SignatureData = strings.Replace(removeFWS(data), " ", "", -1)
if len(dkh.SignatureData) != 0 {
mandatoryFlags["b"] = true
}
case "bh":
dkh.BodyHash = removeFWS(data)
if len(dkh.BodyHash) != 0 {
mandatoryFlags["bh"] = true
}
case "d":
dkh.Domain = strings.ToLower(data)
if len(dkh.Domain) != 0 {
mandatoryFlags["d"] = true
}
case "h":
data = strings.ToLower(data)
dkh.Headers = strings.Split(data, ":")
if len(dkh.Headers) != 0 {
mandatoryFlags["h"] = true
}
fromFound := false
for _, h := range dkh.Headers {
if h == "from" {
fromFound = true
}
}
if !fromFound {
return nil, ErrDkimHeaderNoFromInHTag
}
case "s":
dkh.Selector = strings.ToLower(data)
if len(dkh.Selector) != 0 {
mandatoryFlags["s"] = true
}
case "c":
dkh.MessageCanonicalization, err = validateCanonicalization(strings.ToLower(data))
if err != nil {
return nil, err
}
case "i":
if data != "" {
if !strings.HasSuffix(data, dkh.Domain) {
return nil, ErrDkimHeaderDomainMismatch
}
dkh.Auid = data
}
case "l":
ui, err := strconv.ParseUint(data, 10, 32)
if err != nil {
return nil, err
}
dkh.BodyLength = uint(ui)
case "q":
dkh.QueryMethods = strings.Split(data, ":")
if len(dkh.QueryMethods) == 0 || strings.ToLower(dkh.QueryMethods[0]) != "dns/txt" {
return nil, errQueryMethodNotsupported
}
case "t":
ts, err := strconv.ParseInt(data, 10, 64)
if err != nil {
return nil, err
}
dkh.SignatureTimestamp = time.Unix(ts, 0)
case "x":
ts, err := strconv.ParseInt(data, 10, 64)
if err != nil {
return nil, err
}
dkh.SignatureExpiration = time.Unix(ts, 0)
case "z":
dkh.CopiedHeaderFields = strings.Split(data, "|")
}
}
// are all mandatory flags present?
for _, p := range mandatoryFlags {
if !p {
return nil, ErrDkimHeaderMissingRequiredTag
}
}
// default for i/Auid
if dkh.Auid == "" {
dkh.Auid = "@" + dkh.Domain
}
// default for query methods
if len(dkh.QueryMethods) == 0 {
dkh.QueryMethods = []string{"dns/txt"}
}
return dkh, nil
}
// getHeaderBaseForSigning returns the base header for signing.
// TODO: some refactoring needed...
func (d *DKIMHeader) getHeaderBaseForSigning(bodyHash string) string {
h := "DKIM-Signature: v=" + d.Version + "; a=" + d.Algorithm + "; q=" + strings.Join(d.QueryMethods, ":") + "; c=" + d.MessageCanonicalization + ";" + CRLF + TAB
subh := "s=" + d.Selector + ";"
if len(subh)+len(d.Domain)+4 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += " d=" + d.Domain + ";"
// Auid
if len(d.Auid) != 0 {
if len(subh)+len(d.Auid)+4 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += " i=" + d.Auid + ";"
}
/*h := "DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=tmail.io; i=@tmail.io;" + FWS
subh := "q=dns/txt; s=test;"*/
// signature timestamp
if !d.SignatureTimestamp.IsZero() {
ts := d.SignatureTimestamp.Unix()
if len(subh)+14 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += " t=" + fmt.Sprintf("%d", ts) + ";"
}
if len(subh)+len(d.Domain)+4 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
// Expiration
if !d.SignatureExpiration.IsZero() {
ts := d.SignatureExpiration.Unix()
if len(subh)+14 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += " x=" + fmt.Sprintf("%d", ts) + ";"
}
// body length
if d.BodyLength != 0 {
bodyLengthStr := fmt.Sprintf("%d", d.BodyLength)
if len(subh)+len(bodyLengthStr)+4 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += " l=" + bodyLengthStr + ";"
}
// Headers
if len(subh)+len(d.Headers)+4 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += " h="
for _, header := range d.Headers {
if len(subh)+len(header)+1 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += header + ":"
}
subh = subh[:len(subh)-1] + ";"
// BodyHash
if len(subh)+5+len(bodyHash) > MaxHeaderLineLength {
h += subh + FWS
subh = ""
} else {
subh += " "
}
subh += "bh="
l := len(subh)
for _, c := range bodyHash {
subh += string(c)
l++
if l >= MaxHeaderLineLength {
h += subh + FWS
subh = ""
l = 0
}
}
h += subh + ";" + FWS + "b="
return h
}
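For context on how the parser above is typically driven, here is a minimal sketch of calling GetHeader from application code. The file name message.eml is illustrative, and the import path simply mirrors the vendored location of this package; this is not code from the diff itself.

package main

import (
    "fmt"
    "os"

    dkim "github.com/toorop/go-dkim"
)

func main() {
    // read a raw RFC 5322 message (headers + body) from disk
    raw, err := os.ReadFile("message.eml")
    if err != nil {
        panic(err)
    }
    // GetHeader parses the DKIM-Signature field(s) of the message
    h, err := dkim.GetHeader(&raw)
    if err != nil {
        fmt.Println("no usable DKIM-Signature:", err)
        return
    }
    fmt.Printf("d=%s s=%s a=%s\n", h.Domain, h.Selector, h.Algorithm)
}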

94
vendor/github.com/toorop/go-dkim/errors.go generated vendored Normal file
View File

@ -0,0 +1,94 @@
package dkim
import (
"errors"
)
var (
// ErrSignPrivateKeyRequired when there is no private key in config
ErrSignPrivateKeyRequired = errors.New("PrivateKey is required")
// ErrSignDomainRequired when there is no domain defined in config
ErrSignDomainRequired = errors.New("Domain is required")
// ErrSignSelectorRequired when there is no Selector defined in config
ErrSignSelectorRequired = errors.New("Selector is required")
// ErrSignHeaderShouldContainsFrom when Headers is specified but does not at least contain 'from'
ErrSignHeaderShouldContainsFrom = errors.New("header must contain 'from' field")
// ErrSignBadCanonicalization when the Canonicalization parameter is invalid
ErrSignBadCanonicalization = errors.New("bad Canonicalization parameter")
// ErrCandNotParsePrivateKey when unable to parse private key
ErrCandNotParsePrivateKey = errors.New("can not parse private key, check format (pem) and validity")
// ErrSignBadAlgo Bad algorithm
ErrSignBadAlgo = errors.New("bad algorithm. Only rsa-sha1 or rsa-sha256 are permitted")
// ErrBadMailFormat unable to parse mail
ErrBadMailFormat = errors.New("bad mail format")
// ErrBadMailFormatHeaders bad headers format (not DKIM Header)
ErrBadMailFormatHeaders = errors.New("bad mail format found in headers")
// ErrBadDKimTagLBodyTooShort bad l tag
ErrBadDKimTagLBodyTooShort = errors.New("bad tag l or bodyLength option. Body length < l value")
// ErrDkimHeaderBadFormat when errors found in DKIM header
ErrDkimHeaderBadFormat = errors.New("bad DKIM header format")
// ErrDkimHeaderNotFound when there's no DKIM-Signature header in an email we have to verify
ErrDkimHeaderNotFound = errors.New("no DKIM-Signature header field found ")
// ErrDkimHeaderBTagNotFound when there's no b tag
ErrDkimHeaderBTagNotFound = errors.New("no tag 'b' found in dkim header")
// ErrDkimHeaderNoFromInHTag when from is missing in h tag
ErrDkimHeaderNoFromInHTag = errors.New("'from' header is missing in h tag")
// ErrDkimHeaderMissingRequiredTag when a required tag is missing
ErrDkimHeaderMissingRequiredTag = errors.New("signature missing required tag")
// ErrDkimHeaderDomainMismatch if i tag is not a sub domain of d tag
ErrDkimHeaderDomainMismatch = errors.New("domain mismatch")
// ErrDkimVersionNotsupported version not supported
ErrDkimVersionNotsupported = errors.New("incompatible version")
// Query method unsupported
errQueryMethodNotsupported = errors.New("query method not supported")
// ErrVerifyBodyHash when body hash doesn't verify
ErrVerifyBodyHash = errors.New("body hash did not verify")
// ErrVerifyNoKeyForSignature no key
ErrVerifyNoKeyForSignature = errors.New("no key for verify")
// ErrVerifyKeyUnavailable when the service (dns) is unavailable
ErrVerifyKeyUnavailable = errors.New("key unavailable")
// ErrVerifyTagVMustBeTheFirst if present, the v tag must be the first in the record
ErrVerifyTagVMustBeTheFirst = errors.New("pub key syntax error: v tag must be the first")
// ErrVerifyVersionMusBeDkim1 if present, flag v (version) must be DKIM1
ErrVerifyVersionMusBeDkim1 = errors.New("flag v must be set to DKIM1")
// ErrVerifyBadKeyType bad type for pub key (only rsa is accepted)
ErrVerifyBadKeyType = errors.New("bad type for key type")
// ErrVerifyRevokedKey key(s) for this selector is revoked (p is empty)
ErrVerifyRevokedKey = errors.New("revoked key")
// ErrVerifyBadKey when we can't parse pubkey
ErrVerifyBadKey = errors.New("unable to parse pub key")
// ErrVerifyNoKey when no key is found on DNS record
ErrVerifyNoKey = errors.New("no public key found in DNS TXT")
// ErrVerifySignatureHasExpired when signature has expired
ErrVerifySignatureHasExpired = errors.New("signature has expired")
// ErrVerifyInappropriateHashAlgo when the h tag in the pub key doesn't contain the hash algo from the a tag of the DKIM header
ErrVerifyInappropriateHashAlgo = errors.New("inappropriate hash algorithm")
)

181
vendor/github.com/toorop/go-dkim/pubKeyRep.go generated vendored Normal file
View File

@ -0,0 +1,181 @@
package dkim
import (
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"io/ioutil"
"mime/quotedprintable"
"net"
"strings"
)
// PubKeyRep represents a parsed version of public key record
type PubKeyRep struct {
Version string
HashAlgo []string
KeyType string
Note string
PubKey rsa.PublicKey
ServiceType []string
FlagTesting bool // flag y
FlagIMustBeD bool // flag i
}
// DNSOptions holds settings for looking up DNS records
type DNSOptions struct {
netLookupTXT func(name string) ([]string, error)
}
// DNSOpt represents an optional setting for looking up DNS records
type DNSOpt interface {
apply(*DNSOptions)
}
type dnsOpt func(*DNSOptions)
func (opt dnsOpt) apply(dnsOpts *DNSOptions) {
opt(dnsOpts)
}
// DNSOptLookupTXT sets the function to use to lookup TXT records.
//
// This should probably only be used in tests.
func DNSOptLookupTXT(netLookupTXT func(name string) ([]string, error)) DNSOpt {
return dnsOpt(func(opts *DNSOptions) {
opts.netLookupTXT = netLookupTXT
})
}
// NewPubKeyRespFromDNS retrieves the TXT record from DNS based on the specified domain and selector
// and parses it.
func NewPubKeyRespFromDNS(selector, domain string, opts ...DNSOpt) (*PubKeyRep, verifyOutput, error) {
dnsOpts := DNSOptions{}
for _, opt := range opts {
opt.apply(&dnsOpts)
}
if dnsOpts.netLookupTXT == nil {
dnsOpts.netLookupTXT = net.LookupTXT
}
txt, err := dnsOpts.netLookupTXT(selector + "._domainkey." + domain)
if err != nil {
if strings.HasSuffix(err.Error(), "no such host") {
return nil, PERMFAIL, ErrVerifyNoKeyForSignature
}
return nil, TEMPFAIL, ErrVerifyKeyUnavailable
}
// empty record
if len(txt) == 0 {
return nil, PERMFAIL, ErrVerifyNoKeyForSignature
}
// parsing: we keep the first record
// TODO: handle the case of multiple records
return NewPubKeyResp(txt[0])
}
// NewPubKeyResp parses DKIM record (usually from DNS)
func NewPubKeyResp(dkimRecord string) (*PubKeyRep, verifyOutput, error) {
pkr := new(PubKeyRep)
pkr.Version = "DKIM1"
pkr.HashAlgo = []string{"sha1", "sha256"}
pkr.KeyType = "rsa"
pkr.FlagTesting = false
pkr.FlagIMustBeD = false
p := strings.Split(dkimRecord, ";")
for i, data := range p {
keyVal := strings.SplitN(data, "=", 2)
val := ""
if len(keyVal) > 1 {
val = strings.TrimSpace(keyVal[1])
}
switch strings.ToLower(strings.TrimSpace(keyVal[0])) {
case "v":
// RFC: if this tag is specified it MUST be the first in the record
if i != 0 {
return nil, PERMFAIL, ErrVerifyTagVMustBeTheFirst
}
pkr.Version = val
if pkr.Version != "DKIM1" {
return nil, PERMFAIL, ErrVerifyVersionMusBeDkim1
}
case "h":
p := strings.Split(strings.ToLower(val), ":")
pkr.HashAlgo = []string{}
for _, h := range p {
h = strings.TrimSpace(h)
if h == "sha1" || h == "sha256" {
pkr.HashAlgo = append(pkr.HashAlgo, h)
}
}
// if empty switch back to default
if len(pkr.HashAlgo) == 0 {
pkr.HashAlgo = []string{"sha1", "sha256"}
}
case "k":
if strings.ToLower(val) != "rsa" {
return nil, PERMFAIL, ErrVerifyBadKeyType
}
case "n":
qp, err := ioutil.ReadAll(quotedprintable.NewReader(strings.NewReader(val)))
if err == nil {
val = string(qp)
}
pkr.Note = val
case "p":
rawkey := val
if rawkey == "" {
return nil, PERMFAIL, ErrVerifyRevokedKey
}
un64, err := base64.StdEncoding.DecodeString(rawkey)
if err != nil {
return nil, PERMFAIL, ErrVerifyBadKey
}
pk, err := x509.ParsePKIXPublicKey(un64)
if pk, ok := pk.(*rsa.PublicKey); ok {
pkr.PubKey = *pk
}
case "s":
t := strings.Split(strings.ToLower(val), ":")
for _, tt := range t {
tt = strings.TrimSpace(tt)
switch tt {
case "*":
pkr.ServiceType = append(pkr.ServiceType, "all")
case "email":
pkr.ServiceType = append(pkr.ServiceType, tt)
}
}
case "t":
flags := strings.Split(strings.ToLower(val), ":")
for _, flag := range flags {
flag = strings.TrimSpace(flag)
switch flag {
case "y":
pkr.FlagTesting = true
case "s":
pkr.FlagIMustBeD = true
}
}
}
}
// if no pubkey
if pkr.PubKey == (rsa.PublicKey{}) {
return nil, PERMFAIL, ErrVerifyNoKey
}
// No service type
if len(pkr.ServiceType) == 0 {
pkr.ServiceType = []string{"all"}
}
return pkr, SUCCESS, nil
}
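As a rough illustration of the lookup path above, the following sketch fetches and parses a public-key record via NewPubKeyRespFromDNS. The selector and domain are placeholders; real callers would take them from a parsed DKIM-Signature header.

package main

import (
    "fmt"

    dkim "github.com/toorop/go-dkim"
)

func main() {
    // queries selector1._domainkey.example.com for the DKIM TXT record
    pkr, _, err := dkim.NewPubKeyRespFromDNS("selector1", "example.com")
    if err != nil {
        // the discarded second return value reports success/tempfail/permfail
        fmt.Println("lookup or parse failed:", err)
        return
    }
    fmt.Println("key type:", pkr.KeyType)
    fmt.Println("hash algos:", pkr.HashAlgo)
    fmt.Println("service types:", pkr.ServiceType)
}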

4
vendor/github.com/toorop/go-dkim/watch generated vendored Normal file
View File

@ -0,0 +1,4 @@
while true
do
inotifywait -q -r -e modify,attrib,close_write,move,create,delete . && echo "--------------" && go test -v
done

4
vendor/golang.org/x/crypto/LICENSE generated vendored
View File

@ -1,4 +1,4 @@
Copyright 2009 The Go Authors. Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the in the documentation and/or other materials provided with the
distribution. distribution.
* Neither the name of Google LLC nor the names of its * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from contributors may be used to endorse or promote products derived from
this software without specific prior written permission. this software without specific prior written permission.

View File

@ -4,7 +4,7 @@
// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing // Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf // algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
package bcrypt package bcrypt // import "golang.org/x/crypto/bcrypt"
// The code is a port of Provos and Mazières's C implementation. // The code is a port of Provos and Mazières's C implementation.
import ( import (
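For reference, a small sketch of typical bcrypt usage (the password and cost are illustrative; this does not show how Ergo itself calls the package):

package main

import (
    "fmt"

    "golang.org/x/crypto/bcrypt"
)

func main() {
    // hash a password with the default cost
    hash, err := bcrypt.GenerateFromPassword([]byte("hunter2"), bcrypt.DefaultCost)
    if err != nil {
        panic(err)
    }
    // CompareHashAndPassword returns nil on a match, an error otherwise
    err = bcrypt.CompareHashAndPassword(hash, []byte("hunter2"))
    fmt.Println("password matches:", err == nil)
}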

View File

@ -11,7 +11,7 @@
// Deprecated: any new system should use AES (from crypto/aes, if necessary in // Deprecated: any new system should use AES (from crypto/aes, if necessary in
// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from // an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
// golang.org/x/crypto/chacha20poly1305). // golang.org/x/crypto/chacha20poly1305).
package blowfish package blowfish // import "golang.org/x/crypto/blowfish"
// The code is a port of Bruce Schneier's C implementation. // The code is a port of Bruce Schneier's C implementation.
// See https://www.schneier.com/blowfish.html. // See https://www.schneier.com/blowfish.html.

View File

@ -1,69 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
//
// This package is a wrapper around the standard library crypto/ed25519 package.
package ed25519
import (
"crypto/ed25519"
"io"
)
const (
// PublicKeySize is the size, in bytes, of public keys as used in this package.
PublicKeySize = 32
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
SeedSize = 32
)
// PublicKey is the type of Ed25519 public keys.
//
// This type is an alias for crypto/ed25519's PublicKey type.
// See the crypto/ed25519 package for the methods on this type.
type PublicKey = ed25519.PublicKey
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
//
// This type is an alias for crypto/ed25519's PrivateKey type.
// See the crypto/ed25519 package for the methods on this type.
type PrivateKey = ed25519.PrivateKey
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
return ed25519.GenerateKey(rand)
}
// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
return ed25519.NewKeyFromSeed(seed)
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
return ed25519.Sign(privateKey, message)
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
return ed25519.Verify(publicKey, message, sig)
}
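The removed file was only a thin alias over the standard library, so an equivalent sketch using crypto/ed25519 directly looks like this (the message content is illustrative):

package main

import (
    "crypto/ed25519"
    "crypto/rand"
    "fmt"
)

func main() {
    // generate a keypair; the wrapper's GenerateKey(nil) also used crypto/rand
    pub, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        panic(err)
    }
    msg := []byte("hello")
    sig := ed25519.Sign(priv, msg)
    fmt.Println("signature valid:", ed25519.Verify(pub, msg, sig))
}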

View File

@ -1,95 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation
// Function (HKDF) as defined in RFC 5869.
//
// HKDF is a cryptographic key derivation function (KDF) with the goal of
// expanding limited input keying material into one or more cryptographically
// strong secret keys.
package hkdf
import (
"crypto/hmac"
"errors"
"hash"
"io"
)
// Extract generates a pseudorandom key for use with Expand from an input secret
// and an optional independent salt.
//
// Only use this function if you need to reuse the extracted key with multiple
// Expand invocations and different context values. Most common scenarios,
// including the generation of multiple keys, should use New instead.
func Extract(hash func() hash.Hash, secret, salt []byte) []byte {
if salt == nil {
salt = make([]byte, hash().Size())
}
extractor := hmac.New(hash, salt)
extractor.Write(secret)
return extractor.Sum(nil)
}
type hkdf struct {
expander hash.Hash
size int
info []byte
counter byte
prev []byte
buf []byte
}
func (f *hkdf) Read(p []byte) (int, error) {
// Check whether enough data can be generated
need := len(p)
remains := len(f.buf) + int(255-f.counter+1)*f.size
if remains < need {
return 0, errors.New("hkdf: entropy limit reached")
}
// Read any leftover from the buffer
n := copy(p, f.buf)
p = p[n:]
// Fill the rest of the buffer
for len(p) > 0 {
if f.counter > 1 {
f.expander.Reset()
}
f.expander.Write(f.prev)
f.expander.Write(f.info)
f.expander.Write([]byte{f.counter})
f.prev = f.expander.Sum(f.prev[:0])
f.counter++
// Copy the new batch into p
f.buf = f.prev
n = copy(p, f.buf)
p = p[n:]
}
// Save leftovers for next run
f.buf = f.buf[n:]
return need, nil
}
// Expand returns a Reader, from which keys can be read, using the given
// pseudorandom key and optional context info, skipping the extraction step.
//
// The pseudorandomKey should have been generated by Extract, or be a uniformly
// random or pseudorandom cryptographically strong key. See RFC 5869, Section
// 3.3. Most common scenarios will want to use New instead.
func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader {
expander := hmac.New(hash, pseudorandomKey)
return &hkdf{expander, expander.Size(), info, 1, nil, nil}
}
// New returns a Reader, from which keys can be read, using the given hash,
// secret, salt and context info. Salt and info can be nil.
func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader {
prk := Extract(hash, secret, salt)
return Expand(hash, prk, info)
}
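The removed vendored copy tracks the upstream golang.org/x/crypto/hkdf API; a minimal usage sketch of that upstream package, with all input values illustrative:

package main

import (
    "crypto/sha256"
    "fmt"
    "io"

    "golang.org/x/crypto/hkdf"
)

func main() {
    secret := []byte("input keying material")
    salt := []byte("optional salt")
    info := []byte("context label")

    // New combines Extract and Expand; read as many key bytes as needed
    r := hkdf.New(sha256.New, secret, salt, info)
    key := make([]byte, 32)
    if _, err := io.ReadFull(r, key); err != nil {
        panic(err)
    }
    fmt.Printf("derived key: %x\n", key)
}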

View File

@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
choose, you can pass the `New` functions from the different SHA packages to choose, you can pass the `New` functions from the different SHA packages to
pbkdf2.Key. pbkdf2.Key.
*/ */
package pbkdf2 package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
import ( import (
"crypto/hmac" "crypto/hmac"

Some files were not shown because too many files have changed in this diff.