mirror of https://github.com/ergochat/ergo.git synced 2025-04-20 23:07:54 +02:00

Compare commits


No commits in common. "master" and "v2.15.0-rc1" have entirely different histories.

212 changed files with 5822 additions and 4443 deletions

View File

@ -12,14 +12,14 @@ on:
jobs:
build:
runs-on: "ubuntu-24.04"
runs-on: "ubuntu-22.04"
steps:
- name: "checkout repository"
uses: "actions/checkout@v3"
- name: "setup go"
uses: "actions/setup-go@v3"
with:
go-version: "1.24"
go-version: "1.23"
- name: "install python3-pytest"
run: "sudo apt install -y python3-pytest"
- name: "make install"

View File

@ -71,7 +71,6 @@ archives:
- ergo.motd
- default.yaml
- traditional.yaml
- docs/API.md
- docs/MANUAL.md
- docs/USERGUIDE.md
- languages/*.yaml

View File

@ -1,9 +1,9 @@
# Changelog
All notable changes to Ergo will be documented in this file.
## [2.15.0] - 2025-01-26
## [2.15.0-rc1] - 2025-01-14
We're pleased to be publishing v2.15.0, a new stable release. This release adds support for mobile push notifications, via the [draft/webpush](https://github.com/ircv3/ircv3-specifications/pull/471) specification. More information on this is available in the [manual](https://github.com/ergochat/ergo/blob/ab2d842b270d9df217c779df9c7a5c594d85fdd5/docs/MANUAL.md#push-notifications) and [user guide](https://github.com/ergochat/ergo/blob/ab2d842b270d9df217c779df9c7a5c594d85fdd5/docs/USERGUIDE.md#push-notifications). This feature is still considered to be in an experimental state; `default.yaml` ships with it disabled, and its configuration may have backwards-incompatible changes in the future.
We're pleased to be publishing the release candidate for v2.15.0 (the official release should follow within two weeks or so). This release adds support for mobile push notifications, via the [draft/webpush](https://github.com/ircv3/ircv3-specifications/pull/471) specification. More information on this is available in the [manual](https://github.com/ergochat/ergo/blob/21ee867ebbe5fc5867665d0d487aa1fdebd29b34/docs/MANUAL.md#push-notifications) and [user guide](https://github.com/ergochat/ergo/blob/21ee867ebbe5fc5867665d0d487aa1fdebd29b34/docs/USERGUIDE.md#push-notifications). This feature is still considered to be in an experimental state; `default.yaml` ships with it disabled, and its configuration may have backwards-incompatible changes in the future.
This release includes changes to the config file format, all of which are fully backwards-compatible and do not require updating the file before upgrading.
@ -22,10 +22,10 @@ Many thanks to [@delthas](https://github.com/delthas), [@donatj](https://github.
* Ergo now publishes the `SAFELIST` ISUPPORT parameter (#2196, thanks [@delthas](https://github.com/delthas)!)
### Fixed
* Fixed incorrect parameters when pushing `005` (ISUPPORT) updates to clients on rehash (#2177, #2184)
* Fixed incorrect parameters when pushing `005` (ISUPPORT) updates to clients on rehash (#2184)
### Internal
* Official release builds use Go 1.23.5
* Official release builds use Go 1.23.4
* Added a unique identifier to identify connections in debug logs. This has no privacy implications in a standard, non-debug configuration of Ergo. (#2206, thanks donio!)
* Added support for Solaris on amd64 CPUs (#2183)

View File

@ -1,5 +1,5 @@
## build ergo binary
FROM docker.io/golang:1.24-alpine AS build-env
FROM docker.io/golang:1.23-alpine AS build-env
RUN apk upgrade -U --force-refresh --no-cache && apk add --no-cache --purge --clean-protected -l -u make git

View File

@ -100,7 +100,6 @@ server:
max-connections-per-duration: 64
# strict transport security, to get clients to automagically use TLS
# (irrelevant in the recommended configuration, with no public plaintext listener)
sts:
# whether to advertise STS
#
@ -376,17 +375,6 @@ server:
# if you don't want to publicize how popular the server is
suppress-lusers: false
# publish additional key-value pairs in ISUPPORT (the 005 numeric).
# keys that collide with a key published by Ergo will be silently ignored.
additional-isupport:
#"draft/FILEHOST": "https://example.com/filehost"
#"draft/bazbat": "" # empty string means no value
# optionally map command alias names to existing ergo commands. most deployments
# should ignore this.
#command-aliases:
#"UMGEBUNG": "AMBIANCE"
# account options
accounts:
# is account authentication enabled, i.e., can users log into existing accounts?
@ -953,12 +941,6 @@ roleplay:
# add the real nickname, in parentheses, to the end of every roleplay message?
add-suffix: true
# allow customizing the NUH's sent for NPC and SCENE commands
# NPC: the first %s is the NPC name, the second is the user's real nick
#npc-nick-mask: "*%s*!%s@npc.fakeuser.invalid"
# SCENE: the %s is the client's real nick
#scene-nick-mask: "=Scene=!%s@npc.fakeuser.invalid"
# external services can integrate with the ircd using JSON Web Tokens (https://jwt.io).
# in effect, the server can sign a token attesting that the client is present on
# the server, is a member of a particular channel, etc.
@ -1107,21 +1089,3 @@ webpush:
# by the client reconnecting to IRC. we also detect whether the client is no longer
# successfully receiving push messages.
expiration: 14d
# HTTP API. we strongly recommend leaving this disabled unless you have a specific
# need for it.
api:
# is the API enabled at all?
enabled: false
# listen address:
listener: "127.0.0.1:8089"
# serve over TLS (strongly recommended if the listener is public):
#tls:
#cert: fullchain.pem
#key: privkey.pem
# one or more static bearer tokens accepted for HTTP bearer authentication.
# these must be strong, unique, high-entropy printable ASCII strings.
# to generate a new token, use `ergo gentoken` or:
# python3 -c "import secrets; print(secrets.token_urlsafe(32))"
bearer-tokens:
- "example"

View File

@ -85,8 +85,8 @@ docker kill -s SIGHUP ergo
## Using custom TLS certificates
TLS certs will by default be read from /ircd/fullchain.pem, with a private key
in /ircd/privkey.pem. You can customise this path in the ircd.yaml file if
TLS certs will by default be read from /ircd/tls.crt, with a private key
in /ircd/tls.key. You can customise this path in the ircd.yaml file if
you wish to mount the certificates from another volume. For information
on using Let's Encrypt certificates, see
[this manual entry](https://github.com/ergochat/ergo/blob/master/docs/MANUAL.md#using-valid-tls-certificates).
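As a concrete illustration, certificates can be supplied from the host with a bind mount along these lines (a minimal sketch: the host directory, container name, image reference, and published ports are placeholders, and the in-container filenames should match the defaults described above or your `ircd.yaml` overrides):

```bash
# hypothetical host directory /srv/ergo-certs holding the certificate chain and key
docker run -d --name ergo \
    -v /srv/ergo-certs/fullchain.pem:/ircd/fullchain.pem:ro \
    -v /srv/ergo-certs/privkey.pem:/ircd/privkey.pem:ro \
    -p 6667:6667 -p 6697:6697 \
    ghcr.io/ergochat/ergo:stable
```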

View File

@ -1,88 +0,0 @@
__ __ ______ ___ ______ ___
__/ // /_/ ____/ __ \/ ____/ __ \
/_ // __/ __/ / /_/ / / __/ / / /
/_ // __/ /___/ _, _/ /_/ / /_/ /
/_//_/ /_____/_/ |_|\____/\____/
Ergo IRCd API Documentation
https://ergo.chat/
_Copyright © Daniel Oaks <daniel@danieloaks.net>, Shivaram Lingamneni <slingamn@cs.stanford.edu>_
--------------------------------------------------------------------------------------------
Ergo has an experimental HTTP API. Some general information about the API:
1. All requests to the API are via POST.
1. All requests to the API are authenticated via bearer authentication. This is a header named `Authorization` with the value `Bearer <token>`. A list of valid tokens is hardcoded in the Ergo config. Future versions of Ergo may allow additional validation schemes for tokens.
1. The request parameters are sent as JSON in the POST body.
1. Any status code other than 200 is an error response; the response body is undefined in this case (likely human-readable text for debugging).
1. A 200 status code indicates successful execution of the request. The response body will be JSON and may indicate application-level success or failure (typically via the `success` field, which takes a boolean value).
API endpoints are versioned (currently all endpoints have a `/v1/` path prefix). Backwards-incompatible updates will most likely take the form of endpoints with new names, or an increased version prefix. Any exceptions to this will be specifically documented in the changelog.
All API endpoints should be considered highly privileged. Bearer tokens should be kept secret. Access to the API should be either over a trusted link (like loopback) or secured via verified TLS. See the `api` section of `default.yaml` for examples of how to configure this.
Here's an example of how to test an API configured to run over loopback TCP in plaintext:
```bash
curl -d '{"accountName": "invalidaccountname", "passphrase": "invalidpassphrase"}' -H 'Authorization: Bearer EYBbXVilnumTtfn4A9HE8_TiKLGWEGylre7FG6gEww0' -v http://127.0.0.1:8089/v1/check_auth
```
This returns:
```json
{"success":false}
```
Endpoints
=========
`/v1/account_details`
----------------
This endpoint fetches account details and returns them as JSON. The request is a JSON object with fields:
* `accountName`: string, name of the account
The response is a JSON object with fields:
* `success`: whether the account exists or not
* `accountName`: canonical, case-unfolded version of the account name
* `email`: email address of the account provided
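For example, a lookup could be performed like this (a sketch that reuses the loopback listener and placeholder bearer token from the example above; the account name is hypothetical):

```bash
curl -d '{"accountName": "alice"}' \
    -H 'Authorization: Bearer EYBbXVilnumTtfn4A9HE8_TiKLGWEGylre7FG6gEww0' \
    http://127.0.0.1:8089/v1/account_details
```

If the account exists and is in good standing, the response would carry `success: true` together with the canonical account name and, if set, its email address.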
`/v1/check_auth`
----------------
This endpoint verifies the credentials of a NickServ account; this allows Ergo to be used as the source of truth for authentication by another system. The request is a JSON object with fields:
* `accountName`: string, name of the account
* `passphrase`: string, alleged passphrase of the account
The response is a JSON object with fields:
* `success`: whether the credentials provided were valid
* `accountName`: canonical, case-unfolded version of the account name
`/v1/rehash`
------------
This endpoint rehashes the server (i.e. reloads the configuration file, TLS certificates, and other associated data). The body is ignored. The response is a JSON object with fields:
* `success`: boolean, indicates whether the rehash was successful
* `error`: string, optional, human-readable description of the failure
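For example (a sketch along the same lines as the `check_auth` example above, assuming the same loopback listener and bearer token; the request body is ignored, so an empty JSON object suffices):

```bash
curl -d '{}' \
    -H 'Authorization: Bearer EYBbXVilnumTtfn4A9HE8_TiKLGWEGylre7FG6gEww0' \
    http://127.0.0.1:8089/v1/rehash
```

A successful rehash returns `{"success":true}`; on failure, `success` is false and `error` describes what went wrong.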
`/v1/saregister`
----------------
This endpoint registers an account in NickServ, with the same semantics as `NS SAREGISTER`. The request is a JSON object with fields:
* `accountName`: string, name of the account
* `passphrase`: string, passphrase of the account
The response is a JSON object with fields:
* `success`: whether the account creation succeeded
* `errorCode`: string, optional, machine-readable description of the error. Possible values include: `ACCOUNT_EXISTS`, `INVALID_PASSPHRASE`, `UNKNOWN_ERROR`.
* `error`: string, optional, human-readable description of the failure.
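Following the same pattern, a registration request might look like this (a sketch; the account name, passphrase, and bearer token are placeholders):

```bash
curl -d '{"accountName": "alice", "passphrase": "correct horse battery staple"}' \
    -H 'Authorization: Bearer EYBbXVilnumTtfn4A9HE8_TiKLGWEGylre7FG6gEww0' \
    http://127.0.0.1:8089/v1/saregister
```

A successful call returns `{"success":true}`; attempting to register a name that is already taken would instead yield `success: false` with `errorCode` set to `ACCOUNT_EXISTS`.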

View File

@ -63,7 +63,6 @@ _Copyright © Daniel Oaks <daniel@danieloaks.net>, Shivaram Lingamneni <slingamn
- [Tor](#tor)
- [I2P](#i2p)
- [ZNC](#znc)
- [API](#api)
- [External authentication systems](#external-authentication-systems)
- [DNSBLs and other IP checking systems](#dnsbls-and-other-ip-checking-systems)
- [Acknowledgements](#acknowledgements)
@ -493,7 +492,6 @@ Ergo now has experimental support for push notifications via the [draft/webpush]
* Push notifications result in the disclosure of metadata (that the user received a message, and the approximate time of the message) to third-party messaging infrastructure. In the typical case, this will include a push endpoint controlled by the application vendor, plus the push infrastructure controlled by Apple or Google.
* The message contents (including the sender's identity) are protected by [encryption](https://datatracker.ietf.org/doc/html/rfc8291) between the server and the user's endpoint device. However, the encryption algorithm is not forward-secret (a long-term private key is stored on the user's device) or post-quantum (the server retains a copy of the corresponding elliptic curve public key).
* Push notifications are relatively expensive to process, and may increase the impact of spam or denial-of-service attacks on the Ergo server.
* Push notifications negate the anonymization provided by Tor and I2P; an Ergo instance intended to run as a Tor onion service ("hidden service") or exclusively behind an I2P address must disable them in the Ergo configuration file.
Operators and end users are invited to share feedback about push notifications, either via the project issue tracker or the support channel. Note that in order to receive push notifications, the user must be logged in with always-on enabled, and must be using a client (e.g. Goguma) that supports them.
@ -1136,7 +1134,6 @@ Tor provides end-to-end encryption for onion services, so there's no need to ena
The second way is to run Ergo as a true hidden service, where the server's actual IP address is a secret. This requires hardening measures on the Ergo side:
* Ergo should not accept any connections on its public interfaces. You should remove any listener that starts with the address of a public interface, or with `:`, which means "listen on all available interfaces". You should listen only on `127.0.0.1:6667` and a Unix domain socket such as `/hidden_service_sockets/ergo_tor_sock`.
* Push notifications will reveal the server's true IP address, so they must be disabled; set `webpush.enabled` to `false`.
* In this mode, it is especially important that all operator passwords are strong and all operators are trusted (operators have a larger attack surface to deanonymize the server).
* Onion services are at risk of being deanonymized if a client can trick the server into performing a non-Tor network request. Ergo should not perform any such requests (such as hostname resolution or ident lookups) in response to input received over a correctly configured Tor listener. However, Ergo has not been thoroughly audited against such deanonymization attacks --- therefore, Ergo should be deployed with additional sandboxing to protect against this:
* Ergo should run with no direct network connectivity, e.g., by running in its own Linux network namespace. systemd implements this with the [PrivateNetwork](https://www.freedesktop.org/software/systemd/man/systemd.exec.html) configuration option: add `PrivateNetwork=true` to Ergo's systemd unit file.
@ -1176,10 +1173,6 @@ ZNC 1.6.x (still pretty common in distros that package old versions of IRC softw
Ergo can emulate certain capabilities of the ZNC bouncer for the benefit of clients, in particular the third-party [playback](https://wiki.znc.in/Playback) module. This enables clients with specific support for ZNC to receive selective history playback automatically. To configure this in [Textual](https://www.codeux.com/textual/), go to "Server properties", select "Vendor specific", uncheck "Do not automatically join channels on connect", and check "Only play back messages you missed". Other clients with support are listed on ZNC's wiki page.
## API
Ergo offers an HTTP API that can be used to control Ergo, or to allow other applications to use Ergo as a source of truth for authentication. The API is documented separately; see [API.md](https://github.com/ergochat/ergo/blob/stable/docs/API.md) on the website, or the `API.md` file that was bundled with your release.
## External authentication systems
Ergo can be configured to call arbitrary scripts to authenticate users; see the `auth-script` section of the config. The API for these scripts is as follows: Ergo will invoke the script with a configurable set of arguments, then send it the authentication data as JSON on the first line (`\n`-terminated) of stdin. The input is a JSON dictionary with the following keys:

View File

@ -21,7 +21,6 @@ import (
"github.com/ergochat/ergo/irc"
"github.com/ergochat/ergo/irc/logger"
"github.com/ergochat/ergo/irc/mkcerts"
"github.com/ergochat/ergo/irc/utils"
)
// set via linker flags, either by make or by goreleaser:
@ -100,7 +99,6 @@ Usage:
ergo genpasswd [--conf <filename>] [--quiet]
ergo mkcerts [--conf <filename>] [--quiet]
ergo defaultconfig
ergo gentoken
ergo run [--conf <filename>] [--quiet] [--smoke]
ergo -h | --help
ergo --version
@ -143,9 +141,6 @@ Options:
} else if arguments["defaultconfig"].(bool) {
fmt.Print(defaultConfig)
return
} else if arguments["gentoken"].(bool) {
fmt.Println(utils.GenerateSecretKey())
return
} else if arguments["mkcerts"].(bool) {
doMkcerts(arguments["--conf"].(string), arguments["--quiet"].(bool))
return
@ -193,7 +188,7 @@ Options:
// warning if running a non-final version
if strings.Contains(irc.Ver, "unreleased") {
logman.Warning("server", "You are currently running an unreleased beta version of Ergo that may be unstable and could corrupt your database.\nIf you are running a production network, please download the latest build from https://ergo.chat/about and run that instead.")
logman.Warning("server", "You are currently running an unreleased beta version of Ergo that may be unstable and could corrupt your database.\nIf you are running a production network, please download the latest build from https://ergo.chat/downloads.html and run that instead.")
}
server, err := irc.NewServer(config, logman)

go.mod
View File

@ -1,6 +1,6 @@
module github.com/ergochat/ergo
go 1.24
go 1.23
require (
code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48
@ -10,24 +10,25 @@ require (
github.com/ergochat/go-ident v0.0.0-20230911071154-8c30606d6881
github.com/ergochat/irc-go v0.5.0-rc2
github.com/go-sql-driver/mysql v1.7.0
github.com/go-test/deep v1.0.6 // indirect
github.com/gofrs/flock v0.8.1
github.com/gorilla/websocket v1.4.2
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
github.com/onsi/ginkgo v1.12.0 // indirect
github.com/onsi/gomega v1.9.0 // indirect
github.com/stretchr/testify v1.4.0 // indirect
github.com/tidwall/buntdb v1.3.2
github.com/tidwall/buntdb v1.3.1
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208
github.com/xdg-go/scram v1.0.2
golang.org/x/crypto v0.32.0
golang.org/x/term v0.28.0
golang.org/x/text v0.21.0
golang.org/x/crypto v0.25.0
golang.org/x/term v0.22.0
golang.org/x/text v0.16.0
gopkg.in/yaml.v2 v2.4.0
)
require (
github.com/emersion/go-msgauth v0.6.8
github.com/ergochat/webpush-go/v2 v2.0.0
github.com/golang-jwt/jwt/v5 v5.2.2
github.com/ergochat/webpush-go/v2 v2.0.0-rc1
github.com/golang-jwt/jwt/v5 v5.2.1
)
require (
@ -39,7 +40,7 @@ require (
github.com/tidwall/rtred v0.1.2 // indirect
github.com/tidwall/tinyqueue v0.1.1 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
golang.org/x/sys v0.29.0 // indirect
golang.org/x/sys v0.22.0 // indirect
)
replace github.com/gorilla/websocket => github.com/ergochat/websocket v1.4.2-oragono1

go.sum
View File

@ -6,8 +6,6 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/emersion/go-msgauth v0.6.8 h1:kW/0E9E8Zx5CdKsERC/WnAvnXvX7q9wTHia1OA4944A=
github.com/emersion/go-msgauth v0.6.8/go.mod h1:YDwuyTCUHu9xxmAeVj0eW4INnwB6NNZoPdLerpSxRrc=
github.com/ergochat/confusables v0.0.0-20201108231250-4ab98ab61fb1 h1:WLHTOodthVyv5NvYLIvWl112kSFv5IInKKrRN2qpons=
github.com/ergochat/confusables v0.0.0-20201108231250-4ab98ab61fb1/go.mod h1:mov+uh1DPWsltdQnOdzn08UO9GsJ3MEvhtu0Ci37fdk=
github.com/ergochat/go-ident v0.0.0-20230911071154-8c30606d6881 h1:+J5m88nvybxB5AnBVGzTXM/yHVytt48rXBGcJGzSbms=
@ -16,17 +14,19 @@ github.com/ergochat/irc-go v0.5.0-rc2 h1:VuSQJF5K4hWvYSzGa4b8vgL6kzw8HF6LSOejE+R
github.com/ergochat/irc-go v0.5.0-rc2/go.mod h1:2vi7KNpIPWnReB5hmLpl92eMywQvuIeIIGdt/FQCph0=
github.com/ergochat/scram v1.0.2-ergo1 h1:2bYXiRFQH636pT0msOG39fmEYl4Eq+OuutcyDsCix/g=
github.com/ergochat/scram v1.0.2-ergo1/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/ergochat/webpush-go/v2 v2.0.0 h1:n6eoJk8RpzJFeBJ6gxvqo/dngnVEmJbzJwzKtCZbByo=
github.com/ergochat/webpush-go/v2 v2.0.0/go.mod h1:OQlhnq8JeHDzRzAy6bdDObr19uqbHliOV+z7mHbYr4c=
github.com/ergochat/webpush-go/v2 v2.0.0-rc1 h1:CzSebM2OFM1zkAviYtkrBj5xtQc7Ka+Po607xbmZ+40=
github.com/ergochat/webpush-go/v2 v2.0.0-rc1/go.mod h1:OQlhnq8JeHDzRzAy6bdDObr19uqbHliOV+z7mHbYr4c=
github.com/ergochat/websocket v1.4.2-oragono1 h1:plMUunFBM6UoSCIYCKKclTdy/TkkHfUslhOfJQzfueM=
github.com/ergochat/websocket v1.4.2-oragono1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-test/deep v1.0.6 h1:UHSEyLZUwX9Qoi99vVwvewiMC8mM2bf7XEM2nqvzEn8=
github.com/go-test/deep v1.0.6/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@ -47,8 +47,8 @@ github.com/tidwall/assert v0.1.0 h1:aWcKyRBUAdLoVebxo95N7+YZVTFF/ASTr7BN4sLP6XI=
github.com/tidwall/assert v0.1.0/go.mod h1:QLYtGyeqse53vuELQheYl9dngGCJQ+mTtlxcktb+Kj8=
github.com/tidwall/btree v1.4.2 h1:PpkaieETJMUxYNADsjgtNRcERX7mGc/GP2zp/r5FM3g=
github.com/tidwall/btree v1.4.2/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE=
github.com/tidwall/buntdb v1.3.2 h1:qd+IpdEGs0pZci37G4jF51+fSKlkuUTMXuHhXL1AkKg=
github.com/tidwall/buntdb v1.3.2/go.mod h1:lZZrZUWzlyDJKlLQ6DKAy53LnG7m5kHyrEHvvcDmBpU=
github.com/tidwall/buntdb v1.3.1 h1:HKoDF01/aBhl9RjYtbaLnvX9/OuenwvQiC3OP1CcL4o=
github.com/tidwall/buntdb v1.3.1/go.mod h1:lZZrZUWzlyDJKlLQ6DKAy53LnG7m5kHyrEHvvcDmBpU=
github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
@ -64,26 +64,28 @@ github.com/tidwall/rtred v0.1.2 h1:exmoQtOLvDoO8ud++6LwVsAMTu0KPzLTUrMln8u1yu8=
github.com/tidwall/rtred v0.1.2/go.mod h1:hd69WNXQ5RP9vHd7dqekAz+RIdtfBogmglkZSRxCHFQ=
github.com/tidwall/tinyqueue v0.1.1 h1:SpNEvEggbpyN5DIReaJ2/1ndroY8iyEGxPYxoSaymYE=
github.com/tidwall/tinyqueue v0.1.1/go.mod h1:O/QNHwrnjqr6IHItYrzoHAKYhBkLI67Q096fQP5zMYw=
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 h1:PM5hJF7HVfNWmCjMdEfbuOBNXSVF2cMFGgQTPdKCbwM=
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208/go.mod h1:BzWtXXrXzZUvMacR0oF/fbDDgUPO8L36tDMmRAf14ns=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -1157,7 +1157,7 @@ func (am *AccountManager) NsSendpass(client *Client, accountName string) (err er
message := email.ComposeMail(config.Accounts.Registration.EmailVerification, account.Settings.Email, subject)
fmt.Fprintf(&message, client.t("We received a request to reset your password on %[1]s for account: %[2]s"), am.server.name, account.Name)
message.WriteString("\r\n")
message.WriteString(client.t("If you did not initiate this request, you can safely ignore this message."))
fmt.Fprintf(&message, client.t("If you did not initiate this request, you can safely ignore this message."))
message.WriteString("\r\n")
message.WriteString("\r\n")
message.WriteString(client.t("Otherwise, to reset your password, issue the following command (replace `new_password` with your desired password):"))

View File

@ -1,224 +0,0 @@
package irc
import (
"crypto/subtle"
"encoding/json"
"fmt"
"net/http"
"strings"
)
func newAPIHandler(server *Server) http.Handler {
api := &ergoAPI{
server: server,
mux: http.NewServeMux(),
}
api.mux.HandleFunc("POST /v1/rehash", api.handleRehash)
api.mux.HandleFunc("POST /v1/check_auth", api.handleCheckAuth)
api.mux.HandleFunc("POST /v1/saregister", api.handleSaregister)
api.mux.HandleFunc("POST /v1/account_details", api.handleAccountDetails)
return api
}
type ergoAPI struct {
server *Server
mux *http.ServeMux
}
func (a *ergoAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer a.server.HandlePanic(nil)
defer a.server.logger.Debug("api", r.URL.Path)
if a.checkBearerAuth(r.Header.Get("Authorization")) {
a.mux.ServeHTTP(w, r)
} else {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
}
}
func (a *ergoAPI) checkBearerAuth(authHeader string) (authorized bool) {
if authHeader == "" {
return false
}
c := a.server.Config()
if !c.API.Enabled {
return false
}
spaceIdx := strings.IndexByte(authHeader, ' ')
if spaceIdx < 0 {
return false
}
if !strings.EqualFold("Bearer", authHeader[:spaceIdx]) {
return false
}
providedTokenBytes := []byte(authHeader[spaceIdx+1:])
for _, tokenBytes := range c.API.bearerTokenBytes {
if subtle.ConstantTimeCompare(tokenBytes, providedTokenBytes) == 1 {
return true
}
}
return false
}
func (a *ergoAPI) decodeJSONRequest(request any, w http.ResponseWriter, r *http.Request) (err error) {
err = json.NewDecoder(r.Body).Decode(request)
if err != nil {
http.Error(w, fmt.Sprintf("failed to deserialize json request: %v", err), http.StatusBadRequest)
}
return err
}
func (a *ergoAPI) writeJSONResponse(response any, w http.ResponseWriter, r *http.Request) {
j, err := json.Marshal(response)
if err == nil {
j = append(j, '\n') // less annoying in curl output
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(j)
} else {
a.server.logger.Error("internal", "failed to serialize API response", r.URL.Path, err.Error())
http.Error(w, fmt.Sprintf("failed to serialize json response: %v", err), http.StatusInternalServerError)
}
}
type apiGenericResponse struct {
Success bool `json:"success"`
Error string `json:"error,omitempty"`
ErrorCode string `json:"errorCode,omitempty"`
}
func (a *ergoAPI) handleRehash(w http.ResponseWriter, r *http.Request) {
var response apiGenericResponse
err := a.server.rehash()
if err == nil {
response.Success = true
} else {
response.Success = false
response.Error = err.Error()
}
a.writeJSONResponse(response, w, r)
}
type apiCheckAuthResponse struct {
apiGenericResponse
AccountName string `json:"accountName,omitempty"`
}
func (a *ergoAPI) handleCheckAuth(w http.ResponseWriter, r *http.Request) {
var request AuthScriptInput
if err := a.decodeJSONRequest(&request, w, r); err != nil {
return
}
var response apiCheckAuthResponse
// try passphrase if present
if request.AccountName != "" && request.Passphrase != "" {
// TODO this only checks the internal database, not auth-script;
// it's a little weird to use both auth-script and the API but we should probably handle it
account, err := a.server.accounts.checkPassphrase(request.AccountName, request.Passphrase)
switch err {
case nil:
// success, no error
response.Success = true
response.AccountName = account.Name
case errAccountDoesNotExist, errAccountInvalidCredentials, errAccountUnverified, errAccountSuspended:
// fail, no error
response.Success = false
default:
response.Success = false
response.Error = err.Error()
}
}
// try certfp if present
if !response.Success && request.Certfp != "" {
// TODO support cerftp
}
a.writeJSONResponse(response, w, r)
}
type apiSaregisterRequest struct {
AccountName string `json:"accountName"`
Passphrase string `json:"passphrase"`
}
func (a *ergoAPI) handleSaregister(w http.ResponseWriter, r *http.Request) {
var request apiSaregisterRequest
if err := a.decodeJSONRequest(&request, w, r); err != nil {
return
}
var response apiGenericResponse
err := a.server.accounts.SARegister(request.AccountName, request.Passphrase)
if err == nil {
response.Success = true
} else {
response.Success = false
response.Error = err.Error()
switch err {
case errAccountAlreadyRegistered, errAccountAlreadyVerified, errNameReserved:
response.ErrorCode = "ACCOUNT_EXISTS"
case errAccountBadPassphrase:
response.ErrorCode = "INVALID_PASSPHRASE"
default:
response.ErrorCode = "UNKNOWN_ERROR"
}
}
a.writeJSONResponse(response, w, r)
}
type apiAccountDetailsResponse struct {
apiGenericResponse
AccountName string `json:"accountName,omitempty"`
Email string `json:"email,omitempty"`
}
type apiAccountDetailsRequest struct {
AccountName string `json:"accountName"`
}
func (a *ergoAPI) handleAccountDetails(w http.ResponseWriter, r *http.Request) {
var request apiAccountDetailsRequest
if err := a.decodeJSONRequest(&request, w, r); err != nil {
return
}
var response apiAccountDetailsResponse
// TODO could probably use better error handling and more details
if request.AccountName != "" {
accountData, err := a.server.accounts.LoadAccount(request.AccountName)
if err == nil {
if !accountData.Verified {
err = errAccountUnverified
} else if accountData.Suspended != nil {
err = errAccountSuspended
}
}
switch err {
case nil:
response.AccountName = accountData.Name
response.Email = accountData.Settings.Email
response.Success = true
case errAccountDoesNotExist, errAccountUnverified, errAccountSuspended:
response.Success = false
default:
response.Success = false
response.ErrorCode = "UNKNOWN_ERROR"
response.Error = err.Error()
}
} else {
response.Success = false
response.ErrorCode = "INVALID_REQUEST"
}
a.writeJSONResponse(response, w, r)
}

View File

@ -682,7 +682,7 @@ func (client *Client) run(session *Session) {
if err == errInvalidUtf8 {
invalidUtf8 = true // handle as normal, including labeling
} else if err != nil {
client.server.logger.Debug("connect-ip", session.connID, "read error from client", err.Error())
client.server.logger.Debug("connect-ip", "read error from client", err.Error())
var quitMessage string
switch err {
case ircreader.ErrReadQ:
@ -727,12 +727,8 @@ func (client *Client) run(session *Session) {
}
session.fakelag.Touch(command)
} else {
if session.registrationMessages == 0 && httpVerbs.Has(msg.Command) {
client.Send(nil, client.server.name, ERR_UNKNOWNERROR, msg.Command, "This is not an HTTP server")
break
}
session.registrationMessages++
// DoS hardening, #505
session.registrationMessages++
if client.server.Config().Limits.RegistrationMessages < session.registrationMessages {
client.Send(nil, client.server.name, ERR_UNKNOWNERROR, "*", client.t("You have sent too many registration messages"))
break
@ -754,8 +750,13 @@ func (client *Client) run(session *Session) {
break
}
var cmd Command
msg.Command, cmd = client.server.resolveCommand(msg.Command, invalidUtf8)
cmd, exists := Commands[msg.Command]
if !exists {
cmd = unknownCommand
} else if invalidUtf8 {
cmd = invalidUtf8Command
}
isExiting := cmd.Run(client.server, client, session, msg)
if isExiting {
break
@ -1186,18 +1187,12 @@ func (client *Client) LoggedIntoAccount() bool {
// (You must ensure separately that destroy() is called, e.g., by returning `true` from
// the command handler or calling it yourself.)
func (client *Client) Quit(message string, session *Session) {
nuh := client.NickMaskString()
now := time.Now().UTC()
setFinalData := func(sess *Session) {
message := sess.quitMessage
var finalData []byte
// #364: don't send QUIT lines to unregistered clients
if client.registered {
quitMsg := ircmsg.MakeMessage(nil, nuh, "QUIT", message)
if sess.capabilities.Has(caps.ServerTime) {
quitMsg.SetTag("time", now.Format(utils.IRCv3TimestampFormat))
}
quitMsg := ircmsg.MakeMessage(nil, client.nickMaskString, "QUIT", message)
finalData, _ = quitMsg.LineBytesStrict(false, MaxLineLen)
}
@ -1886,7 +1881,7 @@ func (client *Client) performWrite(additionalDirtyBits uint) {
client.server.accounts.saveRealname(account, client.realname)
}
if (dirtyBits & IncludePushSubscriptions) != 0 {
client.server.accounts.savePushSubscriptions(account, client.getPushSubscriptions(true))
client.server.accounts.savePushSubscriptions(account, client.getPushSubscriptions())
}
}
@ -1973,7 +1968,7 @@ func (client *Client) pushWorker() {
for {
select {
case msg := <-client.pushQueue.queue:
for _, sub := range client.getPushSubscriptions(false) {
for _, sub := range client.getPushSubscriptions() {
if !client.skipPushMessage(msg) {
client.sendAndTrackPush(sub.Endpoint, sub.Keys, msg, true)
}

View File

@ -6,7 +6,7 @@ import (
"fmt"
"net"
"crypto/sha3"
"golang.org/x/crypto/sha3"
"github.com/ergochat/ergo/irc/utils"
)

View File

@ -18,24 +18,6 @@ type Command struct {
capabs []string
}
// resolveCommand returns the command to execute in response to a user input line.
// some invalid commands (unknown command verb, invalid UTF8) get a fake handler
// to ensure that labeled-response still works as expected.
func (server *Server) resolveCommand(command string, invalidUTF8 bool) (canonicalName string, result Command) {
if invalidUTF8 {
return command, invalidUtf8Command
}
if cmd, ok := Commands[command]; ok {
return command, cmd
}
if target, ok := server.Config().Server.CommandAliases[command]; ok {
if cmd, ok := Commands[target]; ok {
return target, cmd
}
}
return command, unknownCommand
}
// Run runs this command with the given client/message.
func (cmd *Command) Run(server *Server, client *Client, session *Session, msg ircmsg.Message) (exiting bool) {
rb := NewResponseBuffer(session)

View File

@ -22,7 +22,6 @@ import (
"strconv"
"strings"
"time"
"unicode/utf8"
"code.cloudfoundry.org/bytefmt"
"github.com/ergochat/irc-go/ircfmt"
@ -609,27 +608,14 @@ type Config struct {
OverrideServicesHostname string `yaml:"override-services-hostname"`
MaxLineLen int `yaml:"max-line-len"`
SuppressLusers bool `yaml:"suppress-lusers"`
AdditionalISupport map[string]string `yaml:"additional-isupport"`
CommandAliases map[string]string `yaml:"command-aliases"`
}
API struct {
Enabled bool
Listener string
TLS TLSListenConfig
tlsConfig *tls.Config
BearerTokens []string `yaml:"bearer-tokens"`
bearerTokenBytes [][]byte
} `yaml:"api"`
Roleplay struct {
Enabled bool
RequireChanops bool `yaml:"require-chanops"`
RequireOper bool `yaml:"require-oper"`
AddSuffix *bool `yaml:"add-suffix"`
addSuffix bool
NPCNickMask string `yaml:"npc-nick-mask"`
SceneNickMask string `yaml:"scene-nick-mask"`
}
Extjwt struct {
@ -1023,40 +1009,6 @@ func (config *Config) processExtjwt() (err error) {
return nil
}
func (config *Config) processAPI() (err error) {
if !config.API.Enabled {
return nil
}
if config.API.Listener == "" {
return errors.New("config.api.enabled is true, but listener address is empty")
}
config.API.bearerTokenBytes = make([][]byte, len(config.API.BearerTokens))
for i, tok := range config.API.BearerTokens {
if tok == "" || tok == "example" {
continue
}
config.API.bearerTokenBytes[i] = []byte(tok)
}
var tlsConfig *tls.Config
if config.API.TLS.Cert != "" {
cert, err := loadCertWithLeaf(config.API.TLS.Cert, config.API.TLS.Key)
if err != nil {
return err
}
tlsConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
MinVersion: tls.VersionTLS12,
// TODO consider supporting client certificates
}
}
config.API.tlsConfig = tlsConfig
return nil
}
// LoadRawConfig loads the config without doing any consistency checks or postprocessing
func LoadRawConfig(filename string) (config *Config, err error) {
data, err := os.ReadFile(filename)
@ -1578,7 +1530,6 @@ func LoadConfig(filename string) (config *Config, err error) {
config.Server.supportedCaps.Disable(caps.Chathistory)
config.Server.supportedCaps.Disable(caps.EventPlayback)
config.Server.supportedCaps.Disable(caps.ZNCPlayback)
config.Server.supportedCaps.Disable(caps.MessageRedaction)
}
if !config.History.Enabled || !config.History.Persistent.Enabled {
@ -1609,17 +1560,7 @@ func LoadConfig(filename string) (config *Config, err error) {
}
}
if !config.History.Retention.AllowIndividualDelete {
config.Server.supportedCaps.Disable(caps.MessageRedaction) // #2215
}
config.Roleplay.addSuffix = utils.BoolDefaultTrue(config.Roleplay.AddSuffix)
if config.Roleplay.NPCNickMask == "" {
config.Roleplay.NPCNickMask = defaultNPCNickMask
}
if config.Roleplay.SceneNickMask == "" {
config.Roleplay.SceneNickMask = defaultSceneNickMask
}
config.Datastore.MySQL.ExpireTime = time.Duration(config.History.Restrictions.ExpireTime)
config.Datastore.MySQL.TrackAccountMessages = config.History.Retention.EnableAccountIndexing
@ -1665,16 +1606,6 @@ func LoadConfig(filename string) (config *Config, err error) {
config.Server.supportedCaps.Disable(caps.SojuWebPush)
}
err = config.processAPI()
if err != nil {
return nil, err
}
config.Server.CommandAliases, err = normalizeCommandAliases(config.Server.CommandAliases)
if err != nil {
return nil, err
}
// now that all postprocessing is complete, regenerate ISUPPORT:
err = config.generateISupport()
if err != nil {
@ -1760,7 +1691,6 @@ func (config *Config) generateISupport() (err error) {
isupport.Add("RPUSER", "E")
}
isupport.Add("SAFELIST", "")
isupport.Add("SAFERATE", "")
isupport.Add("STATUSMSG", "~&@%+")
isupport.Add("TARGMAX", fmt.Sprintf("NAMES:1,LIST:1,KICK:,WHOIS:1,USERHOST:10,PRIVMSG:%s,TAGMSG:%s,NOTICE:%s,MONITOR:%d", maxTargetsString, maxTargetsString, maxTargetsString, config.Limits.MonitorEntries))
isupport.Add("TOPICLEN", strconv.Itoa(config.Limits.TopicLen))
@ -1779,12 +1709,6 @@ func (config *Config) generateISupport() (err error) {
}
isupport.Add("WHOX", "")
for key, value := range config.Server.AdditionalISupport {
if !isupport.Contains(key) {
isupport.Add(key, value)
}
}
err = isupport.RegenerateCachedReply()
return
}
@ -1886,9 +1810,6 @@ func (config *Config) loadMOTD() error {
if config.Server.MOTDFormatting {
lineToSend = ircfmt.Unescape(lineToSend)
}
if config.Server.EnforceUtf8 && !utf8.ValidString(lineToSend) {
return fmt.Errorf("Line %d of MOTD contains invalid UTF8", i+1)
}
// "- " is the required prefix for MOTD
lineToSend = fmt.Sprintf("- %s", lineToSend)
config.Server.motdLines = append(config.Server.motdLines, lineToSend)
@ -1896,22 +1817,3 @@ func (config *Config) loadMOTD() error {
}
return nil
}
func normalizeCommandAliases(aliases map[string]string) (normalizedAliases map[string]string, err error) {
if len(aliases) == 0 {
return nil, nil
}
normalizedAliases = make(map[string]string, len(aliases))
for alias, command := range aliases {
alias = strings.ToUpper(alias)
command = strings.ToUpper(command)
if _, found := Commands[alias]; found {
return nil, fmt.Errorf("Command alias `%s` collides with a real Ergo command", alias)
}
if _, found := Commands[command]; !found {
return nil, fmt.Errorf("Command alias `%s` mapped to non-existent Ergo command `%s`", alias, command)
}
normalizedAliases[alias] = command
}
return normalizedAliases, nil
}

View File

@ -4,18 +4,9 @@
package email
import (
"bytes"
"crypto"
"crypto/ed25519"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
dkim "github.com/toorop/go-dkim"
"os"
dkim "github.com/emersion/go-msgauth/dkim"
)
var (
@ -26,77 +17,38 @@ type DKIMConfig struct {
Domain string
Selector string
KeyFile string `yaml:"key-file"`
privKey crypto.Signer
}
func (dkim *DKIMConfig) Enabled() bool {
return dkim.Domain != ""
keyBytes []byte
}
func (dkim *DKIMConfig) Postprocess() (err error) {
if !dkim.Enabled() {
return nil
if dkim.Domain != "" {
if dkim.Selector == "" || dkim.KeyFile == "" {
return ErrMissingFields
}
dkim.keyBytes, err = os.ReadFile(dkim.KeyFile)
if err != nil {
return err
}
}
if dkim.Selector == "" || dkim.KeyFile == "" {
return ErrMissingFields
}
keyBytes, err := os.ReadFile(dkim.KeyFile)
if err != nil {
return fmt.Errorf("Could not read DKIM key file: %w", err)
}
dkim.privKey, err = parseDKIMPrivKey(keyBytes)
if err != nil {
return fmt.Errorf("Could not parse DKIM key file: %w", err)
}
return nil
}
func parseDKIMPrivKey(input []byte) (crypto.Signer, error) {
if len(input) == 0 {
return nil, errors.New("DKIM private key is empty")
}
// raw ed25519 private key format
if len(input) == ed25519.PrivateKeySize {
return ed25519.PrivateKey(input), nil
}
d, _ := pem.Decode(input)
if d == nil {
return nil, errors.New("Invalid PEM data for DKIM private key")
}
if rsaKey, err := x509.ParsePKCS1PrivateKey(d.Bytes); err == nil {
return rsaKey, nil
}
if k, err := x509.ParsePKCS8PrivateKey(d.Bytes); err == nil {
switch key := k.(type) {
case *rsa.PrivateKey:
return key, nil
case ed25519.PrivateKey:
return key, nil
default:
return nil, fmt.Errorf("Unacceptable type for DKIM private key: %T", k)
}
}
return nil, errors.New("No acceptable format for DKIM private key")
var defaultOptions = dkim.SigOptions{
Version: 1,
Canonicalization: "relaxed/relaxed",
Algo: "rsa-sha256",
Headers: []string{"from", "to", "subject", "message-id", "date"},
BodyLength: 0,
QueryMethods: []string{"dns/txt"},
AddSignatureTimestamp: true,
SignatureExpireIn: 0,
}
func DKIMSign(message []byte, dkimConfig DKIMConfig) (result []byte, err error) {
options := dkim.SignOptions{
Domain: dkimConfig.Domain,
Selector: dkimConfig.Selector,
Signer: dkimConfig.privKey,
HeaderCanonicalization: dkim.CanonicalizationRelaxed,
BodyCanonicalization: dkim.CanonicalizationRelaxed,
}
input := bytes.NewBuffer(message)
output := bytes.NewBuffer(make([]byte, 0, len(message)+1024))
err = dkim.Sign(output, input, &options)
return output.Bytes(), err
options := defaultOptions
options.PrivateKey = dkimConfig.keyBytes
options.Domain = dkimConfig.Domain
options.Selector = dkimConfig.Selector
err = dkim.Sign(&message, options)
return message, err
}

View File

@ -233,7 +233,7 @@ func SendMail(config MailtoConfig, recipient string, msg []byte) (err error) {
}
}
if config.DKIM.Enabled() {
if config.DKIM.Domain != "" {
msg, err = DKIMSign(msg, config.DKIM)
if err != nil {
return

View File

@ -223,13 +223,6 @@ func (session *Session) SetAway(awayMessage string) (wasAway, nowAway string) {
return
}
func (session *Session) ConnID() string {
if session == nil {
return "*"
}
return session.connID
}
func (client *Client) autoAwayEnabledNoMutex(config *Config) bool {
return client.registered && client.alwaysOn &&
persistenceEnabled(config.Accounts.Multiclient.AutoAway, client.accountSettings.AutoAway)
@ -665,17 +658,10 @@ func (client *Client) hasPushSubscriptions() bool {
return client.pushSubscriptionsExist.Load() != 0
}
func (client *Client) getPushSubscriptions(refresh bool) []storedPushSubscription {
if refresh {
func() {
client.stateMutex.Lock()
defer client.stateMutex.Unlock()
client.rebuildPushSubscriptionCache()
}()
}
func (client *Client) getPushSubscriptions() []storedPushSubscription {
client.stateMutex.RLock()
defer client.stateMutex.RUnlock()
return client.cachedPushSubscriptions
}

View File

@ -137,7 +137,7 @@ func sendSuccessfulAccountAuth(service *ircService, client *Client, rb *Response
}
}
client.server.logger.Info("accounts", rb.session.ConnID(), details.nick, "logged into account", details.accountName)
client.server.logger.Info("accounts", "client", details.nick, "logged into account", details.accountName)
}
func (server *Server) sendLoginSnomask(nickMask, accountName string) {
@ -1852,14 +1852,14 @@ func cmodeHandler(server *Server, client *Client, msg ircmsg.Message, rb *Respon
if 1 < len(msg.Params) {
// parse out real mode changes
params := msg.Params[1:]
var unknown []rune
var unknown map[rune]bool
changes, unknown = modes.ParseChannelModeChanges(params...)
// alert for unknown mode changes
for _, char := range unknown {
for char := range unknown {
rb.Add(nil, server.name, ERR_UNKNOWNMODE, client.nick, string(char), client.t("is an unknown mode character to me"))
}
if len(unknown) != 0 && len(changes) == 0 {
if len(unknown) == 1 && len(changes) == 0 {
return false
}
}
@ -1943,10 +1943,10 @@ func umodeHandler(server *Server, client *Client, msg ircmsg.Message, rb *Respon
changes, unknown := modes.ParseUserModeChanges(params...)
// alert for unknown mode changes
for _, char := range unknown {
for char := range unknown {
rb.Add(nil, server.name, ERR_UNKNOWNMODE, cDetails.nick, string(char), client.t("is an unknown mode character to me"))
}
if len(unknown) != 0 && len(changes) == 0 {
if len(unknown) == 1 && len(changes) == 0 {
return false
}

View File

@ -5,12 +5,12 @@ package isupport
import (
"fmt"
"slices"
"sort"
"strings"
)
const (
maxPayloadLength = 380
maxLastArgLength = 400
/* Modern: "As the maximum number of message parameters to any reply is 15,
the maximum number of RPL_ISUPPORT tokens that can be advertised is 13."
@ -47,12 +47,6 @@ func (il *List) AddNoValue(name string) {
il.Tokens[name] = ""
}
// Contains returns whether the list already contains a token
func (il *List) Contains(name string) bool {
_, ok := il.Tokens[name]
return ok
}
// getTokenString gets the appropriate string for a token+value.
func getTokenString(name string, value string) string {
if len(value) == 0 {
@ -64,7 +58,7 @@ func getTokenString(name string, value string) string {
// GetDifference returns the difference between two token lists.
func (il *List) GetDifference(newil *List) [][]string {
var outTokens []string
var outTokens sort.StringSlice
// append removed tokens
for name := range il.Tokens {
@ -90,7 +84,7 @@ func (il *List) GetDifference(newil *List) [][]string {
outTokens = append(outTokens, token)
}
slices.Sort(outTokens)
sort.Sort(outTokens)
// create output list
replies := make([][]string, 0)
@ -98,7 +92,7 @@ func (il *List) GetDifference(newil *List) [][]string {
var cache []string // Token list cache
for _, token := range outTokens {
if len(token)+length <= maxPayloadLength {
if len(token)+length <= maxLastArgLength {
// account for the space separating tokens
if len(cache) > 0 {
length++
@ -107,7 +101,7 @@ func (il *List) GetDifference(newil *List) [][]string {
length += len(token)
}
if len(cache) == maxParameters || len(token)+length >= maxPayloadLength {
if len(cache) == maxParameters || len(token)+length >= maxLastArgLength {
replies = append(replies, cache)
cache = make([]string, 0)
length = 0
@ -121,54 +115,40 @@ func (il *List) GetDifference(newil *List) [][]string {
return replies
}
func validateToken(token string) error {
if len(token) == 0 || token[0] == ':' || strings.Contains(token, " ") {
return fmt.Errorf("bad isupport token (cannot be sent as IRC parameter): `%s`", token)
}
if strings.ContainsAny(token, "\n\r\x00") {
return fmt.Errorf("bad isupport token (contains forbidden octets)")
}
// technically a token can be maxPayloadLength if it occurs alone,
// but fail it just to be safe
if len(token) >= maxPayloadLength {
return fmt.Errorf("bad isupport token (too long): `%s`", token)
}
return nil
}
// RegenerateCachedReply regenerates the cached RPL_ISUPPORT reply
func (il *List) RegenerateCachedReply() (err error) {
var tokens []string
for name, value := range il.Tokens {
token := getTokenString(name, value)
if tokenErr := validateToken(token); tokenErr == nil {
tokens = append(tokens, token)
} else {
err = tokenErr
}
}
il.CachedReply = make([][]string, 0)
var length int // Length of the current cache
var cache []string // Token list cache
// make sure we get a sorted list of tokens, needed for tests and looks nice
slices.Sort(tokens)
var tokens sort.StringSlice
for name := range il.Tokens {
tokens = append(tokens, name)
}
sort.Sort(tokens)
var cache []string // Tokens in current line
var length int // Length of the current line
for _, name := range tokens {
token := getTokenString(name, il.Tokens[name])
if token[0] == ':' || strings.Contains(token, " ") {
err = fmt.Errorf("bad isupport token (cannot contain spaces or start with :): %s", token)
continue
}
for _, token := range tokens {
// account for the space separating tokens
if len(cache) == maxParameters || (len(token)+1)+length > maxPayloadLength {
if len(token)+length <= maxLastArgLength {
// account for the space separating tokens
if len(cache) > 0 {
length++
}
cache = append(cache, token)
length += len(token)
}
if len(cache) == maxParameters || len(token)+length >= maxLastArgLength {
il.CachedReply = append(il.CachedReply, cache)
cache = nil
cache = make([]string, 0)
length = 0
}
if len(cache) > 0 {
length++
}
length += len(token)
cache = append(cache, token)
}
if len(cache) > 0 {

View File

@ -37,7 +37,7 @@ func TestISUPPORT(t *testing.T) {
}
if !reflect.DeepEqual(tListLong.CachedReply, longReplies) {
t.Errorf("Multiple output replies did not match, got [%v]", tListLong.CachedReply)
t.Errorf("Multiple output replies did not match, got [%v]", longReplies)
}
// create first list

View File

@ -116,7 +116,7 @@ func ApplyUserModeChanges(client *Client, changes modes.ModeChanges, force bool,
}
// parseDefaultModes uses the provided mode change parser to parse the rawModes.
func parseDefaultModes(rawModes string, parser func(params ...string) (modes.ModeChanges, []rune)) modes.Modes {
func parseDefaultModes(rawModes string, parser func(params ...string) (modes.ModeChanges, map[rune]bool)) modes.Modes {
modeChangeStrings := strings.Fields(rawModes)
modeChanges, _ := parser(modeChangeStrings...)
defaultModes := make(modes.Modes, 0)
@ -266,9 +266,9 @@ func (channel *Channel) ApplyChannelModeChanges(client *Client, isSamode bool, c
case modes.Add:
ch := client.server.channels.Get(change.Arg)
if ch == nil {
rb.Add(nil, client.server.name, ERR_INVALIDMODEPARAM, details.nick, chname, string(change.Mode), utils.SafeErrorParam(change.Arg), client.t("No such channel"))
rb.Add(nil, client.server.name, ERR_INVALIDMODEPARAM, details.nick, chname, string(change.Mode), utils.SafeErrorParam(change.Arg), fmt.Sprintf(client.t("No such channel")))
} else if ch == channel {
rb.Add(nil, client.server.name, ERR_INVALIDMODEPARAM, details.nick, chname, string(change.Mode), utils.SafeErrorParam(change.Arg), client.t("You can't forward a channel to itself"))
rb.Add(nil, client.server.name, ERR_INVALIDMODEPARAM, details.nick, chname, string(change.Mode), utils.SafeErrorParam(change.Arg), fmt.Sprintf(client.t("You can't forward a channel to itself")))
} else {
if isSamode || ch.ClientIsAtLeast(client, modes.ChannelOperator) {
change.Arg = ch.Name()

View File

@ -7,7 +7,7 @@ package modes
import (
"fmt"
"slices"
"sort"
"strings"
"github.com/ergochat/ergo/irc/utils"
@ -189,7 +189,10 @@ func GetLowestChannelModePrefix(prefixes string) (lowest Mode) {
//
// ParseUserModeChanges returns the valid changes, and the list of unknown chars.
func ParseUserModeChanges(params ...string) (changes ModeChanges, unknown []rune) {
func ParseUserModeChanges(params ...string) (ModeChanges, map[rune]bool) {
changes := make(ModeChanges, 0)
unknown := make(map[rune]bool)
op := List
if 0 < len(params) {
@ -216,11 +219,19 @@ func ParseUserModeChanges(params ...string) (changes ModeChanges, unknown []rune
}
}
if slices.Contains(SupportedUserModes, Mode(mode)) {
changes = append(changes, change)
} else {
unknown = append(unknown, mode)
var isKnown bool
for _, supportedMode := range SupportedUserModes {
if rune(supportedMode) == mode {
isKnown = true
break
}
}
if !isKnown {
unknown[mode] = true
continue
}
changes = append(changes, change)
}
}
@ -228,7 +239,10 @@ func ParseUserModeChanges(params ...string) (changes ModeChanges, unknown []rune
}
// ParseChannelModeChanges returns the valid changes, and the list of unknown chars.
func ParseChannelModeChanges(params ...string) (changes ModeChanges, unknown []rune) {
func ParseChannelModeChanges(params ...string) (ModeChanges, map[rune]bool) {
changes := make(ModeChanges, 0)
unknown := make(map[rune]bool)
op := List
if 0 < len(params) {
@ -290,11 +304,25 @@ func ParseChannelModeChanges(params ...string) (changes ModeChanges, unknown []r
}
}
if slices.Contains(SupportedChannelModes, Mode(mode)) || slices.Contains(ChannelUserModes, Mode(mode)) {
changes = append(changes, change)
} else {
unknown = append(unknown, mode)
var isKnown bool
for _, supportedMode := range SupportedChannelModes {
if rune(supportedMode) == mode {
isKnown = true
break
}
}
for _, supportedMode := range ChannelUserModes {
if rune(supportedMode) == mode {
isKnown = true
break
}
}
if !isKnown {
unknown[mode] = true
continue
}
changes = append(changes, change)
}
}
@ -400,37 +428,33 @@ func (set *ModeSet) HighestChannelUserMode() (result Mode) {
return
}
var (
rplMyInfo1, rplMyInfo2, rplMyInfo3, chanmodesToken string
)
type ByCodepoint Modes
func init() {
initRplMyInfo()
initChanmodesToken()
}
func (a ByCodepoint) Len() int { return len(a) }
func (a ByCodepoint) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByCodepoint) Less(i, j int) bool { return a[i] < a[j] }
func initRplMyInfo() {
// initialize constant strings published in initial numerics
func RplMyInfo() (param1, param2, param3 string) {
userModes := make(Modes, len(SupportedUserModes), len(SupportedUserModes)+1)
copy(userModes, SupportedUserModes)
// TLS is not in SupportedUserModes because it can't be modified
userModes = append(userModes, TLS)
slices.Sort(userModes)
sort.Sort(ByCodepoint(userModes))
channelModes := make(Modes, len(SupportedChannelModes)+len(ChannelUserModes))
copy(channelModes, SupportedChannelModes)
copy(channelModes[len(SupportedChannelModes):], ChannelUserModes)
slices.Sort(channelModes)
sort.Sort(ByCodepoint(channelModes))
// XXX enumerate these by hand, i can't see any way to DRY this
channelParametrizedModes := Modes{BanMask, ExceptMask, InviteMask, Key, UserLimit, Forward}
channelParametrizedModes = append(channelParametrizedModes, ChannelUserModes...)
slices.Sort(channelParametrizedModes)
sort.Sort(ByCodepoint(channelParametrizedModes))
rplMyInfo1, rplMyInfo2, rplMyInfo3 = userModes.String(), channelModes.String(), channelParametrizedModes.String()
return userModes.String(), channelModes.String(), channelParametrizedModes.String()
}
func initChanmodesToken() {
func ChanmodesToken() (result string) {
// https://modern.ircdocs.horse#chanmodes-parameter
// type A: listable modes with parameters
A := Modes{BanMask, ExceptMask, InviteMask}
@ -441,18 +465,10 @@ func initChanmodesToken() {
// type D: modes without parameters
D := Modes{InviteOnly, Moderated, NoOutside, OpOnlyTopic, ChanRoleplaying, Secret, NoCTCP, RegisteredOnly, RegisteredOnlySpeak, Auditorium, OpModerated}
slices.Sort(A)
slices.Sort(B)
slices.Sort(C)
slices.Sort(D)
sort.Sort(ByCodepoint(A))
sort.Sort(ByCodepoint(B))
sort.Sort(ByCodepoint(C))
sort.Sort(ByCodepoint(D))
chanmodesToken = fmt.Sprintf("%s,%s,%s,%s", A.String(), B.String(), C.String(), D.String())
}
func RplMyInfo() (param1, param2, param3 string) {
return rplMyInfo1, rplMyInfo2, rplMyInfo3
}
func ChanmodesToken() (result string) {
return chanmodesToken
return fmt.Sprintf("%s,%s,%s,%s", A.String(), B.String(), C.String(), D.String())
}

View File

@ -5,7 +5,6 @@ package modes
import (
"reflect"
"slices"
"strings"
"testing"
)
@ -17,7 +16,7 @@ func assertEqual(supplied, expected interface{}, t *testing.T) {
}
func TestParseUserModeChanges(t *testing.T) {
var emptyUnknown []rune
emptyUnknown := make(map[rune]bool)
changes, unknown := ParseUserModeChanges("+i")
assertEqual(unknown, emptyUnknown, t)
assertEqual(changes, ModeChanges{ModeChange{Op: Add, Mode: Invisible}}, t)
@ -49,11 +48,10 @@ func TestParseUserModeChanges(t *testing.T) {
}
func TestIssue874(t *testing.T) {
var emptyModeChanges ModeChanges
var emptyUnknown []rune
emptyUnknown := make(map[rune]bool)
modes, unknown := ParseChannelModeChanges("+k")
assertEqual(unknown, emptyUnknown, t)
assertEqual(modes, emptyModeChanges, t)
assertEqual(modes, ModeChanges{}, t)
modes, unknown = ParseChannelModeChanges("+k", "beer")
assertEqual(unknown, emptyUnknown, t)
@ -153,7 +151,7 @@ func TestParseChannelModeChanges(t *testing.T) {
}
modes, unknown = ParseChannelModeChanges("+tx")
if len(unknown) != 1 || !slices.Contains(unknown, 'x') {
if len(unknown) != 1 || !unknown['x'] {
t.Errorf("expected that x is an unknown mode, instead: %v", unknown)
}
expected = ModeChange{

View File

@ -961,7 +961,7 @@ func (mysql *MySQL) listCorrespondentsInternal(ctx context.Context, target strin
}
results = append(results, history.TargetListing{
CfName: correspondent,
Time: time.Unix(0, nanotime).UTC(),
Time: time.Unix(0, nanotime),
})
}
@ -1014,7 +1014,7 @@ func (mysql *MySQL) ListChannels(cfchannels []string) (results []history.TargetL
}
results = append(results, history.TargetListing{
CfName: target,
Time: time.Unix(0, nanotime).UTC(),
Time: time.Unix(0, nanotime),
})
}
return

View File

@ -1324,9 +1324,6 @@ func nsClientsListHandler(service *ircService, server *Server, client *Client, p
if session.deviceID != "" {
service.Notice(rb, fmt.Sprintf(client.t("Device ID: %s"), session.deviceID))
}
if hasPrivs {
service.Notice(rb, fmt.Sprintf(client.t("Debug log ID: %s"), session.connID))
}
service.Notice(rb, fmt.Sprintf(client.t("IP address: %s"), session.ip.String()))
service.Notice(rb, fmt.Sprintf(client.t("Hostname: %s"), session.hostname))
if hasPrivs {
@ -1342,6 +1339,9 @@ func nsClientsListHandler(service *ircService, server *Server, client *Client, p
service.Notice(rb, fmt.Sprintf(client.t("IRCv3 CAPs: %s"), capStr))
}
}
if hasPrivs {
service.Notice(rb, fmt.Sprintf(client.t("Debug log ID: %s"), session.connID))
}
}
}
@ -1683,13 +1683,10 @@ func nsPushHandler(service *ircService, server *Server, client *Client, command
return
}
}
subscriptions := target.getPushSubscriptions(true)
subscriptions := target.getPushSubscriptions()
service.Notice(rb, fmt.Sprintf(client.t("Nickname %[1]s has %[2]d push subscription(s)"), target.Nick(), len(subscriptions)))
for i, subscription := range subscriptions {
service.Notice(rb, fmt.Sprintf(client.t("Subscription %d:"), i+1))
service.Notice(rb, fmt.Sprintf(client.t("Endpoint: %s"), subscription.Endpoint))
service.Notice(rb, fmt.Sprintf(client.t("Last renewal: %s"), subscription.LastRefresh.Format(time.RFC1123)))
service.Notice(rb, fmt.Sprintf(client.t("Last push: %s"), subscription.LastSuccess.Format(time.RFC1123)))
service.Notice(rb, fmt.Sprintf("%d: %s", i, subscription.Endpoint))
}
case "DELETE":
if len(params) < 2 {

View File

@ -3,11 +3,8 @@
package passwd
import (
"crypto/sha3"
"golang.org/x/crypto/bcrypt"
)
import "golang.org/x/crypto/bcrypt"
import "golang.org/x/crypto/sha3"
const (
MinCost = bcrypt.MinCost

View File

@ -13,8 +13,8 @@ import (
)
const (
defaultNPCNickMask = "*%s*!%s@npc.fakeuser.invalid"
defaultSceneNickMask = "=Scene=!%s@npc.fakeuser.invalid"
npcNickMask = "*%s*!%s@npc.fakeuser.invalid"
sceneNickMask = "=Scene=!%s@npc.fakeuser.invalid"
)
func sendRoleplayMessage(server *Server, client *Client, source string, targetString string, isScene, isAction bool, messageParts []string, rb *ResponseBuffer) {
@ -30,7 +30,7 @@ func sendRoleplayMessage(server *Server, client *Client, source string, targetSt
var sourceMask string
if isScene {
sourceMask = fmt.Sprintf(server.Config().Roleplay.SceneNickMask, client.Nick())
sourceMask = fmt.Sprintf(sceneNickMask, client.Nick())
} else {
cfSource, cfSourceErr := CasefoldName(source)
skelSource, skelErr := Skeleton(source)
@ -39,7 +39,7 @@ func sendRoleplayMessage(server *Server, client *Client, source string, targetSt
rb.Add(nil, client.server.name, ERR_CANNOTSENDRP, targetString, client.t("Invalid roleplay name"))
return
}
sourceMask = fmt.Sprintf(server.Config().Roleplay.NPCNickMask, source, client.Nick())
sourceMask = fmt.Sprintf(npcNickMask, source, client.Nick())
}
// block attempts to send CTCP messages to Tor clients

View File

@ -63,8 +63,6 @@ var (
chanTypes = "#"
throttleMessage = "You have attempted to connect too many times within a short duration. Wait a while, and you will be able to connect."
httpVerbs = utils.SetLiteral("CONNECT", "DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT", "TRACE")
)
// Server is the main Oragono server.
@ -101,11 +99,6 @@ type Server struct {
flock flock.Flocker
connIDCounter atomic.Uint64
defcon atomic.Uint32
// API stuff
apiHandler http.Handler // always initialized
apiListener *utils.ReloadableListener
apiServer *http.Server // nil if API is not enabled
}
// NewServer returns a new Oragono server.
@ -132,8 +125,6 @@ func NewServer(config *Config, logger *logger.Manager) (*Server, error) {
server.monitorManager.Initialize()
server.snomasks.Initialize()
server.apiHandler = newAPIHandler(server)
if err := server.applyConfig(config); err != nil {
return nil, err
}
@ -320,7 +311,7 @@ func (server *Server) periodicPushMaintenance() {
func (server *Server) performPushMaintenance() {
expiration := time.Duration(server.Config().WebPush.Expiration)
for _, client := range server.clients.AllWithPushSubscriptions() {
for _, sub := range client.getPushSubscriptions(true) {
for _, sub := range client.getPushSubscriptions() {
now := time.Now()
// require both periodic successful push messages and renewal of the subscription via WEBPUSH REGISTER
if now.Sub(sub.LastSuccess) > expiration || now.Sub(sub.LastRefresh) > expiration {
@ -520,14 +511,14 @@ func (server *Server) sendRplISupportLines(client *Client, rb *ResponseBuffer, l
batchID := rb.StartNestedBatch(caps.ExtendedISupportBatchType)
defer rb.EndNestedBatch(batchID)
}
finalText := "are supported by this server"
translatedISupport := client.t("are supported by this server")
nick := client.Nick()
for _, cachedTokenLine := range lines {
length := len(cachedTokenLine) + 2
tokenline := make([]string, length)
tokenline[0] = nick
copy(tokenline[1:], cachedTokenLine)
tokenline[length-1] = finalText
tokenline[length-1] = translatedISupport
rb.Add(nil, server.name, RPL_ISUPPORT, tokenline...)
}
}
@ -848,8 +839,6 @@ func (server *Server) applyConfig(config *Config) (err error) {
server.setupPprofListener(config)
server.setupAPIListener(config)
// set RPL_ISUPPORT
var newISupportReplies [][]string
if oldConfig != nil {
@ -918,46 +907,6 @@ func (server *Server) setupPprofListener(config *Config) {
}
}
func (server *Server) setupAPIListener(config *Config) {
if server.apiServer != nil {
if !config.API.Enabled || (config.API.Listener != server.apiServer.Addr) {
server.logger.Info("server", "Stopping API listener", server.apiServer.Addr)
server.apiServer.Close()
server.apiListener = nil
server.apiServer = nil
}
}
if !config.API.Enabled {
return
}
listenerConfig := utils.ListenerConfig{
TLSConfig: config.API.tlsConfig,
}
if server.apiListener != nil {
server.apiListener.Reload(listenerConfig)
return
}
listener, err := net.Listen("tcp", config.API.Listener)
if err != nil {
server.logger.Error("server", "Couldn't create API listener", config.API.Listener, err.Error())
return
}
server.apiListener = utils.NewReloadableListener(listener, listenerConfig)
server.apiServer = &http.Server{
Addr: config.API.Listener, // just informational since we created the listener ourselves
Handler: server.apiHandler,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
MaxHeaderBytes: 16384,
}
go func(hs *http.Server, listener net.Listener) {
if err := hs.Serve(listener); err != nil {
server.logger.Error("server", "API listener failed", err.Error())
}
}(server.apiServer, server.apiListener)
server.logger.Info("server", "Started API listener", server.apiServer.Addr)
}
func (server *Server) loadDatastore(config *Config) error {
// open the datastore and load server state for which it (rather than config)
// is the source of truth

View File

@ -7,7 +7,7 @@ import (
"bytes"
"fmt"
"log"
"slices"
"sort"
"strings"
"time"
@ -223,6 +223,7 @@ func serviceRunCommand(service *ircService, server *Server, client *Client, cmd
return
}
server.logger.Debug("services", fmt.Sprintf("Client %s ran %s command %s", client.Nick(), service.Name, commandName))
if commandName == "help" {
serviceHelpHandler(service, server, client, params, rb)
} else {
@ -250,7 +251,7 @@ func serviceHelpHandler(service *ircService, server *Server, client *Client, par
client.t("Here are the commands you can use:"),
}...)
// show general help
var shownHelpLines []string
var shownHelpLines sort.StringSlice
var disabledCommands bool
for _, commandInfo := range service.Commands {
// skip commands user can't access
@ -268,13 +269,13 @@ func serviceHelpHandler(service *ircService, server *Server, client *Client, par
shownHelpLines = append(shownHelpLines, " "+ircfmt.Unescape(client.t(commandInfo.helpShort)))
}
// sort help lines
slices.Sort(shownHelpLines)
if disabledCommands {
shownHelpLines = append(shownHelpLines, " "+client.t("... and other commands which have been disabled"))
}
// sort help lines
sort.Sort(shownHelpLines)
// push out help text
for _, line := range helpBannerLines {
sendNotice(line)

View File

@ -233,7 +233,7 @@ func (c *Client) Auth(a Auth) error {
}
resp64 := make([]byte, encoding.EncodedLen(len(resp)))
encoding.Encode(resp64, resp)
code, msg64, err := c.cmd(0, "%s", strings.TrimSpace(fmt.Sprintf("AUTH %s %s", mech, resp64)))
code, msg64, err := c.cmd(0, strings.TrimSpace(fmt.Sprintf("AUTH %s %s", mech, resp64)))
for err == nil {
var msg []byte
switch code {
@ -259,7 +259,7 @@ func (c *Client) Auth(a Auth) error {
}
resp64 = make([]byte, encoding.EncodedLen(len(resp)))
encoding.Encode(resp64, resp)
code, msg64, err = c.cmd(0, "%s", resp64)
code, msg64, err = c.cmd(0, string(resp64))
}
return err
}

View File

@ -455,7 +455,7 @@ func ubanInfoNick(client *Client, target ubanTarget, rb *ResponseBuffer) {
rb.Notice(client.t("Warning: banning this IP or a network that contains it may affect other users. Use /UBAN INFO on the candidate IP or network for more information."))
}
} else {
rb.Notice(client.t("No client is currently using that nickname"))
rb.Notice(fmt.Sprintf(client.t("No client is currently using that nickname")))
}
account, err := client.server.accounts.LoadAccount(target.nickOrMask)

View File

@ -7,7 +7,7 @@ import "fmt"
const (
// SemVer is the semantic version of Ergo.
SemVer = "2.16.0-unreleased"
SemVer = "2.15.0-rc1"
)
var (

@ -1 +1 @@
Subproject commit e9e37f5438bd5f02656b89dab0cd40ef113edac6
Subproject commit a1324407893b603fe6b55ce7c4ee385938291ae1

View File

@ -74,7 +74,6 @@ server:
max-connections-per-duration: 64
# strict transport security, to get clients to automagically use TLS
# (irrelevant in the recommended configuration, with no public plaintext listener)
sts:
# whether to advertise STS
#
@ -348,17 +347,6 @@ server:
# if you don't want to publicize how popular the server is
suppress-lusers: false
# publish additional key-value pairs in ISUPPORT (the 005 numeric).
# keys that collide with a key published by Ergo will be silently ignored.
additional-isupport:
#"draft/FILEHOST": "https://example.com/filehost"
#"draft/bazbat": "" # empty string means no value
# optionally map command alias names to existing ergo commands. most deployments
# should ignore this.
#command-aliases:
#"UMGEBUNG": "AMBIANCE"
# account options
accounts:
# is account authentication enabled, i.e., can users log into existing accounts?
@ -924,12 +912,6 @@ roleplay:
# add the real nickname, in parentheses, to the end of every roleplay message?
add-suffix: true
# allow customizing the NUH's sent for NPC and SCENE commands
# NPC: the first %s is the NPC name, the second is the user's real nick
#npc-nick-mask: "*%s*!%s@npc.fakeuser.invalid"
# SCENE: the %s is the client's real nick
#scene-nick-mask: "=Scene=!%s@npc.fakeuser.invalid"
# external services can integrate with the ircd using JSON Web Tokens (https://jwt.io).
# in effect, the server can sign a token attesting that the client is present on
# the server, is a member of a particular channel, etc.
@ -1078,21 +1060,3 @@ webpush:
# by the client reconnecting to IRC. we also detect whether the client is no longer
# successfully receiving push messages.
expiration: 14d
# HTTP API. we strongly recommend leaving this disabled unless you have a specific
# need for it.
api:
# is the API enabled at all?
enabled: false
# listen address:
listener: "127.0.0.1:8089"
# serve over TLS (strongly recommended if the listener is public):
#tls:
#cert: fullchain.pem
#key: privkey.pem
# one or more static bearer tokens accepted for HTTP bearer authentication.
# these must be strong, unique, high-entropy printable ASCII strings.
# to generate a new token, use `ergo gentoken` or:
# python3 -c "import secrets; print(secrets.token_urlsafe(32))"
bearer-tokens:
- "example"

View File

@ -1,199 +0,0 @@
package dkim
import (
"io"
"strings"
)
// Canonicalization is a canonicalization algorithm.
type Canonicalization string
const (
CanonicalizationSimple Canonicalization = "simple"
CanonicalizationRelaxed = "relaxed"
)
type canonicalizer interface {
CanonicalizeHeader(s string) string
CanonicalizeBody(w io.Writer) io.WriteCloser
}
var canonicalizers = map[Canonicalization]canonicalizer{
CanonicalizationSimple: new(simpleCanonicalizer),
CanonicalizationRelaxed: new(relaxedCanonicalizer),
}
// crlfFixer fixes any lone LF without a preceding CR.
type crlfFixer struct {
cr bool
}
func (cf *crlfFixer) Fix(b []byte) []byte {
res := make([]byte, 0, len(b))
for _, ch := range b {
prevCR := cf.cr
cf.cr = false
switch ch {
case '\r':
cf.cr = true
case '\n':
if !prevCR {
res = append(res, '\r')
}
}
res = append(res, ch)
}
return res
}
type simpleCanonicalizer struct{}
func (c *simpleCanonicalizer) CanonicalizeHeader(s string) string {
return s
}
type simpleBodyCanonicalizer struct {
w io.Writer
crlfBuf []byte
crlfFixer crlfFixer
}
func (c *simpleBodyCanonicalizer) Write(b []byte) (int, error) {
written := len(b)
b = append(c.crlfBuf, b...)
b = c.crlfFixer.Fix(b)
end := len(b)
// If it ends with \r, maybe the next write will begin with \n
if end > 0 && b[end-1] == '\r' {
end--
}
// Keep all \r\n sequences
for end >= 2 {
prev := b[end-2]
cur := b[end-1]
if prev != '\r' || cur != '\n' {
break
}
end -= 2
}
c.crlfBuf = b[end:]
var err error
if end > 0 {
_, err = c.w.Write(b[:end])
}
return written, err
}
func (c *simpleBodyCanonicalizer) Close() error {
// Flush crlfBuf if it ends with a single \r (without a matching \n)
if len(c.crlfBuf) > 0 && c.crlfBuf[len(c.crlfBuf)-1] == '\r' {
if _, err := c.w.Write(c.crlfBuf); err != nil {
return err
}
}
c.crlfBuf = nil
if _, err := c.w.Write([]byte(crlf)); err != nil {
return err
}
return nil
}
func (c *simpleCanonicalizer) CanonicalizeBody(w io.Writer) io.WriteCloser {
return &simpleBodyCanonicalizer{w: w}
}
type relaxedCanonicalizer struct{}
func (c *relaxedCanonicalizer) CanonicalizeHeader(s string) string {
k, v, ok := strings.Cut(s, ":")
if !ok {
return strings.TrimSpace(strings.ToLower(s)) + ":" + crlf
}
k = strings.TrimSpace(strings.ToLower(k))
v = strings.Join(strings.FieldsFunc(v, func(r rune) bool {
return r == ' ' || r == '\t' || r == '\n' || r == '\r'
}), " ")
return k + ":" + v + crlf
}
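
For intuition, here is a small self-contained sketch (not part of the diff) that mirrors the relaxed header canonicalization above, per RFC 6376 section 3.4.2: the field name is lowercased and trimmed, and runs of whitespace in the value collapse to single spaces.

```go
package main

import (
	"fmt"
	"strings"
)

// relaxedHeader mirrors the CanonicalizeHeader logic shown above
// (illustrative copy; the real implementation lives inside the dkim package).
func relaxedHeader(s string) string {
	k, v, ok := strings.Cut(s, ":")
	if !ok {
		return strings.TrimSpace(strings.ToLower(s)) + ":\r\n"
	}
	k = strings.TrimSpace(strings.ToLower(k))
	v = strings.Join(strings.FieldsFunc(v, func(r rune) bool {
		return r == ' ' || r == '\t' || r == '\n' || r == '\r'
	}), " ")
	return k + ":" + v + "\r\n"
}

func main() {
	fmt.Printf("%q\n", relaxedHeader("Subject:  Hello   world\r\n"))
	// Output: "subject:Hello world\r\n"
}
```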
type relaxedBodyCanonicalizer struct {
w io.Writer
crlfBuf []byte
wsp bool
written bool
crlfFixer crlfFixer
}
func (c *relaxedBodyCanonicalizer) Write(b []byte) (int, error) {
written := len(b)
b = c.crlfFixer.Fix(b)
canonical := make([]byte, 0, len(b))
for _, ch := range b {
if ch == ' ' || ch == '\t' {
c.wsp = true
} else if ch == '\r' || ch == '\n' {
c.wsp = false
c.crlfBuf = append(c.crlfBuf, ch)
} else {
if len(c.crlfBuf) > 0 {
canonical = append(canonical, c.crlfBuf...)
c.crlfBuf = c.crlfBuf[:0]
}
if c.wsp {
canonical = append(canonical, ' ')
c.wsp = false
}
canonical = append(canonical, ch)
}
}
if !c.written && len(canonical) > 0 {
c.written = true
}
_, err := c.w.Write(canonical)
return written, err
}
func (c *relaxedBodyCanonicalizer) Close() error {
if c.written {
if _, err := c.w.Write([]byte(crlf)); err != nil {
return err
}
}
return nil
}
func (c *relaxedCanonicalizer) CanonicalizeBody(w io.Writer) io.WriteCloser {
return &relaxedBodyCanonicalizer{w: w}
}
type limitedWriter struct {
W io.Writer
N int64
}
func (w *limitedWriter) Write(b []byte) (int, error) {
if w.N <= 0 {
return len(b), nil
}
skipped := 0
if int64(len(b)) > w.N {
b = b[:w.N]
skipped = int(int64(len(b)) - w.N)
}
n, err := w.W.Write(b)
w.N -= int64(n)
return n + skipped, err
}

View File

@ -1,23 +0,0 @@
// Package dkim creates and verifies DKIM signatures, as specified in RFC 6376.
//
// # FAQ
//
// Why can't I verify a [net/mail.Message] directly? A [net/mail.Message]
// header is already parsed, and whitespace characters (especially continuation
// lines) are removed. Thus, the signature computed from the parsed header is
// not the same as the one computed from the raw header.
//
// How can I publish my public key? You have to add a TXT record to your DNS
// zone. See [RFC 6376 appendix C]. You can use the dkim-keygen tool included
// in go-msgauth to generate the key and the TXT record.
//
// [RFC 6376 appendix C]: https://tools.ietf.org/html/rfc6376#appendix-C
package dkim
import (
"time"
)
var now = time.Now
const headerFieldName = "DKIM-Signature"
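
The public key lookup described in the FAQ above can be exercised directly with the standard library. A minimal sketch, assuming a placeholder selector `sel1` and domain `example.org`; the record lives at `<selector>._domainkey.<domain>` and carries tags such as `v=DKIM1; k=rsa; p=<base64 key>`:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Placeholder name; substitute your own selector and domain.
	records, err := net.LookupTXT("sel1._domainkey.example.org")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	for _, r := range records {
		fmt.Println(r) // e.g. "v=DKIM1; k=rsa; p=MIIBIjANBgkq..."
	}
}
```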

View File

@ -1,167 +0,0 @@
package dkim
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"net/textproto"
"sort"
"strings"
)
const crlf = "\r\n"
type header []string
func readHeader(r *bufio.Reader) (header, error) {
tr := textproto.NewReader(r)
var h header
for {
l, err := tr.ReadLine()
if err != nil {
return h, fmt.Errorf("failed to read header: %v", err)
}
if len(l) == 0 {
break
} else if len(h) > 0 && (l[0] == ' ' || l[0] == '\t') {
// This is a continuation line
h[len(h)-1] += l + crlf
} else {
h = append(h, l+crlf)
}
}
return h, nil
}
func writeHeader(w io.Writer, h header) error {
for _, kv := range h {
if _, err := w.Write([]byte(kv)); err != nil {
return err
}
}
_, err := w.Write([]byte(crlf))
return err
}
func foldHeaderField(kv string) string {
buf := bytes.NewBufferString(kv)
line := make([]byte, 75) // 78 - len("\r\n\s")
first := true
var fold strings.Builder
for len, err := buf.Read(line); err != io.EOF; len, err = buf.Read(line) {
if first {
first = false
} else {
fold.WriteString("\r\n ")
}
fold.Write(line[:len])
}
return fold.String() + crlf
}
func parseHeaderField(s string) (string, string) {
key, value, _ := strings.Cut(s, ":")
return strings.TrimSpace(key), strings.TrimSpace(value)
}
func parseHeaderParams(s string) (map[string]string, error) {
pairs := strings.Split(s, ";")
params := make(map[string]string)
for _, s := range pairs {
key, value, ok := strings.Cut(s, "=")
if !ok {
if strings.TrimSpace(s) == "" {
continue
}
return params, errors.New("dkim: malformed header params")
}
params[strings.TrimSpace(key)] = strings.TrimSpace(value)
}
return params, nil
}
func formatHeaderParams(headerFieldName string, params map[string]string) string {
keys, bvalue, bfound := sortParams(params)
s := headerFieldName + ":"
var line string
for _, k := range keys {
v := params[k]
nextLength := 3 + len(line) + len(v) + len(k)
if nextLength > 75 {
s += line + crlf
line = ""
}
line = fmt.Sprintf("%v %v=%v;", line, k, v)
}
if line != "" {
s += line
}
if bfound {
bfiled := foldHeaderField(" b=" + bvalue)
s += crlf + bfiled
}
return s
}
func sortParams(params map[string]string) ([]string, string, bool) {
keys := make([]string, 0, len(params))
bfound := false
var bvalue string
for k := range params {
if k == "b" {
bvalue = params["b"]
bfound = true
} else {
keys = append(keys, k)
}
}
sort.Strings(keys)
return keys, bvalue, bfound
}
type headerPicker struct {
h header
picked map[string]int
}
func newHeaderPicker(h header) *headerPicker {
return &headerPicker{
h: h,
picked: make(map[string]int),
}
}
func (p *headerPicker) Pick(key string) string {
key = strings.ToLower(key)
at := p.picked[key]
for i := len(p.h) - 1; i >= 0; i-- {
kv := p.h[i]
k, _ := parseHeaderField(kv)
if !strings.EqualFold(k, key) {
continue
}
if at == 0 {
p.picked[key]++
return kv
}
at--
}
return ""
}

View File

@ -1,184 +0,0 @@
package dkim
import (
"crypto"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"net"
"strings"
"golang.org/x/crypto/ed25519"
)
type verifier interface {
Public() crypto.PublicKey
Verify(hash crypto.Hash, hashed []byte, sig []byte) error
}
type rsaVerifier struct {
*rsa.PublicKey
}
func (v rsaVerifier) Public() crypto.PublicKey {
return v.PublicKey
}
func (v rsaVerifier) Verify(hash crypto.Hash, hashed, sig []byte) error {
return rsa.VerifyPKCS1v15(v.PublicKey, hash, hashed, sig)
}
type ed25519Verifier struct {
ed25519.PublicKey
}
func (v ed25519Verifier) Public() crypto.PublicKey {
return v.PublicKey
}
func (v ed25519Verifier) Verify(hash crypto.Hash, hashed, sig []byte) error {
if !ed25519.Verify(v.PublicKey, hashed, sig) {
return errors.New("dkim: invalid Ed25519 signature")
}
return nil
}
type queryResult struct {
Verifier verifier
KeyAlgo string
HashAlgos []string
Notes string
Services []string
Flags []string
}
// QueryMethod is a DKIM query method.
type QueryMethod string
const (
// DNS TXT resource record (RR) lookup algorithm
QueryMethodDNSTXT QueryMethod = "dns/txt"
)
type txtLookupFunc func(domain string) ([]string, error)
type queryFunc func(domain, selector string, txtLookup txtLookupFunc) (*queryResult, error)
var queryMethods = map[QueryMethod]queryFunc{
QueryMethodDNSTXT: queryDNSTXT,
}
func queryDNSTXT(domain, selector string, txtLookup txtLookupFunc) (*queryResult, error) {
if txtLookup == nil {
txtLookup = net.LookupTXT
}
txts, err := txtLookup(selector + "._domainkey." + domain)
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
return nil, tempFailError("key unavailable: " + err.Error())
} else if err != nil {
return nil, permFailError("no key for signature: " + err.Error())
}
// net.LookupTXT will concatenate strings contained in a single TXT record.
// In other words, net.LookupTXT returns one entry per TXT record, even if
// a record contains multiple strings.
//
// RFC 6376 section 3.6.2.2 says multiple TXT records lead to undefined
// behavior, so reject that.
switch len(txts) {
case 0:
return nil, permFailError("no valid key found")
case 1:
return parsePublicKey(txts[0])
default:
return nil, permFailError("multiple TXT records found for key")
}
}
func parsePublicKey(s string) (*queryResult, error) {
params, err := parseHeaderParams(s)
if err != nil {
return nil, permFailError("key syntax error: " + err.Error())
}
res := new(queryResult)
if v, ok := params["v"]; ok && v != "DKIM1" {
return nil, permFailError("incompatible public key version")
}
p, ok := params["p"]
if !ok {
return nil, permFailError("key syntax error: missing public key data")
}
if p == "" {
return nil, permFailError("key revoked")
}
p = strings.ReplaceAll(p, " ", "")
b, err := base64.StdEncoding.DecodeString(p)
if err != nil {
return nil, permFailError("key syntax error: " + err.Error())
}
switch params["k"] {
case "rsa", "":
pub, err := x509.ParsePKIXPublicKey(b)
if err != nil {
// RFC 6376 is inconsistent about whether RSA public keys should
// be formatted as RSAPublicKey or SubjectPublicKeyInfo.
// Erratum 3017 (https://www.rfc-editor.org/errata/eid3017) proposes
// allowing both.
pub, err = x509.ParsePKCS1PublicKey(b)
if err != nil {
return nil, permFailError("key syntax error: " + err.Error())
}
}
rsaPub, ok := pub.(*rsa.PublicKey)
if !ok {
return nil, permFailError("key syntax error: not an RSA public key")
}
// RFC 8301 section 3.2: verifiers MUST NOT consider signatures using
// RSA keys of less than 1024 bits as valid signatures.
if rsaPub.Size()*8 < 1024 {
return nil, permFailError(fmt.Sprintf("key is too short: want 1024 bits, has %v bits", rsaPub.Size()*8))
}
res.Verifier = rsaVerifier{rsaPub}
res.KeyAlgo = "rsa"
case "ed25519":
if len(b) != ed25519.PublicKeySize {
return nil, permFailError(fmt.Sprintf("invalid Ed25519 public key size: %v bytes", len(b)))
}
ed25519Pub := ed25519.PublicKey(b)
res.Verifier = ed25519Verifier{ed25519Pub}
res.KeyAlgo = "ed25519"
default:
return nil, permFailError("unsupported key algorithm")
}
if hashesStr, ok := params["h"]; ok {
res.HashAlgos = parseTagList(hashesStr)
}
if notes, ok := params["n"]; ok {
res.Notes = notes
}
if servicesStr, ok := params["s"]; ok {
services := parseTagList(servicesStr)
hasWildcard := false
for _, s := range services {
if s == "*" {
hasWildcard = true
break
}
}
if !hasWildcard {
res.Services = services
}
}
if flagsStr, ok := params["t"]; ok {
res.Flags = parseTagList(flagsStr)
}
return res, nil
}

View File

@ -1,346 +0,0 @@
package dkim
import (
"bufio"
"bytes"
"crypto"
"crypto/rand"
"crypto/rsa"
"encoding/base64"
"fmt"
"io"
"strconv"
"strings"
"time"
"golang.org/x/crypto/ed25519"
)
var randReader io.Reader = rand.Reader
// SignOptions is used to configure Sign. Domain, Selector and Signer are
// mandatory.
type SignOptions struct {
// The SDID claiming responsibility for an introduction of a message into the
// mail stream. Hence, the SDID value is used to form the query for the public
// key. The SDID MUST correspond to a valid DNS name under which the DKIM key
// record is published.
//
// This can't be empty.
Domain string
// The selector subdividing the namespace for the domain.
//
// This can't be empty.
Selector string
// The Agent or User Identifier (AUID) on behalf of which the SDID is taking
// responsibility.
//
// This is optional.
Identifier string
// The key used to sign the message.
//
// Supported Signer.Public() values are *rsa.PublicKey and
// ed25519.PublicKey.
Signer crypto.Signer
// The hash algorithm used to sign the message. If zero, a default hash will
// be chosen.
//
// The only supported hash algorithm is crypto.SHA256.
Hash crypto.Hash
// Header and body canonicalization algorithms.
//
// If empty, CanonicalizationSimple is used.
HeaderCanonicalization Canonicalization
BodyCanonicalization Canonicalization
// A list of header fields to include in the signature. If nil, all headers
// will be included. If not nil, "From" MUST be in the list.
//
// See RFC 6376 section 5.4.1 for recommended header fields.
HeaderKeys []string
// The expiration time. A zero value means no expiration.
Expiration time.Time
// A list of query methods used to retrieve the public key.
//
// If nil, it is implicitly defined as QueryMethodDNSTXT.
QueryMethods []QueryMethod
}
// Signer generates a DKIM signature.
//
// The whole message header and body must be written to the Signer. Close should
// always be called (either after the whole message has been written, or after
// an error occurred and the signer won't be used anymore). Close may return an
// error in case signing fails.
//
// After a successful Close, Signature can be called to retrieve the
// DKIM-Signature header field that the caller should prepend to the message.
type Signer struct {
pw *io.PipeWriter
done <-chan error
sigParams map[string]string // only valid after done received nil
}
// NewSigner creates a new signer. It returns an error if SignOptions is
// invalid.
func NewSigner(options *SignOptions) (*Signer, error) {
if options == nil {
return nil, fmt.Errorf("dkim: no options specified")
}
if options.Domain == "" {
return nil, fmt.Errorf("dkim: no domain specified")
}
if options.Selector == "" {
return nil, fmt.Errorf("dkim: no selector specified")
}
if options.Signer == nil {
return nil, fmt.Errorf("dkim: no signer specified")
}
headerCan := options.HeaderCanonicalization
if headerCan == "" {
headerCan = CanonicalizationSimple
}
if _, ok := canonicalizers[headerCan]; !ok {
return nil, fmt.Errorf("dkim: unknown header canonicalization %q", headerCan)
}
bodyCan := options.BodyCanonicalization
if bodyCan == "" {
bodyCan = CanonicalizationSimple
}
if _, ok := canonicalizers[bodyCan]; !ok {
return nil, fmt.Errorf("dkim: unknown body canonicalization %q", bodyCan)
}
var keyAlgo string
switch options.Signer.Public().(type) {
case *rsa.PublicKey:
keyAlgo = "rsa"
case ed25519.PublicKey:
keyAlgo = "ed25519"
default:
return nil, fmt.Errorf("dkim: unsupported key algorithm %T", options.Signer.Public())
}
hash := options.Hash
var hashAlgo string
switch options.Hash {
case 0: // sha256 is the default
hash = crypto.SHA256
fallthrough
case crypto.SHA256:
hashAlgo = "sha256"
case crypto.SHA1:
return nil, fmt.Errorf("dkim: hash algorithm too weak: sha1")
default:
return nil, fmt.Errorf("dkim: unsupported hash algorithm")
}
if options.HeaderKeys != nil {
ok := false
for _, k := range options.HeaderKeys {
if strings.EqualFold(k, "From") {
ok = true
break
}
}
if !ok {
return nil, fmt.Errorf("dkim: the From header field must be signed")
}
}
done := make(chan error, 1)
pr, pw := io.Pipe()
s := &Signer{
pw: pw,
done: done,
}
closeReadWithError := func(err error) {
pr.CloseWithError(err)
done <- err
}
go func() {
defer close(done)
// Read header
br := bufio.NewReader(pr)
h, err := readHeader(br)
if err != nil {
closeReadWithError(err)
return
}
// Hash body
hasher := hash.New()
can := canonicalizers[bodyCan].CanonicalizeBody(hasher)
if _, err := io.Copy(can, br); err != nil {
closeReadWithError(err)
return
}
if err := can.Close(); err != nil {
closeReadWithError(err)
return
}
bodyHashed := hasher.Sum(nil)
params := map[string]string{
"v": "1",
"a": keyAlgo + "-" + hashAlgo,
"bh": base64.StdEncoding.EncodeToString(bodyHashed),
"c": string(headerCan) + "/" + string(bodyCan),
"d": options.Domain,
//"l": "", // TODO
"s": options.Selector,
"t": formatTime(now()),
//"z": "", // TODO
}
var headerKeys []string
if options.HeaderKeys != nil {
headerKeys = options.HeaderKeys
} else {
for _, kv := range h {
k, _ := parseHeaderField(kv)
headerKeys = append(headerKeys, k)
}
}
params["h"] = formatTagList(headerKeys)
if options.Identifier != "" {
params["i"] = options.Identifier
}
if options.QueryMethods != nil {
methods := make([]string, len(options.QueryMethods))
for i, method := range options.QueryMethods {
methods[i] = string(method)
}
params["q"] = formatTagList(methods)
}
if !options.Expiration.IsZero() {
params["x"] = formatTime(options.Expiration)
}
// Hash and sign headers
hasher.Reset()
picker := newHeaderPicker(h)
for _, k := range headerKeys {
kv := picker.Pick(k)
if kv == "" {
// The Signer MAY include more instances of a header field name
// in "h=" than there are actual corresponding header fields so
// that the signature will not verify if additional header
// fields of that name are added.
continue
}
kv = canonicalizers[headerCan].CanonicalizeHeader(kv)
if _, err := io.WriteString(hasher, kv); err != nil {
closeReadWithError(err)
return
}
}
params["b"] = ""
sigField := formatSignature(params)
sigField = canonicalizers[headerCan].CanonicalizeHeader(sigField)
sigField = strings.TrimRight(sigField, crlf)
if _, err := io.WriteString(hasher, sigField); err != nil {
closeReadWithError(err)
return
}
hashed := hasher.Sum(nil)
// Don't pass Hash to Sign for ed25519 as it doesn't support it
// and will return an error ("ed25519: cannot sign hashed message").
if keyAlgo == "ed25519" {
hash = crypto.Hash(0)
}
sig, err := options.Signer.Sign(randReader, hashed, hash)
if err != nil {
closeReadWithError(err)
return
}
params["b"] = base64.StdEncoding.EncodeToString(sig)
s.sigParams = params
closeReadWithError(nil)
}()
return s, nil
}
// Write implements io.WriteCloser.
func (s *Signer) Write(b []byte) (n int, err error) {
return s.pw.Write(b)
}
// Close implements io.WriteCloser. The error return by Close must be checked.
func (s *Signer) Close() error {
if err := s.pw.Close(); err != nil {
return err
}
return <-s.done
}
// Signature returns the whole DKIM-Signature header field. It can only be
// called after a successful Signer.Close call.
//
// The returned value contains both the header field name, its value and the
// final CRLF.
func (s *Signer) Signature() string {
if s.sigParams == nil {
panic("dkim: Signer.Signature must only be called after a succesful Signer.Close")
}
return formatSignature(s.sigParams)
}
// Sign signs a message. It reads it from r and writes the signed version to w.
func Sign(w io.Writer, r io.Reader, options *SignOptions) error {
s, err := NewSigner(options)
if err != nil {
return err
}
defer s.Close()
// We need to keep the message in a buffer so we can write the new DKIM
// header field before the rest of the message
var b bytes.Buffer
mw := io.MultiWriter(&b, s)
if _, err := io.Copy(mw, r); err != nil {
return err
}
if err := s.Close(); err != nil {
return err
}
if _, err := io.WriteString(w, s.Signature()); err != nil {
return err
}
_, err = io.Copy(w, &b)
return err
}
func formatSignature(params map[string]string) string {
sig := formatHeaderParams(headerFieldName, params)
return sig
}
func formatTagList(l []string) string {
return strings.Join(l, ":")
}
func formatTime(t time.Time) string {
return strconv.FormatInt(t.Unix(), 10)
}
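
A minimal sketch of how the `Sign` helper above is meant to be used, per its doc comment. The import path is assumed to be the upstream go-msgauth module, and the key, domain, selector, and message are placeholders:

```go
package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"log"
	"strings"

	"github.com/emersion/go-msgauth/dkim" // assumed import path for this vendored package
)

func main() {
	// Placeholder key purely for illustration; a real deployment would load the
	// private key whose public half is published in DNS.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	msg := "From: alice@example.org\r\n" +
		"To: bob@example.org\r\n" +
		"Subject: hello\r\n" +
		"\r\n" +
		"Hi!\r\n"

	opts := &dkim.SignOptions{
		Domain:   "example.org", // placeholder SDID
		Selector: "sel1",        // placeholder selector
		Signer:   key,
	}

	var signed bytes.Buffer
	if err := dkim.Sign(&signed, strings.NewReader(msg), opts); err != nil {
		log.Fatal(err)
	}
	// signed now begins with the DKIM-Signature header field, followed by msg.
}
```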

View File

@ -1,462 +0,0 @@
package dkim
import (
"bufio"
"crypto"
"crypto/subtle"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"regexp"
"strconv"
"strings"
"time"
"unicode"
)
type permFailError string
func (err permFailError) Error() string {
return "dkim: " + string(err)
}
// IsPermFail returns true if the error returned by Verify is a permanent
// failure. A permanent failure is for instance a missing required field or a
// malformed header.
func IsPermFail(err error) bool {
_, ok := err.(permFailError)
return ok
}
type tempFailError string
func (err tempFailError) Error() string {
return "dkim: " + string(err)
}
// IsTempFail returns true if the error returned by Verify is a temporary
// failure.
func IsTempFail(err error) bool {
_, ok := err.(tempFailError)
return ok
}
type failError string
func (err failError) Error() string {
return "dkim: " + string(err)
}
// isFail returns true if the error returned by Verify is a signature error.
func isFail(err error) bool {
_, ok := err.(failError)
return ok
}
// ErrTooManySignatures is returned by Verify when the message exceeds the
// maximum number of signatures.
var ErrTooManySignatures = errors.New("dkim: too many signatures")
var requiredTags = []string{"v", "a", "b", "bh", "d", "h", "s"}
// A Verification is produced by Verify when it checks if one signature is
// valid. If the signature is valid, Err is nil.
type Verification struct {
// The SDID claiming responsibility for an introduction of a message into the
// mail stream.
Domain string
// The Agent or User Identifier (AUID) on behalf of which the SDID is taking
// responsibility.
Identifier string
// The list of signed header fields.
HeaderKeys []string
// The time that this signature was created. If unknown, it's set to zero.
Time time.Time
// The expiration time. If the signature doesn't expire, it's set to zero.
Expiration time.Time
// Err is nil if the signature is valid.
Err error
}
type signature struct {
i int
v string
}
// VerifyOptions allows to customize the default signature verification
// behavior.
type VerifyOptions struct {
// LookupTXT returns the DNS TXT records for the given domain name. If nil,
// net.LookupTXT is used.
LookupTXT func(domain string) ([]string, error)
// MaxVerifications controls the maximum number of signature verifications
// to perform. If more signatures are present, the first MaxVerifications
// signatures are verified, the rest are ignored and ErrTooManySignatures
// is returned. If zero, there is no maximum.
MaxVerifications int
}
// Verify checks if a message's signatures are valid. It returns one
// verification per signature.
//
// There is no guarantee that the reader will be completely consumed.
func Verify(r io.Reader) ([]*Verification, error) {
return VerifyWithOptions(r, nil)
}
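
A minimal sketch of driving `Verify` as documented above (import path again assumed to be the upstream go-msgauth module; reading the raw message from stdin is a placeholder choice):

```go
package main

import (
	"fmt"
	"os"

	"github.com/emersion/go-msgauth/dkim" // assumed import path for this vendored package
)

func main() {
	// Feed the raw message (headers + body, CRLF line endings) on stdin.
	verifications, err := dkim.Verify(os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, "verify:", err)
		os.Exit(1)
	}
	for _, v := range verifications {
		if v.Err == nil {
			fmt.Println("valid signature from", v.Domain)
		} else {
			fmt.Println("invalid signature from", v.Domain, "-", v.Err)
		}
	}
}
```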
// VerifyWithOptions performs the same task as Verify, but allows specifying
// verification options.
func VerifyWithOptions(r io.Reader, options *VerifyOptions) ([]*Verification, error) {
// Read header
bufr := bufio.NewReader(r)
h, err := readHeader(bufr)
if err != nil {
return nil, err
}
// Scan header fields for signatures
var signatures []*signature
for i, kv := range h {
k, v := parseHeaderField(kv)
if strings.EqualFold(k, headerFieldName) {
signatures = append(signatures, &signature{i, v})
}
}
tooManySignatures := false
if options != nil && options.MaxVerifications > 0 && len(signatures) > options.MaxVerifications {
tooManySignatures = true
signatures = signatures[:options.MaxVerifications]
}
var verifs []*Verification
if len(signatures) == 1 {
// If there is only one signature - just verify it.
v, err := verify(h, bufr, h[signatures[0].i], signatures[0].v, options)
if err != nil && !IsTempFail(err) && !IsPermFail(err) && !isFail(err) {
return nil, err
}
v.Err = err
verifs = []*Verification{v}
} else {
verifs, err = parallelVerify(bufr, h, signatures, options)
if err != nil {
return nil, err
}
}
if tooManySignatures {
return verifs, ErrTooManySignatures
}
return verifs, nil
}
func parallelVerify(r io.Reader, h header, signatures []*signature, options *VerifyOptions) ([]*Verification, error) {
pipeWriters := make([]*io.PipeWriter, len(signatures))
// We can't pass pipeWriter to io.MultiWriter directly,
// we need a slice of io.Writer, but we also need *io.PipeWriter
// to call Close on it.
writers := make([]io.Writer, len(signatures))
chans := make([]chan *Verification, len(signatures))
for i, sig := range signatures {
// Be careful with loop variables and goroutines.
i, sig := i, sig
chans[i] = make(chan *Verification, 1)
pr, pw := io.Pipe()
writers[i] = pw
pipeWriters[i] = pw
go func() {
v, err := verify(h, pr, h[sig.i], sig.v, options)
// Make sure we consume the whole reader, otherwise io.Copy on
// other side can block forever.
io.Copy(ioutil.Discard, pr)
v.Err = err
chans[i] <- v
}()
}
if _, err := io.Copy(io.MultiWriter(writers...), r); err != nil {
return nil, err
}
for _, wr := range pipeWriters {
wr.Close()
}
verifications := make([]*Verification, len(signatures))
for i, ch := range chans {
verifications[i] = <-ch
}
// Return unexpected failures as a separate error.
for _, v := range verifications {
err := v.Err
if err != nil && !IsTempFail(err) && !IsPermFail(err) && !isFail(err) {
v.Err = nil
return verifications, err
}
}
return verifications, nil
}
func verify(h header, r io.Reader, sigField, sigValue string, options *VerifyOptions) (*Verification, error) {
verif := new(Verification)
params, err := parseHeaderParams(sigValue)
if err != nil {
return verif, permFailError("malformed signature tags: " + err.Error())
}
if params["v"] != "1" {
return verif, permFailError("incompatible signature version")
}
verif.Domain = stripWhitespace(params["d"])
for _, tag := range requiredTags {
if _, ok := params[tag]; !ok {
return verif, permFailError("signature missing required tag")
}
}
if i, ok := params["i"]; ok {
verif.Identifier = stripWhitespace(i)
if !strings.HasSuffix(verif.Identifier, "@"+verif.Domain) && !strings.HasSuffix(verif.Identifier, "."+verif.Domain) {
return verif, permFailError("domain mismatch")
}
} else {
verif.Identifier = "@" + verif.Domain
}
headerKeys := parseTagList(params["h"])
ok := false
for _, k := range headerKeys {
if strings.EqualFold(k, "from") {
ok = true
break
}
}
if !ok {
return verif, permFailError("From field not signed")
}
verif.HeaderKeys = headerKeys
if timeStr, ok := params["t"]; ok {
t, err := parseTime(timeStr)
if err != nil {
return verif, permFailError("malformed time: " + err.Error())
}
verif.Time = t
}
if expiresStr, ok := params["x"]; ok {
t, err := parseTime(expiresStr)
if err != nil {
return verif, permFailError("malformed expiration time: " + err.Error())
}
verif.Expiration = t
if now().After(t) {
return verif, permFailError("signature has expired")
}
}
// Query public key
// TODO: compute hash in parallel
methods := []string{string(QueryMethodDNSTXT)}
if methodsStr, ok := params["q"]; ok {
methods = parseTagList(methodsStr)
}
var res *queryResult
for _, method := range methods {
if query, ok := queryMethods[QueryMethod(method)]; ok {
if options != nil {
res, err = query(verif.Domain, stripWhitespace(params["s"]), options.LookupTXT)
} else {
res, err = query(verif.Domain, stripWhitespace(params["s"]), nil)
}
break
}
}
if err != nil {
return verif, err
} else if res == nil {
return verif, permFailError("unsupported public key query method")
}
// Parse algos
keyAlgo, hashAlgo, ok := strings.Cut(stripWhitespace(params["a"]), "-")
if !ok {
return verif, permFailError("malformed algorithm name")
}
// Check hash algo
if res.HashAlgos != nil {
ok := false
for _, algo := range res.HashAlgos {
if algo == hashAlgo {
ok = true
break
}
}
if !ok {
return verif, permFailError("inappropriate hash algorithm")
}
}
var hash crypto.Hash
switch hashAlgo {
case "sha1":
// RFC 8301 section 3.1: rsa-sha1 MUST NOT be used for signing or
// verifying.
return verif, permFailError(fmt.Sprintf("hash algorithm too weak: %v", hashAlgo))
case "sha256":
hash = crypto.SHA256
default:
return verif, permFailError("unsupported hash algorithm")
}
// Check key algo
if res.KeyAlgo != keyAlgo {
return verif, permFailError("inappropriate key algorithm")
}
if res.Services != nil {
ok := false
for _, s := range res.Services {
if s == "email" {
ok = true
break
}
}
if !ok {
return verif, permFailError("inappropriate service")
}
}
headerCan, bodyCan := parseCanonicalization(params["c"])
if _, ok := canonicalizers[headerCan]; !ok {
return verif, permFailError("unsupported header canonicalization algorithm")
}
if _, ok := canonicalizers[bodyCan]; !ok {
return verif, permFailError("unsupported body canonicalization algorithm")
}
// The body length "l" parameter is insecure, because it allows parts of
// the message body to not be signed. Reject messages which have it set.
if _, ok := params["l"]; ok {
// TODO: technically should be policyError
return verif, failError("message contains an insecure body length tag")
}
// Parse body hash and signature
bodyHashed, err := decodeBase64String(params["bh"])
if err != nil {
return verif, permFailError("malformed body hash: " + err.Error())
}
sig, err := decodeBase64String(params["b"])
if err != nil {
return verif, permFailError("malformed signature: " + err.Error())
}
// Check body hash
hasher := hash.New()
wc := canonicalizers[bodyCan].CanonicalizeBody(hasher)
if _, err := io.Copy(wc, r); err != nil {
return verif, err
}
if err := wc.Close(); err != nil {
return verif, err
}
if subtle.ConstantTimeCompare(hasher.Sum(nil), bodyHashed) != 1 {
return verif, failError("body hash did not verify")
}
// Compute data hash
hasher.Reset()
picker := newHeaderPicker(h)
for _, key := range headerKeys {
kv := picker.Pick(key)
if kv == "" {
// The field MAY contain names of header fields that do not exist
// when signed; nonexistent header fields do not contribute to the
// signature computation
continue
}
kv = canonicalizers[headerCan].CanonicalizeHeader(kv)
if _, err := hasher.Write([]byte(kv)); err != nil {
return verif, err
}
}
canSigField := removeSignature(sigField)
canSigField = canonicalizers[headerCan].CanonicalizeHeader(canSigField)
canSigField = strings.TrimRight(canSigField, "\r\n")
if _, err := hasher.Write([]byte(canSigField)); err != nil {
return verif, err
}
hashed := hasher.Sum(nil)
// Check signature
if err := res.Verifier.Verify(hash, hashed, sig); err != nil {
return verif, failError("signature did not verify: " + err.Error())
}
return verif, nil
}
func parseTagList(s string) []string {
tags := strings.Split(s, ":")
for i, t := range tags {
tags[i] = stripWhitespace(t)
}
return tags
}
func parseCanonicalization(s string) (headerCan, bodyCan Canonicalization) {
headerCan = CanonicalizationSimple
bodyCan = CanonicalizationSimple
cans := strings.SplitN(stripWhitespace(s), "/", 2)
if cans[0] != "" {
headerCan = Canonicalization(cans[0])
}
if len(cans) > 1 {
bodyCan = Canonicalization(cans[1])
}
return
}
func parseTime(s string) (time.Time, error) {
sec, err := strconv.ParseInt(stripWhitespace(s), 10, 64)
if err != nil {
return time.Time{}, err
}
return time.Unix(sec, 0), nil
}
func decodeBase64String(s string) ([]byte, error) {
return base64.StdEncoding.DecodeString(stripWhitespace(s))
}
func stripWhitespace(s string) string {
return strings.Map(func(r rune) rune {
if unicode.IsSpace(r) {
return -1
}
return r
}, s)
}
var sigRegex = regexp.MustCompile(`(b\s*=)[^;]+`)
func removeSignature(s string) string {
return sigRegex.ReplaceAllString(s, "$1")
}

View File

@ -1,7 +1,7 @@
# Changelog
All notable changes to webpush-go will be documented in this file.
## [2.0.0] - 2025-01-16
## [2.0.0] - 2025-01-01
* Update the `Keys` struct definition to store `Auth` as `[16]byte` and `P256dh` as `*ecdh.PublicKey`
* `Keys` can no longer be compared with `==`; use `(*Keys.Equal)` instead
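
To make the second point concrete, a hedged, self-contained sketch using a stand-in struct (not the library's actual `Keys` type): once a field is a pointer like `*ecdh.PublicKey`, `==` compares pointer identity rather than key material, which is why the fork now exposes an `Equal` method.

```go
package main

import (
	"crypto/ecdh"
	"crypto/rand"
	"fmt"
)

// standInKeys mirrors the shape described in the changelog entry above;
// it is NOT the library's actual Keys type.
type standInKeys struct {
	Auth   [16]byte
	P256dh *ecdh.PublicKey
}

func main() {
	priv, _ := ecdh.P256().GenerateKey(rand.Reader)
	a := standInKeys{P256dh: priv.PublicKey()}

	// Round-trip the same public key through its byte encoding to get a distinct pointer.
	pub2, _ := ecdh.P256().NewPublicKey(a.P256dh.Bytes())
	b := standInKeys{P256dh: pub2}

	fmt.Println(a == b)                   // false: compares the P256dh pointers
	fmt.Println(a.P256dh.Equal(b.P256dh)) // true: compares the actual key material
}
```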

View File

@ -4,7 +4,7 @@
Web Push API Encryption with VAPID support.
This library is a fork of [SherClockHolmes/webpush-go](https://github.com/SherClockHolmes/webpush-go). See CHANGELOG.md for details on migrating from the upstream library.
This library is a fork of [SherClockHolmes/webpush-go](https://github.com/SherClockHolmes/webpush-go).
```bash
go get -u github.com/ergochat/webpush-go/v2

View File

@ -29,7 +29,7 @@ var (
invalidAuthKeyLength = errors.New("invalid auth key length (must be 16)")
defaultHTTPClient HTTPClient = &http.Client{}
defaultHTTPClient = &http.Client{}
)
// HTTPClient is an interface for sending the notification HTTP request / testing

View File

@ -10,11 +10,11 @@ implementation of [JSON Web
Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0)
this project adds Go module support, but maintains backward compatibility with
this project adds Go module support, but maintains backwards compatibility with
older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the
[`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version
v5.0.0 introduces major improvements to the validation of tokens, but is not
entirely backward compatible.
entirely backwards compatible.
> After the original author of the library suggested migrating the maintenance
> of `jwt-go`, a dedicated team of open source maintainers decided to clone the
@ -24,7 +24,7 @@ entirely backward compatible.
**SECURITY NOTICE:** Some older versions of Go have a security issue in the
crypto/elliptic. The recommendation is to upgrade to at least 1.15 See issue
crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue
[dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more
detail.
@ -32,7 +32,7 @@ detail.
what you
expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/).
This library attempts to make it easy to do the right thing by requiring key
types to match the expected alg, but you should take the extra step to verify it in
types match the expected alg, but you should take the extra step to verify it in
your usage. See the examples provided.
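
For example, a hedged sketch of that extra step with an HMAC-signed token (assuming the v5 module path; the claims and secret are placeholders):

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v5" // assumed v5 import path
)

func main() {
	secret := []byte("placeholder-secret")

	// Create a token purely for demonstration.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "demo"}).SignedString(secret)
	if err != nil {
		panic(err)
	}

	// The Keyfunc is where to verify the signing method before handing back the key.
	tok, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return secret, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("token valid:", tok.Valid)
}
```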
### Supported Go versions
@ -41,7 +41,7 @@ Our support of Go versions is aligned with Go's [version release
policy](https://golang.org/doc/devel/release#policy). So we will support a major
version of Go until there are two newer major releases. We no longer support
building jwt-go with unsupported Go versions, as these contain security
vulnerabilities that will not be fixed.
vulnerabilities which will not be fixed.
## What the heck is a JWT?
@ -117,7 +117,7 @@ notable differences:
This library is considered production ready. Feedback and feature requests are
appreciated. The API should be considered stable. There should be very few
backward-incompatible changes outside of major version updates (and only with
backwards-incompatible changes outside of major version updates (and only with
good reason).
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull
@ -125,8 +125,8 @@ requests will land on `main`. Periodically, versions will be tagged from
`main`. You can find all the releases on [the project releases
page](https://github.com/golang-jwt/jwt/releases).
**BREAKING CHANGES:** A full list of breaking changes is available in
`VERSION_HISTORY.md`. See [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information on updating
**BREAKING CHANGES:*** A full list of breaking changes is available in
`VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating
your code.
## Extensions

View File

@ -2,11 +2,11 @@
## Supported Versions
As of November 2024 (and until this document is updated), the latest version `v5` is supported. In critical cases, we might supply back-ported patches for `v4`.
As of February 2022 (and until this document is updated), the latest version `v4` is supported.
## Reporting a Vulnerability
If you think you found a vulnerability, and even if you are not sure, please report it via a [GitHub Security Advisory](https://github.com/golang-jwt/jwt/security/advisories/new). Please try to be explicit, and describe steps to reproduce the security issue with code example(s).
If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try to be explicit, and describe steps to reproduce the security issue with code example(s).
You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.

View File

@ -8,8 +8,6 @@ import (
"strings"
)
const tokenDelimiter = "."
type Parser struct {
// If populated, only these methods will be considered valid.
validMethods []string
@ -138,10 +136,9 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
// It's only ever useful in cases where you know the signature is valid (since it has already
// been or will be checked elsewhere in the stack) and you want to extract values from it.
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
var ok bool
parts, ok = splitToken(tokenString)
if !ok {
return nil, nil, newError("token contains an invalid number of segments", ErrTokenMalformed)
parts = strings.Split(tokenString, ".")
if len(parts) != 3 {
return nil, parts, newError("token contains an invalid number of segments", ErrTokenMalformed)
}
token = &Token{Raw: tokenString}
@ -199,33 +196,6 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
return token, parts, nil
}
// splitToken splits a token string into three parts: header, claims, and signature. It will only
// return true if the token contains exactly two delimiters and three parts. In all other cases, it
// will return nil parts and false.
func splitToken(token string) ([]string, bool) {
parts := make([]string, 3)
header, remain, ok := strings.Cut(token, tokenDelimiter)
if !ok {
return nil, false
}
parts[0] = header
claims, remain, ok := strings.Cut(remain, tokenDelimiter)
if !ok {
return nil, false
}
parts[1] = claims
// One more cut to ensure the signature is the last part of the token and there are no more
// delimiters. This avoids an issue where malicious input could contain additional delimiters
// causing unnecessary overhead parsing tokens.
signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
if unexpected {
return nil, false
}
parts[2] = signature
return parts, true
}
// DecodeSegment decodes a JWT specific base64url encoding. This function will
// take into account whether the [Parser] is configured with additional options,
// such as [WithStrictDecoding] or [WithPaddingAllowed].

View File

@ -75,7 +75,7 @@ func (t *Token) SignedString(key interface{}) (string, error) {
}
// SigningString generates the signing string. This is the most expensive part
// of the whole deal. Unless you need this for something special, just go
// of the whole deal. Unless you need this for something special, just go
// straight for the SignedString.
func (t *Token) SigningString() (string, error) {
h, err := json.Marshal(t.Header)

View File

@ -10,7 +10,6 @@ import (
"fmt"
"io"
"os"
"runtime"
"sort"
"strconv"
"strings"
@ -754,7 +753,7 @@ func (db *DB) Shrink() error {
return err
}
// Any failures below here are really bad. So just panic.
if err := renameFile(tmpname, fname); err != nil {
if err := os.Rename(tmpname, fname); err != nil {
panicErr(err)
}
db.file, err = os.OpenFile(fname, os.O_CREATE|os.O_RDWR, 0666)
@ -774,18 +773,6 @@ func panicErr(err error) error {
panic(fmt.Errorf("buntdb: %w", err))
}
func renameFile(src, dest string) error {
var err error
if err = os.Rename(src, dest); err != nil {
if runtime.GOOS == "windows" {
if err = os.Remove(dest); err == nil {
err = os.Rename(src, dest)
}
}
}
return err
}
// readLoad reads from the reader and loads commands into the database.
// modTime is the modified time of the reader, should be no greater than
// the current time.Now().

24
vendor/github.com/toorop/go-dkim/.gitignore generated vendored Normal file
View File

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@ -1,6 +1,6 @@
MIT License
The MIT License (MIT)
Copyright (c) 2017 emersion
Copyright (c) 2015 Stéphane Depierrepont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -19,3 +19,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

56
vendor/github.com/toorop/go-dkim/README.md generated vendored Normal file
View File

@ -0,0 +1,56 @@
# go-dkim
DKIM package for Golang
[![GoDoc](https://godoc.org/github.com/toorop/go-dkim?status.svg)](https://godoc.org/github.com/toorop/go-dkim)
## Getting started
### Install
```
go get github.com/toorop/go-dkim
```
Warning: you need to use Go 1.4.2-master or 1.4.3 (when it will be available)
see https://github.com/golang/go/issues/10482 for more info.
### Sign email
```go
import (
dkim "github.com/toorop/go-dkim"
)
func main(){
// email is the email to sign (byte slice)
// privateKey the private key (pem encoded, byte slice )
options := dkim.NewSigOptions()
options.PrivateKey = privateKey
options.Domain = "mydomain.tld"
options.Selector = "myselector"
options.SignatureExpireIn = 3600
options.BodyLength = 50
options.Headers = []string{"from", "date", "mime-version", "received", "received"}
options.AddSignatureTimestamp = true
options.Canonicalization = "relaxed/relaxed"
err := dkim.Sign(&email, options)
// handle err..
// And... that's it, 'email' is signed ! Amazing© !!!
}
```
### Verify
```go
import (
dkim "github.com/toorop/go-dkim"
)
func main(){
// email is the email to verify (byte slice)
status, err := Verify(&email)
// handle status, err (see godoc for status)
}
```
## Todo
- [ ] handle z tag (copied header fields used for diagnostic use)

564
vendor/github.com/toorop/go-dkim/dkim.go generated vendored Normal file
View File

@ -0,0 +1,564 @@
// Package dkim provides tools for signing and verifying an email according to RFC 6376
package dkim
import (
"bytes"
"container/list"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"hash"
"regexp"
"strings"
"time"
)
const (
CRLF = "\r\n"
TAB = " "
FWS = CRLF + TAB
MaxHeaderLineLength = 70
)
type verifyOutput int
const (
SUCCESS verifyOutput = 1 + iota
PERMFAIL
TEMPFAIL
NOTSIGNED
TESTINGSUCCESS
TESTINGPERMFAIL
TESTINGTEMPFAIL
)
// SigOptions represents signing options
type SigOptions struct {
// DKIM version (default 1)
Version uint
// Private key used for signing (required)
PrivateKey []byte
// Domain (required)
Domain string
// Selector (required)
Selector string
	// The Agent or User Identifier (AUID)
Auid string
// Message canonicalization (plain-text; OPTIONAL, default is
// "simple/simple"). This tag informs the Verifier of the type of
// canonicalization used to prepare the message for signing.
Canonicalization string
// The algorithm used to generate the signature
//"rsa-sha1" or "rsa-sha256"
Algo string
// Signed header fields
Headers []string
// Body length count (if set to 0 this tag is omitted in the DKIM header)
BodyLength uint
// Query Methods used to retrieve the public key
QueryMethods []string
// Add a signature timestamp
AddSignatureTimestamp bool
// Time validity of the signature (0=never)
SignatureExpireIn uint64
// CopiedHeaderFields
CopiedHeaderFields []string
}
// NewSigOptions returns a new SigOptions with some default values
func NewSigOptions() SigOptions {
return SigOptions{
Version: 1,
Canonicalization: "simple/simple",
Algo: "rsa-sha256",
Headers: []string{"from"},
BodyLength: 0,
QueryMethods: []string{"dns/txt"},
AddSignatureTimestamp: true,
SignatureExpireIn: 0,
}
}
// Sign signs an email
func Sign(email *[]byte, options SigOptions) error {
var privateKey *rsa.PrivateKey
var err error
// PrivateKey
if len(options.PrivateKey) == 0 {
return ErrSignPrivateKeyRequired
}
d, _ := pem.Decode(options.PrivateKey)
if d == nil {
return ErrCandNotParsePrivateKey
}
// try to parse it as PKCS1 otherwise try PKCS8
if key, err := x509.ParsePKCS1PrivateKey(d.Bytes); err != nil {
if key, err := x509.ParsePKCS8PrivateKey(d.Bytes); err != nil {
return ErrCandNotParsePrivateKey
} else {
privateKey = key.(*rsa.PrivateKey)
}
} else {
privateKey = key
}
// Domain required
if options.Domain == "" {
return ErrSignDomainRequired
}
// Selector required
if options.Selector == "" {
return ErrSignSelectorRequired
}
// Canonicalization
options.Canonicalization, err = validateCanonicalization(strings.ToLower(options.Canonicalization))
if err != nil {
return err
}
// Algo
options.Algo = strings.ToLower(options.Algo)
if options.Algo != "rsa-sha1" && options.Algo != "rsa-sha256" {
return ErrSignBadAlgo
}
// Header must contain "from"
hasFrom := false
for i, h := range options.Headers {
h = strings.ToLower(h)
options.Headers[i] = h
if h == "from" {
hasFrom = true
}
}
if !hasFrom {
return ErrSignHeaderShouldContainsFrom
}
// Normalize
headers, body, err := canonicalize(email, options.Canonicalization, options.Headers)
if err != nil {
return err
}
signHash := strings.Split(options.Algo, "-")
// hash body
bodyHash, err := getBodyHash(&body, signHash[1], options.BodyLength)
if err != nil {
return err
}
// Get dkim header base
dkimHeader := newDkimHeaderBySigOptions(options)
dHeader := dkimHeader.getHeaderBaseForSigning(bodyHash)
canonicalizations := strings.Split(options.Canonicalization, "/")
dHeaderCanonicalized, err := canonicalizeHeader(dHeader, canonicalizations[0])
if err != nil {
return err
}
headers = append(headers, []byte(dHeaderCanonicalized)...)
headers = bytes.TrimRight(headers, " \r\n")
// sign
sig, err := getSignature(&headers, privateKey, signHash[1])
// add to DKIM-Header
subh := ""
l := len(subh)
for _, c := range sig {
subh += string(c)
l++
if l >= MaxHeaderLineLength {
dHeader += subh + FWS
subh = ""
l = 0
}
}
dHeader += subh + CRLF
*email = append([]byte(dHeader), *email...)
return nil
}
// Verify verifies an email and returns
// state: SUCCESS or PERMFAIL or TEMPFAIL, TESTINGSUCCESS, TESTINGPERMFAIL
// TESTINGTEMPFAIL or NOTSIGNED
// error: if an error occurs during verification
func Verify(email *[]byte, opts ...DNSOpt) (verifyOutput, error) {
// parse email
dkimHeader, err := GetHeader(email)
if err != nil {
if err == ErrDkimHeaderNotFound {
return NOTSIGNED, ErrDkimHeaderNotFound
}
return PERMFAIL, err
}
// we do not check the query method here; if it is unsupported, validation has already failed earlier
pubKey, verifyOutputOnError, err := NewPubKeyRespFromDNS(dkimHeader.Selector, dkimHeader.Domain, opts...)
if err != nil {
// fix https://github.com/toorop/go-dkim/issues/1
//return getVerifyOutput(verifyOutputOnError, err, pubKey.FlagTesting)
return verifyOutputOnError, err
}
// Normalize
headers, body, err := canonicalize(email, dkimHeader.MessageCanonicalization, dkimHeader.Headers)
if err != nil {
return getVerifyOutput(PERMFAIL, err, pubKey.FlagTesting)
}
sigHash := strings.Split(dkimHeader.Algorithm, "-")
// check if hash algo are compatible
compatible := false
for _, algo := range pubKey.HashAlgo {
if sigHash[1] == algo {
compatible = true
break
}
}
if !compatible {
return getVerifyOutput(PERMFAIL, ErrVerifyInappropriateHashAlgo, pubKey.FlagTesting)
}
// expired ?
if !dkimHeader.SignatureExpiration.IsZero() && dkimHeader.SignatureExpiration.Second() < time.Now().Second() {
return getVerifyOutput(PERMFAIL, ErrVerifySignatureHasExpired, pubKey.FlagTesting)
}
//println("|" + string(body) + "|")
// get body hash
bodyHash, err := getBodyHash(&body, sigHash[1], dkimHeader.BodyLength)
if err != nil {
return getVerifyOutput(PERMFAIL, err, pubKey.FlagTesting)
}
//println(bodyHash)
if bodyHash != dkimHeader.BodyHash {
return getVerifyOutput(PERMFAIL, ErrVerifyBodyHash, pubKey.FlagTesting)
}
// compute sig
dkimHeaderCano, err := canonicalizeHeader(dkimHeader.rawForSign, strings.Split(dkimHeader.MessageCanonicalization, "/")[0])
if err != nil {
return getVerifyOutput(TEMPFAIL, err, pubKey.FlagTesting)
}
toSignStr := string(headers) + dkimHeaderCano
toSign := bytes.TrimRight([]byte(toSignStr), " \r\n")
err = verifySignature(toSign, dkimHeader.SignatureData, &pubKey.PubKey, sigHash[1])
if err != nil {
return getVerifyOutput(PERMFAIL, err, pubKey.FlagTesting)
}
return SUCCESS, nil
}
// getVerifyOutput returns the output of the verify function according to the testing flag
func getVerifyOutput(status verifyOutput, err error, flagTesting bool) (verifyOutput, error) {
if !flagTesting {
return status, err
}
switch status {
case SUCCESS:
return TESTINGSUCCESS, err
case PERMFAIL:
return TESTINGPERMFAIL, err
case TEMPFAIL:
return TESTINGTEMPFAIL, err
}
// should never happen, but the compiler complains without a return
return status, err
}
// canonicalize returns canonicalized version of header and body
func canonicalize(email *[]byte, cano string, h []string) (headers, body []byte, err error) {
body = []byte{}
rxReduceWS := regexp.MustCompile(`[ \t]+`)
rawHeaders, rawBody, err := getHeadersBody(email)
if err != nil {
return nil, nil, err
}
canonicalizations := strings.Split(cano, "/")
// canonicalize header
headersList, err := getHeadersList(&rawHeaders)
// for each header to keep, we go through all the available headers
// If there are multiple instances of a field we must keep them from the bottom to the top
var match *list.Element
headersToKeepList := list.New()
for _, headerToKeep := range h {
match = nil
headerToKeepToLower := strings.ToLower(headerToKeep)
for e := headersList.Front(); e != nil; e = e.Next() {
//fmt.Printf("|%s|\n", e.Value.(string))
t := strings.Split(e.Value.(string), ":")
if strings.ToLower(t[0]) == headerToKeepToLower {
match = e
}
}
if match != nil {
headersToKeepList.PushBack(match.Value.(string) + "\r\n")
headersList.Remove(match)
}
}
//if canonicalizations[0] == "simple" {
for e := headersToKeepList.Front(); e != nil; e = e.Next() {
cHeader, err := canonicalizeHeader(e.Value.(string), canonicalizations[0])
if err != nil {
return headers, body, err
}
headers = append(headers, []byte(cHeader)...)
}
// canonicalize body
if canonicalizations[1] == "simple" {
// simple
// The "simple" body canonicalization algorithm ignores all empty lines
// at the end of the message body. An empty line is a line of zero
// length after removal of the line terminator. If there is no body or
// no trailing CRLF on the message body, a CRLF is added. It makes no
// other changes to the message body. In more formal terms, the
// "simple" body canonicalization algorithm converts "*CRLF" at the end
// of the body to a single "CRLF".
// Note that a completely empty or missing body is canonicalized as a
// single "CRLF"; that is, the canonicalized length will be 2 octets.
body = bytes.TrimRight(rawBody, "\r\n")
body = append(body, []byte{13, 10}...)
} else {
// relaxed
// Ignore all whitespace at the end of lines. Implementations
// MUST NOT remove the CRLF at the end of the line.
// Reduce all sequences of WSP within a line to a single SP
// character.
// Ignore all empty lines at the end of the message body. "Empty
// line" is defined in Section 3.4.3. If the body is non-empty but
// does not end with a CRLF, a CRLF is added. (For email, this is
// only possible when using extensions to SMTP or non-SMTP transport
// mechanisms.)
rawBody = rxReduceWS.ReplaceAll(rawBody, []byte(" "))
for _, line := range bytes.SplitAfter(rawBody, []byte{10}) {
line = bytes.TrimRight(line, " \r\n")
body = append(body, line...)
body = append(body, []byte{13, 10}...)
}
body = bytes.TrimRight(body, "\r\n")
body = append(body, []byte{13, 10}...)
}
return
}
// canonicalizeHeader returns canonicalized version of header
func canonicalizeHeader(header string, algo string) (string, error) {
//rxReduceWS := regexp.MustCompile(`[ \t]+`)
if algo == "simple" {
// The "simple" header canonicalization algorithm does not change header
// fields in any way. Header fields MUST be presented to the signing or
// verification algorithm exactly as they are in the message being
// signed or verified. In particular, header field names MUST NOT be
// case folded and whitespace MUST NOT be changed.
return header, nil
} else if algo == "relaxed" {
// The "relaxed" header canonicalization algorithm MUST apply the
// following steps in order:
// Convert all header field names (not the header field values) to
// lowercase. For example, convert "SUBJect: AbC" to "subject: AbC".
// Unfold all header field continuation lines as described in
// [RFC5322]; in particular, lines with terminators embedded in
// continued header field values (that is, CRLF sequences followed by
// WSP) MUST be interpreted without the CRLF. Implementations MUST
// NOT remove the CRLF at the end of the header field value.
// Convert all sequences of one or more WSP characters to a single SP
// character. WSP characters here include those before and after a
// line folding boundary.
// Delete all WSP characters at the end of each unfolded header field
// value.
// Delete any WSP characters remaining before and after the colon
// separating the header field name from the header field value. The
// colon separator MUST be retained.
kv := strings.SplitN(header, ":", 2)
if len(kv) != 2 {
return header, ErrBadMailFormatHeaders
}
k := strings.ToLower(kv[0])
k = strings.TrimSpace(k)
v := removeFWS(kv[1])
//v = rxReduceWS.ReplaceAllString(v, " ")
//v = strings.TrimSpace(v)
return k + ":" + v + CRLF, nil
}
return header, ErrSignBadCanonicalization
}
// getBodyHash returns the hash (base64 encoded) of the body
func getBodyHash(body *[]byte, algo string, bodyLength uint) (string, error) {
var h hash.Hash
if algo == "sha1" {
h = sha1.New()
} else {
h = sha256.New()
}
toH := *body
// if l tag (body length)
if bodyLength != 0 {
if uint(len(toH)) < bodyLength {
return "", ErrBadDKimTagLBodyTooShort
}
toH = toH[0:bodyLength]
}
h.Write(toH)
return base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}
// getSignature returns the signature of toSign using key
func getSignature(toSign *[]byte, key *rsa.PrivateKey, algo string) (string, error) {
var h1 hash.Hash
var h2 crypto.Hash
switch algo {
case "sha1":
h1 = sha1.New()
h2 = crypto.SHA1
break
case "sha256":
h1 = sha256.New()
h2 = crypto.SHA256
break
default:
return "", ErrVerifyInappropriateHashAlgo
}
// sign
h1.Write(*toSign)
sig, err := rsa.SignPKCS1v15(rand.Reader, key, h2, h1.Sum(nil))
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(sig), nil
}
// verifySignature verifies the signature using the public key
func verifySignature(toSign []byte, sig64 string, key *rsa.PublicKey, algo string) error {
var h1 hash.Hash
var h2 crypto.Hash
switch algo {
case "sha1":
h1 = sha1.New()
h2 = crypto.SHA1
break
case "sha256":
h1 = sha256.New()
h2 = crypto.SHA256
break
default:
return ErrVerifyInappropriateHashAlgo
}
h1.Write(toSign)
sig, err := base64.StdEncoding.DecodeString(sig64)
if err != nil {
return err
}
return rsa.VerifyPKCS1v15(key, h2, h1.Sum(nil), sig)
}
// removeFWS removes all FWS from string
func removeFWS(in string) string {
rxReduceWS := regexp.MustCompile(`[ \t]+`)
out := strings.Replace(in, "\n", "", -1)
out = strings.Replace(out, "\r", "", -1)
out = rxReduceWS.ReplaceAllString(out, " ")
return strings.TrimSpace(out)
}
// validateCanonicalization validates the canonicalization (c flag)
func validateCanonicalization(cano string) (string, error) {
p := strings.Split(cano, "/")
if len(p) > 2 {
return "", ErrSignBadCanonicalization
}
if len(p) == 1 {
cano = cano + "/simple"
}
for _, c := range p {
if c != "simple" && c != "relaxed" {
return "", ErrSignBadCanonicalization
}
}
return cano, nil
}
// getHeadersList returns headers as a list
func getHeadersList(rawHeader *[]byte) (*list.List, error) {
headersList := list.New()
currentHeader := []byte{}
for _, line := range bytes.SplitAfter(*rawHeader, []byte{10}) {
if line[0] == 32 || line[0] == 9 {
if len(currentHeader) == 0 {
return headersList, ErrBadMailFormatHeaders
}
currentHeader = append(currentHeader, line...)
} else {
// New header, save current if exists
if len(currentHeader) != 0 {
headersList.PushBack(string(bytes.TrimRight(currentHeader, "\r\n")))
currentHeader = []byte{}
}
currentHeader = append(currentHeader, line...)
}
}
headersList.PushBack(string(currentHeader))
return headersList, nil
}
// getHeadersBody returns headers and body
func getHeadersBody(email *[]byte) ([]byte, []byte, error) {
substitutedEmail := *email
// only replace \n with \r\n when \r\n\r\n does not exist
if bytes.Index(*email, []byte{13, 10, 13, 10}) < 0 {
// \n -> \r\n
substitutedEmail = bytes.Replace(*email, []byte{10}, []byte{13, 10}, -1)
}
parts := bytes.SplitN(substitutedEmail, []byte{13, 10, 13, 10}, 2)
if len(parts) != 2 {
return []byte{}, []byte{}, ErrBadMailFormat
}
// Empty body
if len(parts[1]) == 0 {
parts[1] = []byte{13, 10}
}
return parts[0], parts[1], nil
}
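To make the simple/relaxed body canonicalization rules documented in `canonicalize` above more concrete, here is a small worked example (illustrative only) of what the two algorithms, as implemented here, produce for the same raw body:
```
raw body: "Hello   world  \r\n\r\nBye\r\n\r\n\r\n"
simple:   "Hello   world  \r\n\r\nBye\r\n"   (only the trailing empty lines collapse to a single CRLF)
relaxed:  "Hello world\r\n\r\nBye\r\n"       (WSP runs reduced to a single SP, line-trailing WSP removed)
```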

545
vendor/github.com/toorop/go-dkim/dkimHeader.go generated vendored Normal file
View File

@ -0,0 +1,545 @@
package dkim
import (
"bytes"
"fmt"
"net/mail"
"net/textproto"
"strconv"
"strings"
"time"
)
type DKIMHeader struct {
// Version This tag defines the version of DKIM
// specification that applies to the signature record.
// tag v
Version string
// The algorithm used to generate the signature..
// Verifiers MUST support "rsa-sha1" and "rsa-sha256";
// Signers SHOULD sign using "rsa-sha256".
// tag a
Algorithm string
// The signature data (base64).
// Whitespace is ignored in this value and MUST be
// ignored when reassembling the original signature.
// In particular, the signing process can safely insert
// FWS in this value in arbitrary places to conform to line-length
// limits.
// tag b
SignatureData string
// The hash of the canonicalized body part of the message as
// limited by the "l=" tag (base64; REQUIRED).
// Whitespace is ignored in this value and MUST be ignored when reassembling the original
// signature. In particular, the signing process can safely insert
// FWS in this value in arbitrary places to conform to line-length
// limits.
// tag bh
BodyHash string
// Message canonicalization (plain-text; OPTIONAL, default is
//"simple/simple"). This tag informs the Verifier of the type of
// canonicalization used to prepare the message for signing. It
// consists of two names separated by a "slash" (%d47) character,
// corresponding to the header and body canonicalization algorithms,
// respectively. These algorithms are described in Section 3.4. If
// only one algorithm is named, that algorithm is used for the header
// and "simple" is used for the body. For example, "c=relaxed" is
// treated the same as "c=relaxed/simple".
// tag c
MessageCanonicalization string
// The SDID claiming responsibility for an introduction of a message
// into the mail stream (plain-text; REQUIRED). Hence, the SDID
// value is used to form the query for the public key. The SDID MUST
// correspond to a valid DNS name under which the DKIM key record is
// published. The conventions and semantics used by a Signer to
// create and use a specific SDID are outside the scope of this
// specification, as is any use of those conventions and semantics.
// When presented with a signature that does not meet these
// requirements, Verifiers MUST consider the signature invalid.
// Internationalized domain names MUST be encoded as A-labels, as
// described in Section 2.3 of [RFC5890].
// tag d
Domain string
// Signed header fields (plain-text, but see description; REQUIRED).
// A colon-separated list of header field names that identify the
// header fields presented to the signing algorithm. The field MUST
// contain the complete list of header fields in the order presented
// to the signing algorithm. The field MAY contain names of header
// fields that do not exist when signed; nonexistent header fields do
// not contribute to the signature computation (that is, they are
// treated as the null input, including the header field name, the
// separating colon, the header field value, and any CRLF
// terminator). The field MAY contain multiple instances of a header
// field name, meaning multiple occurrences of the corresponding
// header field are included in the header hash. The field MUST NOT
// include the DKIM-Signature header field that is being created or
// verified but may include others. Folding whitespace (FWS) MAY be
// included on either side of the colon separator. Header field
// names MUST be compared against actual header field names in a
// case-insensitive manner. This list MUST NOT be empty. See
// Section 5.4 for a discussion of choosing header fields to sign and
// Section 5.4.2 for requirements when signing multiple instances of
// a single field.
// tag h
Headers []string
// The Agent or User Identifier (AUID) on behalf of which the SDID is
// taking responsibility (dkim-quoted-printable; OPTIONAL, default is
// an empty local-part followed by an "@" followed by the domain from
// the "d=" tag).
// The syntax is a standard email address where the local-part MAY be
// omitted. The domain part of the address MUST be the same as, or a
// subdomain of, the value of the "d=" tag.
// Internationalized domain names MUST be encoded as A-labels, as
// described in Section 2.3 of [RFC5890].
// tag i
Auid string
// Body length count (plain-text unsigned decimal integer; OPTIONAL,
// default is entire body). This tag informs the Verifier of the
// number of octets in the body of the email after canonicalization
// included in the cryptographic hash, starting from 0 immediately
// following the CRLF preceding the body. This value MUST NOT be
// larger than the actual number of octets in the canonicalized
// message body. See further discussion in Section 8.2.
// tag l
BodyLength uint
// A colon-separated list of query methods used to retrieve the
// public key (plain-text; OPTIONAL, default is "dns/txt"). Each
// query method is of the form "type[/options]", where the syntax and
// semantics of the options depend on the type and specified options.
// If there are multiple query mechanisms listed, the choice of query
// mechanism MUST NOT change the interpretation of the signature.
// Implementations MUST use the recognized query mechanisms in the
// order presented. Unrecognized query mechanisms MUST be ignored.
// Currently, the only valid value is "dns/txt", which defines the
// DNS TXT resource record (RR) lookup algorithm described elsewhere
// in this document. The only option defined for the "dns" query
// type is "txt", which MUST be included. Verifiers and Signers MUST
// support "dns/txt".
// tag q
QueryMethods []string
// The selector subdividing the namespace for the "d=" (domain) tag
// (plain-text; REQUIRED).
// Internationalized selector names MUST be encoded as A-labels, as
// described in Section 2.3 of [RFC5890].
// tag s
Selector string
// Signature Timestamp (plain-text unsigned decimal integer;
// RECOMMENDED, default is an unknown creation time). The time that
// this signature was created. The format is the number of seconds
// since 00:00:00 on January 1, 1970 in the UTC time zone. The value
// is expressed as an unsigned integer in decimal ASCII. This value
// is not constrained to fit into a 31- or 32-bit integer.
// Implementations SHOULD be prepared to handle values up to at least
// 10^12 (until approximately AD 200,000; this fits into 40 bits).
// To avoid denial-of-service attacks, implementations MAY consider
// any value longer than 12 digits to be infinite. Leap seconds are
// not counted. Implementations MAY ignore signatures that have a
// timestamp in the future.
// tag t
SignatureTimestamp time.Time
// Signature Expiration (plain-text unsigned decimal integer;
// RECOMMENDED, default is no expiration). The format is the same as
// in the "t=" tag, represented as an absolute date, not as a time
// delta from the signing timestamp. The value is expressed as an
// unsigned integer in decimal ASCII, with the same constraints on
// the value in the "t=" tag. Signatures MAY be considered invalid
// if the verification time at the Verifier is past the expiration
// date. The verification time should be the time that the message
// was first received at the administrative domain of the Verifier if
// that time is reliably available; otherwise, the current time
// should be used. The value of the "x=" tag MUST be greater than
// the value of the "t=" tag if both are present.
//tag x
SignatureExpiration time.Time
// Copied header fields (dkim-quoted-printable, but see description;
// OPTIONAL, default is null). A vertical-bar-separated list of
// selected header fields present when the message was signed,
// including both the field name and value. It is not required to
// include all header fields present at the time of signing. This
// field need not contain the same header fields listed in the "h="
// tag. The header field text itself must encode the vertical bar
// ("|", %x7C) character (i.e., vertical bars in the "z=" text are
// meta-characters, and any actual vertical bar characters in a
// copied header field must be encoded). Note that all whitespace
// must be encoded, including whitespace between the colon and the
// header field value. After encoding, FWS MAY be added at arbitrary
// locations in order to avoid excessively long lines; such
// whitespace is NOT part of the value of the header field and MUST
// be removed before decoding.
// The header fields referenced by the "h=" tag refer to the fields
// in the [RFC5322] header of the message, not to any copied fields
// in the "z=" tag. Copied header field values are for diagnostic
// use.
// tag z
CopiedHeaderFields []string
// HeaderMailFromDomain store the raw email address of the header Mail From
// used for verifying in case of multiple DKIM header (we will prioritise
// header with d = mail from domain)
//HeaderMailFromDomain string
// RawForsign represents the raw part (without canonicalization) of the header
// used for computint sig in verify process
rawForSign string
}
// newDkimHeaderBySigOptions returns a new DKIMHeader initialized with the sigOptions values
func newDkimHeaderBySigOptions(options SigOptions) *DKIMHeader {
h := new(DKIMHeader)
h.Version = "1"
h.Algorithm = options.Algo
h.MessageCanonicalization = options.Canonicalization
h.Domain = options.Domain
h.Headers = options.Headers
h.Auid = options.Auid
h.BodyLength = options.BodyLength
h.QueryMethods = options.QueryMethods
h.Selector = options.Selector
if options.AddSignatureTimestamp {
h.SignatureTimestamp = time.Now()
}
if options.SignatureExpireIn > 0 {
h.SignatureExpiration = time.Now().Add(time.Duration(options.SignatureExpireIn) * time.Second)
}
h.CopiedHeaderFields = options.CopiedHeaderFields
return h
}
// GetHeader returns a new DKIMHeader by parsing an email.
// Note: according to RFC 6376 an email can have multiple DKIM headers;
// in this case we return the one whose d= tag matches the mail-from domain,
// or otherwise the first one parsed.
func GetHeader(email *[]byte) (*DKIMHeader, error) {
m, err := mail.ReadMessage(bytes.NewReader(*email))
if err != nil {
return nil, err
}
// DKIM header ?
if len(m.Header[textproto.CanonicalMIMEHeaderKey("DKIM-Signature")]) == 0 {
return nil, ErrDkimHeaderNotFound
}
// Get mail from domain
mailFromDomain := ""
mailfrom, err := mail.ParseAddress(m.Header.Get(textproto.CanonicalMIMEHeaderKey("From")))
if err != nil {
if err.Error() != "mail: no address" {
return nil, err
}
} else {
t := strings.SplitAfter(mailfrom.Address, "@")
if len(t) > 1 {
mailFromDomain = strings.ToLower(t[1])
}
}
// get raw dkim header
// we can't use m.Header because header keys will be converted with textproto.CanonicalMIMEHeaderKey,
// i.e. if the key in the header is not DKIM-Signature but Dkim-Signature, DKIM-signature or some other
// combination of case, verify will fail.
rawHeaders, _, err := getHeadersBody(email)
if err != nil {
return nil, ErrBadMailFormat
}
rawHeadersList, err := getHeadersList(&rawHeaders)
if err != nil {
return nil, err
}
dkHeaders := []string{}
for h := rawHeadersList.Front(); h != nil; h = h.Next() {
if strings.HasPrefix(strings.ToLower(h.Value.(string)), "dkim-signature") {
dkHeaders = append(dkHeaders, h.Value.(string))
}
}
var keep *DKIMHeader
var keepErr error
//for _, dk := range m.Header[textproto.CanonicalMIMEHeaderKey("DKIM-Signature")] {
for _, h := range dkHeaders {
parsed, err := parseDkHeader(h)
// if malformed dkim header try next
if err != nil {
keepErr = err
continue
}
// Keep first dkim headers
if keep == nil {
keep = parsed
}
// if d flag == domain keep this header and return
if mailFromDomain == parsed.Domain {
return parsed, nil
}
}
if keep == nil {
return nil, keepErr
}
return keep, nil
}
// parseDkHeader parses a raw DKIM header
func parseDkHeader(header string) (dkh *DKIMHeader, err error) {
dkh = new(DKIMHeader)
keyVal := strings.SplitN(header, ":", 2)
t := strings.LastIndex(header, "b=")
if t == -1 {
return nil, ErrDkimHeaderBTagNotFound
}
dkh.rawForSign = header[0 : t+2]
p := strings.IndexByte(header[t:], ';')
if p != -1 {
dkh.rawForSign = dkh.rawForSign + header[t+p:]
}
// Mandatory
mandatoryFlags := make(map[string]bool, 7) //(b'v', b'a', b'b', b'bh', b'd', b'h', b's')
mandatoryFlags["v"] = false
mandatoryFlags["a"] = false
mandatoryFlags["b"] = false
mandatoryFlags["bh"] = false
mandatoryFlags["d"] = false
mandatoryFlags["h"] = false
mandatoryFlags["s"] = false
// default values
dkh.MessageCanonicalization = "simple/simple"
dkh.QueryMethods = []string{"dns/txt"}
// unfold && clean
val := removeFWS(keyVal[1])
val = strings.Replace(val, " ", "", -1)
fs := strings.Split(val, ";")
for _, f := range fs {
if f == "" {
continue
}
flagData := strings.SplitN(f, "=", 2)
// https://github.com/toorop/go-dkim/issues/2
// if flag is not in the form key=value (eg doesn't have "=")
if len(flagData) != 2 {
return nil, ErrDkimHeaderBadFormat
}
flag := strings.ToLower(strings.TrimSpace(flagData[0]))
data := strings.TrimSpace(flagData[1])
switch flag {
case "v":
if data != "1" {
return nil, ErrDkimVersionNotsupported
}
dkh.Version = data
mandatoryFlags["v"] = true
case "a":
dkh.Algorithm = strings.ToLower(data)
if dkh.Algorithm != "rsa-sha1" && dkh.Algorithm != "rsa-sha256" {
return nil, ErrSignBadAlgo
}
mandatoryFlags["a"] = true
case "b":
//dkh.SignatureData = removeFWS(data)
// remove all space
dkh.SignatureData = strings.Replace(removeFWS(data), " ", "", -1)
if len(dkh.SignatureData) != 0 {
mandatoryFlags["b"] = true
}
case "bh":
dkh.BodyHash = removeFWS(data)
if len(dkh.BodyHash) != 0 {
mandatoryFlags["bh"] = true
}
case "d":
dkh.Domain = strings.ToLower(data)
if len(dkh.Domain) != 0 {
mandatoryFlags["d"] = true
}
case "h":
data = strings.ToLower(data)
dkh.Headers = strings.Split(data, ":")
if len(dkh.Headers) != 0 {
mandatoryFlags["h"] = true
}
fromFound := false
for _, h := range dkh.Headers {
if h == "from" {
fromFound = true
}
}
if !fromFound {
return nil, ErrDkimHeaderNoFromInHTag
}
case "s":
dkh.Selector = strings.ToLower(data)
if len(dkh.Selector) != 0 {
mandatoryFlags["s"] = true
}
case "c":
dkh.MessageCanonicalization, err = validateCanonicalization(strings.ToLower(data))
if err != nil {
return nil, err
}
case "i":
if data != "" {
if !strings.HasSuffix(data, dkh.Domain) {
return nil, ErrDkimHeaderDomainMismatch
}
dkh.Auid = data
}
case "l":
ui, err := strconv.ParseUint(data, 10, 32)
if err != nil {
return nil, err
}
dkh.BodyLength = uint(ui)
case "q":
dkh.QueryMethods = strings.Split(data, ":")
if len(dkh.QueryMethods) == 0 || strings.ToLower(dkh.QueryMethods[0]) != "dns/txt" {
return nil, errQueryMethodNotsupported
}
case "t":
ts, err := strconv.ParseInt(data, 10, 64)
if err != nil {
return nil, err
}
dkh.SignatureTimestamp = time.Unix(ts, 0)
case "x":
ts, err := strconv.ParseInt(data, 10, 64)
if err != nil {
return nil, err
}
dkh.SignatureExpiration = time.Unix(ts, 0)
case "z":
dkh.CopiedHeaderFields = strings.Split(data, "|")
}
}
// Are all mandatory flags present?
for _, p := range mandatoryFlags {
if !p {
return nil, ErrDkimHeaderMissingRequiredTag
}
}
// default for i/Auid
if dkh.Auid == "" {
dkh.Auid = "@" + dkh.Domain
}
// default for query method
if len(dkh.QueryMethods) == 0 {
dkh.QueryMethods = []string{"dns/text"}
}
return dkh, nil
}
// getHeaderBaseForSigning returns the base header for signers
// TODO: some refactoring needed...
func (d *DKIMHeader) getHeaderBaseForSigning(bodyHash string) string {
h := "DKIM-Signature: v=" + d.Version + "; a=" + d.Algorithm + "; q=" + strings.Join(d.QueryMethods, ":") + "; c=" + d.MessageCanonicalization + ";" + CRLF + TAB
subh := "s=" + d.Selector + ";"
if len(subh)+len(d.Domain)+4 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += " d=" + d.Domain + ";"
// Auid
if len(d.Auid) != 0 {
if len(subh)+len(d.Auid)+4 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += " i=" + d.Auid + ";"
}
/*h := "DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=tmail.io; i=@tmail.io;" + FWS
subh := "q=dns/txt; s=test;"*/
// signature timestamp
if !d.SignatureTimestamp.IsZero() {
ts := d.SignatureTimestamp.Unix()
if len(subh)+14 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += " t=" + fmt.Sprintf("%d", ts) + ";"
}
if len(subh)+len(d.Domain)+4 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
// Expiration
if !d.SignatureExpiration.IsZero() {
ts := d.SignatureExpiration.Unix()
if len(subh)+14 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += " x=" + fmt.Sprintf("%d", ts) + ";"
}
// body length
if d.BodyLength != 0 {
bodyLengthStr := fmt.Sprintf("%d", d.BodyLength)
if len(subh)+len(bodyLengthStr)+4 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += " l=" + bodyLengthStr + ";"
}
// Headers
if len(subh)+len(d.Headers)+4 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += " h="
for _, header := range d.Headers {
if len(subh)+len(header)+1 > MaxHeaderLineLength {
h += subh + FWS
subh = ""
}
subh += header + ":"
}
subh = subh[:len(subh)-1] + ";"
// BodyHash
if len(subh)+5+len(bodyHash) > MaxHeaderLineLength {
h += subh + FWS
subh = ""
} else {
subh += " "
}
subh += "bh="
l := len(subh)
for _, c := range bodyHash {
subh += string(c)
l++
if l >= MaxHeaderLineLength {
h += subh + FWS
subh = ""
l = 0
}
}
h += subh + ";" + FWS + "b="
return h
}
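To make the tag documentation on `DKIMHeader` above more concrete, here is an illustrative (not real) DKIM-Signature header showing the most common tags; the hash and signature values are placeholders:
```
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=example.org; s=selector1;
        q=dns/txt; t=1700000000; h=from:subject:date;
        bh=<base64 body hash>; b=<base64 signature>
```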

94
vendor/github.com/toorop/go-dkim/errors.go generated vendored Normal file
View File

@ -0,0 +1,94 @@
package dkim
import (
"errors"
)
var (
// ErrSignPrivateKeyRequired when there is no private key in the config
ErrSignPrivateKeyRequired = errors.New("PrivateKey is required")
// ErrSignDomainRequired when there is no domain defined in config
ErrSignDomainRequired = errors.New("Domain is required")
// ErrSignSelectorRequired when there is no Selector defined in config
ErrSignSelectorRequired = errors.New("Selector is required")
// ErrSignHeaderShouldContainsFrom If Headers is specified it should at least contain 'from'
ErrSignHeaderShouldContainsFrom = errors.New("header must contains 'from' field")
// ErrSignBadCanonicalization If bad Canonicalization parameter
ErrSignBadCanonicalization = errors.New("bad Canonicalization parameter")
// ErrCandNotParsePrivateKey when unable to parse private key
ErrCandNotParsePrivateKey = errors.New("can not parse private key, check format (pem) and validity")
// ErrSignBadAlgo Bad algorithm
ErrSignBadAlgo = errors.New("bad algorithm. Only rsa-sha1 or rsa-sha256 are permitted")
// ErrBadMailFormat unable to parse mail
ErrBadMailFormat = errors.New("bad mail format")
// ErrBadMailFormatHeaders bad headers format (not DKIM Header)
ErrBadMailFormatHeaders = errors.New("bad mail format found in headers")
// ErrBadDKimTagLBodyTooShort bad l tag
ErrBadDKimTagLBodyTooShort = errors.New("bad tag l or bodyLength option. Body length < l value")
// ErrDkimHeaderBadFormat when errors found in DKIM header
ErrDkimHeaderBadFormat = errors.New("bad DKIM header format")
// ErrDkimHeaderNotFound when there's no DKIM-Signature header in an email we have to verify
ErrDkimHeaderNotFound = errors.New("no DKIM-Signature header field found ")
// ErrDkimHeaderBTagNotFound when there's no b tag
ErrDkimHeaderBTagNotFound = errors.New("no tag 'b' found in dkim header")
// ErrDkimHeaderNoFromInHTag when from is missing in h tag
ErrDkimHeaderNoFromInHTag = errors.New("'from' header is missing in h tag")
// ErrDkimHeaderMissingRequiredTag when a required tag is missing
ErrDkimHeaderMissingRequiredTag = errors.New("signature missing required tag")
// ErrDkimHeaderDomainMismatch if i tag is not a sub domain of d tag
ErrDkimHeaderDomainMismatch = errors.New("domain mismatch")
// ErrDkimVersionNotsupported version not supported
ErrDkimVersionNotsupported = errors.New("incompatible version")
// Query method unsupported
errQueryMethodNotsupported = errors.New("query method not supported")
// ErrVerifyBodyHash when body hash doesn't verify
ErrVerifyBodyHash = errors.New("body hash did not verify")
// ErrVerifyNoKeyForSignature no key
ErrVerifyNoKeyForSignature = errors.New("no key for verify")
// ErrVerifyKeyUnavailable when the service (DNS) is unavailable
ErrVerifyKeyUnavailable = errors.New("key unavailable")
// ErrVerifyTagVMustBeTheFirst if present, the v tag must be the first in the record
ErrVerifyTagVMustBeTheFirst = errors.New("pub key syntax error: v tag must be the first")
// ErrVerifyVersionMusBeDkim1 if present, the v (version) flag must be DKIM1
ErrVerifyVersionMusBeDkim1 = errors.New("flag v must be set to DKIM1")
// ErrVerifyBadKeyType bad type for pub key (only rsa is accepted)
ErrVerifyBadKeyType = errors.New("bad type for key type")
// ErrVerifyRevokedKey key(s) for this selector is revoked (p is empty)
ErrVerifyRevokedKey = errors.New("revoked key")
// ErrVerifyBadKey when we can't parse pubkey
ErrVerifyBadKey = errors.New("unable to parse pub key")
// ErrVerifyNoKey when no key is found on DNS record
ErrVerifyNoKey = errors.New("no public key found in DNS TXT")
// ErrVerifySignatureHasExpired when signature has expired
ErrVerifySignatureHasExpired = errors.New("signature has expired")
// ErrVerifyInappropriateHashAlgo when h tag in pub key doesn't contain hash algo from a tag of DKIM header
ErrVerifyInappropriateHashAlgo = errors.New("inappropriate has algorithm")
)

181
vendor/github.com/toorop/go-dkim/pubKeyRep.go generated vendored Normal file
View File

@ -0,0 +1,181 @@
package dkim
import (
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"io/ioutil"
"mime/quotedprintable"
"net"
"strings"
)
// PubKeyRep represents a parsed version of public key record
type PubKeyRep struct {
Version string
HashAlgo []string
KeyType string
Note string
PubKey rsa.PublicKey
ServiceType []string
FlagTesting bool // flag y
FlagIMustBeD bool // flag i
}
// DNSOptions holds settings for looking up DNS records
type DNSOptions struct {
netLookupTXT func(name string) ([]string, error)
}
// DNSOpt represents an optional setting for looking up DNS records
type DNSOpt interface {
apply(*DNSOptions)
}
type dnsOpt func(*DNSOptions)
func (opt dnsOpt) apply(dnsOpts *DNSOptions) {
opt(dnsOpts)
}
// DNSOptLookupTXT sets the function to use to lookup TXT records.
//
// This should probably only be used in tests.
func DNSOptLookupTXT(netLookupTXT func(name string) ([]string, error)) DNSOpt {
return dnsOpt(func(opts *DNSOptions) {
opts.netLookupTXT = netLookupTXT
})
}
// NewPubKeyRespFromDNS retrieves the TXT record from DNS based on the specified domain and selector
// and parses it.
func NewPubKeyRespFromDNS(selector, domain string, opts ...DNSOpt) (*PubKeyRep, verifyOutput, error) {
dnsOpts := DNSOptions{}
for _, opt := range opts {
opt.apply(&dnsOpts)
}
if dnsOpts.netLookupTXT == nil {
dnsOpts.netLookupTXT = net.LookupTXT
}
txt, err := dnsOpts.netLookupTXT(selector + "._domainkey." + domain)
if err != nil {
if strings.HasSuffix(err.Error(), "no such host") {
return nil, PERMFAIL, ErrVerifyNoKeyForSignature
}
return nil, TEMPFAIL, ErrVerifyKeyUnavailable
}
// empty record
if len(txt) == 0 {
return nil, PERMFAIL, ErrVerifyNoKeyForSignature
}
// parsing: we keep the first record
// TODO: handle the case where there are multiple records
return NewPubKeyResp(txt[0])
}
// NewPubKeyResp parses DKIM record (usually from DNS)
func NewPubKeyResp(dkimRecord string) (*PubKeyRep, verifyOutput, error) {
pkr := new(PubKeyRep)
pkr.Version = "DKIM1"
pkr.HashAlgo = []string{"sha1", "sha256"}
pkr.KeyType = "rsa"
pkr.FlagTesting = false
pkr.FlagIMustBeD = false
p := strings.Split(dkimRecord, ";")
for i, data := range p {
keyVal := strings.SplitN(data, "=", 2)
val := ""
if len(keyVal) > 1 {
val = strings.TrimSpace(keyVal[1])
}
switch strings.ToLower(strings.TrimSpace(keyVal[0])) {
case "v":
// RFC: if this tag is specified it MUST be the first in the record
if i != 0 {
return nil, PERMFAIL, ErrVerifyTagVMustBeTheFirst
}
pkr.Version = val
if pkr.Version != "DKIM1" {
return nil, PERMFAIL, ErrVerifyVersionMusBeDkim1
}
case "h":
p := strings.Split(strings.ToLower(val), ":")
pkr.HashAlgo = []string{}
for _, h := range p {
h = strings.TrimSpace(h)
if h == "sha1" || h == "sha256" {
pkr.HashAlgo = append(pkr.HashAlgo, h)
}
}
// if empty switch back to default
if len(pkr.HashAlgo) == 0 {
pkr.HashAlgo = []string{"sha1", "sha256"}
}
case "k":
if strings.ToLower(val) != "rsa" {
return nil, PERMFAIL, ErrVerifyBadKeyType
}
case "n":
qp, err := ioutil.ReadAll(quotedprintable.NewReader(strings.NewReader(val)))
if err == nil {
val = string(qp)
}
pkr.Note = val
case "p":
rawkey := val
if rawkey == "" {
return nil, PERMFAIL, ErrVerifyRevokedKey
}
un64, err := base64.StdEncoding.DecodeString(rawkey)
if err != nil {
return nil, PERMFAIL, ErrVerifyBadKey
}
pk, err := x509.ParsePKIXPublicKey(un64)
if pk, ok := pk.(*rsa.PublicKey); ok {
pkr.PubKey = *pk
}
case "s":
t := strings.Split(strings.ToLower(val), ":")
for _, tt := range t {
tt = strings.TrimSpace(tt)
switch tt {
case "*":
pkr.ServiceType = append(pkr.ServiceType, "all")
case "email":
pkr.ServiceType = append(pkr.ServiceType, tt)
}
}
case "t":
flags := strings.Split(strings.ToLower(val), ":")
for _, flag := range flags {
flag = strings.TrimSpace(flag)
switch flag {
case "y":
pkr.FlagTesting = true
case "s":
pkr.FlagIMustBeD = true
}
}
}
}
// if no pubkey
if pkr.PubKey == (rsa.PublicKey{}) {
return nil, PERMFAIL, ErrVerifyNoKey
}
// No service type
if len(pkr.ServiceType) == 0 {
pkr.ServiceType = []string{"all"}
}
return pkr, SUCCESS, nil
}
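The `DNSOptLookupTXT` hook above is mainly useful for tests, where the public-key TXT lookup can be stubbed out instead of hitting real DNS. A minimal sketch (the message and record are placeholders):
```go
package main

import (
	"fmt"

	dkim "github.com/toorop/go-dkim"
)

func main() {
	// fakeTXT stands in for net.LookupTXT; the record it returns is a placeholder.
	fakeTXT := func(name string) ([]string, error) {
		return []string{"v=DKIM1; k=rsa; p="}, nil
	}

	// This message is unsigned, so Verify returns NOTSIGNED before any DNS
	// lookup happens; with a signed message, fakeTXT would be consulted
	// instead of net.LookupTXT.
	email := []byte("From: a@example.org\r\nSubject: hi\r\n\r\nHello\r\n")
	status, err := dkim.Verify(&email, dkim.DNSOptLookupTXT(fakeTXT))
	fmt.Println(status == dkim.NOTSIGNED, err)
}
```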

4
vendor/github.com/toorop/go-dkim/watch generated vendored Normal file
View File

@ -0,0 +1,4 @@
while true
do
inotifywait -q -r -e modify,attrib,close_write,move,create,delete . && echo "--------------" && go test -v
done

4
vendor/golang.org/x/crypto/LICENSE generated vendored
View File

@ -1,4 +1,4 @@
Copyright 2009 The Go Authors.
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

View File

@ -1,69 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
//
// This package is a wrapper around the standard library crypto/ed25519 package.
package ed25519
import (
"crypto/ed25519"
"io"
)
const (
// PublicKeySize is the size, in bytes, of public keys as used in this package.
PublicKeySize = 32
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
SeedSize = 32
)
// PublicKey is the type of Ed25519 public keys.
//
// This type is an alias for crypto/ed25519's PublicKey type.
// See the crypto/ed25519 package for the methods on this type.
type PublicKey = ed25519.PublicKey
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
//
// This type is an alias for crypto/ed25519's PrivateKey type.
// See the crypto/ed25519 package for the methods on this type.
type PrivateKey = ed25519.PrivateKey
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
return ed25519.GenerateKey(rand)
}
// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
return ed25519.NewKeyFromSeed(seed)
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
return ed25519.Sign(privateKey, message)
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
return ed25519.Verify(publicKey, message, sig)
}
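As the removed doc comment notes, this package was only a thin wrapper around the standard library, so code that depended on it can use crypto/ed25519 directly; a minimal sketch:
```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	// Generate a key pair, sign a message, and verify the signature
	// using the standard library that the removed wrapper aliased.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("hello")
	sig := ed25519.Sign(priv, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}
```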

62
vendor/golang.org/x/crypto/sha3/doc.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha3 implements the SHA-3 fixed-output-length hash functions and
// the SHAKE variable-output-length hash functions defined by FIPS-202.
//
// Both types of hash function use the "sponge" construction and the Keccak
// permutation. For a detailed specification see http://keccak.noekeon.org/
//
// # Guidance
//
// If you aren't sure what function you need, use SHAKE256 with at least 64
// bytes of output. The SHAKE instances are faster than the SHA3 instances;
// the latter have to allocate memory to conform to the hash.Hash interface.
//
// If you need a secret-key MAC (message authentication code), prepend the
// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
// output.
//
// # Security strengths
//
// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
// strength against preimage attacks of x bits. Since they only produce "x"
// bits of output, their collision-resistance is only "x/2" bits.
//
// The SHAKE-256 and -128 functions have a generic security strength of 256 and
// 128 bits against all attacks, provided that at least 2x bits of their output
// is used. Requesting more than 64 or 32 bytes of output, respectively, does
// not increase the collision-resistance of the SHAKE functions.
//
// # The sponge construction
//
// A sponge builds a pseudo-random function from a public pseudo-random
// permutation, by applying the permutation to a state of "rate + capacity"
// bytes, but hiding "capacity" of the bytes.
//
// A sponge starts out with a zero state. To hash an input using a sponge, up
// to "rate" bytes of the input are XORed into the sponge's state. The sponge
// is then "full" and the permutation is applied to "empty" it. This process is
// repeated until all the input has been "absorbed". The input is then padded.
// The digest is "squeezed" from the sponge in the same way, except that output
// is copied out instead of input being XORed in.
//
// A sponge is parameterized by its generic security strength, which is equal
// to half its capacity; capacity + rate is equal to the permutation's width.
// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
//
// # Recommendations
//
// The SHAKE functions are recommended for most new uses. They can produce
// output of arbitrary length. SHAKE256, with an output length of at least
// 64 bytes, provides 256-bit security against all attacks. The Keccak team
// recommends it for most applications upgrading from SHA2-512. (NIST chose a
// much stronger, but much slower, sponge instance for SHA3-512.)
//
// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
// They produce output of the same length, with the same security strengths
// against all attacks. This means, in particular, that SHA3-256 only has
// 128-bit collision resistance, because its output length is 32 bytes.
package sha3

109
vendor/golang.org/x/crypto/sha3/hashes.go generated vendored Normal file
View File

@ -0,0 +1,109 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file provides functions for creating instances of the SHA-3
// and SHAKE hash functions, as well as utility functions for hashing
// bytes.
import (
"crypto"
"hash"
)
// New224 creates a new SHA3-224 hash.
// Its generic security strength is 224 bits against preimage attacks,
// and 112 bits against collision attacks.
func New224() hash.Hash {
return new224()
}
// New256 creates a new SHA3-256 hash.
// Its generic security strength is 256 bits against preimage attacks,
// and 128 bits against collision attacks.
func New256() hash.Hash {
return new256()
}
// New384 creates a new SHA3-384 hash.
// Its generic security strength is 384 bits against preimage attacks,
// and 192 bits against collision attacks.
func New384() hash.Hash {
return new384()
}
// New512 creates a new SHA3-512 hash.
// Its generic security strength is 512 bits against preimage attacks,
// and 256 bits against collision attacks.
func New512() hash.Hash {
return new512()
}
func init() {
crypto.RegisterHash(crypto.SHA3_224, New224)
crypto.RegisterHash(crypto.SHA3_256, New256)
crypto.RegisterHash(crypto.SHA3_384, New384)
crypto.RegisterHash(crypto.SHA3_512, New512)
}
func new224Generic() *state {
return &state{rate: 144, outputLen: 28, dsbyte: 0x06}
}
func new256Generic() *state {
return &state{rate: 136, outputLen: 32, dsbyte: 0x06}
}
func new384Generic() *state {
return &state{rate: 104, outputLen: 48, dsbyte: 0x06}
}
func new512Generic() *state {
return &state{rate: 72, outputLen: 64, dsbyte: 0x06}
}
// NewLegacyKeccak256 creates a new Keccak-256 hash.
//
// Only use this function if you require compatibility with an existing cryptosystem
// that uses non-standard padding. All other users should use New256 instead.
func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
// NewLegacyKeccak512 creates a new Keccak-512 hash.
//
// Only use this function if you require compatibility with an existing cryptosystem
// that uses non-standard padding. All other users should use New512 instead.
func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
// Sum224 returns the SHA3-224 digest of the data.
func Sum224(data []byte) (digest [28]byte) {
h := New224()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum256 returns the SHA3-256 digest of the data.
func Sum256(data []byte) (digest [32]byte) {
h := New256()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum384 returns the SHA3-384 digest of the data.
func Sum384(data []byte) (digest [48]byte) {
h := New384()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum512 returns the SHA3-512 digest of the data.
func Sum512(data []byte) (digest [64]byte) {
h := New512()
h.Write(data)
h.Sum(digest[:0])
return
}
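A minimal usage sketch of the constructors and Sum helpers defined above (assuming the vendored package is imported as golang.org/x/crypto/sha3):
```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	data := []byte("hello world")

	// One-shot digest via the Sum helper.
	digest := sha3.Sum256(data)
	fmt.Printf("%x\n", digest)

	// Streaming use via the hash.Hash constructor.
	h := sha3.New256()
	h.Write(data)
	fmt.Printf("%x\n", h.Sum(nil))
}
```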

23
vendor/golang.org/x/crypto/sha3/hashes_noasm.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !gc || purego || !s390x
package sha3
func new224() *state {
return new224Generic()
}
func new256() *state {
return new256Generic()
}
func new384() *state {
return new384Generic()
}
func new512() *state {
return new512Generic()
}

414
vendor/golang.org/x/crypto/sha3/keccakf.go generated vendored Normal file
View File

@ -0,0 +1,414 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !amd64 || purego || !gc
package sha3
import "math/bits"
// rc stores the round constants for use in the ι step.
var rc = [24]uint64{
0x0000000000000001,
0x0000000000008082,
0x800000000000808A,
0x8000000080008000,
0x000000000000808B,
0x0000000080000001,
0x8000000080008081,
0x8000000000008009,
0x000000000000008A,
0x0000000000000088,
0x0000000080008009,
0x000000008000000A,
0x000000008000808B,
0x800000000000008B,
0x8000000000008089,
0x8000000000008003,
0x8000000000008002,
0x8000000000000080,
0x000000000000800A,
0x800000008000000A,
0x8000000080008081,
0x8000000000008080,
0x0000000080000001,
0x8000000080008008,
}
// keccakF1600 applies the Keccak permutation to a 1600b-wide
// state represented as a slice of 25 uint64s.
func keccakF1600(a *[25]uint64) {
// Implementation translated from Keccak-inplace.c
// in the keccak reference code.
var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
for i := 0; i < 24; i += 4 {
// Combines the 5 steps in each round into 2 steps.
// Unrolls 4 rounds per loop and spreads some steps across rounds.
// Round 1
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[6] ^ d1
bc1 = bits.RotateLeft64(t, 44)
t = a[12] ^ d2
bc2 = bits.RotateLeft64(t, 43)
t = a[18] ^ d3
bc3 = bits.RotateLeft64(t, 21)
t = a[24] ^ d4
bc4 = bits.RotateLeft64(t, 14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
a[6] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc2 = bits.RotateLeft64(t, 3)
t = a[16] ^ d1
bc3 = bits.RotateLeft64(t, 45)
t = a[22] ^ d2
bc4 = bits.RotateLeft64(t, 61)
t = a[3] ^ d3
bc0 = bits.RotateLeft64(t, 28)
t = a[9] ^ d4
bc1 = bits.RotateLeft64(t, 20)
a[10] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc4 = bits.RotateLeft64(t, 18)
t = a[1] ^ d1
bc0 = bits.RotateLeft64(t, 1)
t = a[7] ^ d2
bc1 = bits.RotateLeft64(t, 6)
t = a[13] ^ d3
bc2 = bits.RotateLeft64(t, 25)
t = a[19] ^ d4
bc3 = bits.RotateLeft64(t, 8)
a[20] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc1 = bits.RotateLeft64(t, 36)
t = a[11] ^ d1
bc2 = bits.RotateLeft64(t, 10)
t = a[17] ^ d2
bc3 = bits.RotateLeft64(t, 15)
t = a[23] ^ d3
bc4 = bits.RotateLeft64(t, 56)
t = a[4] ^ d4
bc0 = bits.RotateLeft64(t, 27)
a[5] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc3 = bits.RotateLeft64(t, 41)
t = a[21] ^ d1
bc4 = bits.RotateLeft64(t, 2)
t = a[2] ^ d2
bc0 = bits.RotateLeft64(t, 62)
t = a[8] ^ d3
bc1 = bits.RotateLeft64(t, 55)
t = a[14] ^ d4
bc2 = bits.RotateLeft64(t, 39)
a[15] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
// Round 2
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[16] ^ d1
bc1 = bits.RotateLeft64(t, 44)
t = a[7] ^ d2
bc2 = bits.RotateLeft64(t, 43)
t = a[23] ^ d3
bc3 = bits.RotateLeft64(t, 21)
t = a[14] ^ d4
bc4 = bits.RotateLeft64(t, 14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
a[16] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc2 = bits.RotateLeft64(t, 3)
t = a[11] ^ d1
bc3 = bits.RotateLeft64(t, 45)
t = a[2] ^ d2
bc4 = bits.RotateLeft64(t, 61)
t = a[18] ^ d3
bc0 = bits.RotateLeft64(t, 28)
t = a[9] ^ d4
bc1 = bits.RotateLeft64(t, 20)
a[20] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc4 = bits.RotateLeft64(t, 18)
t = a[6] ^ d1
bc0 = bits.RotateLeft64(t, 1)
t = a[22] ^ d2
bc1 = bits.RotateLeft64(t, 6)
t = a[13] ^ d3
bc2 = bits.RotateLeft64(t, 25)
t = a[4] ^ d4
bc3 = bits.RotateLeft64(t, 8)
a[15] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc1 = bits.RotateLeft64(t, 36)
t = a[1] ^ d1
bc2 = bits.RotateLeft64(t, 10)
t = a[17] ^ d2
bc3 = bits.RotateLeft64(t, 15)
t = a[8] ^ d3
bc4 = bits.RotateLeft64(t, 56)
t = a[24] ^ d4
bc0 = bits.RotateLeft64(t, 27)
a[10] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc3 = bits.RotateLeft64(t, 41)
t = a[21] ^ d1
bc4 = bits.RotateLeft64(t, 2)
t = a[12] ^ d2
bc0 = bits.RotateLeft64(t, 62)
t = a[3] ^ d3
bc1 = bits.RotateLeft64(t, 55)
t = a[19] ^ d4
bc2 = bits.RotateLeft64(t, 39)
a[5] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
// Round 3
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[11] ^ d1
bc1 = bits.RotateLeft64(t, 44)
t = a[22] ^ d2
bc2 = bits.RotateLeft64(t, 43)
t = a[8] ^ d3
bc3 = bits.RotateLeft64(t, 21)
t = a[19] ^ d4
bc4 = bits.RotateLeft64(t, 14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
a[11] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc2 = bits.RotateLeft64(t, 3)
t = a[1] ^ d1
bc3 = bits.RotateLeft64(t, 45)
t = a[12] ^ d2
bc4 = bits.RotateLeft64(t, 61)
t = a[23] ^ d3
bc0 = bits.RotateLeft64(t, 28)
t = a[9] ^ d4
bc1 = bits.RotateLeft64(t, 20)
a[15] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc4 = bits.RotateLeft64(t, 18)
t = a[16] ^ d1
bc0 = bits.RotateLeft64(t, 1)
t = a[2] ^ d2
bc1 = bits.RotateLeft64(t, 6)
t = a[13] ^ d3
bc2 = bits.RotateLeft64(t, 25)
t = a[24] ^ d4
bc3 = bits.RotateLeft64(t, 8)
a[5] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc1 = bits.RotateLeft64(t, 36)
t = a[6] ^ d1
bc2 = bits.RotateLeft64(t, 10)
t = a[17] ^ d2
bc3 = bits.RotateLeft64(t, 15)
t = a[3] ^ d3
bc4 = bits.RotateLeft64(t, 56)
t = a[14] ^ d4
bc0 = bits.RotateLeft64(t, 27)
a[20] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc3 = bits.RotateLeft64(t, 41)
t = a[21] ^ d1
bc4 = bits.RotateLeft64(t, 2)
t = a[7] ^ d2
bc0 = bits.RotateLeft64(t, 62)
t = a[18] ^ d3
bc1 = bits.RotateLeft64(t, 55)
t = a[4] ^ d4
bc2 = bits.RotateLeft64(t, 39)
a[10] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
// Round 4
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[1] ^ d1
bc1 = bits.RotateLeft64(t, 44)
t = a[2] ^ d2
bc2 = bits.RotateLeft64(t, 43)
t = a[3] ^ d3
bc3 = bits.RotateLeft64(t, 21)
t = a[4] ^ d4
bc4 = bits.RotateLeft64(t, 14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
a[1] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc2 = bits.RotateLeft64(t, 3)
t = a[6] ^ d1
bc3 = bits.RotateLeft64(t, 45)
t = a[7] ^ d2
bc4 = bits.RotateLeft64(t, 61)
t = a[8] ^ d3
bc0 = bits.RotateLeft64(t, 28)
t = a[9] ^ d4
bc1 = bits.RotateLeft64(t, 20)
a[5] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc4 = bits.RotateLeft64(t, 18)
t = a[11] ^ d1
bc0 = bits.RotateLeft64(t, 1)
t = a[12] ^ d2
bc1 = bits.RotateLeft64(t, 6)
t = a[13] ^ d3
bc2 = bits.RotateLeft64(t, 25)
t = a[14] ^ d4
bc3 = bits.RotateLeft64(t, 8)
a[10] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc1 = bits.RotateLeft64(t, 36)
t = a[16] ^ d1
bc2 = bits.RotateLeft64(t, 10)
t = a[17] ^ d2
bc3 = bits.RotateLeft64(t, 15)
t = a[18] ^ d3
bc4 = bits.RotateLeft64(t, 56)
t = a[19] ^ d4
bc0 = bits.RotateLeft64(t, 27)
a[15] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc3 = bits.RotateLeft64(t, 41)
t = a[21] ^ d1
bc4 = bits.RotateLeft64(t, 2)
t = a[22] ^ d2
bc0 = bits.RotateLeft64(t, 62)
t = a[23] ^ d3
bc1 = bits.RotateLeft64(t, 55)
t = a[24] ^ d4
bc2 = bits.RotateLeft64(t, 39)
a[20] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
}
}
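The unrolled code above implements the theta, rho, pi, chi and iota steps of Keccak-f[1600], handling four rounds per loop iteration with different lane-addressing patterns so the permutation can run in place. For orientation only, here is a minimal non-unrolled sketch of the same permutation (not part of the vendored file). It assumes import "math/bits" and that rc is the package's table of 24 round constants, which the code above indexes as rc[i+1] through rc[i+3]; the rotc and piln tables are the standard reference constants and are named here only for the sketch.

var rotc = [24]int{
	1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
	27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44,
}

var piln = [24]int{
	10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
	15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1,
}

// keccakF1600Sketch applies all 24 rounds to the 5x5 state of 64-bit lanes.
func keccakF1600Sketch(a *[25]uint64) {
	var bc [5]uint64
	for round := 0; round < 24; round++ {
		// theta: compute column parities and mix them back into every lane.
		for i := 0; i < 5; i++ {
			bc[i] = a[i] ^ a[i+5] ^ a[i+10] ^ a[i+15] ^ a[i+20]
		}
		for i := 0; i < 5; i++ {
			d := bc[(i+4)%5] ^ bits.RotateLeft64(bc[(i+1)%5], 1)
			for j := 0; j < 25; j += 5 {
				a[j+i] ^= d
			}
		}
		// rho and pi: rotate each lane and move it to its new position.
		t := a[1]
		for i := 0; i < 24; i++ {
			j := piln[i]
			next := a[j]
			a[j] = bits.RotateLeft64(t, rotc[i])
			t = next
		}
		// chi: non-linear step combining each lane with two row neighbours.
		for j := 0; j < 25; j += 5 {
			copy(bc[:], a[j:j+5])
			for i := 0; i < 5; i++ {
				a[j+i] = bc[i] ^ (^bc[(i+1)%5] & bc[(i+2)%5])
			}
		}
		// iota: fold the round constant into lane (0,0).
		a[0] ^= rc[round]
	}
}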

13
vendor/golang.org/x/crypto/sha3/keccakf_amd64.go generated vendored Normal file

@ -0,0 +1,13 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 && !purego && gc
package sha3
// This function is implemented in keccakf_amd64.s.
//go:noescape
func keccakF1600(a *[25]uint64)
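The declaration above has no Go body: on amd64 the symbol is provided by keccakf_amd64.s (next in this diff), and //go:noescape promises the compiler that the assembly does not let the state pointer escape to the heap. Per the build constraint, builds with the purego tag or a non-gc toolchain presumably fall back to the pure-Go permutation shown earlier instead.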

390
vendor/golang.org/x/crypto/sha3/keccakf_amd64.s generated vendored Normal file

@ -0,0 +1,390 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 && !purego && gc
// This code was translated into a form compatible with 6a from the public
// domain sources at https://github.com/gvanas/KeccakCodePackage
// Offsets in state
#define _ba (0*8)
#define _be (1*8)
#define _bi (2*8)
#define _bo (3*8)
#define _bu (4*8)
#define _ga (5*8)
#define _ge (6*8)
#define _gi (7*8)
#define _go (8*8)
#define _gu (9*8)
#define _ka (10*8)
#define _ke (11*8)
#define _ki (12*8)
#define _ko (13*8)
#define _ku (14*8)
#define _ma (15*8)
#define _me (16*8)
#define _mi (17*8)
#define _mo (18*8)
#define _mu (19*8)
#define _sa (20*8)
#define _se (21*8)
#define _si (22*8)
#define _so (23*8)
#define _su (24*8)
// Temporary registers
#define rT1 AX
// Round vars
#define rpState DI
#define rpStack SP
#define rDa BX
#define rDe CX
#define rDi DX
#define rDo R8
#define rDu R9
#define rBa R10
#define rBe R11
#define rBi R12
#define rBo R13
#define rBu R14
#define rCa SI
#define rCe BP
#define rCi rBi
#define rCo rBo
#define rCu R15
#define MOVQ_RBI_RCE MOVQ rBi, rCe
#define XORQ_RT1_RCA XORQ rT1, rCa
#define XORQ_RT1_RCE XORQ rT1, rCe
#define XORQ_RBA_RCU XORQ rBa, rCu
#define XORQ_RBE_RCU XORQ rBe, rCu
#define XORQ_RDU_RCU XORQ rDu, rCu
#define XORQ_RDA_RCA XORQ rDa, rCa
#define XORQ_RDE_RCE XORQ rDe, rCe
#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \
/* Prepare round */ \
MOVQ rCe, rDa; \
ROLQ $1, rDa; \
\
MOVQ _bi(iState), rCi; \
XORQ _gi(iState), rDi; \
XORQ rCu, rDa; \
XORQ _ki(iState), rCi; \
XORQ _mi(iState), rDi; \
XORQ rDi, rCi; \
\
MOVQ rCi, rDe; \
ROLQ $1, rDe; \
\
MOVQ _bo(iState), rCo; \
XORQ _go(iState), rDo; \
XORQ rCa, rDe; \
XORQ _ko(iState), rCo; \
XORQ _mo(iState), rDo; \
XORQ rDo, rCo; \
\
MOVQ rCo, rDi; \
ROLQ $1, rDi; \
\
MOVQ rCu, rDo; \
XORQ rCe, rDi; \
ROLQ $1, rDo; \
\
MOVQ rCa, rDu; \
XORQ rCi, rDo; \
ROLQ $1, rDu; \
\
/* Result b */ \
MOVQ _ba(iState), rBa; \
MOVQ _ge(iState), rBe; \
XORQ rCo, rDu; \
MOVQ _ki(iState), rBi; \
MOVQ _mo(iState), rBo; \
MOVQ _su(iState), rBu; \
XORQ rDe, rBe; \
ROLQ $44, rBe; \
XORQ rDi, rBi; \
XORQ rDa, rBa; \
ROLQ $43, rBi; \
\
MOVQ rBe, rCa; \
MOVQ rc, rT1; \
ORQ rBi, rCa; \
XORQ rBa, rT1; \
XORQ rT1, rCa; \
MOVQ rCa, _ba(oState); \
\
XORQ rDu, rBu; \
ROLQ $14, rBu; \
MOVQ rBa, rCu; \
ANDQ rBe, rCu; \
XORQ rBu, rCu; \
MOVQ rCu, _bu(oState); \
\
XORQ rDo, rBo; \
ROLQ $21, rBo; \
MOVQ rBo, rT1; \
ANDQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _bi(oState); \
\
NOTQ rBi; \
ORQ rBa, rBu; \
ORQ rBo, rBi; \
XORQ rBo, rBu; \
XORQ rBe, rBi; \
MOVQ rBu, _bo(oState); \
MOVQ rBi, _be(oState); \
B_RBI_RCE; \
\
/* Result g */ \
MOVQ _gu(iState), rBe; \
XORQ rDu, rBe; \
MOVQ _ka(iState), rBi; \
ROLQ $20, rBe; \
XORQ rDa, rBi; \
ROLQ $3, rBi; \
MOVQ _bo(iState), rBa; \
MOVQ rBe, rT1; \
ORQ rBi, rT1; \
XORQ rDo, rBa; \
MOVQ _me(iState), rBo; \
MOVQ _si(iState), rBu; \
ROLQ $28, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ga(oState); \
G_RT1_RCA; \
\
XORQ rDe, rBo; \
ROLQ $45, rBo; \
MOVQ rBi, rT1; \
ANDQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _ge(oState); \
G_RT1_RCE; \
\
XORQ rDi, rBu; \
ROLQ $61, rBu; \
MOVQ rBu, rT1; \
ORQ rBa, rT1; \
XORQ rBo, rT1; \
MOVQ rT1, _go(oState); \
\
ANDQ rBe, rBa; \
XORQ rBu, rBa; \
MOVQ rBa, _gu(oState); \
NOTQ rBu; \
G_RBA_RCU; \
\
ORQ rBu, rBo; \
XORQ rBi, rBo; \
MOVQ rBo, _gi(oState); \
\
/* Result k */ \
MOVQ _be(iState), rBa; \
MOVQ _gi(iState), rBe; \
MOVQ _ko(iState), rBi; \
MOVQ _mu(iState), rBo; \
MOVQ _sa(iState), rBu; \
XORQ rDi, rBe; \
ROLQ $6, rBe; \
XORQ rDo, rBi; \
ROLQ $25, rBi; \
MOVQ rBe, rT1; \
ORQ rBi, rT1; \
XORQ rDe, rBa; \
ROLQ $1, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ka(oState); \
K_RT1_RCA; \
\
XORQ rDu, rBo; \
ROLQ $8, rBo; \
MOVQ rBi, rT1; \
ANDQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _ke(oState); \
K_RT1_RCE; \
\
XORQ rDa, rBu; \
ROLQ $18, rBu; \
NOTQ rBo; \
MOVQ rBo, rT1; \
ANDQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _ki(oState); \
\
MOVQ rBu, rT1; \
ORQ rBa, rT1; \
XORQ rBo, rT1; \
MOVQ rT1, _ko(oState); \
\
ANDQ rBe, rBa; \
XORQ rBu, rBa; \
MOVQ rBa, _ku(oState); \
K_RBA_RCU; \
\
/* Result m */ \
MOVQ _ga(iState), rBe; \
XORQ rDa, rBe; \
MOVQ _ke(iState), rBi; \
ROLQ $36, rBe; \
XORQ rDe, rBi; \
MOVQ _bu(iState), rBa; \
ROLQ $10, rBi; \
MOVQ rBe, rT1; \
MOVQ _mi(iState), rBo; \
ANDQ rBi, rT1; \
XORQ rDu, rBa; \
MOVQ _so(iState), rBu; \
ROLQ $27, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ma(oState); \
M_RT1_RCA; \
\
XORQ rDi, rBo; \
ROLQ $15, rBo; \
MOVQ rBi, rT1; \
ORQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _me(oState); \
M_RT1_RCE; \
\
XORQ rDo, rBu; \
ROLQ $56, rBu; \
NOTQ rBo; \
MOVQ rBo, rT1; \
ORQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _mi(oState); \
\
ORQ rBa, rBe; \
XORQ rBu, rBe; \
MOVQ rBe, _mu(oState); \
\
ANDQ rBa, rBu; \
XORQ rBo, rBu; \
MOVQ rBu, _mo(oState); \
M_RBE_RCU; \
\
/* Result s */ \
MOVQ _bi(iState), rBa; \
MOVQ _go(iState), rBe; \
MOVQ _ku(iState), rBi; \
XORQ rDi, rBa; \
MOVQ _ma(iState), rBo; \
ROLQ $62, rBa; \
XORQ rDo, rBe; \
MOVQ _se(iState), rBu; \
ROLQ $55, rBe; \
\
XORQ rDu, rBi; \
MOVQ rBa, rDu; \
XORQ rDe, rBu; \
ROLQ $2, rBu; \
ANDQ rBe, rDu; \
XORQ rBu, rDu; \
MOVQ rDu, _su(oState); \
\
ROLQ $39, rBi; \
S_RDU_RCU; \
NOTQ rBe; \
XORQ rDa, rBo; \
MOVQ rBe, rDa; \
ANDQ rBi, rDa; \
XORQ rBa, rDa; \
MOVQ rDa, _sa(oState); \
S_RDA_RCA; \
\
ROLQ $41, rBo; \
MOVQ rBi, rDe; \
ORQ rBo, rDe; \
XORQ rBe, rDe; \
MOVQ rDe, _se(oState); \
S_RDE_RCE; \
\
MOVQ rBo, rDi; \
MOVQ rBu, rDo; \
ANDQ rBu, rDi; \
ORQ rBa, rDo; \
XORQ rBi, rDi; \
XORQ rBo, rDo; \
MOVQ rDi, _si(oState); \
MOVQ rDo, _so(oState) \
// func keccakF1600(a *[25]uint64)
TEXT ·keccakF1600(SB), 0, $200-8
MOVQ a+0(FP), rpState
// Convert the user state into an internal state
NOTQ _be(rpState)
NOTQ _bi(rpState)
NOTQ _go(rpState)
NOTQ _ki(rpState)
NOTQ _mi(rpState)
NOTQ _sa(rpState)
// Execute the KeccakF permutation
MOVQ _ba(rpState), rCa
MOVQ _be(rpState), rCe
MOVQ _bu(rpState), rCu
XORQ _ga(rpState), rCa
XORQ _ge(rpState), rCe
XORQ _gu(rpState), rCu
XORQ _ka(rpState), rCa
XORQ _ke(rpState), rCe
XORQ _ku(rpState), rCu
XORQ _ma(rpState), rCa
XORQ _me(rpState), rCe
XORQ _mu(rpState), rCu
XORQ _sa(rpState), rCa
XORQ _se(rpState), rCe
MOVQ _si(rpState), rDi
MOVQ _so(rpState), rDo
XORQ _su(rpState), rCu
mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP)
// Revert the internal state to the user state
NOTQ _be(rpState)
NOTQ _bi(rpState)
NOTQ _go(rpState)
NOTQ _ki(rpState)
NOTQ _mi(rpState)
NOTQ _sa(rpState)
RET

185
vendor/golang.org/x/crypto/sha3/sha3.go generated vendored Normal file

@ -0,0 +1,185 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// spongeDirection indicates the direction bytes are flowing through the sponge.
type spongeDirection int
const (
// spongeAbsorbing indicates that the sponge is absorbing input.
spongeAbsorbing spongeDirection = iota
// spongeSqueezing indicates that the sponge is being squeezed.
spongeSqueezing
)
const (
// maxRate is the maximum size of the internal buffer. SHAKE-256
// currently needs the largest buffer.
maxRate = 168
)
type state struct {
// Generic sponge components.
a [25]uint64 // main state of the hash
rate int // the number of bytes of state to use
// dsbyte contains the "domain separation" bits and the first bit of
// the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
// SHA-3 and SHAKE functions by appending bitstrings to the message.
// Using a little-endian bit-ordering convention, these are "01" for SHA-3
// and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
// padding rule from section 5.1 is applied to pad the message to a multiple
// of the rate, which involves adding a "1" bit, zero or more "0" bits, and
// a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
// giving 00000110b (0x06) and 00011111b (0x1f).
// [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
// "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
// Extendable-Output Functions (May 2014)"
dsbyte byte
i, n int // storage[i:n] is the buffer, i is only used while squeezing
storage [maxRate]byte
// Specific to SHA-3 and SHAKE.
outputLen int // the default output size in bytes
state spongeDirection // whether the sponge is absorbing or squeezing
}
// BlockSize returns the rate of sponge underlying this hash function.
func (d *state) BlockSize() int { return d.rate }
// Size returns the output size of the hash function in bytes.
func (d *state) Size() int { return d.outputLen }
// Reset clears the internal state by zeroing the sponge state and
// the buffer indexes, and setting Sponge.state to absorbing.
func (d *state) Reset() {
// Zero the permutation's state.
for i := range d.a {
d.a[i] = 0
}
d.state = spongeAbsorbing
d.i, d.n = 0, 0
}
func (d *state) clone() *state {
ret := *d
return &ret
}
// permute applies the KeccakF-1600 permutation. It handles
// any input-output buffering.
func (d *state) permute() {
switch d.state {
case spongeAbsorbing:
// If we're absorbing, we need to xor the input into the state
// before applying the permutation.
xorIn(d, d.storage[:d.rate])
d.n = 0
keccakF1600(&d.a)
case spongeSqueezing:
// If we're squeezing, we need to apply the permutation before
// copying more output.
keccakF1600(&d.a)
d.i = 0
copyOut(d, d.storage[:d.rate])
}
}
// pads appends the domain separation bits in dsbyte, applies
// the multi-bitrate 10..1 padding rule, and permutes the state.
func (d *state) padAndPermute() {
// Pad with this instance's domain-separator bits. We know that there's
// at least one byte of space in d.buf because, if it were full,
// permute would have been called to empty it. dsbyte also contains the
// first one bit for the padding. See the comment in the state struct.
d.storage[d.n] = d.dsbyte
d.n++
for d.n < d.rate {
d.storage[d.n] = 0
d.n++
}
// This adds the final one bit for the padding. Because of the way that
// bits are numbered from the LSB upwards, the final bit is the MSB of
// the last byte.
d.storage[d.rate-1] ^= 0x80
// Apply the permutation
d.permute()
d.state = spongeSqueezing
d.n = d.rate
copyOut(d, d.storage[:d.rate])
}
// Write absorbs more data into the hash's state. It panics if any
// output has already been read.
func (d *state) Write(p []byte) (written int, err error) {
if d.state != spongeAbsorbing {
panic("sha3: Write after Read")
}
written = len(p)
for len(p) > 0 {
if d.n == 0 && len(p) >= d.rate {
// The fast path; absorb a full "rate" bytes of input and apply the permutation.
xorIn(d, p[:d.rate])
p = p[d.rate:]
keccakF1600(&d.a)
} else {
// The slow path; buffer the input until we can fill the sponge, and then xor it in.
todo := d.rate - d.n
if todo > len(p) {
todo = len(p)
}
d.n += copy(d.storage[d.n:], p[:todo])
p = p[todo:]
// If the sponge is full, apply the permutation.
if d.n == d.rate {
d.permute()
}
}
}
return
}
// Read squeezes an arbitrary number of bytes from the sponge.
func (d *state) Read(out []byte) (n int, err error) {
// If we're still absorbing, pad and apply the permutation.
if d.state == spongeAbsorbing {
d.padAndPermute()
}
n = len(out)
// Now, do the squeezing.
for len(out) > 0 {
n := copy(out, d.storage[d.i:d.n])
d.i += n
out = out[n:]
// Apply the permutation if we've squeezed the sponge dry.
if d.i == d.rate {
d.permute()
}
}
return
}
// Sum applies padding to the hash state and then squeezes out the desired
// number of output bytes. It panics if any output has already been read.
func (d *state) Sum(in []byte) []byte {
if d.state != spongeAbsorbing {
panic("sha3: Sum after Read")
}
// Make a copy of the original hash so that caller can keep writing
// and summing.
dup := d.clone()
hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation
dup.Read(hash)
return append(in, hash...)
}
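To make the absorb/squeeze flow above concrete, here is a small illustrative program against the package's public API (a sketch, not part of the vendored diff; the message and output length are arbitrary). Write absorbs input, Sum pads and squeezes a fixed-size digest from a clone of the state, and Read keeps squeezing a SHAKE sponge.

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	msg := []byte("hello")

	// Fixed-output SHA-3: Write absorbs, Sum pads and squeezes once.
	h := sha3.New256()
	h.Write(msg)
	fmt.Printf("sha3-256: %x\n", h.Sum(nil))

	// Variable-output SHAKE: squeeze as many bytes as needed.
	out := make([]byte, 64)
	sha3.ShakeSum128(out, msg)
	fmt.Printf("shake128: %x\n", out)
}

Note that Sum leaves the state usable for further writes because it operates on a clone, while Read flips the sponge to squeezing and makes any later Write or Sum panic, as the checks above enforce.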

303
vendor/golang.org/x/crypto/sha3/sha3_s390x.go generated vendored Normal file

@ -0,0 +1,303 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
package sha3
// This file contains code for using the 'compute intermediate
// message digest' (KIMD) and 'compute last message digest' (KLMD)
// instructions to compute SHA-3 and SHAKE hashes on IBM Z.
import (
"hash"
"golang.org/x/sys/cpu"
)
// codes represent 7-bit KIMD/KLMD function codes as defined in
// the Principles of Operation.
type code uint64
const (
// function codes for KIMD/KLMD
sha3_224 code = 32
sha3_256 = 33
sha3_384 = 34
sha3_512 = 35
shake_128 = 36
shake_256 = 37
nopad = 0x100
)
// kimd is a wrapper for the 'compute intermediate message digest' instruction.
// src must be a multiple of the rate for the given function code.
//
//go:noescape
func kimd(function code, chain *[200]byte, src []byte)
// klmd is a wrapper for the 'compute last message digest' instruction.
// src padding is handled by the instruction.
//
//go:noescape
func klmd(function code, chain *[200]byte, dst, src []byte)
type asmState struct {
a [200]byte // 1600 bit state
buf []byte // care must be taken to ensure cap(buf) is a multiple of rate
rate int // equivalent to block size
storage [3072]byte // underlying storage for buf
outputLen int // output length for full security
function code // KIMD/KLMD function code
state spongeDirection // whether the sponge is absorbing or squeezing
}
func newAsmState(function code) *asmState {
var s asmState
s.function = function
switch function {
case sha3_224:
s.rate = 144
s.outputLen = 28
case sha3_256:
s.rate = 136
s.outputLen = 32
case sha3_384:
s.rate = 104
s.outputLen = 48
case sha3_512:
s.rate = 72
s.outputLen = 64
case shake_128:
s.rate = 168
s.outputLen = 32
case shake_256:
s.rate = 136
s.outputLen = 64
default:
panic("sha3: unrecognized function code")
}
// limit s.buf size to a multiple of s.rate
s.resetBuf()
return &s
}
func (s *asmState) clone() *asmState {
c := *s
c.buf = c.storage[:len(s.buf):cap(s.buf)]
return &c
}
// copyIntoBuf copies b into buf. It will panic if there is not enough space to
// store all of b.
func (s *asmState) copyIntoBuf(b []byte) {
bufLen := len(s.buf)
s.buf = s.buf[:len(s.buf)+len(b)]
copy(s.buf[bufLen:], b)
}
// resetBuf points buf at storage, sets the length to 0 and sets cap to be a
// multiple of the rate.
func (s *asmState) resetBuf() {
max := (cap(s.storage) / s.rate) * s.rate
s.buf = s.storage[:0:max]
}
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (s *asmState) Write(b []byte) (int, error) {
if s.state != spongeAbsorbing {
panic("sha3: Write after Read")
}
length := len(b)
for len(b) > 0 {
if len(s.buf) == 0 && len(b) >= cap(s.buf) {
// Hash the data directly and push any remaining bytes
// into the buffer.
remainder := len(b) % s.rate
kimd(s.function, &s.a, b[:len(b)-remainder])
if remainder != 0 {
s.copyIntoBuf(b[len(b)-remainder:])
}
return length, nil
}
if len(s.buf) == cap(s.buf) {
// flush the buffer
kimd(s.function, &s.a, s.buf)
s.buf = s.buf[:0]
}
// copy as much as we can into the buffer
n := len(b)
if len(b) > cap(s.buf)-len(s.buf) {
n = cap(s.buf) - len(s.buf)
}
s.copyIntoBuf(b[:n])
b = b[n:]
}
return length, nil
}
// Read squeezes an arbitrary number of bytes from the sponge.
func (s *asmState) Read(out []byte) (n int, err error) {
// The 'compute last message digest' instruction only stores the digest
// at the first operand (dst) for SHAKE functions.
if s.function != shake_128 && s.function != shake_256 {
panic("sha3: can only call Read for SHAKE functions")
}
n = len(out)
// need to pad if we were absorbing
if s.state == spongeAbsorbing {
s.state = spongeSqueezing
// write hash directly into out if possible
if len(out)%s.rate == 0 {
klmd(s.function, &s.a, out, s.buf) // len(out) may be 0
s.buf = s.buf[:0]
return
}
// write hash into buffer
max := cap(s.buf)
if max > len(out) {
max = (len(out)/s.rate)*s.rate + s.rate
}
klmd(s.function, &s.a, s.buf[:max], s.buf)
s.buf = s.buf[:max]
}
for len(out) > 0 {
// flush the buffer
if len(s.buf) != 0 {
c := copy(out, s.buf)
out = out[c:]
s.buf = s.buf[c:]
continue
}
// write hash directly into out if possible
if len(out)%s.rate == 0 {
klmd(s.function|nopad, &s.a, out, nil)
return
}
// write hash into buffer
s.resetBuf()
if cap(s.buf) > len(out) {
s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate]
}
klmd(s.function|nopad, &s.a, s.buf, nil)
}
return
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (s *asmState) Sum(b []byte) []byte {
if s.state != spongeAbsorbing {
panic("sha3: Sum after Read")
}
// Copy the state to preserve the original.
a := s.a
// Hash the buffer. Note that we don't clear it because we
// aren't updating the state.
switch s.function {
case sha3_224, sha3_256, sha3_384, sha3_512:
klmd(s.function, &a, nil, s.buf)
return append(b, a[:s.outputLen]...)
case shake_128, shake_256:
d := make([]byte, s.outputLen, 64)
klmd(s.function, &a, d, s.buf)
return append(b, d[:s.outputLen]...)
default:
panic("sha3: unknown function")
}
}
// Reset resets the Hash to its initial state.
func (s *asmState) Reset() {
for i := range s.a {
s.a[i] = 0
}
s.resetBuf()
s.state = spongeAbsorbing
}
// Size returns the number of bytes Sum will return.
func (s *asmState) Size() int {
return s.outputLen
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (s *asmState) BlockSize() int {
return s.rate
}
// Clone returns a copy of the ShakeHash in its current state.
func (s *asmState) Clone() ShakeHash {
return s.clone()
}
// new224 returns an assembly implementation of SHA3-224 if available,
// otherwise it returns a generic implementation.
func new224() hash.Hash {
if cpu.S390X.HasSHA3 {
return newAsmState(sha3_224)
}
return new224Generic()
}
// new256 returns an assembly implementation of SHA3-256 if available,
// otherwise it returns a generic implementation.
func new256() hash.Hash {
if cpu.S390X.HasSHA3 {
return newAsmState(sha3_256)
}
return new256Generic()
}
// new384 returns an assembly implementation of SHA3-384 if available,
// otherwise it returns a generic implementation.
func new384() hash.Hash {
if cpu.S390X.HasSHA3 {
return newAsmState(sha3_384)
}
return new384Generic()
}
// new512 returns an assembly implementation of SHA3-512 if available,
// otherwise it returns a generic implementation.
func new512() hash.Hash {
if cpu.S390X.HasSHA3 {
return newAsmState(sha3_512)
}
return new512Generic()
}
// newShake128 returns an assembly implementation of SHAKE-128 if available,
// otherwise it returns a generic implementation.
func newShake128() ShakeHash {
if cpu.S390X.HasSHA3 {
return newAsmState(shake_128)
}
return newShake128Generic()
}
// newShake256 returns an assembly implementation of SHAKE-256 if available,
// otherwise it returns a generic implementation.
func newShake256() ShakeHash {
if cpu.S390X.HasSHA3 {
return newAsmState(shake_256)
}
return newShake256Generic()
}

33
vendor/golang.org/x/crypto/sha3/sha3_s390x.s generated vendored Normal file

@ -0,0 +1,33 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
#include "textflag.h"
// func kimd(function code, chain *[200]byte, src []byte)
TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
MOVD function+0(FP), R0
MOVD chain+8(FP), R1
LMG src+16(FP), R2, R3 // R2=base, R3=len
continue:
WORD $0xB93E0002 // KIMD --, R2
BVS continue // continue if interrupted
MOVD $0, R0 // reset R0 for pre-go1.8 compilers
RET
// func klmd(function code, chain *[200]byte, dst, src []byte)
TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
// TODO: SHAKE support
MOVD function+0(FP), R0
MOVD chain+8(FP), R1
LMG dst+16(FP), R2, R3 // R2=base, R3=len
LMG src+40(FP), R4, R5 // R4=base, R5=len
continue:
WORD $0xB93F0024 // KLMD R2, R4
BVS continue // continue if interrupted
MOVD $0, R0 // reset R0 for pre-go1.8 compilers
RET

174
vendor/golang.org/x/crypto/sha3/shake.go generated vendored Normal file

@ -0,0 +1,174 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file defines the ShakeHash interface, and provides
// functions for creating SHAKE and cSHAKE instances, as well as utility
// functions for hashing bytes to arbitrary-length output.
//
//
// SHAKE implementation is based on FIPS PUB 202 [1]
// cSHAKE implementations is based on NIST SP 800-185 [2]
//
// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
// [2] https://doi.org/10.6028/NIST.SP.800-185
import (
"encoding/binary"
"hash"
"io"
)
// ShakeHash defines the interface to hash functions that support
// arbitrary-length output. When used as a plain [hash.Hash], it
// produces minimum-length outputs that provide full-strength generic
// security.
type ShakeHash interface {
hash.Hash
// Read reads more output from the hash; reading affects the hash's
// state. (ShakeHash.Read is thus very different from Hash.Sum)
// It never returns an error, but subsequent calls to Write or Sum
// will panic.
io.Reader
// Clone returns a copy of the ShakeHash in its current state.
Clone() ShakeHash
}
// cSHAKE specific context
type cshakeState struct {
*state // SHA-3 state context and Read/Write operations
// initBlock is the cSHAKE specific initialization set of bytes. It is initialized
// by newCShake function and stores concatenation of N followed by S, encoded
// by the method specified in 3.3 of [1].
// It is stored here in order for Reset() to be able to put context into
// initial state.
initBlock []byte
}
// Consts for configuring initial SHA-3 state
const (
dsbyteShake = 0x1f
dsbyteCShake = 0x04
rate128 = 168
rate256 = 136
)
func bytepad(input []byte, w int) []byte {
// leftEncode always returns max 9 bytes
buf := make([]byte, 0, 9+len(input)+w)
buf = append(buf, leftEncode(uint64(w))...)
buf = append(buf, input...)
padlen := w - (len(buf) % w)
return append(buf, make([]byte, padlen)...)
}
func leftEncode(value uint64) []byte {
var b [9]byte
binary.BigEndian.PutUint64(b[1:], value)
// Trim all but last leading zero bytes
i := byte(1)
for i < 8 && b[i] == 0 {
i++
}
// Prepend number of encoded bytes
b[i-1] = 9 - i
return b[i-1:]
}
func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash {
c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}}
// leftEncode returns max 9 bytes
c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...)
c.initBlock = append(c.initBlock, N...)
c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...)
c.initBlock = append(c.initBlock, S...)
c.Write(bytepad(c.initBlock, c.rate))
return &c
}
// Reset resets the hash to initial state.
func (c *cshakeState) Reset() {
c.state.Reset()
c.Write(bytepad(c.initBlock, c.rate))
}
// Clone returns copy of a cSHAKE context within its current state.
func (c *cshakeState) Clone() ShakeHash {
b := make([]byte, len(c.initBlock))
copy(b, c.initBlock)
return &cshakeState{state: c.clone(), initBlock: b}
}
// Clone returns copy of SHAKE context within its current state.
func (c *state) Clone() ShakeHash {
return c.clone()
}
// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
// Its generic security strength is 128 bits against all attacks if at
// least 32 bytes of its output are used.
func NewShake128() ShakeHash {
return newShake128()
}
// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
// Its generic security strength is 256 bits against all attacks if
// at least 64 bytes of its output are used.
func NewShake256() ShakeHash {
return newShake256()
}
func newShake128Generic() *state {
return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake}
}
func newShake256Generic() *state {
return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake}
}
// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
// a customizable variant of SHAKE128.
// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is
// desired. S is a customization byte string used for domain separation - two cSHAKE
// computations on same input with different S yield unrelated outputs.
// When N and S are both empty, this is equivalent to NewShake128.
func NewCShake128(N, S []byte) ShakeHash {
if len(N) == 0 && len(S) == 0 {
return NewShake128()
}
return newCShake(N, S, rate128, 32, dsbyteCShake)
}
// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
// a customizable variant of SHAKE256.
// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is
// desired. S is a customization byte string used for domain separation - two cSHAKE
// computations on same input with different S yield unrelated outputs.
// When N and S are both empty, this is equivalent to NewShake256.
func NewCShake256(N, S []byte) ShakeHash {
if len(N) == 0 && len(S) == 0 {
return NewShake256()
}
return newCShake(N, S, rate256, 64, dsbyteCShake)
}
// ShakeSum128 writes an arbitrary-length digest of data into hash.
func ShakeSum128(hash, data []byte) {
h := NewShake128()
h.Write(data)
h.Read(hash)
}
// ShakeSum256 writes an arbitrary-length digest of data into hash.
func ShakeSum256(hash, data []byte) {
h := NewShake256()
h.Write(data)
h.Read(hash)
}
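The cSHAKE constructors above take a function name N and a customization string S for domain separation; a short sketch of the effect (illustrative only, the strings are arbitrary):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	msg := []byte("hello")
	k1 := make([]byte, 32)
	k2 := make([]byte, 32)

	// Same input, different customization strings: unrelated outputs.
	c1 := sha3.NewCShake256(nil, []byte("key derivation"))
	c1.Write(msg)
	c1.Read(k1)

	c2 := sha3.NewCShake256(nil, []byte("message authentication"))
	c2.Write(msg)
	c2.Read(k2)

	fmt.Printf("%x\n%x\n", k1, k2)
}

With both N and S empty the constructors simply return plain SHAKE, as the length checks above show.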

15
vendor/golang.org/x/crypto/sha3/shake_noasm.go generated vendored Normal file

@ -0,0 +1,15 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !gc || purego || !s390x
package sha3
func newShake128() *state {
return newShake128Generic()
}
func newShake256() *state {
return newShake256Generic()
}

40
vendor/golang.org/x/crypto/sha3/xor.go generated vendored Normal file

@ -0,0 +1,40 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
import (
"crypto/subtle"
"encoding/binary"
"unsafe"
"golang.org/x/sys/cpu"
)
// xorIn xors the bytes in buf into the state.
func xorIn(d *state, buf []byte) {
if cpu.IsBigEndian {
for i := 0; len(buf) >= 8; i++ {
a := binary.LittleEndian.Uint64(buf)
d.a[i] ^= a
buf = buf[8:]
}
} else {
ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a))
subtle.XORBytes(ab[:], ab[:], buf)
}
}
// copyOut copies uint64s to a byte buffer.
func copyOut(d *state, b []byte) {
if cpu.IsBigEndian {
for i := 0; len(b) >= 8; i++ {
binary.LittleEndian.PutUint64(b, d.a[i])
b = b[8:]
}
} else {
ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a))
copy(b, ab[:])
}
}

4
vendor/golang.org/x/sys/LICENSE generated vendored

@ -1,4 +1,4 @@
Copyright 2009 The Go Authors.
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

17
vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s generated vendored Normal file

@ -0,0 +1,17 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
#include "textflag.h"
//
// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
//
TEXT ·syscall6(SB),NOSPLIT,$0-88
JMP syscall·syscall6(SB)
TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
JMP syscall·rawSyscall6(SB)

66
vendor/golang.org/x/sys/cpu/byteorder.go generated vendored Normal file

@ -0,0 +1,66 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import (
"runtime"
)
// byteOrder is a subset of encoding/binary.ByteOrder.
type byteOrder interface {
Uint32([]byte) uint32
Uint64([]byte) uint64
}
type littleEndian struct{}
type bigEndian struct{}
func (littleEndian) Uint32(b []byte) uint32 {
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func (littleEndian) Uint64(b []byte) uint64 {
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
func (bigEndian) Uint32(b []byte) uint32 {
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
}
func (bigEndian) Uint64(b []byte) uint64 {
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
}
// hostByteOrder returns littleEndian on little-endian machines and
// bigEndian on big-endian machines.
func hostByteOrder() byteOrder {
switch runtime.GOARCH {
case "386", "amd64", "amd64p32",
"alpha",
"arm", "arm64",
"loong64",
"mipsle", "mips64le", "mips64p32le",
"nios2",
"ppc64le",
"riscv", "riscv64",
"sh":
return littleEndian{}
case "armbe", "arm64be",
"m68k",
"mips", "mips64", "mips64p32",
"ppc", "ppc64",
"s390", "s390x",
"shbe",
"sparc", "sparc64":
return bigEndian{}
}
panic("unknown architecture")
}

291
vendor/golang.org/x/sys/cpu/cpu.go generated vendored Normal file

@ -0,0 +1,291 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cpu implements processor feature detection for
// various CPU architectures.
package cpu
import (
"os"
"strings"
)
// Initialized reports whether the CPU features were initialized.
//
// For some GOOS/GOARCH combinations initialization of the CPU features depends
// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm
// Initialized will report false if reading the file fails.
var Initialized bool
// CacheLinePad is used to pad structs to avoid false sharing.
type CacheLinePad struct{ _ [cacheLineSize]byte }
// X86 contains the supported CPU features of the
// current X86/AMD64 platform. If the current platform
// is not X86/AMD64 then all feature flags are false.
//
// X86 is padded to avoid false sharing. Further the HasAVX
// and HasAVX2 are only set if the OS supports XMM and YMM
// registers in addition to the CPUID feature bit being set.
var X86 struct {
_ CacheLinePad
HasAES bool // AES hardware implementation (AES NI)
HasADX bool // Multi-precision add-carry instruction extensions
HasAVX bool // Advanced vector extension
HasAVX2 bool // Advanced vector extension 2
HasAVX512 bool // Advanced vector extension 512
HasAVX512F bool // Advanced vector extension 512 Foundation Instructions
HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions
HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions
HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions
HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions
HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions
HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions
HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add
HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions
HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision
HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision
HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions
HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations
HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions
HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions
HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions
HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2
HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms
HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions
HasAMXTile bool // Advanced Matrix Extension Tile instructions
HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions
HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions
HasBMI1 bool // Bit manipulation instruction set 1
HasBMI2 bool // Bit manipulation instruction set 2
HasCX16 bool // Compare and exchange 16 Bytes
HasERMS bool // Enhanced REP for MOVSB and STOSB
HasFMA bool // Fused-multiply-add instructions
HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers.
HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM
HasPOPCNT bool // Hamming weight instruction POPCNT.
HasRDRAND bool // RDRAND instruction (on-chip random number generator)
HasRDSEED bool // RDSEED instruction (on-chip random number generator)
HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64)
HasSSE3 bool // Streaming SIMD extension 3
HasSSSE3 bool // Supplemental streaming SIMD extension 3
HasSSE41 bool // Streaming SIMD extension 4 and 4.1
HasSSE42 bool // Streaming SIMD extension 4 and 4.2
_ CacheLinePad
}
// ARM64 contains the supported CPU features of the
// current ARMv8(aarch64) platform. If the current platform
// is not arm64 then all feature flags are false.
var ARM64 struct {
_ CacheLinePad
HasFP bool // Floating-point instruction set (always available)
HasASIMD bool // Advanced SIMD (always available)
HasEVTSTRM bool // Event stream support
HasAES bool // AES hardware implementation
HasPMULL bool // Polynomial multiplication instruction set
HasSHA1 bool // SHA1 hardware implementation
HasSHA2 bool // SHA2 hardware implementation
HasCRC32 bool // CRC32 hardware implementation
HasATOMICS bool // Atomic memory operation instruction set
HasFPHP bool // Half precision floating-point instruction set
HasASIMDHP bool // Advanced SIMD half precision instruction set
HasCPUID bool // CPUID identification scheme registers
HasASIMDRDM bool // Rounding double multiply add/subtract instruction set
HasJSCVT bool // Javascript conversion from floating-point to integer
HasFCMA bool // Floating-point multiplication and addition of complex numbers
HasLRCPC bool // Release Consistent processor consistent support
HasDCPOP bool // Persistent memory support
HasSHA3 bool // SHA3 hardware implementation
HasSM3 bool // SM3 hardware implementation
HasSM4 bool // SM4 hardware implementation
HasASIMDDP bool // Advanced SIMD double precision instruction set
HasSHA512 bool // SHA512 hardware implementation
HasSVE bool // Scalable Vector Extensions
HasSVE2 bool // Scalable Vector Extensions 2
HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32
_ CacheLinePad
}
// ARM contains the supported CPU features of the current ARM (32-bit) platform.
// All feature flags are false if:
// 1. the current platform is not arm, or
// 2. the current operating system is not Linux.
var ARM struct {
_ CacheLinePad
HasSWP bool // SWP instruction support
HasHALF bool // Half-word load and store support
HasTHUMB bool // ARM Thumb instruction set
Has26BIT bool // Address space limited to 26-bits
HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support
HasFPA bool // Floating point arithmetic support
HasVFP bool // Vector floating point support
HasEDSP bool // DSP Extensions support
HasJAVA bool // Java instruction set
HasIWMMXT bool // Intel Wireless MMX technology support
HasCRUNCH bool // MaverickCrunch context switching and handling
HasTHUMBEE bool // Thumb EE instruction set
HasNEON bool // NEON instruction set
HasVFPv3 bool // Vector floating point version 3 support
HasVFPv3D16 bool // Vector floating point version 3 D8-D15
HasTLS bool // Thread local storage support
HasVFPv4 bool // Vector floating point version 4 support
HasIDIVA bool // Integer divide instruction support in ARM mode
HasIDIVT bool // Integer divide instruction support in Thumb mode
HasVFPD32 bool // Vector floating point version 3 D15-D31
HasLPAE bool // Large Physical Address Extensions
HasEVTSTRM bool // Event stream support
HasAES bool // AES hardware implementation
HasPMULL bool // Polynomial multiplication instruction set
HasSHA1 bool // SHA1 hardware implementation
HasSHA2 bool // SHA2 hardware implementation
HasCRC32 bool // CRC32 hardware implementation
_ CacheLinePad
}
// MIPS64X contains the supported CPU features of the current mips64/mips64le
// platforms. If the current platform is not mips64/mips64le or the current
// operating system is not Linux then all feature flags are false.
var MIPS64X struct {
_ CacheLinePad
HasMSA bool // MIPS SIMD architecture
_ CacheLinePad
}
// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms.
// If the current platform is not ppc64/ppc64le then all feature flags are false.
//
// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00,
// since there are no optional categories. There are some exceptions that also
// require kernel support to work (DARN, SCV), so there are feature bits for
// those as well. The struct is padded to avoid false sharing.
var PPC64 struct {
_ CacheLinePad
HasDARN bool // Hardware random number generator (requires kernel enablement)
HasSCV bool // Syscall vectored (requires kernel enablement)
IsPOWER8 bool // ISA v2.07 (POWER8)
IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8
_ CacheLinePad
}
// S390X contains the supported CPU features of the current IBM Z
// (s390x) platform. If the current platform is not IBM Z then all
// feature flags are false.
//
// S390X is padded to avoid false sharing. Further HasVX is only set
// if the OS supports vector registers in addition to the STFLE
// feature bit being set.
var S390X struct {
_ CacheLinePad
HasZARCH bool // z/Architecture mode is active [mandatory]
HasSTFLE bool // store facility list extended
HasLDISP bool // long (20-bit) displacements
HasEIMM bool // 32-bit immediates
HasDFP bool // decimal floating point
HasETF3EH bool // ETF-3 enhanced
HasMSA bool // message security assist (CPACF)
HasAES bool // KM-AES{128,192,256} functions
HasAESCBC bool // KMC-AES{128,192,256} functions
HasAESCTR bool // KMCTR-AES{128,192,256} functions
HasAESGCM bool // KMA-GCM-AES{128,192,256} functions
HasGHASH bool // KIMD-GHASH function
HasSHA1 bool // K{I,L}MD-SHA-1 functions
HasSHA256 bool // K{I,L}MD-SHA-256 functions
HasSHA512 bool // K{I,L}MD-SHA-512 functions
HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions
HasVX bool // vector facility
HasVXE bool // vector-enhancements facility 1
_ CacheLinePad
}
func init() {
archInit()
initOptions()
processOptions()
}
// options contains the cpu debug options that can be used in GODEBUG.
// Options are arch dependent and are added by the arch specific initOptions functions.
// Features that are mandatory for the specific GOARCH should have the Required field set
// (e.g. SSE2 on amd64).
var options []option
// Option names should be lower case. e.g. avx instead of AVX.
type option struct {
Name string
Feature *bool
Specified bool // whether feature value was specified in GODEBUG
Enable bool // whether feature should be enabled
Required bool // whether feature is mandatory and can not be disabled
}
func processOptions() {
env := os.Getenv("GODEBUG")
field:
for env != "" {
field := ""
i := strings.IndexByte(env, ',')
if i < 0 {
field, env = env, ""
} else {
field, env = env[:i], env[i+1:]
}
if len(field) < 4 || field[:4] != "cpu." {
continue
}
i = strings.IndexByte(field, '=')
if i < 0 {
print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n")
continue
}
key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on"
var enable bool
switch value {
case "on":
enable = true
case "off":
enable = false
default:
print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n")
continue field
}
if key == "all" {
for i := range options {
options[i].Specified = true
options[i].Enable = enable || options[i].Required
}
continue field
}
for i := range options {
if options[i].Name == key {
options[i].Specified = true
options[i].Enable = enable
continue field
}
}
print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n")
}
for _, o := range options {
if !o.Specified {
continue
}
if o.Enable && !*o.Feature {
print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n")
continue
}
if !o.Enable && o.Required {
print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n")
continue
}
*o.Feature = o.Enable
}
}
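Callers typically gate optimized code paths on these feature flags; a minimal sketch (the feature choice and messages are illustrative, not the library's own example):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Pick an implementation based on detected features; the vendored sha3
	// package does the same thing when it gates its s390x fast path on
	// cpu.S390X.HasSHA3.
	if cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ {
		fmt.Println("fast path: AES-NI and carry-less multiply available")
	} else {
		fmt.Println("generic path")
	}
}

Because processOptions runs from init, a binary can be started with GODEBUG=cpu.all=off, or with an individual lower-case option name registered by the arch-specific initOptions, to force the generic path during testing; features marked Required, such as SSE2 on amd64, cannot be disabled.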

33
vendor/golang.org/x/sys/cpu/cpu_aix.go generated vendored Normal file

@ -0,0 +1,33 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix
package cpu
const (
// getsystemcfg constants
_SC_IMPL = 2
_IMPL_POWER8 = 0x10000
_IMPL_POWER9 = 0x20000
)
func archInit() {
impl := getsystemcfg(_SC_IMPL)
if impl&_IMPL_POWER8 != 0 {
PPC64.IsPOWER8 = true
}
if impl&_IMPL_POWER9 != 0 {
PPC64.IsPOWER8 = true
PPC64.IsPOWER9 = true
}
Initialized = true
}
func getsystemcfg(label int) (n uint64) {
r0, _ := callgetsystemcfg(label)
n = uint64(r0)
return
}

73
vendor/golang.org/x/sys/cpu/cpu_arm.go generated vendored Normal file

@ -0,0 +1,73 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
const cacheLineSize = 32
// HWCAP/HWCAP2 bits.
// These are specific to Linux.
const (
hwcap_SWP = 1 << 0
hwcap_HALF = 1 << 1
hwcap_THUMB = 1 << 2
hwcap_26BIT = 1 << 3
hwcap_FAST_MULT = 1 << 4
hwcap_FPA = 1 << 5
hwcap_VFP = 1 << 6
hwcap_EDSP = 1 << 7
hwcap_JAVA = 1 << 8
hwcap_IWMMXT = 1 << 9
hwcap_CRUNCH = 1 << 10
hwcap_THUMBEE = 1 << 11
hwcap_NEON = 1 << 12
hwcap_VFPv3 = 1 << 13
hwcap_VFPv3D16 = 1 << 14
hwcap_TLS = 1 << 15
hwcap_VFPv4 = 1 << 16
hwcap_IDIVA = 1 << 17
hwcap_IDIVT = 1 << 18
hwcap_VFPD32 = 1 << 19
hwcap_LPAE = 1 << 20
hwcap_EVTSTRM = 1 << 21
hwcap2_AES = 1 << 0
hwcap2_PMULL = 1 << 1
hwcap2_SHA1 = 1 << 2
hwcap2_SHA2 = 1 << 3
hwcap2_CRC32 = 1 << 4
)
func initOptions() {
options = []option{
{Name: "pmull", Feature: &ARM.HasPMULL},
{Name: "sha1", Feature: &ARM.HasSHA1},
{Name: "sha2", Feature: &ARM.HasSHA2},
{Name: "swp", Feature: &ARM.HasSWP},
{Name: "thumb", Feature: &ARM.HasTHUMB},
{Name: "thumbee", Feature: &ARM.HasTHUMBEE},
{Name: "tls", Feature: &ARM.HasTLS},
{Name: "vfp", Feature: &ARM.HasVFP},
{Name: "vfpd32", Feature: &ARM.HasVFPD32},
{Name: "vfpv3", Feature: &ARM.HasVFPv3},
{Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16},
{Name: "vfpv4", Feature: &ARM.HasVFPv4},
{Name: "half", Feature: &ARM.HasHALF},
{Name: "26bit", Feature: &ARM.Has26BIT},
{Name: "fastmul", Feature: &ARM.HasFASTMUL},
{Name: "fpa", Feature: &ARM.HasFPA},
{Name: "edsp", Feature: &ARM.HasEDSP},
{Name: "java", Feature: &ARM.HasJAVA},
{Name: "iwmmxt", Feature: &ARM.HasIWMMXT},
{Name: "crunch", Feature: &ARM.HasCRUNCH},
{Name: "neon", Feature: &ARM.HasNEON},
{Name: "idivt", Feature: &ARM.HasIDIVT},
{Name: "idiva", Feature: &ARM.HasIDIVA},
{Name: "lpae", Feature: &ARM.HasLPAE},
{Name: "evtstrm", Feature: &ARM.HasEVTSTRM},
{Name: "aes", Feature: &ARM.HasAES},
{Name: "crc32", Feature: &ARM.HasCRC32},
}
}

182
vendor/golang.org/x/sys/cpu/cpu_arm64.go generated vendored Normal file

@ -0,0 +1,182 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import "runtime"
// cacheLineSize is used to prevent false sharing of cache lines.
// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size.
// It doesn't cost much and is much more future-proof.
const cacheLineSize = 128
func initOptions() {
options = []option{
{Name: "fp", Feature: &ARM64.HasFP},
{Name: "asimd", Feature: &ARM64.HasASIMD},
{Name: "evstrm", Feature: &ARM64.HasEVTSTRM},
{Name: "aes", Feature: &ARM64.HasAES},
{Name: "fphp", Feature: &ARM64.HasFPHP},
{Name: "jscvt", Feature: &ARM64.HasJSCVT},
{Name: "lrcpc", Feature: &ARM64.HasLRCPC},
{Name: "pmull", Feature: &ARM64.HasPMULL},
{Name: "sha1", Feature: &ARM64.HasSHA1},
{Name: "sha2", Feature: &ARM64.HasSHA2},
{Name: "sha3", Feature: &ARM64.HasSHA3},
{Name: "sha512", Feature: &ARM64.HasSHA512},
{Name: "sm3", Feature: &ARM64.HasSM3},
{Name: "sm4", Feature: &ARM64.HasSM4},
{Name: "sve", Feature: &ARM64.HasSVE},
{Name: "sve2", Feature: &ARM64.HasSVE2},
{Name: "crc32", Feature: &ARM64.HasCRC32},
{Name: "atomics", Feature: &ARM64.HasATOMICS},
{Name: "asimdhp", Feature: &ARM64.HasASIMDHP},
{Name: "cpuid", Feature: &ARM64.HasCPUID},
{Name: "asimrdm", Feature: &ARM64.HasASIMDRDM},
{Name: "fcma", Feature: &ARM64.HasFCMA},
{Name: "dcpop", Feature: &ARM64.HasDCPOP},
{Name: "asimddp", Feature: &ARM64.HasASIMDDP},
{Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM},
}
}
func archInit() {
switch runtime.GOOS {
case "freebsd":
readARM64Registers()
case "linux", "netbsd", "openbsd":
doinit()
default:
// Many platforms don't seem to allow reading these registers.
setMinimalFeatures()
}
}
// setMinimalFeatures fakes the minimal ARM64 features expected by
// TestARM64minimalFeatures.
func setMinimalFeatures() {
ARM64.HasASIMD = true
ARM64.HasFP = true
}
func readARM64Registers() {
Initialized = true
parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0())
}
func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
// ID_AA64ISAR0_EL1
switch extractBits(isar0, 4, 7) {
case 1:
ARM64.HasAES = true
case 2:
ARM64.HasAES = true
ARM64.HasPMULL = true
}
switch extractBits(isar0, 8, 11) {
case 1:
ARM64.HasSHA1 = true
}
switch extractBits(isar0, 12, 15) {
case 1:
ARM64.HasSHA2 = true
case 2:
ARM64.HasSHA2 = true
ARM64.HasSHA512 = true
}
switch extractBits(isar0, 16, 19) {
case 1:
ARM64.HasCRC32 = true
}
switch extractBits(isar0, 20, 23) {
case 2:
ARM64.HasATOMICS = true
}
switch extractBits(isar0, 28, 31) {
case 1:
ARM64.HasASIMDRDM = true
}
switch extractBits(isar0, 32, 35) {
case 1:
ARM64.HasSHA3 = true
}
switch extractBits(isar0, 36, 39) {
case 1:
ARM64.HasSM3 = true
}
switch extractBits(isar0, 40, 43) {
case 1:
ARM64.HasSM4 = true
}
switch extractBits(isar0, 44, 47) {
case 1:
ARM64.HasASIMDDP = true
}
// ID_AA64ISAR1_EL1
switch extractBits(isar1, 0, 3) {
case 1:
ARM64.HasDCPOP = true
}
switch extractBits(isar1, 12, 15) {
case 1:
ARM64.HasJSCVT = true
}
switch extractBits(isar1, 16, 19) {
case 1:
ARM64.HasFCMA = true
}
switch extractBits(isar1, 20, 23) {
case 1:
ARM64.HasLRCPC = true
}
// ID_AA64PFR0_EL1
switch extractBits(pfr0, 16, 19) {
case 0:
ARM64.HasFP = true
case 1:
ARM64.HasFP = true
ARM64.HasFPHP = true
}
switch extractBits(pfr0, 20, 23) {
case 0:
ARM64.HasASIMD = true
case 1:
ARM64.HasASIMD = true
ARM64.HasASIMDHP = true
}
switch extractBits(pfr0, 32, 35) {
case 1:
ARM64.HasSVE = true
parseARM64SVERegister(getzfr0())
}
}
func parseARM64SVERegister(zfr0 uint64) {
switch extractBits(zfr0, 0, 3) {
case 1:
ARM64.HasSVE2 = true
}
}
func extractBits(data uint64, start, end uint) uint {
return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
}

39
vendor/golang.org/x/sys/cpu/cpu_arm64.s generated vendored Normal file

@ -0,0 +1,39 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
#include "textflag.h"
// func getisar0() uint64
TEXT ·getisar0(SB),NOSPLIT,$0-8
// get Instruction Set Attributes 0 into x0
// mrs x0, ID_AA64ISAR0_EL1 = d5380600
WORD $0xd5380600
MOVD R0, ret+0(FP)
RET
// func getisar1() uint64
TEXT ·getisar1(SB),NOSPLIT,$0-8
// get Instruction Set Attributes 1 into x0
// mrs x0, ID_AA64ISAR1_EL1 = d5380620
WORD $0xd5380620
MOVD R0, ret+0(FP)
RET
// func getpfr0() uint64
TEXT ·getpfr0(SB),NOSPLIT,$0-8
// get Processor Feature Register 0 into x0
// mrs x0, ID_AA64PFR0_EL1 = d5380400
WORD $0xd5380400
MOVD R0, ret+0(FP)
RET
// func getzfr0() uint64
TEXT ·getzfr0(SB),NOSPLIT,$0-8
// get SVE Feature Register 0 into x0
// mrs x0, ID_AA64ZFR0_EL1 = d5380480
WORD $0xd5380480
MOVD R0, ret+0(FP)
RET

12
vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go generated vendored Normal file

@ -0,0 +1,12 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
package cpu
func getisar0() uint64
func getisar1() uint64
func getpfr0() uint64
func getzfr0() uint64

21
vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go generated vendored Normal file

@ -0,0 +1,21 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
package cpu
// haveAsmFunctions reports whether the other functions in this file can
// be safely called.
func haveAsmFunctions() bool { return true }
// The following feature detection functions are defined in cpu_s390x.s.
// They are likely to be expensive to call so the results should be cached.
func stfle() facilityList
func kmQuery() queryResult
func kmcQuery() queryResult
func kmctrQuery() queryResult
func kmaQuery() queryResult
func kimdQuery() queryResult
func klmdQuery() queryResult

15
vendor/golang.org/x/sys/cpu/cpu_gc_x86.go generated vendored Normal file

@ -0,0 +1,15 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (386 || amd64 || amd64p32) && gc
package cpu
// cpuid is implemented in cpu_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func xgetbv() (eax, edx uint32)

11
vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go generated vendored Normal file

@ -0,0 +1,11 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gccgo
package cpu
func getisar0() uint64 { return 0 }
func getisar1() uint64 { return 0 }
func getpfr0() uint64 { return 0 }

22
vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go generated vendored Normal file

@ -0,0 +1,22 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gccgo
package cpu
// haveAsmFunctions reports whether the other functions in this file can
// be safely called.
func haveAsmFunctions() bool { return false }
// TODO(mundaym): the following feature detection functions are currently
// stubs. See https://golang.org/cl/162887 for how to fix this.
// They are likely to be expensive to call so the results should be cached.
func stfle() facilityList { panic("not implemented for gccgo") }
func kmQuery() queryResult { panic("not implemented for gccgo") }
func kmcQuery() queryResult { panic("not implemented for gccgo") }
func kmctrQuery() queryResult { panic("not implemented for gccgo") }
func kmaQuery() queryResult { panic("not implemented for gccgo") }
func kimdQuery() queryResult { panic("not implemented for gccgo") }
func klmdQuery() queryResult { panic("not implemented for gccgo") }

37
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c generated vendored Normal file

@ -0,0 +1,37 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (386 || amd64 || amd64p32) && gccgo
#include <cpuid.h>
#include <stdint.h>
#include <x86intrin.h>
// Need to wrap __get_cpuid_count because it's declared as static.
int
gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
{
return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
}
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#pragma GCC push_options
#pragma GCC target("xsave")
#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function)
// xgetbv reads the contents of an XCR (Extended Control Register)
// specified in the ECX register into registers EDX:EAX.
// Currently, the only supported value for XCR is 0.
void
gccgoXgetbv(uint32_t *eax, uint32_t *edx)
{
uint64_t v = _xgetbv(0);
*eax = v & 0xffffffff;
*edx = v >> 32;
}
#pragma clang attribute pop
#pragma GCC pop_options

31
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go generated vendored Normal file

@ -0,0 +1,31 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (386 || amd64 || amd64p32) && gccgo
package cpu
//extern gccgoGetCpuidCount
func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32)
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) {
var a, b, c, d uint32
gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d)
return a, b, c, d
}
//extern gccgoXgetbv
func gccgoXgetbv(eax, edx *uint32)
func xgetbv() (eax, edx uint32) {
var a, d uint32
gccgoXgetbv(&a, &d)
return a, d
}
// gccgo doesn't build on Darwin, per:
// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76
func darwinSupportsAVX512() bool {
return false
}

15
vendor/golang.org/x/sys/cpu/cpu_linux.go generated vendored Normal file

@ -0,0 +1,15 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !386 && !amd64 && !amd64p32 && !arm64
package cpu
func archInit() {
if err := readHWCAP(); err != nil {
return
}
doinit()
Initialized = true
}

39
vendor/golang.org/x/sys/cpu/cpu_linux_arm.go generated vendored Normal file

@ -0,0 +1,39 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
func doinit() {
ARM.HasSWP = isSet(hwCap, hwcap_SWP)
ARM.HasHALF = isSet(hwCap, hwcap_HALF)
ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB)
ARM.Has26BIT = isSet(hwCap, hwcap_26BIT)
ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT)
ARM.HasFPA = isSet(hwCap, hwcap_FPA)
ARM.HasVFP = isSet(hwCap, hwcap_VFP)
ARM.HasEDSP = isSet(hwCap, hwcap_EDSP)
ARM.HasJAVA = isSet(hwCap, hwcap_JAVA)
ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT)
ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH)
ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE)
ARM.HasNEON = isSet(hwCap, hwcap_NEON)
ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3)
ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16)
ARM.HasTLS = isSet(hwCap, hwcap_TLS)
ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4)
ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA)
ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT)
ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32)
ARM.HasLPAE = isSet(hwCap, hwcap_LPAE)
ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM)
ARM.HasAES = isSet(hwCap2, hwcap2_AES)
ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL)
ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1)
ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2)
ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32)
}
func isSet(hwc uint, value uint) bool {
return hwc&value != 0
}

116
vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go generated vendored Normal file

@ -0,0 +1,116 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import (
"strings"
"syscall"
)
// HWCAP/HWCAP2 bits. These are exposed by Linux.
const (
hwcap_FP = 1 << 0
hwcap_ASIMD = 1 << 1
hwcap_EVTSTRM = 1 << 2
hwcap_AES = 1 << 3
hwcap_PMULL = 1 << 4
hwcap_SHA1 = 1 << 5
hwcap_SHA2 = 1 << 6
hwcap_CRC32 = 1 << 7
hwcap_ATOMICS = 1 << 8
hwcap_FPHP = 1 << 9
hwcap_ASIMDHP = 1 << 10
hwcap_CPUID = 1 << 11
hwcap_ASIMDRDM = 1 << 12
hwcap_JSCVT = 1 << 13
hwcap_FCMA = 1 << 14
hwcap_LRCPC = 1 << 15
hwcap_DCPOP = 1 << 16
hwcap_SHA3 = 1 << 17
hwcap_SM3 = 1 << 18
hwcap_SM4 = 1 << 19
hwcap_ASIMDDP = 1 << 20
hwcap_SHA512 = 1 << 21
hwcap_SVE = 1 << 22
hwcap_ASIMDFHM = 1 << 23
hwcap2_SVE2 = 1 << 1
)
// linuxKernelCanEmulateCPUID reports whether we're running
// on Linux 4.11+. Ideally we'd like to ask the question about
// whether the current kernel contains
// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2
// but the version number will have to do.
func linuxKernelCanEmulateCPUID() bool {
var un syscall.Utsname
syscall.Uname(&un)
var sb strings.Builder
for _, b := range un.Release[:] {
if b == 0 {
break
}
sb.WriteByte(byte(b))
}
major, minor, _, ok := parseRelease(sb.String())
return ok && (major > 4 || major == 4 && minor >= 11)
}
func doinit() {
if err := readHWCAP(); err != nil {
// We failed to read /proc/self/auxv. This can happen if the binary has
// been given extra capabilities(7) with /bin/setcap.
//
// When this happens, we have two options. If the Linux kernel is new
// enough (4.11+), we can read the arm64 registers directly which'll
// trap into the kernel and then return back to userspace.
//
// But on older kernels, such as Linux 4.4.180 as used on many Synology
// devices, calling readARM64Registers (specifically getisar0) will
// cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo
// instead.
//
// See golang/go#57336.
if linuxKernelCanEmulateCPUID() {
readARM64Registers()
} else {
readLinuxProcCPUInfo()
}
return
}
// HWCAP feature bits
ARM64.HasFP = isSet(hwCap, hwcap_FP)
ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD)
ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM)
ARM64.HasAES = isSet(hwCap, hwcap_AES)
ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL)
ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1)
ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2)
ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32)
ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS)
ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP)
ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP)
ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID)
ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM)
ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT)
ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA)
ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC)
ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP)
ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3)
ARM64.HasSM3 = isSet(hwCap, hwcap_SM3)
ARM64.HasSM4 = isSet(hwCap, hwcap_SM4)
ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP)
ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512)
ARM64.HasSVE = isSet(hwCap, hwcap_SVE)
ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM)
// HWCAP2 feature bits
ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2)
}
func isSet(hwc uint, value uint) bool {
return hwc&value != 0
}

22
vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go generated vendored Normal file

@ -0,0 +1,22 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && (mips64 || mips64le)
package cpu
// HWCAP bits. These are exposed by the Linux kernel 5.4.
const (
// CPU features
hwcap_MIPS_MSA = 1 << 1
)
func doinit() {
// HWCAP feature bits
MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA)
}
func isSet(hwc uint, value uint) bool {
return hwc&value != 0
}

9
vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go generated vendored Normal file

@ -0,0 +1,9 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x
package cpu
func doinit() {}

30
vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go generated vendored Normal file

@ -0,0 +1,30 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && (ppc64 || ppc64le)
package cpu
// HWCAP/HWCAP2 bits. These are exposed by the kernel.
const (
// ISA Level
_PPC_FEATURE2_ARCH_2_07 = 0x80000000
_PPC_FEATURE2_ARCH_3_00 = 0x00800000
// CPU features
_PPC_FEATURE2_DARN = 0x00200000
_PPC_FEATURE2_SCV = 0x00100000
)
func doinit() {
// HWCAP2 feature bits
PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07)
PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00)
PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN)
PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV)
}
func isSet(hwc uint, value uint) bool {
return hwc&value != 0
}

40
vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go generated vendored Normal file

@ -0,0 +1,40 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
const (
// bit mask values from /usr/include/bits/hwcap.h
hwcap_ZARCH = 2
hwcap_STFLE = 4
hwcap_MSA = 8
hwcap_LDISP = 16
hwcap_EIMM = 32
hwcap_DFP = 64
hwcap_ETF3EH = 256
hwcap_VX = 2048
hwcap_VXE = 8192
)
func initS390Xbase() {
// test HWCAP bit vector
has := func(featureMask uint) bool {
return hwCap&featureMask == featureMask
}
// mandatory
S390X.HasZARCH = has(hwcap_ZARCH)
// optional
S390X.HasSTFLE = has(hwcap_STFLE)
S390X.HasLDISP = has(hwcap_LDISP)
S390X.HasEIMM = has(hwcap_EIMM)
S390X.HasETF3EH = has(hwcap_ETF3EH)
S390X.HasDFP = has(hwcap_DFP)
S390X.HasMSA = has(hwcap_MSA)
S390X.HasVX = has(hwcap_VX)
if S390X.HasVX {
S390X.HasVXE = has(hwcap_VXE)
}
}

12
vendor/golang.org/x/sys/cpu/cpu_loong64.go generated vendored Normal file

@ -0,0 +1,12 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build loong64
package cpu
const cacheLineSize = 64
func initOptions() {
}

15
vendor/golang.org/x/sys/cpu/cpu_mips64x.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build mips64 || mips64le
package cpu
const cacheLineSize = 32
func initOptions() {
options = []option{
{Name: "msa", Feature: &MIPS64X.HasMSA},
}
}

11
vendor/golang.org/x/sys/cpu/cpu_mipsx.go generated vendored Normal file

@ -0,0 +1,11 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build mips || mipsle
package cpu
const cacheLineSize = 32
func initOptions() {}

173
vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go generated vendored Normal file

@ -0,0 +1,173 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import (
"syscall"
"unsafe"
)
// Minimal copy of functionality from x/sys/unix so the cpu package can call
// sysctl without depending on x/sys/unix.
const (
_CTL_QUERY = -2
_SYSCTL_VERS_1 = 0x1000000
)
var _zero uintptr
func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
var _p0 unsafe.Pointer
if len(mib) > 0 {
_p0 = unsafe.Pointer(&mib[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, errno := syscall.Syscall6(
syscall.SYS___SYSCTL,
uintptr(_p0),
uintptr(len(mib)),
uintptr(unsafe.Pointer(old)),
uintptr(unsafe.Pointer(oldlen)),
uintptr(unsafe.Pointer(new)),
uintptr(newlen))
if errno != 0 {
return errno
}
return nil
}
type sysctlNode struct {
Flags uint32
Num int32
Name [32]int8
Ver uint32
__rsvd uint32
Un [16]byte
_sysctl_size [8]byte
_sysctl_func [8]byte
_sysctl_parent [8]byte
_sysctl_desc [8]byte
}
func sysctlNodes(mib []int32) ([]sysctlNode, error) {
var olen uintptr
// Get a list of all sysctl nodes below the given MIB by performing
// a sysctl for the given MIB with CTL_QUERY appended.
mib = append(mib, _CTL_QUERY)
qnode := sysctlNode{Flags: _SYSCTL_VERS_1}
qp := (*byte)(unsafe.Pointer(&qnode))
sz := unsafe.Sizeof(qnode)
if err := sysctl(mib, nil, &olen, qp, sz); err != nil {
return nil, err
}
// Now that we know the size, get the actual nodes.
nodes := make([]sysctlNode, olen/sz)
np := (*byte)(unsafe.Pointer(&nodes[0]))
if err := sysctl(mib, np, &olen, qp, sz); err != nil {
return nil, err
}
return nodes, nil
}
func nametomib(name string) ([]int32, error) {
// Split name into components.
var parts []string
last := 0
for i := 0; i < len(name); i++ {
if name[i] == '.' {
parts = append(parts, name[last:i])
last = i + 1
}
}
parts = append(parts, name[last:])
mib := []int32{}
// Discover the nodes and construct the MIB OID.
for partno, part := range parts {
nodes, err := sysctlNodes(mib)
if err != nil {
return nil, err
}
for _, node := range nodes {
n := make([]byte, 0)
for i := range node.Name {
if node.Name[i] != 0 {
n = append(n, byte(node.Name[i]))
}
}
if string(n) == part {
mib = append(mib, int32(node.Num))
break
}
}
if len(mib) != partno+1 {
return nil, err
}
}
return mib, nil
}
// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's <aarch64/armreg.h>
type aarch64SysctlCPUID struct {
midr uint64 /* Main ID Register */
revidr uint64 /* Revision ID Register */
mpidr uint64 /* Multiprocessor Affinity Register */
aa64dfr0 uint64 /* A64 Debug Feature Register 0 */
aa64dfr1 uint64 /* A64 Debug Feature Register 1 */
aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */
aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */
aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */
aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */
aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */
aa64pfr0 uint64 /* A64 Processor Feature Register 0 */
aa64pfr1 uint64 /* A64 Processor Feature Register 1 */
aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */
mvfr0 uint32 /* Media and VFP Feature Register 0 */
mvfr1 uint32 /* Media and VFP Feature Register 1 */
mvfr2 uint32 /* Media and VFP Feature Register 2 */
pad uint32
clidr uint64 /* Cache Level ID Register */
ctr uint64 /* Cache Type Register */
}
func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) {
mib, err := nametomib(name)
if err != nil {
return nil, err
}
out := aarch64SysctlCPUID{}
n := unsafe.Sizeof(out)
_, _, errno := syscall.Syscall6(
syscall.SYS___SYSCTL,
uintptr(unsafe.Pointer(&mib[0])),
uintptr(len(mib)),
uintptr(unsafe.Pointer(&out)),
uintptr(unsafe.Pointer(&n)),
uintptr(0),
uintptr(0))
if errno != 0 {
return nil, errno
}
return &out, nil
}
func doinit() {
cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id")
if err != nil {
setMinimalFeatures()
return
}
parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0)
Initialized = true
}

65
vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go generated vendored Normal file

@ -0,0 +1,65 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import (
"syscall"
"unsafe"
)
// Minimal copy of functionality from x/sys/unix so the cpu package can call
// sysctl without depending on x/sys/unix.
const (
// From OpenBSD's sys/sysctl.h.
_CTL_MACHDEP = 7
// From OpenBSD's machine/cpu.h.
_CPU_ID_AA64ISAR0 = 2
_CPU_ID_AA64ISAR1 = 3
)
// Implemented in the runtime package (runtime/sys_openbsd3.go)
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
//go:linkname syscall_syscall6 syscall.syscall6
func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
_, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
if errno != 0 {
return errno
}
return nil
}
var libc_sysctl_trampoline_addr uintptr
//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
func sysctlUint64(mib []uint32) (uint64, bool) {
var out uint64
nout := unsafe.Sizeof(out)
if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil {
return 0, false
}
return out, true
}
func doinit() {
setMinimalFeatures()
// Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl.
isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0})
if !ok {
return
}
isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1})
if !ok {
return
}
parseARM64SystemRegisters(isar0, isar1, 0)
Initialized = true
}

Some files were not shown because too many files have changed in this diff.