diff --git a/.gitmodules b/.gitmodules index 3f6c07b5..e69de29b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "vendor"] - path = vendor - url = https://github.com/oragono/oragono-vendor.git diff --git a/DEVELOPING.md b/DEVELOPING.md index 761f7309..708d5a20 100644 --- a/DEVELOPING.md +++ b/DEVELOPING.md @@ -7,9 +7,7 @@ This is just a bunch of tips and tricks we keep in mind while developing Oragono You should use the [latest distribution of the Go language for your OS and architecture](https://golang.org/dl/). (If `uname -m` on your Raspberry Pi reports `armv7l`, use the `armv6l` distribution of Go; if it reports v8, you may be able to use the `arm64` distribution.) -Oragono vendors all its dependencies. The vendored code is tracked via a git submodule: `vendor/` is a submodule pointing to the [oragono-vendor](https://github.com/oragono/oragono-vendor) repository. As long as you're not modifying the vendored dependencies, `make` should take care of everything for you --- but if you are, see the "vendor" section below. - -Because of this, Oragono is self-contained and you should not need to fetch any dependencies with `go get`. Doing so is not recommended, since it may fetch incompatible versions of the dependencies. If you're having trouble building the code, it's very likely because your clone of the repository is in the wrong place: Go is very opinionated about where you should keep your code. Take a look at the [go workspaces documentation](https://golang.org/doc/code.html) if you're having trouble. +Oragono vendors all its dependencies. Because of this, Oragono is self-contained and you should not need to fetch any dependencies with `go get`. Doing so is not recommended, since it may fetch incompatible versions of the dependencies. ## Branches @@ -59,23 +57,6 @@ New release of Oragono! -## Updating `vendor/` - -The `vendor/` directory holds our dependencies. When we import new repos, we need to update this folder to contain these new deps. This is something that I'll mostly be handling. - -To update this folder: - -1. Install https://github.com/golang/dep -2. `cd` to Oragono folder -3. `dep ensure -update` -4. `cd vendor` -5. Commit the changes with the message `"Updated packages"` -6. `cd ..` -4. Commit the result with the message `"vendor: Updated submodules"` - -This will make sure things stay nice and up-to-date for users. - - ## Fuzzing and Testing Fuzzing can be useful. We don't have testing done inside the IRCd itself, but this fuzzer I've written works alright and has helped shake out various bugs: [irc_fuzz.py](https://gist.github.com/DanielOaks/63ae611039cdf591dfa4). diff --git a/Makefile b/Makefile index 3a2fa6ce..d06dd07f 100644 --- a/Makefile +++ b/Makefile @@ -1,24 +1,21 @@ -.PHONY: all install build release capdefs deps test +.PHONY: all install build release capdefs test capdef_file = ./irc/caps/defs.go all: install -install: deps +install: go install -v -build: deps +build: go build -v -release: deps +release: goreleaser --skip-publish --rm-dist capdefs: python3 ./gencapdefs.py > ${capdef_file} -deps: - git submodule update --init - test: python3 ./gencapdefs.py | diff - ${capdef_file} cd irc && go test . && go vet . 
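With the dependency sources now committed directly under `vendor/` (the files added below), a plain `go build` or `make` resolves imports from the repository itself; no submodule step and no `go get` is needed. A minimal sketch of what that means for code in this tree — the file and `main` function here are hypothetical, but `bytefmt.ByteSize`, `bytefmt.MEGABYTE`, and `bytefmt.ToBytes` come from the vendored package added below:

```go
// Hypothetical example file: imports resolve from vendor/ without `go get`.
package main

import (
	"fmt"

	"code.cloudfoundry.org/bytefmt" // vendor/code.cloudfoundry.org/bytefmt
)

func main() {
	// ByteSize picks the largest unit that keeps the value at or above 1.
	fmt.Println(bytefmt.ByteSize(100 * bytefmt.MEGABYTE)) // "100M"

	// ToBytes accepts SI and binary suffixes alike, all interpreted as base-2.
	if n, err := bytefmt.ToBytes("1.5GB"); err == nil {
		fmt.Println(n) // 1610612736, i.e. 1.5 * 1024^3
	}
}
```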
diff --git a/vendor b/vendor deleted file mode 160000 index 6e49b8a2..00000000 --- a/vendor +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 6e49b8a260f1ba3351c17876c2e2d0044c315078 diff --git a/vendor/code.cloudfoundry.org/bytefmt/LICENSE b/vendor/code.cloudfoundry.org/bytefmt/LICENSE new file mode 100644 index 00000000..f49a4e16 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bytefmt/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/bytefmt/NOTICE b/vendor/code.cloudfoundry.org/bytefmt/NOTICE new file mode 100644 index 00000000..8625a7f4 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bytefmt/NOTICE @@ -0,0 +1,20 @@ +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +This project contains software that is Copyright (c) 2013-2015 Pivotal Software, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This project may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of each subcomponent's license, +as noted in the LICENSE file. diff --git a/vendor/code.cloudfoundry.org/bytefmt/README.md b/vendor/code.cloudfoundry.org/bytefmt/README.md new file mode 100644 index 00000000..44d287d1 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bytefmt/README.md @@ -0,0 +1,15 @@ +bytefmt +======= + +**Note**: This repository should be imported as `code.cloudfoundry.org/bytefmt`. + +Human-readable byte formatter. + +Example: + +```go +bytefmt.ByteSize(100.5*bytefmt.MEGABYTE) // returns "100.5M" +bytefmt.ByteSize(uint64(1024)) // returns "1K" +``` + +For documentation, please see http://godoc.org/code.cloudfoundry.org/bytefmt diff --git a/vendor/code.cloudfoundry.org/bytefmt/bytes.go b/vendor/code.cloudfoundry.org/bytefmt/bytes.go new file mode 100644 index 00000000..73956047 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bytefmt/bytes.go @@ -0,0 +1,121 @@ +// Package bytefmt contains helper methods and constants for converting to and from a human-readable byte format. +// +// bytefmt.ByteSize(100.5*bytefmt.MEGABYTE) // "100.5M" +// bytefmt.ByteSize(uint64(1024)) // "1K" +// +package bytefmt + +import ( + "errors" + "strconv" + "strings" + "unicode" +) + +const ( + BYTE = 1 << (10 * iota) + KILOBYTE + MEGABYTE + GIGABYTE + TERABYTE + PETABYTE + EXABYTE +) + +var invalidByteQuantityError = errors.New("byte quantity must be a positive integer with a unit of measurement like M, MB, MiB, G, GiB, or GB") + +// ByteSize returns a human-readable byte string of the form 10M, 12.5K, and so forth. 
The following units are available: +// E: Exabyte +// P: Petabyte +// T: Terabyte +// G: Gigabyte +// M: Megabyte +// K: Kilobyte +// B: Byte +// The unit that results in the smallest number greater than or equal to 1 is always chosen. +func ByteSize(bytes uint64) string { + unit := "" + value := float64(bytes) + + switch { + case bytes >= EXABYTE: + unit = "E" + value = value / EXABYTE + case bytes >= PETABYTE: + unit = "P" + value = value / PETABYTE + case bytes >= TERABYTE: + unit = "T" + value = value / TERABYTE + case bytes >= GIGABYTE: + unit = "G" + value = value / GIGABYTE + case bytes >= MEGABYTE: + unit = "M" + value = value / MEGABYTE + case bytes >= KILOBYTE: + unit = "K" + value = value / KILOBYTE + case bytes >= BYTE: + unit = "B" + case bytes == 0: + return "0B" + } + + result := strconv.FormatFloat(value, 'f', 1, 64) + result = strings.TrimSuffix(result, ".0") + return result + unit +} + +// ToMegabytes parses a string formatted by ByteSize as megabytes. +func ToMegabytes(s string) (uint64, error) { + bytes, err := ToBytes(s) + if err != nil { + return 0, err + } + + return bytes / MEGABYTE, nil +} + +// ToBytes parses a string formatted by ByteSize as bytes. Note binary-prefixed and SI prefixed units both mean a base-2 units +// KB = K = KiB = 1024 +// MB = M = MiB = 1024 * K +// GB = G = GiB = 1024 * M +// TB = T = TiB = 1024 * G +// PB = P = PiB = 1024 * T +// EB = E = EiB = 1024 * P +func ToBytes(s string) (uint64, error) { + s = strings.TrimSpace(s) + s = strings.ToUpper(s) + + i := strings.IndexFunc(s, unicode.IsLetter) + + if i == -1 { + return 0, invalidByteQuantityError + } + + bytesString, multiple := s[:i], s[i:] + bytes, err := strconv.ParseFloat(bytesString, 64) + if err != nil || bytes < 0 { + return 0, invalidByteQuantityError + } + + switch multiple { + case "E", "EB", "EIB": + return uint64(bytes * EXABYTE), nil + case "P", "PB", "PIB": + return uint64(bytes * PETABYTE), nil + case "T", "TB", "TIB": + return uint64(bytes * TERABYTE), nil + case "G", "GB", "GIB": + return uint64(bytes * GIGABYTE), nil + case "M", "MB", "MIB": + return uint64(bytes * MEGABYTE), nil + case "K", "KB", "KIB": + return uint64(bytes * KILOBYTE), nil + case "B": + return uint64(bytes), nil + default: + return 0, invalidByteQuantityError + } +} diff --git a/vendor/code.cloudfoundry.org/bytefmt/package.go b/vendor/code.cloudfoundry.org/bytefmt/package.go new file mode 100644 index 00000000..03429300 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bytefmt/package.go @@ -0,0 +1 @@ +package bytefmt // import "code.cloudfoundry.org/bytefmt" diff --git a/vendor/github.com/docopt/docopt-go/.gitignore b/vendor/github.com/docopt/docopt-go/.gitignore new file mode 100644 index 00000000..49ad16c6 --- /dev/null +++ b/vendor/github.com/docopt/docopt-go/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# coverage droppings +profile.cov diff --git a/vendor/github.com/docopt/docopt-go/.travis.yml b/vendor/github.com/docopt/docopt-go/.travis.yml new file mode 100644 index 00000000..db820dc3 --- /dev/null +++ b/vendor/github.com/docopt/docopt-go/.travis.yml @@ -0,0 +1,32 @@ +# Travis CI (http://travis-ci.org/) is a continuous integration +# service for open source projects. This file configures it +# to run unit tests for docopt-go. 
+ +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 + - 1.9 + - tip + +matrix: + fast_finish: true + +before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + +install: + - go get -d -v ./... && go build -v ./... + +script: + - go vet -x ./... + - go test -v ./... + - go test -covermode=count -coverprofile=profile.cov . + +after_script: + - $HOME/gopath/bin/goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/docopt/docopt-go/LICENSE b/vendor/github.com/docopt/docopt-go/LICENSE new file mode 100644 index 00000000..5e51f73e --- /dev/null +++ b/vendor/github.com/docopt/docopt-go/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Keith Batten +Copyright (c) 2016 David Irvine + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/docopt/docopt-go/README.md b/vendor/github.com/docopt/docopt-go/README.md new file mode 100644 index 00000000..d03f8da5 --- /dev/null +++ b/vendor/github.com/docopt/docopt-go/README.md @@ -0,0 +1,116 @@ +docopt-go +========= + +[![Build Status](https://travis-ci.org/docopt/docopt.go.svg?branch=master)](https://travis-ci.org/docopt/docopt.go) +[![Coverage Status](https://coveralls.io/repos/github/docopt/docopt.go/badge.svg)](https://coveralls.io/github/docopt/docopt.go) +[![GoDoc](https://godoc.org/github.com/docopt/docopt.go?status.svg)](https://godoc.org/github.com/docopt/docopt.go) + +An implementation of [docopt](http://docopt.org/) in the [Go](http://golang.org/) programming language. + +**docopt** helps you create *beautiful* command-line interfaces easily: + +```go +package main + +import ( + "fmt" + "github.com/docopt/docopt-go" +) + +func main() { + usage := `Naval Fate. + +Usage: + naval_fate ship new ... + naval_fate ship move [--speed=] + naval_fate ship shoot + naval_fate mine (set|remove) [--moored|--drifting] + naval_fate -h | --help + naval_fate --version + +Options: + -h --help Show this screen. + --version Show version. + --speed= Speed in knots [default: 10]. + --moored Moored (anchored) mine. + --drifting Drifting mine.` + + arguments, _ := docopt.ParseDoc(usage) + fmt.Println(arguments) +} +``` + +**docopt** parses command-line arguments based on a help message. Don't write parser code: a good help message already has all the necessary information in it. + +## Installation + +⚠ Use the alias "docopt-go". 
To use docopt in your Go code: + +```go +import "github.com/docopt/docopt-go" +``` + +To install docopt in your `$GOPATH`: + +```console +$ go get github.com/docopt/docopt-go +``` + +## API + +Given a conventional command-line help message, docopt processes the arguments. See https://github.com/docopt/docopt#help-message-format for a description of the help message format. + +This package exposes three different APIs, depending on the level of control required. The first, simplest way to parse your docopt usage is to just call: + +```go +docopt.ParseDoc(usage) +``` + +This will use `os.Args[1:]` as the argv slice, and use the default parser options. If you want to provide your own version string and args, then use: + +```go +docopt.ParseArgs(usage, argv, "1.2.3") +``` + +If the last parameter (version) is a non-empty string, it will be printed when `--version` is given in the argv slice. Finally, we can instantiate our own `docopt.Parser` which gives us control over how things like help messages are printed and whether to exit after displaying usage messages, etc. + +```go +parser := &docopt.Parser{ + HelpHandler: docopt.PrintHelpOnly, + OptionsFirst: true, +} +opts, err := parser.ParseArgs(usage, argv, "") +``` + +In particular, setting your own custom `HelpHandler` function makes unit testing your own docs with example command line invocations much more enjoyable. + +All three of these return a map of option names to the values parsed from argv, and an error or nil. You can get the values using the helpers, or just treat it as a regular map: + +```go +flag, _ := opts.Bool("--flag") +secs, _ := opts.Int("") +``` + +Additionally, you can `Bind` these to a struct, assigning option values to the +exported fields of that struct, all at once. + +```go +var config struct { + Command string `docopt:""` + Tries int `docopt:"-n"` + Force bool // Gets the value of --force +} +opts.Bind(&config) +``` + +More documentation is available at [godoc.org](https://godoc.org/github.com/docopt/docopt-go). + +## Unit Testing + +Unit testing your own usage docs is recommended, so you can be sure that for a given command line invocation, the expected options are set. An example of how to do this is [in the examples folder](examples/unit_test/unit_test.go). + +## Tests + +All tests from the Python version are implemented and passing at [Travis CI](https://travis-ci.org/docopt/docopt-go). New language-agnostic tests have been added to [test_golang.docopt](test_golang.docopt). + +To run tests for docopt-go, use `go test`. diff --git a/vendor/github.com/docopt/docopt-go/doc.go b/vendor/github.com/docopt/docopt-go/doc.go new file mode 100644 index 00000000..c56ee12a --- /dev/null +++ b/vendor/github.com/docopt/docopt-go/doc.go @@ -0,0 +1,49 @@ +/* +Package docopt parses command-line arguments based on a help message. + +Given a conventional command-line help message, docopt processes the arguments. +See https://github.com/docopt/docopt#help-message-format for a description of +the help message format. + +This package exposes three different APIs, depending on the level of control +required. The first, simplest way to parse your docopt usage is to just call: + + docopt.ParseDoc(usage) + +This will use os.Args[1:] as the argv slice, and use the default parser +options. If you want to provide your own version string and args, then use: + + docopt.ParseArgs(usage, argv, "1.2.3") + +If the last parameter (version) is a non-empty string, it will be printed when +--version is given in the argv slice. 
Finally, we can instantiate our own +docopt.Parser which gives us control over how things like help messages are +printed and whether to exit after displaying usage messages, etc. + + parser := &docopt.Parser{ + HelpHandler: docopt.PrintHelpOnly, + OptionsFirst: true, + } + opts, err := parser.ParseArgs(usage, argv, "") + +In particular, setting your own custom HelpHandler function makes unit testing +your own docs with example command line invocations much more enjoyable. + +All three of these return a map of option names to the values parsed from argv, +and an error or nil. You can get the values using the helpers, or just treat it +as a regular map: + + flag, _ := opts.Bool("--flag") + secs, _ := opts.Int("") + +Additionally, you can `Bind` these to a struct, assigning option values to the +exported fields of that struct, all at once. + + var config struct { + Command string `docopt:""` + Tries int `docopt:"-n"` + Force bool // Gets the value of --force + } + opts.Bind(&config) +*/ +package docopt diff --git a/vendor/github.com/docopt/docopt-go/docopt.go b/vendor/github.com/docopt/docopt-go/docopt.go new file mode 100644 index 00000000..c22feb7f --- /dev/null +++ b/vendor/github.com/docopt/docopt-go/docopt.go @@ -0,0 +1,575 @@ +// Licensed under terms of MIT license (see LICENSE-MIT) +// Copyright (c) 2013 Keith Batten, kbatten@gmail.com +// Copyright (c) 2016 David Irvine + +package docopt + +import ( + "fmt" + "os" + "regexp" + "strings" +) + +type Parser struct { + // HelpHandler is called when we encounter bad user input, or when the user + // asks for help. + // By default, this calls os.Exit(0) if it handled a built-in option such + // as -h, --help or --version. If the user errored with a wrong command or + // options, we exit with a return code of 1. + HelpHandler func(err error, usage string) + // OptionsFirst requires that option flags always come before positional + // arguments; otherwise they can overlap. + OptionsFirst bool + // SkipHelpFlags tells the parser not to look for -h and --help flags and + // call the HelpHandler. + SkipHelpFlags bool +} + +var PrintHelpAndExit = func(err error, usage string) { + if err != nil { + fmt.Fprintln(os.Stderr, usage) + os.Exit(1) + } else { + fmt.Println(usage) + os.Exit(0) + } +} + +var PrintHelpOnly = func(err error, usage string) { + if err != nil { + fmt.Fprintln(os.Stderr, usage) + } else { + fmt.Println(usage) + } +} + +var NoHelpHandler = func(err error, usage string) {} + +var DefaultParser = &Parser{ + HelpHandler: PrintHelpAndExit, + OptionsFirst: false, + SkipHelpFlags: false, +} + +// ParseDoc parses os.Args[1:] based on the interface described in doc, using the default parser options. +func ParseDoc(doc string) (Opts, error) { + return ParseArgs(doc, nil, "") +} + +// ParseArgs parses custom arguments based on the interface described in doc. If you provide a non-empty version +// string, then this will be displayed when the --version flag is found. This method uses the default parser options. +func ParseArgs(doc string, argv []string, version string) (Opts, error) { + return DefaultParser.ParseArgs(doc, argv, version) +} + +// ParseArgs parses custom arguments based on the interface described in doc. If you provide a non-empty version +// string, then this will be displayed when the --version flag is found. 
+func (p *Parser) ParseArgs(doc string, argv []string, version string) (Opts, error) { + return p.parse(doc, argv, version) +} + +// Deprecated: Parse is provided for backward compatibility with the original docopt.go package. +// Please rather make use of ParseDoc, ParseArgs, or use your own custom Parser. +func Parse(doc string, argv []string, help bool, version string, optionsFirst bool, exit ...bool) (map[string]interface{}, error) { + exitOk := true + if len(exit) > 0 { + exitOk = exit[0] + } + p := &Parser{ + OptionsFirst: optionsFirst, + SkipHelpFlags: !help, + } + if exitOk { + p.HelpHandler = PrintHelpAndExit + } else { + p.HelpHandler = PrintHelpOnly + } + return p.parse(doc, argv, version) +} + +func (p *Parser) parse(doc string, argv []string, version string) (map[string]interface{}, error) { + if argv == nil { + argv = os.Args[1:] + } + if p.HelpHandler == nil { + p.HelpHandler = DefaultParser.HelpHandler + } + args, output, err := parse(doc, argv, !p.SkipHelpFlags, version, p.OptionsFirst) + if _, ok := err.(*UserError); ok { + // the user gave us bad input + p.HelpHandler(err, output) + } else if len(output) > 0 && err == nil { + // the user asked for help or --version + p.HelpHandler(err, output) + } + return args, err +} + +// ----------------------------------------------------------------------------- + +// parse and return a map of args, output and all errors +func parse(doc string, argv []string, help bool, version string, optionsFirst bool) (args map[string]interface{}, output string, err error) { + if argv == nil && len(os.Args) > 1 { + argv = os.Args[1:] + } + + usageSections := parseSection("usage:", doc) + + if len(usageSections) == 0 { + err = newLanguageError("\"usage:\" (case-insensitive) not found.") + return + } + if len(usageSections) > 1 { + err = newLanguageError("More than one \"usage:\" (case-insensitive).") + return + } + usage := usageSections[0] + + options := parseDefaults(doc) + formal, err := formalUsage(usage) + if err != nil { + output = handleError(err, usage) + return + } + + pat, err := parsePattern(formal, &options) + if err != nil { + output = handleError(err, usage) + return + } + + patternArgv, err := parseArgv(newTokenList(argv, errorUser), &options, optionsFirst) + if err != nil { + output = handleError(err, usage) + return + } + patFlat, err := pat.flat(patternOption) + if err != nil { + output = handleError(err, usage) + return + } + patternOptions := patFlat.unique() + + patFlat, err = pat.flat(patternOptionSSHORTCUT) + if err != nil { + output = handleError(err, usage) + return + } + for _, optionsShortcut := range patFlat { + docOptions := parseDefaults(doc) + optionsShortcut.children = docOptions.unique().diff(patternOptions) + } + + if output = extras(help, version, patternArgv, doc); len(output) > 0 { + return + } + + err = pat.fix() + if err != nil { + output = handleError(err, usage) + return + } + matched, left, collected := pat.match(&patternArgv, nil) + if matched && len(*left) == 0 { + patFlat, err = pat.flat(patternDefault) + if err != nil { + output = handleError(err, usage) + return + } + args = append(patFlat, *collected...).dictionary() + return + } + + err = newUserError("") + output = handleError(err, usage) + return +} + +func handleError(err error, usage string) string { + if _, ok := err.(*UserError); ok { + return strings.TrimSpace(fmt.Sprintf("%s\n%s", err, usage)) + } + return "" +} + +func parseSection(name, source string) []string { + p := regexp.MustCompile(`(?im)^([^\n]*` + name + `[^\n]*\n?(?:[ 
\t].*?(?:\n|$))*)`) + s := p.FindAllString(source, -1) + if s == nil { + s = []string{} + } + for i, v := range s { + s[i] = strings.TrimSpace(v) + } + return s +} + +func parseDefaults(doc string) patternList { + defaults := patternList{} + p := regexp.MustCompile(`\n[ \t]*(-\S+?)`) + for _, s := range parseSection("options:", doc) { + // FIXME corner case "bla: options: --foo" + _, _, s = stringPartition(s, ":") // get rid of "options:" + split := p.Split("\n"+s, -1)[1:] + match := p.FindAllStringSubmatch("\n"+s, -1) + for i := range split { + optionDescription := match[i][1] + split[i] + if strings.HasPrefix(optionDescription, "-") { + defaults = append(defaults, parseOption(optionDescription)) + } + } + } + return defaults +} + +func parsePattern(source string, options *patternList) (*pattern, error) { + tokens := tokenListFromPattern(source) + result, err := parseExpr(tokens, options) + if err != nil { + return nil, err + } + if tokens.current() != nil { + return nil, tokens.errorFunc("unexpected ending: %s" + strings.Join(tokens.tokens, " ")) + } + return newRequired(result...), nil +} + +func parseArgv(tokens *tokenList, options *patternList, optionsFirst bool) (patternList, error) { + /* + Parse command-line argument vector. + + If options_first: + argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ; + else: + argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ; + */ + parsed := patternList{} + for tokens.current() != nil { + if tokens.current().eq("--") { + for _, v := range tokens.tokens { + parsed = append(parsed, newArgument("", v)) + } + return parsed, nil + } else if tokens.current().hasPrefix("--") { + pl, err := parseLong(tokens, options) + if err != nil { + return nil, err + } + parsed = append(parsed, pl...) + } else if tokens.current().hasPrefix("-") && !tokens.current().eq("-") { + ps, err := parseShorts(tokens, options) + if err != nil { + return nil, err + } + parsed = append(parsed, ps...) 
+ } else if optionsFirst { + for _, v := range tokens.tokens { + parsed = append(parsed, newArgument("", v)) + } + return parsed, nil + } else { + parsed = append(parsed, newArgument("", tokens.move().String())) + } + } + return parsed, nil +} + +func parseOption(optionDescription string) *pattern { + optionDescription = strings.TrimSpace(optionDescription) + options, _, description := stringPartition(optionDescription, " ") + options = strings.Replace(options, ",", " ", -1) + options = strings.Replace(options, "=", " ", -1) + + short := "" + long := "" + argcount := 0 + var value interface{} + value = false + + reDefault := regexp.MustCompile(`(?i)\[default: (.*)\]`) + for _, s := range strings.Fields(options) { + if strings.HasPrefix(s, "--") { + long = s + } else if strings.HasPrefix(s, "-") { + short = s + } else { + argcount = 1 + } + if argcount > 0 { + matched := reDefault.FindAllStringSubmatch(description, -1) + if len(matched) > 0 { + value = matched[0][1] + } else { + value = nil + } + } + } + return newOption(short, long, argcount, value) +} + +func parseExpr(tokens *tokenList, options *patternList) (patternList, error) { + // expr ::= seq ( '|' seq )* ; + seq, err := parseSeq(tokens, options) + if err != nil { + return nil, err + } + if !tokens.current().eq("|") { + return seq, nil + } + var result patternList + if len(seq) > 1 { + result = patternList{newRequired(seq...)} + } else { + result = seq + } + for tokens.current().eq("|") { + tokens.move() + seq, err = parseSeq(tokens, options) + if err != nil { + return nil, err + } + if len(seq) > 1 { + result = append(result, newRequired(seq...)) + } else { + result = append(result, seq...) + } + } + if len(result) > 1 { + return patternList{newEither(result...)}, nil + } + return result, nil +} + +func parseSeq(tokens *tokenList, options *patternList) (patternList, error) { + // seq ::= ( atom [ '...' ] )* ; + result := patternList{} + for !tokens.current().match(true, "]", ")", "|") { + atom, err := parseAtom(tokens, options) + if err != nil { + return nil, err + } + if tokens.current().eq("...") { + atom = patternList{newOneOrMore(atom...)} + tokens.move() + } + result = append(result, atom...) 
+ } + return result, nil +} + +func parseAtom(tokens *tokenList, options *patternList) (patternList, error) { + // atom ::= '(' expr ')' | '[' expr ']' | 'options' | long | shorts | argument | command ; + tok := tokens.current() + result := patternList{} + if tokens.current().match(false, "(", "[") { + tokens.move() + var matching string + pl, err := parseExpr(tokens, options) + if err != nil { + return nil, err + } + if tok.eq("(") { + matching = ")" + result = patternList{newRequired(pl...)} + } else if tok.eq("[") { + matching = "]" + result = patternList{newOptional(pl...)} + } + moved := tokens.move() + if !moved.eq(matching) { + return nil, tokens.errorFunc("unmatched '%s', expected: '%s' got: '%s'", tok, matching, moved) + } + return result, nil + } else if tok.eq("options") { + tokens.move() + return patternList{newOptionsShortcut()}, nil + } else if tok.hasPrefix("--") && !tok.eq("--") { + return parseLong(tokens, options) + } else if tok.hasPrefix("-") && !tok.eq("-") && !tok.eq("--") { + return parseShorts(tokens, options) + } else if tok.hasPrefix("<") && tok.hasSuffix(">") || tok.isUpper() { + return patternList{newArgument(tokens.move().String(), nil)}, nil + } + return patternList{newCommand(tokens.move().String(), false)}, nil +} + +func parseLong(tokens *tokenList, options *patternList) (patternList, error) { + // long ::= '--' chars [ ( ' ' | '=' ) chars ] ; + long, eq, v := stringPartition(tokens.move().String(), "=") + var value interface{} + var opt *pattern + if eq == "" && v == "" { + value = nil + } else { + value = v + } + + if !strings.HasPrefix(long, "--") { + return nil, newError("long option '%s' doesn't start with --", long) + } + similar := patternList{} + for _, o := range *options { + if o.long == long { + similar = append(similar, o) + } + } + if tokens.err == errorUser && len(similar) == 0 { // if no exact match + similar = patternList{} + for _, o := range *options { + if strings.HasPrefix(o.long, long) { + similar = append(similar, o) + } + } + } + if len(similar) > 1 { // might be simply specified ambiguously 2+ times? 
+ similarLong := make([]string, len(similar)) + for i, s := range similar { + similarLong[i] = s.long + } + return nil, tokens.errorFunc("%s is not a unique prefix: %s?", long, strings.Join(similarLong, ", ")) + } else if len(similar) < 1 { + argcount := 0 + if eq == "=" { + argcount = 1 + } + opt = newOption("", long, argcount, false) + *options = append(*options, opt) + if tokens.err == errorUser { + var val interface{} + if argcount > 0 { + val = value + } else { + val = true + } + opt = newOption("", long, argcount, val) + } + } else { + opt = newOption(similar[0].short, similar[0].long, similar[0].argcount, similar[0].value) + if opt.argcount == 0 { + if value != nil { + return nil, tokens.errorFunc("%s must not have an argument", opt.long) + } + } else { + if value == nil { + if tokens.current().match(true, "--") { + return nil, tokens.errorFunc("%s requires argument", opt.long) + } + moved := tokens.move() + if moved != nil { + value = moved.String() // only set as string if not nil + } + } + } + if tokens.err == errorUser { + if value != nil { + opt.value = value + } else { + opt.value = true + } + } + } + + return patternList{opt}, nil +} + +func parseShorts(tokens *tokenList, options *patternList) (patternList, error) { + // shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ; + tok := tokens.move() + if !tok.hasPrefix("-") || tok.hasPrefix("--") { + return nil, newError("short option '%s' doesn't start with -", tok) + } + left := strings.TrimLeft(tok.String(), "-") + parsed := patternList{} + for left != "" { + var opt *pattern + short := "-" + left[0:1] + left = left[1:] + similar := patternList{} + for _, o := range *options { + if o.short == short { + similar = append(similar, o) + } + } + if len(similar) > 1 { + return nil, tokens.errorFunc("%s is specified ambiguously %d times", short, len(similar)) + } else if len(similar) < 1 { + opt = newOption(short, "", 0, false) + *options = append(*options, opt) + if tokens.err == errorUser { + opt = newOption(short, "", 0, true) + } + } else { // why copying is necessary here? 
+ opt = newOption(short, similar[0].long, similar[0].argcount, similar[0].value) + var value interface{} + if opt.argcount > 0 { + if left == "" { + if tokens.current().match(true, "--") { + return nil, tokens.errorFunc("%s requires argument", short) + } + value = tokens.move().String() + } else { + value = left + left = "" + } + } + if tokens.err == errorUser { + if value != nil { + opt.value = value + } else { + opt.value = true + } + } + } + parsed = append(parsed, opt) + } + return parsed, nil +} + +func formalUsage(section string) (string, error) { + _, _, section = stringPartition(section, ":") // drop "usage:" + pu := strings.Fields(section) + + if len(pu) == 0 { + return "", newLanguageError("no fields found in usage (perhaps a spacing error).") + } + + result := "( " + for _, s := range pu[1:] { + if s == pu[0] { + result += ") | ( " + } else { + result += s + " " + } + } + result += ")" + + return result, nil +} + +func extras(help bool, version string, options patternList, doc string) string { + if help { + for _, o := range options { + if (o.name == "-h" || o.name == "--help") && o.value == true { + return strings.Trim(doc, "\n") + } + } + } + if version != "" { + for _, o := range options { + if (o.name == "--version") && o.value == true { + return version + } + } + } + return "" +} + +func stringPartition(s, sep string) (string, string, string) { + sepPos := strings.Index(s, sep) + if sepPos == -1 { // no seperator found + return s, "", "" + } + split := strings.SplitN(s, sep, 2) + return split[0], sep, split[1] +} diff --git a/vendor/github.com/docopt/docopt-go/error.go b/vendor/github.com/docopt/docopt-go/error.go new file mode 100644 index 00000000..bd26460f --- /dev/null +++ b/vendor/github.com/docopt/docopt-go/error.go @@ -0,0 +1,49 @@ +package docopt + +import ( + "fmt" +) + +type errorType int + +const ( + errorUser errorType = iota + errorLanguage +) + +func (e errorType) String() string { + switch e { + case errorUser: + return "errorUser" + case errorLanguage: + return "errorLanguage" + } + return "" +} + +// UserError records an error with program arguments. +type UserError struct { + msg string + Usage string +} + +func (e UserError) Error() string { + return e.msg +} +func newUserError(msg string, f ...interface{}) error { + return &UserError{fmt.Sprintf(msg, f...), ""} +} + +// LanguageError records an error with the doc string. +type LanguageError struct { + msg string +} + +func (e LanguageError) Error() string { + return e.msg +} +func newLanguageError(msg string, f ...interface{}) error { + return &LanguageError{fmt.Sprintf(msg, f...)} +} + +var newError = fmt.Errorf diff --git a/vendor/github.com/docopt/docopt-go/opts.go b/vendor/github.com/docopt/docopt-go/opts.go new file mode 100644 index 00000000..36320fbc --- /dev/null +++ b/vendor/github.com/docopt/docopt-go/opts.go @@ -0,0 +1,264 @@ +package docopt + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "unicode" +) + +func errKey(key string) error { + return fmt.Errorf("no such key: %q", key) +} +func errType(key string) error { + return fmt.Errorf("key: %q failed type conversion", key) +} +func errStrconv(key string, convErr error) error { + return fmt.Errorf("key: %q failed type conversion: %s", key, convErr) +} + +// Opts is a map of command line options to their values, with some convenience +// methods for value type conversion (bool, float64, int, string). 
For example, +// to get an option value as an int: +// +// opts, _ := docopt.ParseDoc("Usage: sleep ") +// secs, _ := opts.Int("") +// +// Additionally, Opts.Bind allows you easily populate a struct's fields with the +// values of each option value. See below for examples. +// +// Lastly, you can still treat Opts as a regular map, and do any type checking +// and conversion that you want to yourself. For example: +// +// if s, ok := opts[""].(string); ok { +// if val, err := strconv.ParseUint(s, 2, 64); err != nil { ... } +// } +// +// Note that any non-boolean option / flag will have a string value in the +// underlying map. +type Opts map[string]interface{} + +func (o Opts) String(key string) (s string, err error) { + v, ok := o[key] + if !ok { + err = errKey(key) + return + } + s, ok = v.(string) + if !ok { + err = errType(key) + } + return +} + +func (o Opts) Bool(key string) (b bool, err error) { + v, ok := o[key] + if !ok { + err = errKey(key) + return + } + b, ok = v.(bool) + if !ok { + err = errType(key) + } + return +} + +func (o Opts) Int(key string) (i int, err error) { + s, err := o.String(key) + if err != nil { + return + } + i, err = strconv.Atoi(s) + if err != nil { + err = errStrconv(key, err) + } + return +} + +func (o Opts) Float64(key string) (f float64, err error) { + s, err := o.String(key) + if err != nil { + return + } + f, err = strconv.ParseFloat(s, 64) + if err != nil { + err = errStrconv(key, err) + } + return +} + +// Bind populates the fields of a given struct with matching option values. +// Each key in Opts will be mapped to an exported field of the struct pointed +// to by `v`, as follows: +// +// abc int // Unexported field, ignored +// Abc string // Mapped from `--abc`, ``, or `abc` +// // (case insensitive) +// A string // Mapped from `-a`, `` or `a` +// // (case insensitive) +// Abc int `docopt:"XYZ"` // Mapped from `XYZ` +// Abc bool `docopt:"-"` // Mapped from `-` +// Abc bool `docopt:"-x,--xyz"` // Mapped from `-x` or `--xyz` +// // (first non-zero value found) +// +// Tagged (annotated) fields will always be mapped first. If no field is tagged +// with an option's key, Bind will try to map the option to an appropriately +// named field (as above). +// +// Bind also handles conversion to bool, float, int or string types. +func (o Opts) Bind(v interface{}) error { + structVal := reflect.ValueOf(v) + if structVal.Kind() != reflect.Ptr { + return newError("'v' argument is not pointer to struct type") + } + for structVal.Kind() == reflect.Ptr { + structVal = structVal.Elem() + } + if structVal.Kind() != reflect.Struct { + return newError("'v' argument is not pointer to struct type") + } + structType := structVal.Type() + + tagged := make(map[string]int) // Tagged field tags + untagged := make(map[string]int) // Untagged field names + + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + if isUnexportedField(field) || field.Anonymous { + continue + } + tag := field.Tag.Get("docopt") + if tag == "" { + untagged[field.Name] = i + continue + } + for _, t := range strings.Split(tag, ",") { + tagged[t] = i + } + } + + // Get the index of the struct field to use, based on the option key. + // Second argument is true/false on whether something was matched. 
+ getFieldIndex := func(key string) (int, bool) { + if i, ok := tagged[key]; ok { + return i, true + } + if i, ok := untagged[guessUntaggedField(key)]; ok { + return i, true + } + return -1, false + } + + indexMap := make(map[string]int) // Option keys to field index + + // Pre-check that option keys are mapped to fields and fields are zero valued, before populating them. + for k := range o { + i, ok := getFieldIndex(k) + if !ok { + if k == "--help" || k == "--version" { // Don't require these to be mapped. + continue + } + return newError("mapping of %q is not found in given struct, or is an unexported field", k) + } + fieldVal := structVal.Field(i) + zeroVal := reflect.Zero(fieldVal.Type()) + if !reflect.DeepEqual(fieldVal.Interface(), zeroVal.Interface()) { + return newError("%q field is non-zero, will be overwritten by value of %q", structType.Field(i).Name, k) + } + indexMap[k] = i + } + + // Populate fields with option values. + for k, v := range o { + i, ok := indexMap[k] + if !ok { + continue // Not mapped. + } + field := structVal.Field(i) + if !reflect.DeepEqual(field.Interface(), reflect.Zero(field.Type()).Interface()) { + // The struct's field is already non-zero (by our doing), so don't change it. + // This happens with comma separated tags, e.g. `docopt:"-h,--help"` which is a + // convenient way of checking if one of multiple boolean flags are set. + continue + } + optVal := reflect.ValueOf(v) + // Option value is the zero Value, so we can't get its .Type(). No need to assign anyway, so move along. + if !optVal.IsValid() { + continue + } + if !field.CanSet() { + return newError("%q field cannot be set", structType.Field(i).Name) + } + // Try to assign now if able. bool and string values should be assignable already. + if optVal.Type().AssignableTo(field.Type()) { + field.Set(optVal) + continue + } + // Try to convert the value and assign if able. + switch field.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if x, err := o.Int(k); err == nil { + field.SetInt(int64(x)) + continue + } + case reflect.Float32, reflect.Float64: + if x, err := o.Float64(k); err == nil { + field.SetFloat(x) + continue + } + } + // TODO: Something clever (recursive?) with non-string slices. + // case reflect.Slice: + // if optVal.Kind() == reflect.Slice { + // for i := 0; i < optVal.Len(); i++ { + // sliceVal := optVal.Index(i) + // fmt.Printf("%v", sliceVal) + // } + // fmt.Printf("\n") + // } + return newError("value of %q is not assignable to %q field", k, structType.Field(i).Name) + } + + return nil +} + +// isUnexportedField returns whether the field is unexported. +// isUnexportedField is to avoid the bug in versions older than Go1.3. +// See following links: +// https://code.google.com/p/go/issues/detail?id=7247 +// http://golang.org/ref/spec#Exported_identifiers +func isUnexportedField(field reflect.StructField) bool { + return !(field.PkgPath == "" && unicode.IsUpper(rune(field.Name[0]))) +} + +// Convert a string like "--my-special-flag" to "MySpecialFlag". +func titleCaseDashes(key string) string { + nextToUpper := true + mapFn := func(r rune) rune { + if r == '-' { + nextToUpper = true + return -1 + } + if nextToUpper { + nextToUpper = false + return unicode.ToUpper(r) + } + return r + } + return strings.Map(mapFn, key) +} + +// Best guess which field.Name in a struct to assign for an option key. 
+func guessUntaggedField(key string) string { + switch { + case strings.HasPrefix(key, "--") && len(key[2:]) > 1: + return titleCaseDashes(key[2:]) + case strings.HasPrefix(key, "-") && len(key[1:]) == 1: + return titleCaseDashes(key[1:]) + case strings.HasPrefix(key, "<") && strings.HasSuffix(key, ">"): + key = key[1 : len(key)-1] + } + return strings.Title(strings.ToLower(key)) +} diff --git a/vendor/github.com/docopt/docopt-go/pattern.go b/vendor/github.com/docopt/docopt-go/pattern.go new file mode 100644 index 00000000..0a296671 --- /dev/null +++ b/vendor/github.com/docopt/docopt-go/pattern.go @@ -0,0 +1,550 @@ +package docopt + +import ( + "fmt" + "reflect" + "strings" +) + +type patternType uint + +const ( + // leaf + patternArgument patternType = 1 << iota + patternCommand + patternOption + + // branch + patternRequired + patternOptionAL + patternOptionSSHORTCUT // Marker/placeholder for [options] shortcut. + patternOneOrMore + patternEither + + patternLeaf = patternArgument + + patternCommand + + patternOption + patternBranch = patternRequired + + patternOptionAL + + patternOptionSSHORTCUT + + patternOneOrMore + + patternEither + patternAll = patternLeaf + patternBranch + patternDefault = 0 +) + +func (pt patternType) String() string { + switch pt { + case patternArgument: + return "argument" + case patternCommand: + return "command" + case patternOption: + return "option" + case patternRequired: + return "required" + case patternOptionAL: + return "optional" + case patternOptionSSHORTCUT: + return "optionsshortcut" + case patternOneOrMore: + return "oneormore" + case patternEither: + return "either" + case patternLeaf: + return "leaf" + case patternBranch: + return "branch" + case patternAll: + return "all" + case patternDefault: + return "default" + } + return "" +} + +type pattern struct { + t patternType + + children patternList + + name string + value interface{} + + short string + long string + argcount int +} + +type patternList []*pattern + +func newBranchPattern(t patternType, pl ...*pattern) *pattern { + var p pattern + p.t = t + p.children = make(patternList, len(pl)) + copy(p.children, pl) + return &p +} + +func newRequired(pl ...*pattern) *pattern { + return newBranchPattern(patternRequired, pl...) +} + +func newEither(pl ...*pattern) *pattern { + return newBranchPattern(patternEither, pl...) +} + +func newOneOrMore(pl ...*pattern) *pattern { + return newBranchPattern(patternOneOrMore, pl...) +} + +func newOptional(pl ...*pattern) *pattern { + return newBranchPattern(patternOptionAL, pl...) 
+} + +func newOptionsShortcut() *pattern { + var p pattern + p.t = patternOptionSSHORTCUT + return &p +} + +func newLeafPattern(t patternType, name string, value interface{}) *pattern { + // default: value=nil + var p pattern + p.t = t + p.name = name + p.value = value + return &p +} + +func newArgument(name string, value interface{}) *pattern { + // default: value=nil + return newLeafPattern(patternArgument, name, value) +} + +func newCommand(name string, value interface{}) *pattern { + // default: value=false + var p pattern + p.t = patternCommand + p.name = name + p.value = value + return &p +} + +func newOption(short, long string, argcount int, value interface{}) *pattern { + // default: "", "", 0, false + var p pattern + p.t = patternOption + p.short = short + p.long = long + if long != "" { + p.name = long + } else { + p.name = short + } + p.argcount = argcount + if value == false && argcount > 0 { + p.value = nil + } else { + p.value = value + } + return &p +} + +func (p *pattern) flat(types patternType) (patternList, error) { + if p.t&patternLeaf != 0 { + if types == patternDefault { + types = patternAll + } + if p.t&types != 0 { + return patternList{p}, nil + } + return patternList{}, nil + } + + if p.t&patternBranch != 0 { + if p.t&types != 0 { + return patternList{p}, nil + } + result := patternList{} + for _, child := range p.children { + childFlat, err := child.flat(types) + if err != nil { + return nil, err + } + result = append(result, childFlat...) + } + return result, nil + } + return nil, newError("unknown pattern type: %d, %d", p.t, types) +} + +func (p *pattern) fix() error { + err := p.fixIdentities(nil) + if err != nil { + return err + } + p.fixRepeatingArguments() + return nil +} + +func (p *pattern) fixIdentities(uniq patternList) error { + // Make pattern-tree tips point to same object if they are equal. + if p.t&patternBranch == 0 { + return nil + } + if uniq == nil { + pFlat, err := p.flat(patternDefault) + if err != nil { + return err + } + uniq = pFlat.unique() + } + for i, child := range p.children { + if child.t&patternBranch == 0 { + ind, err := uniq.index(child) + if err != nil { + return err + } + p.children[i] = uniq[ind] + } else { + err := child.fixIdentities(uniq) + if err != nil { + return err + } + } + } + return nil +} + +func (p *pattern) fixRepeatingArguments() { + // Fix elements that should accumulate/increment values. 
+ var either []patternList + + for _, child := range p.transform().children { + either = append(either, child.children) + } + for _, cas := range either { + casMultiple := patternList{} + for _, e := range cas { + if cas.count(e) > 1 { + casMultiple = append(casMultiple, e) + } + } + for _, e := range casMultiple { + if e.t == patternArgument || e.t == patternOption && e.argcount > 0 { + switch e.value.(type) { + case string: + e.value = strings.Fields(e.value.(string)) + case []string: + default: + e.value = []string{} + } + } + if e.t == patternCommand || e.t == patternOption && e.argcount == 0 { + e.value = 0 + } + } + } +} + +func (p *pattern) match(left *patternList, collected *patternList) (bool, *patternList, *patternList) { + if collected == nil { + collected = &patternList{} + } + if p.t&patternRequired != 0 { + l := left + c := collected + for _, p := range p.children { + var matched bool + matched, l, c = p.match(l, c) + if !matched { + return false, left, collected + } + } + return true, l, c + } else if p.t&patternOptionAL != 0 || p.t&patternOptionSSHORTCUT != 0 { + for _, p := range p.children { + _, left, collected = p.match(left, collected) + } + return true, left, collected + } else if p.t&patternOneOrMore != 0 { + if len(p.children) != 1 { + panic("OneOrMore.match(): assert len(p.children) == 1") + } + l := left + c := collected + var lAlt *patternList + matched := true + times := 0 + for matched { + // could it be that something didn't match but changed l or c? + matched, l, c = p.children[0].match(l, c) + if matched { + times++ + } + if lAlt == l { + break + } + lAlt = l + } + if times >= 1 { + return true, l, c + } + return false, left, collected + } else if p.t&patternEither != 0 { + type outcomeStruct struct { + matched bool + left *patternList + collected *patternList + length int + } + outcomes := []outcomeStruct{} + for _, p := range p.children { + matched, l, c := p.match(left, collected) + outcome := outcomeStruct{matched, l, c, len(*l)} + if matched { + outcomes = append(outcomes, outcome) + } + } + if len(outcomes) > 0 { + minLen := outcomes[0].length + minIndex := 0 + for i, v := range outcomes { + if v.length < minLen { + minIndex = i + } + } + return outcomes[minIndex].matched, outcomes[minIndex].left, outcomes[minIndex].collected + } + return false, left, collected + } else if p.t&patternLeaf != 0 { + pos, match := p.singleMatch(left) + var increment interface{} + if match == nil { + return false, left, collected + } + leftAlt := make(patternList, len((*left)[:pos]), len((*left)[:pos])+len((*left)[pos+1:])) + copy(leftAlt, (*left)[:pos]) + leftAlt = append(leftAlt, (*left)[pos+1:]...) + sameName := patternList{} + for _, a := range *collected { + if a.name == p.name { + sameName = append(sameName, a) + } + } + + switch p.value.(type) { + case int, []string: + switch p.value.(type) { + case int: + increment = 1 + case []string: + switch match.value.(type) { + case string: + increment = []string{match.value.(string)} + default: + increment = match.value + } + } + if len(sameName) == 0 { + match.value = increment + collectedMatch := make(patternList, len(*collected), len(*collected)+1) + copy(collectedMatch, *collected) + collectedMatch = append(collectedMatch, match) + return true, &leftAlt, &collectedMatch + } + switch sameName[0].value.(type) { + case int: + sameName[0].value = sameName[0].value.(int) + increment.(int) + case []string: + sameName[0].value = append(sameName[0].value.([]string), increment.([]string)...) 
+ } + return true, &leftAlt, collected + } + collectedMatch := make(patternList, len(*collected), len(*collected)+1) + copy(collectedMatch, *collected) + collectedMatch = append(collectedMatch, match) + return true, &leftAlt, &collectedMatch + } + panic("unmatched type") +} + +func (p *pattern) singleMatch(left *patternList) (int, *pattern) { + if p.t&patternArgument != 0 { + for n, pat := range *left { + if pat.t&patternArgument != 0 { + return n, newArgument(p.name, pat.value) + } + } + return -1, nil + } else if p.t&patternCommand != 0 { + for n, pat := range *left { + if pat.t&patternArgument != 0 { + if pat.value == p.name { + return n, newCommand(p.name, true) + } + break + } + } + return -1, nil + } else if p.t&patternOption != 0 { + for n, pat := range *left { + if p.name == pat.name { + return n, pat + } + } + return -1, nil + } + panic("unmatched type") +} + +func (p *pattern) String() string { + if p.t&patternOption != 0 { + return fmt.Sprintf("%s(%s, %s, %d, %+v)", p.t, p.short, p.long, p.argcount, p.value) + } else if p.t&patternLeaf != 0 { + return fmt.Sprintf("%s(%s, %+v)", p.t, p.name, p.value) + } else if p.t&patternBranch != 0 { + result := "" + for i, child := range p.children { + if i > 0 { + result += ", " + } + result += child.String() + } + return fmt.Sprintf("%s(%s)", p.t, result) + } + panic("unmatched type") +} + +func (p *pattern) transform() *pattern { + /* + Expand pattern into an (almost) equivalent one, but with single Either. + + Example: ((-a | -b) (-c | -d)) => (-a -c | -a -d | -b -c | -b -d) + Quirks: [-a] => (-a), (-a...) => (-a -a) + */ + result := []patternList{} + groups := []patternList{patternList{p}} + parents := patternRequired + + patternOptionAL + + patternOptionSSHORTCUT + + patternEither + + patternOneOrMore + for len(groups) > 0 { + children := groups[0] + groups = groups[1:] + var child *pattern + for _, c := range children { + if c.t&parents != 0 { + child = c + break + } + } + if child != nil { + children.remove(child) + if child.t&patternEither != 0 { + for _, c := range child.children { + r := patternList{} + r = append(r, c) + r = append(r, children...) + groups = append(groups, r) + } + } else if child.t&patternOneOrMore != 0 { + r := patternList{} + r = append(r, child.children.double()...) + r = append(r, children...) + groups = append(groups, r) + } else { + r := patternList{} + r = append(r, child.children...) + r = append(r, children...) + groups = append(groups, r) + } + } else { + result = append(result, children) + } + } + either := patternList{} + for _, e := range result { + either = append(either, newRequired(e...)) + } + return newEither(either...) 
+} + +func (p *pattern) eq(other *pattern) bool { + return reflect.DeepEqual(p, other) +} + +func (pl patternList) unique() patternList { + table := make(map[string]bool) + result := patternList{} + for _, v := range pl { + if !table[v.String()] { + table[v.String()] = true + result = append(result, v) + } + } + return result +} + +func (pl patternList) index(p *pattern) (int, error) { + for i, c := range pl { + if c.eq(p) { + return i, nil + } + } + return -1, newError("%s not in list", p) +} + +func (pl patternList) count(p *pattern) int { + count := 0 + for _, c := range pl { + if c.eq(p) { + count++ + } + } + return count +} + +func (pl patternList) diff(l patternList) patternList { + lAlt := make(patternList, len(l)) + copy(lAlt, l) + result := make(patternList, 0, len(pl)) + for _, v := range pl { + if v != nil { + match := false + for i, w := range lAlt { + if w.eq(v) { + match = true + lAlt[i] = nil + break + } + } + if match == false { + result = append(result, v) + } + } + } + return result +} + +func (pl patternList) double() patternList { + l := len(pl) + result := make(patternList, l*2) + copy(result, pl) + copy(result[l:2*l], pl) + return result +} + +func (pl *patternList) remove(p *pattern) { + (*pl) = pl.diff(patternList{p}) +} + +func (pl patternList) dictionary() map[string]interface{} { + dict := make(map[string]interface{}) + for _, a := range pl { + dict[a.name] = a.value + } + return dict +} diff --git a/vendor/github.com/docopt/docopt-go/test_golang.docopt b/vendor/github.com/docopt/docopt-go/test_golang.docopt new file mode 100644 index 00000000..323fd67d --- /dev/null +++ b/vendor/github.com/docopt/docopt-go/test_golang.docopt @@ -0,0 +1,9 @@ +r"""usage: prog [NAME_-2]...""" +$ prog 10 20 +{"NAME_-2": ["10", "20"]} + +$ prog 10 +{"NAME_-2": ["10"]} + +$ prog +{"NAME_-2": []} diff --git a/vendor/github.com/docopt/docopt-go/testcases.docopt b/vendor/github.com/docopt/docopt-go/testcases.docopt new file mode 100644 index 00000000..efe9a07f --- /dev/null +++ b/vendor/github.com/docopt/docopt-go/testcases.docopt @@ -0,0 +1,957 @@ +r"""Usage: prog + +""" +$ prog +{} + +$ prog --xxx +"user-error" + + +r"""Usage: prog [options] + +Options: -a All. + +""" +$ prog +{"-a": false} + +$ prog -a +{"-a": true} + +$ prog -x +"user-error" + + +r"""Usage: prog [options] + +Options: --all All. + +""" +$ prog +{"--all": false} + +$ prog --all +{"--all": true} + +$ prog --xxx +"user-error" + + +r"""Usage: prog [options] + +Options: -v, --verbose Verbose. + +""" +$ prog --verbose +{"--verbose": true} + +$ prog --ver +{"--verbose": true} + +$ prog -v +{"--verbose": true} + + +r"""Usage: prog [options] + +Options: -p PATH + +""" +$ prog -p home/ +{"-p": "home/"} + +$ prog -phome/ +{"-p": "home/"} + +$ prog -p +"user-error" + + +r"""Usage: prog [options] + +Options: --path + +""" +$ prog --path home/ +{"--path": "home/"} + +$ prog --path=home/ +{"--path": "home/"} + +$ prog --pa home/ +{"--path": "home/"} + +$ prog --pa=home/ +{"--path": "home/"} + +$ prog --path +"user-error" + + +r"""Usage: prog [options] + +Options: -p PATH, --path= Path to files. + +""" +$ prog -proot +{"--path": "root"} + + +r"""Usage: prog [options] + +Options: -p --path PATH Path to files. 
+ +""" +$ prog -p root +{"--path": "root"} + +$ prog --path root +{"--path": "root"} + + +r"""Usage: prog [options] + +Options: + -p PATH Path to files [default: ./] + +""" +$ prog +{"-p": "./"} + +$ prog -phome +{"-p": "home"} + + +r"""UsAgE: prog [options] + +OpTiOnS: --path= Path to files + [dEfAuLt: /root] + +""" +$ prog +{"--path": "/root"} + +$ prog --path=home +{"--path": "home"} + + +r"""usage: prog [options] + +options: + -a Add + -r Remote + -m Message + +""" +$ prog -a -r -m Hello +{"-a": true, + "-r": true, + "-m": "Hello"} + +$ prog -armyourass +{"-a": true, + "-r": true, + "-m": "yourass"} + +$ prog -a -r +{"-a": true, + "-r": true, + "-m": null} + + +r"""Usage: prog [options] + +Options: --version + --verbose + +""" +$ prog --version +{"--version": true, + "--verbose": false} + +$ prog --verbose +{"--version": false, + "--verbose": true} + +$ prog --ver +"user-error" + +$ prog --verb +{"--version": false, + "--verbose": true} + + +r"""usage: prog [-a -r -m ] + +options: + -a Add + -r Remote + -m Message + +""" +$ prog -armyourass +{"-a": true, + "-r": true, + "-m": "yourass"} + + +r"""usage: prog [-armmsg] + +options: -a Add + -r Remote + -m Message + +""" +$ prog -a -r -m Hello +{"-a": true, + "-r": true, + "-m": "Hello"} + + +r"""usage: prog -a -b + +options: + -a + -b + +""" +$ prog -a -b +{"-a": true, "-b": true} + +$ prog -b -a +{"-a": true, "-b": true} + +$ prog -a +"user-error" + +$ prog +"user-error" + + +r"""usage: prog (-a -b) + +options: -a + -b + +""" +$ prog -a -b +{"-a": true, "-b": true} + +$ prog -b -a +{"-a": true, "-b": true} + +$ prog -a +"user-error" + +$ prog +"user-error" + + +r"""usage: prog [-a] -b + +options: -a + -b + +""" +$ prog -a -b +{"-a": true, "-b": true} + +$ prog -b -a +{"-a": true, "-b": true} + +$ prog -a +"user-error" + +$ prog -b +{"-a": false, "-b": true} + +$ prog +"user-error" + + +r"""usage: prog [(-a -b)] + +options: -a + -b + +""" +$ prog -a -b +{"-a": true, "-b": true} + +$ prog -b -a +{"-a": true, "-b": true} + +$ prog -a +"user-error" + +$ prog -b +"user-error" + +$ prog +{"-a": false, "-b": false} + + +r"""usage: prog (-a|-b) + +options: -a + -b + +""" +$ prog -a -b +"user-error" + +$ prog +"user-error" + +$ prog -a +{"-a": true, "-b": false} + +$ prog -b +{"-a": false, "-b": true} + + +r"""usage: prog [ -a | -b ] + +options: -a + -b + +""" +$ prog -a -b +"user-error" + +$ prog +{"-a": false, "-b": false} + +$ prog -a +{"-a": true, "-b": false} + +$ prog -b +{"-a": false, "-b": true} + + +r"""usage: prog """ +$ prog 10 +{"": "10"} + +$ prog 10 20 +"user-error" + +$ prog +"user-error" + + +r"""usage: prog []""" +$ prog 10 +{"": "10"} + +$ prog 10 20 +"user-error" + +$ prog +{"": null} + + +r"""usage: prog """ +$ prog 10 20 40 +{"": "10", "": "20", "": "40"} + +$ prog 10 20 +"user-error" + +$ prog +"user-error" + + +r"""usage: prog [ ]""" +$ prog 10 20 40 +{"": "10", "": "20", "": "40"} + +$ prog 10 20 +{"": "10", "": "20", "": null} + +$ prog +"user-error" + + +r"""usage: prog [ | ]""" +$ prog 10 20 40 +"user-error" + +$ prog 20 40 +{"": null, "": "20", "": "40"} + +$ prog +{"": null, "": null, "": null} + + +r"""usage: prog ( --all | ) + +options: + --all + +""" +$ prog 10 --all +{"": "10", "--all": true, "": null} + +$ prog 10 +{"": null, "--all": false, "": "10"} + +$ prog +"user-error" + + +r"""usage: prog [ ]""" +$ prog 10 20 +{"": ["10", "20"]} + +$ prog 10 +{"": ["10"]} + +$ prog +{"": []} + + +r"""usage: prog [( )]""" +$ prog 10 20 +{"": ["10", "20"]} + +$ prog 10 +"user-error" + +$ prog +{"": []} + + +r"""usage: prog 
NAME...""" +$ prog 10 20 +{"NAME": ["10", "20"]} + +$ prog 10 +{"NAME": ["10"]} + +$ prog +"user-error" + + +r"""usage: prog [NAME]...""" +$ prog 10 20 +{"NAME": ["10", "20"]} + +$ prog 10 +{"NAME": ["10"]} + +$ prog +{"NAME": []} + + +r"""usage: prog [NAME...]""" +$ prog 10 20 +{"NAME": ["10", "20"]} + +$ prog 10 +{"NAME": ["10"]} + +$ prog +{"NAME": []} + + +r"""usage: prog [NAME [NAME ...]]""" +$ prog 10 20 +{"NAME": ["10", "20"]} + +$ prog 10 +{"NAME": ["10"]} + +$ prog +{"NAME": []} + + +r"""usage: prog (NAME | --foo NAME) + +options: --foo + +""" +$ prog 10 +{"NAME": "10", "--foo": false} + +$ prog --foo 10 +{"NAME": "10", "--foo": true} + +$ prog --foo=10 +"user-error" + + +r"""usage: prog (NAME | --foo) [--bar | NAME] + +options: --foo +options: --bar + +""" +$ prog 10 +{"NAME": ["10"], "--foo": false, "--bar": false} + +$ prog 10 20 +{"NAME": ["10", "20"], "--foo": false, "--bar": false} + +$ prog --foo --bar +{"NAME": [], "--foo": true, "--bar": true} + + +r"""Naval Fate. + +Usage: + prog ship new ... + prog ship [] move [--speed=] + prog ship shoot + prog mine (set|remove) [--moored|--drifting] + prog -h | --help + prog --version + +Options: + -h --help Show this screen. + --version Show version. + --speed= Speed in knots [default: 10]. + --moored Mored (anchored) mine. + --drifting Drifting mine. + +""" +$ prog ship Guardian move 150 300 --speed=20 +{"--drifting": false, + "--help": false, + "--moored": false, + "--speed": "20", + "--version": false, + "": ["Guardian"], + "": "150", + "": "300", + "mine": false, + "move": true, + "new": false, + "remove": false, + "set": false, + "ship": true, + "shoot": false} + + +r"""usage: prog --hello""" +$ prog --hello +{"--hello": true} + + +r"""usage: prog [--hello=]""" +$ prog +{"--hello": null} + +$ prog --hello wrld +{"--hello": "wrld"} + + +r"""usage: prog [-o]""" +$ prog +{"-o": false} + +$ prog -o +{"-o": true} + + +r"""usage: prog [-opr]""" +$ prog -op +{"-o": true, "-p": true, "-r": false} + + +r"""usage: prog --aabb | --aa""" +$ prog --aa +{"--aabb": false, "--aa": true} + +$ prog --a +"user-error" # not a unique prefix + +# +# Counting number of flags +# + +r"""Usage: prog -v""" +$ prog -v +{"-v": true} + + +r"""Usage: prog [-v -v]""" +$ prog +{"-v": 0} + +$ prog -v +{"-v": 1} + +$ prog -vv +{"-v": 2} + + +r"""Usage: prog -v ...""" +$ prog +"user-error" + +$ prog -v +{"-v": 1} + +$ prog -vv +{"-v": 2} + +$ prog -vvvvvv +{"-v": 6} + + +r"""Usage: prog [-v | -vv | -vvv] + +This one is probably most readable user-friednly variant. + +""" +$ prog +{"-v": 0} + +$ prog -v +{"-v": 1} + +$ prog -vv +{"-v": 2} + +$ prog -vvvv +"user-error" + + +r"""usage: prog [--ver --ver]""" +$ prog --ver --ver +{"--ver": 2} + + +# +# Counting commands +# + +r"""usage: prog [go]""" +$ prog go +{"go": true} + + +r"""usage: prog [go go]""" +$ prog +{"go": 0} + +$ prog go +{"go": 1} + +$ prog go go +{"go": 2} + +$ prog go go go +"user-error" + +r"""usage: prog go...""" +$ prog go go go go go +{"go": 5} + +# +# [options] does not include options from usage-pattern +# +r"""usage: prog [options] [-a] + +options: -a + -b +""" +$ prog -a +{"-a": true, "-b": false} + +$ prog -aa +"user-error" + +# +# Test [options] shourtcut +# + +r"""Usage: prog [options] A +Options: + -q Be quiet + -v Be verbose. 
+ +""" +$ prog arg +{"A": "arg", "-v": false, "-q": false} + +$ prog -v arg +{"A": "arg", "-v": true, "-q": false} + +$ prog -q arg +{"A": "arg", "-v": false, "-q": true} + +# +# Test single dash +# + +r"""usage: prog [-]""" + +$ prog - +{"-": true} + +$ prog +{"-": false} + +# +# If argument is repeated, its value should always be a list +# + +r"""usage: prog [NAME [NAME ...]]""" + +$ prog a b +{"NAME": ["a", "b"]} + +$ prog +{"NAME": []} + +# +# Option's argument defaults to null/None +# + +r"""usage: prog [options] +options: + -a Add + -m Message + +""" +$ prog -a +{"-m": null, "-a": true} + +# +# Test options without description +# + +r"""usage: prog --hello""" +$ prog --hello +{"--hello": true} + +r"""usage: prog [--hello=]""" +$ prog +{"--hello": null} + +$ prog --hello wrld +{"--hello": "wrld"} + +r"""usage: prog [-o]""" +$ prog +{"-o": false} + +$ prog -o +{"-o": true} + +r"""usage: prog [-opr]""" +$ prog -op +{"-o": true, "-p": true, "-r": false} + +r"""usage: git [-v | --verbose]""" +$ prog -v +{"-v": true, "--verbose": false} + +r"""usage: git remote [-v | --verbose]""" +$ prog remote -v +{"remote": true, "-v": true, "--verbose": false} + +# +# Test empty usage pattern +# + +r"""usage: prog""" +$ prog +{} + +r"""usage: prog + prog +""" +$ prog 1 2 +{"": "1", "": "2"} + +$ prog +{"": null, "": null} + +r"""usage: prog + prog +""" +$ prog +{"": null, "": null} + +# +# Option's argument should not capture default value from usage pattern +# + +r"""usage: prog [--file=]""" +$ prog +{"--file": null} + +r"""usage: prog [--file=] + +options: --file + +""" +$ prog +{"--file": null} + +r"""Usage: prog [-a ] + +Options: -a, --address TCP address [default: localhost:6283]. + +""" +$ prog +{"--address": "localhost:6283"} + +# +# If option with argument could be repeated, +# its arguments should be accumulated into a list +# + +r"""usage: prog --long= ...""" + +$ prog --long one +{"--long": ["one"]} + +$ prog --long one --long two +{"--long": ["one", "two"]} + +# +# Test multiple elements repeated at once +# + +r"""usage: prog (go --speed=)...""" +$ prog go left --speed=5 go right --speed=9 +{"go": 2, "": ["left", "right"], "--speed": ["5", "9"]} + +# +# Required options should work with option shortcut +# + +r"""usage: prog [options] -a + +options: -a + +""" +$ prog -a +{"-a": true} + +# +# If option could be repeated its defaults should be split into a list +# + +r"""usage: prog [-o ]... + +options: -o [default: x] + +""" +$ prog -o this -o that +{"-o": ["this", "that"]} + +$ prog +{"-o": ["x"]} + +r"""usage: prog [-o ]... 
+ +options: -o [default: x y] + +""" +$ prog -o this +{"-o": ["this"]} + +$ prog +{"-o": ["x", "y"]} + +# +# Test stacked option's argument +# + +r"""usage: prog -pPATH + +options: -p PATH + +""" +$ prog -pHOME +{"-p": "HOME"} + +# +# Issue 56: Repeated mutually exclusive args give nested lists sometimes +# + +r"""Usage: foo (--xx=x|--yy=y)...""" +$ prog --xx=1 --yy=2 +{"--xx": ["1"], "--yy": ["2"]} + +# +# POSIXly correct tokenization +# + +r"""usage: prog []""" +$ prog f.txt +{"": "f.txt"} + +r"""usage: prog [--input=]...""" +$ prog --input a.txt --input=b.txt +{"--input": ["a.txt", "b.txt"]} + +# +# Issue 85: `[options]` shourtcut with multiple subcommands +# + +r"""usage: prog good [options] + prog fail [options] + +options: --loglevel=N + +""" +$ prog fail --loglevel 5 +{"--loglevel": "5", "fail": true, "good": false} + +# +# Usage-section syntax +# + +r"""usage:prog --foo""" +$ prog --foo +{"--foo": true} + +r"""PROGRAM USAGE: prog --foo""" +$ prog --foo +{"--foo": true} + +r"""Usage: prog --foo + prog --bar +NOT PART OF SECTION""" +$ prog --foo +{"--foo": true, "--bar": false} + +r"""Usage: + prog --foo + prog --bar + +NOT PART OF SECTION""" +$ prog --foo +{"--foo": true, "--bar": false} + +r"""Usage: + prog --foo + prog --bar +NOT PART OF SECTION""" +$ prog --foo +{"--foo": true, "--bar": false} + +# +# Options-section syntax +# + +r"""Usage: prog [options] + +global options: --foo +local options: --baz + --bar +other options: + --egg + --spam +-not-an-option- + +""" +$ prog --baz --egg +{"--foo": false, "--baz": true, "--bar": false, "--egg": true, "--spam": false} diff --git a/vendor/github.com/docopt/docopt-go/token.go b/vendor/github.com/docopt/docopt-go/token.go new file mode 100644 index 00000000..cc18ec9f --- /dev/null +++ b/vendor/github.com/docopt/docopt-go/token.go @@ -0,0 +1,126 @@ +package docopt + +import ( + "regexp" + "strings" + "unicode" +) + +type tokenList struct { + tokens []string + errorFunc func(string, ...interface{}) error + err errorType +} +type token string + +func newTokenList(source []string, err errorType) *tokenList { + errorFunc := newError + if err == errorUser { + errorFunc = newUserError + } else if err == errorLanguage { + errorFunc = newLanguageError + } + return &tokenList{source, errorFunc, err} +} + +func tokenListFromString(source string) *tokenList { + return newTokenList(strings.Fields(source), errorUser) +} + +func tokenListFromPattern(source string) *tokenList { + p := regexp.MustCompile(`([\[\]\(\)\|]|\.\.\.)`) + source = p.ReplaceAllString(source, ` $1 `) + p = regexp.MustCompile(`\s+|(\S*<.*?>)`) + split := p.Split(source, -1) + match := p.FindAllStringSubmatch(source, -1) + var result []string + l := len(split) + for i := 0; i < l; i++ { + if len(split[i]) > 0 { + result = append(result, split[i]) + } + if i < l-1 && len(match[i][1]) > 0 { + result = append(result, match[i][1]) + } + } + return newTokenList(result, errorLanguage) +} + +func (t *token) eq(s string) bool { + if t == nil { + return false + } + return string(*t) == s +} +func (t *token) match(matchNil bool, tokenStrings ...string) bool { + if t == nil && matchNil { + return true + } else if t == nil && !matchNil { + return false + } + + for _, tok := range tokenStrings { + if tok == string(*t) { + return true + } + } + return false +} +func (t *token) hasPrefix(prefix string) bool { + if t == nil { + return false + } + return strings.HasPrefix(string(*t), prefix) +} +func (t *token) hasSuffix(suffix string) bool { + if t == nil { + return false + } + return 
strings.HasSuffix(string(*t), suffix) +} +func (t *token) isUpper() bool { + if t == nil { + return false + } + return isStringUppercase(string(*t)) +} +func (t *token) String() string { + if t == nil { + return "" + } + return string(*t) +} + +func (tl *tokenList) current() *token { + if len(tl.tokens) > 0 { + return (*token)(&(tl.tokens[0])) + } + return nil +} + +func (tl *tokenList) length() int { + return len(tl.tokens) +} + +func (tl *tokenList) move() *token { + if len(tl.tokens) > 0 { + t := tl.tokens[0] + tl.tokens = tl.tokens[1:] + return (*token)(&t) + } + return nil +} + +// returns true if all cased characters in the string are uppercase +// and there are there is at least one cased charcter +func isStringUppercase(s string) bool { + if strings.ToUpper(s) != s { + return false + } + for _, c := range []rune(s) { + if unicode.IsUpper(c) { + return true + } + } + return false +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/.travis.yml b/vendor/github.com/go-asn1-ber/asn1-ber/.travis.yml new file mode 100644 index 00000000..4ad2067b --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/.travis.yml @@ -0,0 +1,38 @@ +language: go +matrix: + include: + - go: 1.2.x + env: GOOS=linux GOARCH=amd64 + - go: 1.2.x + env: GOOS=linux GOARCH=386 + - go: 1.2.x + env: GOOS=windows GOARCH=amd64 + - go: 1.2.x + env: GOOS=windows GOARCH=386 + - go: 1.3.x + - go: 1.4.x + - go: 1.5.x + - go: 1.6.x + - go: 1.7.x + - go: 1.8.x + - go: 1.9.x + - go: 1.10.x + - go: 1.11.x + - go: 1.12.x + - go: 1.13.x + env: GOOS=linux GOARCH=amd64 + - go: 1.13.x + env: GOOS=linux GOARCH=386 + - go: 1.13.x + env: GOOS=windows GOARCH=amd64 + - go: 1.13.x + env: GOOS=windows GOARCH=386 + - go: tip +go_import_path: gopkg.in/asn-ber.v1 +install: + - go list -f '{{range .Imports}}{{.}} {{end}}' ./... | xargs go get -v + - go list -f '{{range .TestImports}}{{.}} {{end}}' ./... | xargs go get -v + - go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover + - go build -v ./... +script: + - go test -v -cover ./... || go test -v ./... diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE b/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE new file mode 100644 index 00000000..23f94253 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2016 go-asn1-ber Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
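The docopt-go files vendored above (pattern matching, tokenizing, and the struct-binding helpers such as `guessUntaggedField`) back a small command-line-parsing API. As a rough orientation only — this sketch is not part of the diff, and it assumes the `ParseArgs`/`Opts.Bind` entry points of this docopt-go version; the `navalfate` usage string and the `moveCmd` struct are purely illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docopt/docopt-go"
)

const usage = `Naval Fate.

Usage:
  navalfate ship <name> move <x> <y> [--speed=<kn>]

Options:
  --speed=<kn>  Speed in knots [default: 10].`

// Untagged fields are matched to option keys by name, following the
// rules guessUntaggedField implements above: "<name>" -> Name,
// "--speed" -> Speed, and so on.
type moveCmd struct {
	Name  string
	X     string
	Y     string
	Speed string
}

func main() {
	// ParseArgs matches os.Args[1:] against the usage string; the empty
	// version string disables --version handling.
	opts, err := docopt.ParseArgs(usage, os.Args[1:], "")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	var cmd moveCmd
	if err := opts.Bind(&cmd); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("moving %s to (%s,%s) at %s knots\n", cmd.Name, cmd.X, cmd.Y, cmd.Speed)
}
```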
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/README.md b/vendor/github.com/go-asn1-ber/asn1-ber/README.md new file mode 100644 index 00000000..e3a9560d --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/README.md @@ -0,0 +1,24 @@ +[![GoDoc](https://godoc.org/gopkg.in/asn1-ber.v1?status.svg)](https://godoc.org/gopkg.in/asn1-ber.v1) [![Build Status](https://travis-ci.org/go-asn1-ber/asn1-ber.svg)](https://travis-ci.org/go-asn1-ber/asn1-ber) + + +ASN1 BER Encoding / Decoding Library for the GO programming language. +--------------------------------------------------------------------- + +Required libraries: + None + +Working: + Very basic encoding / decoding needed for LDAP protocol + +Tests Implemented: + A few + +TODO: + Fix all encoding / decoding to conform to ASN1 BER spec + Implement Tests / Benchmarks + +--- + +The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/) +The design is licensed under the Creative Commons 3.0 Attributions license. +Read this article for more details: http://blog.golang.org/gopher diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/ber.go b/vendor/github.com/go-asn1-ber/asn1-ber/ber.go new file mode 100644 index 00000000..6153f460 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/ber.go @@ -0,0 +1,512 @@ +package ber + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "os" + "reflect" +) + +// MaxPacketLengthBytes specifies the maximum allowed packet size when calling ReadPacket or DecodePacket. Set to 0 for +// no limit. +var MaxPacketLengthBytes int64 = math.MaxInt32 + +type Packet struct { + Identifier + Value interface{} + ByteValue []byte + Data *bytes.Buffer + Children []*Packet + Description string +} + +type Identifier struct { + ClassType Class + TagType Type + Tag Tag +} + +type Tag uint64 + +const ( + TagEOC Tag = 0x00 + TagBoolean Tag = 0x01 + TagInteger Tag = 0x02 + TagBitString Tag = 0x03 + TagOctetString Tag = 0x04 + TagNULL Tag = 0x05 + TagObjectIdentifier Tag = 0x06 + TagObjectDescriptor Tag = 0x07 + TagExternal Tag = 0x08 + TagRealFloat Tag = 0x09 + TagEnumerated Tag = 0x0a + TagEmbeddedPDV Tag = 0x0b + TagUTF8String Tag = 0x0c + TagRelativeOID Tag = 0x0d + TagSequence Tag = 0x10 + TagSet Tag = 0x11 + TagNumericString Tag = 0x12 + TagPrintableString Tag = 0x13 + TagT61String Tag = 0x14 + TagVideotexString Tag = 0x15 + TagIA5String Tag = 0x16 + TagUTCTime Tag = 0x17 + TagGeneralizedTime Tag = 0x18 + TagGraphicString Tag = 0x19 + TagVisibleString Tag = 0x1a + TagGeneralString Tag = 0x1b + TagUniversalString Tag = 0x1c + TagCharacterString Tag = 0x1d + TagBMPString Tag = 0x1e + TagBitmask Tag = 0x1f // xxx11111b + + // HighTag indicates the start of a high-tag byte sequence + HighTag Tag = 0x1f // xxx11111b + // HighTagContinueBitmask indicates the high-tag byte sequence should continue + HighTagContinueBitmask Tag = 0x80 // 10000000b + // HighTagValueBitmask obtains the tag value from a high-tag byte sequence byte + HighTagValueBitmask Tag = 0x7f // 01111111b +) + +const ( + // LengthLongFormBitmask is the mask to apply to the length byte to see if a long-form byte sequence is used + LengthLongFormBitmask = 0x80 + // LengthValueBitmask is the mask to apply to the length byte to get the number of bytes in the long-form byte sequence + LengthValueBitmask = 0x7f + + // LengthIndefinite is returned from readLength to indicate an indefinite length + LengthIndefinite = -1 +) + +var tagMap = map[Tag]string{ + TagEOC: "EOC (End-of-Content)", + TagBoolean: "Boolean", + TagInteger: "Integer", + 
TagBitString: "Bit String", + TagOctetString: "Octet String", + TagNULL: "NULL", + TagObjectIdentifier: "Object Identifier", + TagObjectDescriptor: "Object Descriptor", + TagExternal: "External", + TagRealFloat: "Real (float)", + TagEnumerated: "Enumerated", + TagEmbeddedPDV: "Embedded PDV", + TagUTF8String: "UTF8 String", + TagRelativeOID: "Relative-OID", + TagSequence: "Sequence and Sequence of", + TagSet: "Set and Set OF", + TagNumericString: "Numeric String", + TagPrintableString: "Printable String", + TagT61String: "T61 String", + TagVideotexString: "Videotex String", + TagIA5String: "IA5 String", + TagUTCTime: "UTC Time", + TagGeneralizedTime: "Generalized Time", + TagGraphicString: "Graphic String", + TagVisibleString: "Visible String", + TagGeneralString: "General String", + TagUniversalString: "Universal String", + TagCharacterString: "Character String", + TagBMPString: "BMP String", +} + +type Class uint8 + +const ( + ClassUniversal Class = 0 // 00xxxxxxb + ClassApplication Class = 64 // 01xxxxxxb + ClassContext Class = 128 // 10xxxxxxb + ClassPrivate Class = 192 // 11xxxxxxb + ClassBitmask Class = 192 // 11xxxxxxb +) + +var ClassMap = map[Class]string{ + ClassUniversal: "Universal", + ClassApplication: "Application", + ClassContext: "Context", + ClassPrivate: "Private", +} + +type Type uint8 + +const ( + TypePrimitive Type = 0 // xx0xxxxxb + TypeConstructed Type = 32 // xx1xxxxxb + TypeBitmask Type = 32 // xx1xxxxxb +) + +var TypeMap = map[Type]string{ + TypePrimitive: "Primitive", + TypeConstructed: "Constructed", +} + +var Debug bool = false + +func PrintBytes(out io.Writer, buf []byte, indent string) { + data_lines := make([]string, (len(buf)/30)+1) + num_lines := make([]string, (len(buf)/30)+1) + + for i, b := range buf { + data_lines[i/30] += fmt.Sprintf("%02x ", b) + num_lines[i/30] += fmt.Sprintf("%02d ", (i+1)%100) + } + + for i := 0; i < len(data_lines); i++ { + out.Write([]byte(indent + data_lines[i] + "\n")) + out.Write([]byte(indent + num_lines[i] + "\n\n")) + } +} + +func PrintPacket(p *Packet) { + printPacket(os.Stdout, p, 0, false) +} + +func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) { + indent_str := "" + + for len(indent_str) != indent { + indent_str += " " + } + + class_str := ClassMap[p.ClassType] + + tagtype_str := TypeMap[p.TagType] + + tag_str := fmt.Sprintf("0x%02X", p.Tag) + + if p.ClassType == ClassUniversal { + tag_str = tagMap[p.Tag] + } + + value := fmt.Sprint(p.Value) + description := "" + + if p.Description != "" { + description = p.Description + ": " + } + + fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indent_str, description, class_str, tagtype_str, tag_str, p.Data.Len(), value) + + if printBytes { + PrintBytes(out, p.Bytes(), indent_str) + } + + for _, child := range p.Children { + printPacket(out, child, indent+1, printBytes) + } +} + +// ReadPacket reads a single Packet from the reader +func ReadPacket(reader io.Reader) (*Packet, error) { + p, _, err := readPacket(reader) + if err != nil { + return nil, err + } + return p, nil +} + +func DecodeString(data []byte) string { + return string(data) +} + +func ParseInt64(bytes []byte) (ret int64, err error) { + if len(bytes) > 8 { + // We'll overflow an int64 in this case. + err = fmt.Errorf("integer too large") + return + } + for bytesRead := 0; bytesRead < len(bytes); bytesRead++ { + ret <<= 8 + ret |= int64(bytes[bytesRead]) + } + + // Shift up and down in order to sign extend the result. 
+ ret <<= 64 - uint8(len(bytes))*8 + ret >>= 64 - uint8(len(bytes))*8 + return +} + +func encodeInteger(i int64) []byte { + n := int64Length(i) + out := make([]byte, n) + + var j int + for ; n > 0; n-- { + out[j] = (byte(i >> uint((n-1)*8))) + j++ + } + + return out +} + +func int64Length(i int64) (numBytes int) { + numBytes = 1 + + for i > 127 { + numBytes++ + i >>= 8 + } + + for i < -128 { + numBytes++ + i >>= 8 + } + + return +} + +// DecodePacket decodes the given bytes into a single Packet +// If a decode error is encountered, nil is returned. +func DecodePacket(data []byte) *Packet { + p, _, _ := readPacket(bytes.NewBuffer(data)) + + return p +} + +// DecodePacketErr decodes the given bytes into a single Packet +// If a decode error is encountered, nil is returned +func DecodePacketErr(data []byte) (*Packet, error) { + p, _, err := readPacket(bytes.NewBuffer(data)) + if err != nil { + return nil, err + } + return p, nil +} + +// readPacket reads a single Packet from the reader, returning the number of bytes read +func readPacket(reader io.Reader) (*Packet, int, error) { + identifier, length, read, err := readHeader(reader) + if err != nil { + return nil, read, err + } + + p := &Packet{ + Identifier: identifier, + } + + p.Data = new(bytes.Buffer) + p.Children = make([]*Packet, 0, 2) + p.Value = nil + + if p.TagType == TypeConstructed { + // TODO: if universal, ensure tag type is allowed to be constructed + + // Track how much content we've read + contentRead := 0 + for { + if length != LengthIndefinite { + // End if we've read what we've been told to + if contentRead == length { + break + } + // Detect if a packet boundary didn't fall on the expected length + if contentRead > length { + return nil, read, fmt.Errorf("expected to read %d bytes, read %d", length, contentRead) + } + } + + // Read the next packet + child, r, err := readPacket(reader) + if err != nil { + return nil, read, err + } + contentRead += r + read += r + + // Test is this is the EOC marker for our packet + if isEOCPacket(child) { + if length == LengthIndefinite { + break + } + return nil, read, errors.New("eoc child not allowed with definite length") + } + + // Append and continue + p.AppendChild(child) + } + return p, read, nil + } + + if length == LengthIndefinite { + return nil, read, errors.New("indefinite length used with primitive type") + } + + // Read definite-length content + if MaxPacketLengthBytes > 0 && int64(length) > MaxPacketLengthBytes { + return nil, read, fmt.Errorf("length %d greater than maximum %d", length, MaxPacketLengthBytes) + } + content := make([]byte, length, length) + if length > 0 { + _, err := io.ReadFull(reader, content) + if err != nil { + if err == io.EOF { + return nil, read, io.ErrUnexpectedEOF + } + return nil, read, err + } + read += length + } + + if p.ClassType == ClassUniversal { + p.Data.Write(content) + p.ByteValue = content + + switch p.Tag { + case TagEOC: + case TagBoolean: + val, _ := ParseInt64(content) + + p.Value = val != 0 + case TagInteger: + p.Value, _ = ParseInt64(content) + case TagBitString: + case TagOctetString: + // the actual string encoding is not known here + // (e.g. for LDAP content is already an UTF8-encoded + // string). 
Return the data without further processing + p.Value = DecodeString(content) + case TagNULL: + case TagObjectIdentifier: + case TagObjectDescriptor: + case TagExternal: + case TagRealFloat: + case TagEnumerated: + p.Value, _ = ParseInt64(content) + case TagEmbeddedPDV: + case TagUTF8String: + p.Value = DecodeString(content) + case TagRelativeOID: + case TagSequence: + case TagSet: + case TagNumericString: + case TagPrintableString: + p.Value = DecodeString(content) + case TagT61String: + case TagVideotexString: + case TagIA5String: + case TagUTCTime: + case TagGeneralizedTime: + case TagGraphicString: + case TagVisibleString: + case TagGeneralString: + case TagUniversalString: + case TagCharacterString: + case TagBMPString: + } + } else { + p.Data.Write(content) + } + + return p, read, nil +} + +func (p *Packet) Bytes() []byte { + var out bytes.Buffer + + out.Write(encodeIdentifier(p.Identifier)) + out.Write(encodeLength(p.Data.Len())) + out.Write(p.Data.Bytes()) + + return out.Bytes() +} + +func (p *Packet) AppendChild(child *Packet) { + p.Data.Write(child.Bytes()) + p.Children = append(p.Children, child) +} + +func Encode(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet { + p := new(Packet) + + p.ClassType = ClassType + p.TagType = TagType + p.Tag = Tag + p.Data = new(bytes.Buffer) + + p.Children = make([]*Packet, 0, 2) + + p.Value = Value + p.Description = Description + + if Value != nil { + v := reflect.ValueOf(Value) + + if ClassType == ClassUniversal { + switch Tag { + case TagOctetString: + sv, ok := v.Interface().(string) + + if ok { + p.Data.Write([]byte(sv)) + } + } + } + } + + return p +} + +func NewSequence(Description string) *Packet { + return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, Description) +} + +func NewBoolean(ClassType Class, TagType Type, Tag Tag, Value bool, Description string) *Packet { + intValue := int64(0) + + if Value { + intValue = 1 + } + + p := Encode(ClassType, TagType, Tag, nil, Description) + + p.Value = Value + p.Data.Write(encodeInteger(intValue)) + + return p +} + +func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet { + p := Encode(ClassType, TagType, Tag, nil, Description) + + p.Value = Value + switch v := Value.(type) { + case int: + p.Data.Write(encodeInteger(int64(v))) + case uint: + p.Data.Write(encodeInteger(int64(v))) + case int64: + p.Data.Write(encodeInteger(v)) + case uint64: + // TODO : check range or add encodeUInt... + p.Data.Write(encodeInteger(int64(v))) + case int32: + p.Data.Write(encodeInteger(int64(v))) + case uint32: + p.Data.Write(encodeInteger(int64(v))) + case int16: + p.Data.Write(encodeInteger(int64(v))) + case uint16: + p.Data.Write(encodeInteger(int64(v))) + case int8: + p.Data.Write(encodeInteger(int64(v))) + case uint8: + p.Data.Write(encodeInteger(int64(v))) + default: + // TODO : add support for big.Int ? 
+ panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v)) + } + + return p +} + +func NewString(ClassType Class, TagType Type, Tag Tag, Value, Description string) *Packet { + p := Encode(ClassType, TagType, Tag, nil, Description) + + p.Value = Value + p.Data.Write([]byte(Value)) + + return p +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go b/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go new file mode 100644 index 00000000..1858b74b --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go @@ -0,0 +1,25 @@ +package ber + +func encodeUnsignedInteger(i uint64) []byte { + n := uint64Length(i) + out := make([]byte, n) + + var j int + for ; n > 0; n-- { + out[j] = (byte(i >> uint((n-1)*8))) + j++ + } + + return out +} + +func uint64Length(i uint64) (numBytes int) { + numBytes = 1 + + for i > 255 { + numBytes++ + i >>= 8 + } + + return +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/go.mod b/vendor/github.com/go-asn1-ber/asn1-ber/go.mod new file mode 100644 index 00000000..ee0b4be2 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/go.mod @@ -0,0 +1,3 @@ +module github.com/go-asn1-ber/asn1-ber + +go 1.13 diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/header.go b/vendor/github.com/go-asn1-ber/asn1-ber/header.go new file mode 100644 index 00000000..71615621 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/header.go @@ -0,0 +1,35 @@ +package ber + +import ( + "errors" + "fmt" + "io" +) + +func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) { + if i, c, err := readIdentifier(reader); err != nil { + return Identifier{}, 0, read, err + } else { + identifier = i + read += c + } + + if l, c, err := readLength(reader); err != nil { + return Identifier{}, 0, read, err + } else { + length = l + read += c + } + + // Validate length type with identifier (x.600, 8.1.3.2.a) + if length == LengthIndefinite && identifier.TagType == TypePrimitive { + return Identifier{}, 0, read, errors.New("indefinite length used with primitive type") + } + + if length < LengthIndefinite { + err = fmt.Errorf("length cannot be less than %d", LengthIndefinite) + return + } + + return identifier, length, read, nil +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go b/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go new file mode 100644 index 00000000..e8c43574 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go @@ -0,0 +1,112 @@ +package ber + +import ( + "errors" + "fmt" + "io" +) + +func readIdentifier(reader io.Reader) (Identifier, int, error) { + identifier := Identifier{} + read := 0 + + // identifier byte + b, err := readByte(reader) + if err != nil { + if Debug { + fmt.Printf("error reading identifier byte: %v\n", err) + } + return Identifier{}, read, err + } + read++ + + identifier.ClassType = Class(b) & ClassBitmask + identifier.TagType = Type(b) & TypeBitmask + + if tag := Tag(b) & TagBitmask; tag != HighTag { + // short-form tag + identifier.Tag = tag + return identifier, read, nil + } + + // high-tag-number tag + tagBytes := 0 + for { + b, err := readByte(reader) + if err != nil { + if Debug { + fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err) + } + return Identifier{}, read, err + } + tagBytes++ + read++ + + // Lowest 7 bits get appended to the tag value (x.690, 8.1.2.4.2.b) + identifier.Tag <<= 7 + identifier.Tag |= Tag(b) & HighTagValueBitmask + + // First byte may not be all zeros (x.690, 8.1.2.4.2.c) + if tagBytes == 1 
&& identifier.Tag == 0 { + return Identifier{}, read, errors.New("invalid first high-tag-number tag byte") + } + // Overflow of int64 + // TODO: support big int tags? + if tagBytes > 9 { + return Identifier{}, read, errors.New("high-tag-number tag overflow") + } + + // Top bit of 0 means this is the last byte in the high-tag-number tag (x.690, 8.1.2.4.2.a) + if Tag(b)&HighTagContinueBitmask == 0 { + break + } + } + + return identifier, read, nil +} + +func encodeIdentifier(identifier Identifier) []byte { + b := []byte{0x0} + b[0] |= byte(identifier.ClassType) + b[0] |= byte(identifier.TagType) + + if identifier.Tag < HighTag { + // Short-form + b[0] |= byte(identifier.Tag) + } else { + // high-tag-number + b[0] |= byte(HighTag) + + tag := identifier.Tag + + b = append(b, encodeHighTag(tag)...) + } + return b +} + +func encodeHighTag(tag Tag) []byte { + // set cap=4 to hopefully avoid additional allocations + b := make([]byte, 0, 4) + for tag != 0 { + // t := last 7 bits of tag (HighTagValueBitmask = 0x7F) + t := tag & HighTagValueBitmask + + // right shift tag 7 to remove what was just pulled off + tag >>= 7 + + // if b already has entries this entry needs a continuation bit (0x80) + if len(b) != 0 { + t |= HighTagContinueBitmask + } + + b = append(b, byte(t)) + } + // reverse + // since bits were pulled off 'tag' small to high the byte slice is in reverse order. + // example: tag = 0xFF results in {0x7F, 0x01 + 0x80 (continuation bit)} + // this needs to be reversed into 0x81 0x7F + for i, j := 0, len(b)-1; i < len(b)/2; i++ { + b[i], b[j-i] = b[j-i], b[i] + } + return b +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/length.go b/vendor/github.com/go-asn1-ber/asn1-ber/length.go new file mode 100644 index 00000000..750e8f44 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/length.go @@ -0,0 +1,81 @@ +package ber + +import ( + "errors" + "fmt" + "io" +) + +func readLength(reader io.Reader) (length int, read int, err error) { + // length byte + b, err := readByte(reader) + if err != nil { + if Debug { + fmt.Printf("error reading length byte: %v\n", err) + } + return 0, 0, err + } + read++ + + switch { + case b == 0xFF: + // Invalid 0xFF (x.600, 8.1.3.5.c) + return 0, read, errors.New("invalid length byte 0xff") + + case b == LengthLongFormBitmask: + // Indefinite form, we have to decode packets until we encounter an EOC packet (x.600, 8.1.3.6) + length = LengthIndefinite + + case b&LengthLongFormBitmask == 0: + // Short definite form, extract the length from the bottom 7 bits (x.600, 8.1.3.4) + length = int(b) & LengthValueBitmask + + case b&LengthLongFormBitmask != 0: + // Long definite form, extract the number of length bytes to follow from the bottom 7 bits (x.600, 8.1.3.5.b) + lengthBytes := int(b) & LengthValueBitmask + // Protect against overflow + // TODO: support big int length? 
+ if lengthBytes > 8 { + return 0, read, errors.New("long-form length overflow") + } + + // Accumulate into a 64-bit variable + var length64 int64 + for i := 0; i < lengthBytes; i++ { + b, err = readByte(reader) + if err != nil { + if Debug { + fmt.Printf("error reading long-form length byte %d: %v\n", i, err) + } + return 0, read, err + } + read++ + + // x.600, 8.1.3.5 + length64 <<= 8 + length64 |= int64(b) + } + + // Cast to a platform-specific integer + length = int(length64) + // Ensure we didn't overflow + if int64(length) != length64 { + return 0, read, errors.New("long-form length overflow") + } + + default: + return 0, read, errors.New("invalid length byte") + } + + return length, read, nil +} + +func encodeLength(length int) []byte { + length_bytes := encodeUnsignedInteger(uint64(length)) + if length > 127 || len(length_bytes) > 1 { + longFormBytes := []byte{(LengthLongFormBitmask | byte(len(length_bytes)))} + longFormBytes = append(longFormBytes, length_bytes...) + length_bytes = longFormBytes + } + return length_bytes +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/util.go b/vendor/github.com/go-asn1-ber/asn1-ber/util.go new file mode 100644 index 00000000..3e56b66c --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/util.go @@ -0,0 +1,24 @@ +package ber + +import "io" + +func readByte(reader io.Reader) (byte, error) { + bytes := make([]byte, 1, 1) + _, err := io.ReadFull(reader, bytes) + if err != nil { + if err == io.EOF { + return 0, io.ErrUnexpectedEOF + } + return 0, err + } + return bytes[0], nil +} + +func isEOCPacket(p *Packet) bool { + return p != nil && + p.Tag == TagEOC && + p.ClassType == ClassUniversal && + p.TagType == TypePrimitive && + len(p.ByteValue) == 0 && + len(p.Children) == 0 +} diff --git a/vendor/github.com/go-ldap/ldap/v3/LICENSE b/vendor/github.com/go-ldap/ldap/v3/LICENSE new file mode 100644 index 00000000..6c0ed4b3 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2016 go-ldap Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
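The `ber` package added above is the low-level BER encoder/decoder that the go-ldap client below builds on. As a minimal sketch (not part of the vendored diff) using only the constructors and decode functions defined in ber.go above, here is a round-trip through a constructed SEQUENCE:

```go
package main

import (
	"fmt"

	ber "github.com/go-asn1-ber/asn1-ber"
)

func main() {
	// Build a SEQUENCE containing an OCTET STRING and an INTEGER using the
	// constructors from ber.go, then serialize it.
	seq := ber.NewSequence("example sequence")
	seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "hello", "greeting"))
	seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(42), "answer"))

	raw := seq.Bytes()
	fmt.Printf("encoded %d bytes\n", len(raw))

	// DecodePacketErr parses the bytes back into a Packet tree; PrintPacket
	// dumps the tree with class, type, tag, and value for each node.
	decoded, err := ber.DecodePacketErr(raw)
	if err != nil {
		panic(err)
	}
	ber.PrintPacket(decoded)
}
```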
diff --git a/vendor/github.com/go-ldap/ldap/v3/add.go b/vendor/github.com/go-ldap/ldap/v3/add.go new file mode 100644 index 00000000..baecd787 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/add.go @@ -0,0 +1,91 @@ +package ldap + +import ( + "log" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// Attribute represents an LDAP attribute +type Attribute struct { + // Type is the name of the LDAP attribute + Type string + // Vals are the LDAP attribute values + Vals []string +} + +func (a *Attribute) encode() *ber.Packet { + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute") + seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type")) + set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue") + for _, value := range a.Vals { + set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals")) + } + seq.AppendChild(set) + return seq +} + +// AddRequest represents an LDAP AddRequest operation +type AddRequest struct { + // DN identifies the entry being added + DN string + // Attributes list the attributes of the new entry + Attributes []Attribute + // Controls hold optional controls to send with the request + Controls []Control +} + +func (req *AddRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN")) + attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes") + for _, attribute := range req.Attributes { + attributes.AppendChild(attribute.encode()) + } + pkt.AppendChild(attributes) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil +} + +// Attribute adds an attribute with the given type and values +func (req *AddRequest) Attribute(attrType string, attrVals []string) { + req.Attributes = append(req.Attributes, Attribute{Type: attrType, Vals: attrVals}) +} + +// NewAddRequest returns an AddRequest for the given DN, with no attributes +func NewAddRequest(dn string, controls []Control) *AddRequest { + return &AddRequest{ + DN: dn, + Controls: controls, + } + +} + +// Add performs the given AddRequest +func (l *Conn) Add(addRequest *AddRequest) error { + msgCtx, err := l.doRequest(addRequest) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return err + } + + if packet.Children[1].Tag == ApplicationAddResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/v3/bind.go b/vendor/github.com/go-ldap/ldap/v3/bind.go new file mode 100644 index 00000000..15307f50 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/bind.go @@ -0,0 +1,152 @@ +package ldap + +import ( + "errors" + "fmt" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// SimpleBindRequest represents a username/password bind operation +type SimpleBindRequest struct { + // Username is the name of the Directory object that the client wishes to bind as + Username string + // Password is the credentials to bind with + Password string + // Controls are optional controls to send with the bind request + Controls []Control + 
// AllowEmptyPassword sets whether the client allows binding with an empty password + // (normally used for unauthenticated bind). + AllowEmptyPassword bool +} + +// SimpleBindResult contains the response from the server +type SimpleBindResult struct { + Controls []Control +} + +// NewSimpleBindRequest returns a bind request +func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest { + return &SimpleBindRequest{ + Username: username, + Password: password, + Controls: controls, + AllowEmptyPassword: false, + } +} + +func (req *SimpleBindRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Username, "User Name")) + pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.Password, "Password")) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil +} + +// SimpleBind performs the simple bind operation defined in the given request +func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) { + if simpleBindRequest.Password == "" && !simpleBindRequest.AllowEmptyPassword { + return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client")) + } + + msgCtx, err := l.doRequest(simpleBindRequest) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return nil, err + } + + result := &SimpleBindResult{ + Controls: make([]Control, 0), + } + + if len(packet.Children) == 3 { + for _, child := range packet.Children[2].Children { + decodedChild, decodeErr := DecodeControl(child) + if decodeErr != nil { + return nil, fmt.Errorf("failed to decode child control: %s", decodeErr) + } + result.Controls = append(result.Controls, decodedChild) + } + } + + err = GetLDAPError(packet) + return result, err +} + +// Bind performs a bind with the given username and password. +// +// It does not allow unauthenticated bind (i.e. empty password). Use the UnauthenticatedBind method +// for that. +func (l *Conn) Bind(username, password string) error { + req := &SimpleBindRequest{ + Username: username, + Password: password, + AllowEmptyPassword: false, + } + _, err := l.SimpleBind(req) + return err +} + +// UnauthenticatedBind performs an unauthenticated bind. +// +// A username may be provided for trace (e.g. logging) purpose only, but it is normally not +// authenticated or otherwise validated by the LDAP server. +// +// See https://tools.ietf.org/html/rfc4513#section-5.1.2 . +// See https://tools.ietf.org/html/rfc4513#section-6.3.1 . 
+func (l *Conn) UnauthenticatedBind(username string) error { + req := &SimpleBindRequest{ + Username: username, + Password: "", + AllowEmptyPassword: true, + } + _, err := l.SimpleBind(req) + return err +} + +var externalBindRequest = requestFunc(func(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name")) + + saslAuth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication") + saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "EXTERNAL", "SASL Mech")) + saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "SASL Cred")) + + pkt.AppendChild(saslAuth) + + envelope.AppendChild(pkt) + + return nil +}) + +// ExternalBind performs SASL/EXTERNAL authentication. +// +// Use ldap.DialURL("ldapi://") to connect to the Unix socket before ExternalBind. +// +// See https://tools.ietf.org/html/rfc4422#appendix-A +func (l *Conn) ExternalBind() error { + msgCtx, err := l.doRequest(externalBindRequest) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return err + } + + return GetLDAPError(packet) +} diff --git a/vendor/github.com/go-ldap/ldap/v3/client.go b/vendor/github.com/go-ldap/ldap/v3/client.go new file mode 100644 index 00000000..619677c7 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/client.go @@ -0,0 +1,30 @@ +package ldap + +import ( + "crypto/tls" + "time" +) + +// Client knows how to interact with an LDAP server +type Client interface { + Start() + StartTLS(*tls.Config) error + Close() + SetTimeout(time.Duration) + + Bind(username, password string) error + UnauthenticatedBind(username string) error + SimpleBind(*SimpleBindRequest) (*SimpleBindResult, error) + ExternalBind() error + + Add(*AddRequest) error + Del(*DelRequest) error + Modify(*ModifyRequest) error + ModifyDN(*ModifyDNRequest) error + + Compare(dn, attribute, value string) (bool, error) + PasswordModify(*PasswordModifyRequest) (*PasswordModifyResult, error) + + Search(*SearchRequest) (*SearchResult, error) + SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) +} diff --git a/vendor/github.com/go-ldap/ldap/v3/compare.go b/vendor/github.com/go-ldap/ldap/v3/compare.go new file mode 100644 index 00000000..cd43e4c5 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/compare.go @@ -0,0 +1,61 @@ +package ldap + +import ( + "fmt" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// CompareRequest represents an LDAP CompareRequest operation. 
+type CompareRequest struct { + DN string + Attribute string + Value string +} + +func (req *CompareRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN")) + + ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion") + ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Attribute, "AttributeDesc")) + ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Value, "AssertionValue")) + + pkt.AppendChild(ava) + + envelope.AppendChild(pkt) + + return nil +} + +// Compare checks to see if the attribute of the dn matches value. Returns true if it does otherwise +// false with any error that occurs if any. +func (l *Conn) Compare(dn, attribute, value string) (bool, error) { + msgCtx, err := l.doRequest(&CompareRequest{ + DN: dn, + Attribute: attribute, + Value: value}) + if err != nil { + return false, err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return false, err + } + + if packet.Children[1].Tag == ApplicationCompareResponse { + err := GetLDAPError(packet) + + switch { + case IsErrorWithCode(err, LDAPResultCompareTrue): + return true, nil + case IsErrorWithCode(err, LDAPResultCompareFalse): + return false, nil + default: + return false, err + } + } + return false, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag) +} diff --git a/vendor/github.com/go-ldap/ldap/v3/conn.go b/vendor/github.com/go-ldap/ldap/v3/conn.go new file mode 100644 index 00000000..b09b7aa0 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/conn.go @@ -0,0 +1,565 @@ +package ldap + +import ( + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "net/url" + "sync" + "sync/atomic" + "time" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +const ( + // MessageQuit causes the processMessages loop to exit + MessageQuit = 0 + // MessageRequest sends a request to the server + MessageRequest = 1 + // MessageResponse receives a response from the server + MessageResponse = 2 + // MessageFinish indicates the client considers a particular message ID to be finished + MessageFinish = 3 + // MessageTimeout indicates the client-specified timeout for a particular message ID has been reached + MessageTimeout = 4 +) + +const ( + // DefaultLdapPort default ldap port for pure TCP connection + DefaultLdapPort = "389" + // DefaultLdapsPort default ldap port for SSL connection + DefaultLdapsPort = "636" +) + +// PacketResponse contains the packet or error encountered reading a response +type PacketResponse struct { + // Packet is the packet read from the server + Packet *ber.Packet + // Error is an error encountered while reading + Error error +} + +// ReadPacket returns the packet or an error +func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) { + if (pr == nil) || (pr.Packet == nil && pr.Error == nil) { + return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response")) + } + return pr.Packet, pr.Error +} + +type messageContext struct { + id int64 + // close(done) should only be called from finishMessage() + done chan struct{} + // close(responses) should only be called from processMessages(), and only sent to from sendResponse() + responses chan *PacketResponse +} + +// sendResponse should only be called within the 
processMessages() loop which +// is also responsible for closing the responses channel. +func (msgCtx *messageContext) sendResponse(packet *PacketResponse) { + select { + case msgCtx.responses <- packet: + // Successfully sent packet to message handler. + case <-msgCtx.done: + // The request handler is done and will not receive more + // packets. + } +} + +type messagePacket struct { + Op int + MessageID int64 + Packet *ber.Packet + Context *messageContext +} + +type sendMessageFlags uint + +const ( + startTLS sendMessageFlags = 1 << iota +) + +// Conn represents an LDAP Connection +type Conn struct { + // requestTimeout is loaded atomically + // so we need to ensure 64-bit alignment on 32-bit platforms. + requestTimeout int64 + conn net.Conn + isTLS bool + closing uint32 + closeErr atomic.Value + isStartingTLS bool + Debug debugging + chanConfirm chan struct{} + messageContexts map[int64]*messageContext + chanMessage chan *messagePacket + chanMessageID chan int64 + wgClose sync.WaitGroup + outstandingRequests uint + messageMutex sync.Mutex +} + +var _ Client = &Conn{} + +// DefaultTimeout is a package-level variable that sets the timeout value +// used for the Dial and DialTLS methods. +// +// WARNING: since this is a package-level variable, setting this value from +// multiple places will probably result in undesired behaviour. +var DefaultTimeout = 60 * time.Second + +// DialOpt configures DialContext. +type DialOpt func(*DialContext) + +// DialWithDialer updates net.Dialer in DialContext. +func DialWithDialer(d *net.Dialer) DialOpt { + return func(dc *DialContext) { + dc.d = d + } +} + +// DialWithTLSConfig updates tls.Config in DialContext. +func DialWithTLSConfig(tc *tls.Config) DialOpt { + return func(dc *DialContext) { + dc.tc = tc + } +} + +// DialContext contains necessary parameters to dial the given ldap URL. +type DialContext struct { + d *net.Dialer + tc *tls.Config +} + +func (dc *DialContext) dial(u *url.URL) (net.Conn, error) { + if u.Scheme == "ldapi" { + if u.Path == "" || u.Path == "/" { + u.Path = "/var/run/slapd/ldapi" + } + return dc.d.Dial("unix", u.Path) + } + + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // we asume that error is due to missing port + host = u.Host + port = "" + } + + switch u.Scheme { + case "ldap": + if port == "" { + port = DefaultLdapPort + } + return dc.d.Dial("tcp", net.JoinHostPort(host, port)) + case "ldaps": + if port == "" { + port = DefaultLdapsPort + } + return tls.DialWithDialer(dc.d, "tcp", net.JoinHostPort(host, port), dc.tc) + } + + return nil, fmt.Errorf("Unknown scheme '%s'", u.Scheme) +} + +// Dial connects to the given address on the given network using net.Dial +// and then returns a new Conn for the connection. +// @deprecated Use DialURL instead. +func Dial(network, addr string) (*Conn, error) { + c, err := net.DialTimeout(network, addr, DefaultTimeout) + if err != nil { + return nil, NewError(ErrorNetwork, err) + } + conn := NewConn(c, false) + conn.Start() + return conn, nil +} + +// DialTLS connects to the given address on the given network using tls.Dial +// and then returns a new Conn for the connection. +// @deprecated Use DialURL instead. +func DialTLS(network, addr string, config *tls.Config) (*Conn, error) { + c, err := tls.DialWithDialer(&net.Dialer{Timeout: DefaultTimeout}, network, addr, config) + if err != nil { + return nil, NewError(ErrorNetwork, err) + } + conn := NewConn(c, true) + conn.Start() + return conn, nil +} + +// DialURL connects to the given ldap URL. 
+// The following schemas are supported: ldap://, ldaps://, ldapi://. +// On success a new Conn for the connection is returned. +func DialURL(addr string, opts ...DialOpt) (*Conn, error) { + u, err := url.Parse(addr) + if err != nil { + return nil, NewError(ErrorNetwork, err) + } + + var dc DialContext + for _, opt := range opts { + opt(&dc) + } + if dc.d == nil { + dc.d = &net.Dialer{Timeout: DefaultTimeout} + } + + c, err := dc.dial(u) + if err != nil { + return nil, NewError(ErrorNetwork, err) + } + + conn := NewConn(c, u.Scheme == "ldaps") + conn.Start() + return conn, nil +} + +// NewConn returns a new Conn using conn for network I/O. +func NewConn(conn net.Conn, isTLS bool) *Conn { + return &Conn{ + conn: conn, + chanConfirm: make(chan struct{}), + chanMessageID: make(chan int64), + chanMessage: make(chan *messagePacket, 10), + messageContexts: map[int64]*messageContext{}, + requestTimeout: 0, + isTLS: isTLS, + } +} + +// Start initializes goroutines to read responses and process messages +func (l *Conn) Start() { + l.wgClose.Add(1) + go l.reader() + go l.processMessages() +} + +// IsClosing returns whether or not we're currently closing. +func (l *Conn) IsClosing() bool { + return atomic.LoadUint32(&l.closing) == 1 +} + +// setClosing sets the closing value to true +func (l *Conn) setClosing() bool { + return atomic.CompareAndSwapUint32(&l.closing, 0, 1) +} + +// Close closes the connection. +func (l *Conn) Close() { + l.messageMutex.Lock() + defer l.messageMutex.Unlock() + + if l.setClosing() { + l.Debug.Printf("Sending quit message and waiting for confirmation") + l.chanMessage <- &messagePacket{Op: MessageQuit} + <-l.chanConfirm + close(l.chanMessage) + + l.Debug.Printf("Closing network connection") + if err := l.conn.Close(); err != nil { + log.Println(err) + } + + l.wgClose.Done() + } + l.wgClose.Wait() +} + +// SetTimeout sets the time after a request is sent that a MessageTimeout triggers +func (l *Conn) SetTimeout(timeout time.Duration) { + if timeout > 0 { + atomic.StoreInt64(&l.requestTimeout, int64(timeout)) + } +} + +// Returns the next available messageID +func (l *Conn) nextMessageID() int64 { + if messageID, ok := <-l.chanMessageID; ok { + return messageID + } + return 0 +} + +// StartTLS sends the command to start a TLS session and then creates a new TLS Client +func (l *Conn) StartTLS(config *tls.Config) error { + if l.isTLS { + return NewError(ErrorNetwork, errors.New("ldap: already encrypted")) + } + + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS") + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command")) + packet.AppendChild(request) + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessageWithFlags(packet, startTLS) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + l.Close() + return err + 
} + l.Debug.PrintPacket(packet) + } + + if err := GetLDAPError(packet); err == nil { + conn := tls.Client(l.conn, config) + + if connErr := conn.Handshake(); connErr != nil { + l.Close() + return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", connErr)) + } + + l.isTLS = true + l.conn = conn + } else { + return err + } + go l.reader() + + return nil +} + +// TLSConnectionState returns the client's TLS connection state. +// The return values are their zero values if StartTLS did +// not succeed. +func (l *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) { + tc, ok := l.conn.(*tls.Conn) + if !ok { + return + } + return tc.ConnectionState(), true +} + +func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) { + return l.sendMessageWithFlags(packet, 0) +} + +func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) { + if l.IsClosing() { + return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed")) + } + l.messageMutex.Lock() + l.Debug.Printf("flags&startTLS = %d", flags&startTLS) + if l.isStartingTLS { + l.messageMutex.Unlock() + return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase")) + } + if flags&startTLS != 0 { + if l.outstandingRequests != 0 { + l.messageMutex.Unlock() + return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests")) + } + l.isStartingTLS = true + } + l.outstandingRequests++ + + l.messageMutex.Unlock() + + responses := make(chan *PacketResponse) + messageID := packet.Children[0].Value.(int64) + message := &messagePacket{ + Op: MessageRequest, + MessageID: messageID, + Packet: packet, + Context: &messageContext{ + id: messageID, + done: make(chan struct{}), + responses: responses, + }, + } + l.sendProcessMessage(message) + return message.Context, nil +} + +func (l *Conn) finishMessage(msgCtx *messageContext) { + close(msgCtx.done) + + if l.IsClosing() { + return + } + + l.messageMutex.Lock() + l.outstandingRequests-- + if l.isStartingTLS { + l.isStartingTLS = false + } + l.messageMutex.Unlock() + + message := &messagePacket{ + Op: MessageFinish, + MessageID: msgCtx.id, + } + l.sendProcessMessage(message) +} + +func (l *Conn) sendProcessMessage(message *messagePacket) bool { + l.messageMutex.Lock() + defer l.messageMutex.Unlock() + if l.IsClosing() { + return false + } + l.chanMessage <- message + return true +} + +func (l *Conn) processMessages() { + defer func() { + if err := recover(); err != nil { + log.Printf("ldap: recovered panic in processMessages: %v", err) + } + for messageID, msgCtx := range l.messageContexts { + // If we are closing due to an error, inform anyone who + // is waiting about the error. 
+ if l.IsClosing() && l.closeErr.Load() != nil { + msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)}) + } + l.Debug.Printf("Closing channel for MessageID %d", messageID) + close(msgCtx.responses) + delete(l.messageContexts, messageID) + } + close(l.chanMessageID) + close(l.chanConfirm) + }() + + var messageID int64 = 1 + for { + select { + case l.chanMessageID <- messageID: + messageID++ + case message := <-l.chanMessage: + switch message.Op { + case MessageQuit: + l.Debug.Printf("Shutting down - quit message received") + return + case MessageRequest: + // Add to message list and write to network + l.Debug.Printf("Sending message %d", message.MessageID) + + buf := message.Packet.Bytes() + _, err := l.conn.Write(buf) + if err != nil { + l.Debug.Printf("Error Sending Message: %s", err.Error()) + message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)}) + close(message.Context.responses) + break + } + + // Only add to messageContexts if we were able to + // successfully write the message. + l.messageContexts[message.MessageID] = message.Context + + // Add timeout if defined + requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout)) + if requestTimeout > 0 { + go func() { + defer func() { + if err := recover(); err != nil { + log.Printf("ldap: recovered panic in RequestTimeout: %v", err) + } + }() + time.Sleep(requestTimeout) + timeoutMessage := &messagePacket{ + Op: MessageTimeout, + MessageID: message.MessageID, + } + l.sendProcessMessage(timeoutMessage) + }() + } + case MessageResponse: + l.Debug.Printf("Receiving message %d", message.MessageID) + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + msgCtx.sendResponse(&PacketResponse{message.Packet, nil}) + } else { + log.Printf("Received unexpected message %d, %v", message.MessageID, l.IsClosing()) + l.Debug.PrintPacket(message.Packet) + } + case MessageTimeout: + // Handle the timeout by closing the channel + // All reads will return immediately + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + l.Debug.Printf("Receiving message timeout for %d", message.MessageID) + msgCtx.sendResponse(&PacketResponse{message.Packet, errors.New("ldap: connection timed out")}) + delete(l.messageContexts, message.MessageID) + close(msgCtx.responses) + } + case MessageFinish: + l.Debug.Printf("Finished message %d", message.MessageID) + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + delete(l.messageContexts, message.MessageID) + close(msgCtx.responses) + } + } + } + } +} + +func (l *Conn) reader() { + cleanstop := false + defer func() { + if err := recover(); err != nil { + log.Printf("ldap: recovered panic in reader: %v", err) + } + if !cleanstop { + l.Close() + } + }() + + for { + if cleanstop { + l.Debug.Printf("reader clean stopping (without closing the connection)") + return + } + packet, err := ber.ReadPacket(l.conn) + if err != nil { + // A read error is expected here if we are closing the connection... 
+ if !l.IsClosing() { + l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err)) + l.Debug.Printf("reader error: %s", err) + } + return + } + if err := addLDAPDescriptions(packet); err != nil { + l.Debug.Printf("descriptions error: %s", err) + } + if len(packet.Children) == 0 { + l.Debug.Printf("Received bad ldap packet") + continue + } + l.messageMutex.Lock() + if l.isStartingTLS { + cleanstop = true + } + l.messageMutex.Unlock() + message := &messagePacket{ + Op: MessageResponse, + MessageID: packet.Children[0].Value.(int64), + Packet: packet, + } + if !l.sendProcessMessage(message) { + return + } + } +} diff --git a/vendor/github.com/go-ldap/ldap/v3/control.go b/vendor/github.com/go-ldap/ldap/v3/control.go new file mode 100644 index 00000000..463fe3a3 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/control.go @@ -0,0 +1,499 @@ +package ldap + +import ( + "fmt" + "strconv" + + "github.com/go-asn1-ber/asn1-ber" +) + +const ( + // ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt + ControlTypePaging = "1.2.840.113556.1.4.319" + // ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10 + ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1" + // ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 + ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4" + // ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 + ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5" + // ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296 + ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2" + + // ControlTypeMicrosoftNotification - https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx + ControlTypeMicrosoftNotification = "1.2.840.113556.1.4.528" + // ControlTypeMicrosoftShowDeleted - https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx + ControlTypeMicrosoftShowDeleted = "1.2.840.113556.1.4.417" +) + +// ControlTypeMap maps controls to text descriptions +var ControlTypeMap = map[string]string{ + ControlTypePaging: "Paging", + ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft", + ControlTypeManageDsaIT: "Manage DSA IT", + ControlTypeMicrosoftNotification: "Change Notification - Microsoft", + ControlTypeMicrosoftShowDeleted: "Show Deleted Objects - Microsoft", +} + +// Control defines an interface controls provide to encode and describe themselves +type Control interface { + // GetControlType returns the OID + GetControlType() string + // Encode returns the ber packet representation + Encode() *ber.Packet + // String returns a human-readable description + String() string +} + +// ControlString implements the Control interface for simple controls +type ControlString struct { + ControlType string + Criticality bool + ControlValue string +} + +// GetControlType returns the OID +func (c *ControlString) GetControlType() string { + return c.ControlType +} + +// Encode returns the ber packet representation +func (c *ControlString) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")")) + if c.Criticality { + packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality")) + } + if c.ControlValue != "" { + 
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(c.ControlValue), "Control Value")) + } + return packet +} + +// String returns a human-readable description +func (c *ControlString) String() string { + return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue) +} + +// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt +type ControlPaging struct { + // PagingSize indicates the page size + PagingSize uint32 + // Cookie is an opaque value returned by the server to track a paging cursor + Cookie []byte +} + +// GetControlType returns the OID +func (c *ControlPaging) GetControlType() string { + return ControlTypePaging +} + +// Encode returns the ber packet representation +func (c *ControlPaging) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")")) + + p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)") + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value") + seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size")) + cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie") + cookie.Value = c.Cookie + cookie.Data.Write(c.Cookie) + seq.AppendChild(cookie) + p2.AppendChild(seq) + + packet.AppendChild(p2) + return packet +} + +// String returns a human-readable description +func (c *ControlPaging) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q", + ControlTypeMap[ControlTypePaging], + ControlTypePaging, + false, + c.PagingSize, + c.Cookie) +} + +// SetCookie stores the given cookie in the paging control +func (c *ControlPaging) SetCookie(cookie []byte) { + c.Cookie = cookie +} + +// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10 +type ControlBeheraPasswordPolicy struct { + // Expire contains the number of seconds before a password will expire + Expire int64 + // Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password + Grace int64 + // Error indicates the error code + Error int8 + // ErrorString is a human readable error + ErrorString string +} + +// GetControlType returns the OID +func (c *ControlBeheraPasswordPolicy) GetControlType() string { + return ControlTypeBeheraPasswordPolicy +} + +// Encode returns the ber packet representation +func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlBeheraPasswordPolicy) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s", + ControlTypeMap[ControlTypeBeheraPasswordPolicy], + 
ControlTypeBeheraPasswordPolicy, + false, + c.Expire, + c.Grace, + c.Error, + c.ErrorString) +} + +// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 +type ControlVChuPasswordMustChange struct { + // MustChange indicates if the password is required to be changed + MustChange bool +} + +// GetControlType returns the OID +func (c *ControlVChuPasswordMustChange) GetControlType() string { + return ControlTypeVChuPasswordMustChange +} + +// Encode returns the ber packet representation +func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet { + return nil +} + +// String returns a human-readable description +func (c *ControlVChuPasswordMustChange) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t MustChange: %v", + ControlTypeMap[ControlTypeVChuPasswordMustChange], + ControlTypeVChuPasswordMustChange, + false, + c.MustChange) +} + +// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 +type ControlVChuPasswordWarning struct { + // Expire indicates the time in seconds until the password expires + Expire int64 +} + +// GetControlType returns the OID +func (c *ControlVChuPasswordWarning) GetControlType() string { + return ControlTypeVChuPasswordWarning +} + +// Encode returns the ber packet representation +func (c *ControlVChuPasswordWarning) Encode() *ber.Packet { + return nil +} + +// String returns a human-readable description +func (c *ControlVChuPasswordWarning) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t Expire: %b", + ControlTypeMap[ControlTypeVChuPasswordWarning], + ControlTypeVChuPasswordWarning, + false, + c.Expire) +} + +// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296 +type ControlManageDsaIT struct { + // Criticality indicates if this control is required + Criticality bool +} + +// GetControlType returns the OID +func (c *ControlManageDsaIT) GetControlType() string { + return ControlTypeManageDsaIT +} + +// Encode returns the ber packet representation +func (c *ControlManageDsaIT) Encode() *ber.Packet { + //FIXME + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")")) + if c.Criticality { + packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality")) + } + return packet +} + +// String returns a human-readable description +func (c *ControlManageDsaIT) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t", + ControlTypeMap[ControlTypeManageDsaIT], + ControlTypeManageDsaIT, + c.Criticality) +} + +// NewControlManageDsaIT returns a ControlManageDsaIT control +func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT { + return &ControlManageDsaIT{Criticality: Criticality} +} + +// ControlMicrosoftNotification implements the control described in https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx +type ControlMicrosoftNotification struct{} + +// GetControlType returns the OID +func (c *ControlMicrosoftNotification) GetControlType() string { + return ControlTypeMicrosoftNotification +} + +// Encode returns the ber packet representation +func (c *ControlMicrosoftNotification) Encode() *ber.Packet { + packet := 
ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftNotification, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftNotification]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlMicrosoftNotification) String() string { + return fmt.Sprintf( + "Control Type: %s (%q)", + ControlTypeMap[ControlTypeMicrosoftNotification], + ControlTypeMicrosoftNotification) +} + +// NewControlMicrosoftNotification returns a ControlMicrosoftNotification control +func NewControlMicrosoftNotification() *ControlMicrosoftNotification { + return &ControlMicrosoftNotification{} +} + +// ControlMicrosoftShowDeleted implements the control described in https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx +type ControlMicrosoftShowDeleted struct{} + +// GetControlType returns the OID +func (c *ControlMicrosoftShowDeleted) GetControlType() string { + return ControlTypeMicrosoftShowDeleted +} + +// Encode returns the ber packet representation +func (c *ControlMicrosoftShowDeleted) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftShowDeleted, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftShowDeleted]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlMicrosoftShowDeleted) String() string { + return fmt.Sprintf( + "Control Type: %s (%q)", + ControlTypeMap[ControlTypeMicrosoftShowDeleted], + ControlTypeMicrosoftShowDeleted) +} + +// NewControlMicrosoftShowDeleted returns a ControlMicrosoftShowDeleted control +func NewControlMicrosoftShowDeleted() *ControlMicrosoftShowDeleted { + return &ControlMicrosoftShowDeleted{} +} + +// FindControl returns the first control of the given type in the list, or nil +func FindControl(controls []Control, controlType string) Control { + for _, c := range controls { + if c.GetControlType() == controlType { + return c + } + } + return nil +} + +// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made +func DecodeControl(packet *ber.Packet) (Control, error) { + var ( + ControlType = "" + Criticality = false + value *ber.Packet + ) + + switch len(packet.Children) { + case 0: + // at least one child is required for control type + return nil, fmt.Errorf("at least one child is required for control type") + + case 1: + // just type, no criticality or value + packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")" + ControlType = packet.Children[0].Value.(string) + + case 2: + packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")" + ControlType = packet.Children[0].Value.(string) + + // Children[1] could be criticality or value (both are optional) + // duck-type on whether this is a boolean + if _, ok := packet.Children[1].Value.(bool); ok { + packet.Children[1].Description = "Criticality" + Criticality = packet.Children[1].Value.(bool) + } else { + packet.Children[1].Description = "Control Value" + value = packet.Children[1] + } + + case 3: + packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")" + ControlType = packet.Children[0].Value.(string) + + packet.Children[1].Description = "Criticality" + Criticality = 
packet.Children[1].Value.(bool) + + packet.Children[2].Description = "Control Value" + value = packet.Children[2] + + default: + // more than 3 children is invalid + return nil, fmt.Errorf("more than 3 children is invalid for controls") + } + + switch ControlType { + case ControlTypeManageDsaIT: + return NewControlManageDsaIT(Criticality), nil + case ControlTypePaging: + value.Description += " (Paging)" + c := new(ControlPaging) + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + value.AppendChild(valueChildren) + } + value = value.Children[0] + value.Description = "Search Control Value" + value.Children[0].Description = "Paging Size" + value.Children[1].Description = "Cookie" + c.PagingSize = uint32(value.Children[0].Value.(int64)) + c.Cookie = value.Children[1].Data.Bytes() + value.Children[1].Value = c.Cookie + return c, nil + case ControlTypeBeheraPasswordPolicy: + value.Description += " (Password Policy - Behera)" + c := NewControlBeheraPasswordPolicy() + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + value.AppendChild(valueChildren) + } + + sequence := value.Children[0] + + for _, child := range sequence.Children { + if child.Tag == 0 { + //Warning + warningPacket := child.Children[0] + packet, err := ber.DecodePacketErr(warningPacket.Data.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode data bytes: %s", err) + } + val, ok := packet.Value.(int64) + if ok { + if warningPacket.Tag == 0 { + //timeBeforeExpiration + c.Expire = val + warningPacket.Value = c.Expire + } else if warningPacket.Tag == 1 { + //graceAuthNsRemaining + c.Grace = val + warningPacket.Value = c.Grace + } + } + } else if child.Tag == 1 { + // Error + packet, err := ber.DecodePacketErr(child.Data.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode data bytes: %s", err) + } + val, ok := packet.Value.(int8) + if !ok { + // what to do? 
+ val = -1 + } + c.Error = val + child.Value = c.Error + c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error] + } + } + return c, nil + case ControlTypeVChuPasswordMustChange: + c := &ControlVChuPasswordMustChange{MustChange: true} + return c, nil + case ControlTypeVChuPasswordWarning: + c := &ControlVChuPasswordWarning{Expire: -1} + expireStr := ber.DecodeString(value.Data.Bytes()) + + expire, err := strconv.ParseInt(expireStr, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse value as int: %s", err) + } + c.Expire = expire + value.Value = c.Expire + + return c, nil + case ControlTypeMicrosoftNotification: + return NewControlMicrosoftNotification(), nil + case ControlTypeMicrosoftShowDeleted: + return NewControlMicrosoftShowDeleted(), nil + default: + c := new(ControlString) + c.ControlType = ControlType + c.Criticality = Criticality + if value != nil { + c.ControlValue = value.Value.(string) + } + return c, nil + } +} + +// NewControlString returns a generic control +func NewControlString(controlType string, criticality bool, controlValue string) *ControlString { + return &ControlString{ + ControlType: controlType, + Criticality: criticality, + ControlValue: controlValue, + } +} + +// NewControlPaging returns a paging control +func NewControlPaging(pagingSize uint32) *ControlPaging { + return &ControlPaging{PagingSize: pagingSize} +} + +// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy +func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy { + return &ControlBeheraPasswordPolicy{ + Expire: -1, + Grace: -1, + Error: -1, + } +} + +func encodeControls(controls []Control) *ber.Packet { + packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls") + for _, control := range controls { + packet.AppendChild(control.Encode()) + } + return packet +} diff --git a/vendor/github.com/go-ldap/ldap/v3/debug.go b/vendor/github.com/go-ldap/ldap/v3/debug.go new file mode 100644 index 00000000..2c0b30c8 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/debug.go @@ -0,0 +1,30 @@ +package ldap + +import ( + "log" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// debugging type +// - has a Printf method to write the debug output +type debugging bool + +// Enable controls debugging mode. +func (debug *debugging) Enable(b bool) { + *debug = debugging(b) +} + +// Printf writes debug output. +func (debug debugging) Printf(format string, args ...interface{}) { + if debug { + log.Printf(format, args...) + } +} + +// PrintPacket dumps a packet. 
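// --- Illustrative usage sketch (editor's note; not part of the vendored diff) ---
// Driving the RFC 2696 paging control from control.go above by hand. In practice
// SearchWithPaging (declared in the Client interface) runs this cookie loop for you.
// NewSearchRequest, the Scope*/Deref* constants, and the SearchResult fields come
// from search.go, which is outside this hunk, so their use here is an assumption
// about the rest of the package; the base DN and filter are made up.
package main

import (
	"log"

	"github.com/go-ldap/ldap/v3"
)

func pagedSearch(conn *ldap.Conn) error {
	paging := ldap.NewControlPaging(100) // page size of 100 entries
	for {
		req := ldap.NewSearchRequest(
			"dc=example,dc=com", // hypothetical base DN
			ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
			"(objectClass=person)", []string{"cn"},
			[]ldap.Control{paging},
		)
		res, err := conn.Search(req)
		if err != nil {
			return err
		}
		log.Printf("got %d entries", len(res.Entries))

		// The server returns its own paging control carrying the cookie for the next page.
		ctrl, ok := ldap.FindControl(res.Controls, ldap.ControlTypePaging).(*ldap.ControlPaging)
		if !ok || len(ctrl.Cookie) == 0 {
			return nil // an absent or empty cookie means the final page has been delivered
		}
		paging.SetCookie(ctrl.Cookie)
	}
}

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.com") // hypothetical server
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	if err := pagedSearch(conn); err != nil {
		log.Fatal(err)
	}
}
// --- end of editor's sketch ---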
+func (debug debugging) PrintPacket(packet *ber.Packet) { + if debug { + ber.PrintPacket(packet) + } +} diff --git a/vendor/github.com/go-ldap/ldap/v3/del.go b/vendor/github.com/go-ldap/ldap/v3/del.go new file mode 100644 index 00000000..6e987267 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/del.go @@ -0,0 +1,59 @@ +package ldap + +import ( + "log" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// DelRequest implements an LDAP deletion request +type DelRequest struct { + // DN is the name of the directory entry to delete + DN string + // Controls hold optional controls to send with the request + Controls []Control +} + +func (req *DelRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, req.DN, "Del Request") + pkt.Data.Write([]byte(req.DN)) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil +} + +// NewDelRequest creates a delete request for the given DN and controls +func NewDelRequest(DN string, Controls []Control) *DelRequest { + return &DelRequest{ + DN: DN, + Controls: Controls, + } +} + +// Del executes the given delete request +func (l *Conn) Del(delRequest *DelRequest) error { + msgCtx, err := l.doRequest(delRequest) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return err + } + + if packet.Children[1].Tag == ApplicationDelResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/v3/dn.go b/vendor/github.com/go-ldap/ldap/v3/dn.go new file mode 100644 index 00000000..bff137cc --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/dn.go @@ -0,0 +1,207 @@ +package ldap + +import ( + "bytes" + enchex "encoding/hex" + "errors" + "fmt" + "strings" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514 +type AttributeTypeAndValue struct { + // Type is the attribute type + Type string + // Value is the attribute value + Value string +} + +// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514 +type RelativeDN struct { + Attributes []*AttributeTypeAndValue +} + +// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514 +type DN struct { + RDNs []*RelativeDN +} + +// ParseDN returns a distinguishedName or an error. 
+// The function respects https://tools.ietf.org/html/rfc4514 +func ParseDN(str string) (*DN, error) { + dn := new(DN) + dn.RDNs = make([]*RelativeDN, 0) + rdn := new(RelativeDN) + rdn.Attributes = make([]*AttributeTypeAndValue, 0) + buffer := bytes.Buffer{} + attribute := new(AttributeTypeAndValue) + escaping := false + + unescapedTrailingSpaces := 0 + stringFromBuffer := func() string { + s := buffer.String() + s = s[0 : len(s)-unescapedTrailingSpaces] + buffer.Reset() + unescapedTrailingSpaces = 0 + return s + } + + for i := 0; i < len(str); i++ { + char := str[i] + switch { + case escaping: + unescapedTrailingSpaces = 0 + escaping = false + switch char { + case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\': + buffer.WriteByte(char) + continue + } + // Not a special character, assume hex encoded octet + if len(str) == i+1 { + return nil, errors.New("got corrupted escaped character") + } + + dst := []byte{0} + n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2])) + if err != nil { + return nil, fmt.Errorf("failed to decode escaped character: %s", err) + } else if n != 1 { + return nil, fmt.Errorf("expected 1 byte when un-escaping, got %d", n) + } + buffer.WriteByte(dst[0]) + i++ + case char == '\\': + unescapedTrailingSpaces = 0 + escaping = true + case char == '=': + attribute.Type = stringFromBuffer() + // Special case: If the first character in the value is # the + // following data is BER encoded so we can just fast forward + // and decode. + if len(str) > i+1 && str[i+1] == '#' { + i += 2 + index := strings.IndexAny(str[i:], ",+") + data := str + if index > 0 { + data = str[i : i+index] + } else { + data = str[i:] + } + rawBER, err := enchex.DecodeString(data) + if err != nil { + return nil, fmt.Errorf("failed to decode BER encoding: %s", err) + } + packet, err := ber.DecodePacketErr(rawBER) + if err != nil { + return nil, fmt.Errorf("failed to decode BER packet: %s", err) + } + buffer.WriteString(packet.Data.String()) + i += len(data) - 1 + } + case char == ',' || char == '+': + // We're done with this RDN or value, push it + if len(attribute.Type) == 0 { + return nil, errors.New("incomplete type, value pair") + } + attribute.Value = stringFromBuffer() + rdn.Attributes = append(rdn.Attributes, attribute) + attribute = new(AttributeTypeAndValue) + if char == ',' { + dn.RDNs = append(dn.RDNs, rdn) + rdn = new(RelativeDN) + rdn.Attributes = make([]*AttributeTypeAndValue, 0) + } + case char == ' ' && buffer.Len() == 0: + // ignore unescaped leading spaces + continue + default: + if char == ' ' { + // Track unescaped spaces in case they are trailing and we need to remove them + unescapedTrailingSpaces++ + } else { + // Reset if we see a non-space char + unescapedTrailingSpaces = 0 + } + buffer.WriteByte(char) + } + } + if buffer.Len() > 0 { + if len(attribute.Type) == 0 { + return nil, errors.New("DN ended with incomplete type, value pair") + } + attribute.Value = stringFromBuffer() + rdn.Attributes = append(rdn.Attributes, attribute) + dn.RDNs = append(dn.RDNs, rdn) + } + return dn, nil +} + +// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch). +// Returns true if they have the same number of relative distinguished names +// and corresponding relative distinguished names (by position) are the same. 
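// --- Illustrative usage sketch (editor's note; not part of the vendored diff) ---
// Structural DN comparison with the dn.go helpers in this file: ParseDN handles
// RFC 4514 escaping, Equal implements distinguishedNameMatch, and AncestorOf
// (defined below) checks whether one DN sits underneath another. Attribute types
// compare case-insensitively, values exactly. The DNs are made-up examples.
package main

import (
	"fmt"
	"log"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	parent, err := ldap.ParseDN("ou=widgets,o=acme.com")
	if err != nil {
		log.Fatal(err)
	}
	child, err := ldap.ParseDN("ou=sprockets,ou=widgets,o=acme.com")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(parent.Equal(child))      // false: different number of RDNs
	fmt.Println(parent.AncestorOf(child)) // true: the trailing RDNs of child match parent
}
// --- end of editor's sketch ---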
+func (d *DN) Equal(other *DN) bool { + if len(d.RDNs) != len(other.RDNs) { + return false + } + for i := range d.RDNs { + if !d.RDNs[i].Equal(other.RDNs[i]) { + return false + } + } + return true +} + +// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN. +// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com" +// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com" +// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com" +func (d *DN) AncestorOf(other *DN) bool { + if len(d.RDNs) >= len(other.RDNs) { + return false + } + // Take the last `len(d.RDNs)` RDNs from the other DN to compare against + otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):] + for i := range d.RDNs { + if !d.RDNs[i].Equal(otherRDNs[i]) { + return false + } + } + return true +} + +// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch). +// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues +// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type. +// The order of attributes is not significant. +// Case of attribute types is not significant. +func (r *RelativeDN) Equal(other *RelativeDN) bool { + if len(r.Attributes) != len(other.Attributes) { + return false + } + return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes) +} + +func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool { + for _, attr := range attrs { + found := false + for _, myattr := range r.Attributes { + if myattr.Equal(attr) { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue +// Case of the attribute type is not significant +func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool { + return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value +} diff --git a/vendor/github.com/go-ldap/ldap/v3/doc.go b/vendor/github.com/go-ldap/ldap/v3/doc.go new file mode 100644 index 00000000..f20d39bc --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/doc.go @@ -0,0 +1,4 @@ +/* +Package ldap provides basic LDAP v3 functionality. 
+*/ +package ldap diff --git a/vendor/github.com/go-ldap/ldap/v3/error.go b/vendor/github.com/go-ldap/ldap/v3/error.go new file mode 100644 index 00000000..b1fda2d8 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/error.go @@ -0,0 +1,236 @@ +package ldap + +import ( + "fmt" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// LDAP Result Codes +const ( + LDAPResultSuccess = 0 + LDAPResultOperationsError = 1 + LDAPResultProtocolError = 2 + LDAPResultTimeLimitExceeded = 3 + LDAPResultSizeLimitExceeded = 4 + LDAPResultCompareFalse = 5 + LDAPResultCompareTrue = 6 + LDAPResultAuthMethodNotSupported = 7 + LDAPResultStrongAuthRequired = 8 + LDAPResultReferral = 10 + LDAPResultAdminLimitExceeded = 11 + LDAPResultUnavailableCriticalExtension = 12 + LDAPResultConfidentialityRequired = 13 + LDAPResultSaslBindInProgress = 14 + LDAPResultNoSuchAttribute = 16 + LDAPResultUndefinedAttributeType = 17 + LDAPResultInappropriateMatching = 18 + LDAPResultConstraintViolation = 19 + LDAPResultAttributeOrValueExists = 20 + LDAPResultInvalidAttributeSyntax = 21 + LDAPResultNoSuchObject = 32 + LDAPResultAliasProblem = 33 + LDAPResultInvalidDNSyntax = 34 + LDAPResultIsLeaf = 35 + LDAPResultAliasDereferencingProblem = 36 + LDAPResultInappropriateAuthentication = 48 + LDAPResultInvalidCredentials = 49 + LDAPResultInsufficientAccessRights = 50 + LDAPResultBusy = 51 + LDAPResultUnavailable = 52 + LDAPResultUnwillingToPerform = 53 + LDAPResultLoopDetect = 54 + LDAPResultSortControlMissing = 60 + LDAPResultOffsetRangeError = 61 + LDAPResultNamingViolation = 64 + LDAPResultObjectClassViolation = 65 + LDAPResultNotAllowedOnNonLeaf = 66 + LDAPResultNotAllowedOnRDN = 67 + LDAPResultEntryAlreadyExists = 68 + LDAPResultObjectClassModsProhibited = 69 + LDAPResultResultsTooLarge = 70 + LDAPResultAffectsMultipleDSAs = 71 + LDAPResultVirtualListViewErrorOrControlError = 76 + LDAPResultOther = 80 + LDAPResultServerDown = 81 + LDAPResultLocalError = 82 + LDAPResultEncodingError = 83 + LDAPResultDecodingError = 84 + LDAPResultTimeout = 85 + LDAPResultAuthUnknown = 86 + LDAPResultFilterError = 87 + LDAPResultUserCanceled = 88 + LDAPResultParamError = 89 + LDAPResultNoMemory = 90 + LDAPResultConnectError = 91 + LDAPResultNotSupported = 92 + LDAPResultControlNotFound = 93 + LDAPResultNoResultsReturned = 94 + LDAPResultMoreResultsToReturn = 95 + LDAPResultClientLoop = 96 + LDAPResultReferralLimitExceeded = 97 + LDAPResultInvalidResponse = 100 + LDAPResultAmbiguousResponse = 101 + LDAPResultTLSNotSupported = 112 + LDAPResultIntermediateResponse = 113 + LDAPResultUnknownType = 114 + LDAPResultCanceled = 118 + LDAPResultNoSuchOperation = 119 + LDAPResultTooLate = 120 + LDAPResultCannotCancel = 121 + LDAPResultAssertionFailed = 122 + LDAPResultAuthorizationDenied = 123 + LDAPResultSyncRefreshRequired = 4096 + + ErrorNetwork = 200 + ErrorFilterCompile = 201 + ErrorFilterDecompile = 202 + ErrorDebugging = 203 + ErrorUnexpectedMessage = 204 + ErrorUnexpectedResponse = 205 + ErrorEmptyPassword = 206 +) + +// LDAPResultCodeMap contains string descriptions for LDAP error codes +var LDAPResultCodeMap = map[uint16]string{ + LDAPResultSuccess: "Success", + LDAPResultOperationsError: "Operations Error", + LDAPResultProtocolError: "Protocol Error", + LDAPResultTimeLimitExceeded: "Time Limit Exceeded", + LDAPResultSizeLimitExceeded: "Size Limit Exceeded", + LDAPResultCompareFalse: "Compare False", + LDAPResultCompareTrue: "Compare True", + LDAPResultAuthMethodNotSupported: "Auth Method Not Supported", + LDAPResultStrongAuthRequired: "Strong Auth 
Required", + LDAPResultReferral: "Referral", + LDAPResultAdminLimitExceeded: "Admin Limit Exceeded", + LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension", + LDAPResultConfidentialityRequired: "Confidentiality Required", + LDAPResultSaslBindInProgress: "Sasl Bind In Progress", + LDAPResultNoSuchAttribute: "No Such Attribute", + LDAPResultUndefinedAttributeType: "Undefined Attribute Type", + LDAPResultInappropriateMatching: "Inappropriate Matching", + LDAPResultConstraintViolation: "Constraint Violation", + LDAPResultAttributeOrValueExists: "Attribute Or Value Exists", + LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax", + LDAPResultNoSuchObject: "No Such Object", + LDAPResultAliasProblem: "Alias Problem", + LDAPResultInvalidDNSyntax: "Invalid DN Syntax", + LDAPResultIsLeaf: "Is Leaf", + LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem", + LDAPResultInappropriateAuthentication: "Inappropriate Authentication", + LDAPResultInvalidCredentials: "Invalid Credentials", + LDAPResultInsufficientAccessRights: "Insufficient Access Rights", + LDAPResultBusy: "Busy", + LDAPResultUnavailable: "Unavailable", + LDAPResultUnwillingToPerform: "Unwilling To Perform", + LDAPResultLoopDetect: "Loop Detect", + LDAPResultSortControlMissing: "Sort Control Missing", + LDAPResultOffsetRangeError: "Result Offset Range Error", + LDAPResultNamingViolation: "Naming Violation", + LDAPResultObjectClassViolation: "Object Class Violation", + LDAPResultResultsTooLarge: "Results Too Large", + LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf", + LDAPResultNotAllowedOnRDN: "Not Allowed On RDN", + LDAPResultEntryAlreadyExists: "Entry Already Exists", + LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited", + LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs", + LDAPResultVirtualListViewErrorOrControlError: "Failed because of a problem related to the virtual list view", + LDAPResultOther: "Other", + LDAPResultServerDown: "Cannot establish a connection", + LDAPResultLocalError: "An error occurred", + LDAPResultEncodingError: "LDAP encountered an error while encoding", + LDAPResultDecodingError: "LDAP encountered an error while decoding", + LDAPResultTimeout: "LDAP timeout while waiting for a response from the server", + LDAPResultAuthUnknown: "The auth method requested in a bind request is unknown", + LDAPResultFilterError: "An error occurred while encoding the given search filter", + LDAPResultUserCanceled: "The user canceled the operation", + LDAPResultParamError: "An invalid parameter was specified", + LDAPResultNoMemory: "Out of memory error", + LDAPResultConnectError: "A connection to the server could not be established", + LDAPResultNotSupported: "An attempt has been made to use a feature not supported LDAP", + LDAPResultControlNotFound: "The controls required to perform the requested operation were not found", + LDAPResultNoResultsReturned: "No results were returned from the server", + LDAPResultMoreResultsToReturn: "There are more results in the chain of results", + LDAPResultClientLoop: "A loop has been detected. 
For example when following referrals", + LDAPResultReferralLimitExceeded: "The referral hop limit has been exceeded", + LDAPResultCanceled: "Operation was canceled", + LDAPResultNoSuchOperation: "Server has no knowledge of the operation requested for cancellation", + LDAPResultTooLate: "Too late to cancel the outstanding operation", + LDAPResultCannotCancel: "The identified operation does not support cancellation or the cancel operation cannot be performed", + LDAPResultAssertionFailed: "An assertion control given in the LDAP operation evaluated to false causing the operation to not be performed", + LDAPResultSyncRefreshRequired: "Refresh Required", + LDAPResultInvalidResponse: "Invalid Response", + LDAPResultAmbiguousResponse: "Ambiguous Response", + LDAPResultTLSNotSupported: "Tls Not Supported", + LDAPResultIntermediateResponse: "Intermediate Response", + LDAPResultUnknownType: "Unknown Type", + LDAPResultAuthorizationDenied: "Authorization Denied", + + ErrorNetwork: "Network Error", + ErrorFilterCompile: "Filter Compile Error", + ErrorFilterDecompile: "Filter Decompile Error", + ErrorDebugging: "Debugging Error", + ErrorUnexpectedMessage: "Unexpected Message", + ErrorUnexpectedResponse: "Unexpected Response", + ErrorEmptyPassword: "Empty password not allowed by the client", +} + +// Error holds LDAP error information +type Error struct { + // Err is the underlying error + Err error + // ResultCode is the LDAP error code + ResultCode uint16 + // MatchedDN is the matchedDN returned if any + MatchedDN string +} + +func (e *Error) Error() string { + return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error()) +} + +// GetLDAPError creates an Error out of a BER packet representing a LDAPResult +// The return is an error object. It can be casted to a Error structure. +// This function returns nil if resultCode in the LDAPResult sequence is success(0). 
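// --- Illustrative usage sketch (editor's note; not part of the vendored diff) ---
// Branching on LDAP result codes on the client side. Errors returned by this package
// wrap the server's result code in *Error, so IsErrorWithCode (defined below) is the
// usual way to distinguish, for example, invalid credentials from other failures.
// The server URL and DN are made up.
package main

import (
	"log"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.com") // hypothetical server
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	err = conn.Bind("cn=reader,dc=example,dc=com", "wrong-password")
	switch {
	case err == nil:
		log.Println("bound successfully")
	case ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials):
		log.Println("server rejected the credentials (result code 49)")
	default:
		log.Fatal(err)
	}
}
// --- end of editor's sketch ---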
+func GetLDAPError(packet *ber.Packet) error { + if packet == nil { + return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty packet")} + } + + if len(packet.Children) >= 2 { + response := packet.Children[1] + if response == nil { + return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty response in packet")} + } + if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 { + resultCode := uint16(response.Children[0].Value.(int64)) + if resultCode == 0 { // No error + return nil + } + return &Error{ResultCode: resultCode, MatchedDN: response.Children[1].Value.(string), + Err: fmt.Errorf("%s", response.Children[2].Value.(string))} + } + } + + return &Error{ResultCode: ErrorNetwork, Err: fmt.Errorf("Invalid packet format")} +} + +// NewError creates an LDAP error with the given code and underlying error +func NewError(resultCode uint16, err error) error { + return &Error{ResultCode: resultCode, Err: err} +} + +// IsErrorWithCode returns true if the given error is an LDAP error with the given result code +func IsErrorWithCode(err error, desiredResultCode uint16) bool { + if err == nil { + return false + } + + serverError, ok := err.(*Error) + if !ok { + return false + } + + return serverError.ResultCode == desiredResultCode +} diff --git a/vendor/github.com/go-ldap/ldap/v3/filter.go b/vendor/github.com/go-ldap/ldap/v3/filter.go new file mode 100644 index 00000000..73505e79 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/filter.go @@ -0,0 +1,487 @@ +package ldap + +import ( + "bytes" + hexpac "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "unicode" + "unicode/utf8" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// Filter choices +const ( + FilterAnd = 0 + FilterOr = 1 + FilterNot = 2 + FilterEqualityMatch = 3 + FilterSubstrings = 4 + FilterGreaterOrEqual = 5 + FilterLessOrEqual = 6 + FilterPresent = 7 + FilterApproxMatch = 8 + FilterExtensibleMatch = 9 +) + +// FilterMap contains human readable descriptions of Filter choices +var FilterMap = map[uint64]string{ + FilterAnd: "And", + FilterOr: "Or", + FilterNot: "Not", + FilterEqualityMatch: "Equality Match", + FilterSubstrings: "Substrings", + FilterGreaterOrEqual: "Greater Or Equal", + FilterLessOrEqual: "Less Or Equal", + FilterPresent: "Present", + FilterApproxMatch: "Approx Match", + FilterExtensibleMatch: "Extensible Match", +} + +// SubstringFilter options +const ( + FilterSubstringsInitial = 0 + FilterSubstringsAny = 1 + FilterSubstringsFinal = 2 +) + +// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices +var FilterSubstringsMap = map[uint64]string{ + FilterSubstringsInitial: "Substrings Initial", + FilterSubstringsAny: "Substrings Any", + FilterSubstringsFinal: "Substrings Final", +} + +// MatchingRuleAssertion choices +const ( + MatchingRuleAssertionMatchingRule = 1 + MatchingRuleAssertionType = 2 + MatchingRuleAssertionMatchValue = 3 + MatchingRuleAssertionDNAttributes = 4 +) + +// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices +var MatchingRuleAssertionMap = map[uint64]string{ + MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule", + MatchingRuleAssertionType: "Matching Rule Assertion Type", + MatchingRuleAssertionMatchValue: "Matching Rule Assertion Match Value", + MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes", +} + +var _SymbolAny = []byte{'*'} + +// CompileFilter converts a string 
representation of a filter into a BER-encoded packet +func CompileFilter(filter string) (*ber.Packet, error) { + if len(filter) == 0 || filter[0] != '(' { + return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('")) + } + packet, pos, err := compileFilter(filter, 1) + if err != nil { + return nil, err + } + switch { + case pos > len(filter): + return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + case pos < len(filter): + return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:]))) + } + return packet, nil +} + +// DecompileFilter converts a packet representation of a filter into a string representation +func DecompileFilter(packet *ber.Packet) (_ string, err error) { + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter")) + } + }() + + buf := bytes.NewBuffer(nil) + buf.WriteByte('(') + childStr := "" + + switch packet.Tag { + case FilterAnd: + buf.WriteByte('&') + for _, child := range packet.Children { + childStr, err = DecompileFilter(child) + if err != nil { + return + } + buf.WriteString(childStr) + } + case FilterOr: + buf.WriteByte('|') + for _, child := range packet.Children { + childStr, err = DecompileFilter(child) + if err != nil { + return + } + buf.WriteString(childStr) + } + case FilterNot: + buf.WriteByte('!') + childStr, err = DecompileFilter(packet.Children[0]) + if err != nil { + return + } + buf.WriteString(childStr) + + case FilterSubstrings: + buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes())) + buf.WriteByte('=') + for i, child := range packet.Children[1].Children { + if i == 0 && child.Tag != FilterSubstringsInitial { + buf.Write(_SymbolAny) + } + buf.WriteString(EscapeFilter(ber.DecodeString(child.Data.Bytes()))) + if child.Tag != FilterSubstringsFinal { + buf.Write(_SymbolAny) + } + } + case FilterEqualityMatch: + buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes())) + buf.WriteByte('=') + buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))) + case FilterGreaterOrEqual: + buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes())) + buf.WriteString(">=") + buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))) + case FilterLessOrEqual: + buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes())) + buf.WriteString("<=") + buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))) + case FilterPresent: + buf.WriteString(ber.DecodeString(packet.Data.Bytes())) + buf.WriteString("=*") + case FilterApproxMatch: + buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes())) + buf.WriteString("~=") + buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))) + case FilterExtensibleMatch: + attr := "" + dnAttributes := false + matchingRule := "" + value := "" + + for _, child := range packet.Children { + switch child.Tag { + case MatchingRuleAssertionMatchingRule: + matchingRule = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionType: + attr = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionMatchValue: + value = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionDNAttributes: + dnAttributes = child.Value.(bool) + } + } + + if len(attr) > 0 { + buf.WriteString(attr) + } + if dnAttributes { + buf.WriteString(":dn") + } + if len(matchingRule) > 0 { + 
buf.WriteString(":") + buf.WriteString(matchingRule) + } + buf.WriteString(":=") + buf.WriteString(EscapeFilter(value)) + } + + buf.WriteByte(')') + + return buf.String(), nil +} + +func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) { + for pos < len(filter) && filter[pos] == '(' { + child, newPos, err := compileFilter(filter, pos+1) + if err != nil { + return pos, err + } + pos = newPos + parent.AppendChild(child) + } + if pos == len(filter) { + return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + } + + return pos + 1, nil +} + +func compileFilter(filter string, pos int) (*ber.Packet, int, error) { + var ( + packet *ber.Packet + err error + ) + + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter")) + } + }() + newPos := pos + + currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:]) + + switch currentRune { + case utf8.RuneError: + return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos)) + case '(': + packet, newPos, err = compileFilter(filter, pos+currentWidth) + newPos++ + return packet, newPos, err + case '&': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd]) + newPos, err = compileFilterSet(filter, pos+currentWidth, packet) + return packet, newPos, err + case '|': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr]) + newPos, err = compileFilterSet(filter, pos+currentWidth, packet) + return packet, newPos, err + case '!': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot]) + var child *ber.Packet + child, newPos, err = compileFilter(filter, pos+currentWidth) + packet.AppendChild(child) + return packet, newPos, err + default: + const ( + stateReadingAttr = 0 + stateReadingExtensibleMatchingRule = 1 + stateReadingCondition = 2 + ) + + state := stateReadingAttr + attribute := bytes.NewBuffer(nil) + extensibleDNAttributes := false + extensibleMatchingRule := bytes.NewBuffer(nil) + condition := bytes.NewBuffer(nil) + + for newPos < len(filter) { + remainingFilter := filter[newPos:] + currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter) + if currentRune == ')' { + break + } + if currentRune == utf8.RuneError { + return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos)) + } + + switch state { + case stateReadingAttr: + switch { + // Extensible rule, with only DN-matching + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + extensibleDNAttributes = true + state = stateReadingCondition + newPos += 5 + + // Extensible rule, with DN-matching and a matching OID + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + extensibleDNAttributes = true + state = stateReadingExtensibleMatchingRule + newPos += 4 + + // Extensible rule, with attr only + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + state = stateReadingCondition + newPos += 2 + + // Extensible rule, with 
no DN attribute matching + case currentRune == ':': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + state = stateReadingExtensibleMatchingRule + newPos++ + + // Equality condition + case currentRune == '=': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch]) + state = stateReadingCondition + newPos++ + + // Greater-than or equal + case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual]) + state = stateReadingCondition + newPos += 2 + + // Less-than or equal + case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual]) + state = stateReadingCondition + newPos += 2 + + // Approx + case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch]) + state = stateReadingCondition + newPos += 2 + + // Still reading the attribute name + default: + attribute.WriteRune(currentRune) + newPos += currentWidth + } + + case stateReadingExtensibleMatchingRule: + switch { + + // Matching rule OID is done + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="): + state = stateReadingCondition + newPos += 2 + + // Still reading the matching rule oid + default: + extensibleMatchingRule.WriteRune(currentRune) + newPos += currentWidth + } + + case stateReadingCondition: + // append to the condition + condition.WriteRune(currentRune) + newPos += currentWidth + } + } + + if newPos == len(filter) { + err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + return packet, newPos, err + } + if packet == nil { + err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter")) + return packet, newPos, err + } + + switch { + case packet.Tag == FilterExtensibleMatch: + // MatchingRuleAssertion ::= SEQUENCE { + // matchingRule [1] MatchingRuleID OPTIONAL, + // type [2] AttributeDescription OPTIONAL, + // matchValue [3] AssertionValue, + // dnAttributes [4] BOOLEAN DEFAULT FALSE + // } + + // Include the matching rule oid, if specified + if extensibleMatchingRule.Len() > 0 { + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule.String(), MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule])) + } + + // Include the attribute, if specified + if attribute.Len() > 0 { + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute.String(), MatchingRuleAssertionMap[MatchingRuleAssertionType])) + } + + // Add the value (only required child) + encodedString, encodeErr := decodeEscapedSymbols(condition.Bytes()) + if encodeErr != nil { + return packet, newPos, encodeErr + } + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue])) + + // Defaults to false, so only include in the sequence if true + if extensibleDNAttributes { + packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes])) + } + + 
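+		// For example: an equality filter whose value is exactly "*", such as
+		// "(cn=*)", is re-encoded below as a present filter, while a value that
+		// merely contains "*", such as "(cn=ab*cd)", becomes a substrings filter
+		// with an initial part "ab" and a final part "cd".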
case packet.Tag == FilterEqualityMatch && bytes.Equal(condition.Bytes(), _SymbolAny): + packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute.String(), FilterMap[FilterPresent]) + case packet.Tag == FilterEqualityMatch && bytes.Index(condition.Bytes(), _SymbolAny) > -1: + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute.String(), "Attribute")) + packet.Tag = FilterSubstrings + packet.Description = FilterMap[uint64(packet.Tag)] + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings") + parts := bytes.Split(condition.Bytes(), _SymbolAny) + for i, part := range parts { + if len(part) == 0 { + continue + } + var tag ber.Tag + switch i { + case 0: + tag = FilterSubstringsInitial + case len(parts) - 1: + tag = FilterSubstringsFinal + default: + tag = FilterSubstringsAny + } + encodedString, encodeErr := decodeEscapedSymbols(part) + if encodeErr != nil { + return packet, newPos, encodeErr + } + seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)])) + } + packet.AppendChild(seq) + default: + encodedString, encodeErr := decodeEscapedSymbols(condition.Bytes()) + if encodeErr != nil { + return packet, newPos, encodeErr + } + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute.String(), "Attribute")) + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition")) + } + + newPos += currentWidth + return packet, newPos, err + } +} + +// Convert from "ABC\xx\xx\xx" form to literal bytes for transport +func decodeEscapedSymbols(src []byte) (string, error) { + + var ( + buffer bytes.Buffer + offset int + reader = bytes.NewReader(src) + byteHex []byte + byteVal []byte + ) + + for { + runeVal, runeSize, err := reader.ReadRune() + if err == io.EOF { + return buffer.String(), nil + } else if err != nil { + return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: failed to read filter: %v", err)) + } else if runeVal == unicode.ReplacementChar { + return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", offset)) + } + + if runeVal == '\\' { + // http://tools.ietf.org/search/rfc4515 + // \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not + // being a member of UTF1SUBSET. 
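+			// For example: the escaped sequences "\2a", "\28" and "\29" decode to the
+			// literal bytes '*', '(' and ')' respectively.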
+ if byteHex == nil { + byteHex = make([]byte, 2) + byteVal = make([]byte, 1) + } + + if _, err := io.ReadFull(reader, byteHex); err != nil { + if err == io.ErrUnexpectedEOF { + return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter")) + } + return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: invalid characters for escape in filter: %v", err)) + } + + if _, err := hexpac.Decode(byteVal, byteHex); err != nil { + return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: invalid characters for escape in filter: %v", err)) + } + + buffer.Write(byteVal) + } else { + buffer.WriteRune(runeVal) + } + + offset += runeSize + } +} diff --git a/vendor/github.com/go-ldap/ldap/v3/go.mod b/vendor/github.com/go-ldap/ldap/v3/go.mod new file mode 100644 index 00000000..9816ce0e --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/go.mod @@ -0,0 +1,5 @@ +module github.com/go-ldap/ldap/v3 + +go 1.13 + +require github.com/go-asn1-ber/asn1-ber v1.3.1 diff --git a/vendor/github.com/go-ldap/ldap/v3/go.sum b/vendor/github.com/go-ldap/ldap/v3/go.sum new file mode 100644 index 00000000..c8b9085b --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/go.sum @@ -0,0 +1,2 @@ +github.com/go-asn1-ber/asn1-ber v1.3.1 h1:gvPdv/Hr++TRFCl0UbPFHC54P9N9jgsRPnmnr419Uck= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= diff --git a/vendor/github.com/go-ldap/ldap/v3/ldap.go b/vendor/github.com/go-ldap/ldap/v3/ldap.go new file mode 100644 index 00000000..b29c018a --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/ldap.go @@ -0,0 +1,340 @@ +package ldap + +import ( + "fmt" + "io/ioutil" + "os" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// LDAP Application Codes +const ( + ApplicationBindRequest = 0 + ApplicationBindResponse = 1 + ApplicationUnbindRequest = 2 + ApplicationSearchRequest = 3 + ApplicationSearchResultEntry = 4 + ApplicationSearchResultDone = 5 + ApplicationModifyRequest = 6 + ApplicationModifyResponse = 7 + ApplicationAddRequest = 8 + ApplicationAddResponse = 9 + ApplicationDelRequest = 10 + ApplicationDelResponse = 11 + ApplicationModifyDNRequest = 12 + ApplicationModifyDNResponse = 13 + ApplicationCompareRequest = 14 + ApplicationCompareResponse = 15 + ApplicationAbandonRequest = 16 + ApplicationSearchResultReference = 19 + ApplicationExtendedRequest = 23 + ApplicationExtendedResponse = 24 +) + +// ApplicationMap contains human readable descriptions of LDAP Application Codes +var ApplicationMap = map[uint8]string{ + ApplicationBindRequest: "Bind Request", + ApplicationBindResponse: "Bind Response", + ApplicationUnbindRequest: "Unbind Request", + ApplicationSearchRequest: "Search Request", + ApplicationSearchResultEntry: "Search Result Entry", + ApplicationSearchResultDone: "Search Result Done", + ApplicationModifyRequest: "Modify Request", + ApplicationModifyResponse: "Modify Response", + ApplicationAddRequest: "Add Request", + ApplicationAddResponse: "Add Response", + ApplicationDelRequest: "Del Request", + ApplicationDelResponse: "Del Response", + ApplicationModifyDNRequest: "Modify DN Request", + ApplicationModifyDNResponse: "Modify DN Response", + ApplicationCompareRequest: "Compare Request", + ApplicationCompareResponse: "Compare Response", + ApplicationAbandonRequest: "Abandon Request", + ApplicationSearchResultReference: "Search Result Reference", + ApplicationExtendedRequest: "Extended Request", + ApplicationExtendedResponse: "Extended Response", +} + +// Ldap Behera Password Policy Draft 10 
(https://tools.ietf.org/html/draft-behera-ldap-password-policy-10) +const ( + BeheraPasswordExpired = 0 + BeheraAccountLocked = 1 + BeheraChangeAfterReset = 2 + BeheraPasswordModNotAllowed = 3 + BeheraMustSupplyOldPassword = 4 + BeheraInsufficientPasswordQuality = 5 + BeheraPasswordTooShort = 6 + BeheraPasswordTooYoung = 7 + BeheraPasswordInHistory = 8 +) + +// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes +var BeheraPasswordPolicyErrorMap = map[int8]string{ + BeheraPasswordExpired: "Password expired", + BeheraAccountLocked: "Account locked", + BeheraChangeAfterReset: "Password must be changed", + BeheraPasswordModNotAllowed: "Policy prevents password modification", + BeheraMustSupplyOldPassword: "Policy requires old password in order to change password", + BeheraInsufficientPasswordQuality: "Password fails quality checks", + BeheraPasswordTooShort: "Password is too short for policy", + BeheraPasswordTooYoung: "Password has been changed too recently", + BeheraPasswordInHistory: "New password is in list of old passwords", +} + +// Adds descriptions to an LDAP Response packet for debugging +func addLDAPDescriptions(packet *ber.Packet) (err error) { + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorDebugging, fmt.Errorf("ldap: cannot process packet to add descriptions: %s", r)) + } + }() + packet.Description = "LDAP Response" + packet.Children[0].Description = "Message ID" + + application := uint8(packet.Children[1].Tag) + packet.Children[1].Description = ApplicationMap[application] + + switch application { + case ApplicationBindRequest: + err = addRequestDescriptions(packet) + case ApplicationBindResponse: + err = addDefaultLDAPResponseDescriptions(packet) + case ApplicationUnbindRequest: + err = addRequestDescriptions(packet) + case ApplicationSearchRequest: + err = addRequestDescriptions(packet) + case ApplicationSearchResultEntry: + packet.Children[1].Children[0].Description = "Object Name" + packet.Children[1].Children[1].Description = "Attributes" + for _, child := range packet.Children[1].Children[1].Children { + child.Description = "Attribute" + child.Children[0].Description = "Attribute Name" + child.Children[1].Description = "Attribute Values" + for _, grandchild := range child.Children[1].Children { + grandchild.Description = "Attribute Value" + } + } + if len(packet.Children) == 3 { + err = addControlDescriptions(packet.Children[2]) + } + case ApplicationSearchResultDone: + err = addDefaultLDAPResponseDescriptions(packet) + case ApplicationModifyRequest: + err = addRequestDescriptions(packet) + case ApplicationModifyResponse: + case ApplicationAddRequest: + err = addRequestDescriptions(packet) + case ApplicationAddResponse: + case ApplicationDelRequest: + err = addRequestDescriptions(packet) + case ApplicationDelResponse: + case ApplicationModifyDNRequest: + err = addRequestDescriptions(packet) + case ApplicationModifyDNResponse: + case ApplicationCompareRequest: + err = addRequestDescriptions(packet) + case ApplicationCompareResponse: + case ApplicationAbandonRequest: + err = addRequestDescriptions(packet) + case ApplicationSearchResultReference: + case ApplicationExtendedRequest: + err = addRequestDescriptions(packet) + case ApplicationExtendedResponse: + } + + return err +} + +func addControlDescriptions(packet *ber.Packet) error { + packet.Description = "Controls" + for _, child := range packet.Children { + var value *ber.Packet + controlType := "" + child.Description = "Control" + switch 
len(child.Children) { + case 0: + // at least one child is required for control type + return fmt.Errorf("at least one child is required for control type") + + case 1: + // just type, no criticality or value + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + + case 2: + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + // Children[1] could be criticality or value (both are optional) + // duck-type on whether this is a boolean + if _, ok := child.Children[1].Value.(bool); ok { + child.Children[1].Description = "Criticality" + } else { + child.Children[1].Description = "Control Value" + value = child.Children[1] + } + + case 3: + // criticality and value present + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + child.Children[1].Description = "Criticality" + child.Children[2].Description = "Control Value" + value = child.Children[2] + + default: + // more than 3 children is invalid + return fmt.Errorf("more than 3 children for control packet found") + } + + if value == nil { + continue + } + switch controlType { + case ControlTypePaging: + value.Description += " (Paging)" + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes() + value.AppendChild(valueChildren) + } + value.Children[0].Description = "Real Search Control Value" + value.Children[0].Children[0].Description = "Paging Size" + value.Children[0].Children[1].Description = "Cookie" + + case ControlTypeBeheraPasswordPolicy: + value.Description += " (Password Policy - Behera Draft)" + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + value.AppendChild(valueChildren) + } + sequence := value.Children[0] + for _, child := range sequence.Children { + if child.Tag == 0 { + //Warning + warningPacket := child.Children[0] + packet, err := ber.DecodePacketErr(warningPacket.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + val, ok := packet.Value.(int64) + if ok { + if warningPacket.Tag == 0 { + //timeBeforeExpiration + value.Description += " (TimeBeforeExpiration)" + warningPacket.Value = val + } else if warningPacket.Tag == 1 { + //graceAuthNsRemaining + value.Description += " (GraceAuthNsRemaining)" + warningPacket.Value = val + } + } + } else if child.Tag == 1 { + // Error + packet, err := ber.DecodePacketErr(child.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + val, ok := packet.Value.(int8) + if !ok { + val = -1 + } + child.Description = "Error" + child.Value = val + } + } + } + } + return nil +} + +func addRequestDescriptions(packet *ber.Packet) error { + packet.Description = "LDAP Request" + packet.Children[0].Description = "Message ID" + packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)] + if len(packet.Children) == 3 { + return addControlDescriptions(packet.Children[2]) + } + return nil +} + +func addDefaultLDAPResponseDescriptions(packet *ber.Packet) error { + 
err := GetLDAPError(packet) + if err == nil { + return nil + } + packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[err.(*Error).ResultCode] + ")" + packet.Children[1].Children[1].Description = "Matched DN (" + err.(*Error).MatchedDN + ")" + packet.Children[1].Children[2].Description = "Error Message" + if len(packet.Children[1].Children) > 3 { + packet.Children[1].Children[3].Description = "Referral" + } + if len(packet.Children) == 3 { + return addControlDescriptions(packet.Children[2]) + } + return nil +} + +// DebugBinaryFile reads and prints packets from the given filename +func DebugBinaryFile(fileName string) error { + file, err := ioutil.ReadFile(fileName) + if err != nil { + return NewError(ErrorDebugging, err) + } + ber.PrintBytes(os.Stdout, file, "") + packet, err := ber.DecodePacketErr(file) + if err != nil { + return fmt.Errorf("failed to decode packet: %s", err) + } + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + + return nil +} + +var hex = "0123456789abcdef" + +func mustEscape(c byte) bool { + return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0 +} + +// EscapeFilter escapes from the provided LDAP filter string the special +// characters in the set `()*\` and those out of the range 0 < c < 0x80, +// as defined in RFC4515. +func EscapeFilter(filter string) string { + escape := 0 + for i := 0; i < len(filter); i++ { + if mustEscape(filter[i]) { + escape++ + } + } + if escape == 0 { + return filter + } + buf := make([]byte, len(filter)+escape*2) + for i, j := 0, 0; i < len(filter); i++ { + c := filter[i] + if mustEscape(c) { + buf[j+0] = '\\' + buf[j+1] = hex[c>>4] + buf[j+2] = hex[c&0xf] + j += 3 + } else { + buf[j] = c + j++ + } + } + return string(buf) +} diff --git a/vendor/github.com/go-ldap/ldap/v3/moddn.go b/vendor/github.com/go-ldap/ldap/v3/moddn.go new file mode 100644 index 00000000..380b8cf6 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/moddn.go @@ -0,0 +1,75 @@ +package ldap + +import ( + "log" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// ModifyDNRequest holds the request to modify a DN +type ModifyDNRequest struct { + DN string + NewRDN string + DeleteOldRDN bool + NewSuperior string +} + +// NewModifyDNRequest creates a new request which can be passed to ModifyDN(). +// +// To move an object in the tree, set the "newSup" to the new parent entry DN. Use an +// empty string for just changing the object's RDN. +// +// For moving the object without renaming, the "rdn" must be the first +// RDN of the given DN. +// +// A call like +// mdnReq := NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "") +// will setup the request to just rename uid=someone,dc=example,dc=org to +// uid=newname,dc=example,dc=org. 
+func NewModifyDNRequest(dn string, rdn string, delOld bool, newSup string) *ModifyDNRequest { + return &ModifyDNRequest{ + DN: dn, + NewRDN: rdn, + DeleteOldRDN: delOld, + NewSuperior: newSup, + } +} + +func (req *ModifyDNRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyDNRequest, nil, "Modify DN Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN")) + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.NewRDN, "New RDN")) + pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.DeleteOldRDN, "Delete old RDN")) + if req.NewSuperior != "" { + pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.NewSuperior, "New Superior")) + } + + envelope.AppendChild(pkt) + + return nil +} + +// ModifyDN renames the given DN and optionally move to another base (when the "newSup" argument +// to NewModifyDNRequest() is not ""). +func (l *Conn) ModifyDN(m *ModifyDNRequest) error { + msgCtx, err := l.doRequest(m) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return err + } + + if packet.Children[1].Tag == ApplicationModifyDNResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/v3/modify.go b/vendor/github.com/go-ldap/ldap/v3/modify.go new file mode 100644 index 00000000..ee712890 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/modify.go @@ -0,0 +1,132 @@ +package ldap + +import ( + "log" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// Change operation choices +const ( + AddAttribute = 0 + DeleteAttribute = 1 + ReplaceAttribute = 2 + IncrementAttribute = 3 // (https://tools.ietf.org/html/rfc4525) +) + +// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type PartialAttribute struct { + // Type is the type of the partial attribute + Type string + // Vals are the values of the partial attribute + Vals []string +} + +func (p *PartialAttribute) encode() *ber.Packet { + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute") + seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type")) + set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue") + for _, value := range p.Vals { + set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals")) + } + seq.AppendChild(set) + return seq +} + +// Change for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type Change struct { + // Operation is the type of change to be made + Operation uint + // Modification is the attribute to be modified + Modification PartialAttribute +} + +func (c *Change) encode() *ber.Packet { + change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change") + change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(c.Operation), "Operation")) + change.AppendChild(c.Modification.encode()) + return change +} + +// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type ModifyRequest struct { + // DN is the distinguishedName of the directory entry to modify + DN 
string + // Changes contain the attributes to modify + Changes []Change + // Controls hold optional controls to send with the request + Controls []Control +} + +// Add appends the given attribute to the list of changes to be made +func (req *ModifyRequest) Add(attrType string, attrVals []string) { + req.appendChange(AddAttribute, attrType, attrVals) +} + +// Delete appends the given attribute to the list of changes to be made +func (req *ModifyRequest) Delete(attrType string, attrVals []string) { + req.appendChange(DeleteAttribute, attrType, attrVals) +} + +// Replace appends the given attribute to the list of changes to be made +func (req *ModifyRequest) Replace(attrType string, attrVals []string) { + req.appendChange(ReplaceAttribute, attrType, attrVals) +} + +// Increment appends the given attribute to the list of changes to be made +func (req *ModifyRequest) Increment(attrType string, attrVal string) { + req.appendChange(IncrementAttribute, attrType, []string{attrVal}) +} + +func (req *ModifyRequest) appendChange(operation uint, attrType string, attrVals []string) { + req.Changes = append(req.Changes, Change{operation, PartialAttribute{Type: attrType, Vals: attrVals}}) +} + +func (req *ModifyRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN")) + changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes") + for _, change := range req.Changes { + changes.AppendChild(change.encode()) + } + pkt.AppendChild(changes) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil +} + +// NewModifyRequest creates a modify request for the given DN +func NewModifyRequest(dn string, controls []Control) *ModifyRequest { + return &ModifyRequest{ + DN: dn, + Controls: controls, + } +} + +// Modify performs the ModifyRequest +func (l *Conn) Modify(modifyRequest *ModifyRequest) error { + msgCtx, err := l.doRequest(modifyRequest) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return err + } + + if packet.Children[1].Tag == ApplicationModifyResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/v3/passwdmodify.go b/vendor/github.com/go-ldap/ldap/v3/passwdmodify.go new file mode 100644 index 00000000..62a11084 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/passwdmodify.go @@ -0,0 +1,126 @@ +package ldap + +import ( + "fmt" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +const ( + passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1" +) + +// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt +type PasswordModifyRequest struct { + // UserIdentity is an optional string representation of the user associated with the request. + // This string may or may not be an LDAPDN [RFC2253]. 
+	// If no UserIdentity field is present, the request acts upon the password of the user currently associated with the LDAP session
+	UserIdentity string
+	// OldPassword, if present, contains the user's current password
+	OldPassword string
+	// NewPassword, if present, contains the desired password for this user
+	NewPassword string
+}
+
+// PasswordModifyResult holds the server response to a PasswordModifyRequest
+type PasswordModifyResult struct {
+	// GeneratedPassword holds a password generated by the server, if present
+	GeneratedPassword string
+	// Referral is the returned referral, if any
+	Referral string
+}
+
+func (req *PasswordModifyRequest) appendTo(envelope *ber.Packet) error {
+	pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation")
+	pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID"))
+
+	extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request")
+	passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request")
+	if req.UserIdentity != "" {
+		passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.UserIdentity, "User Identity"))
+	}
+	if req.OldPassword != "" {
+		passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, req.OldPassword, "Old Password"))
+	}
+	if req.NewPassword != "" {
+		passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, req.NewPassword, "New Password"))
+	}
+	extendedRequestValue.AppendChild(passwordModifyRequestValue)
+
+	pkt.AppendChild(extendedRequestValue)
+
+	envelope.AppendChild(pkt)
+
+	return nil
+}
+
+// NewPasswordModifyRequest creates a new PasswordModifyRequest
+//
+// According to RFC 3062 (https://tools.ietf.org/html/rfc3062):
+// userIdentity is a string representing the user associated with the request.
+// This string may or may not be an LDAPDN (RFC 2253).
+// If userIdentity is empty then the operation will act on the user associated
+// with the session.
+//
+// oldPassword is the current user's password; it can be empty, or it can be
+// required depending on the session user's access rights (usually an administrator
+// can change a user's password without knowing the current one) and the
+// password policy (see the pwdSafeModify password policy attribute).
+//
+// newPassword is the desired user's password.
If empty the server can return +// an error or generate a new password that will be available in the +// PasswordModifyResult.GeneratedPassword +// +func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest { + return &PasswordModifyRequest{ + UserIdentity: userIdentity, + OldPassword: oldPassword, + NewPassword: newPassword, + } +} + +// PasswordModify performs the modification request +func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) { + msgCtx, err := l.doRequest(passwordModifyRequest) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return nil, err + } + + result := &PasswordModifyResult{} + + if packet.Children[1].Tag == ApplicationExtendedResponse { + err := GetLDAPError(packet) + if err != nil { + if IsErrorWithCode(err, LDAPResultReferral) { + for _, child := range packet.Children[1].Children { + if child.Tag == 3 { + result.Referral = child.Children[0].Value.(string) + } + } + } + return result, err + } + } else { + return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag)) + } + + extendedResponse := packet.Children[1] + for _, child := range extendedResponse.Children { + if child.Tag == 11 { + passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes()) + if len(passwordModifyResponseValue.Children) == 1 { + if passwordModifyResponseValue.Children[0].Tag == 0 { + result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes()) + } + } + } + } + + return result, nil +} diff --git a/vendor/github.com/go-ldap/ldap/v3/request.go b/vendor/github.com/go-ldap/ldap/v3/request.go new file mode 100644 index 00000000..8c68f34a --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/request.go @@ -0,0 +1,66 @@ +package ldap + +import ( + "errors" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +var ( + errRespChanClosed = errors.New("ldap: response channel closed") + errCouldNotRetMsg = errors.New("ldap: could not retrieve message") +) + +type request interface { + appendTo(*ber.Packet) error +} + +type requestFunc func(*ber.Packet) error + +func (f requestFunc) appendTo(p *ber.Packet) error { + return f(p) +} + +func (l *Conn) doRequest(req request) (*messageContext, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + if err := req.appendTo(packet); err != nil { + return nil, err + } + + if l.Debug { + l.Debug.PrintPacket(packet) + } + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + l.Debug.Printf("%d: returning", msgCtx.id) + return msgCtx, nil +} + +func (l *Conn) readPacket(msgCtx *messageContext) (*ber.Packet, error) { + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errRespChanClosed) + } + packet, err := packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, err + } + + if packet == nil { + return nil, NewError(ErrorNetwork, errCouldNotRetMsg) + } + + if l.Debug { + if err = addLDAPDescriptions(packet); err != nil { + return nil, err + } + l.Debug.PrintPacket(packet) + } + return packet, nil +} diff --git 
a/vendor/github.com/go-ldap/ldap/v3/search.go b/vendor/github.com/go-ldap/ldap/v3/search.go new file mode 100644 index 00000000..9a5c2c86 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/search.go @@ -0,0 +1,370 @@ +package ldap + +import ( + "errors" + "fmt" + "sort" + "strings" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// scope choices +const ( + ScopeBaseObject = 0 + ScopeSingleLevel = 1 + ScopeWholeSubtree = 2 +) + +// ScopeMap contains human readable descriptions of scope choices +var ScopeMap = map[int]string{ + ScopeBaseObject: "Base Object", + ScopeSingleLevel: "Single Level", + ScopeWholeSubtree: "Whole Subtree", +} + +// derefAliases +const ( + NeverDerefAliases = 0 + DerefInSearching = 1 + DerefFindingBaseObj = 2 + DerefAlways = 3 +) + +// DerefMap contains human readable descriptions of derefAliases choices +var DerefMap = map[int]string{ + NeverDerefAliases: "NeverDerefAliases", + DerefInSearching: "DerefInSearching", + DerefFindingBaseObj: "DerefFindingBaseObj", + DerefAlways: "DerefAlways", +} + +// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs. +// The map of attributes is accessed in alphabetical order of the keys in order to ensure that, for the +// same input map of attributes, the output entry will contain the same order of attributes +func NewEntry(dn string, attributes map[string][]string) *Entry { + var attributeNames []string + for attributeName := range attributes { + attributeNames = append(attributeNames, attributeName) + } + sort.Strings(attributeNames) + + var encodedAttributes []*EntryAttribute + for _, attributeName := range attributeNames { + encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName])) + } + return &Entry{ + DN: dn, + Attributes: encodedAttributes, + } +} + +// Entry represents a single search result entry +type Entry struct { + // DN is the distinguished name of the entry + DN string + // Attributes are the returned attributes for the entry + Attributes []*EntryAttribute +} + +// GetAttributeValues returns the values for the named attribute, or an empty list +func (e *Entry) GetAttributeValues(attribute string) []string { + for _, attr := range e.Attributes { + if attr.Name == attribute { + return attr.Values + } + } + return []string{} +} + +// GetRawAttributeValues returns the byte values for the named attribute, or an empty list +func (e *Entry) GetRawAttributeValues(attribute string) [][]byte { + for _, attr := range e.Attributes { + if attr.Name == attribute { + return attr.ByteValues + } + } + return [][]byte{} +} + +// GetAttributeValue returns the first value for the named attribute, or "" +func (e *Entry) GetAttributeValue(attribute string) string { + values := e.GetAttributeValues(attribute) + if len(values) == 0 { + return "" + } + return values[0] +} + +// GetRawAttributeValue returns the first value for the named attribute, or an empty slice +func (e *Entry) GetRawAttributeValue(attribute string) []byte { + values := e.GetRawAttributeValues(attribute) + if len(values) == 0 { + return []byte{} + } + return values[0] +} + +// Print outputs a human-readable description +func (e *Entry) Print() { + fmt.Printf("DN: %s\n", e.DN) + for _, attr := range e.Attributes { + attr.Print() + } +} + +// PrettyPrint outputs a human-readable description indenting +func (e *Entry) PrettyPrint(indent int) { + fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN) + for _, attr := range e.Attributes { + attr.PrettyPrint(indent + 2) + 
} +} + +// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair +func NewEntryAttribute(name string, values []string) *EntryAttribute { + var bytes [][]byte + for _, value := range values { + bytes = append(bytes, []byte(value)) + } + return &EntryAttribute{ + Name: name, + Values: values, + ByteValues: bytes, + } +} + +// EntryAttribute holds a single attribute +type EntryAttribute struct { + // Name is the name of the attribute + Name string + // Values contain the string values of the attribute + Values []string + // ByteValues contain the raw values of the attribute + ByteValues [][]byte +} + +// Print outputs a human-readable description +func (e *EntryAttribute) Print() { + fmt.Printf("%s: %s\n", e.Name, e.Values) +} + +// PrettyPrint outputs a human-readable description with indenting +func (e *EntryAttribute) PrettyPrint(indent int) { + fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values) +} + +// SearchResult holds the server's response to a search request +type SearchResult struct { + // Entries are the returned entries + Entries []*Entry + // Referrals are the returned referrals + Referrals []string + // Controls are the returned controls + Controls []Control +} + +// Print outputs a human-readable description +func (s *SearchResult) Print() { + for _, entry := range s.Entries { + entry.Print() + } +} + +// PrettyPrint outputs a human-readable description with indenting +func (s *SearchResult) PrettyPrint(indent int) { + for _, entry := range s.Entries { + entry.PrettyPrint(indent) + } +} + +// SearchRequest represents a search request to send to the server +type SearchRequest struct { + BaseDN string + Scope int + DerefAliases int + SizeLimit int + TimeLimit int + TypesOnly bool + Filter string + Attributes []string + Controls []Control +} + +func (req *SearchRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.BaseDN, "Base DN")) + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.Scope), "Scope")) + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.DerefAliases), "Deref Aliases")) + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.SizeLimit), "Size Limit")) + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.TimeLimit), "Time Limit")) + pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.TypesOnly, "Types Only")) + // compile and encode filter + filterPacket, err := CompileFilter(req.Filter) + if err != nil { + return err + } + pkt.AppendChild(filterPacket) + // encode attributes + attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes") + for _, attribute := range req.Attributes { + attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute")) + } + pkt.AppendChild(attributesPacket) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil +} + +// NewSearchRequest creates a new search request +func NewSearchRequest( + BaseDN string, + Scope, DerefAliases, SizeLimit, TimeLimit int, + TypesOnly bool, + Filter string, + 
Attributes []string, + Controls []Control, +) *SearchRequest { + return &SearchRequest{ + BaseDN: BaseDN, + Scope: Scope, + DerefAliases: DerefAliases, + SizeLimit: SizeLimit, + TimeLimit: TimeLimit, + TypesOnly: TypesOnly, + Filter: Filter, + Attributes: Attributes, + Controls: Controls, + } +} + +// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the +// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically. +// The following four cases are possible given the arguments: +// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size +// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries +// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request +// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries +// A requested pagingSize of 0 is interpreted as no limit by LDAP servers. +func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) { + var pagingControl *ControlPaging + + control := FindControl(searchRequest.Controls, ControlTypePaging) + if control == nil { + pagingControl = NewControlPaging(pagingSize) + searchRequest.Controls = append(searchRequest.Controls, pagingControl) + } else { + castControl, ok := control.(*ControlPaging) + if !ok { + return nil, fmt.Errorf("expected paging control to be of type *ControlPaging, got %v", control) + } + if castControl.PagingSize != pagingSize { + return nil, fmt.Errorf("paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize) + } + pagingControl = castControl + } + + searchResult := new(SearchResult) + for { + result, err := l.Search(searchRequest) + l.Debug.Printf("Looking for Paging Control...") + if err != nil { + return searchResult, err + } + if result == nil { + return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received")) + } + + for _, entry := range result.Entries { + searchResult.Entries = append(searchResult.Entries, entry) + } + for _, referral := range result.Referrals { + searchResult.Referrals = append(searchResult.Referrals, referral) + } + for _, control := range result.Controls { + searchResult.Controls = append(searchResult.Controls, control) + } + + l.Debug.Printf("Looking for Paging Control...") + pagingResult := FindControl(result.Controls, ControlTypePaging) + if pagingResult == nil { + pagingControl = nil + l.Debug.Printf("Could not find paging control. Breaking...") + break + } + + cookie := pagingResult.(*ControlPaging).Cookie + if len(cookie) == 0 { + pagingControl = nil + l.Debug.Printf("Could not find cookie. 
Breaking...") + break + } + pagingControl.SetCookie(cookie) + } + + if pagingControl != nil { + l.Debug.Printf("Abandoning Paging...") + pagingControl.PagingSize = 0 + l.Search(searchRequest) + } + + return searchResult, nil +} + +// Search performs the given search request +func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) { + msgCtx, err := l.doRequest(searchRequest) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + result := &SearchResult{ + Entries: make([]*Entry, 0), + Referrals: make([]string, 0), + Controls: make([]Control, 0)} + + for { + packet, err := l.readPacket(msgCtx) + if err != nil { + return nil, err + } + + switch packet.Children[1].Tag { + case 4: + entry := new(Entry) + entry.DN = packet.Children[1].Children[0].Value.(string) + for _, child := range packet.Children[1].Children[1].Children { + attr := new(EntryAttribute) + attr.Name = child.Children[0].Value.(string) + for _, value := range child.Children[1].Children { + attr.Values = append(attr.Values, value.Value.(string)) + attr.ByteValues = append(attr.ByteValues, value.ByteValue) + } + entry.Attributes = append(entry.Attributes, attr) + } + result.Entries = append(result.Entries, entry) + case 5: + err := GetLDAPError(packet) + if err != nil { + return nil, err + } + if len(packet.Children) == 3 { + for _, child := range packet.Children[2].Children { + decodedChild, err := DecodeControl(child) + if err != nil { + return nil, fmt.Errorf("failed to decode child control: %s", err) + } + result.Controls = append(result.Controls, decodedChild) + } + } + return result, nil + case 19: + result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string)) + } + } +} diff --git a/vendor/github.com/goshuirc/e-nfa/.travis.yml b/vendor/github.com/goshuirc/e-nfa/.travis.yml new file mode 100644 index 00000000..9e4d19ec --- /dev/null +++ b/vendor/github.com/goshuirc/e-nfa/.travis.yml @@ -0,0 +1,19 @@ +language: go + +go: + - 1.4 + - tip + +before_install: + - go get golang.org/x/tools/cmd/cover + - go get golang.org/x/tools/cmd/vet + - go get golang.org/x/tools/cmd/goimports + - go get github.com/golang/lint/golint + - go get github.com/mattn/goveralls + +script: + - go vet ./... +# - $HOME/gopath/bin/goveralls -coverprofile=coverage.cov -service=travis-ci +# - bash <(curl -s https://codecov.io/bash) + - go test -bench=. -benchmem ./... 
+  #- sh ./install_all_cmd.sh
\ No newline at end of file
diff --git a/vendor/github.com/goshuirc/e-nfa/README.md b/vendor/github.com/goshuirc/e-nfa/README.md
new file mode 100644
index 00000000..cf13e416
--- /dev/null
+++ b/vendor/github.com/goshuirc/e-nfa/README.md
@@ -0,0 +1,122 @@
+ε-NFA: Epsilon-Nondeterministic finite automaton
+==============
+
+[![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://raw.githubusercontent.com/kkdai/e-nfa/master/LICENSE) [![GoDoc](https://godoc.org/github.com/kkdai/e-nfa?status.svg)](https://godoc.org/github.com/kkdai/e-nfa) [![Build Status](https://travis-ci.org/kkdai/e-nfa.svg?branch=master)](https://travis-ci.org/kkdai/e-nfa)
+
+
+
+![image](https://upload.wikimedia.org/wikipedia/commons/thumb/0/0e/NFAexample.svg/250px-NFAexample.svg.png)
+
+
+
+What is an Epsilon-Nondeterministic finite automaton?
+=============
+
+`ε-NFA`: Epsilon-Nondeterministic finite automaton (so called: nondeterministic finite automaton with ε-moves)
+
+In automata theory, a nondeterministic finite automaton with ε-moves (NFA-ε), also known as NFA-λ, is an extension of the nondeterministic finite automaton (NFA) which allows a transition to a new state without consuming any input symbol. Transitions that consume no input symbol are called ε-transitions or λ-transitions. In state diagrams they are usually labeled with the Greek letter ε or λ.
+
+(cited from [here](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton))
+
+
+Looking for a DFA implementation?
+=============
+
+I also wrote a DFA implementation in Go: [https://github.com/kkdai/dfa](https://github.com/kkdai/dfa)
+
+Looking for an NFA implementation?
+=============
+
+I also wrote an NFA implementation in Go: [https://github.com/kkdai/nfa](https://github.com/kkdai/nfa)
+
+
+Installation and Usage
+=============
+
+
+Install
+---------------
+
+    go get github.com/kkdai/e-nfa
+
+
+
+Usage
+---------------
+
+The following sample code implements the epsilon-NFA shown in this diagram:
+
+![image](image/eNFA.png)
+
+
+
+```go
+
+package main
+
+import (
+    "fmt"
+
+    "github.com/kkdai/enfa"
+)
+
+func main() {
+
+    nfa := enfa.NewENFA(0, false)
+    nfa.AddState(1, false)
+    nfa.AddState(2, false)
+    nfa.AddState(3, true)
+    nfa.AddState(4, false)
+    nfa.AddState(5, false)
+
+    nfa.AddTransition(0, "1", 1)
+    nfa.AddTransition(0, "0", 4)
+
+    nfa.AddTransition(1, "1", 2)
+    nfa.AddTransition(1, "", 3) //epsilon
+    nfa.AddTransition(2, "1", 3)
+    nfa.AddTransition(4, "0", 5)
+    nfa.AddTransition(4, "", 1, 2) //E -> epsilon B C
+    nfa.AddTransition(5, "0", 3)
+
+    nfa.PrintTransitionTable()
+
+    if !nfa.VerifyInputs([]string{"1"}) {
+        fmt.Printf("Verify inputs failed\n")
+    }
+
+    nfa.Reset()
+
+    if !nfa.VerifyInputs([]string{"1", "1", "1"}) {
+        fmt.Printf("Verify inputs failed\n")
+    }
+
+    nfa.Reset()
+
+    if !nfa.VerifyInputs([]string{"0", "1"}) {
+        fmt.Printf("Verify inputs failed\n")
+    }
+
+    nfa.Reset()
+    if !nfa.VerifyInputs([]string{"0", "0", "0"}) {
+        fmt.Printf("Verify inputs failed\n")
+    }
+}
+
+```
+
+Inspired By
+=============
+
+- [ε-NFA: Wiki](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton_with_%CE%B5-moves)
+- [Coursera: Automata](https://class.coursera.org/automata-004/)
+
+Project52
+---------------
+
+It is one of my [project 52](https://github.com/kkdai/project52) projects.
+
+
+License
+---------------
+
+This package is licensed under the MIT license. See LICENSE for details.
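+
+Another example
+---------------
+
+A second, minimal sketch (assuming the same import path and package name as the usage example above) showing `CheckPathExist`, `VerifyInputs` and `Reset`, and how an ε-move is followed after consuming an input:
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/kkdai/enfa" // import path assumed, as in the usage example above
+)
+
+func main() {
+    // 0 --"a"--> 1 --ε--> 2, with 2 as the only accepting state.
+    nfa := enfa.NewENFA(0, false)
+    nfa.AddState(1, false)
+    nfa.AddState(2, true)
+
+    nfa.AddTransition(0, "a", 1)
+    nfa.AddTransition(1, "", 2) // epsilon
+
+    // CheckPathExist reports whether a single transition is defined.
+    fmt.Println(nfa.CheckPathExist(0, "a", 1)) // true
+    fmt.Println(nfa.CheckPathExist(0, "a", 2)) // false: 2 is only reached via the ε-move
+
+    // "a" reaches state 1 and, through the ε-move, state 2, so the input is accepted.
+    fmt.Println(nfa.VerifyInputs([]string{"a"})) // true
+
+    // Reset returns to the initial state but keeps all states and transitions.
+    nfa.Reset()
+    fmt.Println(nfa.VerifyInputs([]string{"b"})) // false
+}
+```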
diff --git a/vendor/github.com/goshuirc/e-nfa/enfa.go b/vendor/github.com/goshuirc/e-nfa/enfa.go new file mode 100644 index 00000000..e1d92be4 --- /dev/null +++ b/vendor/github.com/goshuirc/e-nfa/enfa.go @@ -0,0 +1,185 @@ +package enfa + +import "fmt" + +type transitionInput struct { + srcState int + input string +} + +type destState map[int]bool + +type ENFA struct { + initState int + currentState map[int]bool + totalStates []int + finalStates []int + transition map[transitionInput]destState + inputMap map[string]bool +} + +//New a new NFA +func NewENFA(initState int, isFinal bool) *ENFA { + + retNFA := &ENFA{ + transition: make(map[transitionInput]destState), + inputMap: make(map[string]bool), + initState: initState} + + retNFA.currentState = make(map[int]bool) + retNFA.currentState[initState] = true + retNFA.AddState(initState, isFinal) + return retNFA +} + +//Add new state in this NFA +func (d *ENFA) AddState(state int, isFinal bool) { + if state == -1 { + fmt.Println("Cannot add state as -1, it is dead state") + return + } + + d.totalStates = append(d.totalStates, state) + if isFinal { + d.finalStates = append(d.finalStates, state) + } +} + +//Add new transition function into NFA +func (d *ENFA) AddTransition(srcState int, input string, dstStateList ...int) { + find := false + + //find input if exist in NFA input List + if _, ok := d.inputMap[input]; !ok { + //not exist, new input in this NFA + d.inputMap[input] = true + } + + for _, v := range d.totalStates { + if v == srcState { + find = true + } + } + + if !find { + fmt.Println("No such state:", srcState, " in current NFA") + return + } + + dstMap := make(map[int]bool) + for _, destState := range dstStateList { + dstMap[destState] = true + } + + targetTrans := transitionInput{srcState: srcState, input: input} + d.transition[targetTrans] = dstMap +} + +func (d *ENFA) CheckPathExist(src int, input string, dst int) bool { + retMap, _ := d.transition[transitionInput{srcState: src, input: input}] + if _, ok := retMap[dst]; ok { + return true + } + return false +} + +func (d *ENFA) Input(testInput string) []int { + updateCurrentState := make(map[int]bool) + for current, _ := range d.currentState { + for _, realTestInput := range []string{testInput, "*", "?"} { + intputTrans := transitionInput{srcState: current, input: realTestInput} + valMap, ok := d.transition[intputTrans] + if ok { + for dst, _ := range valMap { + updateCurrentState[dst] = true + + //Update epsilon input way... if exist + epsilonTrans := transitionInput{srcState: dst} + if eMap, ok := d.transition[epsilonTrans]; ok { + for eDst, _ := range eMap { + updateCurrentState[eDst] = true + } + } + } + } else { + //dead state, remove in current state + //do nothing. 
+ } + } + } + + //update curret state + d.currentState = updateCurrentState + + //return result + var ret []int + for state, _ := range updateCurrentState { + ret = append(ret, state) + } + return ret +} + +//To verify current state if it is final state +func (d *ENFA) Verify() bool { + for _, val := range d.finalStates { + for cState, _ := range d.currentState { + if val == cState { + return true + } + } + } + return false +} + +//Reset NFA state to initilize state, but all state and transition function will remain +func (d *ENFA) Reset() { + initState := make(map[int]bool) + initState[d.initState] = true + d.currentState = initState +} + +//Verify if list of input could be accept by NFA or not +func (d *ENFA) VerifyInputs(inputs []string) bool { + for _, v := range inputs { + d.Input(v) + } + return d.Verify() +} + +//To print detail transition table contain of current NFA +func (d *ENFA) PrintTransitionTable() { + fmt.Println("===================================================") + //list all inputs + var inputList []string + for key, _ := range d.inputMap { + if key == "" { + fmt.Printf("\tε|") + } else { + fmt.Printf("\t%s|", key) + } + inputList = append(inputList, key) + } + + fmt.Printf("\n") + fmt.Println("---------------------------------------------------") + + for _, state := range d.totalStates { + fmt.Printf("%d |", state) + for _, key := range inputList { + checkInput := transitionInput{srcState: state, input: key} + if dstState, ok := d.transition[checkInput]; ok { + fmt.Printf("\t") + for val, _ := range dstState { + fmt.Printf("%d,", val) + } + fmt.Printf("|") + } else { + fmt.Printf("\tNA|") + } + } + fmt.Printf("\n") + } + + fmt.Println("---------------------------------------------------") + fmt.Println("===================================================") +} diff --git a/vendor/github.com/goshuirc/irc-go/LICENSE b/vendor/github.com/goshuirc/irc-go/LICENSE new file mode 100644 index 00000000..05ac8863 --- /dev/null +++ b/vendor/github.com/goshuirc/irc-go/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2016-2017 Daniel Oaks + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/goshuirc/irc-go/ircfmt/doc.go b/vendor/github.com/goshuirc/irc-go/ircfmt/doc.go new file mode 100644 index 00000000..5e17c636 --- /dev/null +++ b/vendor/github.com/goshuirc/irc-go/ircfmt/doc.go @@ -0,0 +1,86 @@ +// written by Daniel Oaks +// released under the ISC license + +/* +Package ircfmt handles IRC formatting codes, escaping and unescaping. + +This allows for a simpler representation of strings that contain colour codes, +bold codes, and such, without having to write and handle raw bytes when +assembling outgoing messages. + +This lets you turn raw IRC messages into our escaped versions, and turn escaped +versions back into raw messages suitable for sending on IRC connections. 
This +is designed to be used on things like PRIVMSG / NOTICE commands, MOTD blocks, +and such. + +The escape character we use in this library is the dollar sign ("$"), along +with the given escape characters: + + -------------------------------- + Name | Escape | Raw + -------------------------------- + Dollarsign | $$ | $ + Bold | $b | 0x02 + Colour | $c | 0x03 + Monospace | $m | 0x11 + Reverse Colour | $v | 0x16 + Italic | $i | 0x1d + Strikethrough | $s | 0x1e + Underscore | $u | 0x1f + Reset | $r | 0x0f + -------------------------------- + +Colours are escaped in a slightly different way, using the actual names of them +rather than just the raw numbers. + +In our escaped format, the colours for the fore and background are contained in +square brackets after the colour ("$c") escape. For example: + + Red foreground: + Escaped: This is a $c[red]cool message! + Raw: This is a 0x034cool message! + + Blue foreground, green background: + Escaped: This is a $c[blue,green]rad message! + Raw: This is a 0x032,3rad message! + +When assembling a raw message, we make sure to use the full colour code +("02" vs just "2") when it could become confused due to numbers just after the +colour escape code. For instance, lines like this will be unescaped correctly: + + No number after colour escape: + Escaped: This is a $c[red]cool message! + Raw: This is a 0x034cool message! + + Number after colour escape: + Escaped: This is $c[blue]20% cooler! + Raw: This is 0x030220% cooler + +Here are the colour names and codes we recognise: + + -------------------- + Code | Name + -------------------- + 00 | white + 01 | black + 02 | blue + 03 | green + 04 | red + 05 | brown + 06 | magenta + 07 | orange + 08 | yellow + 09 | light green + 10 | cyan + 11 | light cyan + 12 | light blue + 13 | pink + 14 | grey + 15 | light grey + 99 | default + -------------------- + +These other colours aren't given names: +https://modern.ircdocs.horse/formatting.html#colors-16-98 +*/ +package ircfmt diff --git a/vendor/github.com/goshuirc/irc-go/ircfmt/ircfmt.go b/vendor/github.com/goshuirc/irc-go/ircfmt/ircfmt.go new file mode 100644 index 00000000..4cc78ce4 --- /dev/null +++ b/vendor/github.com/goshuirc/irc-go/ircfmt/ircfmt.go @@ -0,0 +1,330 @@ +// written by Daniel Oaks +// released under the ISC license + +package ircfmt + +import ( + "strings" +) + +const ( + // raw bytes and strings to do replacing with + bold string = "\x02" + colour string = "\x03" + monospace string = "\x11" + reverseColour string = "\x16" + italic string = "\x1d" + strikethrough string = "\x1e" + underline string = "\x1f" + reset string = "\x0f" + + runecolour rune = '\x03' + runebold rune = '\x02' + runemonospace rune = '\x11' + runereverseColour rune = '\x16' + runeitalic rune = '\x1d' + runestrikethrough rune = '\x1e' + runereset rune = '\x0f' + runeunderline rune = '\x1f' + + // valid characters in a colour code character, for speed + colours1 string = "0123456789" +) + +var ( + // valtoescape replaces most of IRC characters with our escapes. + valtoescape = strings.NewReplacer("$", "$$", colour, "$c", reverseColour, "$v", bold, "$b", italic, "$i", strikethrough, "$s", underline, "$u", monospace, "$m", reset, "$r") + // valToStrip replaces most of the IRC characters with nothing + valToStrip = strings.NewReplacer(colour, "$c", reverseColour, "", bold, "", italic, "", strikethrough, "", underline, "", monospace, "", reset, "") + + // escapetoval contains most of our escapes and how they map to real IRC characters. 
+ // intentionally skips colour, since that's handled elsewhere. + escapetoval = map[rune]string{ + '$': "$", + 'b': bold, + 'i': italic, + 'v': reverseColour, + 's': strikethrough, + 'u': underline, + 'm': monospace, + 'r': reset, + } + + // valid colour codes + numtocolour = map[string]string{ + "99": "default", + "15": "light grey", + "14": "grey", + "13": "pink", + "12": "light blue", + "11": "light cyan", + "10": "cyan", + "09": "light green", + "08": "yellow", + "07": "orange", + "06": "magenta", + "05": "brown", + "04": "red", + "03": "green", + "02": "blue", + "01": "black", + "00": "white", + "9": "light green", + "8": "yellow", + "7": "orange", + "6": "magenta", + "5": "brown", + "4": "red", + "3": "green", + "2": "blue", + "1": "black", + "0": "white", + } + + // full and truncated colour codes + colourcodesFull = map[string]string{ + "white": "00", + "black": "01", + "blue": "02", + "green": "03", + "red": "04", + "brown": "05", + "magenta": "06", + "orange": "07", + "yellow": "08", + "light green": "09", + "cyan": "10", + "light cyan": "11", + "light blue": "12", + "pink": "13", + "grey": "14", + "light grey": "15", + "default": "99", + } + colourcodesTruncated = map[string]string{ + "white": "0", + "black": "1", + "blue": "2", + "green": "3", + "red": "4", + "brown": "5", + "magenta": "6", + "orange": "7", + "yellow": "8", + "light green": "9", + "cyan": "10", + "light cyan": "11", + "light blue": "12", + "pink": "13", + "grey": "14", + "light grey": "15", + "default": "99", + } +) + +// Escape takes a raw IRC string and returns it with our escapes. +// +// IE, it turns this: "This is a \x02cool\x02, \x034red\x0f message!" +// into: "This is a $bcool$b, $c[red]red$r message!" +func Escape(in string) string { + // replace all our usual escapes + in = valtoescape.Replace(in) + + inRunes := []rune(in) + //var out string + out := strings.Builder{} + for 0 < len(inRunes) { + if 1 < len(inRunes) && inRunes[0] == '$' && inRunes[1] == 'c' { + // handle colours + out.WriteString("$c") + inRunes = inRunes[2:] // strip colour code chars + + if len(inRunes) < 1 || !strings.Contains(colours1, string(inRunes[0])) { + out.WriteString("[]") + continue + } + + var foreBuffer, backBuffer string + foreBuffer += string(inRunes[0]) + inRunes = inRunes[1:] + if 0 < len(inRunes) && strings.Contains(colours1, string(inRunes[0])) { + foreBuffer += string(inRunes[0]) + inRunes = inRunes[1:] + } + if 1 < len(inRunes) && inRunes[0] == ',' && strings.Contains(colours1, string(inRunes[1])) { + backBuffer += string(inRunes[1]) + inRunes = inRunes[2:] + if 0 < len(inRunes) && strings.Contains(colours1, string(inRunes[0])) { + backBuffer += string(inRunes[0]) + inRunes = inRunes[1:] + } + } + + foreName, exists := numtocolour[foreBuffer] + if !exists { + foreName = foreBuffer + } + backName, exists := numtocolour[backBuffer] + if !exists { + backName = backBuffer + } + + out.WriteRune('[') + out.WriteString(foreName) + if backName != "" { + out.WriteRune(',') + out.WriteString(backName) + } + out.WriteRune(']') + + } else { + // special case for $$c + if len(inRunes) > 2 && inRunes[0] == '$' && inRunes[1] == '$' && inRunes[2] == 'c' { + out.WriteRune(inRunes[0]) + out.WriteRune(inRunes[1]) + out.WriteRune(inRunes[2]) + inRunes = inRunes[3:] + } else { + out.WriteRune(inRunes[0]) + inRunes = inRunes[1:] + } + } + } + + return out.String() +} + +// Strip takes a raw IRC string and removes it with all formatting codes removed +// IE, it turns this: "This is a \x02cool\x02, \x034red\x0f message!" 
+// into: "This is a cool, red message!" +func Strip(in string) string { + out := strings.Builder{} + runes := []rune(in) + if out.Len() < len(runes) { // Reduce allocations where needed + out.Grow(len(in) - out.Len()) + } + for len(runes) > 0 { + switch runes[0] { + case runebold, runemonospace, runereverseColour, runeitalic, runestrikethrough, runeunderline, runereset: + runes = runes[1:] + case runecolour: + runes = removeColour(runes) + default: + out.WriteRune(runes[0]) + runes = runes[1:] + } + } + return out.String() +} + +func removeNumber(runes []rune) []rune { + if len(runes) > 0 && runes[0] >= '0' && runes[0] <= '9' { + runes = runes[1:] + } + return runes +} + +func removeColour(runes []rune) []rune { + if runes[0] != runecolour { + return runes + } + + runes = runes[1:] + runes = removeNumber(runes) + runes = removeNumber(runes) + + if len(runes) > 1 && runes[0] == ',' && runes[1] >= '0' && runes[1] <= '9' { + runes = runes[2:] + } else { + return runes // Nothing else because we dont have a comma + } + runes = removeNumber(runes) + return runes +} + +// Unescape takes our escaped string and returns a raw IRC string. +// +// IE, it turns this: "This is a $bcool$b, $c[red]red$r message!" +// into this: "This is a \x02cool\x02, \x034red\x0f message!" +func Unescape(in string) string { + out := strings.Builder{} + + remaining := []rune(in) + for 0 < len(remaining) { + char := remaining[0] + remaining = remaining[1:] + + if char == '$' && 0 < len(remaining) { + char = remaining[0] + remaining = remaining[1:] + + val, exists := escapetoval[char] + if exists { + out.WriteString(val) + } else if char == 'c' { + out.WriteString(colour) + + if len(remaining) < 2 || remaining[0] != '[' { + continue + } + + // get colour names + var coloursBuffer string + remaining = remaining[1:] + for remaining[0] != ']' { + coloursBuffer += string(remaining[0]) + remaining = remaining[1:] + } + remaining = remaining[1:] // strip final ']' + + colours := strings.Split(coloursBuffer, ",") + var foreColour, backColour string + foreColour = colours[0] + if 1 < len(colours) { + backColour = colours[1] + } + + // decide whether we can use truncated colour codes + canUseTruncated := len(remaining) < 1 || !strings.Contains(colours1, string(remaining[0])) + + // turn colour names into real codes + var foreColourCode, backColourCode string + var exists bool + + if backColour != "" || canUseTruncated { + foreColourCode, exists = colourcodesTruncated[foreColour] + } else { + foreColourCode, exists = colourcodesFull[foreColour] + } + if exists { + foreColour = foreColourCode + } + + if backColour != "" { + if canUseTruncated { + backColourCode, exists = colourcodesTruncated[backColour] + } else { + backColourCode, exists = colourcodesFull[backColour] + } + if exists { + backColour = backColourCode + } + } + + // output colour codes + out.WriteString(foreColour) + if backColour != "" { + out.WriteRune(',') + out.WriteString(backColour) + } + } else { + // unknown char + out.WriteRune(char) + } + } else { + out.WriteRune(char) + } + } + + return out.String() +} diff --git a/vendor/github.com/goshuirc/irc-go/ircmatch/doc.go b/vendor/github.com/goshuirc/irc-go/ircmatch/doc.go new file mode 100644 index 00000000..0561b4d0 --- /dev/null +++ b/vendor/github.com/goshuirc/irc-go/ircmatch/doc.go @@ -0,0 +1,7 @@ +// written by Daniel Oaks +// released under the ISC license + +/* +Package ircmatch handles matching IRC strings with the traditional glob-like syntax. 
+*/ +package ircmatch diff --git a/vendor/github.com/goshuirc/irc-go/ircmatch/ircmatch.go b/vendor/github.com/goshuirc/irc-go/ircmatch/ircmatch.go new file mode 100644 index 00000000..03e7cf54 --- /dev/null +++ b/vendor/github.com/goshuirc/irc-go/ircmatch/ircmatch.go @@ -0,0 +1,57 @@ +package ircmatch + +import enfa "github.com/goshuirc/e-nfa" + +// Matcher represents an object that can match IRC strings. +type Matcher struct { + internalENFA *enfa.ENFA +} + +// MakeMatch creates a Matcher. +func MakeMatch(globTemplate string) Matcher { + var newmatch Matcher + + // assemble internal enfa + newmatch.internalENFA = enfa.NewENFA(0, false) + + var currentState int + var lastWasStar bool + for _, char := range globTemplate { + if char == '*' { + if lastWasStar { + continue + } + newmatch.internalENFA.AddTransition(currentState, "*", currentState) + lastWasStar = true + continue + } else if char == '?' { + newmatch.internalENFA.AddState(currentState+1, false) + newmatch.internalENFA.AddTransition(currentState, "?", currentState+1) + currentState++ + } else { + newmatch.internalENFA.AddState(currentState+1, false) + newmatch.internalENFA.AddTransition(currentState, string(char), currentState+1) + currentState++ + } + + lastWasStar = false + } + + // create end state + newmatch.internalENFA.AddState(currentState+1, true) + newmatch.internalENFA.AddTransition(currentState, "", currentState+1) + + return newmatch +} + +// Match returns true if the given string matches this glob. +func (menfa *Matcher) Match(search string) bool { + var searchChars []string + for _, char := range search { + searchChars = append(searchChars, string(char)) + } + + isMatch := menfa.internalENFA.VerifyInputs(searchChars) + menfa.internalENFA.Reset() + return isMatch +} diff --git a/vendor/github.com/goshuirc/irc-go/ircmsg/doc.go b/vendor/github.com/goshuirc/irc-go/ircmsg/doc.go new file mode 100644 index 00000000..d836d81a --- /dev/null +++ b/vendor/github.com/goshuirc/irc-go/ircmsg/doc.go @@ -0,0 +1,7 @@ +// written by Daniel Oaks +// released under the ISC license + +/* +Package ircmsg helps parse and create lines for IRC connections. +*/ +package ircmsg diff --git a/vendor/github.com/goshuirc/irc-go/ircmsg/message.go b/vendor/github.com/goshuirc/irc-go/ircmsg/message.go new file mode 100644 index 00000000..c6afb291 --- /dev/null +++ b/vendor/github.com/goshuirc/irc-go/ircmsg/message.go @@ -0,0 +1,401 @@ +// Copyright (c) 2016-2019 Daniel Oaks +// Copyright (c) 2018-2019 Shivaram Lingamneni + +// released under the ISC license + +package ircmsg + +import ( + "bytes" + "errors" + "strings" +) + +const ( + // "The size limit for message tags is 8191 bytes, including the leading + // '@' (0x40) and trailing space ' ' (0x20) characters." + MaxlenTags = 8191 + + // MaxlenTags - ('@' + ' ') + MaxlenTagData = MaxlenTags - 2 + + // "Clients MUST NOT send messages with tag data exceeding 4094 bytes, + // this includes tags with or without the client-only prefix." + MaxlenClientTagData = 4094 + + // "Servers MUST NOT add tag data exceeding 4094 bytes to messages." + MaxlenServerTagData = 4094 + + // '@' + MaxlenClientTagData + ' ' + // this is the analogue of MaxlenTags when the source of the message is a client + MaxlenTagsFromClient = MaxlenClientTagData + 2 +) + +var ( + // ErrorLineIsEmpty indicates that the given IRC line was empty. 
+ ErrorLineIsEmpty = errors.New("Line is empty") + // ErrorLineContainsBadChar indicates that the line contained invalid characters + ErrorLineContainsBadChar = errors.New("Line contains invalid characters") + // ErrorLineTooLong indicates that the message exceeded the maximum tag length + // (the name references 417 ERR_INPUTTOOLONG; we reserve the right to return it + // for messages that exceed the non-tag length limit) + ErrorLineTooLong = errors.New("Line could not be parsed because a specified length limit was exceeded") + + ErrorCommandMissing = errors.New("IRC messages MUST have a command") + ErrorBadParam = errors.New("Cannot have an empty param, a param with spaces, or a param that starts with ':' before the last parameter") +) + +// IrcMessage represents an IRC message, as defined by the RFCs and as +// extended by the IRCv3 Message Tags specification with the introduction +// of message tags. +type IrcMessage struct { + Prefix string + Command string + Params []string + tags map[string]string + clientOnlyTags map[string]string +} + +// GetTag returns whether a tag is present, and if so, what its value is. +func (msg *IrcMessage) GetTag(tagName string) (present bool, value string) { + if len(tagName) == 0 { + return + } else if tagName[0] == '+' { + value, present = msg.clientOnlyTags[tagName] + return + } else { + value, present = msg.tags[tagName] + return + } +} + +// HasTag returns whether a tag is present. +func (msg *IrcMessage) HasTag(tagName string) (present bool) { + present, _ = msg.GetTag(tagName) + return +} + +// SetTag sets a tag. +func (msg *IrcMessage) SetTag(tagName, tagValue string) { + if len(tagName) == 0 { + return + } else if tagName[0] == '+' { + if msg.clientOnlyTags == nil { + msg.clientOnlyTags = make(map[string]string) + } + msg.clientOnlyTags[tagName] = tagValue + } else { + if msg.tags == nil { + msg.tags = make(map[string]string) + } + msg.tags[tagName] = tagValue + } +} + +// DeleteTag deletes a tag. +func (msg *IrcMessage) DeleteTag(tagName string) { + if len(tagName) == 0 { + return + } else if tagName[0] == '+' { + delete(msg.clientOnlyTags, tagName) + } else { + delete(msg.tags, tagName) + } +} + +// UpdateTags is a convenience to set multiple tags at once. +func (msg *IrcMessage) UpdateTags(tags map[string]string) { + for name, value := range tags { + msg.SetTag(name, value) + } +} + +// AllTags returns all tags as a single map. +func (msg *IrcMessage) AllTags() (result map[string]string) { + result = make(map[string]string, len(msg.tags)+len(msg.clientOnlyTags)) + for name, value := range msg.tags { + result[name] = value + } + for name, value := range msg.clientOnlyTags { + result[name] = value + } + return +} + +// ClientOnlyTags returns the client-only tags (the tags with the + prefix). +// The returned map may be internal storage of the IrcMessage object and +// should not be modified. +func (msg *IrcMessage) ClientOnlyTags() map[string]string { + return msg.clientOnlyTags +} + +// ParseLine creates and returns a message from the given IRC line. +func ParseLine(line string) (ircmsg IrcMessage, err error) { + return parseLine(line, 0, 0) +} + +// ParseLineStrict creates and returns an IrcMessage from the given IRC line, +// taking the maximum length into account and truncating the message as appropriate. +// If fromClient is true, it enforces the client limit on tag data length (4094 bytes), +// allowing the server to return ERR_INPUTTOOLONG as appropriate. 
If truncateLen is +// nonzero, it is the length at which the non-tag portion of the message is truncated. +func ParseLineStrict(line string, fromClient bool, truncateLen int) (ircmsg IrcMessage, err error) { + maxTagDataLength := MaxlenTagData + if fromClient { + maxTagDataLength = MaxlenClientTagData + } + return parseLine(line, maxTagDataLength, truncateLen) +} + +// slice off any amount of ' ' from the front of the string +func trimInitialSpaces(str string) string { + var i int + for i = 0; i < len(str) && str[i] == ' '; i += 1 { + } + return str[i:] +} + +func parseLine(line string, maxTagDataLength int, truncateLen int) (ircmsg IrcMessage, err error) { + if strings.IndexByte(line, '\x00') != -1 { + err = ErrorLineContainsBadChar + return + } + + // trim to the first appearance of either '\r' or '\n': + lineEnd := strings.IndexByte(line, '\r') + newlineIndex := strings.IndexByte(line, '\n') + if newlineIndex != -1 && (lineEnd == -1 || newlineIndex < lineEnd) { + lineEnd = newlineIndex + } + if lineEnd != -1 { + line = line[:lineEnd] + } + + if len(line) < 1 { + return ircmsg, ErrorLineIsEmpty + } + + // tags + if line[0] == '@' { + tagEnd := strings.IndexByte(line, ' ') + if tagEnd == -1 { + return ircmsg, ErrorLineIsEmpty + } + tags := line[1:tagEnd] + if 0 < maxTagDataLength && maxTagDataLength < len(tags) { + return ircmsg, ErrorLineTooLong + } + err = ircmsg.parseTags(tags) + if err != nil { + return + } + // skip over the tags and the separating space + line = line[tagEnd+1:] + } + + // truncate if desired + if 0 < truncateLen && truncateLen < len(line) { + line = line[:truncateLen] + } + + // modern: "These message parts, and parameters themselves, are separated + // by one or more ASCII SPACE characters" + line = trimInitialSpaces(line) + + // prefix + if 0 < len(line) && line[0] == ':' { + prefixEnd := strings.IndexByte(line, ' ') + if prefixEnd == -1 { + return ircmsg, ErrorLineIsEmpty + } + ircmsg.Prefix = line[1:prefixEnd] + // skip over the prefix and the separating space + line = line[prefixEnd+1:] + } + + line = trimInitialSpaces(line) + + // command + commandEnd := strings.IndexByte(line, ' ') + paramStart := commandEnd + 1 + if commandEnd == -1 { + commandEnd = len(line) + paramStart = len(line) + } + // normalize command to uppercase: + ircmsg.Command = strings.ToUpper(line[:commandEnd]) + if len(ircmsg.Command) == 0 { + return ircmsg, ErrorLineIsEmpty + } + line = line[paramStart:] + + for { + line = trimInitialSpaces(line) + if len(line) == 0 { + break + } + // handle trailing + if line[0] == ':' { + ircmsg.Params = append(ircmsg.Params, line[1:]) + break + } + paramEnd := strings.IndexByte(line, ' ') + if paramEnd == -1 { + ircmsg.Params = append(ircmsg.Params, line) + break + } + ircmsg.Params = append(ircmsg.Params, line[:paramEnd]) + line = line[paramEnd+1:] + } + + return ircmsg, nil +} + +// helper to parse tags +func (ircmsg *IrcMessage) parseTags(tags string) (err error) { + for 0 < len(tags) { + tagEnd := strings.IndexByte(tags, ';') + endPos := tagEnd + nextPos := tagEnd + 1 + if tagEnd == -1 { + endPos = len(tags) + nextPos = len(tags) + } + tagPair := tags[:endPos] + equalsIndex := strings.IndexByte(tagPair, '=') + var tagName, tagValue string + if equalsIndex == -1 { + // tag with no value + tagName = tagPair + } else { + tagName, tagValue = tagPair[:equalsIndex], tagPair[equalsIndex+1:] + } + ircmsg.SetTag(tagName, UnescapeTagValue(tagValue)) + // skip over the tag just processed, plus the delimiting ; if any + tags = tags[nextPos:] + } + return nil +} 
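
The two halves of `ircmsg` shown above are meant to round-trip: `ParseLine`/`ParseLineStrict` split a wire line into tags, prefix, command and params, while `MakeMessage` plus `Line`/`LineBytesStrict` assemble one going the other way. A minimal sketch of driving the package follows; the literal line, channel and tag names are illustrative only and do not come from this diff.

```go
package main

import (
	"fmt"

	"github.com/goshuirc/irc-go/ircmsg"
)

func main() {
	// Parse a tagged PRIVMSG (the tag and hostmask here are made up for illustration).
	msg, err := ircmsg.ParseLine("@+example/tag=value :nick!user@host PRIVMSG #chan :hello there")
	if err != nil {
		panic(err)
	}
	fmt.Println(msg.Command, msg.Params) // PRIVMSG [#chan hello there]

	// Client-only tags (leading '+') are kept in their own map.
	if present, value := msg.GetTag("+example/tag"); present {
		fmt.Println("client-only tag:", value)
	}

	// Build a reply and serialize it back to wire format.
	reply := ircmsg.MakeMessage(nil, "", "PRIVMSG", "#chan", "hi!")
	line, err := reply.Line()
	if err != nil {
		panic(err)
	}
	fmt.Print(line) // "PRIVMSG #chan hi!\r\n"
}
```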
+ +// MakeMessage provides a simple way to create a new IrcMessage. +func MakeMessage(tags map[string]string, prefix string, command string, params ...string) (ircmsg IrcMessage) { + ircmsg.Prefix = prefix + ircmsg.Command = command + ircmsg.Params = params + ircmsg.UpdateTags(tags) + return ircmsg +} + +// Line returns a sendable line created from an IrcMessage. +func (ircmsg *IrcMessage) Line() (result string, err error) { + bytes, err := ircmsg.line(0, 0, 0, 0) + if err == nil { + result = string(bytes) + } + return +} + +// LineBytes returns a sendable line created from an IrcMessage. +func (ircmsg *IrcMessage) LineBytes() (result []byte, err error) { + result, err = ircmsg.line(0, 0, 0, 0) + return +} + +// LineBytesStrict returns a sendable line, as a []byte, created from an IrcMessage. +// fromClient controls whether the server-side or client-side tag length limit +// is enforced. If truncateLen is nonzero, it is the length at which the +// non-tag portion of the message is truncated. +func (ircmsg *IrcMessage) LineBytesStrict(fromClient bool, truncateLen int) ([]byte, error) { + var tagLimit, clientOnlyTagDataLimit, serverAddedTagDataLimit int + if fromClient { + // enforce client max tags: + // (4096) :: '@' ' ' + tagLimit = MaxlenTagsFromClient + } else { + // on the server side, enforce separate client-only and server-added tag budgets: + // "Servers MUST NOT add tag data exceeding 4094 bytes to messages." + // (8191) :: '@' ';' ' ' + clientOnlyTagDataLimit = MaxlenClientTagData + serverAddedTagDataLimit = MaxlenServerTagData + } + return ircmsg.line(tagLimit, clientOnlyTagDataLimit, serverAddedTagDataLimit, truncateLen) +} + +// line returns a sendable line created from an IrcMessage. +func (ircmsg *IrcMessage) line(tagLimit, clientOnlyTagDataLimit, serverAddedTagDataLimit, truncateLen int) ([]byte, error) { + if len(ircmsg.Command) < 1 { + return nil, ErrorCommandMissing + } + + var buf bytes.Buffer + + // write the tags, computing the budgets for client-only tags and regular tags + var lenRegularTags, lenClientOnlyTags, lenTags int + if 0 < len(ircmsg.tags) || 0 < len(ircmsg.clientOnlyTags) { + buf.WriteByte('@') + firstTag := true + writeTags := func(tags map[string]string) { + for tag, val := range tags { + if !firstTag { + buf.WriteByte(';') // delimiter + } + buf.WriteString(tag) + if val != "" { + buf.WriteByte('=') + buf.WriteString(EscapeTagValue(val)) + } + firstTag = false + } + } + writeTags(ircmsg.tags) + lenRegularTags = buf.Len() - 1 // '@' is not counted + writeTags(ircmsg.clientOnlyTags) + lenClientOnlyTags = (buf.Len() - 1) - lenRegularTags // '@' is not counted + if lenRegularTags != 0 { + // semicolon between regular and client-only tags is not counted + lenClientOnlyTags -= 1 + } + buf.WriteByte(' ') + } + lenTags = buf.Len() + + if 0 < tagLimit && tagLimit < buf.Len() { + return nil, ErrorLineTooLong + } + if (0 < clientOnlyTagDataLimit && clientOnlyTagDataLimit < lenClientOnlyTags) || (0 < serverAddedTagDataLimit && serverAddedTagDataLimit < lenRegularTags) { + return nil, ErrorLineTooLong + } + + if len(ircmsg.Prefix) > 0 { + buf.WriteByte(':') + buf.WriteString(ircmsg.Prefix) + buf.WriteByte(' ') + } + + buf.WriteString(ircmsg.Command) + + for i, param := range ircmsg.Params { + buf.WriteByte(' ') + if len(param) < 1 || strings.IndexByte(param, ' ') != -1 || param[0] == ':' { + if i != len(ircmsg.Params)-1 { + return nil, ErrorBadParam + } + buf.WriteByte(':') + } + buf.WriteString(param) + } + + // truncate if desired + // -2 for \r\n + restLen := 
buf.Len() - lenTags + if 0 < truncateLen && (truncateLen-2) < restLen { + buf.Truncate(lenTags + (truncateLen - 2)) + } + buf.WriteString("\r\n") + + result := buf.Bytes() + if bytes.IndexByte(result, '\x00') != -1 { + return nil, ErrorLineContainsBadChar + } + return result, nil +} diff --git a/vendor/github.com/goshuirc/irc-go/ircmsg/tags.go b/vendor/github.com/goshuirc/irc-go/ircmsg/tags.go new file mode 100644 index 00000000..1ef23aac --- /dev/null +++ b/vendor/github.com/goshuirc/irc-go/ircmsg/tags.go @@ -0,0 +1,75 @@ +// written by Daniel Oaks +// released under the ISC license + +package ircmsg + +import "bytes" +import "strings" + +var ( + // valtoescape replaces real characters with message tag escapes. + valtoescape = strings.NewReplacer("\\", "\\\\", ";", "\\:", " ", "\\s", "\r", "\\r", "\n", "\\n") + + escapedCharLookupTable [256]byte +) + +func init() { + // most chars escape to themselves + for i := 0; i < 256; i += 1 { + escapedCharLookupTable[i] = byte(i) + } + // these are the exceptions + escapedCharLookupTable[':'] = ';' + escapedCharLookupTable['s'] = ' ' + escapedCharLookupTable['r'] = '\r' + escapedCharLookupTable['n'] = '\n' +} + +// EscapeTagValue takes a value, and returns an escaped message tag value. +// +// This function is automatically used when lines are created from an +// IrcMessage, so you don't need to call it yourself before creating a line. +func EscapeTagValue(inString string) string { + return valtoescape.Replace(inString) +} + +// UnescapeTagValue takes an escaped message tag value, and returns the raw value. +// +// This function is automatically used when lines are interpreted by ParseLine, +// so you don't need to call it yourself after parsing a line. +func UnescapeTagValue(inString string) string { + // buf.Len() == 0 is the fastpath where we have not needed to unescape any chars + var buf bytes.Buffer + remainder := inString + for { + backslashPos := strings.IndexByte(remainder, '\\') + + if backslashPos == -1 { + if buf.Len() == 0 { + return inString + } else { + buf.WriteString(remainder) + break + } + } else if backslashPos == len(remainder)-1 { + // trailing backslash, which we strip + if buf.Len() == 0 { + return inString[:len(inString)-1] + } else { + buf.WriteString(remainder[:len(remainder)-1]) + break + } + } + + // non-trailing backslash detected; we're now on the slowpath + // where we modify the string + if buf.Len() == 0 { + buf.Grow(len(inString)) // just an optimization + } + buf.WriteString(remainder[:backslashPos]) + buf.WriteByte(escapedCharLookupTable[remainder[backslashPos+1]]) + remainder = remainder[backslashPos+2:] + } + + return buf.String() +} diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml new file mode 100644 index 00000000..98db8f06 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - tip + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 00000000..91b5cef3 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the 
"Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md new file mode 100644 index 00000000..56729a92 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -0,0 +1,48 @@ +# go-colorable + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) +[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) +This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 00000000..0b0aef83 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,29 @@ +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. 
+func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 00000000..3fb771dc --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,30 @@ +// +build !windows +// +build !appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 00000000..1bd628f2 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,1005 @@ +// +build windows +// +build !appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provides colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer +} + +// NewColorable returns new instance of Writer which handles escape sequence from File. 
+func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 
0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + 
case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n < 1 { + n = 1 + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), 
*(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case n == 22 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 
30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. 
+ attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) 
float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod new file mode 100644 index 00000000..ef3ca9d4 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-colorable + +require github.com/mattn/go-isatty v0.0.8 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum new file mode 100644 index 00000000..2c12960e --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -0,0 +1,4 @@ +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 00000000..95f2c6be --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,55 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds writer but removes escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. 
+func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml new file mode 100644 index 00000000..5597e026 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - tip + +os: + - linux + - osx + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 00000000..65dc692b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
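The `NonColorable` writer in the vendored go-colorable package above copies bytes through unchanged while dropping ANSI escape sequences. As a minimal usage sketch (an illustration, not part of the vendored files, assuming only the `NewNonColorable` constructor shown in the diff):

```go
package main

import (
	"fmt"
	"os"

	colorable "github.com/mattn/go-colorable"
)

func main() {
	// Wrap stdout so that ANSI escape sequences are stripped
	// before the bytes reach the underlying writer.
	plain := colorable.NewNonColorable(os.Stdout)
	fmt.Fprintln(plain, "\x1b[31mred?\x1b[0m no, just plain text")
}
```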
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 00000000..1e69004b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 00000000..17d4f90e --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod new file mode 100644 index 00000000..a8ddf404 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -0,0 +1,5 @@ +module github.com/mattn/go-isatty + +require golang.org/x/sys v0.0.0-20191008105621-543471e840be + +go 1.14 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum new file mode 100644 index 00000000..c141fc53 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -0,0 +1,4 @@ +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/isatty_android.go b/vendor/github.com/mattn/go-isatty/isatty_android.go new file mode 100644 index 00000000..d3567cb5 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_android.go @@ -0,0 +1,23 @@ +// +build android + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 00000000..07e93039 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,24 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 00000000..ff714a37 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,15 @@ +// +build appengine js nacl + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 00000000..bc0a7092 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,22 @@ +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(fd) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 00000000..bdd5c79a --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,22 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 00000000..453b025d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +// +build linux aix +// +build !appengine +// +build !android + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 00000000..1fa86915 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,125 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "errors" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. 
+// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler +// since GetFileInformationByHandleEx is not avilable under windows Vista and still some old fashion +// guys are using Windows XP, this is a workaround for those guys, it will also work on system from +// Windows vista to 10 +// see https://stackoverflow.com/a/18792477 for details +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) + } + + // Cygwin/msys's pty is a pipe. + ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mgutz/ansi/.gitignore b/vendor/github.com/mgutz/ansi/.gitignore new file mode 100644 index 00000000..9ed3b07c --- /dev/null +++ b/vendor/github.com/mgutz/ansi/.gitignore @@ -0,0 +1 @@ +*.test diff --git a/vendor/github.com/mgutz/ansi/LICENSE b/vendor/github.com/mgutz/ansi/LICENSE new file mode 100644 index 00000000..06ce0c3b --- /dev/null +++ b/vendor/github.com/mgutz/ansi/LICENSE @@ -0,0 +1,9 @@ +The MIT License (MIT) +Copyright (c) 2013 Mario L. Gutierrez + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/mgutz/ansi/README.md b/vendor/github.com/mgutz/ansi/README.md new file mode 100644 index 00000000..8f8e20b7 --- /dev/null +++ b/vendor/github.com/mgutz/ansi/README.md @@ -0,0 +1,121 @@ +# ansi + +Package ansi is a small, fast library to create ANSI colored strings and codes. + +## Install + +Get it + +```sh +go get -u github.com/mgutz/ansi +``` + +## Example + +```go +import "github.com/mgutz/ansi" + +// colorize a string, SLOW +msg := ansi.Color("foo", "red+b:white") + +// create a FAST closure function to avoid computation of ANSI code +phosphorize := ansi.ColorFunc("green+h:black") +msg = phosphorize("Bring back the 80s!") +msg2 := phospohorize("Look, I'm a CRT!") + +// cache escape codes and build strings manually +lime := ansi.ColorCode("green+h:black") +reset := ansi.ColorCode("reset") + +fmt.Println(lime, "Bring back the 80s!", reset) +``` + +Other examples + +```go +Color(s, "red") // red +Color(s, "red+b") // red bold +Color(s, "red+B") // red blinking +Color(s, "red+u") // red underline +Color(s, "red+bh") // red bold bright +Color(s, "red:white") // red on white +Color(s, "red+b:white+h") // red bold on white bright +Color(s, "red+B:white+h") // red blink on white bright +Color(s, "off") // turn off ansi codes +``` + +To view color combinations, from project directory in terminal. + +```sh +go test +``` + +## Style format + +```go +"foregroundColor+attributes:backgroundColor+attributes" +``` + +Colors + +* black +* red +* green +* yellow +* blue +* magenta +* cyan +* white +* 0...255 (256 colors) + +Foreground Attributes + +* B = Blink +* b = bold +* h = high intensity (bright) +* i = inverse +* s = strikethrough +* u = underline + +Background Attributes + +* h = high intensity (bright) + +## Constants + +* ansi.Reset +* ansi.DefaultBG +* ansi.DefaultFG +* ansi.Black +* ansi.Red +* ansi.Green +* ansi.Yellow +* ansi.Blue +* ansi.Magenta +* ansi.Cyan +* ansi.White +* ansi.LightBlack +* ansi.LightRed +* ansi.LightGreen +* ansi.LightYellow +* ansi.LightBlue +* ansi.LightMagenta +* ansi.LightCyan +* ansi.LightWhite + +## References + +Wikipedia ANSI escape codes [Colors](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) + +General [tips and formatting](http://misc.flogisoft.com/bash/tip_colors_and_formatting) + +What about support on Windows? Use [colorable by mattn](https://github.com/mattn/go-colorable). +Ansi and colorable are used by [logxi](https://github.com/mgutz/logxi) to support logging in +color on Windows. + +## MIT License + +Copyright (c) 2013 Mario Gutierrez mario@mgutz.com + +See the file LICENSE for copying permission. 
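As a hedged sketch of the combination the README above recommends for Windows support (mgutz/ansi builds the escape codes, mattn/go-colorable translates them for the console), something like the following should work; it is illustrative only and relies just on the `ColorFunc` and `NewColorableStdout` calls that appear in the vendored code:

```go
package main

import (
	"fmt"

	colorable "github.com/mattn/go-colorable"
	"github.com/mgutz/ansi"
)

func main() {
	// NewColorableStdout returns a writer that passes ANSI codes through
	// on *nix and converts them to console attributes on Windows.
	stdout := colorable.NewColorableStdout()

	// "red+b:white" follows the "fg+attributes:bg+attributes" style format
	// described above: bold red text on a white background.
	warn := ansi.ColorFunc("red+b:white")
	fmt.Fprintln(stdout, warn("something went wrong"))
}
```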
+ diff --git a/vendor/github.com/mgutz/ansi/ansi.go b/vendor/github.com/mgutz/ansi/ansi.go new file mode 100644 index 00000000..dc041364 --- /dev/null +++ b/vendor/github.com/mgutz/ansi/ansi.go @@ -0,0 +1,285 @@ +package ansi + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +const ( + black = iota + red + green + yellow + blue + magenta + cyan + white + defaultt = 9 + + normalIntensityFG = 30 + highIntensityFG = 90 + normalIntensityBG = 40 + highIntensityBG = 100 + + start = "\033[" + bold = "1;" + blink = "5;" + underline = "4;" + inverse = "7;" + strikethrough = "9;" + + // Reset is the ANSI reset escape sequence + Reset = "\033[0m" + // DefaultBG is the default background + DefaultBG = "\033[49m" + // DefaultFG is the default foreground + DefaultFG = "\033[39m" +) + +// Black FG +var Black string + +// Red FG +var Red string + +// Green FG +var Green string + +// Yellow FG +var Yellow string + +// Blue FG +var Blue string + +// Magenta FG +var Magenta string + +// Cyan FG +var Cyan string + +// White FG +var White string + +// LightBlack FG +var LightBlack string + +// LightRed FG +var LightRed string + +// LightGreen FG +var LightGreen string + +// LightYellow FG +var LightYellow string + +// LightBlue FG +var LightBlue string + +// LightMagenta FG +var LightMagenta string + +// LightCyan FG +var LightCyan string + +// LightWhite FG +var LightWhite string + +var ( + plain = false + // Colors maps common color names to their ANSI color code. + Colors = map[string]int{ + "black": black, + "red": red, + "green": green, + "yellow": yellow, + "blue": blue, + "magenta": magenta, + "cyan": cyan, + "white": white, + "default": defaultt, + } +) + +func init() { + for i := 0; i < 256; i++ { + Colors[strconv.Itoa(i)] = i + } + + Black = ColorCode("black") + Red = ColorCode("red") + Green = ColorCode("green") + Yellow = ColorCode("yellow") + Blue = ColorCode("blue") + Magenta = ColorCode("magenta") + Cyan = ColorCode("cyan") + White = ColorCode("white") + LightBlack = ColorCode("black+h") + LightRed = ColorCode("red+h") + LightGreen = ColorCode("green+h") + LightYellow = ColorCode("yellow+h") + LightBlue = ColorCode("blue+h") + LightMagenta = ColorCode("magenta+h") + LightCyan = ColorCode("cyan+h") + LightWhite = ColorCode("white+h") +} + +// ColorCode returns the ANSI color color code for style. +func ColorCode(style string) string { + return colorCode(style).String() +} + +// Gets the ANSI color code for a style. 
+func colorCode(style string) *bytes.Buffer { + buf := bytes.NewBufferString("") + if plain || style == "" { + return buf + } + if style == "reset" { + buf.WriteString(Reset) + return buf + } else if style == "off" { + return buf + } + + foregroundBackground := strings.Split(style, ":") + foreground := strings.Split(foregroundBackground[0], "+") + fgKey := foreground[0] + fg := Colors[fgKey] + fgStyle := "" + if len(foreground) > 1 { + fgStyle = foreground[1] + } + + bg, bgStyle := "", "" + + if len(foregroundBackground) > 1 { + background := strings.Split(foregroundBackground[1], "+") + bg = background[0] + if len(background) > 1 { + bgStyle = background[1] + } + } + + buf.WriteString(start) + base := normalIntensityFG + if len(fgStyle) > 0 { + if strings.Contains(fgStyle, "b") { + buf.WriteString(bold) + } + if strings.Contains(fgStyle, "B") { + buf.WriteString(blink) + } + if strings.Contains(fgStyle, "u") { + buf.WriteString(underline) + } + if strings.Contains(fgStyle, "i") { + buf.WriteString(inverse) + } + if strings.Contains(fgStyle, "s") { + buf.WriteString(strikethrough) + } + if strings.Contains(fgStyle, "h") { + base = highIntensityFG + } + } + + // if 256-color + n, err := strconv.Atoi(fgKey) + if err == nil { + fmt.Fprintf(buf, "38;5;%d;", n) + } else { + fmt.Fprintf(buf, "%d;", base+fg) + } + + base = normalIntensityBG + if len(bg) > 0 { + if strings.Contains(bgStyle, "h") { + base = highIntensityBG + } + // if 256-color + n, err := strconv.Atoi(bg) + if err == nil { + fmt.Fprintf(buf, "48;5;%d;", n) + } else { + fmt.Fprintf(buf, "%d;", base+Colors[bg]) + } + } + + // remove last ";" + buf.Truncate(buf.Len() - 1) + buf.WriteRune('m') + return buf +} + +// Color colors a string based on the ANSI color code for style. +func Color(s, style string) string { + if plain || len(style) < 1 { + return s + } + buf := colorCode(style) + buf.WriteString(s) + buf.WriteString(Reset) + return buf.String() +} + +// ColorFunc creates a closure to avoid computation ANSI color code. +func ColorFunc(style string) func(string) string { + if style == "" { + return func(s string) string { + return s + } + } + color := ColorCode(style) + return func(s string) string { + if plain || s == "" { + return s + } + buf := bytes.NewBufferString(color) + buf.WriteString(s) + buf.WriteString(Reset) + result := buf.String() + return result + } +} + +// DisableColors disables ANSI color codes. The default is false (colors are on). 
+func DisableColors(disable bool) { + plain = disable + if plain { + Black = "" + Red = "" + Green = "" + Yellow = "" + Blue = "" + Magenta = "" + Cyan = "" + White = "" + LightBlack = "" + LightRed = "" + LightGreen = "" + LightYellow = "" + LightBlue = "" + LightMagenta = "" + LightCyan = "" + LightWhite = "" + } else { + Black = ColorCode("black") + Red = ColorCode("red") + Green = ColorCode("green") + Yellow = ColorCode("yellow") + Blue = ColorCode("blue") + Magenta = ColorCode("magenta") + Cyan = ColorCode("cyan") + White = ColorCode("white") + LightBlack = ColorCode("black+h") + LightRed = ColorCode("red+h") + LightGreen = ColorCode("green+h") + LightYellow = ColorCode("yellow+h") + LightBlue = ColorCode("blue+h") + LightMagenta = ColorCode("magenta+h") + LightCyan = ColorCode("cyan+h") + LightWhite = ColorCode("white+h") + } +} diff --git a/vendor/github.com/mgutz/ansi/doc.go b/vendor/github.com/mgutz/ansi/doc.go new file mode 100644 index 00000000..43c217e1 --- /dev/null +++ b/vendor/github.com/mgutz/ansi/doc.go @@ -0,0 +1,65 @@ +/* +Package ansi is a small, fast library to create ANSI colored strings and codes. + +Installation + + # this installs the color viewer and the package + go get -u github.com/mgutz/ansi/cmd/ansi-mgutz + +Example + + // colorize a string, SLOW + msg := ansi.Color("foo", "red+b:white") + + // create a closure to avoid recalculating ANSI code compilation + phosphorize := ansi.ColorFunc("green+h:black") + msg = phosphorize("Bring back the 80s!") + msg2 := phospohorize("Look, I'm a CRT!") + + // cache escape codes and build strings manually + lime := ansi.ColorCode("green+h:black") + reset := ansi.ColorCode("reset") + + fmt.Println(lime, "Bring back the 80s!", reset) + +Other examples + + Color(s, "red") // red + Color(s, "red+b") // red bold + Color(s, "red+B") // red blinking + Color(s, "red+u") // red underline + Color(s, "red+bh") // red bold bright + Color(s, "red:white") // red on white + Color(s, "red+b:white+h") // red bold on white bright + Color(s, "red+B:white+h") // red blink on white bright + +To view color combinations, from terminal + + ansi-mgutz + +Style format + + "foregroundColor+attributes:backgroundColor+attributes" + +Colors + + black + red + green + yellow + blue + magenta + cyan + white + +Attributes + + b = bold foreground + B = Blink foreground + u = underline foreground + h = high intensity (bright) foreground, background + i = inverse + +Wikipedia ANSI escape codes [Colors](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) +*/ +package ansi diff --git a/vendor/github.com/mgutz/ansi/print.go b/vendor/github.com/mgutz/ansi/print.go new file mode 100644 index 00000000..806f436b --- /dev/null +++ b/vendor/github.com/mgutz/ansi/print.go @@ -0,0 +1,57 @@ +package ansi + +import ( + "fmt" + "sort" + + colorable "github.com/mattn/go-colorable" +) + +// PrintStyles prints all style combinations to the terminal. 
+func PrintStyles() { + // for compatibility with Windows, not needed for *nix + stdout := colorable.NewColorableStdout() + + bgColors := []string{ + "", + ":black", + ":red", + ":green", + ":yellow", + ":blue", + ":magenta", + ":cyan", + ":white", + } + + keys := make([]string, 0, len(Colors)) + for k := range Colors { + keys = append(keys, k) + } + + sort.Sort(sort.StringSlice(keys)) + + for _, fg := range keys { + for _, bg := range bgColors { + fmt.Fprintln(stdout, padColor(fg, []string{"" + bg, "+b" + bg, "+bh" + bg, "+u" + bg})) + fmt.Fprintln(stdout, padColor(fg, []string{"+s" + bg, "+i" + bg})) + fmt.Fprintln(stdout, padColor(fg, []string{"+uh" + bg, "+B" + bg, "+Bb" + bg /* backgrounds */, "" + bg + "+h"})) + fmt.Fprintln(stdout, padColor(fg, []string{"+b" + bg + "+h", "+bh" + bg + "+h", "+u" + bg + "+h", "+uh" + bg + "+h"})) + } + } +} + +func pad(s string, length int) string { + for len(s) < length { + s += " " + } + return s +} + +func padColor(color string, styles []string) string { + buffer := "" + for _, style := range styles { + buffer += Color(pad(color+style, 20), color+style) + } + return buffer +} diff --git a/vendor/github.com/oragono/confusables/.gitignore b/vendor/github.com/oragono/confusables/.gitignore new file mode 100644 index 00000000..230994ea --- /dev/null +++ b/vendor/github.com/oragono/confusables/.gitignore @@ -0,0 +1,2 @@ +/maketables +confusables.txt diff --git a/vendor/github.com/oragono/confusables/LICENSE b/vendor/github.com/oragono/confusables/LICENSE new file mode 100644 index 00000000..6071c7e4 --- /dev/null +++ b/vendor/github.com/oragono/confusables/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Michael Tibben. All rights reserved. +Copyright (c) 2014 Filippo Valsorda. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
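The confusables package vendored below exports `Skeleton`, `SkeletonTweaked`, and `Confusable`. A brief usage sketch (an illustration, not part of the vendored files, using the vendored `github.com/oragono/confusables` import path):

```go
package main

import (
	"fmt"

	"github.com/oragono/confusables"
)

func main() {
	// Skeleton maps a string to its TR39 skeleton form; two strings are
	// considered confusable when their skeletons are equal.
	fmt.Println(confusables.Skeleton("𝔭𝒶ỿ𝕡𝕒ℓ"))             // "paypal"
	fmt.Println(confusables.Confusable("𝔭𝒶ỿ𝕡𝕒ℓ", "paypal")) // true

	// SkeletonTweaked applies the package's overrides to the mapping
	// (per confusables.go, it currently removes the m -> rn confusable).
	fmt.Println(confusables.SkeletonTweaked("modem"))
}
```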
diff --git a/vendor/github.com/oragono/confusables/README.md b/vendor/github.com/oragono/confusables/README.md new file mode 100644 index 00000000..80921047 --- /dev/null +++ b/vendor/github.com/oragono/confusables/README.md @@ -0,0 +1,17 @@ +# Unicode confusables + +This Go library implements the `Skeleton` algorithm from Unicode TR39 + +See http://www.unicode.org/reports/tr39/ + +### Examples +``` +import "github.com/mtibben/confusables" + +confusables.Skeleton("𝔭𝒶ỿ𝕡𝕒ℓ") # "paypal" +confusables.Confusable("𝔭𝒶ỿ𝕡𝕒ℓ", "paypal") # true +``` + +*Note on the use of `Skeleton`, from TR39:* + +> A skeleton is intended only for internal use for testing confusability of strings; the resulting text is not suitable for display to users, because it will appear to be a hodgepodge of different scripts. In particular, the result of mapping an identifier will not necessary be an identifier. Thus the confusability mappings can be used to test whether two identifiers are confusable (if their skeletons are the same), but should definitely not be used as a "normalization" of identifiers. diff --git a/vendor/github.com/oragono/confusables/confusables.go b/vendor/github.com/oragono/confusables/confusables.go new file mode 100644 index 00000000..4b39d4c0 --- /dev/null +++ b/vendor/github.com/oragono/confusables/confusables.go @@ -0,0 +1,82 @@ +//go:generate go run maketables.go > tables.go + +package confusables + +import ( + "bytes" + + "golang.org/x/text/unicode/norm" +) + +// TODO: document casefolding approaches +// (suggest to force casefold strings; explain how to catch paypal - pAypal) +// TODO: DOC you might want to store the Skeleton and check against it later +// TODO: implement xidmodifications.txt restricted characters + +type lookupFunc func(rune) (string) + +func lookupReplacement(r rune) string { + return confusablesMap[r] +} + +func lookupReplacementTweaked(r rune) string { + if replacement, ok := tweaksMap[r]; ok { + return replacement + } + return confusablesMap[r] +} + +func skeletonBase(s string, lookup lookupFunc) string { + + // 1. Converting X to NFD format + s = norm.NFD.String(s) + + // 2. Successively mapping each source character in X to the target string + // according to the specified data table + var buf bytes.Buffer + changed := false // fast path: if this remains false, keep s intact + prevPos := 0 + var replacement string + for i, r := range s { + if changed && replacement == "" { + buf.WriteString(s[prevPos:i]) + } + prevPos = i + replacement = lookup(r) + if replacement != "" { + if !changed { + changed = true + // first replacement: copy over the previously unmodified text + buf.WriteString(s[:i]) + } + buf.WriteString(replacement) + } + } + if changed && replacement == "" { + buf.WriteString(s[prevPos:]) // loop-and-a-half + } + if changed { + s = buf.String() + } + + // 3. 
Reapplying NFD + s = norm.NFD.String(s) + + return s +} + +// Skeleton converts a string to its "skeleton" form +// as described in http://www.unicode.org/reports/tr39/#Confusable_Detection +func Skeleton(s string) string { + return skeletonBase(s, lookupReplacement) +} + +// SkeletonTweaked is like Skeleton, but it implements some custom overrides +// to the confusables table (currently it removes the m -> rn mapping): +func SkeletonTweaked(s string) string { + return skeletonBase(s, lookupReplacementTweaked) +} + +func Confusable(x, y string) bool { + return Skeleton(x) == Skeleton(y) +} diff --git a/vendor/github.com/oragono/confusables/tables.go b/vendor/github.com/oragono/confusables/tables.go new file mode 100644 index 00000000..0a1cf30b --- /dev/null +++ b/vendor/github.com/oragono/confusables/tables.go @@ -0,0 +1,6317 @@ +// This file was generated by go generate; DO NOT EDIT + +package confusables + +// Following is the original header of the source confusables.txt file +// +// confusables.txt +// Date: 2018-05-25, 00:12:52 GMT +// © 2018 Unicode®, Inc. +// Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries. +// For terms of use, see http://www.unicode.org/terms_of_use.html +// +// Unicode Security Mechanisms for UTS #39 +// Version: 11.0.0 +// +// For documentation and usage, see http://www.unicode.org/reports/tr39 +// + +var confusablesMap = map[rune]string{ + + 0x000005AD: "\u0596", + 0x000005AE: "\u0598", + 0x000005A8: "\u0599", + 0x000005A4: "\u059a", + 0x00001AB4: "\u06db", + 0x000020DB: "\u06db", + 0x00000619: "\u0313", + 0x000008F3: "\u0313", + 0x00000343: "\u0313", + 0x00000315: "\u0313", + 0x0000064F: "\u0313", + 0x0000065D: "\u0314", + 0x0000059C: "\u0301", + 0x0000059D: "\u0301", + 0x00000618: "\u0301", + 0x00000747: "\u0301", + 0x00000341: "\u0301", + 0x00000954: "\u0301", + 0x0000064E: "\u0301", + 0x00000340: "\u0300", + 0x00000953: "\u0300", + 0x0000030C: "\u0306", + 0x0000A67C: "\u0306", + 0x00000658: "\u0306", + 0x0000065A: "\u0306", + 0x0000036E: "\u0306", + 0x000006E8: "\u0306\u0307", + 0x00000310: "\u0306\u0307", + 0x00000901: "\u0306\u0307", + 0x00000981: "\u0306\u0307", + 0x00000A81: "\u0306\u0307", + 0x00000B01: "\u0306\u0307", + 0x00000C00: "\u0306\u0307", + 0x00000C81: "\u0306\u0307", + 0x00000D01: "\u0306\u0307", + 0x000114BF: "\u0306\u0307", + 0x00001CD0: "\u0302", + 0x00000311: "\u0302", + 0x0000065B: "\u0302", + 0x000007EE: "\u0302", + 0x0000A6F0: "\u0302", + 0x000005AF: "\u030a", + 0x000006DF: "\u030a", + 0x000017D3: "\u030a", + 0x0000309A: "\u030a", + 0x00000652: "\u030a", + 0x00000B82: "\u030a", + 0x00001036: "\u030a", + 0x000017C6: "\u030a", + 0x00011300: "\u030a", + 0x00000E4D: "\u030a", + 0x00000ECD: "\u030a", + 0x00000366: "\u030a", + 0x00002DEA: "\u030a", + 0x000008EB: "\u0308", + 0x000007F3: "\u0308", + 0x0000064B: "\u030b", + 0x000008F0: "\u030b", + 0x00000342: "\u0303", + 0x00000653: "\u0303", + 0x000005C4: "\u0307", + 0x000006EC: "\u0307", + 0x00000740: "\u0307", + 0x000008EA: "\u0307", + 0x00000741: "\u0307", + 0x00000358: "\u0307", + 0x000005B9: "\u0307", + 0x000005BA: "\u0307", + 0x000005C2: "\u0307", + 0x000005C1: "\u0307", + 0x000007ED: "\u0307", + 0x00000902: "\u0307", + 0x00000A02: "\u0307", + 0x00000A82: "\u0307", + 0x00000BCD: "\u0307", + 0x00000337: "\u0338", + 0x00001AB7: "\u0328", + 0x00000322: "\u0328", + 0x00000345: "\u0328", + 0x00001CD2: "\u0304", + 0x00000305: "\u0304", + 0x00000659: "\u0304", + 0x000007EB: "\u0304", + 0x0000A6F1: "\u0304", + 0x00001CDA: "\u030e", + 
0x00000657: "\u0312", + 0x00000357: "\u0350", + 0x000008FF: "\u0350", + 0x000008F8: "\u0350", + 0x00000900: "\u0352", + 0x00001CED: "\u0316", + 0x00001CDC: "\u0329", + 0x00000656: "\u0329", + 0x00001CD5: "\u032b", + 0x00000347: "\u0333", + 0x000008F9: "\u0354", + 0x000008FA: "\u0355", + 0x0000309B: "\uff9e", + 0x0000309C: "\uff9f", + 0x00000336: "\u0335", + 0x0000302C: "\u0309", + 0x000005C5: "\u0323", + 0x000008ED: "\u0323", + 0x00001CDD: "\u0323", + 0x000005B4: "\u0323", + 0x0000065C: "\u0323", + 0x0000093C: "\u0323", + 0x000009BC: "\u0323", + 0x00000A3C: "\u0323", + 0x00000ABC: "\u0323", + 0x00000B3C: "\u0323", + 0x000111CA: "\u0323", + 0x000114C3: "\u0323", + 0x00010A3A: "\u0323", + 0x000008EE: "\u0324", + 0x00001CDE: "\u0324", + 0x00000F37: "\u0325", + 0x0000302D: "\u0325", + 0x00000327: "\u0326", + 0x00000321: "\u0326", + 0x00000339: "\u0326", + 0x00001CD9: "\u032d", + 0x00001CD8: "\u032e", + 0x00000952: "\u0331", + 0x00000320: "\u0331", + 0x000008F1: "\u064c", + 0x000008E8: "\u064c", + 0x000008E5: "\u064c", + 0x0000FC5E: "\ufe72\u0651", + 0x000008F2: "\u064d", + 0x0000FC5F: "\ufe74\u0651", + 0x0000FCF2: "\ufe77\u0651", + 0x0000FC60: "\ufe76\u0651", + 0x0000FCF3: "\ufe79\u0651", + 0x0000FC61: "\ufe78\u0651", + 0x0000061A: "\u0650", + 0x00000317: "\u0650", + 0x0000FCF4: "\ufe7b\u0651", + 0x0000FC62: "\ufe7a\u0651", + 0x0000FC63: "\ufe7c\u0670", + 0x0000065F: "\u0655", + 0x0000030D: "\u0670", + 0x00000742: "\u073c", + 0x00000A03: "\u0983", + 0x00000C03: "\u0983", + 0x00000C83: "\u0983", + 0x00000D03: "\u0983", + 0x00000D83: "\u0983", + 0x00001038: "\u0983", + 0x000114C1: "\u0983", + 0x000017CB: "\u0e48", + 0x00000EC8: "\u0e48", + 0x00000EC9: "\u0e49", + 0x00000ECA: "\u0e4a", + 0x00000ECB: "\u0e4b", + 0x0000A66F: "\u20e9", + 0x00002028: " ", + 0x00002029: " ", + 0x00001680: " ", + 0x00002000: " ", + 0x00002001: " ", + 0x00002002: " ", + 0x00002003: " ", + 0x00002004: " ", + 0x00002005: " ", + 0x00002006: " ", + 0x00002008: " ", + 0x00002009: " ", + 0x0000200A: " ", + 0x0000205F: " ", + 0x000000A0: " ", + 0x00002007: " ", + 0x0000202F: " ", + 0x000007FA: "_", + 0x0000FE4D: "_", + 0x0000FE4E: "_", + 0x0000FE4F: "_", + 0x00002010: "-", + 0x00002011: "-", + 0x00002012: "-", + 0x00002013: "-", + 0x0000FE58: "-", + 0x000006D4: "-", + 0x00002043: "-", + 0x000002D7: "-", + 0x00002212: "-", + 0x00002796: "-", + 0x00002CBA: "-", + 0x00002A29: "-\u0313", + 0x00002E1A: "-\u0308", + 0x0000FB29: "-\u0307", + 0x00002238: "-\u0307", + 0x00002A2A: "-\u0323", + 0x0000A4FE: "-.", + 0x0000FF5E: "\u301c", + 0x0000060D: ",", + 0x0000066B: ",", + 0x0000201A: ",", + 0x000000B8: ",", + 0x0000A4F9: ",", + 0x00002E32: "\u060c", + 0x0000066C: "\u060c", + 0x0000037E: ";", + 0x00002E35: "\u061b", + 0x00000903: ":", + 0x00000A83: ":", + 0x0000FF1A: ":", + 0x00000589: ":", + 0x00000703: ":", + 0x00000704: ":", + 0x000016EC: ":", + 0x0000FE30: ":", + 0x00001803: ":", + 0x00001809: ":", + 0x0000205A: ":", + 0x000005C3: ":", + 0x000002F8: ":", + 0x0000A789: ":", + 0x00002236: ":", + 0x000002D0: ":", + 0x0000A4FD: ":", + 0x00002A74: "::=", + 0x000029F4: ":\u2192", + 0x0000FF01: "!", + 0x000001C3: "!", + 0x00002D51: "!", + 0x0000203C: "!!", + 0x00002049: "!?", + 0x00000294: "?", + 0x00000241: "?", + 0x0000097D: "?", + 0x000013AE: "?", + 0x0000A6EB: "?", + 0x00002048: "?!", + 0x00002047: "??", + 0x00002E2E: "\u061f", + 0x0001D16D: ".", + 0x00002024: ".", + 0x00000701: ".", + 0x00000702: ".", + 0x0000A60E: ".", + 0x00010A50: ".", + 0x00000660: ".", + 0x000006F0: ".", + 0x0000A4F8: ".", + 0x0000A4FB: ".,", + 0x00002025: "..", 
+ 0x0000A4FA: "..", + 0x00002026: "...", + 0x0000A6F4: "\ua6f3\ua6f3", + 0x000030FB: "\u00b7", + 0x0000FF65: "\u00b7", + 0x000016EB: "\u00b7", + 0x00000387: "\u00b7", + 0x00002E31: "\u00b7", + 0x00010101: "\u00b7", + 0x00002022: "\u00b7", + 0x00002027: "\u00b7", + 0x00002219: "\u00b7", + 0x000022C5: "\u00b7", + 0x0000A78F: "\u00b7", + 0x00001427: "\u00b7", + 0x000022EF: "\u00b7\u00b7\u00b7", + 0x00002D48: "\u00b7\u00b7\u00b7", + 0x00001444: "\u00b7<", + 0x000022D7: "\u00b7>", + 0x00001437: "\u00b7>", + 0x00001440: "\u00b7>", + 0x0000152F: "\u00b74", + 0x0000147E: "\u00b7b", + 0x00001480: "\u00b7b\u0307", + 0x0000147A: "\u00b7d", + 0x00001498: "\u00b7J", + 0x000014B6: "\u00b7L", + 0x00001476: "\u00b7P", + 0x00001457: "\u00b7U", + 0x0000143A: "\u00b7V", + 0x0000143C: "\u00b7\u0245", + 0x000014AE: "\u00b7\u0393", + 0x0000140E: "\u00b7\u0394", + 0x00001459: "\u00b7\u0548", + 0x0000140C: "\u00b7\u1401", + 0x00001410: "\u00b7\u1404", + 0x00001412: "\u00b7\u1405", + 0x00001414: "\u00b7\u1406", + 0x00001417: "\u00b7\u140a", + 0x00001419: "\u00b7\u140b", + 0x0000143E: "\u00b7\u1432", + 0x00001442: "\u00b7\u1434", + 0x00001446: "\u00b7\u1439", + 0x0000145B: "\u00b7\u144f", + 0x00001454: "\u00b7\u1450", + 0x0000145D: "\u00b7\u1450", + 0x0000145F: "\u00b7\u1451", + 0x00001461: "\u00b7\u1455", + 0x00001463: "\u00b7\u1456", + 0x00001474: "\u00b7\u146b", + 0x00001478: "\u00b7\u146e", + 0x0000147C: "\u00b7\u1470", + 0x00001492: "\u00b7\u1489", + 0x00001494: "\u00b7\u148b", + 0x00001496: "\u00b7\u148c", + 0x0000149A: "\u00b7\u148e", + 0x0000149C: "\u00b7\u1490", + 0x0000149E: "\u00b7\u1491", + 0x000014AC: "\u00b7\u14a3", + 0x000014B0: "\u00b7\u14a6", + 0x000014B2: "\u00b7\u14a7", + 0x000014B4: "\u00b7\u14a8", + 0x000014B8: "\u00b7\u14ab", + 0x000014C9: "\u00b7\u14c0", + 0x000018C6: "\u00b7\u14c2", + 0x000018C8: "\u00b7\u14c3", + 0x000018CA: "\u00b7\u14c4", + 0x000018CC: "\u00b7\u14c5", + 0x000014CB: "\u00b7\u14c7", + 0x000014CD: "\u00b7\u14c8", + 0x000014DC: "\u00b7\u14d3", + 0x000014DE: "\u00b7\u14d5", + 0x000014E0: "\u00b7\u14d6", + 0x000014E2: "\u00b7\u14d7", + 0x000014E4: "\u00b7\u14d8", + 0x000014E6: "\u00b7\u14da", + 0x000014E8: "\u00b7\u14db", + 0x000014F6: "\u00b7\u14ed", + 0x000014F8: "\u00b7\u14ef", + 0x000014FA: "\u00b7\u14f0", + 0x000014FC: "\u00b7\u14f1", + 0x000014FE: "\u00b7\u14f2", + 0x00001500: "\u00b7\u14f4", + 0x00001502: "\u00b7\u14f5", + 0x00001517: "\u00b7\u1510", + 0x00001519: "\u00b7\u1511", + 0x0000151B: "\u00b7\u1512", + 0x0000151D: "\u00b7\u1513", + 0x0000151F: "\u00b7\u1514", + 0x00001521: "\u00b7\u1515", + 0x00001523: "\u00b7\u1516", + 0x00001531: "\u00b7\u1528", + 0x00001533: "\u00b7\u1529", + 0x00001535: "\u00b7\u152a", + 0x00001537: "\u00b7\u152b", + 0x00001539: "\u00b7\u152d", + 0x0000153B: "\u00b7\u152e", + 0x000018CE: "\u00b7\u1543", + 0x000018CF: "\u00b7\u1546", + 0x000018D0: "\u00b7\u1547", + 0x000018D1: "\u00b7\u1548", + 0x000018D2: "\u00b7\u1549", + 0x000018D3: "\u00b7\u154b", + 0x0000154E: "\u00b7\u154c", + 0x0000155B: "\u00b7\u155a", + 0x00001568: "\u00b7\u1567", + 0x000018B3: "\u00b7\u18b1", + 0x000018B6: "\u00b7\u18b4", + 0x000018B9: "\u00b7\u18b8", + 0x000018C2: "\u00b7\u18c0", + 0x0000A830: "\u0964", + 0x00000965: "\u0964\u0964", + 0x00001C3C: "\u1c3b\u1c3b", + 0x0000104B: "\u104a\u104a", + 0x00001AA9: "\u1aa8\u1aa8", + 0x00001AAB: "\u1aaa\u1aa8", + 0x00001B5F: "\u1b5e\u1b5e", + 0x00010A57: "\U00010a56\U00010a56", + 0x0001144C: "\U0001144b\U0001144b", + 0x00011642: "\U00011641\U00011641", + 0x00011C42: "\U00011c41\U00011c41", + 0x00001C7F: "\u1c7e\u1c7e", + 
0x0000055D: "'", + 0x0000FF07: "'", + 0x00002018: "'", + 0x00002019: "'", + 0x0000201B: "'", + 0x00002032: "'", + 0x00002035: "'", + 0x0000055A: "'", + 0x000005F3: "'", + 0x00000060: "'", + 0x00001FEF: "'", + 0x0000FF40: "'", + 0x000000B4: "'", + 0x00000384: "'", + 0x00001FFD: "'", + 0x00001FBD: "'", + 0x00001FBF: "'", + 0x00001FFE: "'", + 0x000002B9: "'", + 0x00000374: "'", + 0x000002C8: "'", + 0x000002CA: "'", + 0x000002CB: "'", + 0x000002F4: "'", + 0x000002BB: "'", + 0x000002BD: "'", + 0x000002BC: "'", + 0x000002BE: "'", + 0x0000A78C: "'", + 0x000005D9: "'", + 0x000007F4: "'", + 0x000007F5: "'", + 0x0000144A: "'", + 0x000016CC: "'", + 0x00016F51: "'", + 0x00016F52: "'", + 0x00001CD3: "''", + 0x00000022: "''", + 0x0000FF02: "''", + 0x0000201C: "''", + 0x0000201D: "''", + 0x0000201F: "''", + 0x00002033: "''", + 0x00002036: "''", + 0x00003003: "''", + 0x000005F4: "''", + 0x000002DD: "''", + 0x000002BA: "''", + 0x000002F6: "''", + 0x000002EE: "''", + 0x000005F2: "''", + 0x00002034: "'''", + 0x00002037: "'''", + 0x00002057: "''''", + 0x00000181: "'B", + 0x0000018A: "'D", + 0x00000149: "'n", + 0x000001A4: "'P", + 0x000001AC: "'T", + 0x000001B3: "'Y", + 0x0000FF3B: "(", + 0x00002768: "(", + 0x00002772: "(", + 0x00003014: "(", + 0x0000FD3E: "(", + 0x00002E28: "((", + 0x00003220: "(\u30fc)", + 0x00002475: "(2)", + 0x00002487: "(2O)", + 0x00002476: "(3)", + 0x00002477: "(4)", + 0x00002478: "(5)", + 0x00002479: "(6)", + 0x0000247A: "(7)", + 0x0000247B: "(8)", + 0x0000247C: "(9)", + 0x0000249C: "(a)", + 0x0001F110: "(A)", + 0x0000249D: "(b)", + 0x0001F111: "(B)", + 0x0000249E: "(c)", + 0x0001F112: "(C)", + 0x0000249F: "(d)", + 0x0001F113: "(D)", + 0x000024A0: "(e)", + 0x0001F114: "(E)", + 0x000024A1: "(f)", + 0x0001F115: "(F)", + 0x000024A2: "(g)", + 0x0001F116: "(G)", + 0x000024A3: "(h)", + 0x0001F117: "(H)", + 0x000024A4: "(i)", + 0x000024A5: "(j)", + 0x0001F119: "(J)", + 0x000024A6: "(k)", + 0x0001F11A: "(K)", + 0x00002474: "(l)", + 0x0001F118: "(l)", + 0x000024A7: "(l)", + 0x0001F11B: "(L)", + 0x0000247F: "(l2)", + 0x00002480: "(l3)", + 0x00002481: "(l4)", + 0x00002482: "(l5)", + 0x00002483: "(l6)", + 0x00002484: "(l7)", + 0x00002485: "(l8)", + 0x00002486: "(l9)", + 0x0000247E: "(ll)", + 0x0000247D: "(lO)", + 0x0001F11C: "(M)", + 0x000024A9: "(n)", + 0x0001F11D: "(N)", + 0x000024AA: "(o)", + 0x0001F11E: "(O)", + 0x000024AB: "(p)", + 0x0001F11F: "(P)", + 0x000024AC: "(q)", + 0x0001F120: "(Q)", + 0x000024AD: "(r)", + 0x0001F121: "(R)", + 0x000024A8: "(rn)", + 0x000024AE: "(s)", + 0x0001F122: "(S)", + 0x0001F12A: "(S)", + 0x000024AF: "(t)", + 0x0001F123: "(T)", + 0x000024B0: "(u)", + 0x0001F124: "(U)", + 0x000024B1: "(v)", + 0x0001F125: "(V)", + 0x000024B2: "(w)", + 0x0001F126: "(W)", + 0x000024B3: "(x)", + 0x0001F127: "(X)", + 0x000024B4: "(y)", + 0x0001F128: "(Y)", + 0x000024B5: "(z)", + 0x0001F129: "(Z)", + 0x00003200: "(\u1100)", + 0x0000320E: "(\uac00)", + 0x00003201: "(\u1102)", + 0x0000320F: "(\ub098)", + 0x00003202: "(\u1103)", + 0x00003210: "(\ub2e4)", + 0x00003203: "(\u1105)", + 0x00003211: "(\ub77c)", + 0x00003204: "(\u1106)", + 0x00003212: "(\ub9c8)", + 0x00003205: "(\u1107)", + 0x00003213: "(\ubc14)", + 0x00003206: "(\u1109)", + 0x00003214: "(\uc0ac)", + 0x00003207: "(\u110b)", + 0x00003215: "(\uc544)", + 0x0000321D: "(\uc624\uc804)", + 0x0000321E: "(\uc624\ud6c4)", + 0x00003208: "(\u110c)", + 0x00003216: "(\uc790)", + 0x0000321C: "(\uc8fc)", + 0x00003209: "(\u110e)", + 0x00003217: "(\ucc28)", + 0x0000320A: "(\u110f)", + 0x00003218: "(\uce74)", + 0x0000320B: "(\u1110)", + 0x00003219: 
"(\ud0c0)", + 0x0000320C: "(\u1111)", + 0x0000321A: "(\ud30c)", + 0x0000320D: "(\u1112)", + 0x0000321B: "(\ud558)", + 0x00003226: "(\u4e03)", + 0x00003222: "(\u4e09)", + 0x0001F241: "(\u4e09)", + 0x00003228: "(\u4e5d)", + 0x00003221: "(\u4e8c)", + 0x0001F242: "(\u4e8c)", + 0x00003224: "(\u4e94)", + 0x00003239: "(\u4ee3)", + 0x0000323D: "(\u4f01)", + 0x00003241: "(\u4f11)", + 0x00003227: "(\u516b)", + 0x00003225: "(\u516d)", + 0x00003238: "(\u52b4)", + 0x0001F247: "(\u52dd)", + 0x00003229: "(\u5341)", + 0x0000323F: "(\u5354)", + 0x00003234: "(\u540d)", + 0x0000323A: "(\u547c)", + 0x00003223: "(\u56db)", + 0x0000322F: "(\u571f)", + 0x0000323B: "(\u5b66)", + 0x0001F243: "(\u5b89)", + 0x0001F245: "(\u6253)", + 0x0001F248: "(\u6557)", + 0x00003230: "(\u65e5)", + 0x0000322A: "(\u6708)", + 0x00003232: "(\u6709)", + 0x0000322D: "(\u6728)", + 0x0001F240: "(\u672c)", + 0x00003231: "(\u682a)", + 0x0000322C: "(\u6c34)", + 0x0000322B: "(\u706b)", + 0x0001F244: "(\u70b9)", + 0x00003235: "(\u7279)", + 0x0001F246: "(\u76d7)", + 0x0000323C: "(\u76e3)", + 0x00003233: "(\u793e)", + 0x00003237: "(\u795d)", + 0x00003240: "(\u796d)", + 0x00003242: "(\u81ea)", + 0x00003243: "(\u81f3)", + 0x00003236: "(\u8ca1)", + 0x0000323E: "(\u8cc7)", + 0x0000322E: "(\u91d1)", + 0x0000FF3D: ")", + 0x00002769: ")", + 0x00002773: ")", + 0x00003015: ")", + 0x0000FD3F: ")", + 0x00002E29: "))", + 0x00002774: "{", + 0x0001D114: "{", + 0x00002775: "}", + 0x0000301A: "\u27e6", + 0x0000301B: "\u27e7", + 0x000027E8: "\u276c", + 0x00002329: "\u276c", + 0x00003008: "\u276c", + 0x000031DB: "\u276c", + 0x0000304F: "\u276c", + 0x00021FE8: "\u276c", + 0x000027E9: "\u276d", + 0x0000232A: "\u276d", + 0x00003009: "\u276d", + 0x0000FF3E: "\ufe3f", + 0x00002E3F: "\u00b6", + 0x0000204E: "*", + 0x0000066D: "*", + 0x00002217: "*", + 0x0001031F: "*", + 0x00001735: "/", + 0x00002041: "/", + 0x00002215: "/", + 0x00002044: "/", + 0x00002571: "/", + 0x000027CB: "/", + 0x000029F8: "/", + 0x0001D23A: "/", + 0x000031D3: "/", + 0x00003033: "/", + 0x00002CC6: "/", + 0x000030CE: "/", + 0x00004E3F: "/", + 0x00002F03: "/", + 0x000029F6: "/\u0304", + 0x00002AFD: "//", + 0x00002AFB: "///", + 0x0000FF3C: "\\", + 0x0000FE68: "\\", + 0x00002216: "\\", + 0x000027CD: "\\", + 0x000029F5: "\\", + 0x000029F9: "\\", + 0x0001D20F: "\\", + 0x0001D23B: "\\", + 0x000031D4: "\\", + 0x00004E36: "\\", + 0x00002F02: "\\", + 0x00002CF9: "\\\\", + 0x0000244A: "\\\\", + 0x000027C8: "\\\u1455", + 0x0000A778: "&", + 0x00000AF0: "\u0970", + 0x000110BB: "\u0970", + 0x000111C7: "\u0970", + 0x000026AC: "\u0970", + 0x000111DB: "\ua8fc", + 0x000017D9: "\u0e4f", + 0x000017D5: "\u0e5a", + 0x000017DA: "\u0e5b", + 0x00000F0C: "\u0f0b", + 0x00000F0E: "\u0f0d\u0f0d", + 0x000002C4: "^", + 0x000002C6: "^", + 0x0000A67E: "\u02c7", + 0x000002D8: "\u02c7", + 0x0000203E: "\u02c9", + 0x0000FE49: "\u02c9", + 0x0000FE4A: "\u02c9", + 0x0000FE4B: "\u02c9", + 0x0000FE4C: "\u02c9", + 0x000000AF: "\u02c9", + 0x0000FFE3: "\u02c9", + 0x00002594: "\u02c9", + 0x0000044A: "\u02c9b", + 0x0000A651: "\u02c9bi", + 0x00000375: "\u02cf", + 0x000002FB: "\u02ea", + 0x0000A716: "\u02ea", + 0x0000A714: "\u02eb", + 0x00003002: "\u02f3", + 0x00002E30: "\u00b0", + 0x000002DA: "\u00b0", + 0x00002218: "\u00b0", + 0x000025CB: "\u00b0", + 0x000025E6: "\u00b0", + 0x0000235C: "\u00b0\u0332", + 0x00002364: "\u00b0\u0308", + 0x00002103: "\u00b0C", + 0x00002109: "\u00b0F", + 0x00000BF5: "\u0bf3", + 0x00000F1B: "\u0f1a\u0f1a", + 0x00000F1F: "\u0f1a\u0f1d", + 0x00000FCE: "\u0f1d\u0f1a", + 0x00000F1E: "\u0f1d\u0f1d", + 0x000024B8: "\u00a9", 
+ 0x000024C7: "\u00ae", + 0x000024C5: "\u2117", + 0x0001D21B: "\u2144", + 0x00002BEC: "\u219e", + 0x00002BED: "\u219f", + 0x00002BEE: "\u21a0", + 0x00002BEF: "\u21a1", + 0x000021B5: "\u21b2", + 0x00002965: "\u21c3\u21c2", + 0x0000296F: "\u21c3\u16da", + 0x0001D6DB: "\u2202", + 0x0001D715: "\u2202", + 0x0001D74F: "\u2202", + 0x0001D789: "\u2202", + 0x0001D7C3: "\u2202", + 0x0001E8CC: "\u2202", + 0x0001E8CD: "\u2202\u0335", + 0x000000F0: "\u2202\u0335", + 0x00002300: "\u2205", + 0x0001D6C1: "\u2207", + 0x0001D6FB: "\u2207", + 0x0001D735: "\u2207", + 0x0001D76F: "\u2207", + 0x0001D7A9: "\u2207", + 0x000118A8: "\u2207", + 0x00002362: "\u2207\u0308", + 0x0000236B: "\u2207\u0334", + 0x00002588: "\u220e", + 0x000025A0: "\u220e", + 0x00002A3F: "\u2210", + 0x000016ED: "+", + 0x00002795: "+", + 0x0001029B: "+", + 0x00002A23: "+\u0302", + 0x00002A22: "+\u030a", + 0x00002A24: "+\u0303", + 0x00002214: "+\u0307", + 0x00002A25: "+\u0323", + 0x00002A26: "+\u0330", + 0x00002A27: "+\u2082", + 0x00002797: "\u00f7", + 0x00002039: "<", + 0x0000276E: "<", + 0x000002C2: "<", + 0x0001D236: "<", + 0x00001438: "<", + 0x000016B2: "<", + 0x000022D6: "<\u00b7", + 0x00002CB4: "<\u00b7", + 0x00001445: "<\u00b7", + 0x0000226A: "<<", + 0x000022D8: "<<<", + 0x00001400: "=", + 0x00002E40: "=", + 0x000030A0: "=", + 0x0000A4FF: "=", + 0x0000225A: "=\u0306", + 0x00002259: "=\u0302", + 0x00002257: "=\u030a", + 0x00002250: "=\u0307", + 0x00002251: "=\u0307\u0323", + 0x00002A6E: "=\u20f0", + 0x00002A75: "==", + 0x00002A76: "===", + 0x0000225E: "=\u036b", + 0x0000203A: ">", + 0x0000276F: ">", + 0x000002C3: ">", + 0x0001D237: ">", + 0x00001433: ">", + 0x00016F3F: ">", + 0x00001441: ">\u00b7", + 0x00002AA5: "><", + 0x0000226B: ">>", + 0x00002A20: ">>", + 0x000022D9: ">>>", + 0x00002053: "~", + 0x000002DC: "~", + 0x00001FC0: "~", + 0x0000223C: "~", + 0x00002368: "~\u0308", + 0x00002E1E: "~\u0307", + 0x00002A6A: "~\u0307", + 0x00002E1F: "~\u0323", + 0x0001E8C8: "\u2220", + 0x000022C0: "\u2227", + 0x0000222F: "\u222e\u222e", + 0x00002230: "\u222e\u222e\u222e", + 0x00002E2B: "\u2234", + 0x00002E2A: "\u2235", + 0x00002E2C: "\u2237", + 0x000111DE: "\u2248", + 0x0000264E: "\u224f", + 0x0001F75E: "\u224f", + 0x00002263: "\u2261", + 0x00002A03: "\u228d", + 0x00002A04: "\u228e", + 0x0001D238: "\u228f", + 0x0001D239: "\u2290", + 0x00002A05: "\u2293", + 0x00002A06: "\u2294", + 0x00002A02: "\u2297", + 0x0000235F: "\u229b", + 0x0001F771: "\u22a0", + 0x0001F755: "\u22a1", + 0x000025C1: "\u22b2", + 0x000025B7: "\u22b3", + 0x00002363: "\u22c6\u0308", + 0x0000FE34: "\u2307", + 0x000025E0: "\u2312", + 0x00002A3D: "\u2319", + 0x00002325: "\u2324", + 0x000029C7: "\u233b", + 0x000025CE: "\u233e", + 0x000029BE: "\u233e", + 0x000029C5: "\u2342", + 0x000029B0: "\u2349", + 0x000023C3: "\u234b", + 0x000023C2: "\u234e", + 0x000023C1: "\u2355", + 0x000023C6: "\u236d", + 0x00002638: "\u2388", + 0x0000FE35: "\u23dc", + 0x0000FE36: "\u23dd", + 0x0000FE37: "\u23de", + 0x0000FE38: "\u23df", + 0x0000FE39: "\u23e0", + 0x0000FE3A: "\u23e1", + 0x000025B1: "\u23e5", + 0x000023FC: "\u23fb", + 0x0000FE31: "\u2502", + 0x0000FF5C: "\u2502", + 0x00002503: "\u2502", + 0x0000250F: "\u250c", + 0x00002523: "\u251c", + 0x00002590: "\u258c", + 0x00002597: "\u2596", + 0x0000259D: "\u2598", + 0x00002610: "\u25a1", + 0x0000FFED: "\u25aa", + 0x000025B8: "\u25b6", + 0x000025BA: "\u25b6", + 0x00002CE9: "\u2627", + 0x0001F70A: "\u2629", + 0x0001F312: "\u263d", + 0x0001F319: "\u263d", + 0x000023FE: "\u263e", + 0x0001F318: "\u263e", + 0x000029D9: "\u299a", + 0x0001F73A: "\u29df", + 
0x00002A3E: "\u2a1f", + 0x000101A0: "\u2ce8", + 0x00002669: "\U0001d158\U0001d165", + 0x0000266A: "\U0001d158\U0001d165\U0001d16e", + 0x000002D9: "\u0971", + 0x00000D4E: "\u0971", + 0x0000FF0D: "\u30fc", + 0x00002014: "\u30fc", + 0x00002015: "\u30fc", + 0x00002500: "\u30fc", + 0x00002501: "\u30fc", + 0x000031D0: "\u30fc", + 0x0000A7F7: "\u30fc", + 0x00001173: "\u30fc", + 0x00003161: "\u30fc", + 0x00004E00: "\u30fc", + 0x00002F00: "\u30fc", + 0x00001196: "\u30fc\u30fc", + 0x0000D7B9: "\u30fc\u1161", + 0x0000D7BA: "\u30fc\u1165", + 0x0000D7BB: "\u30fc\u1165\u4e28", + 0x0000D7BC: "\u30fc\u1169", + 0x00001195: "\u30fc\u116e", + 0x00001174: "\u30fc\u4e28", + 0x00003162: "\u30fc\u4e28", + 0x00001197: "\u30fc\u4e28\u116e", + 0x000020A4: "\u00a3", + 0x00003012: "\u20b8", + 0x00003036: "\u20b8", + 0x00001B5C: "\u1b50", + 0x0000A9C6: "\ua9d0", + 0x000114D1: "\u09e7", + 0x00000CE7: "\u0c67", + 0x00001065: "\u1041", + 0x00002460: "\u2780", + 0x00002469: "\u2789", + 0x000023E8: "\u2081\u2080", + 0x0001D7D0: "2", + 0x0001D7DA: "2", + 0x0001D7E4: "2", + 0x0001D7EE: "2", + 0x0001D7F8: "2", + 0x0000A75A: "2", + 0x000001A7: "2", + 0x000003E8: "2", + 0x0000A644: "2", + 0x000014BF: "2", + 0x0000A6EF: "2", + 0x0000A9CF: "\u0662", + 0x000006F2: "\u0662", + 0x00000AE8: "\u0968", + 0x000114D2: "\u09e8", + 0x00000CE8: "\u0c68", + 0x00002461: "\u2781", + 0x000001BB: "2\u0335", + 0x0001F103: "2,", + 0x00002489: "2.", + 0x000033F5: "22\u65e5", + 0x0000336E: "22\u70b9", + 0x000033F6: "23\u65e5", + 0x0000336F: "23\u70b9", + 0x000033F7: "24\u65e5", + 0x00003370: "24\u70b9", + 0x000033F8: "25\u65e5", + 0x000033F9: "26\u65e5", + 0x000033FA: "27\u65e5", + 0x000033FB: "28\u65e5", + 0x000033FC: "29\u65e5", + 0x000033F4: "2l\u65e5", + 0x0000336D: "2l\u70b9", + 0x0000249B: "2O.", + 0x000033F3: "2O\u65e5", + 0x0000336C: "2O\u70b9", + 0x00000DE9: "\u0de8\u0dcf", + 0x00000DEF: "\u0de8\u0dd3", + 0x000033E1: "2\u65e5", + 0x000032C1: "2\u6708", + 0x0000335A: "2\u70b9", + 0x0001D206: "3", + 0x0001D7D1: "3", + 0x0001D7DB: "3", + 0x0001D7E5: "3", + 0x0001D7EF: "3", + 0x0001D7F9: "3", + 0x0000A7AB: "3", + 0x0000021C: "3", + 0x000001B7: "3", + 0x0000A76A: "3", + 0x00002CCC: "3", + 0x00000417: "3", + 0x000004E0: "3", + 0x00016F3B: "3", + 0x000118CA: "3", + 0x000006F3: "\u0663", + 0x0001E8C9: "\u0663", + 0x00000AE9: "\u0969", + 0x00002462: "\u2782", + 0x00000498: "3\u0326", + 0x0001F104: "3,", + 0x0000248A: "3.", + 0x000033FE: "3l\u65e5", + 0x000033FD: "3O\u65e5", + 0x000033E2: "3\u65e5", + 0x000032C2: "3\u6708", + 0x0000335B: "3\u70b9", + 0x0001D7D2: "4", + 0x0001D7DC: "4", + 0x0001D7E6: "4", + 0x0001D7F0: "4", + 0x0001D7FA: "4", + 0x000013CE: "4", + 0x000118AF: "4", + 0x000006F4: "\u0664", + 0x00000AEA: "\u096a", + 0x00002463: "\u2783", + 0x0001F105: "4,", + 0x0000248B: "4.", + 0x00001530: "4\u00b7", + 0x000033E3: "4\u65e5", + 0x000032C3: "4\u6708", + 0x0000335C: "4\u70b9", + 0x0001D7D3: "5", + 0x0001D7DD: "5", + 0x0001D7E7: "5", + 0x0001D7F1: "5", + 0x0001D7FB: "5", + 0x000001BC: "5", + 0x000118BB: "5", + 0x00002464: "\u2784", + 0x0001F106: "5,", + 0x0000248C: "5.", + 0x000033E4: "5\u65e5", + 0x000032C4: "5\u6708", + 0x0000335D: "5\u70b9", + 0x0001D7D4: "6", + 0x0001D7DE: "6", + 0x0001D7E8: "6", + 0x0001D7F2: "6", + 0x0001D7FC: "6", + 0x00002CD2: "6", + 0x00000431: "6", + 0x000013EE: "6", + 0x000118D5: "6", + 0x000006F6: "\u0666", + 0x000114D6: "\u09ec", + 0x00002465: "\u2785", + 0x0001F107: "6,", + 0x0000248D: "6.", + 0x000033E5: "6\u65e5", + 0x000032C5: "6\u6708", + 0x0000335E: "6\u70b9", + 0x0001D212: "7", + 0x0001D7D5: "7", + 
0x0001D7DF: "7", + 0x0001D7E9: "7", + 0x0001D7F3: "7", + 0x0001D7FD: "7", + 0x000104D2: "7", + 0x000118C6: "7", + 0x00002466: "\u2786", + 0x0001F108: "7,", + 0x0000248E: "7.", + 0x000033E6: "7\u65e5", + 0x000032C6: "7\u6708", + 0x0000335F: "7\u70b9", + 0x00000B03: "8", + 0x000009EA: "8", + 0x00000A6A: "8", + 0x0001E8CB: "8", + 0x0001D7D6: "8", + 0x0001D7E0: "8", + 0x0001D7EA: "8", + 0x0001D7F4: "8", + 0x0001D7FE: "8", + 0x00000223: "8", + 0x00000222: "8", + 0x0001031A: "8", + 0x00000AEE: "\u096e", + 0x00002467: "\u2787", + 0x0001F109: "8,", + 0x0000248F: "8.", + 0x000033E7: "8\u65e5", + 0x000032C7: "8\u6708", + 0x00003360: "8\u70b9", + 0x00000A67: "9", + 0x00000B68: "9", + 0x000009ED: "9", + 0x00000D6D: "9", + 0x0001D7D7: "9", + 0x0001D7E1: "9", + 0x0001D7EB: "9", + 0x0001D7F5: "9", + 0x0001D7FF: "9", + 0x0000A76E: "9", + 0x00002CCA: "9", + 0x000118CC: "9", + 0x000118AC: "9", + 0x000118D6: "9", + 0x00000967: "\u0669", + 0x000118E4: "\u0669", + 0x000006F9: "\u0669", + 0x00000CEF: "\u0c6f", + 0x00002468: "\u2788", + 0x0001F10A: "9,", + 0x00002490: "9.", + 0x000033E8: "9\u65e5", + 0x000032C8: "9\u6708", + 0x00003361: "9\u70b9", + 0x0000237A: "a", + 0x0000FF41: "a", + 0x0001D41A: "a", + 0x0001D44E: "a", + 0x0001D482: "a", + 0x0001D4B6: "a", + 0x0001D4EA: "a", + 0x0001D51E: "a", + 0x0001D552: "a", + 0x0001D586: "a", + 0x0001D5BA: "a", + 0x0001D5EE: "a", + 0x0001D622: "a", + 0x0001D656: "a", + 0x0001D68A: "a", + 0x00000251: "a", + 0x000003B1: "a", + 0x0001D6C2: "a", + 0x0001D6FC: "a", + 0x0001D736: "a", + 0x0001D770: "a", + 0x0001D7AA: "a", + 0x00000430: "a", + 0x00002DF6: "\u0363", + 0x0000FF21: "A", + 0x0001D400: "A", + 0x0001D434: "A", + 0x0001D468: "A", + 0x0001D49C: "A", + 0x0001D4D0: "A", + 0x0001D504: "A", + 0x0001D538: "A", + 0x0001D56C: "A", + 0x0001D5A0: "A", + 0x0001D5D4: "A", + 0x0001D608: "A", + 0x0001D63C: "A", + 0x0001D670: "A", + 0x00000391: "A", + 0x0001D6A8: "A", + 0x0001D6E2: "A", + 0x0001D71C: "A", + 0x0001D756: "A", + 0x0001D790: "A", + 0x00000410: "A", + 0x000013AA: "A", + 0x000015C5: "A", + 0x0000A4EE: "A", + 0x00016F40: "A", + 0x000102A0: "A", + 0x00002376: "a\u0332", + 0x000001CE: "\u0103", + 0x000001CD: "\u0102", + 0x00000227: "\u00e5", + 0x00000226: "\u00c5", + 0x00001E9A: "\u1ea3", + 0x00002100: "a/c", + 0x00002101: "a/s", + 0x0000A733: "aa", + 0x0000A732: "AA", + 0x000000E6: "ae", + 0x000004D5: "ae", + 0x000000C6: "AE", + 0x000004D4: "AE", + 0x0000A735: "ao", + 0x0000A734: "AO", + 0x0001F707: "AR", + 0x0000A737: "au", + 0x0000A736: "AU", + 0x0000A739: "av", + 0x0000A73B: "av", + 0x0000A738: "AV", + 0x0000A73A: "AV", + 0x0000A73D: "ay", + 0x0000A73C: "AY", + 0x0000AB7A: "\u1d00", + 0x00002200: "\u2c6f", + 0x0001D217: "\u2c6f", + 0x000015C4: "\u2c6f", + 0x0000A4EF: "\u2c6f", + 0x0001041F: "\u2c70", + 0x0001D41B: "b", + 0x0001D44F: "b", + 0x0001D483: "b", + 0x0001D4B7: "b", + 0x0001D4EB: "b", + 0x0001D51F: "b", + 0x0001D553: "b", + 0x0001D587: "b", + 0x0001D5BB: "b", + 0x0001D5EF: "b", + 0x0001D623: "b", + 0x0001D657: "b", + 0x0001D68B: "b", + 0x00000184: "b", + 0x0000042C: "b", + 0x000013CF: "b", + 0x00001472: "b", + 0x000015AF: "b", + 0x0000FF22: "B", + 0x0000212C: "B", + 0x0001D401: "B", + 0x0001D435: "B", + 0x0001D469: "B", + 0x0001D4D1: "B", + 0x0001D505: "B", + 0x0001D539: "B", + 0x0001D56D: "B", + 0x0001D5A1: "B", + 0x0001D5D5: "B", + 0x0001D609: "B", + 0x0001D63D: "B", + 0x0001D671: "B", + 0x0000A7B4: "B", + 0x00000392: "B", + 0x0001D6A9: "B", + 0x0001D6E3: "B", + 0x0001D71D: "B", + 0x0001D757: "B", + 0x0001D791: "B", + 0x00000412: "B", + 0x000013F4: "B", + 
0x000015F7: "B", + 0x0000A4D0: "B", + 0x00010282: "B", + 0x000102A1: "B", + 0x00010301: "B", + 0x00000253: "b\u0314", + 0x00001473: "b\u0307", + 0x00000183: "b\u0304", + 0x00000182: "b\u0304", + 0x00000411: "b\u0304", + 0x00000180: "b\u0335", + 0x0000048D: "b\u0335", + 0x0000048C: "b\u0335", + 0x00000463: "b\u0335", + 0x00000462: "b\u0335", + 0x0000147F: "b\u00b7", + 0x00001481: "b\u0307\u00b7", + 0x00001488: "b'", + 0x0000042B: "bl", + 0x00000432: "\u0299", + 0x000013FC: "\u0299", + 0x0000FF43: "c", + 0x0000217D: "c", + 0x0001D41C: "c", + 0x0001D450: "c", + 0x0001D484: "c", + 0x0001D4B8: "c", + 0x0001D4EC: "c", + 0x0001D520: "c", + 0x0001D554: "c", + 0x0001D588: "c", + 0x0001D5BC: "c", + 0x0001D5F0: "c", + 0x0001D624: "c", + 0x0001D658: "c", + 0x0001D68C: "c", + 0x00001D04: "c", + 0x000003F2: "c", + 0x00002CA5: "c", + 0x00000441: "c", + 0x0000ABAF: "c", + 0x0001043D: "c", + 0x00002DED: "\u0368", + 0x0001F74C: "C", + 0x000118F2: "C", + 0x000118E9: "C", + 0x0000FF23: "C", + 0x0000216D: "C", + 0x00002102: "C", + 0x0000212D: "C", + 0x0001D402: "C", + 0x0001D436: "C", + 0x0001D46A: "C", + 0x0001D49E: "C", + 0x0001D4D2: "C", + 0x0001D56E: "C", + 0x0001D5A2: "C", + 0x0001D5D6: "C", + 0x0001D60A: "C", + 0x0001D63E: "C", + 0x0001D672: "C", + 0x000003F9: "C", + 0x00002CA4: "C", + 0x00000421: "C", + 0x000013DF: "C", + 0x0000A4DA: "C", + 0x000102A2: "C", + 0x00010302: "C", + 0x00010415: "C", + 0x0001051C: "C", + 0x000000A2: "c\u0338", + 0x0000023C: "c\u0338", + 0x000020A1: "C\u20eb", + 0x000000E7: "c\u0326", + 0x000004AB: "c\u0326", + 0x000000C7: "C\u0326", + 0x000004AA: "C\u0326", + 0x00000187: "C'", + 0x00002105: "c/o", + 0x00002106: "c/u", + 0x000022F4: "\ua793", + 0x0000025B: "\ua793", + 0x000003B5: "\ua793", + 0x000003F5: "\ua793", + 0x0001D6C6: "\ua793", + 0x0001D6DC: "\ua793", + 0x0001D700: "\ua793", + 0x0001D716: "\ua793", + 0x0001D73A: "\ua793", + 0x0001D750: "\ua793", + 0x0001D774: "\ua793", + 0x0001D78A: "\ua793", + 0x0001D7AE: "\ua793", + 0x0001D7C4: "\ua793", + 0x00002C89: "\ua793", + 0x00000454: "\ua793", + 0x00000511: "\ua793", + 0x0000AB9B: "\ua793", + 0x000118CE: "\ua793", + 0x00010429: "\ua793", + 0x000020AC: "\ua792", + 0x00002C88: "\ua792", + 0x00000404: "\ua792", + 0x00002377: "\ua793\u0332", + 0x0000037D: "\ua73f", + 0x000003FF: "\ua73e", + 0x0000217E: "d", + 0x00002146: "d", + 0x0001D41D: "d", + 0x0001D451: "d", + 0x0001D485: "d", + 0x0001D4B9: "d", + 0x0001D4ED: "d", + 0x0001D521: "d", + 0x0001D555: "d", + 0x0001D589: "d", + 0x0001D5BD: "d", + 0x0001D5F1: "d", + 0x0001D625: "d", + 0x0001D659: "d", + 0x0001D68D: "d", + 0x00000501: "d", + 0x000013E7: "d", + 0x0000146F: "d", + 0x0000A4D2: "d", + 0x0000216E: "D", + 0x00002145: "D", + 0x0001D403: "D", + 0x0001D437: "D", + 0x0001D46B: "D", + 0x0001D49F: "D", + 0x0001D4D3: "D", + 0x0001D507: "D", + 0x0001D53B: "D", + 0x0001D56F: "D", + 0x0001D5A3: "D", + 0x0001D5D7: "D", + 0x0001D60B: "D", + 0x0001D63F: "D", + 0x0001D673: "D", + 0x000013A0: "D", + 0x000015DE: "D", + 0x000015EA: "D", + 0x0000A4D3: "D", + 0x00000257: "d\u0314", + 0x00000256: "d\u0328", + 0x0000018C: "d\u0304", + 0x00000111: "d\u0335", + 0x00000110: "D\u0335", + 0x000000D0: "D\u0335", + 0x00000189: "D\u0335", + 0x000020AB: "d\u0335\u0331", + 0x0000A77A: "\ua779", + 0x0000147B: "d\u00b7", + 0x00001487: "d'", + 0x000002A4: "d\u021d", + 0x000001F3: "dz", + 0x000002A3: "dz", + 0x000001F2: "Dz", + 0x000001F1: "DZ", + 0x000001C6: "d\u017e", + 0x000001C5: "D\u017e", + 0x000001C4: "D\u017d", + 0x000002A5: "d\u0291", + 0x0000AB70: "\u1d05", + 0x00002E39: "\u1e9f", + 0x000003B4: 
"\u1e9f", + 0x0001D6C5: "\u1e9f", + 0x0001D6FF: "\u1e9f", + 0x0001D739: "\u1e9f", + 0x0001D773: "\u1e9f", + 0x0001D7AD: "\u1e9f", + 0x0000056E: "\u1e9f", + 0x00001577: "\u1e9f", + 0x0000212E: "e", + 0x0000FF45: "e", + 0x0000212F: "e", + 0x00002147: "e", + 0x0001D41E: "e", + 0x0001D452: "e", + 0x0001D486: "e", + 0x0001D4EE: "e", + 0x0001D522: "e", + 0x0001D556: "e", + 0x0001D58A: "e", + 0x0001D5BE: "e", + 0x0001D5F2: "e", + 0x0001D626: "e", + 0x0001D65A: "e", + 0x0001D68E: "e", + 0x0000AB32: "e", + 0x00000435: "e", + 0x000004BD: "e", + 0x00002DF7: "\u0364", + 0x000022FF: "E", + 0x0000FF25: "E", + 0x00002130: "E", + 0x0001D404: "E", + 0x0001D438: "E", + 0x0001D46C: "E", + 0x0001D4D4: "E", + 0x0001D508: "E", + 0x0001D53C: "E", + 0x0001D570: "E", + 0x0001D5A4: "E", + 0x0001D5D8: "E", + 0x0001D60C: "E", + 0x0001D640: "E", + 0x0001D674: "E", + 0x00000395: "E", + 0x0001D6AC: "E", + 0x0001D6E6: "E", + 0x0001D720: "E", + 0x0001D75A: "E", + 0x0001D794: "E", + 0x00000415: "E", + 0x00002D39: "E", + 0x000013AC: "E", + 0x0000A4F0: "E", + 0x000118A6: "E", + 0x000118AE: "E", + 0x00010286: "E", + 0x0000011B: "\u0115", + 0x0000011A: "\u0114", + 0x00000247: "e\u0338", + 0x00000246: "E\u0338", + 0x000004BF: "e\u0328", + 0x0000AB7C: "\u1d07", + 0x00000259: "\u01dd", + 0x000004D9: "\u01dd", + 0x00002203: "\u018e", + 0x00002D3A: "\u018e", + 0x0000A4F1: "\u018e", + 0x0000025A: "\u01dd\u02de", + 0x00001D14: "\u01ddo", + 0x0000AB41: "\u01ddo\u0338", + 0x0000AB42: "\u01ddo\u0335", + 0x000004D8: "\u018f", + 0x0001D221: "\u0190", + 0x00002107: "\u0190", + 0x00000510: "\u0190", + 0x000013CB: "\u0190", + 0x00016F2D: "\u0190", + 0x00010401: "\u0190", + 0x00001D9F: "\u1d4b", + 0x00001D08: "\u025c", + 0x00000437: "\u025c", + 0x00000499: "\u025c\u0326", + 0x00010442: "\u025e", + 0x0000A79D: "\u029a", + 0x0001042A: "\u029a", + 0x0001D41F: "f", + 0x0001D453: "f", + 0x0001D487: "f", + 0x0001D4BB: "f", + 0x0001D4EF: "f", + 0x0001D523: "f", + 0x0001D557: "f", + 0x0001D58B: "f", + 0x0001D5BF: "f", + 0x0001D5F3: "f", + 0x0001D627: "f", + 0x0001D65B: "f", + 0x0001D68F: "f", + 0x0000AB35: "f", + 0x0000A799: "f", + 0x0000017F: "f", + 0x00001E9D: "f", + 0x00000584: "f", + 0x0001D213: "F", + 0x00002131: "F", + 0x0001D405: "F", + 0x0001D439: "F", + 0x0001D46D: "F", + 0x0001D4D5: "F", + 0x0001D509: "F", + 0x0001D53D: "F", + 0x0001D571: "F", + 0x0001D5A5: "F", + 0x0001D5D9: "F", + 0x0001D60D: "F", + 0x0001D641: "F", + 0x0001D675: "F", + 0x0000A798: "F", + 0x000003DC: "F", + 0x0001D7CA: "F", + 0x000015B4: "F", + 0x0000A4DD: "F", + 0x000118C2: "F", + 0x000118A2: "F", + 0x00010287: "F", + 0x000102A5: "F", + 0x00010525: "F", + 0x00000192: "f\u0326", + 0x00000191: "F\u0326", + 0x00001D6E: "f\u0334", + 0x0000213B: "FAX", + 0x0000FB00: "ff", + 0x0000FB03: "ffi", + 0x0000FB04: "ffl", + 0x0000FB01: "fi", + 0x0000FB02: "fl", + 0x000002A9: "f\u014b", + 0x000015B5: "\u2132", + 0x0000A4DE: "\u2132", + 0x0001D230: "\ua7fb", + 0x000015B7: "\ua7fb", + 0x0000FF47: "g", + 0x0000210A: "g", + 0x0001D420: "g", + 0x0001D454: "g", + 0x0001D488: "g", + 0x0001D4F0: "g", + 0x0001D524: "g", + 0x0001D558: "g", + 0x0001D58C: "g", + 0x0001D5C0: "g", + 0x0001D5F4: "g", + 0x0001D628: "g", + 0x0001D65C: "g", + 0x0001D690: "g", + 0x00000261: "g", + 0x00001D83: "g", + 0x0000018D: "g", + 0x00000581: "g", + 0x0001D406: "G", + 0x0001D43A: "G", + 0x0001D46E: "G", + 0x0001D4A2: "G", + 0x0001D4D6: "G", + 0x0001D50A: "G", + 0x0001D53E: "G", + 0x0001D572: "G", + 0x0001D5A6: "G", + 0x0001D5DA: "G", + 0x0001D60E: "G", + 0x0001D642: "G", + 0x0001D676: "G", + 0x0000050C: "G", + 
0x000013C0: "G", + 0x000013F3: "G", + 0x0000A4D6: "G", + 0x00001DA2: "\u1d4d", + 0x00000260: "g\u0314", + 0x000001E7: "\u011f", + 0x000001E6: "\u011e", + 0x000001F5: "\u0123", + 0x000001E5: "g\u0335", + 0x000001E4: "G\u0335", + 0x00000193: "G'", + 0x0000050D: "\u0262", + 0x0000AB90: "\u0262", + 0x000013FB: "\u0262", + 0x0000FF48: "h", + 0x0000210E: "h", + 0x0001D421: "h", + 0x0001D489: "h", + 0x0001D4BD: "h", + 0x0001D4F1: "h", + 0x0001D525: "h", + 0x0001D559: "h", + 0x0001D58D: "h", + 0x0001D5C1: "h", + 0x0001D5F5: "h", + 0x0001D629: "h", + 0x0001D65D: "h", + 0x0001D691: "h", + 0x000004BB: "h", + 0x00000570: "h", + 0x000013C2: "h", + 0x0000FF28: "H", + 0x0000210B: "H", + 0x0000210C: "H", + 0x0000210D: "H", + 0x0001D407: "H", + 0x0001D43B: "H", + 0x0001D46F: "H", + 0x0001D4D7: "H", + 0x0001D573: "H", + 0x0001D5A7: "H", + 0x0001D5DB: "H", + 0x0001D60F: "H", + 0x0001D643: "H", + 0x0001D677: "H", + 0x00000397: "H", + 0x0001D6AE: "H", + 0x0001D6E8: "H", + 0x0001D722: "H", + 0x0001D75C: "H", + 0x0001D796: "H", + 0x00002C8E: "H", + 0x0000041D: "H", + 0x000013BB: "H", + 0x0000157C: "H", + 0x0000A4E7: "H", + 0x000102CF: "H", + 0x00001D78: "\u1d34", + 0x00000266: "h\u0314", + 0x0000A695: "h\u0314", + 0x000013F2: "h\u0314", + 0x00002C67: "H\u0329", + 0x000004A2: "H\u0329", + 0x00000127: "h\u0335", + 0x0000210F: "h\u0335", + 0x0000045B: "h\u0335", + 0x00000126: "H\u0335", + 0x000004C9: "H\u0326", + 0x000004C7: "H\u0326", + 0x0000043D: "\u029c", + 0x0000AB8B: "\u029c", + 0x000004A3: "\u029c\u0329", + 0x000004CA: "\u029c\u0326", + 0x000004C8: "\u029c\u0326", + 0x0000050A: "\u01f6", + 0x0000AB80: "\u2c76", + 0x00000370: "\u2c75", + 0x000013A8: "\u2c75", + 0x000013B0: "\u2c75", + 0x0000A6B1: "\u2c75", + 0x0000A795: "\ua727", + 0x000002DB: "i", + 0x00002373: "i", + 0x0000FF49: "i", + 0x00002170: "i", + 0x00002139: "i", + 0x00002148: "i", + 0x0001D422: "i", + 0x0001D456: "i", + 0x0001D48A: "i", + 0x0001D4BE: "i", + 0x0001D4F2: "i", + 0x0001D526: "i", + 0x0001D55A: "i", + 0x0001D58E: "i", + 0x0001D5C2: "i", + 0x0001D5F6: "i", + 0x0001D62A: "i", + 0x0001D65E: "i", + 0x0001D692: "i", + 0x00000131: "i", + 0x0001D6A4: "i", + 0x0000026A: "i", + 0x00000269: "i", + 0x000003B9: "i", + 0x00001FBE: "i", + 0x0000037A: "i", + 0x0001D6CA: "i", + 0x0001D704: "i", + 0x0001D73E: "i", + 0x0001D778: "i", + 0x0001D7B2: "i", + 0x00000456: "i", + 0x0000A647: "i", + 0x000004CF: "i", + 0x0000AB75: "i", + 0x000013A5: "i", + 0x000118C3: "i", + 0x000024DB: "\u24be", + 0x00002378: "i\u0332", + 0x000001D0: "\u012d", + 0x000001CF: "\u012c", + 0x00000268: "i\u0335", + 0x00001D7B: "i\u0335", + 0x00001D7C: "i\u0335", + 0x00002171: "ii", + 0x00002172: "iii", + 0x00000133: "ij", + 0x00002173: "iv", + 0x00002178: "ix", + 0x0000FF4A: "j", + 0x00002149: "j", + 0x0001D423: "j", + 0x0001D457: "j", + 0x0001D48B: "j", + 0x0001D4BF: "j", + 0x0001D4F3: "j", + 0x0001D527: "j", + 0x0001D55B: "j", + 0x0001D58F: "j", + 0x0001D5C3: "j", + 0x0001D5F7: "j", + 0x0001D62B: "j", + 0x0001D65F: "j", + 0x0001D693: "j", + 0x000003F3: "j", + 0x00000458: "j", + 0x0000FF2A: "J", + 0x0001D409: "J", + 0x0001D43D: "J", + 0x0001D471: "J", + 0x0001D4A5: "J", + 0x0001D4D9: "J", + 0x0001D50D: "J", + 0x0001D541: "J", + 0x0001D575: "J", + 0x0001D5A9: "J", + 0x0001D5DD: "J", + 0x0001D611: "J", + 0x0001D645: "J", + 0x0001D679: "J", + 0x0000A7B2: "J", + 0x0000037F: "J", + 0x00000408: "J", + 0x000013AB: "J", + 0x0000148D: "J", + 0x0000A4D9: "J", + 0x00000249: "j\u0335", + 0x00000248: "J\u0335", + 0x00001499: "J\u00b7", + 0x0001D6A5: "\u0237", + 0x00000575: "\u0237", + 
0x0000AB7B: "\u1d0a", + 0x0001D424: "k", + 0x0001D458: "k", + 0x0001D48C: "k", + 0x0001D4C0: "k", + 0x0001D4F4: "k", + 0x0001D528: "k", + 0x0001D55C: "k", + 0x0001D590: "k", + 0x0001D5C4: "k", + 0x0001D5F8: "k", + 0x0001D62C: "k", + 0x0001D660: "k", + 0x0001D694: "k", + 0x0000212A: "K", + 0x0000FF2B: "K", + 0x0001D40A: "K", + 0x0001D43E: "K", + 0x0001D472: "K", + 0x0001D4A6: "K", + 0x0001D4DA: "K", + 0x0001D50E: "K", + 0x0001D542: "K", + 0x0001D576: "K", + 0x0001D5AA: "K", + 0x0001D5DE: "K", + 0x0001D612: "K", + 0x0001D646: "K", + 0x0001D67A: "K", + 0x0000039A: "K", + 0x0001D6B1: "K", + 0x0001D6EB: "K", + 0x0001D725: "K", + 0x0001D75F: "K", + 0x0001D799: "K", + 0x00002C94: "K", + 0x0000041A: "K", + 0x000013E6: "K", + 0x000016D5: "K", + 0x0000A4D7: "K", + 0x00010518: "K", + 0x00000199: "k\u0314", + 0x00002C69: "K\u0329", + 0x0000049A: "K\u0329", + 0x000020AD: "K\u0335", + 0x0000A740: "K\u0335", + 0x0000049E: "K\u0335", + 0x00000198: "K'", + 0x000005C0: "l", + 0x0000007C: "l", + 0x00002223: "l", + 0x000023FD: "l", + 0x0000FFE8: "l", + 0x00000031: "l", + 0x00000661: "l", + 0x000006F1: "l", + 0x00010320: "l", + 0x0001E8C7: "l", + 0x0001D7CF: "l", + 0x0001D7D9: "l", + 0x0001D7E3: "l", + 0x0001D7ED: "l", + 0x0001D7F7: "l", + 0x00000049: "l", + 0x0000FF29: "l", + 0x00002160: "l", + 0x00002110: "l", + 0x00002111: "l", + 0x0001D408: "l", + 0x0001D43C: "l", + 0x0001D470: "l", + 0x0001D4D8: "l", + 0x0001D540: "l", + 0x0001D574: "l", + 0x0001D5A8: "l", + 0x0001D5DC: "l", + 0x0001D610: "l", + 0x0001D644: "l", + 0x0001D678: "l", + 0x00000196: "l", + 0x0000FF4C: "l", + 0x0000217C: "l", + 0x00002113: "l", + 0x0001D425: "l", + 0x0001D459: "l", + 0x0001D48D: "l", + 0x0001D4C1: "l", + 0x0001D4F5: "l", + 0x0001D529: "l", + 0x0001D55D: "l", + 0x0001D591: "l", + 0x0001D5C5: "l", + 0x0001D5F9: "l", + 0x0001D62D: "l", + 0x0001D661: "l", + 0x0001D695: "l", + 0x000001C0: "l", + 0x00000399: "l", + 0x0001D6B0: "l", + 0x0001D6EA: "l", + 0x0001D724: "l", + 0x0001D75E: "l", + 0x0001D798: "l", + 0x00002C92: "l", + 0x00000406: "l", + 0x000004C0: "l", + 0x000005D5: "l", + 0x000005DF: "l", + 0x00000627: "l", + 0x0001EE00: "l", + 0x0001EE80: "l", + 0x0000FE8E: "l", + 0x0000FE8D: "l", + 0x000007CA: "l", + 0x00002D4F: "l", + 0x000016C1: "l", + 0x0000A4F2: "l", + 0x00016F28: "l", + 0x0001028A: "l", + 0x00010309: "l", + 0x0001D22A: "L", + 0x0000216C: "L", + 0x00002112: "L", + 0x0001D40B: "L", + 0x0001D43F: "L", + 0x0001D473: "L", + 0x0001D4DB: "L", + 0x0001D50F: "L", + 0x0001D543: "L", + 0x0001D577: "L", + 0x0001D5AB: "L", + 0x0001D5DF: "L", + 0x0001D613: "L", + 0x0001D647: "L", + 0x0001D67B: "L", + 0x00002CD0: "L", + 0x000013DE: "L", + 0x000014AA: "L", + 0x0000A4E1: "L", + 0x00016F16: "L", + 0x000118A3: "L", + 0x000118B2: "L", + 0x0001041B: "L", + 0x00010526: "L", + 0x0000FD3C: "l\u030b", + 0x0000FD3D: "l\u030b", + 0x00000142: "l\u0338", + 0x00000141: "L\u0338", + 0x0000026D: "l\u0328", + 0x00000197: "l\u0335", + 0x0000019A: "l\u0335", + 0x0000026B: "l\u0334", + 0x00000625: "l\u0655", + 0x0000FE88: "l\u0655", + 0x0000FE87: "l\u0655", + 0x00000673: "l\u0655", + 0x00000140: "l\u00b7", + 0x0000013F: "l\u00b7", + 0x000014B7: "l\u00b7", + 0x0001F102: "l,", + 0x00002488: "l.", + 0x000005F1: "l'", + 0x00002493: "l2.", + 0x000033EB: "l2\u65e5", + 0x000032CB: "l2\u6708", + 0x00003364: "l2\u70b9", + 0x00002494: "l3.", + 0x000033EC: "l3\u65e5", + 0x00003365: "l3\u70b9", + 0x00002495: "l4.", + 0x000033ED: "l4\u65e5", + 0x00003366: "l4\u70b9", + 0x00002496: "l5.", + 0x000033EE: "l5\u65e5", + 0x00003367: "l5\u70b9", + 0x00002497: "l6.", + 
0x000033EF: "l6\u65e5", + 0x00003368: "l6\u70b9", + 0x00002498: "l7.", + 0x000033F0: "l7\u65e5", + 0x00003369: "l7\u70b9", + 0x00002499: "l8.", + 0x000033F1: "l8\u65e5", + 0x0000336A: "l8\u70b9", + 0x0000249A: "l9.", + 0x000033F2: "l9\u65e5", + 0x0000336B: "l9\u70b9", + 0x000001C9: "lj", + 0x00000132: "lJ", + 0x000001C8: "Lj", + 0x000001C7: "LJ", + 0x00002016: "ll", + 0x00002225: "ll", + 0x00002161: "ll", + 0x000001C1: "ll", + 0x000005F0: "ll", + 0x00010199: "l\u0335l\u0335", + 0x00002492: "ll.", + 0x00002162: "lll", + 0x00010198: "l\u0335l\u0335S\u0335", + 0x000033EA: "ll\u65e5", + 0x000032CA: "ll\u6708", + 0x00003363: "ll\u70b9", + 0x0000042E: "lO", + 0x00002491: "lO.", + 0x000033E9: "lO\u65e5", + 0x000032C9: "lO\u6708", + 0x00003362: "lO\u70b9", + 0x000002AA: "ls", + 0x000020B6: "lt", + 0x00002163: "lV", + 0x00002168: "lX", + 0x0000026E: "l\u021d", + 0x000002AB: "lz", + 0x00000623: "l\u0674", + 0x0000FE84: "l\u0674", + 0x0000FE83: "l\u0674", + 0x00000672: "l\u0674", + 0x00000675: "l\u0674", + 0x0000FDF3: "l\u0643\u0628\u0631", + 0x0000FDF2: "l\u0644\u0644\u0651\u0670o", + 0x000033E0: "l\u65e5", + 0x000032C0: "l\u6708", + 0x00003359: "l\u70b9", + 0x00002CD1: "\u029f", + 0x0000ABAE: "\u029f", + 0x00010443: "\u029f", + 0x0000FF2D: "M", + 0x0000216F: "M", + 0x00002133: "M", + 0x0001D40C: "M", + 0x0001D440: "M", + 0x0001D474: "M", + 0x0001D4DC: "M", + 0x0001D510: "M", + 0x0001D544: "M", + 0x0001D578: "M", + 0x0001D5AC: "M", + 0x0001D5E0: "M", + 0x0001D614: "M", + 0x0001D648: "M", + 0x0001D67C: "M", + 0x0000039C: "M", + 0x0001D6B3: "M", + 0x0001D6ED: "M", + 0x0001D727: "M", + 0x0001D761: "M", + 0x0001D79B: "M", + 0x000003FA: "M", + 0x00002C98: "M", + 0x0000041C: "M", + 0x000013B7: "M", + 0x000015F0: "M", + 0x000016D6: "M", + 0x0000A4DF: "M", + 0x000102B0: "M", + 0x00010311: "M", + 0x000004CD: "M\u0326", + 0x0001F76B: "MB", + 0x00002DE8: "\u1ddf", + 0x0001D427: "n", + 0x0001D45B: "n", + 0x0001D48F: "n", + 0x0001D4C3: "n", + 0x0001D4F7: "n", + 0x0001D52B: "n", + 0x0001D55F: "n", + 0x0001D593: "n", + 0x0001D5C7: "n", + 0x0001D5FB: "n", + 0x0001D62F: "n", + 0x0001D663: "n", + 0x0001D697: "n", + 0x00000578: "n", + 0x0000057C: "n", + 0x0000FF2E: "N", + 0x00002115: "N", + 0x0001D40D: "N", + 0x0001D441: "N", + 0x0001D475: "N", + 0x0001D4A9: "N", + 0x0001D4DD: "N", + 0x0001D511: "N", + 0x0001D579: "N", + 0x0001D5AD: "N", + 0x0001D5E1: "N", + 0x0001D615: "N", + 0x0001D649: "N", + 0x0001D67D: "N", + 0x0000039D: "N", + 0x0001D6B4: "N", + 0x0001D6EE: "N", + 0x0001D728: "N", + 0x0001D762: "N", + 0x0001D79C: "N", + 0x00002C9A: "N", + 0x0000A4E0: "N", + 0x00010513: "N", + 0x0001018E: "N\u030a", + 0x00000273: "n\u0328", + 0x0000019E: "n\u0329", + 0x000003B7: "n\u0329", + 0x0001D6C8: "n\u0329", + 0x0001D702: "n\u0329", + 0x0001D73C: "n\u0329", + 0x0001D776: "n\u0329", + 0x0001D7B0: "n\u0329", + 0x0000019D: "N\u0326", + 0x00001D70: "n\u0334", + 0x000001CC: "nj", + 0x000001CB: "Nj", + 0x000001CA: "NJ", + 0x00002116: "No", + 0x00000377: "\u1d0e", + 0x00000438: "\u1d0e", + 0x0001044D: "\u1d0e", + 0x00000146: "\u0272", + 0x00000C02: "o", + 0x00000C82: "o", + 0x00000D02: "o", + 0x00000D82: "o", + 0x00000966: "o", + 0x00000A66: "o", + 0x00000AE6: "o", + 0x00000BE6: "o", + 0x00000C66: "o", + 0x00000CE6: "o", + 0x00000D66: "o", + 0x00000E50: "o", + 0x00000ED0: "o", + 0x00001040: "o", + 0x00000665: "o", + 0x000006F5: "o", + 0x0000FF4F: "o", + 0x00002134: "o", + 0x0001D428: "o", + 0x0001D45C: "o", + 0x0001D490: "o", + 0x0001D4F8: "o", + 0x0001D52C: "o", + 0x0001D560: "o", + 0x0001D594: "o", + 0x0001D5C8: "o", + 
0x0001D5FC: "o", + 0x0001D630: "o", + 0x0001D664: "o", + 0x0001D698: "o", + 0x00001D0F: "o", + 0x00001D11: "o", + 0x0000AB3D: "o", + 0x000003BF: "o", + 0x0001D6D0: "o", + 0x0001D70A: "o", + 0x0001D744: "o", + 0x0001D77E: "o", + 0x0001D7B8: "o", + 0x000003C3: "o", + 0x0001D6D4: "o", + 0x0001D70E: "o", + 0x0001D748: "o", + 0x0001D782: "o", + 0x0001D7BC: "o", + 0x00002C9F: "o", + 0x0000043E: "o", + 0x000010FF: "o", + 0x00000585: "o", + 0x000005E1: "o", + 0x00000647: "o", + 0x0001EE24: "o", + 0x0001EE64: "o", + 0x0001EE84: "o", + 0x0000FEEB: "o", + 0x0000FEEC: "o", + 0x0000FEEA: "o", + 0x0000FEE9: "o", + 0x000006BE: "o", + 0x0000FBAC: "o", + 0x0000FBAD: "o", + 0x0000FBAB: "o", + 0x0000FBAA: "o", + 0x000006C1: "o", + 0x0000FBA8: "o", + 0x0000FBA9: "o", + 0x0000FBA7: "o", + 0x0000FBA6: "o", + 0x000006D5: "o", + 0x00000D20: "o", + 0x0000101D: "o", + 0x000104EA: "o", + 0x000118C8: "o", + 0x000118D7: "o", + 0x0001042C: "o", + 0x00000030: "O", + 0x000007C0: "O", + 0x000009E6: "O", + 0x00000B66: "O", + 0x00003007: "O", + 0x000114D0: "O", + 0x000118E0: "O", + 0x0001D7CE: "O", + 0x0001D7D8: "O", + 0x0001D7E2: "O", + 0x0001D7EC: "O", + 0x0001D7F6: "O", + 0x0000FF2F: "O", + 0x0001D40E: "O", + 0x0001D442: "O", + 0x0001D476: "O", + 0x0001D4AA: "O", + 0x0001D4DE: "O", + 0x0001D512: "O", + 0x0001D546: "O", + 0x0001D57A: "O", + 0x0001D5AE: "O", + 0x0001D5E2: "O", + 0x0001D616: "O", + 0x0001D64A: "O", + 0x0001D67E: "O", + 0x0000039F: "O", + 0x0001D6B6: "O", + 0x0001D6F0: "O", + 0x0001D72A: "O", + 0x0001D764: "O", + 0x0001D79E: "O", + 0x00002C9E: "O", + 0x0000041E: "O", + 0x00000555: "O", + 0x00002D54: "O", + 0x000012D0: "O", + 0x00000B20: "O", + 0x000104C2: "O", + 0x0000A4F3: "O", + 0x000118B5: "O", + 0x00010292: "O", + 0x000102AB: "O", + 0x00010404: "O", + 0x00010516: "O", + 0x00002070: "\u00ba", + 0x00001D52: "\u00ba", + 0x000001D2: "\u014f", + 0x000001D1: "\u014e", + 0x000006FF: "o\u0302", + 0x00000150: "\u00d6", + 0x000000F8: "o\u0338", + 0x0000AB3E: "o\u0338", + 0x000000D8: "O\u0338", + 0x00002D41: "O\u0338", + 0x000001FE: "O\u0338\u0301", + 0x00000275: "o\u0335", + 0x0000A74B: "o\u0335", + 0x000004E9: "o\u0335", + 0x00000473: "o\u0335", + 0x0000AB8E: "o\u0335", + 0x0000ABBB: "o\u0335", + 0x00002296: "O\u0335", + 0x0000229D: "O\u0335", + 0x0000236C: "O\u0335", + 0x0001D21A: "O\u0335", + 0x0001F714: "O\u0335", + 0x0000019F: "O\u0335", + 0x0000A74A: "O\u0335", + 0x000003B8: "O\u0335", + 0x000003D1: "O\u0335", + 0x0001D6C9: "O\u0335", + 0x0001D6DD: "O\u0335", + 0x0001D703: "O\u0335", + 0x0001D717: "O\u0335", + 0x0001D73D: "O\u0335", + 0x0001D751: "O\u0335", + 0x0001D777: "O\u0335", + 0x0001D78B: "O\u0335", + 0x0001D7B1: "O\u0335", + 0x0001D7C5: "O\u0335", + 0x00000398: "O\u0335", + 0x000003F4: "O\u0335", + 0x0001D6AF: "O\u0335", + 0x0001D6B9: "O\u0335", + 0x0001D6E9: "O\u0335", + 0x0001D6F3: "O\u0335", + 0x0001D723: "O\u0335", + 0x0001D72D: "O\u0335", + 0x0001D75D: "O\u0335", + 0x0001D767: "O\u0335", + 0x0001D797: "O\u0335", + 0x0001D7A1: "O\u0335", + 0x000004E8: "O\u0335", + 0x00000472: "O\u0335", + 0x00002D31: "O\u0335", + 0x000013BE: "O\u0335", + 0x000013EB: "O\u0335", + 0x0000AB74: "o\u031b", + 0x0000FCD9: "o\u0670", + 0x0001F101: "O,", + 0x0001F100: "O.", + 0x000001A1: "o'", + 0x000001A0: "O'", + 0x000013A4: "O'", + 0x00000025: "\u00ba/\u2080", + 0x0000066A: "\u00ba/\u2080", + 0x00002052: "\u00ba/\u2080", + 0x00002030: "\u00ba/\u2080\u2080", + 0x00000609: "\u00ba/\u2080\u2080", + 0x00002031: "\u00ba/\u2080\u2080\u2080", + 0x0000060A: "\u00ba/\u2080\u2080\u2080", + 0x00000153: "oe", + 0x00000152: "OE", + 
0x00000276: "o\u1d07", + 0x0000221E: "oo", + 0x0000A74F: "oo", + 0x0000A699: "oo", + 0x0000A74E: "OO", + 0x0000A698: "OO", + 0x0000FCD7: "o\u062c", + 0x0000FC51: "o\u062c", + 0x0000FCD8: "o\u0645", + 0x0000FC52: "o\u0645", + 0x0000FD93: "o\u0645\u062c", + 0x0000FD94: "o\u0645\u0645", + 0x0000FC53: "o\u0649", + 0x0000FC54: "o\u0649", + 0x00000D5F: "o\u0d30o", + 0x00001010: "o\u102c", + 0x00003358: "O\u70b9", + 0x00002184: "\u0254", + 0x00001D10: "\u0254", + 0x0000037B: "\u0254", + 0x0001044B: "\u0254", + 0x00002183: "\u0186", + 0x000003FD: "\u0186", + 0x0000A4DB: "\u0186", + 0x00010423: "\u0186", + 0x0000AB3F: "\u0254\u0338", + 0x0000AB62: "\u0254e", + 0x0001043F: "\u0277", + 0x00002374: "p", + 0x0000FF50: "p", + 0x0001D429: "p", + 0x0001D45D: "p", + 0x0001D491: "p", + 0x0001D4C5: "p", + 0x0001D4F9: "p", + 0x0001D52D: "p", + 0x0001D561: "p", + 0x0001D595: "p", + 0x0001D5C9: "p", + 0x0001D5FD: "p", + 0x0001D631: "p", + 0x0001D665: "p", + 0x0001D699: "p", + 0x000003C1: "p", + 0x000003F1: "p", + 0x0001D6D2: "p", + 0x0001D6E0: "p", + 0x0001D70C: "p", + 0x0001D71A: "p", + 0x0001D746: "p", + 0x0001D754: "p", + 0x0001D780: "p", + 0x0001D78E: "p", + 0x0001D7BA: "p", + 0x0001D7C8: "p", + 0x00002CA3: "p", + 0x00000440: "p", + 0x0000FF30: "P", + 0x00002119: "P", + 0x0001D40F: "P", + 0x0001D443: "P", + 0x0001D477: "P", + 0x0001D4AB: "P", + 0x0001D4DF: "P", + 0x0001D513: "P", + 0x0001D57B: "P", + 0x0001D5AF: "P", + 0x0001D5E3: "P", + 0x0001D617: "P", + 0x0001D64B: "P", + 0x0001D67F: "P", + 0x000003A1: "P", + 0x0001D6B8: "P", + 0x0001D6F2: "P", + 0x0001D72C: "P", + 0x0001D766: "P", + 0x0001D7A0: "P", + 0x00002CA2: "P", + 0x00000420: "P", + 0x000013E2: "P", + 0x0000146D: "P", + 0x0000A4D1: "P", + 0x00010295: "P", + 0x000001A5: "p\u0314", + 0x00001D7D: "p\u0335", + 0x00001477: "p\u00b7", + 0x00001486: "P'", + 0x00001D29: "\u1d18", + 0x0000ABB2: "\u1d18", + 0x000003C6: "\u0278", + 0x000003D5: "\u0278", + 0x0001D6D7: "\u0278", + 0x0001D6DF: "\u0278", + 0x0001D711: "\u0278", + 0x0001D719: "\u0278", + 0x0001D74B: "\u0278", + 0x0001D753: "\u0278", + 0x0001D785: "\u0278", + 0x0001D78D: "\u0278", + 0x0001D7BF: "\u0278", + 0x0001D7C7: "\u0278", + 0x00002CAB: "\u0278", + 0x00000444: "\u0278", + 0x0001D42A: "q", + 0x0001D45E: "q", + 0x0001D492: "q", + 0x0001D4C6: "q", + 0x0001D4FA: "q", + 0x0001D52E: "q", + 0x0001D562: "q", + 0x0001D596: "q", + 0x0001D5CA: "q", + 0x0001D5FE: "q", + 0x0001D632: "q", + 0x0001D666: "q", + 0x0001D69A: "q", + 0x0000051B: "q", + 0x00000563: "q", + 0x00000566: "q", + 0x0000211A: "Q", + 0x0001D410: "Q", + 0x0001D444: "Q", + 0x0001D478: "Q", + 0x0001D4AC: "Q", + 0x0001D4E0: "Q", + 0x0001D514: "Q", + 0x0001D57C: "Q", + 0x0001D5B0: "Q", + 0x0001D5E4: "Q", + 0x0001D618: "Q", + 0x0001D64C: "Q", + 0x0001D680: "Q", + 0x00002D55: "Q", + 0x000002A0: "q\u0314", + 0x0001F700: "QE", + 0x00001D90: "\u024b", + 0x00001D0B: "\u0138", + 0x000003BA: "\u0138", + 0x000003F0: "\u0138", + 0x0001D6CB: "\u0138", + 0x0001D6DE: "\u0138", + 0x0001D705: "\u0138", + 0x0001D718: "\u0138", + 0x0001D73F: "\u0138", + 0x0001D752: "\u0138", + 0x0001D779: "\u0138", + 0x0001D78C: "\u0138", + 0x0001D7B3: "\u0138", + 0x0001D7C6: "\u0138", + 0x00002C95: "\u0138", + 0x0000043A: "\u0138", + 0x0000ABB6: "\u0138", + 0x0000049B: "\u0138\u0329", + 0x0000049F: "\u0138\u0335", + 0x0001D42B: "r", + 0x0001D45F: "r", + 0x0001D493: "r", + 0x0001D4C7: "r", + 0x0001D4FB: "r", + 0x0001D52F: "r", + 0x0001D563: "r", + 0x0001D597: "r", + 0x0001D5CB: "r", + 0x0001D5FF: "r", + 0x0001D633: "r", + 0x0001D667: "r", + 0x0001D69B: "r", + 0x0000AB47: "r", 
+ 0x0000AB48: "r", + 0x00001D26: "r", + 0x00002C85: "r", + 0x00000433: "r", + 0x0000AB81: "r", + 0x0001D216: "R", + 0x0000211B: "R", + 0x0000211C: "R", + 0x0000211D: "R", + 0x0001D411: "R", + 0x0001D445: "R", + 0x0001D479: "R", + 0x0001D4E1: "R", + 0x0001D57D: "R", + 0x0001D5B1: "R", + 0x0001D5E5: "R", + 0x0001D619: "R", + 0x0001D64D: "R", + 0x0001D681: "R", + 0x000001A6: "R", + 0x000013A1: "R", + 0x000013D2: "R", + 0x000104B4: "R", + 0x00001587: "R", + 0x0000A4E3: "R", + 0x00016F35: "R", + 0x0000027D: "r\u0328", + 0x0000027C: "r\u0329", + 0x0000024D: "r\u0335", + 0x00000493: "r\u0335", + 0x00001D72: "r\u0334", + 0x00000491: "r'", + 0x000118E3: "rn", + 0x0000006D: "rn", + 0x0000217F: "rn", + 0x0001D426: "rn", + 0x0001D45A: "rn", + 0x0001D48E: "rn", + 0x0001D4C2: "rn", + 0x0001D4F6: "rn", + 0x0001D52A: "rn", + 0x0001D55E: "rn", + 0x0001D592: "rn", + 0x0001D5C6: "rn", + 0x0001D5FA: "rn", + 0x0001D62E: "rn", + 0x0001D662: "rn", + 0x0001D696: "rn", + 0x00011700: "rn", + 0x000020A5: "rn\u0338", + 0x00000271: "rn\u0326", + 0x00001D6F: "rn\u0334", + 0x000020A8: "Rs", + 0x0000AB71: "\u0280", + 0x0000ABA2: "\u0280", + 0x0000044F: "\u1d19", + 0x00001D73: "\u027e\u0334", + 0x00002129: "\u027f", + 0x0000FF53: "s", + 0x0001D42C: "s", + 0x0001D460: "s", + 0x0001D494: "s", + 0x0001D4C8: "s", + 0x0001D4FC: "s", + 0x0001D530: "s", + 0x0001D564: "s", + 0x0001D598: "s", + 0x0001D5CC: "s", + 0x0001D600: "s", + 0x0001D634: "s", + 0x0001D668: "s", + 0x0001D69C: "s", + 0x0000A731: "s", + 0x000001BD: "s", + 0x00000455: "s", + 0x0000ABAA: "s", + 0x000118C1: "s", + 0x00010448: "s", + 0x0000FF33: "S", + 0x0001D412: "S", + 0x0001D446: "S", + 0x0001D47A: "S", + 0x0001D4AE: "S", + 0x0001D4E2: "S", + 0x0001D516: "S", + 0x0001D54A: "S", + 0x0001D57E: "S", + 0x0001D5B2: "S", + 0x0001D5E6: "S", + 0x0001D61A: "S", + 0x0001D64E: "S", + 0x0001D682: "S", + 0x00000405: "S", + 0x0000054F: "S", + 0x000013D5: "S", + 0x000013DA: "S", + 0x0000A4E2: "S", + 0x00016F3A: "S", + 0x00010296: "S", + 0x00010420: "S", + 0x00000282: "s\u0328", + 0x00001D74: "s\u0334", + 0x0000A7B5: "\u00df", + 0x000003B2: "\u00df", + 0x000003D0: "\u00df", + 0x0001D6C3: "\u00df", + 0x0001D6FD: "\u00df", + 0x0001D737: "\u00df", + 0x0001D771: "\u00df", + 0x0001D7AB: "\u00df", + 0x000013F0: "\u00df", + 0x0001F75C: "sss", + 0x0000FB06: "st", + 0x0000222B: "\u0283", + 0x0000AB4D: "\u0283", + 0x00002211: "\u01a9", + 0x00002140: "\u01a9", + 0x000003A3: "\u01a9", + 0x0001D6BA: "\u01a9", + 0x0001D6F4: "\u01a9", + 0x0001D72E: "\u01a9", + 0x0001D768: "\u01a9", + 0x0001D7A2: "\u01a9", + 0x00002D49: "\u01a9", + 0x0000222C: "\u0283\u0283", + 0x0000222D: "\u0283\u0283\u0283", + 0x00002A0C: "\u0283\u0283\u0283\u0283", + 0x0001D42D: "t", + 0x0001D461: "t", + 0x0001D495: "t", + 0x0001D4C9: "t", + 0x0001D4FD: "t", + 0x0001D531: "t", + 0x0001D565: "t", + 0x0001D599: "t", + 0x0001D5CD: "t", + 0x0001D601: "t", + 0x0001D635: "t", + 0x0001D669: "t", + 0x0001D69D: "t", + 0x000022A4: "T", + 0x000027D9: "T", + 0x0001F768: "T", + 0x0000FF34: "T", + 0x0001D413: "T", + 0x0001D447: "T", + 0x0001D47B: "T", + 0x0001D4AF: "T", + 0x0001D4E3: "T", + 0x0001D517: "T", + 0x0001D54B: "T", + 0x0001D57F: "T", + 0x0001D5B3: "T", + 0x0001D5E7: "T", + 0x0001D61B: "T", + 0x0001D64F: "T", + 0x0001D683: "T", + 0x000003A4: "T", + 0x0001D6BB: "T", + 0x0001D6F5: "T", + 0x0001D72F: "T", + 0x0001D769: "T", + 0x0001D7A3: "T", + 0x00002CA6: "T", + 0x00000422: "T", + 0x000013A2: "T", + 0x0000A4D4: "T", + 0x00016F0A: "T", + 0x000118BC: "T", + 0x00010297: "T", + 0x000102B1: "T", + 0x00010315: "T", + 0x000001AD: 
"t\u0314", + 0x00002361: "T\u0308", + 0x0000023E: "T\u0338", + 0x0000021A: "\u0162", + 0x000001AE: "T\u0328", + 0x000004AC: "T\u0329", + 0x000020AE: "T\u20eb", + 0x00000167: "t\u0335", + 0x00000166: "T\u0335", + 0x00001D75: "t\u0334", + 0x000010A0: "\ua786", + 0x0000A728: "T3", + 0x000002A8: "t\u0255", + 0x00002121: "TEL", + 0x0000A777: "tf", + 0x000002A6: "ts", + 0x000002A7: "t\u0283", + 0x0000A729: "t\u021d", + 0x000003C4: "\u1d1b", + 0x0001D6D5: "\u1d1b", + 0x0001D70F: "\u1d1b", + 0x0001D749: "\u1d1b", + 0x0001D783: "\u1d1b", + 0x0001D7BD: "\u1d1b", + 0x00000442: "\u1d1b", + 0x0000AB72: "\u1d1b", + 0x000004AD: "\u1d1b\u0329", + 0x00000163: "\u01ab", + 0x0000021B: "\u01ab", + 0x000013BF: "\u01ab", + 0x0001D42E: "u", + 0x0001D462: "u", + 0x0001D496: "u", + 0x0001D4CA: "u", + 0x0001D4FE: "u", + 0x0001D532: "u", + 0x0001D566: "u", + 0x0001D59A: "u", + 0x0001D5CE: "u", + 0x0001D602: "u", + 0x0001D636: "u", + 0x0001D66A: "u", + 0x0001D69E: "u", + 0x0000A79F: "u", + 0x00001D1C: "u", + 0x0000AB4E: "u", + 0x0000AB52: "u", + 0x0000028B: "u", + 0x000003C5: "u", + 0x0001D6D6: "u", + 0x0001D710: "u", + 0x0001D74A: "u", + 0x0001D784: "u", + 0x0001D7BE: "u", + 0x0000057D: "u", + 0x000104F6: "u", + 0x000118D8: "u", + 0x0000222A: "U", + 0x000022C3: "U", + 0x0001D414: "U", + 0x0001D448: "U", + 0x0001D47C: "U", + 0x0001D4B0: "U", + 0x0001D4E4: "U", + 0x0001D518: "U", + 0x0001D54C: "U", + 0x0001D580: "U", + 0x0001D5B4: "U", + 0x0001D5E8: "U", + 0x0001D61C: "U", + 0x0001D650: "U", + 0x0001D684: "U", + 0x0000054D: "U", + 0x00001200: "U", + 0x000104CE: "U", + 0x0000144C: "U", + 0x0000A4F4: "U", + 0x00016F42: "U", + 0x000118B8: "U", + 0x000001D4: "\u016d", + 0x000001D3: "\u016c", + 0x00001D7E: "u\u0335", + 0x0000AB9C: "u\u0335", + 0x00000244: "U\u0335", + 0x000013CC: "U\u0335", + 0x00001458: "U\u00b7", + 0x00001467: "U'", + 0x00001D6B: "ue", + 0x0000AB63: "uo", + 0x00001E43: "\uab51", + 0x0000057A: "\u0270", + 0x00001223: "\u0270", + 0x00002127: "\u01b1", + 0x0000162E: "\u01b1", + 0x00001634: "\u01b1", + 0x00001D7F: "\u028a\u0335", + 0x00002228: "v", + 0x000022C1: "v", + 0x0000FF56: "v", + 0x00002174: "v", + 0x0001D42F: "v", + 0x0001D463: "v", + 0x0001D497: "v", + 0x0001D4CB: "v", + 0x0001D4FF: "v", + 0x0001D533: "v", + 0x0001D567: "v", + 0x0001D59B: "v", + 0x0001D5CF: "v", + 0x0001D603: "v", + 0x0001D637: "v", + 0x0001D66B: "v", + 0x0001D69F: "v", + 0x00001D20: "v", + 0x000003BD: "v", + 0x0001D6CE: "v", + 0x0001D708: "v", + 0x0001D742: "v", + 0x0001D77C: "v", + 0x0001D7B6: "v", + 0x00000475: "v", + 0x000005D8: "v", + 0x00011706: "v", + 0x0000ABA9: "v", + 0x000118C0: "v", + 0x0001D20D: "V", + 0x00000667: "V", + 0x000006F7: "V", + 0x00002164: "V", + 0x0001D415: "V", + 0x0001D449: "V", + 0x0001D47D: "V", + 0x0001D4B1: "V", + 0x0001D4E5: "V", + 0x0001D519: "V", + 0x0001D54D: "V", + 0x0001D581: "V", + 0x0001D5B5: "V", + 0x0001D5E9: "V", + 0x0001D61D: "V", + 0x0001D651: "V", + 0x0001D685: "V", + 0x00000474: "V", + 0x00002D38: "V", + 0x000013D9: "V", + 0x0000142F: "V", + 0x0000A6DF: "V", + 0x0000A4E6: "V", + 0x00016F08: "V", + 0x000118A0: "V", + 0x0001051D: "V", + 0x00010197: "V\u0335", + 0x0000143B: "V\u00b7", + 0x0001F76C: "VB", + 0x00002175: "vi", + 0x00002176: "vii", + 0x00002177: "viii", + 0x00002165: "Vl", + 0x00002166: "Vll", + 0x00002167: "Vlll", + 0x0001F708: "V\u1de4", + 0x00001D27: "\u028c", + 0x000104D8: "\u028c", + 0x00000668: "\u0245", + 0x000006F8: "\u0245", + 0x0000039B: "\u0245", + 0x0001D6B2: "\u0245", + 0x0001D6EC: "\u0245", + 0x0001D726: "\u0245", + 0x0001D760: "\u0245", + 0x0001D79A: "\u0245", + 
0x0000041B: "\u0245", + 0x00002D37: "\u0245", + 0x000104B0: "\u0245", + 0x00001431: "\u0245", + 0x0000A6CE: "\u0245", + 0x0000A4E5: "\u0245", + 0x00016F3D: "\u0245", + 0x0001028D: "\u0245", + 0x000004C5: "\u0245\u0326", + 0x0000143D: "\u0245\u00b7", + 0x0000026F: "w", + 0x0001D430: "w", + 0x0001D464: "w", + 0x0001D498: "w", + 0x0001D4CC: "w", + 0x0001D500: "w", + 0x0001D534: "w", + 0x0001D568: "w", + 0x0001D59C: "w", + 0x0001D5D0: "w", + 0x0001D604: "w", + 0x0001D638: "w", + 0x0001D66C: "w", + 0x0001D6A0: "w", + 0x00001D21: "w", + 0x00000461: "w", + 0x0000051D: "w", + 0x00000561: "w", + 0x0001170A: "w", + 0x0001170E: "w", + 0x0001170F: "w", + 0x0000AB83: "w", + 0x000118EF: "W", + 0x000118E6: "W", + 0x0001D416: "W", + 0x0001D44A: "W", + 0x0001D47E: "W", + 0x0001D4B2: "W", + 0x0001D4E6: "W", + 0x0001D51A: "W", + 0x0001D54E: "W", + 0x0001D582: "W", + 0x0001D5B6: "W", + 0x0001D5EA: "W", + 0x0001D61E: "W", + 0x0001D652: "W", + 0x0001D686: "W", + 0x0000051C: "W", + 0x000013B3: "W", + 0x000013D4: "W", + 0x0000A4EA: "W", + 0x0000047D: "w\u0486\u0487", + 0x000114C5: "w\u0307", + 0x000020A9: "W\u0335", + 0x0000A761: "w\u0326", + 0x00001D0D: "\u028d", + 0x0000043C: "\u028d", + 0x0000AB87: "\u028d", + 0x000004CE: "\u028d\u0326", + 0x0000166E: "x", + 0x000000D7: "x", + 0x0000292B: "x", + 0x0000292C: "x", + 0x00002A2F: "x", + 0x0000FF58: "x", + 0x00002179: "x", + 0x0001D431: "x", + 0x0001D465: "x", + 0x0001D499: "x", + 0x0001D4CD: "x", + 0x0001D501: "x", + 0x0001D535: "x", + 0x0001D569: "x", + 0x0001D59D: "x", + 0x0001D5D1: "x", + 0x0001D605: "x", + 0x0001D639: "x", + 0x0001D66D: "x", + 0x0001D6A1: "x", + 0x00000445: "x", + 0x00001541: "x", + 0x0000157D: "x", + 0x00002DEF: "\u036f", + 0x0000166D: "X", + 0x00002573: "X", + 0x00010322: "X", + 0x000118EC: "X", + 0x0000FF38: "X", + 0x00002169: "X", + 0x0001D417: "X", + 0x0001D44B: "X", + 0x0001D47F: "X", + 0x0001D4B3: "X", + 0x0001D4E7: "X", + 0x0001D51B: "X", + 0x0001D54F: "X", + 0x0001D583: "X", + 0x0001D5B7: "X", + 0x0001D5EB: "X", + 0x0001D61F: "X", + 0x0001D653: "X", + 0x0001D687: "X", + 0x0000A7B3: "X", + 0x000003A7: "X", + 0x0001D6BE: "X", + 0x0001D6F8: "X", + 0x0001D732: "X", + 0x0001D76C: "X", + 0x0001D7A6: "X", + 0x00002CAC: "X", + 0x00000425: "X", + 0x00002D5D: "X", + 0x000016B7: "X", + 0x0000A4EB: "X", + 0x00010290: "X", + 0x000102B4: "X", + 0x00010317: "X", + 0x00010527: "X", + 0x00002A30: "x\u0307", + 0x000004B2: "X\u0329", + 0x00010196: "X\u0335", + 0x0000217A: "xi", + 0x0000217B: "xii", + 0x0000216A: "Xl", + 0x0000216B: "Xll", + 0x00000263: "y", + 0x00001D8C: "y", + 0x0000FF59: "y", + 0x0001D432: "y", + 0x0001D466: "y", + 0x0001D49A: "y", + 0x0001D4CE: "y", + 0x0001D502: "y", + 0x0001D536: "y", + 0x0001D56A: "y", + 0x0001D59E: "y", + 0x0001D5D2: "y", + 0x0001D606: "y", + 0x0001D63A: "y", + 0x0001D66E: "y", + 0x0001D6A2: "y", + 0x0000028F: "y", + 0x00001EFF: "y", + 0x0000AB5A: "y", + 0x000003B3: "y", + 0x0000213D: "y", + 0x0001D6C4: "y", + 0x0001D6FE: "y", + 0x0001D738: "y", + 0x0001D772: "y", + 0x0001D7AC: "y", + 0x00000443: "y", + 0x000004AF: "y", + 0x000010E7: "y", + 0x000118DC: "y", + 0x0000FF39: "Y", + 0x0001D418: "Y", + 0x0001D44C: "Y", + 0x0001D480: "Y", + 0x0001D4B4: "Y", + 0x0001D4E8: "Y", + 0x0001D51C: "Y", + 0x0001D550: "Y", + 0x0001D584: "Y", + 0x0001D5B8: "Y", + 0x0001D5EC: "Y", + 0x0001D620: "Y", + 0x0001D654: "Y", + 0x0001D688: "Y", + 0x000003A5: "Y", + 0x000003D2: "Y", + 0x0001D6BC: "Y", + 0x0001D6F6: "Y", + 0x0001D730: "Y", + 0x0001D76A: "Y", + 0x0001D7A4: "Y", + 0x00002CA8: "Y", + 0x00000423: "Y", + 0x000004AE: "Y", + 
0x000013A9: "Y", + 0x000013BD: "Y", + 0x0000A4EC: "Y", + 0x00016F43: "Y", + 0x000118A4: "Y", + 0x000102B2: "Y", + 0x000001B4: "y\u0314", + 0x0000024F: "y\u0335", + 0x000004B1: "y\u0335", + 0x000000A5: "Y\u0335", + 0x0000024E: "Y\u0335", + 0x000004B0: "Y\u0335", + 0x00000292: "\u021d", + 0x0000A76B: "\u021d", + 0x00002CCD: "\u021d", + 0x000004E1: "\u021d", + 0x000010F3: "\u021d", + 0x0001D433: "z", + 0x0001D467: "z", + 0x0001D49B: "z", + 0x0001D4CF: "z", + 0x0001D503: "z", + 0x0001D537: "z", + 0x0001D56B: "z", + 0x0001D59F: "z", + 0x0001D5D3: "z", + 0x0001D607: "z", + 0x0001D63B: "z", + 0x0001D66F: "z", + 0x0001D6A3: "z", + 0x00001D22: "z", + 0x0000AB93: "z", + 0x000118C4: "z", + 0x000102F5: "Z", + 0x000118E5: "Z", + 0x0000FF3A: "Z", + 0x00002124: "Z", + 0x00002128: "Z", + 0x0001D419: "Z", + 0x0001D44D: "Z", + 0x0001D481: "Z", + 0x0001D4B5: "Z", + 0x0001D4E9: "Z", + 0x0001D585: "Z", + 0x0001D5B9: "Z", + 0x0001D5ED: "Z", + 0x0001D621: "Z", + 0x0001D655: "Z", + 0x0001D689: "Z", + 0x00000396: "Z", + 0x0001D6AD: "Z", + 0x0001D6E7: "Z", + 0x0001D721: "Z", + 0x0001D75B: "Z", + 0x0001D795: "Z", + 0x000013C3: "Z", + 0x0000A4DC: "Z", + 0x000118A9: "Z", + 0x00000290: "z\u0328", + 0x000001B6: "z\u0335", + 0x000001B5: "Z\u0335", + 0x00000225: "z\u0326", + 0x00000224: "Z\u0326", + 0x00001D76: "z\u0334", + 0x000001BF: "\u00fe", + 0x000003F8: "\u00fe", + 0x000003F7: "\u00de", + 0x000104C4: "\u00de", + 0x00002079: "\ua770", + 0x00001D24: "\u01a8", + 0x000003E9: "\u01a8", + 0x0000A645: "\u01a8", + 0x0000044C: "\u0185", + 0x0000AB9F: "\u0185", + 0x0000044B: "\u0185i", + 0x0000AB7E: "\u0242", + 0x000002E4: "\u02c1", + 0x0000A6CD: "\u02a1", + 0x00002299: "\u0298", + 0x00002609: "\u0298", + 0x00002A00: "\u0298", + 0x0000A668: "\u0298", + 0x00002D59: "\u0298", + 0x000104C3: "\u0298", + 0x0000213E: "\u0393", + 0x0001D6AA: "\u0393", + 0x0001D6E4: "\u0393", + 0x0001D71E: "\u0393", + 0x0001D758: "\u0393", + 0x0001D792: "\u0393", + 0x00002C84: "\u0393", + 0x00000413: "\u0393", + 0x000013B1: "\u0393", + 0x000014A5: "\u0393", + 0x00016F07: "\u0393", + 0x00000492: "\u0393\u0335", + 0x000014AF: "\u0393\u00b7", + 0x00000490: "\u0393'", + 0x00002206: "\u0394", + 0x000025B3: "\u0394", + 0x0001F702: "\u0394", + 0x0001D6AB: "\u0394", + 0x0001D6E5: "\u0394", + 0x0001D71F: "\u0394", + 0x0001D759: "\u0394", + 0x0001D793: "\u0394", + 0x00002C86: "\u0394", + 0x00002D60: "\u0394", + 0x00001403: "\u0394", + 0x00016F1A: "\u0394", + 0x00010285: "\u0394", + 0x000102A3: "\u0394", + 0x00002359: "\u0394\u0332", + 0x0000140F: "\u0394\u00b7", + 0x0000142C: "\u0394\u1420", + 0x0001D7CB: "\u03dd", + 0x0001D6C7: "\u03b6", + 0x0001D701: "\u03b6", + 0x0001D73B: "\u03b6", + 0x0001D775: "\u03b6", + 0x0001D7AF: "\u03b6", + 0x00002CE4: "\u03d7", + 0x0001D6CC: "\u03bb", + 0x0001D706: "\u03bb", + 0x0001D740: "\u03bb", + 0x0001D77A: "\u03bb", + 0x0001D7B4: "\u03bb", + 0x00002C96: "\u03bb", + 0x000104DB: "\u03bb", + 0x000000B5: "\u03bc", + 0x0001D6CD: "\u03bc", + 0x0001D707: "\u03bc", + 0x0001D741: "\u03bc", + 0x0001D77B: "\u03bc", + 0x0001D7B5: "\u03bc", + 0x0001D6CF: "\u03be", + 0x0001D709: "\u03be", + 0x0001D743: "\u03be", + 0x0001D77D: "\u03be", + 0x0001D7B7: "\u03be", + 0x0001D6B5: "\u039e", + 0x0001D6EF: "\u039e", + 0x0001D729: "\u039e", + 0x0001D763: "\u039e", + 0x0001D79D: "\u039e", + 0x000003D6: "\u03c0", + 0x0000213C: "\u03c0", + 0x0001D6D1: "\u03c0", + 0x0001D6E1: "\u03c0", + 0x0001D70B: "\u03c0", + 0x0001D71B: "\u03c0", + 0x0001D745: "\u03c0", + 0x0001D755: "\u03c0", + 0x0001D77F: "\u03c0", + 0x0001D78F: "\u03c0", + 0x0001D7B9: "\u03c0", + 
0x0001D7C9: "\u03c0", + 0x00001D28: "\u03c0", + 0x0000043F: "\u03c0", + 0x0000220F: "\u03a0", + 0x0000213F: "\u03a0", + 0x0001D6B7: "\u03a0", + 0x0001D6F1: "\u03a0", + 0x0001D72B: "\u03a0", + 0x0001D765: "\u03a0", + 0x0001D79F: "\u03a0", + 0x00002CA0: "\u03a0", + 0x0000041F: "\u03a0", + 0x0000A6DB: "\u03a0", + 0x000102AD: "\u03d8", + 0x00010312: "\u03d8", + 0x000003DB: "\u03c2", + 0x0001D6D3: "\u03c2", + 0x0001D70D: "\u03c2", + 0x0001D747: "\u03c2", + 0x0001D781: "\u03c2", + 0x0001D7BB: "\u03c2", + 0x0001D6BD: "\u03a6", + 0x0001D6F7: "\u03a6", + 0x0001D731: "\u03a6", + 0x0001D76B: "\u03a6", + 0x0001D7A5: "\u03a6", + 0x00002CAA: "\u03a6", + 0x00000424: "\u03a6", + 0x00000553: "\u03a6", + 0x00001240: "\u03a6", + 0x000016F0: "\u03a6", + 0x000102B3: "\u03a6", + 0x0000AB53: "\u03c7", + 0x0000AB55: "\u03c7", + 0x0001D6D8: "\u03c7", + 0x0001D712: "\u03c7", + 0x0001D74C: "\u03c7", + 0x0001D786: "\u03c7", + 0x0001D7C0: "\u03c7", + 0x00002CAD: "\u03c7", + 0x0001D6D9: "\u03c8", + 0x0001D713: "\u03c8", + 0x0001D74D: "\u03c8", + 0x0001D787: "\u03c8", + 0x0001D7C1: "\u03c8", + 0x00000471: "\u03c8", + 0x000104F9: "\u03c8", + 0x0001D6BF: "\u03a8", + 0x0001D6F9: "\u03a8", + 0x0001D733: "\u03a8", + 0x0001D76D: "\u03a8", + 0x0001D7A7: "\u03a8", + 0x00002CAE: "\u03a8", + 0x00000470: "\u03a8", + 0x000104D1: "\u03a8", + 0x000016D8: "\u03a8", + 0x000102B5: "\u03a8", + 0x00002375: "\u03c9", + 0x0000A7B7: "\u03c9", + 0x0001D6DA: "\u03c9", + 0x0001D714: "\u03c9", + 0x0001D74E: "\u03c9", + 0x0001D788: "\u03c9", + 0x0001D7C2: "\u03c9", + 0x00002CB1: "\u03c9", + 0x0000A64D: "\u03c9", + 0x00002126: "\u03a9", + 0x0001D6C0: "\u03a9", + 0x0001D6FA: "\u03a9", + 0x0001D734: "\u03a9", + 0x0001D76E: "\u03a9", + 0x0001D7A8: "\u03a9", + 0x0000162F: "\u03a9", + 0x00001635: "\u03a9", + 0x000102B6: "\u03a9", + 0x00002379: "\u03c9\u0332", + 0x00001F7D: "\u1ff4", + 0x00002630: "\u2cb6", + 0x00002CDC: "\u03ec", + 0x00000497: "\u0436\u0329", + 0x00000496: "\u0416\u0329", + 0x0001D20B: "\u0418", + 0x00000376: "\u0418", + 0x0000A6A1: "\u0418", + 0x00010425: "\u0418", + 0x00000419: "\u040d", + 0x0000048A: "\u040d\u0326", + 0x0000045D: "\u0439", + 0x0000048B: "\u0439\u0326", + 0x000104BC: "\u04c3", + 0x00001D2B: "\u043b", + 0x000004C6: "\u043b\u0326", + 0x0000AB60: "\u0459", + 0x000104EB: "\ua669", + 0x00001DEE: "\u2dec", + 0x000104CD: "\u040b", + 0x0001D202: "\u04fe", + 0x0001D222: "\u0460", + 0x000013C7: "\u0460", + 0x000015EF: "\u0460", + 0x0000047C: "\u0460\u0486\u0487", + 0x000018ED: "\u0460\u00b7", + 0x0000A7B6: "\ua64c", + 0x000004CC: "\u04b7", + 0x000004CB: "\u04b6", + 0x000004BE: "\u04bc\u0328", + 0x00002CBD: "\u0448", + 0x00002CBC: "\u0428", + 0x0000A650: "\u042al", + 0x00002108: "\u042d", + 0x0001F701: "\ua658", + 0x00016F1C: "\ua658", + 0x0000A992: "\u2c3f", + 0x00000587: "\u0565\u0582", + 0x00001294: "\u0571", + 0x0000FB14: "\u0574\u0565", + 0x0000FB15: "\u0574\u056b", + 0x0000FB17: "\u0574\u056d", + 0x0000FB13: "\u0574\u0576", + 0x00002229: "\u0548", + 0x000022C2: "\u0548", + 0x0001D245: "\u0548", + 0x00001260: "\u0548", + 0x0000144E: "\u0548", + 0x0000A4F5: "\u0548", + 0x0000145A: "\u0548\u00b7", + 0x00001468: "\u0548'", + 0x0000FB16: "\u057e\u0576", + 0x000020BD: "\u0554", + 0x000002D3: "\u0559", + 0x000002BF: "\u0559", + 0x00002135: "\u05d0", + 0x0000FB21: "\u05d0", + 0x0000FB2F: "\ufb2e", + 0x0000FB30: "\ufb2e", + 0x0000FB4F: "\u05d0\u05dc", + 0x00002136: "\u05d1", + 0x00002137: "\u05d2", + 0x00002138: "\u05d3", + 0x0000FB22: "\u05d3", + 0x0000FB23: "\u05d4", + 0x0000FB39: "\ufb1d", + 0x0000FB24: "\u05db", + 0x0000FB25: 
"\u05dc", + 0x0000FB26: "\u05dd", + 0x0000FB20: "\u05e2", + 0x0000FB27: "\u05e8", + 0x0000FB2B: "\ufb2a", + 0x0000FB49: "\ufb2a", + 0x0000FB2D: "\ufb2c", + 0x0000FB28: "\u05ea", + 0x0000FE80: "\u0621", + 0x000006FD: "\u0621\u0348", + 0x0000FE82: "\u0622", + 0x0000FE81: "\u0622", + 0x0000FB51: "\u0671", + 0x0000FB50: "\u0671", + 0x0001EE01: "\u0628", + 0x0001EE21: "\u0628", + 0x0001EE61: "\u0628", + 0x0001EE81: "\u0628", + 0x0001EEA1: "\u0628", + 0x0000FE91: "\u0628", + 0x0000FE92: "\u0628", + 0x0000FE90: "\u0628", + 0x0000FE8F: "\u0628", + 0x00000751: "\u0628\u06db", + 0x000008B6: "\u0628\u06e2", + 0x000008A1: "\u0628\u0654", + 0x0000FCA0: "\u0628o", + 0x0000FCE2: "\u0628o", + 0x0000FC9C: "\u0628\u062c", + 0x0000FC05: "\u0628\u062c", + 0x0000FC9D: "\u0628\u062d", + 0x0000FC06: "\u0628\u062d", + 0x0000FDC2: "\u0628\u062d\u0649", + 0x0000FC9E: "\u0628\u062e", + 0x0000FC07: "\u0628\u062e", + 0x0000FCD2: "\u0628\u062e", + 0x0000FC4B: "\u0628\u062e", + 0x0000FD9E: "\u0628\u062e\u0649", + 0x0000FC6A: "\u0628\u0631", + 0x0000FC6B: "\u0628\u0632", + 0x0000FC9F: "\u0628\u0645", + 0x0000FCE1: "\u0628\u0645", + 0x0000FC6C: "\u0628\u0645", + 0x0000FC08: "\u0628\u0645", + 0x0000FC6D: "\u0628\u0646", + 0x0000FC6E: "\u0628\u0649", + 0x0000FC09: "\u0628\u0649", + 0x0000FC6F: "\u0628\u0649", + 0x0000FC0A: "\u0628\u0649", + 0x0000FB54: "\u067b", + 0x0000FB55: "\u067b", + 0x0000FB53: "\u067b", + 0x0000FB52: "\u067b", + 0x000006D0: "\u067b", + 0x0000FBE6: "\u067b", + 0x0000FBE7: "\u067b", + 0x0000FBE5: "\u067b", + 0x0000FBE4: "\u067b", + 0x0000FB5C: "\u0680", + 0x0000FB5D: "\u0680", + 0x0000FB5B: "\u0680", + 0x0000FB5A: "\u0680", + 0x000008A9: "\u0754", + 0x00000767: "\u0754", + 0x00002365: "\u0629", + 0x000000F6: "\u0629", + 0x0000FE94: "\u0629", + 0x0000FE93: "\u0629", + 0x000006C3: "\u0629", + 0x0001EE15: "\u062a", + 0x0001EE35: "\u062a", + 0x0001EE75: "\u062a", + 0x0001EE95: "\u062a", + 0x0001EEB5: "\u062a", + 0x0000FE97: "\u062a", + 0x0000FE98: "\u062a", + 0x0000FE96: "\u062a", + 0x0000FE95: "\u062a", + 0x0000FCA5: "\u062ao", + 0x0000FCE4: "\u062ao", + 0x0000FCA1: "\u062a\u062c", + 0x0000FC0B: "\u062a\u062c", + 0x0000FD50: "\u062a\u062c\u0645", + 0x0000FDA0: "\u062a\u062c\u0649", + 0x0000FD9F: "\u062a\u062c\u0649", + 0x0000FCA2: "\u062a\u062d", + 0x0000FC0C: "\u062a\u062d", + 0x0000FD52: "\u062a\u062d\u062c", + 0x0000FD51: "\u062a\u062d\u062c", + 0x0000FD53: "\u062a\u062d\u0645", + 0x0000FCA3: "\u062a\u062e", + 0x0000FC0D: "\u062a\u062e", + 0x0000FD54: "\u062a\u062e\u0645", + 0x0000FDA2: "\u062a\u062e\u0649", + 0x0000FDA1: "\u062a\u062e\u0649", + 0x0000FC70: "\u062a\u0631", + 0x0000FC71: "\u062a\u0632", + 0x0000FCA4: "\u062a\u0645", + 0x0000FCE3: "\u062a\u0645", + 0x0000FC72: "\u062a\u0645", + 0x0000FC0E: "\u062a\u0645", + 0x0000FD55: "\u062a\u0645\u062c", + 0x0000FD56: "\u062a\u0645\u062d", + 0x0000FD57: "\u062a\u0645\u062e", + 0x0000FDA4: "\u062a\u0645\u0649", + 0x0000FDA3: "\u062a\u0645\u0649", + 0x0000FC73: "\u062a\u0646", + 0x0000FC74: "\u062a\u0649", + 0x0000FC0F: "\u062a\u0649", + 0x0000FC75: "\u062a\u0649", + 0x0000FC10: "\u062a\u0649", + 0x0000FB60: "\u067a", + 0x0000FB61: "\u067a", + 0x0000FB5F: "\u067a", + 0x0000FB5E: "\u067a", + 0x0000FB64: "\u067f", + 0x0000FB65: "\u067f", + 0x0000FB63: "\u067f", + 0x0000FB62: "\u067f", + 0x0001EE02: "\u062c", + 0x0001EE22: "\u062c", + 0x0001EE42: "\u062c", + 0x0001EE62: "\u062c", + 0x0001EE82: "\u062c", + 0x0001EEA2: "\u062c", + 0x0000FE9F: "\u062c", + 0x0000FEA0: "\u062c", + 0x0000FE9E: "\u062c", + 0x0000FE9D: "\u062c", + 0x0000FCA7: "\u062c\u062d", + 
0x0000FC15: "\u062c\u062d", + 0x0000FDA6: "\u062c\u062d\u0649", + 0x0000FDBE: "\u062c\u062d\u0649", + 0x0000FDFB: "\u062c\u0644 \u062c\u0644l\u0644o", + 0x0000FCA8: "\u062c\u0645", + 0x0000FC16: "\u062c\u0645", + 0x0000FD59: "\u062c\u0645\u062d", + 0x0000FD58: "\u062c\u0645\u062d", + 0x0000FDA7: "\u062c\u0645\u0649", + 0x0000FDA5: "\u062c\u0645\u0649", + 0x0000FD1D: "\u062c\u0649", + 0x0000FD01: "\u062c\u0649", + 0x0000FD1E: "\u062c\u0649", + 0x0000FD02: "\u062c\u0649", + 0x0000FB78: "\u0683", + 0x0000FB79: "\u0683", + 0x0000FB77: "\u0683", + 0x0000FB76: "\u0683", + 0x0000FB74: "\u0684", + 0x0000FB75: "\u0684", + 0x0000FB73: "\u0684", + 0x0000FB72: "\u0684", + 0x0000FB7C: "\u0686", + 0x0000FB7D: "\u0686", + 0x0000FB7B: "\u0686", + 0x0000FB7A: "\u0686", + 0x0000FB80: "\u0687", + 0x0000FB81: "\u0687", + 0x0000FB7F: "\u0687", + 0x0000FB7E: "\u0687", + 0x0001EE07: "\u062d", + 0x0001EE27: "\u062d", + 0x0001EE47: "\u062d", + 0x0001EE67: "\u062d", + 0x0001EE87: "\u062d", + 0x0001EEA7: "\u062d", + 0x0000FEA3: "\u062d", + 0x0000FEA4: "\u062d", + 0x0000FEA2: "\u062d", + 0x0000FEA1: "\u062d", + 0x00000685: "\u062d\u06db", + 0x00000681: "\u062d\u0654", + 0x00000772: "\u062d\u0654", + 0x0000FCA9: "\u062d\u062c", + 0x0000FC17: "\u062d\u062c", + 0x0000FDBF: "\u062d\u062c\u0649", + 0x0000FCAA: "\u062d\u0645", + 0x0000FC18: "\u062d\u0645", + 0x0000FD5B: "\u062d\u0645\u0649", + 0x0000FD5A: "\u062d\u0645\u0649", + 0x0000FD1B: "\u062d\u0649", + 0x0000FCFF: "\u062d\u0649", + 0x0000FD1C: "\u062d\u0649", + 0x0000FD00: "\u062d\u0649", + 0x0001EE17: "\u062e", + 0x0001EE37: "\u062e", + 0x0001EE57: "\u062e", + 0x0001EE77: "\u062e", + 0x0001EE97: "\u062e", + 0x0001EEB7: "\u062e", + 0x0000FEA7: "\u062e", + 0x0000FEA8: "\u062e", + 0x0000FEA6: "\u062e", + 0x0000FEA5: "\u062e", + 0x0000FCAB: "\u062e\u062c", + 0x0000FC19: "\u062e\u062c", + 0x0000FC1A: "\u062e\u062d", + 0x0000FCAC: "\u062e\u0645", + 0x0000FC1B: "\u062e\u0645", + 0x0000FD1F: "\u062e\u0649", + 0x0000FD03: "\u062e\u0649", + 0x0000FD20: "\u062e\u0649", + 0x0000FD04: "\u062e\u0649", + 0x000102E1: "\u062f", + 0x0001EE03: "\u062f", + 0x0001EE83: "\u062f", + 0x0001EEA3: "\u062f", + 0x0000FEAA: "\u062f", + 0x0000FEA9: "\u062f", + 0x00000688: "\u062f\u0615", + 0x0000FB89: "\u062f\u0615", + 0x0000FB88: "\u062f\u0615", + 0x0000068E: "\u062f\u06db", + 0x0000FB87: "\u062f\u06db", + 0x0000FB86: "\u062f\u06db", + 0x000006EE: "\u062f\u0302", + 0x000008AE: "\u062f\u0324\u0323", + 0x0001EE18: "\u0630", + 0x0001EE98: "\u0630", + 0x0001EEB8: "\u0630", + 0x0000FEAC: "\u0630", + 0x0000FEAB: "\u0630", + 0x0000FC5B: "\u0630\u0670", + 0x0000068B: "\u068a\u0615", + 0x0000FB85: "\u068c", + 0x0000FB84: "\u068c", + 0x0000FB83: "\u068d", + 0x0000FB82: "\u068d", + 0x0001EE13: "\u0631", + 0x0001EE93: "\u0631", + 0x0001EEB3: "\u0631", + 0x0000FEAE: "\u0631", + 0x0000FEAD: "\u0631", + 0x00000691: "\u0631\u0615", + 0x0000FB8D: "\u0631\u0615", + 0x0000FB8C: "\u0631\u0615", + 0x00000698: "\u0631\u06db", + 0x0000FB8B: "\u0631\u06db", + 0x0000FB8A: "\u0631\u06db", + 0x00000692: "\u0631\u0306", + 0x000008B9: "\u0631\u0306\u0307", + 0x000006EF: "\u0631\u0302", + 0x0000076C: "\u0631\u0654", + 0x0000FC5C: "\u0631\u0670", + 0x0000FDF6: "\u0631\u0633\u0648\u0644", + 0x0000FDFC: "\u0631\u0649l\u0644", + 0x0001EE06: "\u0632", + 0x0001EE86: "\u0632", + 0x0001EEA6: "\u0632", + 0x0000FEB0: "\u0632", + 0x0000FEAF: "\u0632", + 0x000008B2: "\u0632\u0302", + 0x00000771: "\u0697\u0615", + 0x0001EE0E: "\u0633", + 0x0001EE2E: "\u0633", + 0x0001EE4E: "\u0633", + 0x0001EE6E: "\u0633", + 0x0001EE8E: "\u0633", + 
0x0001EEAE: "\u0633", + 0x0000FEB3: "\u0633", + 0x0000FEB4: "\u0633", + 0x0000FEB2: "\u0633", + 0x0000FEB1: "\u0633", + 0x00000634: "\u0633\u06db", + 0x0001EE14: "\u0633\u06db", + 0x0001EE34: "\u0633\u06db", + 0x0001EE54: "\u0633\u06db", + 0x0001EE74: "\u0633\u06db", + 0x0001EE94: "\u0633\u06db", + 0x0001EEB4: "\u0633\u06db", + 0x0000FEB7: "\u0633\u06db", + 0x0000FEB8: "\u0633\u06db", + 0x0000FEB6: "\u0633\u06db", + 0x0000FEB5: "\u0633\u06db", + 0x0000077E: "\u0633\u0302", + 0x0000FD31: "\u0633o", + 0x0000FCE8: "\u0633o", + 0x0000FD32: "\u0633\u06dbo", + 0x0000FCEA: "\u0633\u06dbo", + 0x0000FCAD: "\u0633\u062c", + 0x0000FD34: "\u0633\u062c", + 0x0000FC1C: "\u0633\u062c", + 0x0000FD2D: "\u0633\u06db\u062c", + 0x0000FD37: "\u0633\u06db\u062c", + 0x0000FD25: "\u0633\u06db\u062c", + 0x0000FD09: "\u0633\u06db\u062c", + 0x0000FD5D: "\u0633\u062c\u062d", + 0x0000FD5E: "\u0633\u062c\u0649", + 0x0000FD69: "\u0633\u06db\u062c\u0649", + 0x0000FCAE: "\u0633\u062d", + 0x0000FD35: "\u0633\u062d", + 0x0000FC1D: "\u0633\u062d", + 0x0000FD2E: "\u0633\u06db\u062d", + 0x0000FD38: "\u0633\u06db\u062d", + 0x0000FD26: "\u0633\u06db\u062d", + 0x0000FD0A: "\u0633\u06db\u062d", + 0x0000FD5C: "\u0633\u062d\u062c", + 0x0000FD68: "\u0633\u06db\u062d\u0645", + 0x0000FD67: "\u0633\u06db\u062d\u0645", + 0x0000FDAA: "\u0633\u06db\u062d\u0649", + 0x0000FCAF: "\u0633\u062e", + 0x0000FD36: "\u0633\u062e", + 0x0000FC1E: "\u0633\u062e", + 0x0000FD2F: "\u0633\u06db\u062e", + 0x0000FD39: "\u0633\u06db\u062e", + 0x0000FD27: "\u0633\u06db\u062e", + 0x0000FD0B: "\u0633\u06db\u062e", + 0x0000FDA8: "\u0633\u062e\u0649", + 0x0000FDC6: "\u0633\u062e\u0649", + 0x0000FD2A: "\u0633\u0631", + 0x0000FD0E: "\u0633\u0631", + 0x0000FD29: "\u0633\u06db\u0631", + 0x0000FD0D: "\u0633\u06db\u0631", + 0x0000FCB0: "\u0633\u0645", + 0x0000FCE7: "\u0633\u0645", + 0x0000FC1F: "\u0633\u0645", + 0x0000FD30: "\u0633\u06db\u0645", + 0x0000FCE9: "\u0633\u06db\u0645", + 0x0000FD28: "\u0633\u06db\u0645", + 0x0000FD0C: "\u0633\u06db\u0645", + 0x0000FD61: "\u0633\u0645\u062c", + 0x0000FD60: "\u0633\u0645\u062d", + 0x0000FD5F: "\u0633\u0645\u062d", + 0x0000FD6B: "\u0633\u06db\u0645\u062e", + 0x0000FD6A: "\u0633\u06db\u0645\u062e", + 0x0000FD63: "\u0633\u0645\u0645", + 0x0000FD62: "\u0633\u0645\u0645", + 0x0000FD6D: "\u0633\u06db\u0645\u0645", + 0x0000FD6C: "\u0633\u06db\u0645\u0645", + 0x0000FD17: "\u0633\u0649", + 0x0000FCFB: "\u0633\u0649", + 0x0000FD18: "\u0633\u0649", + 0x0000FCFC: "\u0633\u0649", + 0x0000FD19: "\u0633\u06db\u0649", + 0x0000FCFD: "\u0633\u06db\u0649", + 0x0000FD1A: "\u0633\u06db\u0649", + 0x0000FCFE: "\u0633\u06db\u0649", + 0x000102F2: "\u0635", + 0x0001EE11: "\u0635", + 0x0001EE31: "\u0635", + 0x0001EE51: "\u0635", + 0x0001EE71: "\u0635", + 0x0001EE91: "\u0635", + 0x0001EEB1: "\u0635", + 0x0000FEBB: "\u0635", + 0x0000FEBC: "\u0635", + 0x0000FEBA: "\u0635", + 0x0000FEB9: "\u0635", + 0x0000069E: "\u0635\u06db", + 0x000008AF: "\u0635\u0324\u0323", + 0x0000FCB1: "\u0635\u062d", + 0x0000FC20: "\u0635\u062d", + 0x0000FD65: "\u0635\u062d\u062d", + 0x0000FD64: "\u0635\u062d\u062d", + 0x0000FDA9: "\u0635\u062d\u0649", + 0x0000FCB2: "\u0635\u062e", + 0x0000FD2B: "\u0635\u0631", + 0x0000FD0F: "\u0635\u0631", + 0x0000FDF5: "\u0635\u0644\u0639\u0645", + 0x0000FDF9: "\u0635\u0644\u0649", + 0x0000FDF0: "\u0635\u0644\u0649", + 0x0000FDFA: "\u0635\u0644\u0649 l\u0644\u0644o \u0639\u0644\u0649o \u0648\u0633\u0644\u0645", + 0x0000FCB3: "\u0635\u0645", + 0x0000FC21: "\u0635\u0645", + 0x0000FDC5: "\u0635\u0645\u0645", + 0x0000FD66: "\u0635\u0645\u0645", + 
0x0000FD21: "\u0635\u0649", + 0x0000FD05: "\u0635\u0649", + 0x0000FD22: "\u0635\u0649", + 0x0000FD06: "\u0635\u0649", + 0x0001EE19: "\u0636", + 0x0001EE39: "\u0636", + 0x0001EE59: "\u0636", + 0x0001EE79: "\u0636", + 0x0001EE99: "\u0636", + 0x0001EEB9: "\u0636", + 0x0000FEBF: "\u0636", + 0x0000FEC0: "\u0636", + 0x0000FEBE: "\u0636", + 0x0000FEBD: "\u0636", + 0x0000FCB4: "\u0636\u062c", + 0x0000FC22: "\u0636\u062c", + 0x0000FCB5: "\u0636\u062d", + 0x0000FC23: "\u0636\u062d", + 0x0000FD6E: "\u0636\u062d\u0649", + 0x0000FDAB: "\u0636\u062d\u0649", + 0x0000FCB6: "\u0636\u062e", + 0x0000FC24: "\u0636\u062e", + 0x0000FD70: "\u0636\u062e\u0645", + 0x0000FD6F: "\u0636\u062e\u0645", + 0x0000FD2C: "\u0636\u0631", + 0x0000FD10: "\u0636\u0631", + 0x0000FCB7: "\u0636\u0645", + 0x0000FC25: "\u0636\u0645", + 0x0000FD23: "\u0636\u0649", + 0x0000FD07: "\u0636\u0649", + 0x0000FD24: "\u0636\u0649", + 0x0000FD08: "\u0636\u0649", + 0x000102E8: "\u0637", + 0x0001EE08: "\u0637", + 0x0001EE68: "\u0637", + 0x0001EE88: "\u0637", + 0x0001EEA8: "\u0637", + 0x0000FEC3: "\u0637", + 0x0000FEC4: "\u0637", + 0x0000FEC2: "\u0637", + 0x0000FEC1: "\u0637", + 0x0000069F: "\u0637\u06db", + 0x0000FCB8: "\u0637\u062d", + 0x0000FC26: "\u0637\u062d", + 0x0000FD33: "\u0637\u0645", + 0x0000FD3A: "\u0637\u0645", + 0x0000FC27: "\u0637\u0645", + 0x0000FD72: "\u0637\u0645\u062d", + 0x0000FD71: "\u0637\u0645\u062d", + 0x0000FD73: "\u0637\u0645\u0645", + 0x0000FD74: "\u0637\u0645\u0649", + 0x0000FD11: "\u0637\u0649", + 0x0000FCF5: "\u0637\u0649", + 0x0000FD12: "\u0637\u0649", + 0x0000FCF6: "\u0637\u0649", + 0x0001EE1A: "\u0638", + 0x0001EE7A: "\u0638", + 0x0001EE9A: "\u0638", + 0x0001EEBA: "\u0638", + 0x0000FEC7: "\u0638", + 0x0000FEC8: "\u0638", + 0x0000FEC6: "\u0638", + 0x0000FEC5: "\u0638", + 0x0000FCB9: "\u0638\u0645", + 0x0000FD3B: "\u0638\u0645", + 0x0000FC28: "\u0638\u0645", + 0x0000060F: "\u0639", + 0x0001EE0F: "\u0639", + 0x0001EE2F: "\u0639", + 0x0001EE4F: "\u0639", + 0x0001EE6F: "\u0639", + 0x0001EE8F: "\u0639", + 0x0001EEAF: "\u0639", + 0x0000FECB: "\u0639", + 0x0000FECC: "\u0639", + 0x0000FECA: "\u0639", + 0x0000FEC9: "\u0639", + 0x0000FCBA: "\u0639\u062c", + 0x0000FC29: "\u0639\u062c", + 0x0000FDC4: "\u0639\u062c\u0645", + 0x0000FD75: "\u0639\u062c\u0645", + 0x0000FDF7: "\u0639\u0644\u0649o", + 0x0000FCBB: "\u0639\u0645", + 0x0000FC2A: "\u0639\u0645", + 0x0000FD77: "\u0639\u0645\u0645", + 0x0000FD76: "\u0639\u0645\u0645", + 0x0000FD78: "\u0639\u0645\u0649", + 0x0000FDB6: "\u0639\u0645\u0649", + 0x0000FD13: "\u0639\u0649", + 0x0000FCF7: "\u0639\u0649", + 0x0000FD14: "\u0639\u0649", + 0x0000FCF8: "\u0639\u0649", + 0x0001EE1B: "\u063a", + 0x0001EE3B: "\u063a", + 0x0001EE5B: "\u063a", + 0x0001EE7B: "\u063a", + 0x0001EE9B: "\u063a", + 0x0001EEBB: "\u063a", + 0x0000FECF: "\u063a", + 0x0000FED0: "\u063a", + 0x0000FECE: "\u063a", + 0x0000FECD: "\u063a", + 0x0000FCBC: "\u063a\u062c", + 0x0000FC2B: "\u063a\u062c", + 0x0000FCBD: "\u063a\u0645", + 0x0000FC2C: "\u063a\u0645", + 0x0000FD79: "\u063a\u0645\u0645", + 0x0000FD7B: "\u063a\u0645\u0649", + 0x0000FD7A: "\u063a\u0645\u0649", + 0x0000FD15: "\u063a\u0649", + 0x0000FCF9: "\u063a\u0649", + 0x0000FD16: "\u063a\u0649", + 0x0000FCFA: "\u063a\u0649", + 0x0001EE10: "\u0641", + 0x0001EE30: "\u0641", + 0x0001EE70: "\u0641", + 0x0001EE90: "\u0641", + 0x0001EEB0: "\u0641", + 0x0000FED3: "\u0641", + 0x0000FED4: "\u0641", + 0x0000FED2: "\u0641", + 0x0000FED1: "\u0641", + 0x000006A7: "\u0641", + 0x0000FCBE: "\u0641\u062c", + 0x0000FC2D: "\u0641\u062c", + 0x0000FCBF: "\u0641\u062d", + 0x0000FC2E: 
"\u0641\u062d", + 0x0000FCC0: "\u0641\u062e", + 0x0000FC2F: "\u0641\u062e", + 0x0000FD7D: "\u0641\u062e\u0645", + 0x0000FD7C: "\u0641\u062e\u0645", + 0x0000FCC1: "\u0641\u0645", + 0x0000FC30: "\u0641\u0645", + 0x0000FDC1: "\u0641\u0645\u0649", + 0x0000FC7C: "\u0641\u0649", + 0x0000FC31: "\u0641\u0649", + 0x0000FC7D: "\u0641\u0649", + 0x0000FC32: "\u0641\u0649", + 0x0001EE1E: "\u06a1", + 0x0001EE7E: "\u06a1", + 0x000008BB: "\u06a1", + 0x0000066F: "\u06a1", + 0x0001EE1F: "\u06a1", + 0x0001EE5F: "\u06a1", + 0x000008BC: "\u06a1", + 0x000006A4: "\u06a1\u06db", + 0x0000FB6C: "\u06a1\u06db", + 0x0000FB6D: "\u06a1\u06db", + 0x0000FB6B: "\u06a1\u06db", + 0x0000FB6A: "\u06a1\u06db", + 0x000006A8: "\u06a1\u06db", + 0x000008A4: "\u06a2\u06db", + 0x0000FB70: "\u06a6", + 0x0000FB71: "\u06a6", + 0x0000FB6F: "\u06a6", + 0x0000FB6E: "\u06a6", + 0x0001EE12: "\u0642", + 0x0001EE32: "\u0642", + 0x0001EE52: "\u0642", + 0x0001EE72: "\u0642", + 0x0001EE92: "\u0642", + 0x0001EEB2: "\u0642", + 0x0000FED7: "\u0642", + 0x0000FED8: "\u0642", + 0x0000FED6: "\u0642", + 0x0000FED5: "\u0642", + 0x0000FCC2: "\u0642\u062d", + 0x0000FC33: "\u0642\u062d", + 0x0000FDF1: "\u0642\u0644\u0649", + 0x0000FCC3: "\u0642\u0645", + 0x0000FC34: "\u0642\u0645", + 0x0000FDB4: "\u0642\u0645\u062d", + 0x0000FD7E: "\u0642\u0645\u062d", + 0x0000FD7F: "\u0642\u0645\u0645", + 0x0000FDB2: "\u0642\u0645\u0649", + 0x0000FC7E: "\u0642\u0649", + 0x0000FC35: "\u0642\u0649", + 0x0000FC7F: "\u0642\u0649", + 0x0000FC36: "\u0642\u0649", + 0x0001EE0A: "\u0643", + 0x0001EE2A: "\u0643", + 0x0001EE6A: "\u0643", + 0x0000FEDB: "\u0643", + 0x0000FEDC: "\u0643", + 0x0000FEDA: "\u0643", + 0x0000FED9: "\u0643", + 0x000006A9: "\u0643", + 0x0000FB90: "\u0643", + 0x0000FB91: "\u0643", + 0x0000FB8F: "\u0643", + 0x0000FB8E: "\u0643", + 0x000006AA: "\u0643", + 0x000006AD: "\u0643\u06db", + 0x0000FBD5: "\u0643\u06db", + 0x0000FBD6: "\u0643\u06db", + 0x0000FBD4: "\u0643\u06db", + 0x0000FBD3: "\u0643\u06db", + 0x00000763: "\u0643\u06db", + 0x0000FC80: "\u0643l", + 0x0000FC37: "\u0643l", + 0x0000FCC4: "\u0643\u062c", + 0x0000FC38: "\u0643\u062c", + 0x0000FCC5: "\u0643\u062d", + 0x0000FC39: "\u0643\u062d", + 0x0000FCC6: "\u0643\u062e", + 0x0000FC3A: "\u0643\u062e", + 0x0000FCC7: "\u0643\u0644", + 0x0000FCEB: "\u0643\u0644", + 0x0000FC81: "\u0643\u0644", + 0x0000FC3B: "\u0643\u0644", + 0x0000FCC8: "\u0643\u0645", + 0x0000FCEC: "\u0643\u0645", + 0x0000FC82: "\u0643\u0645", + 0x0000FC3C: "\u0643\u0645", + 0x0000FDC3: "\u0643\u0645\u0645", + 0x0000FDBB: "\u0643\u0645\u0645", + 0x0000FDB7: "\u0643\u0645\u0649", + 0x0000FC83: "\u0643\u0649", + 0x0000FC3D: "\u0643\u0649", + 0x0000FC84: "\u0643\u0649", + 0x0000FC3E: "\u0643\u0649", + 0x00000762: "\u06ac", + 0x0000FB94: "\u06af", + 0x0000FB95: "\u06af", + 0x0000FB93: "\u06af", + 0x0000FB92: "\u06af", + 0x000008B0: "\u06af", + 0x000006B4: "\u06af\u06db", + 0x0000FB9C: "\u06b1", + 0x0000FB9D: "\u06b1", + 0x0000FB9B: "\u06b1", + 0x0000FB9A: "\u06b1", + 0x0000FB98: "\u06b3", + 0x0000FB99: "\u06b3", + 0x0000FB97: "\u06b3", + 0x0000FB96: "\u06b3", + 0x0001EE0B: "\u0644", + 0x0001EE2B: "\u0644", + 0x0001EE4B: "\u0644", + 0x0001EE8B: "\u0644", + 0x0001EEAB: "\u0644", + 0x0000FEDF: "\u0644", + 0x0000FEE0: "\u0644", + 0x0000FEDE: "\u0644", + 0x0000FEDD: "\u0644", + 0x000006B7: "\u0644\u06db", + 0x000006B5: "\u0644\u0306", + 0x0000FEFC: "\u0644l", + 0x0000FEFB: "\u0644l", + 0x0000FEFA: "\u0644l\u0655", + 0x0000FEF9: "\u0644l\u0655", + 0x0000FEF8: "\u0644l\u0674", + 0x0000FEF7: "\u0644l\u0674", + 0x0000FCCD: "\u0644o", + 0x0000FEF6: 
"\u0644\u0622", + 0x0000FEF5: "\u0644\u0622", + 0x0000FCC9: "\u0644\u062c", + 0x0000FC3F: "\u0644\u062c", + 0x0000FD83: "\u0644\u062c\u062c", + 0x0000FD84: "\u0644\u062c\u062c", + 0x0000FDBA: "\u0644\u062c\u0645", + 0x0000FDBC: "\u0644\u062c\u0645", + 0x0000FDAC: "\u0644\u062c\u0649", + 0x0000FCCA: "\u0644\u062d", + 0x0000FC40: "\u0644\u062d", + 0x0000FDB5: "\u0644\u062d\u0645", + 0x0000FD80: "\u0644\u062d\u0645", + 0x0000FD82: "\u0644\u062d\u0649", + 0x0000FD81: "\u0644\u062d\u0649", + 0x0000FCCB: "\u0644\u062e", + 0x0000FC41: "\u0644\u062e", + 0x0000FD86: "\u0644\u062e\u0645", + 0x0000FD85: "\u0644\u062e\u0645", + 0x0000FCCC: "\u0644\u0645", + 0x0000FCED: "\u0644\u0645", + 0x0000FC85: "\u0644\u0645", + 0x0000FC42: "\u0644\u0645", + 0x0000FD88: "\u0644\u0645\u062d", + 0x0000FD87: "\u0644\u0645\u062d", + 0x0000FDAD: "\u0644\u0645\u0649", + 0x0000FC86: "\u0644\u0649", + 0x0000FC43: "\u0644\u0649", + 0x0000FC87: "\u0644\u0649", + 0x0000FC44: "\u0644\u0649", + 0x0001EE0C: "\u0645", + 0x0001EE2C: "\u0645", + 0x0001EE6C: "\u0645", + 0x0001EE8C: "\u0645", + 0x0001EEAC: "\u0645", + 0x0000FEE3: "\u0645", + 0x0000FEE4: "\u0645", + 0x0000FEE2: "\u0645", + 0x0000FEE1: "\u0645", + 0x000008A7: "\u0645\u06db", + 0x000006FE: "\u0645\u0348", + 0x0000FC88: "\u0645l", + 0x0000FCCE: "\u0645\u062c", + 0x0000FC45: "\u0645\u062c", + 0x0000FD8C: "\u0645\u062c\u062d", + 0x0000FD92: "\u0645\u062c\u062e", + 0x0000FD8D: "\u0645\u062c\u0645", + 0x0000FDC0: "\u0645\u062c\u0649", + 0x0000FCCF: "\u0645\u062d", + 0x0000FC46: "\u0645\u062d", + 0x0000FD89: "\u0645\u062d\u062c", + 0x0000FD8A: "\u0645\u062d\u0645", + 0x0000FDF4: "\u0645\u062d\u0645\u062f", + 0x0000FD8B: "\u0645\u062d\u0649", + 0x0000FCD0: "\u0645\u062e", + 0x0000FC47: "\u0645\u062e", + 0x0000FD8E: "\u0645\u062e\u062c", + 0x0000FD8F: "\u0645\u062e\u0645", + 0x0000FDB9: "\u0645\u062e\u0649", + 0x0000FCD1: "\u0645\u0645", + 0x0000FC89: "\u0645\u0645", + 0x0000FC48: "\u0645\u0645", + 0x0000FDB1: "\u0645\u0645\u0649", + 0x0000FC49: "\u0645\u0649", + 0x0000FC4A: "\u0645\u0649", + 0x0001EE0D: "\u0646", + 0x0001EE2D: "\u0646", + 0x0001EE4D: "\u0646", + 0x0001EE6D: "\u0646", + 0x0001EE8D: "\u0646", + 0x0001EEAD: "\u0646", + 0x0000FEE7: "\u0646", + 0x0000FEE8: "\u0646", + 0x0000FEE6: "\u0646", + 0x0000FEE5: "\u0646", + 0x00000768: "\u0646\u0615", + 0x00000769: "\u0646\u0306", + 0x0000FCD6: "\u0646o", + 0x0000FCEF: "\u0646o", + 0x0000FDB8: "\u0646\u062c\u062d", + 0x0000FDBD: "\u0646\u062c\u062d", + 0x0000FD98: "\u0646\u062c\u0645", + 0x0000FD97: "\u0646\u062c\u0645", + 0x0000FD99: "\u0646\u062c\u0649", + 0x0000FDC7: "\u0646\u062c\u0649", + 0x0000FCD3: "\u0646\u062d", + 0x0000FC4C: "\u0646\u062d", + 0x0000FD95: "\u0646\u062d\u0645", + 0x0000FD96: "\u0646\u062d\u0649", + 0x0000FDB3: "\u0646\u062d\u0649", + 0x0000FCD4: "\u0646\u062e", + 0x0000FC4D: "\u0646\u062e", + 0x0000FC8A: "\u0646\u0631", + 0x0000FC8B: "\u0646\u0632", + 0x0000FCD5: "\u0646\u0645", + 0x0000FCEE: "\u0646\u0645", + 0x0000FC8C: "\u0646\u0645", + 0x0000FC4E: "\u0646\u0645", + 0x0000FD9B: "\u0646\u0645\u0649", + 0x0000FD9A: "\u0646\u0645\u0649", + 0x0000FC8D: "\u0646\u0646", + 0x0000FC8E: "\u0646\u0649", + 0x0000FC4F: "\u0646\u0649", + 0x0000FC8F: "\u0646\u0649", + 0x0000FC50: "\u0646\u0649", + 0x000006C2: "\u06c0", + 0x0000FBA5: "\u06c0", + 0x0000FBA4: "\u06c0", + 0x000102E4: "\u0648", + 0x0001EE05: "\u0648", + 0x0001EE85: "\u0648", + 0x0001EEA5: "\u0648", + 0x0000FEEE: "\u0648", + 0x0000FEED: "\u0648", + 0x000008B1: "\u0648", + 0x000006CB: "\u0648\u06db", + 0x0000FBDF: "\u0648\u06db", + 0x0000FBDE: 
"\u0648\u06db", + 0x000006C7: "\u0648\u0313", + 0x0000FBD8: "\u0648\u0313", + 0x0000FBD7: "\u0648\u0313", + 0x000006C6: "\u0648\u0306", + 0x0000FBDA: "\u0648\u0306", + 0x0000FBD9: "\u0648\u0306", + 0x000006C9: "\u0648\u0302", + 0x0000FBE3: "\u0648\u0302", + 0x0000FBE2: "\u0648\u0302", + 0x000006C8: "\u0648\u0670", + 0x0000FBDC: "\u0648\u0670", + 0x0000FBDB: "\u0648\u0670", + 0x00000624: "\u0648\u0674", + 0x0000FE86: "\u0648\u0674", + 0x0000FE85: "\u0648\u0674", + 0x00000676: "\u0648\u0674", + 0x00000677: "\u0648\u0313\u0674", + 0x0000FBDD: "\u0648\u0313\u0674", + 0x0000FDF8: "\u0648\u0633\u0644\u0645", + 0x0000FBE1: "\u06c5", + 0x0000FBE0: "\u06c5", + 0x0000066E: "\u0649", + 0x0001EE1C: "\u0649", + 0x0001EE7C: "\u0649", + 0x000006BA: "\u0649", + 0x0001EE1D: "\u0649", + 0x0001EE5D: "\u0649", + 0x0000FB9F: "\u0649", + 0x0000FB9E: "\u0649", + 0x000008BD: "\u0649", + 0x0000FBE8: "\u0649", + 0x0000FBE9: "\u0649", + 0x0000FEF0: "\u0649", + 0x0000FEEF: "\u0649", + 0x0000064A: "\u0649", + 0x0001EE09: "\u0649", + 0x0001EE29: "\u0649", + 0x0001EE49: "\u0649", + 0x0001EE69: "\u0649", + 0x0001EE89: "\u0649", + 0x0001EEA9: "\u0649", + 0x0000FEF3: "\u0649", + 0x0000FEF4: "\u0649", + 0x0000FEF2: "\u0649", + 0x0000FEF1: "\u0649", + 0x000006CC: "\u0649", + 0x0000FBFE: "\u0649", + 0x0000FBFF: "\u0649", + 0x0000FBFD: "\u0649", + 0x0000FBFC: "\u0649", + 0x000006D2: "\u0649", + 0x0000FBAF: "\u0649", + 0x0000FBAE: "\u0649", + 0x00000679: "\u0649\u0615", + 0x0000FB68: "\u0649\u0615", + 0x0000FB69: "\u0649\u0615", + 0x0000FB67: "\u0649\u0615", + 0x0000FB66: "\u0649\u0615", + 0x000006BB: "\u0649\u0615", + 0x0000FBA2: "\u0649\u0615", + 0x0000FBA3: "\u0649\u0615", + 0x0000FBA1: "\u0649\u0615", + 0x0000FBA0: "\u0649\u0615", + 0x0000067E: "\u0649\u06db", + 0x0000FB58: "\u0649\u06db", + 0x0000FB59: "\u0649\u06db", + 0x0000FB57: "\u0649\u06db", + 0x0000FB56: "\u0649\u06db", + 0x0000062B: "\u0649\u06db", + 0x0001EE16: "\u0649\u06db", + 0x0001EE36: "\u0649\u06db", + 0x0001EE76: "\u0649\u06db", + 0x0001EE96: "\u0649\u06db", + 0x0001EEB6: "\u0649\u06db", + 0x0000FE9B: "\u0649\u06db", + 0x0000FE9C: "\u0649\u06db", + 0x0000FE9A: "\u0649\u06db", + 0x0000FE99: "\u0649\u06db", + 0x000006BD: "\u0649\u06db", + 0x000006D1: "\u0649\u06db", + 0x0000063F: "\u0649\u06db", + 0x000008B7: "\u0649\u06db\u06e2", + 0x00000756: "\u0649\u0306", + 0x000006CE: "\u0649\u0306", + 0x000008BA: "\u0649\u0306\u0307", + 0x0000063D: "\u0649\u0302", + 0x000008A8: "\u0649\u0654", + 0x0000FC90: "\u0649\u0670", + 0x0000FC5D: "\u0649\u0670", + 0x0000FCDE: "\u0649o", + 0x0000FCF1: "\u0649o", + 0x0000FCE6: "\u0649\u06dbo", + 0x00000626: "\u0649\u0674", + 0x0000FE8B: "\u0649\u0674", + 0x0000FE8C: "\u0649\u0674", + 0x0000FE8A: "\u0649\u0674", + 0x0000FE89: "\u0649\u0674", + 0x00000678: "\u0649\u0674", + 0x0000FBEB: "\u0649\u0674l", + 0x0000FBEA: "\u0649\u0674l", + 0x0000FC9B: "\u0649\u0674o", + 0x0000FCE0: "\u0649\u0674o", + 0x0000FBED: "\u0649\u0674o", + 0x0000FBEC: "\u0649\u0674o", + 0x0000FBF8: "\u0649\u0674\u067b", + 0x0000FBF7: "\u0649\u0674\u067b", + 0x0000FBF6: "\u0649\u0674\u067b", + 0x0000FC97: "\u0649\u0674\u062c", + 0x0000FC00: "\u0649\u0674\u062c", + 0x0000FC98: "\u0649\u0674\u062d", + 0x0000FC01: "\u0649\u0674\u062d", + 0x0000FC99: "\u0649\u0674\u062e", + 0x0000FC64: "\u0649\u0674\u0631", + 0x0000FC65: "\u0649\u0674\u0632", + 0x0000FC9A: "\u0649\u0674\u0645", + 0x0000FCDF: "\u0649\u0674\u0645", + 0x0000FC66: "\u0649\u0674\u0645", + 0x0000FC02: "\u0649\u0674\u0645", + 0x0000FC67: "\u0649\u0674\u0646", + 0x0000FBEF: "\u0649\u0674\u0648", + 0x0000FBEE: 
"\u0649\u0674\u0648", + 0x0000FBF1: "\u0649\u0674\u0648\u0313", + 0x0000FBF0: "\u0649\u0674\u0648\u0313", + 0x0000FBF3: "\u0649\u0674\u0648\u0306", + 0x0000FBF2: "\u0649\u0674\u0648\u0306", + 0x0000FBF5: "\u0649\u0674\u0648\u0670", + 0x0000FBF4: "\u0649\u0674\u0648\u0670", + 0x0000FBFB: "\u0649\u0674\u0649", + 0x0000FBFA: "\u0649\u0674\u0649", + 0x0000FC68: "\u0649\u0674\u0649", + 0x0000FBF9: "\u0649\u0674\u0649", + 0x0000FC03: "\u0649\u0674\u0649", + 0x0000FC69: "\u0649\u0674\u0649", + 0x0000FC04: "\u0649\u0674\u0649", + 0x0000FCDA: "\u0649\u062c", + 0x0000FC55: "\u0649\u062c", + 0x0000FC11: "\u0649\u06db\u062c", + 0x0000FDAF: "\u0649\u062c\u0649", + 0x0000FCDB: "\u0649\u062d", + 0x0000FC56: "\u0649\u062d", + 0x0000FDAE: "\u0649\u062d\u0649", + 0x0000FCDC: "\u0649\u062e", + 0x0000FC57: "\u0649\u062e", + 0x0000FC91: "\u0649\u0631", + 0x0000FC76: "\u0649\u06db\u0631", + 0x0000FC92: "\u0649\u0632", + 0x0000FC77: "\u0649\u06db\u0632", + 0x0000FCDD: "\u0649\u0645", + 0x0000FCF0: "\u0649\u0645", + 0x0000FC93: "\u0649\u0645", + 0x0000FC58: "\u0649\u0645", + 0x0000FCA6: "\u0649\u06db\u0645", + 0x0000FCE5: "\u0649\u06db\u0645", + 0x0000FC78: "\u0649\u06db\u0645", + 0x0000FC12: "\u0649\u06db\u0645", + 0x0000FD9D: "\u0649\u0645\u0645", + 0x0000FD9C: "\u0649\u0645\u0645", + 0x0000FDB0: "\u0649\u0645\u0649", + 0x0000FC94: "\u0649\u0646", + 0x0000FC79: "\u0649\u06db\u0646", + 0x0000FC95: "\u0649\u0649", + 0x0000FC59: "\u0649\u0649", + 0x0000FC96: "\u0649\u0649", + 0x0000FC5A: "\u0649\u0649", + 0x0000FC7A: "\u0649\u06db\u0649", + 0x0000FC13: "\u0649\u06db\u0649", + 0x0000FC7B: "\u0649\u06db\u0649", + 0x0000FC14: "\u0649\u06db\u0649", + 0x0000FBB1: "\u06d3", + 0x0000FBB0: "\u06d3", + 0x000102B8: "\u2d40", + 0x0000205E: "\u2d42", + 0x00002E3D: "\u2d42", + 0x00002999: "\u2d42", + 0x0000FE19: "\u2d57", + 0x0000205D: "\u2d57", + 0x000022EE: "\u2d57", + 0x00000544: "\u1206", + 0x0000054C: "\u1261", + 0x0000053B: "\u12ae", + 0x0000054A: "\u1323", + 0x00000906: "\u0905\u093e", + 0x00000912: "\u0905\u093e\u0946", + 0x00000913: "\u0905\u093e\u0947", + 0x00000914: "\u0905\u093e\u0948", + 0x00000904: "\u0905\u0946", + 0x00000911: "\u0905\u0949", + 0x0000090D: "\u090f\u0945", + 0x0000090E: "\u090f\u0946", + 0x00000910: "\u090f\u0947", + 0x00000908: "\u0930\u094d\u0907", + 0x00000ABD: "\u093d", + 0x000111DC: "\ua8fb", + 0x000111CB: "\u093a", + 0x00000AC1: "\u0941", + 0x00000AC2: "\u0942", + 0x00000A4B: "\u0946", + 0x00000A4D: "\u094d", + 0x00000ACD: "\u094d", + 0x00000986: "\u0985\u09be", + 0x000009E0: "\u098b\u09c3", + 0x000009E1: "\u098b\u09c3", + 0x00011492: "\u0998", + 0x00011494: "\u099a", + 0x00011496: "\u099c", + 0x00011498: "\u099e", + 0x00011499: "\u099f", + 0x0001149B: "\u09a1", + 0x000114AA: "\u09a3", + 0x0001149E: "\u09a4", + 0x0001149F: "\u09a5", + 0x000114A0: "\u09a6", + 0x000114A1: "\u09a7", + 0x000114A2: "\u09a8", + 0x000114A3: "\u09aa", + 0x000114A9: "\u09ac", + 0x000114A7: "\u09ae", + 0x000114A8: "\u09af", + 0x000114AB: "\u09b0", + 0x0001149D: "\u09b2", + 0x000114AD: "\u09b7", + 0x000114AE: "\u09b8", + 0x000114C4: "\u09bd", + 0x000114B0: "\u09be", + 0x000114B1: "\u09bf", + 0x000114B9: "\u09c7", + 0x000114BC: "\u09cb", + 0x000114BE: "\u09cc", + 0x000114C2: "\u09cd", + 0x000114BD: "\u09d7", + 0x00000A09: "\u0a73\u0a41", + 0x00000A0A: "\u0a73\u0a42", + 0x00000A06: "\u0a05\u0a3e", + 0x00000A10: "\u0a05\u0a48", + 0x00000A14: "\u0a05\u0a4c", + 0x00000A07: "\u0a72\u0a3f", + 0x00000A08: "\u0a72\u0a40", + 0x00000A0F: "\u0a72\u0a47", + 0x00000A86: "\u0a85\u0abe", + 0x00000A91: "\u0a85\u0abe\u0ac5", + 
0x00000A93: "\u0a85\u0abe\u0ac7", + 0x00000A94: "\u0a85\u0abe\u0ac8", + 0x00000A8D: "\u0a85\u0ac5", + 0x00000A8F: "\u0a85\u0ac7", + 0x00000A90: "\u0a85\u0ac8", + 0x00000B06: "\u0b05\u0b3e", + 0x00000BEE: "\u0b85", + 0x00000BB0: "\u0b88", + 0x00000BBE: "\u0b88", + 0x00000BEB: "\u0b88\u0bc1", + 0x00000BE8: "\u0b89", + 0x00000D09: "\u0b89", + 0x00000B8A: "\u0b89\u0bb3", + 0x00000D0A: "\u0b89\u0d57", + 0x00000BED: "\u0b8e", + 0x00000BF7: "\u0b8e\u0bb5", + 0x00000B9C: "\u0b90", + 0x00000D1C: "\u0b90", + 0x00000BE7: "\u0b95", + 0x00000BEA: "\u0b9a", + 0x00000BEC: "\u0b9a\u0bc1", + 0x00000BF2: "\u0b9a\u0bc2", + 0x00000D3A: "\u0b9f\u0bbf", + 0x00000D23: "\u0ba3", + 0x00000BFA: "\u0ba8\u0bc0", + 0x00000BF4: "\u0bae\u0bc0", + 0x00000BF0: "\u0baf", + 0x00000D34: "\u0bb4", + 0x00000BD7: "\u0bb3", + 0x00000BC8: "\u0ba9", + 0x00000D36: "\u0bb6", + 0x00000BF8: "\u0bb7", + 0x00000D3F: "\u0bbf", + 0x00000D40: "\u0bbf", + 0x00000BCA: "\u0bc6\u0b88", + 0x00000BCC: "\u0bc6\u0bb3", + 0x00000BCB: "\u0bc7\u0b88", + 0x00000C85: "\u0c05", + 0x00000C86: "\u0c06", + 0x00000C87: "\u0c07", + 0x00000C60: "\u0c0b\u0c3e", + 0x00000C61: "\u0c0c\u0c3e", + 0x00000C92: "\u0c12", + 0x00000C14: "\u0c12\u0c4c", + 0x00000C94: "\u0c12\u0c4c", + 0x00000C13: "\u0c12\u0c55", + 0x00000C93: "\u0c12\u0c55", + 0x00000C9C: "\u0c1c", + 0x00000C9E: "\u0c1e", + 0x00000C22: "\u0c21\u0323", + 0x00000CA3: "\u0c23", + 0x00000C25: "\u0c27\u05bc", + 0x00000C2D: "\u0c2c\u0323", + 0x00000CAF: "\u0c2f", + 0x00000C20: "\u0c30\u05bc", + 0x00000CB1: "\u0c31", + 0x00000CB2: "\u0c32", + 0x00000C37: "\u0c35\u0323", + 0x00000C39: "\u0c35\u0c3e", + 0x00000C2E: "\u0c35\u0c41", + 0x00000C42: "\u0c41\u0c3e", + 0x00000C44: "\u0c43\u0c3e", + 0x00000CE1: "\u0c8c\u0cbe", + 0x00000D08: "\u0d07\u0d57", + 0x00000D10: "\u0d0e\u0d46", + 0x00000D13: "\u0d12\u0d3e", + 0x00000D14: "\u0d12\u0d57", + 0x00000D61: "\u0d1e", + 0x00000D6B: "\u0d26\u0d4d\u0d30", + 0x00000D79: "\u0d28\u0d41", + 0x00000D0C: "\u0d28\u0d41", + 0x00000D19: "\u0d28\u0d41", + 0x00000D6F: "\u0d28\u0d4d", + 0x00000D7B: "\u0d28\u0d4d", + 0x00000D6C: "\u0d28\u0d4d\u0d28", + 0x00000D5A: "\u0d28\u0d4d\u0d2e", + 0x00000D31: "\u0d30", + 0x00000D6A: "\u0d30\u0d4d", + 0x00000D7C: "\u0d30\u0d4d", + 0x00000D6E: "\u0d35\u0d4d\u0d30", + 0x00000D76: "\u0d39\u0d4d\u0d2e", + 0x00000D42: "\u0d41", + 0x00000D43: "\u0d41", + 0x00000D48: "\u0d46\u0d46", + 0x00000DEA: "\u0da2", + 0x00000DEB: "\u0daf", + 0x00011413: "\U00011434\U00011442\U00011412", + 0x00011419: "\U00011434\U00011442\U00011418", + 0x00011424: "\U00011434\U00011442\U00011423", + 0x0001142A: "\U00011434\U00011442\U00011429", + 0x0001142D: "\U00011434\U00011442\U0001142c", + 0x0001142F: "\U00011434\U00011442\U0001142e", + 0x000115D8: "\U00011582", + 0x000115D9: "\U00011582", + 0x000115DA: "\U00011583", + 0x000115DB: "\U00011584", + 0x000115DC: "\U000115b2", + 0x000115DD: "\U000115b3", + 0x00000E03: "\u0e02", + 0x00000E14: "\u0e04", + 0x00000E15: "\u0e04", + 0x00000E21: "\u0e06", + 0x00000E88: "\u0e08", + 0x00000E0B: "\u0e0a", + 0x00000E0F: "\u0e0e", + 0x00000E17: "\u0e11", + 0x00000E9A: "\u0e1a", + 0x00000E9B: "\u0e1b", + 0x00000E9D: "\u0e1d", + 0x00000E9E: "\u0e1e", + 0x00000E9F: "\u0e1f", + 0x00000E26: "\u0e20", + 0x00000E8D: "\u0e22", + 0x000017D4: "\u0e2f", + 0x00000E45: "\u0e32", + 0x00000E33: "\u030a\u0e32", + 0x000017B7: "\u0e34", + 0x000017B8: "\u0e35", + 0x000017B9: "\u0e36", + 0x000017BA: "\u0e37", + 0x00000EB8: "\u0e38", + 0x00000EB9: "\u0e39", + 0x00000E41: "\u0e40\u0e40", + 0x00000EDC: "\u0eab\u0e99", + 0x00000EDD: "\u0eab\u0ea1", + 0x00000EB3: 
"\u030a\u0eb2", + 0x00000F02: "\u0f60\u0f74\u0f82\u0f7f", + 0x00000F03: "\u0f60\u0f74\u0f82\u0f14", + 0x00000F6A: "\u0f62", + 0x00000F00: "\u0f68\u0f7c\u0f7e", + 0x00000F77: "\u0fb2\u0f71\u0f80", + 0x00000F79: "\u0fb3\u0f71\u0f80", + 0x00011CB2: "\U00011caa", + 0x00001081: "\u1002\u103e", + 0x00001000: "\u1002\u102c", + 0x00001070: "\u1003\u103e", + 0x00001066: "\u1015\u103e", + 0x0000101F: "\u1015\u102c", + 0x0000106F: "\u1015\u102c\u103e", + 0x0000107E: "\u107d\u103e", + 0x00001029: "\u101e\u103c", + 0x0000102A: "\u101e\u103c\u1031\u102c\u103a", + 0x0000109E: "\u1083\u030a", + 0x000017A3: "\u17a2", + 0x000019D0: "\u199e", + 0x000019D1: "\u19b1", + 0x00001A80: "\u1a45", + 0x00001A90: "\u1a45", + 0x0000AA53: "\uaa01", + 0x0000AA56: "\uaa23", + 0x00001B52: "\u1b0d", + 0x00001B53: "\u1b11", + 0x00001B58: "\u1b28", + 0x0000A9A3: "\ua99d", + 0x00001896: "\u185c", + 0x00001855: "\u1835", + 0x00001FF6: "\u13ef", + 0x0000140D: "\u1401\u00b7", + 0x0000142B: "\u1401\u1420", + 0x00001411: "\u1404\u00b7", + 0x00001413: "\u1405\u00b7", + 0x0000142D: "\u1405\u1420", + 0x00001415: "\u1406\u00b7", + 0x00001418: "\u140a\u00b7", + 0x0000142E: "\u140a\u1420", + 0x0000141A: "\u140b\u00b7", + 0x000018DD: "\u141e\u18df", + 0x000014D1: "\u1421", + 0x00001540: "\u1429", + 0x0000143F: "\u1432\u00b7", + 0x00001443: "\u1434\u00b7", + 0x00002369: "\u1435", + 0x00001447: "\u1439\u00b7", + 0x0000145C: "\u144f\u00b7", + 0x00002E27: "\u1450", + 0x00002283: "\u1450", + 0x0000145E: "\u1450\u00b7", + 0x00001469: "\u1450'", + 0x000027C9: "\u1450/", + 0x00002AD7: "\u1450\u1455", + 0x00001460: "\u1451\u00b7", + 0x00002E26: "\u1455", + 0x00002282: "\u1455", + 0x00001462: "\u1455\u00b7", + 0x0000146A: "\u1455'", + 0x00001464: "\u1456\u00b7", + 0x00001475: "\u146b\u00b7", + 0x00001485: "\u146b'", + 0x00001479: "\u146e\u00b7", + 0x0000147D: "\u1470\u00b7", + 0x00001603: "\u1489", + 0x00001493: "\u1489\u00b7", + 0x00001495: "\u148b\u00b7", + 0x00001497: "\u148c\u00b7", + 0x0000149B: "\u148e\u00b7", + 0x00001602: "\u1490", + 0x0000149D: "\u1490\u00b7", + 0x0000149F: "\u1491\u00b7", + 0x000014AD: "\u14a3\u00b7", + 0x000014B1: "\u14a6\u00b7", + 0x000014B3: "\u14a7\u00b7", + 0x000014B5: "\u14a8\u00b7", + 0x000014B9: "\u14ab\u00b7", + 0x000014CA: "\u14c0\u00b7", + 0x000018C7: "\u14c2\u00b7", + 0x000018C9: "\u14c3\u00b7", + 0x000018CB: "\u14c4\u00b7", + 0x000018CD: "\u14c5\u00b7", + 0x000014CC: "\u14c7\u00b7", + 0x000014CE: "\u14c8\u00b7", + 0x00001604: "\u14d3", + 0x000014DD: "\u14d3\u00b7", + 0x000014DF: "\u14d5\u00b7", + 0x000014E1: "\u14d6\u00b7", + 0x000014E3: "\u14d7\u00b7", + 0x000014E5: "\u14d8\u00b7", + 0x00001607: "\u14da", + 0x000014E7: "\u14da\u00b7", + 0x000014E9: "\u14db\u00b7", + 0x000014F7: "\u14ed\u00b7", + 0x000014F9: "\u14ef\u00b7", + 0x000014FB: "\u14f0\u00b7", + 0x000014FD: "\u14f1\u00b7", + 0x000014FF: "\u14f2\u00b7", + 0x00001501: "\u14f4\u00b7", + 0x00001503: "\u14f5\u00b7", + 0x0000150C: "\u150b<", + 0x0000150E: "\u150bb", + 0x0000150D: "\u150b\u1455", + 0x0000150F: "\u150b\u1490", + 0x00001518: "\u1510\u00b7", + 0x0000151A: "\u1511\u00b7", + 0x0000151C: "\u1512\u00b7", + 0x0000151E: "\u1513\u00b7", + 0x00001520: "\u1514\u00b7", + 0x00001522: "\u1515\u00b7", + 0x00001524: "\u1516\u00b7", + 0x00001532: "\u1528\u00b7", + 0x00001534: "\u1529\u00b7", + 0x00001536: "\u152a\u00b7", + 0x00001538: "\u152b\u00b7", + 0x0000153A: "\u152d\u00b7", + 0x0000153C: "\u152e\u00b7", + 0x00001622: "\u1543", + 0x000018E0: "\u1543\u00b7", + 0x00001623: "\u1546", + 0x00001624: "\u154a", + 0x0000154F: "\u154c\u00b7", + 0x00001583: 
"\u1550b", + 0x00001584: "\u1550b\u0307", + 0x00001581: "\u1550d", + 0x0000157F: "\u1550P", + 0x0000166F: "\u1550\u146b", + 0x0000157E: "\u1550\u146c", + 0x00001580: "\u1550\u146e", + 0x00001582: "\u1550\u1470", + 0x00001585: "\u1550\u1483", + 0x0000155C: "\u155a\u00b7", + 0x000018E3: "\u155e\u00b7", + 0x000018E4: "\u1566\u00b7", + 0x00001569: "\u1567\u00b7", + 0x000018E5: "\u156b\u00b7", + 0x000018E8: "\u1586\u00b7", + 0x00001591: "\u1595J", + 0x00001670: "\u1595\u1489", + 0x0000158E: "\u1595\u148a", + 0x0000158F: "\u1595\u148b", + 0x00001590: "\u1595\u148c", + 0x00001592: "\u1595\u148e", + 0x00001593: "\u1595\u1490", + 0x00001594: "\u1595\u1491", + 0x00001673: "\u1596J", + 0x00001671: "\u1596\u148b", + 0x00001672: "\u1596\u148c", + 0x00001674: "\u1596\u148e", + 0x00001675: "\u1596\u1490", + 0x00001676: "\u1596\u1491", + 0x000018EA: "\u1597\u00b7", + 0x00001677: "\u15a7\u00b7", + 0x00001678: "\u15a8\u00b7", + 0x00001679: "\u15a9\u00b7", + 0x0000167A: "\u15aa\u00b7", + 0x0000167B: "\u15ab\u00b7", + 0x0000167C: "\u15ac\u00b7", + 0x0000167D: "\u15ad\u00b7", + 0x00002AAB: "\u15d2", + 0x00002AAA: "\u15d5", + 0x0000A4F7: "\u15e1", + 0x000018F0: "\u15f4\u00b7", + 0x000018F2: "\u161b\u00b7", + 0x00001DBB: "\u1646", + 0x0000A4ED: "\u1660", + 0x00001DBA: "\u18d4", + 0x00001D3E: "\u18d6", + 0x000018DC: "\u18df\u141e", + 0x000002E1: "\u18f3", + 0x000002B3: "\u18f4", + 0x000002E2: "\u18f5", + 0x000018DB: "\u18f5", + 0x0000A6B0: "\u16b9", + 0x000016E1: "\u16bc", + 0x0000237F: "\u16bd", + 0x000016C2: "\u16bd", + 0x0001D23F: "\u16cb", + 0x00002191: "\u16cf", + 0x000021BF: "\u16d0", + 0x0000296E: "\u16d0\u21c2", + 0x00002963: "\u16d0\u16da", + 0x00002D63: "\u16ef", + 0x000021BE: "\u16da", + 0x00002A21: "\u16da", + 0x000022C4: "\u16dc", + 0x000025C7: "\u16dc", + 0x000025CA: "\u16dc", + 0x00002662: "\u16dc", + 0x0001F754: "\u16dc", + 0x000118B7: "\u16dc", + 0x00010294: "\u16dc", + 0x0000235A: "\u16dc\u0332", + 0x000022C8: "\u16de", + 0x00002A1D: "\u16de", + 0x000104D0: "\u16e6", + 0x00002195: "\u16e8", + 0x00010CFC: "\U00010c82", + 0x00010CFA: "\U00010ca5", + 0x00003131: "\u1100", + 0x000011A8: "\u1100", + 0x00001101: "\u1100\u1100", + 0x00003132: "\u1100\u1100", + 0x000011A9: "\u1100\u1100", + 0x000011FA: "\u1100\u1102", + 0x0000115A: "\u1100\u1103", + 0x000011C3: "\u1100\u1105", + 0x000011FB: "\u1100\u1107", + 0x000011AA: "\u1100\u1109", + 0x00003133: "\u1100\u1109", + 0x000011C4: "\u1100\u1109\u1100", + 0x000011FC: "\u1100\u110e", + 0x000011FD: "\u1100\u110f", + 0x000011FE: "\u1100\u1112", + 0x00003134: "\u1102", + 0x000011AB: "\u1102", + 0x00001113: "\u1102\u1100", + 0x000011C5: "\u1102\u1100", + 0x00001114: "\u1102\u1102", + 0x00003165: "\u1102\u1102", + 0x000011FF: "\u1102\u1102", + 0x00001115: "\u1102\u1103", + 0x00003166: "\u1102\u1103", + 0x000011C6: "\u1102\u1103", + 0x0000D7CB: "\u1102\u1105", + 0x00001116: "\u1102\u1107", + 0x0000115B: "\u1102\u1109", + 0x000011C7: "\u1102\u1109", + 0x00003167: "\u1102\u1109", + 0x0000115C: "\u1102\u110c", + 0x000011AC: "\u1102\u110c", + 0x00003135: "\u1102\u110c", + 0x0000D7CC: "\u1102\u110e", + 0x000011C9: "\u1102\u1110", + 0x0000115D: "\u1102\u1112", + 0x000011AD: "\u1102\u1112", + 0x00003136: "\u1102\u1112", + 0x000011C8: "\u1102\u1140", + 0x00003168: "\u1102\u1140", + 0x00003137: "\u1103", + 0x000011AE: "\u1103", + 0x00001117: "\u1103\u1100", + 0x000011CA: "\u1103\u1100", + 0x00001104: "\u1103\u1103", + 0x00003138: "\u1103\u1103", + 0x0000D7CD: "\u1103\u1103", + 0x0000D7CE: "\u1103\u1103\u1107", + 0x0000115E: "\u1103\u1105", + 0x000011CB: "\u1103\u1105", + 
0x0000A960: "\u1103\u1106", + 0x0000A961: "\u1103\u1107", + 0x0000D7CF: "\u1103\u1107", + 0x0000A962: "\u1103\u1109", + 0x0000D7D0: "\u1103\u1109", + 0x0000D7D1: "\u1103\u1109\u1100", + 0x0000A963: "\u1103\u110c", + 0x0000D7D2: "\u1103\u110c", + 0x0000D7D3: "\u1103\u110e", + 0x0000D7D4: "\u1103\u1110", + 0x00003139: "\u1105", + 0x000011AF: "\u1105", + 0x0000A964: "\u1105\u1100", + 0x000011B0: "\u1105\u1100", + 0x0000313A: "\u1105\u1100", + 0x0000A965: "\u1105\u1100\u1100", + 0x0000D7D5: "\u1105\u1100\u1100", + 0x000011CC: "\u1105\u1100\u1109", + 0x00003169: "\u1105\u1100\u1109", + 0x0000D7D6: "\u1105\u1100\u1112", + 0x00001118: "\u1105\u1102", + 0x000011CD: "\u1105\u1102", + 0x0000A966: "\u1105\u1103", + 0x000011CE: "\u1105\u1103", + 0x0000316A: "\u1105\u1103", + 0x0000A967: "\u1105\u1103\u1103", + 0x000011CF: "\u1105\u1103\u1112", + 0x00001119: "\u1105\u1105", + 0x000011D0: "\u1105\u1105", + 0x0000D7D7: "\u1105\u1105\u110f", + 0x0000A968: "\u1105\u1106", + 0x000011B1: "\u1105\u1106", + 0x0000313B: "\u1105\u1106", + 0x000011D1: "\u1105\u1106\u1100", + 0x000011D2: "\u1105\u1106\u1109", + 0x0000D7D8: "\u1105\u1106\u1112", + 0x0000A969: "\u1105\u1107", + 0x000011B2: "\u1105\u1107", + 0x0000313C: "\u1105\u1107", + 0x0000D7D9: "\u1105\u1107\u1103", + 0x0000A96A: "\u1105\u1107\u1107", + 0x000011D3: "\u1105\u1107\u1109", + 0x0000316B: "\u1105\u1107\u1109", + 0x0000A96B: "\u1105\u1107\u110b", + 0x000011D5: "\u1105\u1107\u110b", + 0x0000D7DA: "\u1105\u1107\u1111", + 0x000011D4: "\u1105\u1107\u1112", + 0x0000A96C: "\u1105\u1109", + 0x000011B3: "\u1105\u1109", + 0x0000313D: "\u1105\u1109", + 0x000011D6: "\u1105\u1109\u1109", + 0x0000111B: "\u1105\u110b", + 0x0000D7DD: "\u1105\u110b", + 0x0000A96D: "\u1105\u110c", + 0x0000A96E: "\u1105\u110f", + 0x000011D8: "\u1105\u110f", + 0x000011B4: "\u1105\u1110", + 0x0000313E: "\u1105\u1110", + 0x000011B5: "\u1105\u1111", + 0x0000313F: "\u1105\u1111", + 0x0000111A: "\u1105\u1112", + 0x00003140: "\u1105\u1112", + 0x0000113B: "\u1105\u1112", + 0x000011B6: "\u1105\u1112", + 0x0000D7F2: "\u1105\u1112", + 0x000011D7: "\u1105\u1140", + 0x0000316C: "\u1105\u1140", + 0x0000D7DB: "\u1105\u114c", + 0x000011D9: "\u1105\u1159", + 0x0000316D: "\u1105\u1159", + 0x0000D7DC: "\u1105\u1159\u1112", + 0x00003141: "\u1106", + 0x000011B7: "\u1106", + 0x0000A96F: "\u1106\u1100", + 0x000011DA: "\u1106\u1100", + 0x0000D7DE: "\u1106\u1102", + 0x0000D7DF: "\u1106\u1102\u1102", + 0x0000A970: "\u1106\u1103", + 0x000011DB: "\u1106\u1105", + 0x0000D7E0: "\u1106\u1106", + 0x0000111C: "\u1106\u1107", + 0x0000316E: "\u1106\u1107", + 0x000011DC: "\u1106\u1107", + 0x0000D7E1: "\u1106\u1107\u1109", + 0x0000A971: "\u1106\u1109", + 0x000011DD: "\u1106\u1109", + 0x0000316F: "\u1106\u1109", + 0x000011DE: "\u1106\u1109\u1109", + 0x0000111D: "\u1106\u110b", + 0x00003171: "\u1106\u110b", + 0x000011E2: "\u1106\u110b", + 0x0000D7E2: "\u1106\u110c", + 0x000011E0: "\u1106\u110e", + 0x000011E1: "\u1106\u1112", + 0x000011DF: "\u1106\u1140", + 0x00003170: "\u1106\u1140", + 0x00003142: "\u1107", + 0x000011B8: "\u1107", + 0x0000111E: "\u1107\u1100", + 0x00003172: "\u1107\u1100", + 0x0000111F: "\u1107\u1102", + 0x00001120: "\u1107\u1103", + 0x00003173: "\u1107\u1103", + 0x0000D7E3: "\u1107\u1103", + 0x000011E3: "\u1107\u1105", + 0x0000D7E4: "\u1107\u1105\u1111", + 0x0000D7E5: "\u1107\u1106", + 0x00001108: "\u1107\u1107", + 0x00003143: "\u1107\u1107", + 0x0000D7E6: "\u1107\u1107", + 0x0000112C: "\u1107\u1107\u110b", + 0x00003179: "\u1107\u1107\u110b", + 0x00001121: "\u1107\u1109", + 0x00003144: "\u1107\u1109", + 
0x000011B9: "\u1107\u1109", + 0x00001122: "\u1107\u1109\u1100", + 0x00003174: "\u1107\u1109\u1100", + 0x00001123: "\u1107\u1109\u1103", + 0x00003175: "\u1107\u1109\u1103", + 0x0000D7E7: "\u1107\u1109\u1103", + 0x00001124: "\u1107\u1109\u1107", + 0x00001125: "\u1107\u1109\u1109", + 0x00001126: "\u1107\u1109\u110c", + 0x0000A972: "\u1107\u1109\u1110", + 0x0000112B: "\u1107\u110b", + 0x00003178: "\u1107\u110b", + 0x000011E6: "\u1107\u110b", + 0x00001127: "\u1107\u110c", + 0x00003176: "\u1107\u110c", + 0x0000D7E8: "\u1107\u110c", + 0x00001128: "\u1107\u110e", + 0x0000D7E9: "\u1107\u110e", + 0x0000A973: "\u1107\u110f", + 0x00001129: "\u1107\u1110", + 0x00003177: "\u1107\u1110", + 0x0000112A: "\u1107\u1111", + 0x000011E4: "\u1107\u1111", + 0x0000A974: "\u1107\u1112", + 0x000011E5: "\u1107\u1112", + 0x00003145: "\u1109", + 0x000011BA: "\u1109", + 0x0000112D: "\u1109\u1100", + 0x0000317A: "\u1109\u1100", + 0x000011E7: "\u1109\u1100", + 0x0000112E: "\u1109\u1102", + 0x0000317B: "\u1109\u1102", + 0x0000112F: "\u1109\u1103", + 0x0000317C: "\u1109\u1103", + 0x000011E8: "\u1109\u1103", + 0x00001130: "\u1109\u1105", + 0x000011E9: "\u1109\u1105", + 0x00001131: "\u1109\u1106", + 0x0000D7EA: "\u1109\u1106", + 0x00001132: "\u1109\u1107", + 0x0000317D: "\u1109\u1107", + 0x000011EA: "\u1109\u1107", + 0x00001133: "\u1109\u1107\u1100", + 0x0000D7EB: "\u1109\u1107\u110b", + 0x0000110A: "\u1109\u1109", + 0x00003146: "\u1109\u1109", + 0x000011BB: "\u1109\u1109", + 0x0000D7EC: "\u1109\u1109\u1100", + 0x0000D7ED: "\u1109\u1109\u1103", + 0x0000A975: "\u1109\u1109\u1107", + 0x00001134: "\u1109\u1109\u1109", + 0x00001135: "\u1109\u110b", + 0x00001136: "\u1109\u110c", + 0x0000317E: "\u1109\u110c", + 0x0000D7EF: "\u1109\u110c", + 0x00001137: "\u1109\u110e", + 0x0000D7F0: "\u1109\u110e", + 0x00001138: "\u1109\u110f", + 0x00001139: "\u1109\u1110", + 0x0000D7F1: "\u1109\u1110", + 0x0000113A: "\u1109\u1111", + 0x0000D7EE: "\u1109\u1140", + 0x00003147: "\u110b", + 0x000011BC: "\u110b", + 0x00001141: "\u110b\u1100", + 0x000011EC: "\u110b\u1100", + 0x000011ED: "\u110b\u1100\u1100", + 0x00001142: "\u110b\u1103", + 0x0000A976: "\u110b\u1105", + 0x00001143: "\u110b\u1106", + 0x00001144: "\u110b\u1107", + 0x00001145: "\u110b\u1109", + 0x000011F1: "\u110b\u1109", + 0x00003182: "\u110b\u1109", + 0x00001147: "\u110b\u110b", + 0x00003180: "\u110b\u110b", + 0x000011EE: "\u110b\u110b", + 0x00001148: "\u110b\u110c", + 0x00001149: "\u110b\u110e", + 0x000011EF: "\u110b\u110f", + 0x0000114A: "\u110b\u1110", + 0x0000114B: "\u110b\u1111", + 0x0000A977: "\u110b\u1112", + 0x00001146: "\u110b\u1140", + 0x000011F2: "\u110b\u1140", + 0x00003183: "\u110b\u1140", + 0x00003148: "\u110c", + 0x000011BD: "\u110c", + 0x0000D7F7: "\u110c\u1107", + 0x0000D7F8: "\u110c\u1107\u1107", + 0x0000114D: "\u110c\u110b", + 0x0000110D: "\u110c\u110c", + 0x00003149: "\u110c\u110c", + 0x0000D7F9: "\u110c\u110c", + 0x0000A978: "\u110c\u110c\u1112", + 0x0000314A: "\u110e", + 0x000011BE: "\u110e", + 0x00001152: "\u110e\u110f", + 0x00001153: "\u110e\u1112", + 0x0000314B: "\u110f", + 0x000011BF: "\u110f", + 0x0000314C: "\u1110", + 0x000011C0: "\u1110", + 0x0000A979: "\u1110\u1110", + 0x0000314D: "\u1111", + 0x000011C1: "\u1111", + 0x00001156: "\u1111\u1107", + 0x000011F3: "\u1111\u1107", + 0x0000D7FA: "\u1111\u1109", + 0x00001157: "\u1111\u110b", + 0x00003184: "\u1111\u110b", + 0x000011F4: "\u1111\u110b", + 0x0000D7FB: "\u1111\u1110", + 0x0000A97A: "\u1111\u1112", + 0x0000314E: "\u1112", + 0x000011C2: "\u1112", + 0x000011F5: "\u1112\u1102", + 0x000011F6: "\u1112\u1105", + 
0x000011F7: "\u1112\u1106", + 0x000011F8: "\u1112\u1107", + 0x0000A97B: "\u1112\u1109", + 0x00001158: "\u1112\u1112", + 0x00003185: "\u1112\u1112", + 0x0000113D: "\u113c\u113c", + 0x0000113F: "\u113e\u113e", + 0x0000317F: "\u1140", + 0x000011EB: "\u1140", + 0x0000D7F3: "\u1140\u1107", + 0x0000D7F4: "\u1140\u1107\u110b", + 0x00003181: "\u114c", + 0x000011F0: "\u114c", + 0x0000D7F5: "\u114c\u1106", + 0x0000D7F6: "\u114c\u1112", + 0x0000114F: "\u114e\u114e", + 0x00001151: "\u1150\u1150", + 0x00003186: "\u1159", + 0x000011F9: "\u1159", + 0x0000A97C: "\u1159\u1159", + 0x00003164: "\u1160", + 0x0000314F: "\u1161", + 0x000011A3: "\u1161\u30fc", + 0x00001176: "\u1161\u1169", + 0x00001177: "\u1161\u116e", + 0x00001162: "\u1161\u4e28", + 0x00003150: "\u1161\u4e28", + 0x00003151: "\u1163", + 0x00001178: "\u1163\u1169", + 0x00001179: "\u1163\u116d", + 0x000011A4: "\u1163\u116e", + 0x00001164: "\u1163\u4e28", + 0x00003152: "\u1163\u4e28", + 0x00003153: "\u1165", + 0x0000117C: "\u1165\u30fc", + 0x0000117A: "\u1165\u1169", + 0x0000117B: "\u1165\u116e", + 0x00001166: "\u1165\u4e28", + 0x00003154: "\u1165\u4e28", + 0x00003155: "\u1167", + 0x000011A5: "\u1167\u1163", + 0x0000117D: "\u1167\u1169", + 0x0000117E: "\u1167\u116e", + 0x00001168: "\u1167\u4e28", + 0x00003156: "\u1167\u4e28", + 0x00003157: "\u1169", + 0x0000116A: "\u1169\u1161", + 0x00003158: "\u1169\u1161", + 0x0000116B: "\u1169\u1161\u4e28", + 0x00003159: "\u1169\u1161\u4e28", + 0x000011A6: "\u1169\u1163", + 0x000011A7: "\u1169\u1163\u4e28", + 0x0000117F: "\u1169\u1165", + 0x00001180: "\u1169\u1165\u4e28", + 0x0000D7B0: "\u1169\u1167", + 0x00001181: "\u1169\u1167\u4e28", + 0x00001182: "\u1169\u1169", + 0x0000D7B1: "\u1169\u1169\u4e28", + 0x00001183: "\u1169\u116e", + 0x0000116C: "\u1169\u4e28", + 0x0000315A: "\u1169\u4e28", + 0x0000315B: "\u116d", + 0x0000D7B2: "\u116d\u1161", + 0x0000D7B3: "\u116d\u1161\u4e28", + 0x00001184: "\u116d\u1163", + 0x00003187: "\u116d\u1163", + 0x00001186: "\u116d\u1163", + 0x00001185: "\u116d\u1163\u4e28", + 0x00003188: "\u116d\u1163\u4e28", + 0x0000D7B4: "\u116d\u1165", + 0x00001187: "\u116d\u1169", + 0x00001188: "\u116d\u4e28", + 0x00003189: "\u116d\u4e28", + 0x0000315C: "\u116e", + 0x00001189: "\u116e\u1161", + 0x0000118A: "\u116e\u1161\u4e28", + 0x0000116F: "\u116e\u1165", + 0x0000315D: "\u116e\u1165", + 0x0000118B: "\u116e\u1165\u30fc", + 0x00001170: "\u116e\u1165\u4e28", + 0x0000315E: "\u116e\u1165\u4e28", + 0x0000D7B5: "\u116e\u1167", + 0x0000118C: "\u116e\u1167\u4e28", + 0x0000118D: "\u116e\u116e", + 0x00001171: "\u116e\u4e28", + 0x0000315F: "\u116e\u4e28", + 0x0000D7B6: "\u116e\u4e28\u4e28", + 0x00003160: "\u1172", + 0x0000118E: "\u1172\u1161", + 0x0000D7B7: "\u1172\u1161\u4e28", + 0x0000118F: "\u1172\u1165", + 0x00001190: "\u1172\u1165\u4e28", + 0x00001191: "\u1172\u1167", + 0x0000318A: "\u1172\u1167", + 0x00001192: "\u1172\u1167\u4e28", + 0x0000318B: "\u1172\u1167\u4e28", + 0x0000D7B8: "\u1172\u1169", + 0x00001193: "\u1172\u116e", + 0x00001194: "\u1172\u4e28", + 0x0000318C: "\u1172\u4e28", + 0x0000318D: "\u119e", + 0x0000D7C5: "\u119e\u1161", + 0x0000119F: "\u119e\u1165", + 0x0000D7C6: "\u119e\u1165\u4e28", + 0x000011A0: "\u119e\u116e", + 0x000011A2: "\u119e\u119e", + 0x000011A1: "\u119e\u4e28", + 0x0000318E: "\u119e\u4e28", + 0x000030D8: "\u3078", + 0x00002341: "\u303c", + 0x000029C4: "\u303c", + 0x0000A49E: "\ua04a", + 0x0000A4AC: "\ua050", + 0x0000A49C: "\ua0c0", + 0x0000A4A8: "\ua132", + 0x0000A4BF: "\ua259", + 0x0000A4BE: "\ua2b1", + 0x0000A494: "\ua2cd", + 0x0000A4C0: "\ua3ab", + 0x0000A4C2: 
"\ua3b5", + 0x0000A4BA: "\ua3bf", + 0x0000A4B0: "\ua3c2", + 0x0000A4A7: "\ua458", + 0x000022A5: "\ua4d5", + 0x000027C2: "\ua4d5", + 0x0001D21C: "\ua4d5", + 0x0000A7B1: "\ua4d5", + 0x0000A79E: "\ua4e4", + 0x00002141: "\ua4e8", + 0x00002142: "\ua4f6", + 0x0001D215: "\ua4f6", + 0x0001D22B: "\ua4f6", + 0x00016F26: "\ua4f6", + 0x00010411: "\ua4f6", + 0x00002143: "\U00016f00", + 0x00011AE6: "\U00011ae5\U00011aef", + 0x00011AE8: "\U00011ae5\U00011ae5", + 0x00011AE9: "\U00011ae5\U00011ae5\U00011aef", + 0x00011AEA: "\U00011ae5\U00011ae5\U00011af0", + 0x00011AE7: "\U00011ae5\U00011af0", + 0x00011AF4: "\U00011af3\U00011aef", + 0x00011AF6: "\U00011af3\U00011af3", + 0x00011AF7: "\U00011af3\U00011af3\U00011aef", + 0x00011AF8: "\U00011af3\U00011af3\U00011af0", + 0x00011AF5: "\U00011af3\U00011af0", + 0x00011AEC: "\U00011aeb\U00011aef", + 0x00011AED: "\U00011aeb\U00011aeb", + 0x00011AEE: "\U00011aeb\U00011aeb\U00011aef", + 0x00002295: "\U000102a8", + 0x00002A01: "\U000102a8", + 0x0001F728: "\U000102a8", + 0x0000A69A: "\U000102a8", + 0x000025BD: "\U000102bc", + 0x0001D214: "\U000102bc", + 0x0001F704: "\U000102bc", + 0x000029D6: "\U000102c0", + 0x0000A79B: "\U0001043a", + 0x0000A79A: "\U00010412", + 0x000104A0: "\U00010486", + 0x000103D1: "\U00010382", + 0x000103D3: "\U00010393", + 0x00012038: "\U0001039a", + 0x00002625: "\U0001099e", + 0x000132F9: "\U0001099e", + 0x00003039: "\u5344", + 0x0000F967: "\u4e0d", + 0x0002F800: "\u4e3d", + 0x0000FA70: "\u4e26", + 0x0000239C: "\u4e28", + 0x0000239F: "\u4e28", + 0x000023A2: "\u4e28", + 0x000023A5: "\u4e28", + 0x000023AA: "\u4e28", + 0x000023AE: "\u4e28", + 0x000031D1: "\u4e28", + 0x00001175: "\u4e28", + 0x00003163: "\u4e28", + 0x00002F01: "\u4e28", + 0x0000119C: "\u4e28\u30fc", + 0x00001198: "\u4e28\u1161", + 0x00001199: "\u4e28\u1163", + 0x0000D7BD: "\u4e28\u1163\u1169", + 0x0000D7BE: "\u4e28\u1163\u4e28", + 0x0000D7BF: "\u4e28\u1167", + 0x0000D7C0: "\u4e28\u1167\u4e28", + 0x0000119A: "\u4e28\u1169", + 0x0000D7C1: "\u4e28\u1169\u4e28", + 0x0000D7C2: "\u4e28\u116d", + 0x0000119B: "\u4e28\u116e", + 0x0000D7C3: "\u4e28\u1172", + 0x0000119D: "\u4e28\u119e", + 0x0000D7C4: "\u4e28\u4e28", + 0x0000F905: "\u4e32", + 0x0002F801: "\u4e38", + 0x0000F95E: "\u4e39", + 0x0002F802: "\u4e41", + 0x000031E0: "\u4e59", + 0x00002F04: "\u4e59", + 0x000031DF: "\u4e5a", + 0x00002E83: "\u4e5a", + 0x000031D6: "\u4e5b", + 0x00002E82: "\u4e5b", + 0x00002EF2: "\u4e80", + 0x0000F91B: "\u4e82", + 0x000031DA: "\u4e85", + 0x00002F05: "\u4e85", + 0x0000F9BA: "\u4e86", + 0x000030CB: "\u4e8c", + 0x00002F06: "\u4e8c", + 0x0002F803: "\U00020122", + 0x00002F07: "\u4ea0", + 0x0000F977: "\u4eae", + 0x00002F08: "\u4eba", + 0x000030A4: "\u4ebb", + 0x00002E85: "\u4ebb", + 0x0000F9FD: "\u4ec0", + 0x0002F819: "\u4ecc", + 0x0000F9A8: "\u4ee4", + 0x0002F804: "\u4f60", + 0x00005002: "\u4f75", + 0x0002F807: "\u4f75", + 0x0000FA73: "\u4f80", + 0x0000F92D: "\u4f86", + 0x0000F9B5: "\u4f8b", + 0x0000FA30: "\u4fae", + 0x0002F805: "\u4fae", + 0x0002F806: "\u4fbb", + 0x0000F965: "\u4fbf", + 0x0000503C: "\u5024", + 0x0000F9D4: "\u502b", + 0x0002F808: "\u507a", + 0x0002F809: "\u5099", + 0x0002F80B: "\u50cf", + 0x0000F9BB: "\u50da", + 0x0000FA31: "\u50e7", + 0x0002F80A: "\u50e7", + 0x0002F80C: "\u349e", + 0x00002F09: "\u513f", + 0x0000FA0C: "\u5140", + 0x00002E8E: "\u5140", + 0x0000FA74: "\u5145", + 0x0000FA32: "\u514d", + 0x0002F80E: "\u514d", + 0x0002F80F: "\u5154", + 0x0002F810: "\u5164", + 0x00002F0A: "\u5165", + 0x0002F814: "\u5167", + 0x0000FA72: "\u5168", + 0x0000F978: "\u5169", + 0x000030CF: "\u516b", + 
0x00002F0B: "\u516b", + 0x0000F9D1: "\u516d", + 0x0002F811: "\u5177", + 0x0002F812: "\U0002051c", + 0x0002F91B: "\U00020525", + 0x0000FA75: "\u5180", + 0x0002F813: "\u34b9", + 0x00002F0C: "\u5182", + 0x0002F815: "\u518d", + 0x0002F816: "\U0002054b", + 0x0002F8D2: "\u5192", + 0x0002F8D3: "\u5195", + 0x0002F9CA: "\u34bb", + 0x0002F8D4: "\u6700", + 0x00002F0D: "\u5196", + 0x0002F817: "\u5197", + 0x0002F818: "\u51a4", + 0x00002F0E: "\u51ab", + 0x0002F81A: "\u51ac", + 0x0000FA71: "\u51b5", + 0x0002F81B: "\u51b5", + 0x0000F92E: "\u51b7", + 0x0000F979: "\u51c9", + 0x0000F955: "\u51cc", + 0x0000F954: "\u51dc", + 0x0000FA15: "\u51de", + 0x00002F0F: "\u51e0", + 0x0002F80D: "\U0002063a", + 0x0002F81D: "\u51f5", + 0x00002F10: "\u51f5", + 0x00002F11: "\u5200", + 0x00002E89: "\u5202", + 0x0002F81E: "\u5203", + 0x0000FA00: "\u5207", + 0x0002F850: "\u5207", + 0x0000F99C: "\u5217", + 0x0000F9DD: "\u5229", + 0x0002F81F: "\u34df", + 0x0000F9FF: "\u523a", + 0x0002F820: "\u523b", + 0x0002F821: "\u5246", + 0x0002F822: "\u5272", + 0x0002F823: "\u5277", + 0x0000F9C7: "\u5289", + 0x0002F9D9: "\U00020804", + 0x000030AB: "\u529b", + 0x0000F98A: "\u529b", + 0x00002F12: "\u529b", + 0x0000F99D: "\u52a3", + 0x0002F824: "\u3515", + 0x0002F992: "\u52b3", + 0x0000FA76: "\u52c7", + 0x0002F825: "\u52c7", + 0x0000FA33: "\u52c9", + 0x0002F826: "\u52c9", + 0x0000F952: "\u52d2", + 0x0000F92F: "\u52de", + 0x0000FA34: "\u52e4", + 0x0002F827: "\u52e4", + 0x0000F97F: "\u52f5", + 0x00002F13: "\u52f9", + 0x0000FA77: "\u52fa", + 0x0002F828: "\u52fa", + 0x0002F829: "\u5305", + 0x0002F82A: "\u5306", + 0x0002F9DD: "\U000208de", + 0x00002F14: "\u5315", + 0x0000F963: "\u5317", + 0x0002F82B: "\u5317", + 0x00002F15: "\u531a", + 0x00002F16: "\u5338", + 0x0000F9EB: "\u533f", + 0x00002F17: "\u5341", + 0x00003038: "\u5341", + 0x0000303A: "\u5345", + 0x0002F82C: "\u5349", + 0x00000FD6: "\u534d", + 0x00000FD5: "\u5350", + 0x0000FA35: "\u5351", + 0x0002F82D: "\u5351", + 0x0002F82E: "\u535a", + 0x000030C8: "\u535c", + 0x00002F18: "\u535c", + 0x00002F19: "\u5369", + 0x00002E8B: "\u353e", + 0x0002F82F: "\u5373", + 0x0000F91C: "\u5375", + 0x0002F830: "\u537d", + 0x0002F831: "\u537f", + 0x0002F832: "\u537f", + 0x0002F833: "\u537f", + 0x00002F1A: "\u5382", + 0x0002F834: "\U00020a2c", + 0x00002F1B: "\u53b6", + 0x0000F96B: "\u53c3", + 0x00002F1C: "\u53c8", + 0x0002F836: "\u53ca", + 0x0002F837: "\u53df", + 0x0002F838: "\U00020b63", + 0x000030ED: "\u53e3", + 0x00002F1D: "\u53e3", + 0x000056D7: "\u53e3", + 0x00002F1E: "\u53e3", + 0x0000F906: "\u53e5", + 0x0002F839: "\u53eb", + 0x0002F83A: "\u53f1", + 0x0002F83B: "\u5406", + 0x0000F9DE: "\u540f", + 0x0000F9ED: "\u541d", + 0x0002F83D: "\u5438", + 0x0000F980: "\u5442", + 0x0002F83E: "\u5448", + 0x0002F83F: "\u5468", + 0x0002F83C: "\u549e", + 0x0002F840: "\u54a2", + 0x0000F99E: "\u54bd", + 0x0000439B: "\u3588", + 0x0002F841: "\u54f6", + 0x0002F842: "\u5510", + 0x0002F843: "\u5553", + 0x0000555F: "\u5553", + 0x0000FA79: "\u5555", + 0x0002F844: "\u5563", + 0x0002F845: "\u5584", + 0x0002F846: "\u5584", + 0x0000F90B: "\u5587", + 0x0000FA7A: "\u5599", + 0x0002F847: "\u5599", + 0x0000FA36: "\u559d", + 0x0000FA78: "\u559d", + 0x0002F848: "\u55ab", + 0x0002F849: "\u55b3", + 0x0000FA0D: "\u55c0", + 0x0002F84A: "\u55c2", + 0x0000FA7B: "\u55e2", + 0x0000FA37: "\u5606", + 0x0002F84C: "\u5606", + 0x0002F84E: "\u5651", + 0x0002F84F: "\u5674", + 0x0000FA38: "\u5668", + 0x0000F9A9: "\u56f9", + 0x0002F84B: "\u5716", + 0x0002F84D: "\u5717", + 0x00002F1F: "\u571f", + 0x000058EB: "\u571f", + 0x00002F20: "\u571f", + 0x0002F855: 
"\u578b", + 0x0002F852: "\u57ce", + 0x000039B3: "\u363d", + 0x0002F853: "\u57f4", + 0x0002F854: "\u580d", + 0x0002F857: "\u5831", + 0x0002F856: "\u5832", + 0x0000FA39: "\u5840", + 0x0000FA10: "\u585a", + 0x0000FA7C: "\u585a", + 0x0000F96C: "\u585e", + 0x0000586B: "\u5861", + 0x000058FF: "\u58ab", + 0x0002F858: "\u58ac", + 0x0000FA7D: "\u58b3", + 0x0000F94A: "\u58d8", + 0x0000F942: "\u58df", + 0x0002F859: "\U000214e4", + 0x0002F851: "\u58ee", + 0x0002F85A: "\u58f2", + 0x0002F85B: "\u58f7", + 0x00002F21: "\u5902", + 0x0002F85C: "\u5906", + 0x00002F22: "\u590a", + 0x000030BF: "\u5915", + 0x00002F23: "\u5915", + 0x0002F85D: "\u591a", + 0x0002F85E: "\u5922", + 0x00002F24: "\u5927", + 0x0000FA7E: "\u5944", + 0x0000F90C: "\u5948", + 0x0000F909: "\u5951", + 0x0000FA7F: "\u5954", + 0x0002F85F: "\u5962", + 0x0000F981: "\u5973", + 0x00002F25: "\u5973", + 0x0002F860: "\U000216a8", + 0x0002F861: "\U000216ea", + 0x0002F865: "\u59d8", + 0x0002F862: "\u59ec", + 0x0002F863: "\u5a1b", + 0x0002F864: "\u5a27", + 0x0000FA80: "\u5a62", + 0x0002F866: "\u5a66", + 0x00005B00: "\u5aaf", + 0x0002F867: "\u36ee", + 0x0002F868: "\u36fc", + 0x0002F986: "\u5ab5", + 0x0002F869: "\u5b08", + 0x0000FA81: "\u5b28", + 0x0002F86A: "\u5b3e", + 0x0002F86B: "\u5b3e", + 0x00002F26: "\u5b50", + 0x00002F27: "\u5b80", + 0x0000FA04: "\u5b85", + 0x0002F86C: "\U000219c8", + 0x0002F86D: "\u5bc3", + 0x0002F86E: "\u5bd8", + 0x0000F95F: "\u5be7", + 0x0000F9AA: "\u5be7", + 0x0002F86F: "\u5be7", + 0x0000F9BC: "\u5bee", + 0x0002F870: "\u5bf3", + 0x0002F871: "\U00021b18", + 0x00002F28: "\u5bf8", + 0x0002F872: "\u5bff", + 0x0002F873: "\u5c06", + 0x00002F29: "\u5c0f", + 0x0002F875: "\u5c22", + 0x00002E90: "\u5c22", + 0x00002F2A: "\u5c22", + 0x00002E8F: "\u5c23", + 0x0002F876: "\u3781", + 0x00002F2B: "\u5c38", + 0x0000F9BD: "\u5c3f", + 0x0002F877: "\u5c60", + 0x0000F94B: "\u5c62", + 0x0000FA3B: "\u5c64", + 0x0000F9DF: "\u5c65", + 0x0000FA3C: "\u5c6e", + 0x0002F878: "\u5c6e", + 0x00002F2C: "\u5c6e", + 0x0002F8F8: "\U00021d0b", + 0x00002F2D: "\u5c71", + 0x0002F879: "\u5cc0", + 0x0002F87A: "\u5c8d", + 0x0002F87B: "\U00021de4", + 0x0002F87D: "\U00021de6", + 0x0000F9D5: "\u5d19", + 0x0002F87C: "\u5d43", + 0x0000F921: "\u5d50", + 0x0002F87F: "\u5d6b", + 0x0002F87E: "\u5d6e", + 0x0002F880: "\u5d7c", + 0x0002F9F4: "\u5db2", + 0x0000F9AB: "\u5dba", + 0x00002F2E: "\u5ddb", + 0x0002F882: "\u5de2", + 0x000030A8: "\u5de5", + 0x00002F2F: "\u5de5", + 0x00002F30: "\u5df1", + 0x00002E92: "\u5df3", + 0x0002F883: "\u382f", + 0x0002F884: "\u5dfd", + 0x00002F31: "\u5dfe", + 0x00005E32: "\u5e21", + 0x0002F885: "\u5e28", + 0x0002F886: "\u5e3d", + 0x0002F887: "\u5e69", + 0x0002F888: "\u3862", + 0x0002F889: "\U00022183", + 0x00002F32: "\u5e72", + 0x0000F98E: "\u5e74", + 0x0002F939: "\U0002219f", + 0x00002E93: "\u5e7a", + 0x00002F33: "\u5e7a", + 0x00002F34: "\u5e7f", + 0x0000FA01: "\u5ea6", + 0x0002F88A: "\u387c", + 0x0002F88B: "\u5eb0", + 0x0002F88C: "\u5eb3", + 0x0002F88D: "\u5eb6", + 0x0000F928: "\u5eca", + 0x0002F88E: "\u5eca", + 0x0000F9A2: "\u5ec9", + 0x0000FA82: "\u5ed2", + 0x0000FA0B: "\u5ed3", + 0x0000FA83: "\u5ed9", + 0x0000F982: "\u5eec", + 0x00002F35: "\u5ef4", + 0x0002F890: "\u5efe", + 0x00002F36: "\u5efe", + 0x0002F891: "\U00022331", + 0x0002F892: "\U00022331", + 0x0000F943: "\u5f04", + 0x00002F37: "\u5f0b", + 0x00002F38: "\u5f13", + 0x0002F894: "\u5f22", + 0x0002F895: "\u5f22", + 0x00002F39: "\u5f50", + 0x00002E94: "\u5f51", + 0x0002F874: "\u5f53", + 0x0002F896: "\u38c7", + 0x00002F3A: "\u5f61", + 0x0002F899: "\u5f62", + 0x0000FA84: "\u5f69", + 0x0002F89A: 
"\u5f6b", + 0x00002F3B: "\u5f73", + 0x0000F9D8: "\u5f8b", + 0x0002F89B: "\u38e3", + 0x0002F89C: "\u5f9a", + 0x0000F966: "\u5fa9", + 0x0000FA85: "\u5fad", + 0x00002F3C: "\u5fc3", + 0x00002E96: "\u5fc4", + 0x00002E97: "\u38fa", + 0x0002F89D: "\u5fcd", + 0x0002F89E: "\u5fd7", + 0x0000F9A3: "\u5ff5", + 0x0002F89F: "\u5ff9", + 0x0000F960: "\u6012", + 0x0000F9AC: "\u601c", + 0x0000FA6B: "\u6075", + 0x0002F8A2: "\u391c", + 0x0002F8A1: "\u393a", + 0x0002F8A0: "\u6081", + 0x0000FA3D: "\u6094", + 0x0002F8A3: "\u6094", + 0x0002F8A5: "\u60c7", + 0x0000FA86: "\u60d8", + 0x0000F9B9: "\u60e1", + 0x0002F8A4: "\U000226d4", + 0x0000FA88: "\u6108", + 0x0000FA3E: "\u6168", + 0x0000F9D9: "\u6144", + 0x0002F8A6: "\u6148", + 0x0002F8A7: "\u614c", + 0x0002F8A9: "\u614c", + 0x0000FA87: "\u614e", + 0x0002F8A8: "\u614e", + 0x0000FA8A: "\u6160", + 0x0002F8AA: "\u617a", + 0x0000FA3F: "\u618e", + 0x0000FA89: "\u618e", + 0x0002F8AB: "\u618e", + 0x0000F98F: "\u6190", + 0x0002F8AD: "\u61a4", + 0x0002F8AE: "\u61af", + 0x0002F8AC: "\u61b2", + 0x0000FAD0: "\U00022844", + 0x0000FACF: "\U0002284a", + 0x0002F8AF: "\u61de", + 0x0000FA40: "\u61f2", + 0x0000FA8B: "\u61f2", + 0x0002F8B0: "\u61f2", + 0x0000F90D: "\u61f6", + 0x0002F8B1: "\u61f6", + 0x0000F990: "\u6200", + 0x00002F3D: "\u6208", + 0x0002F8B2: "\u6210", + 0x0002F8B3: "\u621b", + 0x0000F9D2: "\u622e", + 0x0000FA8C: "\u6234", + 0x00002F3E: "\u6236", + 0x00006238: "\u6236", + 0x00002F3F: "\u624b", + 0x00002E98: "\u624c", + 0x0002F8B4: "\u625d", + 0x0002F8B5: "\u62b1", + 0x0000F925: "\u62c9", + 0x0000F95B: "\u62cf", + 0x0000FA02: "\u62d3", + 0x0002F8B6: "\u62d4", + 0x0002F8BA: "\u62fc", + 0x0000F973: "\u62fe", + 0x0002F8B8: "\U00022b0c", + 0x0002F8B9: "\u633d", + 0x0002F8B7: "\u6350", + 0x0002F8BB: "\u6368", + 0x0000F9A4: "\u637b", + 0x0002F8BC: "\u6383", + 0x0000F975: "\u63a0", + 0x0002F8C1: "\u63a9", + 0x0000FA8D: "\u63c4", + 0x0002F8BD: "\u63e4", + 0x0000FA8F: "\u6452", + 0x0002F8BE: "\U00022bf1", + 0x0000FA8E: "\u641c", + 0x0002F8BF: "\u6422", + 0x0002F8C0: "\u63c5", + 0x0002F8C3: "\u6469", + 0x0002F8C6: "\u6477", + 0x0002F8C4: "\u647e", + 0x0002F8C2: "\u3a2e", + 0x00006409: "\u3a41", + 0x0000F991: "\u649a", + 0x0002F8C5: "\u649d", + 0x0000F930: "\u64c4", + 0x0002F8C7: "\u3a6c", + 0x00002F40: "\u652f", + 0x00002F41: "\u6534", + 0x00002E99: "\u6535", + 0x0000FA41: "\u654f", + 0x0002F8C8: "\u654f", + 0x0000FA90: "\u6556", + 0x0002F8C9: "\u656c", + 0x0000F969: "\u6578", + 0x0002F8CA: "\U0002300a", + 0x00002F42: "\u6587", + 0x00002EEB: "\u6589", + 0x00002F43: "\u6597", + 0x0000F9BE: "\u6599", + 0x00002F44: "\u65a4", + 0x00002F45: "\u65b9", + 0x0000F983: "\u65c5", + 0x00002F46: "\u65e0", + 0x00002E9B: "\u65e1", + 0x0000FA42: "\u65e2", + 0x0002F8CB: "\u65e3", + 0x00002F47: "\u65e5", + 0x0000F9E0: "\u6613", + 0x000066F6: "\u3ada", + 0x0002F8D1: "\u3ae4", + 0x0002F8CD: "\u6649", + 0x00006669: "\u665a", + 0x0000FA12: "\u6674", + 0x0000FA91: "\u6674", + 0x0000FA43: "\u6691", + 0x0002F8CF: "\u6691", + 0x0000F9C5: "\u6688", + 0x0002F8D0: "\u3b08", + 0x0002F8D5: "\u669c", + 0x0000FA06: "\u66b4", + 0x0000F98B: "\u66c6", + 0x0002F8CE: "\u3b19", + 0x0002F897: "\U000232b8", + 0x00002F48: "\u66f0", + 0x0000F901: "\u66f4", + 0x0002F8CC: "\u66f8", + 0x00002F49: "\u6708", + 0x0002F980: "\U0002335f", + 0x000080A6: "\u670c", + 0x000080D0: "\u670f", + 0x000080CA: "\u6710", + 0x00008101: "\u6713", + 0x000080F6: "\u3b35", + 0x0000F929: "\u6717", + 0x0000FA92: "\u6717", + 0x0002F8D8: "\u6717", + 0x00008127: "\u6718", + 0x0000FA93: "\u671b", + 0x0002F8D9: "\u671b", + 0x0002F8DA: "\u6721", + 
0x00005E50: "\u3b3a", + 0x00004420: "\u3b3b", + 0x0002F989: "\U00023393", + 0x000081A7: "\u6723", + 0x0002F98A: "\U0002339c", + 0x00002F4A: "\u6728", + 0x0000F9E1: "\u674e", + 0x0002F8DC: "\u6753", + 0x0000FA94: "\u6756", + 0x0002F8DB: "\u675e", + 0x0002F8DD: "\U000233c3", + 0x000067FF: "\u676e", + 0x0000F9C8: "\u677b", + 0x0002F8E0: "\u6785", + 0x0000F9F4: "\u6797", + 0x0002F8DE: "\u3b49", + 0x0000FAD1: "\U000233d5", + 0x0000F9C9: "\u67f3", + 0x0002F8DF: "\u67fa", + 0x0000F9DA: "\u6817", + 0x0002F8E5: "\u681f", + 0x0002F8E1: "\u6852", + 0x0002F8E3: "\U0002346d", + 0x0000F97A: "\u6881", + 0x0000FA44: "\u6885", + 0x0002F8E2: "\u6885", + 0x0002F8E4: "\u688e", + 0x0000F9E2: "\u68a8", + 0x0002F8E6: "\u6914", + 0x0002F8E8: "\u6942", + 0x0000FAD2: "\u3b9d", + 0x0002F8E7: "\u3b9d", + 0x000069E9: "\u3ba3", + 0x00006A27: "\u699d", + 0x0002F8E9: "\u69a3", + 0x0002F8EA: "\u69ea", + 0x0000F914: "\u6a02", + 0x0000F95C: "\u6a02", + 0x0000F9BF: "\u6a02", + 0x0000F94C: "\u6a13", + 0x0002F8EC: "\U000236a3", + 0x0002F8EB: "\u6aa8", + 0x0000F931: "\u6ad3", + 0x0002F8ED: "\u6adb", + 0x0000F91D: "\u6b04", + 0x0002F8EE: "\u3c18", + 0x00002F4B: "\u6b20", + 0x0002F8EF: "\u6b21", + 0x0002F8F0: "\U000238a7", + 0x0002F8F1: "\u6b54", + 0x0002F8F2: "\u3c4e", + 0x00002F4C: "\u6b62", + 0x00002EED: "\u6b6f", + 0x0002F8F3: "\u6b72", + 0x0000F98C: "\u6b77", + 0x0000FA95: "\u6b79", + 0x00002F4D: "\u6b79", + 0x00002E9E: "\u6b7a", + 0x0002F8F4: "\u6b9f", + 0x0000F9A5: "\u6bae", + 0x00002F4E: "\u6bb3", + 0x0000F970: "\u6bba", + 0x0000FA96: "\u6bba", + 0x0002F8F5: "\u6bba", + 0x0002F8F6: "\u6bbb", + 0x0002F8F7: "\U00023a8d", + 0x00002F4F: "\u6bcb", + 0x00002E9F: "\u6bcd", + 0x0002F8F9: "\U00023afa", + 0x00002F50: "\u6bd4", + 0x00002F51: "\u6bdb", + 0x00002F52: "\u6c0f", + 0x00002EA0: "\u6c11", + 0x00002F53: "\u6c14", + 0x00002F54: "\u6c34", + 0x00002EA1: "\u6c35", + 0x00002EA2: "\u6c3a", + 0x0002F8FA: "\u6c4e", + 0x0002F8FE: "\u6c67", + 0x0000F972: "\u6c88", + 0x0002F8FC: "\u6cbf", + 0x0000F968: "\u6ccc", + 0x0002F8FD: "\u6ccd", + 0x0000F9E3: "\u6ce5", + 0x0002F8FB: "\U00023cbc", + 0x0000F915: "\u6d1b", + 0x0000FA05: "\u6d1e", + 0x0002F907: "\u6d34", + 0x0002F900: "\u6d3e", + 0x0000F9CA: "\u6d41", + 0x0000FA97: "\u6d41", + 0x0002F902: "\u6d41", + 0x0002F8FF: "\u6d16", + 0x0002F903: "\u6d69", + 0x0000F92A: "\u6d6a", + 0x0000FA45: "\u6d77", + 0x0002F901: "\u6d77", + 0x0002F904: "\u6d78", + 0x0002F905: "\u6d85", + 0x0002F906: "\U00023d1e", + 0x0000F9F5: "\u6dcb", + 0x0000F94D: "\u6dda", + 0x0000F9D6: "\u6dea", + 0x0002F90E: "\u6df9", + 0x0000FA46: "\u6e1a", + 0x0002F908: "\u6e2f", + 0x0002F909: "\u6e6e", + 0x00006F59: "\u6e88", + 0x0000FA99: "\u6ecb", + 0x0002F90B: "\u6ecb", + 0x0000F9CB: "\u6e9c", + 0x0000F9EC: "\u6eba", + 0x0002F90C: "\u6ec7", + 0x0000F904: "\u6ed1", + 0x0000FA98: "\u6edb", + 0x0002F90A: "\u3d33", + 0x0000F94E: "\u6f0f", + 0x0000FA47: "\u6f22", + 0x0000FA9A: "\u6f22", + 0x0000F992: "\u6f23", + 0x0002F90D: "\U00023ed1", + 0x0002F90F: "\u6f6e", + 0x0002F910: "\U00023f5e", + 0x0002F911: "\U00023f8e", + 0x0002F912: "\u6fc6", + 0x0000F922: "\u6feb", + 0x0000F984: "\u6ffe", + 0x0002F915: "\u701b", + 0x0000FA9B: "\u701e", + 0x0002F914: "\u701e", + 0x0002F913: "\u7039", + 0x0002F917: "\u704a", + 0x0002F916: "\u3d96", + 0x00002F55: "\u706b", + 0x00002EA3: "\u706c", + 0x0002F835: "\u7070", + 0x0002F919: "\u7077", + 0x0002F918: "\u707d", + 0x0000F9FB: "\u7099", + 0x0002F91A: "\u70ad", + 0x0000F99F: "\u70c8", + 0x0000F916: "\u70d9", + 0x0000FA48: "\u716e", + 0x0000FA9C: "\u716e", + 0x0002F91D: "\U00024263", + 0x0002F91C: 
"\u7145", + 0x0000F993: "\u7149", + 0x0000FA6C: "\U000242ee", + 0x0002F91E: "\u719c", + 0x0000F9C0: "\u71ce", + 0x0000F9EE: "\u71d0", + 0x0002F91F: "\U000243ab", + 0x0000F932: "\u7210", + 0x0000F91E: "\u721b", + 0x0002F920: "\u7228", + 0x00002F56: "\u722a", + 0x0000FA49: "\u722b", + 0x00002EA4: "\u722b", + 0x0000FA9E: "\u7235", + 0x0002F921: "\u7235", + 0x00002F57: "\u7236", + 0x00002F58: "\u723b", + 0x00002EA6: "\u4e2c", + 0x00002F59: "\u723f", + 0x00002F5A: "\u7247", + 0x0002F922: "\u7250", + 0x00002F5B: "\u7259", + 0x0002F923: "\U00024608", + 0x00002F5C: "\u725b", + 0x0000F946: "\u7262", + 0x0002F924: "\u7280", + 0x0002F925: "\u7295", + 0x00002F5D: "\u72ac", + 0x00002EA8: "\u72ad", + 0x0000FA9F: "\u72af", + 0x0000F9FA: "\u72c0", + 0x0002F926: "\U00024735", + 0x0000F92B: "\u72fc", + 0x0000FA16: "\u732a", + 0x0000FAA0: "\u732a", + 0x0002F927: "\U00024814", + 0x0000F9A7: "\u7375", + 0x0002F928: "\u737a", + 0x00002F5E: "\u7384", + 0x0000F961: "\u7387", + 0x0000F9DB: "\u7387", + 0x00002F5F: "\u7389", + 0x0002F929: "\u738b", + 0x0002F92A: "\u3eac", + 0x0002F92B: "\u73a5", + 0x0000F9AD: "\u73b2", + 0x0002F92C: "\u3eb8", + 0x0002F92D: "\u3eb8", + 0x0000F917: "\u73de", + 0x0000F9CC: "\u7409", + 0x0000F9E4: "\u7406", + 0x0000FA4A: "\u7422", + 0x0002F92E: "\u7447", + 0x0002F92F: "\u745c", + 0x0000F9AE: "\u7469", + 0x0000FAA1: "\u7471", + 0x0002F930: "\u7471", + 0x0002F931: "\u7485", + 0x0000F994: "\u7489", + 0x0000F9EF: "\u7498", + 0x0002F932: "\u74ca", + 0x00002F60: "\u74dc", + 0x00002F61: "\u74e6", + 0x0002F933: "\u3f1b", + 0x0000FAA2: "\u7506", + 0x00002F62: "\u7518", + 0x00002F63: "\u751f", + 0x0002F934: "\u7524", + 0x00002F64: "\u7528", + 0x00002F65: "\u7530", + 0x0000FAA3: "\u753b", + 0x0002F936: "\u753e", + 0x0002F935: "\U00024c36", + 0x0000F9CD: "\u7559", + 0x0000F976: "\u7565", + 0x0000F962: "\u7570", + 0x0002F938: "\u7570", + 0x0002F937: "\U00024c92", + 0x00002F66: "\u758b", + 0x00002F67: "\u7592", + 0x0000F9E5: "\u75e2", + 0x0002F93A: "\u7610", + 0x0000FAA5: "\u761f", + 0x0000FAA4: "\u761d", + 0x0000F9C1: "\u7642", + 0x0000F90E: "\u7669", + 0x00002F68: "\u7676", + 0x00002F69: "\u767d", + 0x0002F93B: "\U00024fa1", + 0x0002F93C: "\U00024fb8", + 0x00002F6A: "\u76ae", + 0x00002F6B: "\u76bf", + 0x0002F93D: "\U00025044", + 0x0002F93E: "\u3ffc", + 0x0000FA17: "\u76ca", + 0x0000FAA6: "\u76ca", + 0x0000FAA7: "\u76db", + 0x0000F933: "\u76e7", + 0x0002F93F: "\u4008", + 0x00002F6C: "\u76ee", + 0x0000FAA8: "\u76f4", + 0x0002F940: "\u76f4", + 0x0002F942: "\U000250f2", + 0x0002F941: "\U000250f3", + 0x0000F96D: "\u7701", + 0x0000FAD3: "\u4018", + 0x0002F943: "\U00025119", + 0x0002F945: "\u771e", + 0x0002F946: "\u771f", + 0x0002F947: "\u771f", + 0x0002F944: "\U00025133", + 0x0000FAAA: "\u7740", + 0x0000FAA9: "\u774a", + 0x0002F948: "\u774a", + 0x00009FC3: "\u4039", + 0x0000FAD4: "\u4039", + 0x0002F949: "\u4039", + 0x00006663: "\u403f", + 0x0002F94B: "\u4046", + 0x0002F94A: "\u778b", + 0x0000FAD5: "\U00025249", + 0x0000FA9D: "\u77a7", + 0x00002F6D: "\u77db", + 0x00002F6E: "\u77e2", + 0x00002F6F: "\u77f3", + 0x0002F94C: "\u4096", + 0x0002F94D: "\U0002541d", + 0x0000784F: "\u7814", + 0x0002F94E: "\u784e", + 0x0000F9CE: "\u786b", + 0x0000F93B: "\u788c", + 0x0002F94F: "\u788c", + 0x0000FA4B: "\u7891", + 0x0000F947: "\u78ca", + 0x0000FAAB: "\u78cc", + 0x0002F950: "\u78cc", + 0x0000F964: "\u78fb", + 0x0002F951: "\u40e3", + 0x0000F985: "\u792a", + 0x00002F70: "\u793a", + 0x00002EAD: "\u793b", + 0x0000FA18: "\u793c", + 0x0000FA4C: "\u793e", + 0x0000FA4E: "\u7948", + 0x0000FA4D: "\u7949", + 0x0002F952: 
"\U00025626", + 0x0000FA4F: "\u7950", + 0x0000FA50: "\u7956", + 0x0002F953: "\u7956", + 0x0000FA51: "\u795d", + 0x0000FA19: "\u795e", + 0x0000FA1A: "\u7965", + 0x0000FA61: "\u8996", + 0x0000FAB8: "\u8996", + 0x0000F93C: "\u797f", + 0x0002F954: "\U0002569a", + 0x0000FA52: "\u798d", + 0x0000FA53: "\u798e", + 0x0000FA1B: "\u798f", + 0x0002F956: "\u798f", + 0x0002F955: "\U000256c5", + 0x0000F9B6: "\u79ae", + 0x00002F71: "\u79b8", + 0x00002F72: "\u79be", + 0x0000F995: "\u79ca", + 0x0002F958: "\u412f", + 0x0002F957: "\u79eb", + 0x0000F956: "\u7a1c", + 0x0002F95A: "\u7a4a", + 0x0000FA54: "\u7a40", + 0x0002F959: "\u7a40", + 0x0002F95B: "\u7a4f", + 0x00002F73: "\u7a74", + 0x0000FA55: "\u7a81", + 0x0002F95C: "\U0002597c", + 0x0000FAAC: "\u7ab1", + 0x0000F9F7: "\u7acb", + 0x00002F74: "\u7acb", + 0x00002EEF: "\u7adc", + 0x0002F95D: "\U00025aa7", + 0x0002F95E: "\U00025aa7", + 0x0002F95F: "\u7aee", + 0x00002F75: "\u7af9", + 0x0000F9F8: "\u7b20", + 0x0000FA56: "\u7bc0", + 0x0000FAAD: "\u7bc0", + 0x0002F960: "\u4202", + 0x0002F961: "\U00025bab", + 0x0002F962: "\u7bc6", + 0x0002F964: "\u4227", + 0x0002F963: "\u7bc9", + 0x0002F965: "\U00025c80", + 0x0000FAD6: "\U00025cd0", + 0x0000F9A6: "\u7c3e", + 0x0000F944: "\u7c60", + 0x00002F76: "\u7c73", + 0x0000FAAE: "\u7c7b", + 0x0000F9F9: "\u7c92", + 0x0000FA1D: "\u7cbe", + 0x0002F966: "\u7cd2", + 0x0000FA03: "\u7cd6", + 0x0002F968: "\u7ce8", + 0x0002F967: "\u42a0", + 0x0002F969: "\u7ce3", + 0x0000F97B: "\u7ce7", + 0x00002F77: "\u7cf8", + 0x00002EAF: "\u7cf9", + 0x0002F96B: "\U00025f86", + 0x0002F96A: "\u7d00", + 0x0000F9CF: "\u7d10", + 0x0000F96A: "\u7d22", + 0x0000F94F: "\u7d2f", + 0x00007D76: "\u7d55", + 0x0002F96C: "\u7d63", + 0x0000FAAF: "\u7d5b", + 0x0000F93D: "\u7da0", + 0x0000F957: "\u7dbe", + 0x0002F96E: "\u7dc7", + 0x0000F996: "\u7df4", + 0x0000FA57: "\u7df4", + 0x0000FAB0: "\u7df4", + 0x0002F96F: "\u7e02", + 0x0002F96D: "\u4301", + 0x0000FA58: "\u7e09", + 0x0000F950: "\u7e37", + 0x0000FA59: "\u7e41", + 0x0002F970: "\u7e45", + 0x0002F898: "\U000261da", + 0x0002F971: "\u4334", + 0x00002F78: "\u7f36", + 0x0002F972: "\U00026228", + 0x0000FAB1: "\u7f3e", + 0x0002F973: "\U00026247", + 0x00002F79: "\u7f51", + 0x00002EAB: "\u7f52", + 0x00002EB2: "\u7f52", + 0x00002EB1: "\u7f53", + 0x0002F974: "\u4359", + 0x0000FA5A: "\u7f72", + 0x0002F975: "\U000262d9", + 0x0000F9E6: "\u7f79", + 0x0002F976: "\u7f7a", + 0x0000F90F: "\u7f85", + 0x0002F977: "\U0002633e", + 0x00002F7A: "\u7f8a", + 0x0002F978: "\u7f95", + 0x0000F9AF: "\u7f9a", + 0x0000FA1E: "\u7fbd", + 0x00002F7B: "\u7fbd", + 0x0002F979: "\u7ffa", + 0x0000F934: "\u8001", + 0x00002F7C: "\u8001", + 0x00002EB9: "\u8002", + 0x0000FA5B: "\u8005", + 0x0000FAB2: "\u8005", + 0x0002F97A: "\u8005", + 0x00002F7D: "\u800c", + 0x0002F97B: "\U000264da", + 0x00002F7E: "\u8012", + 0x0002F97C: "\U00026523", + 0x00002F7F: "\u8033", + 0x0000F9B0: "\u8046", + 0x0002F97D: "\u8060", + 0x0002F97E: "\U000265a8", + 0x0000F997: "\u806f", + 0x0002F97F: "\u8070", + 0x0000F945: "\u807e", + 0x00002F80: "\u807f", + 0x00002EBA: "\u8080", + 0x00002F81: "\u8089", + 0x0000F953: "\u808b", + 0x0002F8D6: "\u80ad", + 0x0002F982: "\u80b2", + 0x0002F981: "\u43d5", + 0x0002F8D7: "\u43d9", + 0x00008141: "\u80fc", + 0x0002F983: "\u8103", + 0x0002F985: "\u813e", + 0x0002F984: "\u440b", + 0x0002F987: "\U000267a7", + 0x0002F988: "\U000267b5", + 0x00006726: "\u4443", + 0x0000F926: "\u81d8", + 0x00002F82: "\u81e3", + 0x0000F9F6: "\u81e8", + 0x00002F83: "\u81ea", + 0x0000FA5C: "\u81ed", + 0x00002F84: "\u81f3", + 0x00002F85: "\u81fc", + 0x0002F893: "\u8201", + 
0x0002F98B: "\u8201", + 0x0002F98C: "\u8204", + 0x00002F86: "\u820c", + 0x0000FA6D: "\u8218", + 0x00002F87: "\u821b", + 0x00002F88: "\u821f", + 0x0002F98E: "\u446b", + 0x00002F89: "\u826e", + 0x0000F97C: "\u826f", + 0x00002F8A: "\u8272", + 0x00002F8B: "\u8278", + 0x0000FA5D: "\u8279", + 0x0000FA5E: "\u8279", + 0x00002EBE: "\u8279", + 0x00002EBF: "\u8279", + 0x00002EC0: "\u8279", + 0x0002F990: "\u828b", + 0x0002F98F: "\u8291", + 0x0002F991: "\u829d", + 0x0002F993: "\u82b1", + 0x0002F994: "\u82b3", + 0x0002F995: "\u82bd", + 0x0000F974: "\u82e5", + 0x0002F998: "\u82e5", + 0x0002F996: "\u82e6", + 0x0002F997: "\U00026b3c", + 0x0000F9FE: "\u8336", + 0x0000FAB3: "\u8352", + 0x0002F99A: "\u8363", + 0x0002F999: "\u831d", + 0x0002F99C: "\u8323", + 0x0002F99D: "\u83bd", + 0x0002F9A0: "\u8353", + 0x0000F93E: "\u83c9", + 0x0002F9A1: "\u83ca", + 0x0002F9A2: "\u83cc", + 0x0002F9A3: "\u83dc", + 0x0002F99E: "\u83e7", + 0x0000FAB4: "\u83ef", + 0x0000F958: "\u83f1", + 0x0000FA5F: "\u8457", + 0x0002F99F: "\u8457", + 0x0002F9A4: "\U00026c36", + 0x0002F99B: "\u83ad", + 0x0000F918: "\u843d", + 0x0000F96E: "\u8449", + 0x0000853F: "\u848d", + 0x0002F9A6: "\U00026cd5", + 0x0002F9A5: "\U00026d6b", + 0x0000F999: "\u84ee", + 0x0002F9A8: "\u84f1", + 0x0002F9A9: "\u84f3", + 0x0000F9C2: "\u84fc", + 0x0002F9AA: "\u8516", + 0x0002F9A7: "\u452b", + 0x0002F9AC: "\u8564", + 0x0002F9AD: "\U00026f2c", + 0x0000F923: "\u85cd", + 0x0002F9AE: "\u455d", + 0x0002F9B0: "\U00026fb1", + 0x0002F9AF: "\u4561", + 0x0000F9F0: "\u85fa", + 0x0000F935: "\u8606", + 0x0002F9B2: "\u456b", + 0x0000FA20: "\u8612", + 0x0000F91F: "\u862d", + 0x0002F9B1: "\U000270d2", + 0x00008641: "\u8637", + 0x0000F910: "\u863f", + 0x00002F8C: "\u864d", + 0x00002EC1: "\u864e", + 0x0002F9B3: "\u8650", + 0x0000F936: "\u865c", + 0x0002F9B4: "\u865c", + 0x0002F9B5: "\u8667", + 0x0002F9B6: "\u8669", + 0x00002F8D: "\u866b", + 0x0002F9B7: "\u86a9", + 0x0002F9B8: "\u8688", + 0x0002F9BA: "\u86e2", + 0x0002F9B9: "\u870e", + 0x0002F9BC: "\u8728", + 0x0002F9BD: "\u876b", + 0x0002F9C0: "\u87e1", + 0x0000FAB5: "\u8779", + 0x0002F9BB: "\u8779", + 0x0002F9BE: "\u8786", + 0x0002F9BF: "\u45d7", + 0x0002F9AB: "\U000273ca", + 0x0000F911: "\u87ba", + 0x0002F9C1: "\u8801", + 0x0002F9C2: "\u45f9", + 0x0000F927: "\u881f", + 0x00002F8E: "\u8840", + 0x0000FA08: "\u884c", + 0x00002F8F: "\u884c", + 0x0002F9C3: "\u8860", + 0x0002F9C4: "\u8863", + 0x00002F90: "\u8863", + 0x00002EC2: "\u8864", + 0x0000F9A0: "\u88c2", + 0x0002F9C5: "\U00027667", + 0x0000F9E7: "\u88cf", + 0x0002F9C6: "\u88d7", + 0x0002F9C7: "\u88de", + 0x0000F9E8: "\u88e1", + 0x0000F912: "\u88f8", + 0x0002F9C9: "\u88fa", + 0x0002F9C8: "\u4635", + 0x0000FA60: "\u8910", + 0x0000FAB6: "\u8941", + 0x0000F924: "\u8964", + 0x00002F91: "\u897e", + 0x00002EC4: "\u897f", + 0x00002EC3: "\u8980", + 0x0000FAB7: "\u8986", + 0x0000FA0A: "\u898b", + 0x00002F92: "\u898b", + 0x0002F9CB: "\U000278ae", + 0x00002EC5: "\u89c1", + 0x00002F93: "\u89d2", + 0x00002F94: "\u8a00", + 0x0002F9CC: "\U00027966", + 0x00008A7D: "\u8a2e", + 0x00008A1E: "\u46b6", + 0x0002F9CD: "\u46be", + 0x0002F9CE: "\u46c7", + 0x0002F9CF: "\u8aa0", + 0x0000F96F: "\u8aaa", + 0x0000F9A1: "\u8aaa", + 0x0000FAB9: "\u8abf", + 0x0000FABB: "\u8acb", + 0x0000F97D: "\u8ad2", + 0x0000F941: "\u8ad6", + 0x0000FABE: "\u8aed", + 0x0002F9D0: "\u8aed", + 0x0000FA22: "\u8af8", + 0x0000FABA: "\u8af8", + 0x0000F95D: "\u8afe", + 0x0000FABD: "\u8afe", + 0x0000FA62: "\u8b01", + 0x0000FABC: "\u8b01", + 0x0000FA63: "\u8b39", + 0x0000FABF: "\u8b39", + 0x0000F9FC: "\u8b58", + 0x0000F95A: "\u8b80", + 
0x00008B8F: "\u8b86", + 0x0000FAC0: "\u8b8a", + 0x0002F9D1: "\u8b8a", + 0x00002EC8: "\u8ba0", + 0x00002F95: "\u8c37", + 0x00002F96: "\u8c46", + 0x0000F900: "\u8c48", + 0x0002F9D2: "\u8c55", + 0x00002F97: "\u8c55", + 0x00008C63: "\u8c5c", + 0x00002F98: "\u8c78", + 0x0002F9D3: "\U00027ca8", + 0x00002F99: "\u8c9d", + 0x0002F9D4: "\u8cab", + 0x0002F9D5: "\u8cc1", + 0x0000F948: "\u8cc2", + 0x0000F903: "\u8cc8", + 0x0000FA64: "\u8cd3", + 0x0000FA65: "\u8d08", + 0x0000FAC1: "\u8d08", + 0x0002F9D6: "\u8d1b", + 0x00002EC9: "\u8d1d", + 0x00002F9A: "\u8d64", + 0x00002F9B: "\u8d70", + 0x0002F9D7: "\u8d77", + 0x00008D86: "\u8d7f", + 0x0000FAD7: "\U00027ed3", + 0x0002F9D8: "\U00027f2f", + 0x00002F9C: "\u8db3", + 0x0002F9DA: "\u8dcb", + 0x0002F9DB: "\u8dbc", + 0x00008DFA: "\u8de5", + 0x0000F937: "\u8def", + 0x0002F9DC: "\u8df0", + 0x00008E9B: "\u8e97", + 0x00002F9D: "\u8eab", + 0x0000F902: "\u8eca", + 0x00002F9E: "\u8eca", + 0x0002F9DE: "\u8ed4", + 0x00008F27: "\u8eff", + 0x0000F998: "\u8f26", + 0x0000F9D7: "\u8f2a", + 0x0000FAC2: "\u8f38", + 0x0002F9DF: "\u8f38", + 0x0000FA07: "\u8f3b", + 0x0000F98D: "\u8f62", + 0x00002ECB: "\u8f66", + 0x00002F9F: "\u8f9b", + 0x0002F98D: "\u8f9e", + 0x0000F971: "\u8fb0", + 0x00002FA0: "\u8fb0", + 0x00002FA1: "\u8fb5", + 0x0000FA66: "\u8fb6", + 0x00002ECC: "\u8fb6", + 0x00002ECD: "\u8fb6", + 0x0002F881: "\u5de1", + 0x0000F99A: "\u9023", + 0x0000FA25: "\u9038", + 0x0000FA67: "\u9038", + 0x0000FAC3: "\u9072", + 0x0000F9C3: "\u907c", + 0x0002F9E0: "\U000285d2", + 0x0002F9E1: "\U000285ed", + 0x0000F913: "\u908f", + 0x00002FA2: "\u9091", + 0x0002F9E2: "\u9094", + 0x0000F92C: "\u90ce", + 0x000090DE: "\u90ce", + 0x0000FA2E: "\u90ce", + 0x0002F9E3: "\u90f1", + 0x0000FA26: "\u90fd", + 0x0002F9E5: "\U0002872e", + 0x0002F9E4: "\u9111", + 0x0002F9E6: "\u911b", + 0x00002FA3: "\u9149", + 0x0000F919: "\u916a", + 0x0000FAC4: "\u9199", + 0x0000F9B7: "\u91b4", + 0x00002FA4: "\u91c6", + 0x0000F9E9: "\u91cc", + 0x00002FA5: "\u91cc", + 0x0000F97E: "\u91cf", + 0x0000F90A: "\u91d1", + 0x00002FA6: "\u91d1", + 0x0000F9B1: "\u9234", + 0x0002F9E7: "\u9238", + 0x0000FAC5: "\u9276", + 0x0002F9E8: "\u92d7", + 0x0002F9E9: "\u92d8", + 0x0002F9EA: "\u927c", + 0x0000F93F: "\u9304", + 0x0000F99B: "\u934a", + 0x000093AE: "\u93ad", + 0x0002F9EB: "\u93f9", + 0x0002F9EC: "\u9415", + 0x0002F9ED: "\U00028bfa", + 0x00002ED0: "\u9485", + 0x00002ED1: "\u9577", + 0x00002FA7: "\u9577", + 0x00002ED2: "\u9578", + 0x00002ED3: "\u957f", + 0x00002FA8: "\u9580", + 0x0002F9EE: "\u958b", + 0x0002F9EF: "\u4995", + 0x0000F986: "\u95ad", + 0x0002F9F0: "\u95b7", + 0x0002F9F1: "\U00028d77", + 0x00002ED4: "\u95e8", + 0x00002FA9: "\u961c", + 0x00002ECF: "\u961d", + 0x00002ED6: "\u961d", + 0x0000F9C6: "\u962e", + 0x0000F951: "\u964b", + 0x0000FA09: "\u964d", + 0x0000F959: "\u9675", + 0x0000F9D3: "\u9678", + 0x0000FAC6: "\u967c", + 0x0000F9DC: "\u9686", + 0x0000F9F1: "\u96a3", + 0x0002F9F2: "\u49e6", + 0x00002FAA: "\u96b6", + 0x0000FA2F: "\u96b7", + 0x000096B8: "\u96b7", + 0x0000F9B8: "\u96b7", + 0x00002FAB: "\u96b9", + 0x0002F9F3: "\u96c3", + 0x0000F9EA: "\u96e2", + 0x0000FA68: "\u96e3", + 0x0000FAC7: "\u96e3", + 0x00002FAC: "\u96e8", + 0x0000F9B2: "\u96f6", + 0x0000F949: "\u96f7", + 0x0002F9F5: "\u9723", + 0x0002F9F6: "\U00029145", + 0x0000F938: "\u9732", + 0x0000F9B3: "\u9748", + 0x00002FAD: "\u9751", + 0x00002ED8: "\u9752", + 0x0000FA1C: "\u9756", + 0x0000FAC8: "\u9756", + 0x0002F81C: "\U000291df", + 0x00002FAE: "\u975e", + 0x00002FAF: "\u9762", + 0x0002F9F7: "\U0002921a", + 0x00002FB0: "\u9769", + 0x0002F9F8: "\u4a6e", + 
0x0002F9F9: "\u4a76", + 0x00002FB1: "\u97cb", + 0x0000FAC9: "\u97db", + 0x0002F9FA: "\u97e0", + 0x00002ED9: "\u97e6", + 0x00002FB2: "\u97ed", + 0x0002F9FB: "\U0002940a", + 0x00002FB3: "\u97f3", + 0x0000FA69: "\u97ff", + 0x0000FACA: "\u97ff", + 0x00002FB4: "\u9801", + 0x0002F9FC: "\u4ab2", + 0x0000FACB: "\u980b", + 0x0002F9FE: "\u980b", + 0x0002F9FF: "\u980b", + 0x0000F9B4: "\u9818", + 0x0002FA00: "\u9829", + 0x0002F9FD: "\U00029496", + 0x0000FA6A: "\u983b", + 0x0000FACC: "\u983b", + 0x0000F9D0: "\u985e", + 0x00002EDA: "\u9875", + 0x00002FB5: "\u98a8", + 0x0002FA01: "\U000295b6", + 0x00002EDB: "\u98ce", + 0x00002FB6: "\u98db", + 0x00002EDC: "\u98de", + 0x00002EDD: "\u98df", + 0x00002FB7: "\u98df", + 0x00002EDF: "\u98e0", + 0x0002FA02: "\u98e2", + 0x0000FA2A: "\u98ef", + 0x0000FA2B: "\u98fc", + 0x0002FA03: "\u4b33", + 0x0000FA2C: "\u9928", + 0x0002FA04: "\u9929", + 0x00002EE0: "\u9963", + 0x00002FB8: "\u9996", + 0x00002FB9: "\u9999", + 0x0002FA05: "\u99a7", + 0x00002FBA: "\u99ac", + 0x0002FA06: "\u99c2", + 0x0000F91A: "\u99f1", + 0x0002FA07: "\u99fe", + 0x0000F987: "\u9a6a", + 0x00002EE2: "\u9a6c", + 0x00002FBB: "\u9aa8", + 0x0002FA08: "\u4bce", + 0x00002FBC: "\u9ad8", + 0x00002FBD: "\u9adf", + 0x0002FA09: "\U00029b30", + 0x0000FACD: "\u9b12", + 0x0002FA0A: "\u9b12", + 0x00002FBE: "\u9b25", + 0x00002FBF: "\u9b2f", + 0x00002FC0: "\u9b32", + 0x00002FC1: "\u9b3c", + 0x00002EE4: "\u9b3c", + 0x00002FC2: "\u9b5a", + 0x0000F939: "\u9b6f", + 0x0002FA0B: "\u9c40", + 0x0000F9F2: "\u9c57", + 0x00002EE5: "\u9c7c", + 0x00002FC3: "\u9ce5", + 0x0002FA0C: "\u9cfd", + 0x0002FA0D: "\u4cce", + 0x0002FA0F: "\u9d67", + 0x0002FA0E: "\u4ced", + 0x0002FA10: "\U0002a0ce", + 0x0000FA2D: "\u9db4", + 0x0002FA12: "\U0002a105", + 0x0002FA11: "\u4cf8", + 0x0000F93A: "\u9dfa", + 0x0002FA13: "\U0002a20e", + 0x0000F920: "\u9e1e", + 0x00009E43: "\u9e42", + 0x00002FC4: "\u9e75", + 0x0000F940: "\u9e7f", + 0x00002FC5: "\u9e7f", + 0x0002FA14: "\U0002a291", + 0x0000F988: "\u9e97", + 0x0000F9F3: "\u9e9f", + 0x00002FC6: "\u9ea5", + 0x00002EE8: "\u9ea6", + 0x0002FA15: "\u9ebb", + 0x00002FC7: "\u9ebb", + 0x0002F88F: "\U0002a392", + 0x00002FC8: "\u9ec3", + 0x00002EE9: "\u9ec4", + 0x00002FC9: "\u9ecd", + 0x0000F989: "\u9ece", + 0x0002FA16: "\u4d56", + 0x00002FCA: "\u9ed1", + 0x00009ED2: "\u9ed1", + 0x0000FA3A: "\u58a8", + 0x0002FA17: "\u9ef9", + 0x00002FCB: "\u9ef9", + 0x00002FCC: "\u9efd", + 0x0002FA19: "\u9f05", + 0x0002FA18: "\u9efe", + 0x00002FCD: "\u9f0e", + 0x0002FA1A: "\u9f0f", + 0x00002FCE: "\u9f13", + 0x0002FA1B: "\u9f16", + 0x00002FCF: "\u9f20", + 0x0002FA1C: "\u9f3b", + 0x00002FD0: "\u9f3b", + 0x0000FAD8: "\u9f43", + 0x00002FD1: "\u9f4a", + 0x00002EEC: "\u9f50", + 0x00002FD2: "\u9f52", + 0x0002FA1D: "\U0002a600", + 0x00002EEE: "\u9f7f", + 0x0000F9C4: "\u9f8d", + 0x00002FD3: "\u9f8d", + 0x0000FAD9: "\u9f8e", + 0x00002EF0: "\u9f99", + 0x0000F907: "\u9f9c", + 0x0000F908: "\u9f9c", + 0x0000FACE: "\u9f9c", + 0x00002FD4: "\u9f9c", + 0x00002EF3: "\u9f9f", + 0x00002FD5: "\u9fa0", +} diff --git a/vendor/github.com/oragono/confusables/tweaks.go b/vendor/github.com/oragono/confusables/tweaks.go new file mode 100644 index 00000000..0d05af1c --- /dev/null +++ b/vendor/github.com/oragono/confusables/tweaks.go @@ -0,0 +1,38 @@ +package confusables + +// these are overrides for the standard confusables table: +// a mapping to "" means "don't map", a mapping to a replacement means +// "replace with this", no entry means "defer to the standard table" + +var tweaksMap = map[rune]string{ + // ASCII-to-ASCII mapping that we are removing: + 0x6d: 
"", // m -> rn + // these characters are confusable with m, hence the official table + // maps them to rn (`grep "LATIN SMALL LETTER R, LATIN SMALL LETTER N" confusables.txt`) + 0x118E3: "m", // 118E3 ; 0072 006E ; MA # ( 𑣣 → rn ) WARANG CITI DIGIT THREE → LATIN SMALL LETTER R, LATIN SMALL LETTER N + 0x11700: "m", // 11700 ; 0072 006E ; MA # ( 𑜀 → rn ) AHOM LETTER KA → LATIN SMALL LETTER R, LATIN SMALL LETTER N + // the table thinks this is confusable with m̦ but I think it's confusable with m: + 0x0271: "m", // 0271 ; 0072 006E 0326 ; MA # ( ɱ → rn̦ ) LATIN SMALL LETTER M WITH HOOK → LATIN SMALL LETTER R, LATIN SMALL LETTER N, COMBINING COMMA BELOW # →m̡→ + + /* + // ASCII-to-ASCII mapping that we are removing: + 0x49: "", // I -> l + // these characters are confusable with I, hence the official table + // maps them to l (`grep "LATIN SMALL LETTER L" confusables.txt`) + 0x0399: "I", // 0399 ; 006C ; MA # ( Ι → l ) GREEK CAPITAL LETTER IOTA → LATIN SMALL LETTER L # + 0x0406: "I", // 0406 ; 006C ; MA # ( І → l ) CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I → LATIN SMALL LETTER L # + 0x04C0: "I", // 04C0 ; 006C ; MA # ( Ӏ → l ) CYRILLIC LETTER PALOCHKA → LATIN SMALL LETTER L # + + // ASCII-to-ASCII mapping that we are removing: + 0x31: "", // 1 -> l + // these characters are confusable with 1, hence the official table + // maps them to l (`grep "LATIN SMALL LETTER L" confusables.txt`) + // [nothing yet] + + // ASCII-to-ASCII mapping that we are removing: + 0x30: "", // 0 -> O + // these characters are confusable with 0, hence the official table + // maps them to O (`grep "LATIN CAPITAL LETTER O\>" confusables.txt`) + // [nothing yet] + */ +} diff --git a/vendor/github.com/oragono/go-ident/LICENSE b/vendor/github.com/oragono/go-ident/LICENSE new file mode 100644 index 00000000..bcf2907b --- /dev/null +++ b/vendor/github.com/oragono/go-ident/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013 Dominik Honnef + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/oragono/go-ident/README.md b/vendor/github.com/oragono/go-ident/README.md new file mode 100644 index 00000000..7c7a200e --- /dev/null +++ b/vendor/github.com/oragono/go-ident/README.md @@ -0,0 +1,19 @@ +# RFC 1413 (Identification Protocol) client + +This package provides a client for the [Identification Protocol](https://tools.ietf.org/html/rfc1413). 
+ +--- + +[![GoDoc](https://godoc.org/github.com/DanielOaks/go-ident?status.svg)](https://godoc.org/github.com/DanielOaks/go-ident) [![Go Report Card](https://goreportcard.com/badge/github.com/DanielOaks/go-ident)](https://goreportcard.com/report/github.com/DanielOaks/go-ident) + +--- + +## Installation + +```sh +go get github.com/DanielOaks/go-ident +``` + +## Documentation + +Documentation can be found at [godoc.org](http://godoc.org/github.com/DanielOaks/go-ident). diff --git a/vendor/github.com/oragono/go-ident/client.go b/vendor/github.com/oragono/go-ident/client.go new file mode 100644 index 00000000..663da8a9 --- /dev/null +++ b/vendor/github.com/oragono/go-ident/client.go @@ -0,0 +1,108 @@ +// Package ident implements an RFC 1413 client +package ident + +import ( + "bufio" + "fmt" + "net" + "strings" + "time" +) + +// Response is a successful answer to our query to the identd server. +type Response struct { + OS string + Charset string + Identifier string +} + +// ResponseError indicates that the identd server returned an error rather than an +// identifying string. +type ResponseError struct { + Type string +} + +func (e ResponseError) Error() string { + return fmt.Sprintf("Ident error: %s", e.Type) +} + +// ProtocolError indicates that an error occurred with the protocol itself, that the response +// could not be successfully parsed or was malformed. +type ProtocolError struct { + Line string +} + +func (e ProtocolError) Error() string { + return fmt.Sprintf("Unexpected response from server: %s", e.Line) +} + +// Query makes an Ident query, if timeout is >0 the query is timed out after that many seconds. +func Query(ip string, portOnServer, portOnClient int, timeout float64) (Response, error) { + var ( + conn net.Conn + err error + fields []string + r *bufio.Reader + resp string + ) + + if timeout > 0 { + conn, err = net.DialTimeout("tcp", net.JoinHostPort(ip, "113"), time.Duration(timeout)*time.Second) + } else { + conn, err = net.Dial("tcp", net.JoinHostPort(ip, "113")) + } + if err != nil { + return Response{}, err + } + + // stop the ident read after seconds + if timeout > 0 { + conn.SetDeadline(time.Now().Add(time.Second * time.Duration(timeout))) + } + + _, err = conn.Write([]byte(fmt.Sprintf("%d, %d", portOnClient, portOnServer) + "\r\n")) + if err != nil { + return Response{}, err + } + + r = bufio.NewReader(conn) + resp, err = r.ReadString('\n') + if err != nil { + return Response{}, err + } + + fields = strings.SplitN(strings.TrimSpace(resp), " : ", 4) + if len(fields) < 3 { + return Response{}, ProtocolError{resp} + } + + switch fields[1] { + case "USERID": + if len(fields) != 4 { + return Response{}, ProtocolError{resp} + } + + var os, charset string + osAndCharset := strings.SplitN(fields[2], ",", 2) + if len(osAndCharset) == 2 { + os = osAndCharset[0] + charset = osAndCharset[1] + } else { + os = osAndCharset[0] + charset = "US-ASCII" + } + + return Response{ + OS: os, + Charset: charset, + Identifier: fields[3], + }, nil + case "ERROR": + if len(fields) != 3 { + return Response{}, ProtocolError{resp} + } + + return Response{}, ResponseError{fields[2]} + } + return Response{}, err +} diff --git a/vendor/github.com/tidwall/btree/.travis.yml b/vendor/github.com/tidwall/btree/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/vendor/github.com/tidwall/btree/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/tidwall/btree/LICENSE b/vendor/github.com/tidwall/btree/LICENSE new file mode 100644 index 00000000..d6456956 --- 
/dev/null +++ b/vendor/github.com/tidwall/btree/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/tidwall/btree/README.md b/vendor/github.com/tidwall/btree/README.md new file mode 100644 index 00000000..deb1e886 --- /dev/null +++ b/vendor/github.com/tidwall/btree/README.md @@ -0,0 +1,107 @@ +BTree implementation for Go +=========================== + +![Travis CI Build Status](https://api.travis-ci.org/tidwall/btree.svg?branch=master) +[![GoDoc](https://godoc.org/github.com/tidwall/btree?status.svg)](https://godoc.org/github.com/tidwall/btree) + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +This is a fork of the wonderful [google/btree](https://github.com/google/btree) package. It's has all the same great features and adds a few more. + +- Descend* functions for iterating backwards. +- Iteration performance boost. +- User defined context. + +User defined context +-------------------- +This is a great new feature that allows for entering the same item into multiple B-trees, and each B-tree have a different ordering formula. + +For example: + +```go +package main + +import ( + "fmt" + + "github.com/tidwall/btree" +) + +type Item struct { + Key, Val string +} + +func (i1 *Item) Less(item btree.Item, ctx interface{}) bool { + i2 := item.(*Item) + switch tag := ctx.(type) { + case string: + if tag == "vals" { + if i1.Val < i2.Val { + return true + } else if i1.Val > i2.Val { + return false + } + // Both vals are equal so we should fall though + // and let the key comparison take over. + } + } + return i1.Key < i2.Key +} + +func main() { + + // Create a tree for keys and a tree for values. + // The "keys" tree will be sorted on the Keys field. + // The "values" tree will be sorted on the Values field. + keys := btree.New(16, "keys") + vals := btree.New(16, "vals") + + // Create some items. + users := []*Item{ + &Item{Key: "user:1", Val: "Jane"}, + &Item{Key: "user:2", Val: "Andy"}, + &Item{Key: "user:3", Val: "Steve"}, + &Item{Key: "user:4", Val: "Andrea"}, + &Item{Key: "user:5", Val: "Janet"}, + &Item{Key: "user:6", Val: "Andy"}, + } + + // Insert each user into both trees + for _, user := range users { + keys.ReplaceOrInsert(user) + vals.ReplaceOrInsert(user) + } + + // Iterate over each user in the key tree + keys.Ascend(func(item btree.Item) bool { + kvi := item.(*Item) + fmt.Printf("%s %s\n", kvi.Key, kvi.Val) + return true + }) + + fmt.Printf("\n") + // Iterate over each user in the val tree + vals.Ascend(func(item btree.Item) bool { + kvi := item.(*Item) + fmt.Printf("%s %s\n", kvi.Key, kvi.Val) + return true + }) +} + +// Should see the results +/* +user:1 Jane +user:2 Andy +user:3 Steve +user:4 Andrea +user:5 Janet +user:6 Andy + +user:4 Andrea +user:2 Andy +user:6 Andy +user:1 Jane +user:3 Steve +*/ +``` diff --git a/vendor/github.com/tidwall/btree/btree.go b/vendor/github.com/tidwall/btree/btree.go new file mode 100644 index 00000000..3b9a27de --- /dev/null +++ b/vendor/github.com/tidwall/btree/btree.go @@ -0,0 +1,978 @@ +// Copyright 2014 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +package btree + +import ( + "fmt" + "io" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + // + // There is a user-defined ctx argument that is equal to the ctx value which + // is set at time of the btree contruction. + Less(than Item, ctx interface{}) bool +} + +const ( + DefaultFreeListSize = 32 +) + +var ( + nilItems = make(items, 16) + nilChildren = make(children, 16) +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList struct { + mu sync.Mutex + freelist []*node +} + +// NewFreeList creates a new free list. 
+// size is the maximum size of the returned free list. +func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +func (f *FreeList) freeNode(n *node) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + } + f.mu.Unlock() +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int, ctx interface{}) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize), ctx) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList, ctx interface{}) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + cow: ©OnWriteContext{freelist: f}, + ctx: ctx, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items) truncate(index int) { + var toClear items + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilItems):] + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item, ctx interface{}) (index int, found bool) { + i, j := 0, len(s) + for i < j { + h := i + (j-i)/2 + if !item.Less(s[h], ctx) { + i = h + 1 + } else { + j = h + } + } + if i > 0 && !s[i-1].Less(item, ctx) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. 
+func (s *children) removeAt(index int) *node { + n := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index children. index must be less than or equal to length. +func (s *children) truncate(index int) { + var toClear children + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilChildren):] + } +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + cow *copyOnWriteContext +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node) insert(item Item, maxItems int, ctx interface{}) Item { + i, found := n.items.find(item, ctx) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree, ctx): + // no change, we want first split node + case inTree.Less(item, ctx): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.mutableChild(i).insert(item, maxItems, ctx) +} + +// get finds the given key in the subtree and returns it. 
+func (n *node) get(key Item, ctx interface{}) Item { + i, found := n.items.find(key, ctx) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key, ctx) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove, ctx interface{}) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item, ctx) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ, ctx) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax, ctx) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ, ctx) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. 
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove, ctx interface{}) Item { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ, ctx) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. +func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator, ctx interface{}) (bool, bool) { + var ok bool + switch dir { + case ascend: + for i := 0; i < len(n.items); i++ { + if start != nil && n.items[i].Less(start, ctx) { + continue + } + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok { + return hit, false + } + } + if !includeStart && !hit && start != nil && !start.Less(n.items[i], ctx) { + hit = true + continue + } + hit = true + if stop != nil && !n.items[i].Less(stop, ctx) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok { + return hit, false + } + } + case descend: + for i := len(n.items) - 1; i >= 0; i-- { + if start != nil && !n.items[i].Less(start, ctx) { + if !includeStart || hit || start.Less(n.items[i], ctx) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok { + return hit, false + } + } + if stop != nil && !stop.Less(n.items[i], ctx) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok { + return hit, false + } + } + } + return hit, true +} + +// Used for testing/debugging purposes. 
+func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + ctx interface{} + cow *copyOnWriteContext +} + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext struct { + freelist *FreeList +} + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext) newNode() (n *node) { + n = c.freelist.newNode() + n.cow = c + return +} + +func (c *copyOnWriteContext) freeNode(n *node) { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + c.freelist.freeNode(n) + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). 
+func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out := t.root.insert(item, t.maxItems(), t.ctx) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem, t.ctx) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin, t.ctx) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax, t.ctx) +} + +func (t *BTree) deleteItem(item Item, typ toRemove, ctx interface{}) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + t.root = t.root.mutableFor(t.cow) + out := t.root.remove(item, t.minItems(), typ, ctx) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator, t.ctx) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, pivot, false, false, iterator, t.ctx) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, pivot, nil, true, false, iterator, t.ctx) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, nil, false, false, iterator, t.ctx) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator, t.ctx) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. 
+func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, pivot, nil, true, false, iterator, t.ctx) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range (pivot, last], until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, pivot, false, false, iterator, t.ctx) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, nil, false, false, iterator, t.ctx) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key, t.ctx) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Context returns the context of the tree. +func (t *BTree) Context() interface{} { + return t.ctx +} + +// SetContext will replace the context of the tree. +func (t *BTree) SetContext(ctx interface{}) { + t.ctx = ctx +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item, ctx interface{}) bool { + return a < b.(Int) +} + +type stackItem struct { + n *node // current node + i int // index of the next child/item. +} + +// Cursor represents an iterator that can traverse over all items in the tree +// in sorted order. +// +// Changing data while traversing a cursor may result in unexpected items to +// be returned. You must reposition your cursor after mutating data. +type Cursor struct { + t *BTree + stack []stackItem +} + +// Cursor returns a new cursor used to traverse over items in the tree. +func (t *BTree) Cursor() *Cursor { + return &Cursor{t: t} +} + +// First moves the cursor to the first item in the tree and returns that item. +func (c *Cursor) First() Item { + c.stack = c.stack[:0] + n := c.t.root + if n == nil { + return nil + } + c.stack = append(c.stack, stackItem{n: n}) + for len(n.children) > 0 { + n = n.children[0] + c.stack = append(c.stack, stackItem{n: n}) + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// Next moves the cursor to the next item and returns that item. +func (c *Cursor) Next() Item { + if len(c.stack) == 0 { + return nil + } + si := len(c.stack) - 1 + c.stack[si].i++ + n := c.stack[si].n + i := c.stack[si].i + if i == len(n.children)+len(n.items) { + c.stack = c.stack[:len(c.stack)-1] + return c.Next() + } + if len(n.children) == 0 { + if i >= len(n.items) { + c.stack = c.stack[:len(c.stack)-1] + return c.Next() + } + return n.items[i] + } else if i%2 == 1 { + return n.items[i/2] + } + c.stack = append(c.stack, stackItem{n: n.children[i/2], i: -1}) + return c.Next() + +} + +// Last moves the cursor to the last item in the tree and returns that item. 
+func (c *Cursor) Last() Item { + c.stack = c.stack[:0] + n := c.t.root + if n == nil { + return nil + } + c.stack = append(c.stack, stackItem{n: n, i: len(n.children) + len(n.items) - 1}) + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + c.stack = append(c.stack, stackItem{n: n, i: len(n.children) + len(n.items) - 1}) + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// Prev moves the cursor to the previous item and returns that item. +func (c *Cursor) Prev() Item { + if len(c.stack) == 0 { + return nil + } + si := len(c.stack) - 1 + c.stack[si].i-- + n := c.stack[si].n + i := c.stack[si].i + if i == -1 { + c.stack = c.stack[:len(c.stack)-1] + return c.Prev() + } + if len(n.children) == 0 { + return n.items[i] + } else if i%2 == 1 { + return n.items[i/2] + } + child := n.children[i/2] + c.stack = append(c.stack, stackItem{n: child, + i: len(child.children) + len(child.items)}) + return c.Prev() +} + +// Seek moves the cursor to provided item and returns that item. +// If the item does not exist then the next item is returned. +func (c *Cursor) Seek(pivot Item) Item { + c.stack = c.stack[:0] + n := c.t.root + for n != nil { + i, found := n.items.find(pivot, c.t.ctx) + c.stack = append(c.stack, stackItem{n: n}) + if found { + if len(n.children) == 0 { + c.stack[len(c.stack)-1].i = i + } else { + c.stack[len(c.stack)-1].i = i*2 + 1 + } + return n.items[i] + } + if len(n.children) == 0 { + if i == len(n.items) { + c.stack[len(c.stack)-1].i = i + 1 + return c.Next() + } + c.stack[len(c.stack)-1].i = i + return n.items[i] + } + c.stack[len(c.stack)-1].i = i * 2 + n = n.children[i] + } + return nil +} diff --git a/vendor/github.com/tidwall/buntdb/.travis.yml b/vendor/github.com/tidwall/buntdb/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/tidwall/buntdb/LICENSE b/vendor/github.com/tidwall/buntdb/LICENSE new file mode 100644 index 00000000..58f5819a --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/buntdb/README.md b/vendor/github.com/tidwall/buntdb/README.md new file mode 100644 index 00000000..31798093 --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/README.md @@ -0,0 +1,634 @@ +

+BuntDB
+
+(badges: Build Status, Code Coverage, Go Report Card, GoDoc)

+ +BuntDB is a low-level, in-memory, key/value store in pure Go. +It persists to disk, is ACID compliant, and uses locking for multiple +readers and a single writer. It supports custom indexes and geospatial +data. It's ideal for projects that need a dependable database and favor +speed over data size. + +Features +======== + +- In-memory database for [fast reads and writes](#performance) +- Embeddable with a [simple API](https://godoc.org/github.com/tidwall/buntdb) +- [Spatial indexing](#spatial-indexes) for up to 20 dimensions; Useful for Geospatial data +- Index fields inside [JSON](#json-indexes) documents +- [Collate i18n Indexes](#collate-i18n-indexes) using the optional [collate package](https://github.com/tidwall/collate) +- Create [custom indexes](#custom-indexes) for any data type +- Support for [multi value indexes](#multi-value-index); Similar to a SQL multi column index +- [Built-in types](#built-in-types) that are easy to get up & running; String, Uint, Int, Float +- Flexible [iteration](#iterating) of data; ascending, descending, and ranges +- [Durable append-only file](#append-only-file) format for persistence +- Option to evict old items with an [expiration](#data-expiration) TTL +- Tight codebase, under 2K loc using the `cloc` command +- ACID semantics with locking [transactions](#transactions) that support rollbacks + + +Getting Started +=============== + +## Installing + +To start using BuntDB, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/buntdb +``` + +This will retrieve the library. + + +## Opening a database + +The primary object in BuntDB is a `DB`. To open or create your +database, use the `buntdb.Open()` function: + +```go +package main + +import ( + "log" + + "github.com/tidwall/buntdb" +) + +func main() { + // Open the data.db file. It will be created if it doesn't exist. + db, err := buntdb.Open("data.db") + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +It's also possible to open a database that does not persist to disk by using `:memory:` as the path of the file. + +```go +buntdb.Open(":memory:") // Open a file that does not persist to disk. +``` + +## Transactions +All reads and writes must be performed from inside a transaction. BuntDB can have one write transaction opened at a time, but can have many concurrent read transactions. Each transaction maintains a stable view of the database. In other words, once a transaction has begun, the data for that transaction cannot be changed by other transactions. + +Transactions run in a function that exposes a `Tx` object, which represents the transaction state. While inside a transaction, all database operations should be performed using this object. You should never access the origin `DB` object while inside a transaction. Doing so may have side-effects, such as blocking your application. + +When a transaction fails, it will roll back, and revert all changes that occurred to the database during that transaction. There's a single return value that you can use to close the transaction. For read/write transactions, returning an error this way will force the transaction to roll back. When a read/write transaction succeeds all changes are persisted to disk. + +### Read-only Transactions +A read-only transaction should be used when you don't need to make changes to the data. The advantage of a read-only transaction is that there can be many running concurrently. + +```go +err := db.View(func(tx *buntdb.Tx) error { + ... 
+    return nil
+})
+```
+
+### Read/write Transactions
+A read/write transaction is used when you need to make changes to your data. There can only be one read/write transaction running at a time, so make sure you close it as soon as you are done with it.
+
+```go
+err := db.Update(func(tx *buntdb.Tx) error {
+    ...
+    return nil
+})
+```
+
+## Setting and getting key/values
+
+To set a value you must open a read/write transaction:
+
+```go
+err := db.Update(func(tx *buntdb.Tx) error {
+    _, _, err := tx.Set("mykey", "myvalue", nil)
+    return err
+})
+```
+
+To get the value:
+
+```go
+err := db.View(func(tx *buntdb.Tx) error {
+    val, err := tx.Get("mykey")
+    if err != nil {
+        return err
+    }
+    fmt.Printf("value is %s\n", val)
+    return nil
+})
+```
+
+Getting non-existent values will cause an `ErrNotFound` error.
+
+### Iterating
+All key/value pairs are ordered in the database by the key. To iterate over the keys:
+
+```go
+err := db.View(func(tx *buntdb.Tx) error {
+    err := tx.Ascend("", func(key, value string) bool {
+        fmt.Printf("key: %s, value: %s\n", key, value)
+        return true
+    })
+    return err
+})
+```
+
+There is also `AscendGreaterOrEqual`, `AscendLessThan`, `AscendRange`, `AscendEqual`, `Descend`, `DescendLessOrEqual`, `DescendGreaterThan`, `DescendRange`, and `DescendEqual`. Please see the [documentation](https://godoc.org/github.com/tidwall/buntdb) for more information on these functions.
+
+## Custom Indexes
+Initially all data is stored in a single [B-tree](https://en.wikipedia.org/wiki/B-tree) with each item having one key and one value. All of these items are ordered by the key. This is great for quickly getting a value from a key or [iterating](#iterating) over the keys. Feel free to peruse the [B-tree implementation](https://github.com/tidwall/btree).
+
+You can also create custom indexes that allow for ordering and [iterating](#iterating) over values. A custom index also uses a B-tree, but it's more flexible because it allows for custom ordering.
+
+For example, let's say you want to create an index for ordering names:
+
+```go
+db.CreateIndex("names", "*", buntdb.IndexString)
+```
+
+This will create an index named `names` which stores and sorts all values. The second parameter is a pattern that is used to filter on keys. A `*` wildcard argument means that we want to accept all keys. `IndexString` is a built-in function that performs case-insensitive ordering on the values.
+
+Now you can add various names:
+
+```go
+db.Update(func(tx *buntdb.Tx) error {
+    tx.Set("user:0:name", "tom", nil)
+    tx.Set("user:1:name", "Randi", nil)
+    tx.Set("user:2:name", "jane", nil)
+    tx.Set("user:4:name", "Janet", nil)
+    tx.Set("user:5:name", "Paula", nil)
+    tx.Set("user:6:name", "peter", nil)
+    tx.Set("user:7:name", "Terri", nil)
+    return nil
+})
+```
+
+Finally you can iterate over the index:
+
+```go
+db.View(func(tx *buntdb.Tx) error {
+    tx.Ascend("names", func(key, val string) bool {
+        fmt.Printf("%s %s\n", key, val)
+        return true
+    })
+    return nil
+})
+```
+The output should be:
+```
+user:2:name jane
+user:4:name Janet
+user:5:name Paula
+user:6:name peter
+user:1:name Randi
+user:7:name Terri
+user:0:name tom
+```
+
+The pattern parameter can be used to filter on keys like this:
+
+```go
+db.CreateIndex("names", "user:*", buntdb.IndexString)
+```
+
+Now only items with keys that have the prefix `user:` will be added to the `names` index.
+
+### Built-in types
+Along with `IndexString`, there is also `IndexInt`, `IndexUint`, and `IndexFloat`.
+These are built-in types for indexing.
+You can choose to use these or create your own.
+
+So to create an index that is numerically ordered on an age key, we could use:
+
+```go
+db.CreateIndex("ages", "user:*:age", buntdb.IndexInt)
+```
+
+And then add values:
+
+```go
+db.Update(func(tx *buntdb.Tx) error {
+    tx.Set("user:0:age", "35", nil)
+    tx.Set("user:1:age", "49", nil)
+    tx.Set("user:2:age", "13", nil)
+    tx.Set("user:4:age", "63", nil)
+    tx.Set("user:5:age", "8", nil)
+    tx.Set("user:6:age", "3", nil)
+    tx.Set("user:7:age", "16", nil)
+    return nil
+})
+```
+
+Then iterate over the `ages` index:
+
+```go
+db.View(func(tx *buntdb.Tx) error {
+    tx.Ascend("ages", func(key, val string) bool {
+        fmt.Printf("%s %s\n", key, val)
+        return true
+    })
+    return nil
+})
+```
+
+The output should be:
+```
+user:6:age 3
+user:5:age 8
+user:2:age 13
+user:7:age 16
+user:0:age 35
+user:1:age 49
+user:4:age 63
+```
+
+## Spatial Indexes
+BuntDB has support for spatial indexes by storing rectangles in an [R-tree](https://en.wikipedia.org/wiki/R-tree). An R-tree is organized in a similar manner to a [B-tree](https://en.wikipedia.org/wiki/B-tree), and both are balanced trees. But an R-tree is special because it can operate on data that is in multiple dimensions. This is super handy for Geospatial applications.
+
+To create a spatial index use the `CreateSpatialIndex` function:
+
+```go
+db.CreateSpatialIndex("fleet", "fleet:*:pos", buntdb.IndexRect)
+```
+
+`IndexRect` is a built-in function that converts rect strings to a format that the R-tree can use. It's easy to use this function out of the box, but you might find it better to create a custom one that renders from a different format, such as [Well-known text](https://en.wikipedia.org/wiki/Well-known_text) or [GeoJSON](http://geojson.org/).
+
+To add some lon,lat points to the `fleet` index:
+
+```go
+db.Update(func(tx *buntdb.Tx) error {
+    tx.Set("fleet:0:pos", "[-115.567 33.532]", nil)
+    tx.Set("fleet:1:pos", "[-116.671 35.735]", nil)
+    tx.Set("fleet:2:pos", "[-113.902 31.234]", nil)
+    return nil
+})
+```
+
+And then you can run the `Intersects` function on the index:
+
+```go
+db.View(func(tx *buntdb.Tx) error {
+    tx.Intersects("fleet", "[-117 30],[-112 36]", func(key, val string) bool {
+        ...
+        return true
+    })
+    return nil
+})
+```
+
+This will get all three positions.
+
+### k-Nearest Neighbors
+
+Use the `Nearby` function to get all the positions in order of nearest to farthest:
+
+```go
+db.View(func(tx *buntdb.Tx) error {
+    tx.Nearby("fleet", "[-113 33]", func(key, val string, dist float64) bool {
+        ...
+        return true
+    })
+    return nil
+})
+```
+
+### Spatial bracket syntax
+
+The bracket syntax `[-117 30],[-112 36]` is unique to BuntDB, and it's how the built-in rectangles are processed. But you are not limited to this syntax. Whatever Rect function you choose to use during `CreateSpatialIndex` will be used to process the parameter; in this case it's `IndexRect`.
+
+- **2D rectangle:** `[10 15],[20 25]`
+*Min XY: "10x15", Max XY: "20x25"*
+
+- **3D rectangle:** `[10 15 12],[20 25 18]`
+*Min XYZ: "10x15x12", Max XYZ: "20x25x18"*
+
+- **2D point:** `[10 15]`
+*XY: "10x15"*
+
+- **LonLat point:** `[-112.2693 33.5123]`
+*LatLon: "33.5123 -112.2693"*
+
+- **LonLat bounding box:** `[-112.26 33.51],[-112.18 33.67]`
+*Min LatLon: "33.51 -112.26", Max LatLon: "33.67 -112.18"*
+
+**Notice:** The longitude is the Y axis and is on the left, and latitude is the X axis and is on the right.
+
+You can also represent `Infinity` by using `-inf` and `+inf`.
+For example, you might have the following points (`[X Y M]` where XY is a point and M is a timestamp): +``` +[3 9 1] +[3 8 2] +[4 8 3] +[4 7 4] +[5 7 5] +[5 6 6] +``` + +You can then do a search for all points with `M` between 2-4 by calling `Intersects`. + +```go +tx.Intersects("points", "[-inf -inf 2],[+inf +inf 4]", func(key, val string) bool { + println(val) + return true +}) +``` + +Which will return: + +``` +[3 8 2] +[4 8 3] +[4 7 4] +``` + +## JSON Indexes +Indexes can be created on individual fields inside JSON documents. BuntDB uses [GJSON](https://github.com/tidwall/gjson) under the hood. + +For example: + +```go +package main + +import ( + "fmt" + + "github.com/tidwall/buntdb" +) + +func main() { + db, _ := buntdb.Open(":memory:") + db.CreateIndex("last_name", "*", buntdb.IndexJSON("name.last")) + db.CreateIndex("age", "*", buntdb.IndexJSON("age")) + db.Update(func(tx *buntdb.Tx) error { + tx.Set("1", `{"name":{"first":"Tom","last":"Johnson"},"age":38}`, nil) + tx.Set("2", `{"name":{"first":"Janet","last":"Prichard"},"age":47}`, nil) + tx.Set("3", `{"name":{"first":"Carol","last":"Anderson"},"age":52}`, nil) + tx.Set("4", `{"name":{"first":"Alan","last":"Cooper"},"age":28}`, nil) + return nil + }) + db.View(func(tx *buntdb.Tx) error { + fmt.Println("Order by last name") + tx.Ascend("last_name", func(key, value string) bool { + fmt.Printf("%s: %s\n", key, value) + return true + }) + fmt.Println("Order by age") + tx.Ascend("age", func(key, value string) bool { + fmt.Printf("%s: %s\n", key, value) + return true + }) + fmt.Println("Order by age range 30-50") + tx.AscendRange("age", `{"age":30}`, `{"age":50}`, func(key, value string) bool { + fmt.Printf("%s: %s\n", key, value) + return true + }) + return nil + }) +} +``` + +Results: + +``` +Order by last name +3: {"name":{"first":"Carol","last":"Anderson"},"age":52} +4: {"name":{"first":"Alan","last":"Cooper"},"age":28} +1: {"name":{"first":"Tom","last":"Johnson"},"age":38} +2: {"name":{"first":"Janet","last":"Prichard"},"age":47} + +Order by age +4: {"name":{"first":"Alan","last":"Cooper"},"age":28} +1: {"name":{"first":"Tom","last":"Johnson"},"age":38} +2: {"name":{"first":"Janet","last":"Prichard"},"age":47} +3: {"name":{"first":"Carol","last":"Anderson"},"age":52} + +Order by age range 30-50 +1: {"name":{"first":"Tom","last":"Johnson"},"age":38} +2: {"name":{"first":"Janet","last":"Prichard"},"age":47} +``` + +## Multi Value Index +With BuntDB it's possible to join multiple values on a single index. +This is similar to a [multi column index](http://dev.mysql.com/doc/refman/5.7/en/multiple-column-indexes.html) in a traditional SQL database. 
+
+In this example we are creating a multi value index on "name.last" and "age":
+
+```go
+db, _ := buntdb.Open(":memory:")
+db.CreateIndex("last_name_age", "*", buntdb.IndexJSON("name.last"), buntdb.IndexJSON("age"))
+db.Update(func(tx *buntdb.Tx) error {
+    tx.Set("1", `{"name":{"first":"Tom","last":"Johnson"},"age":38}`, nil)
+    tx.Set("2", `{"name":{"first":"Janet","last":"Prichard"},"age":47}`, nil)
+    tx.Set("3", `{"name":{"first":"Carol","last":"Anderson"},"age":52}`, nil)
+    tx.Set("4", `{"name":{"first":"Alan","last":"Cooper"},"age":28}`, nil)
+    tx.Set("5", `{"name":{"first":"Sam","last":"Anderson"},"age":51}`, nil)
+    tx.Set("6", `{"name":{"first":"Melinda","last":"Prichard"},"age":44}`, nil)
+    return nil
+})
+db.View(func(tx *buntdb.Tx) error {
+    tx.Ascend("last_name_age", func(key, value string) bool {
+        fmt.Printf("%s: %s\n", key, value)
+        return true
+    })
+    return nil
+})
+
+// Output:
+// 5: {"name":{"first":"Sam","last":"Anderson"},"age":51}
+// 3: {"name":{"first":"Carol","last":"Anderson"},"age":52}
+// 4: {"name":{"first":"Alan","last":"Cooper"},"age":28}
+// 1: {"name":{"first":"Tom","last":"Johnson"},"age":38}
+// 6: {"name":{"first":"Melinda","last":"Prichard"},"age":44}
+// 2: {"name":{"first":"Janet","last":"Prichard"},"age":47}
+```
+
+## Descending Ordered Index
+Any index can be put in descending order by wrapping its less function with `buntdb.Desc`.
+
+```go
+db.CreateIndex("last_name_age", "*",
+    buntdb.IndexJSON("name.last"),
+    buntdb.Desc(buntdb.IndexJSON("age")))
+```
+
+This will create a multi value index where the last name is ascending and the age is descending.
+
+## Collate i18n Indexes
+
+Using the external [collate package](https://github.com/tidwall/collate) it's possible to create
+indexes that are sorted by the specified language. This is similar to the [SQL COLLATE keyword](https://msdn.microsoft.com/en-us/library/ms174596.aspx) found in traditional databases.
+
+To install:
+
+```
+go get -u github.com/tidwall/collate
+```
+
+For example:
+
+```go
+import "github.com/tidwall/collate"
+
+// To sort case-insensitive in French.
+db.CreateIndex("name", "*", collate.IndexString("FRENCH_CI"))
+
+// To specify that numbers should sort numerically ("2" < "12")
+// and use a comma to represent a decimal point.
+db.CreateIndex("amount", "*", collate.IndexString("FRENCH_NUM"))
+```
+
+There's also support for collation on JSON indexes:
+
+```go
+db.CreateIndex("last_name", "*", collate.IndexJSON("CHINESE_CI", "name.last"))
+```
+
+Check out the [collate project](https://github.com/tidwall/collate) for more information.
+
+## Data Expiration
+Items can be automatically evicted by using the `SetOptions` object in the `Set` function to set a `TTL`.
+
+```go
+db.Update(func(tx *buntdb.Tx) error {
+    tx.Set("mykey", "myval", &buntdb.SetOptions{Expires: true, TTL: time.Second})
+    return nil
+})
+```
+
+Now `mykey` will automatically be deleted after one second. You can remove the TTL by setting the value again with the same key/value, but with the options parameter set to nil.
+
+## Delete while iterating
+BuntDB does not currently support deleting a key while in the process of iterating.
+As a workaround you'll need to delete keys following the completion of the iterator.
+
+```go
+var delkeys []string
+tx.AscendKeys("object:*", func(k, v string) bool {
+    if someCondition(k) {
+        delkeys = append(delkeys, k)
+    }
+    return true // continue
+})
+for _, k := range delkeys {
+    if _, err = tx.Delete(k); err != nil {
+        return err
+    }
+}
+```
+
+## Append-only File
+
+BuntDB uses an AOF (append-only file) which is a log of all database changes that occur from operations like `Set()` and `Delete()`.
+
+The format of this file looks like:
+```
+set key:1 value1
+set key:2 value2
+set key:1 value3
+del key:2
+...
+```
+
+When the database opens again, it will read back the aof file and process each command in exact order.
+This read process happens one time when the database opens.
+From there on the file is only appended.
+
+As you may guess this log file can grow large over time.
+There's a background routine that automatically shrinks the log file when it gets too large.
+There is also a `Shrink()` function which will rewrite the aof file so that it contains only the items in the database.
+The shrink operation does not lock up the database so read and write transactions can continue while shrinking is in process.
+
+### Durability and fsync
+
+By default BuntDB executes an `fsync` once every second on the [aof file](#append-only-file), which means that there's a chance that up to one second of data might be lost. If you need higher durability then there's an optional database config setting `Config.SyncPolicy` which can be set to `Always`.
+
+The `Config.SyncPolicy` has the following options:
+
+- `Never` - fsync is managed by the operating system, less safe
+- `EverySecond` - fsync every second, fast and safer, this is the default
+- `Always` - fsync after every write, very durable, slower
+
+## Config
+
+Here are some configuration options that can be used to change various behaviors of the database.
+
+- **SyncPolicy** adjusts how often the data is synced to disk. This value can be Never, EverySecond, or Always. Default is EverySecond.
+- **AutoShrinkPercentage** is used by the background process to trigger a shrink of the aof file when the size of the file is larger than the percentage of the result of the previous shrunk file. For example, if this value is 100, and the last shrink process resulted in a 100mb file, then the new aof file must be 200mb before a shrink is triggered. Default is 100.
+- **AutoShrinkMinSize** defines the minimum size of the aof file before an automatic shrink can occur. Default is 32MB.
+- **AutoShrinkDisabled** turns off automatic background shrinking. Default is false.
+
+To update the configuration you should call `ReadConfig` followed by `SetConfig`. For example:
+
+```go
+var config buntdb.Config
+if err := db.ReadConfig(&config); err != nil {
+    log.Fatal(err)
+}
+if err := db.SetConfig(config); err != nil {
+    log.Fatal(err)
+}
+```
+
+## Performance
+
+How fast is BuntDB?
+
+Here are some example [benchmarks](https://github.com/tidwall/raft-buntdb#raftstore-performance-comparison) when using BuntDB in a Raft Store implementation.
+
+You can also run the standard Go benchmark tool from the project root directory:
+
+```
+go test --bench=.
+```
+
+### BuntDB-Benchmark
+
+There's a [custom utility](https://github.com/tidwall/buntdb-benchmark) that was created specifically for benchmarking BuntDB.
+ +*These are the results from running the benchmarks on a MacBook Pro 15" 2.8 GHz Intel Core i7:* + +``` +$ buntdb-benchmark -q +GET: 4609604.74 operations per second +SET: 248500.33 operations per second +ASCEND_100: 2268998.79 operations per second +ASCEND_200: 1178388.14 operations per second +ASCEND_400: 679134.20 operations per second +ASCEND_800: 348445.55 operations per second +DESCEND_100: 2313821.69 operations per second +DESCEND_200: 1292738.38 operations per second +DESCEND_400: 675258.76 operations per second +DESCEND_800: 337481.67 operations per second +SPATIAL_SET: 134824.60 operations per second +SPATIAL_INTERSECTS_100: 939491.47 operations per second +SPATIAL_INTERSECTS_200: 561590.40 operations per second +SPATIAL_INTERSECTS_400: 306951.15 operations per second +SPATIAL_INTERSECTS_800: 159673.91 operations per second +``` + +To install this utility: + +``` +go get github.com/tidwall/buntdb-benchmark +``` + + + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +BuntDB source code is available under the MIT [License](/LICENSE). diff --git a/vendor/github.com/tidwall/buntdb/buntdb.go b/vendor/github.com/tidwall/buntdb/buntdb.go new file mode 100644 index 00000000..9b8d28b1 --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/buntdb.go @@ -0,0 +1,2183 @@ +// Package buntdb implements a low-level in-memory key/value store in pure Go. +// It persists to disk, is ACID compliant, and uses locking for multiple +// readers and a single writer. Bunt is ideal for projects that need +// a dependable database, and favor speed over data size. +package buntdb + +import ( + "bufio" + "errors" + "io" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/tidwall/btree" + "github.com/tidwall/gjson" + "github.com/tidwall/grect" + "github.com/tidwall/match" + "github.com/tidwall/rtree" +) + +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrNotFound is returned when an item or index is not in the database. + ErrNotFound = errors.New("not found") + + // ErrInvalid is returned when the database file is an invalid format. + ErrInvalid = errors.New("invalid database") + + // ErrDatabaseClosed is returned when the database is closed. + ErrDatabaseClosed = errors.New("database closed") + + // ErrIndexExists is returned when an index already exists in the database. + ErrIndexExists = errors.New("index exists") + + // ErrInvalidOperation is returned when an operation cannot be completed. + ErrInvalidOperation = errors.New("invalid operation") + + // ErrInvalidSyncPolicy is returned for an invalid SyncPolicy value. + ErrInvalidSyncPolicy = errors.New("invalid sync policy") + + // ErrShrinkInProcess is returned when a shrink operation is in-process. + ErrShrinkInProcess = errors.New("shrink is in-process") + + // ErrPersistenceActive is returned when post-loading data from an database + // not opened with Open(":memory:"). + ErrPersistenceActive = errors.New("persistence active") + + // ErrTxIterating is returned when Set or Delete are called while iterating. + ErrTxIterating = errors.New("tx is iterating") +) + +// DB represents a collection of key-value pairs that persist on disk. +// Transactions are used for all forms of data access to the DB. 
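+//
+// A minimal round trip, as a sketch (error handling elided; the key and value
+// are illustrative):
+//
+//	db, _ := Open(":memory:")
+//	defer db.Close()
+//	_ = db.Update(func(tx *Tx) error {
+//		_, _, err := tx.Set("mykey", "myvalue", nil)
+//		return err
+//	})
+//	_ = db.View(func(tx *Tx) error {
+//		val, err := tx.Get("mykey")
+//		_ = val
+//		return err
+//	})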
+type DB struct { + mu sync.RWMutex // the gatekeeper for all fields + file *os.File // the underlying file + buf []byte // a buffer to write to + keys *btree.BTree // a tree of all item ordered by key + exps *btree.BTree // a tree of items ordered by expiration + idxs map[string]*index // the index trees. + exmgr bool // indicates that expires manager is running. + flushes int // a count of the number of disk flushes + closed bool // set when the database has been closed + config Config // the database configuration + persist bool // do we write to disk + shrinking bool // when an aof shrink is in-process. + lastaofsz int // the size of the last shrink aof size +} + +// SyncPolicy represents how often data is synced to disk. +type SyncPolicy int + +const ( + // Never is used to disable syncing data to disk. + // The faster and less safe method. + Never SyncPolicy = 0 + // EverySecond is used to sync data to disk every second. + // It's pretty fast and you can lose 1 second of data if there + // is a disaster. + // This is the recommended setting. + EverySecond = 1 + // Always is used to sync data after every write to disk. + // Slow. Very safe. + Always = 2 +) + +// Config represents database configuration options. These +// options are used to change various behaviors of the database. +type Config struct { + // SyncPolicy adjusts how often the data is synced to disk. + // This value can be Never, EverySecond, or Always. + // The default is EverySecond. + SyncPolicy SyncPolicy + + // AutoShrinkPercentage is used by the background process to trigger + // a shrink of the aof file when the size of the file is larger than the + // percentage of the result of the previous shrunk file. + // For example, if this value is 100, and the last shrink process + // resulted in a 100mb file, then the new aof file must be 200mb before + // a shrink is triggered. + AutoShrinkPercentage int + + // AutoShrinkMinSize defines the minimum size of the aof file before + // an automatic shrink can occur. + AutoShrinkMinSize int + + // AutoShrinkDisabled turns off automatic background shrinking + AutoShrinkDisabled bool + + // OnExpired is used to custom handle the deletion option when a key + // has been expired. + OnExpired func(keys []string) + + // OnExpiredSync will be called inside the same transaction that is performing + // the deletion of expired items. If OnExpired is present then this callback + // will not be called. If this callback is present, then the deletion of the + // timeed-out item is the explicit responsibility of this callback. + OnExpiredSync func(key, value string, tx *Tx) error +} + +// exctx is a simple b-tree context for ordering by expiration. +type exctx struct { + db *DB +} + +// Default number of btree degrees +const btreeDegrees = 64 + +// Open opens a database at the provided path. +// If the file does not exist then it will be created automatically. +func Open(path string) (*DB, error) { + db := &DB{} + // initialize trees and indexes + db.keys = btree.New(btreeDegrees, nil) + db.exps = btree.New(btreeDegrees, &exctx{db}) + db.idxs = make(map[string]*index) + // initialize default configuration + db.config = Config{ + SyncPolicy: EverySecond, + AutoShrinkPercentage: 100, + AutoShrinkMinSize: 32 * 1024 * 1024, + } + // turn off persistence for pure in-memory + db.persist = path != ":memory:" + if db.persist { + var err error + // hardcoding 0666 as the default mode. 
+ db.file, err = os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return nil, err + } + // load the database from disk + if err := db.load(); err != nil { + // close on error, ignore close error + _ = db.file.Close() + return nil, err + } + } + // start the background manager. + go db.backgroundManager() + return db, nil +} + +// Close releases all database resources. +// All transactions must be closed before closing the database. +func (db *DB) Close() error { + db.mu.Lock() + defer db.mu.Unlock() + if db.closed { + return ErrDatabaseClosed + } + db.closed = true + if db.persist { + db.file.Sync() // do a sync but ignore the error + if err := db.file.Close(); err != nil { + return err + } + } + // Let's release all references to nil. This will help both with debugging + // late usage panics and it provides a hint to the garbage collector + db.keys, db.exps, db.idxs, db.file = nil, nil, nil, nil + return nil +} + +// Save writes a snapshot of the database to a writer. This operation blocks all +// writes, but not reads. This can be used for snapshots and backups for pure +// in-memory databases using the ":memory:". Database that persist to disk +// can be snapshotted by simply copying the database file. +func (db *DB) Save(wr io.Writer) error { + var err error + db.mu.RLock() + defer db.mu.RUnlock() + // use a buffered writer and flush every 4MB + var buf []byte + // iterated through every item in the database and write to the buffer + db.keys.Ascend(func(item btree.Item) bool { + dbi := item.(*dbItem) + buf = dbi.writeSetTo(buf) + if len(buf) > 1024*1024*4 { + // flush when buffer is over 4MB + _, err = wr.Write(buf) + if err != nil { + return false + } + buf = buf[:0] + } + return true + }) + if err != nil { + return err + } + // one final flush + if len(buf) > 0 { + _, err = wr.Write(buf) + if err != nil { + return err + } + } + return nil +} + +// Load loads commands from reader. This operation blocks all reads and writes. +// Note that this can only work for fully in-memory databases opened with +// Open(":memory:"). +func (db *DB) Load(rd io.Reader) error { + db.mu.Lock() + defer db.mu.Unlock() + if db.persist { + // cannot load into databases that persist to disk + return ErrPersistenceActive + } + return db.readLoad(rd, time.Now()) +} + +// index represents a b-tree or r-tree index and also acts as the +// b-tree/r-tree context for itself. +type index struct { + btr *btree.BTree // contains the items + rtr *rtree.RTree // contains the items + name string // name of the index + pattern string // a required key pattern + less func(a, b string) bool // less comparison function + rect func(item string) (min, max []float64) // rect from string function + db *DB // the origin database + opts IndexOptions // index options +} + +// match matches the pattern to the key +func (idx *index) match(key string) bool { + if idx.pattern == "*" { + return true + } + if idx.opts.CaseInsensitiveKeyMatching { + for i := 0; i < len(key); i++ { + if key[i] >= 'A' && key[i] <= 'Z' { + key = strings.ToLower(key) + break + } + } + } + return match.Match(key, idx.pattern) +} + +// clearCopy creates a copy of the index, but with an empty dataset. 
+func (idx *index) clearCopy() *index { + // copy the index meta information + nidx := &index{ + name: idx.name, + pattern: idx.pattern, + db: idx.db, + less: idx.less, + rect: idx.rect, + opts: idx.opts, + } + // initialize with empty trees + if nidx.less != nil { + nidx.btr = btree.New(btreeDegrees, nidx) + } + if nidx.rect != nil { + nidx.rtr = rtree.New(nidx) + } + return nidx +} + +// rebuild rebuilds the index +func (idx *index) rebuild() { + // initialize trees + if idx.less != nil { + idx.btr = btree.New(btreeDegrees, idx) + } + if idx.rect != nil { + idx.rtr = rtree.New(idx) + } + // iterate through all keys and fill the index + idx.db.keys.Ascend(func(item btree.Item) bool { + dbi := item.(*dbItem) + if !idx.match(dbi.key) { + // does not match the pattern, conintue + return true + } + if idx.less != nil { + idx.btr.ReplaceOrInsert(dbi) + } + if idx.rect != nil { + idx.rtr.Insert(dbi) + } + return true + }) +} + +// CreateIndex builds a new index and populates it with items. +// The items are ordered in an b-tree and can be retrieved using the +// Ascend* and Descend* methods. +// An error will occur if an index with the same name already exists. +// +// When a pattern is provided, the index will be populated with +// keys that match the specified pattern. This is a very simple pattern +// match where '*' matches on any number characters and '?' matches on +// any one character. +// The less function compares if string 'a' is less than string 'b'. +// It allows for indexes to create custom ordering. It's possible +// that the strings may be textual or binary. It's up to the provided +// less function to handle the content format and comparison. +// There are some default less function that can be used such as +// IndexString, IndexBinary, etc. +func (db *DB) CreateIndex(name, pattern string, + less ...func(a, b string) bool) error { + return db.Update(func(tx *Tx) error { + return tx.CreateIndex(name, pattern, less...) + }) +} + +// ReplaceIndex builds a new index and populates it with items. +// The items are ordered in an b-tree and can be retrieved using the +// Ascend* and Descend* methods. +// If a previous index with the same name exists, that index will be deleted. +func (db *DB) ReplaceIndex(name, pattern string, + less ...func(a, b string) bool) error { + return db.Update(func(tx *Tx) error { + err := tx.CreateIndex(name, pattern, less...) + if err != nil { + if err == ErrIndexExists { + err := tx.DropIndex(name) + if err != nil { + return err + } + return tx.CreateIndex(name, pattern, less...) + } + return err + } + return nil + }) +} + +// CreateSpatialIndex builds a new index and populates it with items. +// The items are organized in an r-tree and can be retrieved using the +// Intersects method. +// An error will occur if an index with the same name already exists. +// +// The rect function converts a string to a rectangle. The rectangle is +// represented by two arrays, min and max. Both arrays may have a length +// between 1 and 20, and both arrays must match in length. A length of 1 is a +// one dimensional rectangle, and a length of 4 is a four dimension rectangle. +// There is support for up to 20 dimensions. +// The values of min must be less than the values of max at the same dimension. +// Thus min[0] must be less-than-or-equal-to max[0]. +// The IndexRect is a default function that can be used for the rect +// parameter. 
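+//
+// A minimal usage sketch (the index name and key pattern are illustrative):
+//
+//	err := db.CreateSpatialIndex("fleet", "fleet:*:pos", IndexRect)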
+func (db *DB) CreateSpatialIndex(name, pattern string, + rect func(item string) (min, max []float64)) error { + return db.Update(func(tx *Tx) error { + return tx.CreateSpatialIndex(name, pattern, rect) + }) +} + +// ReplaceSpatialIndex builds a new index and populates it with items. +// The items are organized in an r-tree and can be retrieved using the +// Intersects method. +// If a previous index with the same name exists, that index will be deleted. +func (db *DB) ReplaceSpatialIndex(name, pattern string, + rect func(item string) (min, max []float64)) error { + return db.Update(func(tx *Tx) error { + err := tx.CreateSpatialIndex(name, pattern, rect) + if err != nil { + if err == ErrIndexExists { + err := tx.DropIndex(name) + if err != nil { + return err + } + return tx.CreateSpatialIndex(name, pattern, rect) + } + return err + } + return nil + }) +} + +// DropIndex removes an index. +func (db *DB) DropIndex(name string) error { + return db.Update(func(tx *Tx) error { + return tx.DropIndex(name) + }) +} + +// Indexes returns a list of index names. +func (db *DB) Indexes() ([]string, error) { + var names []string + var err = db.View(func(tx *Tx) error { + var err error + names, err = tx.Indexes() + return err + }) + return names, err +} + +// ReadConfig returns the database configuration. +func (db *DB) ReadConfig(config *Config) error { + db.mu.RLock() + defer db.mu.RUnlock() + if db.closed { + return ErrDatabaseClosed + } + *config = db.config + return nil +} + +// SetConfig updates the database configuration. +func (db *DB) SetConfig(config Config) error { + db.mu.Lock() + defer db.mu.Unlock() + if db.closed { + return ErrDatabaseClosed + } + switch config.SyncPolicy { + default: + return ErrInvalidSyncPolicy + case Never, EverySecond, Always: + } + db.config = config + return nil +} + +// insertIntoDatabase performs inserts an item in to the database and updates +// all indexes. If a previous item with the same key already exists, that item +// will be replaced with the new one, and return the previous item. +func (db *DB) insertIntoDatabase(item *dbItem) *dbItem { + var pdbi *dbItem + prev := db.keys.ReplaceOrInsert(item) + if prev != nil { + // A previous item was removed from the keys tree. Let's + // fully delete this item from all indexes. + pdbi = prev.(*dbItem) + if pdbi.opts != nil && pdbi.opts.ex { + // Remove it from the exipres tree. + db.exps.Delete(pdbi) + } + for _, idx := range db.idxs { + if idx.btr != nil { + // Remove it from the btree index. + idx.btr.Delete(pdbi) + } + if idx.rtr != nil { + // Remove it from the rtree index. + idx.rtr.Remove(pdbi) + } + } + } + if item.opts != nil && item.opts.ex { + // The new item has eviction options. Add it to the + // expires tree + db.exps.ReplaceOrInsert(item) + } + for _, idx := range db.idxs { + if !idx.match(item.key) { + continue + } + if idx.btr != nil { + // Add new item to btree index. + idx.btr.ReplaceOrInsert(item) + } + if idx.rtr != nil { + // Add new item to rtree index. + idx.rtr.Insert(item) + } + } + // we must return the previous item to the caller. + return pdbi +} + +// deleteFromDatabase removes and item from the database and indexes. The input +// item must only have the key field specified thus "&dbItem{key: key}" is all +// that is needed to fully remove the item with the matching key. If an item +// with the matching key was found in the database, it will be removed and +// returned to the caller. 
A nil return value means that the item was not +// found in the database +func (db *DB) deleteFromDatabase(item *dbItem) *dbItem { + var pdbi *dbItem + prev := db.keys.Delete(item) + if prev != nil { + pdbi = prev.(*dbItem) + if pdbi.opts != nil && pdbi.opts.ex { + // Remove it from the exipres tree. + db.exps.Delete(pdbi) + } + for _, idx := range db.idxs { + if idx.btr != nil { + // Remove it from the btree index. + idx.btr.Delete(pdbi) + } + if idx.rtr != nil { + // Remove it from the rtree index. + idx.rtr.Remove(pdbi) + } + } + } + return pdbi +} + +// backgroundManager runs continuously in the background and performs various +// operations such as removing expired items and syncing to disk. +func (db *DB) backgroundManager() { + flushes := 0 + t := time.NewTicker(time.Second) + defer t.Stop() + for range t.C { + var shrink bool + // Open a standard view. This will take a full lock of the + // database thus allowing for access to anything we need. + var onExpired func([]string) + var expired []*dbItem + var onExpiredSync func(key, value string, tx *Tx) error + err := db.Update(func(tx *Tx) error { + onExpired = db.config.OnExpired + if onExpired == nil { + onExpiredSync = db.config.OnExpiredSync + } + if db.persist && !db.config.AutoShrinkDisabled { + pos, err := db.file.Seek(0, 1) + if err != nil { + return err + } + aofsz := int(pos) + if aofsz > db.config.AutoShrinkMinSize { + prc := float64(db.config.AutoShrinkPercentage) / 100.0 + shrink = aofsz > db.lastaofsz+int(float64(db.lastaofsz)*prc) + } + } + // produce a list of expired items that need removing + db.exps.AscendLessThan(&dbItem{ + opts: &dbItemOpts{ex: true, exat: time.Now()}, + }, func(item btree.Item) bool { + expired = append(expired, item.(*dbItem)) + return true + }) + if onExpired == nil && onExpiredSync == nil { + for _, itm := range expired { + if _, err := tx.Delete(itm.key); err != nil { + // it's ok to get a "not found" because the + // 'Delete' method reports "not found" for + // expired items. + if err != ErrNotFound { + return err + } + } + } + } else if onExpiredSync != nil { + for _, itm := range expired { + if err := onExpiredSync(itm.key, itm.val, tx); err != nil { + return err + } + } + } + return nil + }) + if err == ErrDatabaseClosed { + break + } + + // send expired event, if needed + if onExpired != nil && len(expired) > 0 { + keys := make([]string, 0, 32) + for _, itm := range expired { + keys = append(keys, itm.key) + } + onExpired(keys) + } + + // execute a disk sync, if needed + func() { + db.mu.Lock() + defer db.mu.Unlock() + if db.persist && db.config.SyncPolicy == EverySecond && + flushes != db.flushes { + _ = db.file.Sync() + flushes = db.flushes + } + }() + if shrink { + if err = db.Shrink(); err != nil { + if err == ErrDatabaseClosed { + break + } + } + } + } +} + +// Shrink will make the database file smaller by removing redundant +// log entries. This operation does not block the database. +func (db *DB) Shrink() error { + db.mu.Lock() + if db.closed { + db.mu.Unlock() + return ErrDatabaseClosed + } + if !db.persist { + // The database was opened with ":memory:" as the path. + // There is no persistence, and no need to do anything here. + db.mu.Unlock() + return nil + } + if db.shrinking { + // The database is already in the process of shrinking. 
+ db.mu.Unlock() + return ErrShrinkInProcess + } + db.shrinking = true + defer func() { + db.mu.Lock() + db.shrinking = false + db.mu.Unlock() + }() + fname := db.file.Name() + tmpname := fname + ".tmp" + // the endpos is used to return to the end of the file when we are + // finished writing all of the current items. + endpos, err := db.file.Seek(0, 2) + if err != nil { + return err + } + db.mu.Unlock() + time.Sleep(time.Second / 4) // wait just a bit before starting + f, err := os.Create(tmpname) + if err != nil { + return err + } + defer func() { + _ = f.Close() + _ = os.RemoveAll(tmpname) + }() + + // we are going to read items in as chunks as to not hold up the database + // for too long. + var buf []byte + pivot := "" + done := false + for !done { + err := func() error { + db.mu.RLock() + defer db.mu.RUnlock() + if db.closed { + return ErrDatabaseClosed + } + done = true + var n int + db.keys.AscendGreaterOrEqual(&dbItem{key: pivot}, + func(item btree.Item) bool { + dbi := item.(*dbItem) + // 1000 items or 64MB buffer + if n > 1000 || len(buf) > 64*1024*1024 { + pivot = dbi.key + done = false + return false + } + buf = dbi.writeSetTo(buf) + n++ + return true + }, + ) + if len(buf) > 0 { + if _, err := f.Write(buf); err != nil { + return err + } + buf = buf[:0] + } + return nil + }() + if err != nil { + return err + } + } + // We reached this far so all of the items have been written to a new tmp + // There's some more work to do by appending the new line from the aof + // to the tmp file and finally swap the files out. + return func() error { + // We're wrapping this in a function to get the benefit of a defered + // lock/unlock. + db.mu.Lock() + defer db.mu.Unlock() + if db.closed { + return ErrDatabaseClosed + } + // We are going to open a new version of the aof file so that we do + // not change the seek position of the previous. This may cause a + // problem in the future if we choose to use syscall file locking. + aof, err := os.Open(fname) + if err != nil { + return err + } + defer func() { _ = aof.Close() }() + if _, err := aof.Seek(endpos, 0); err != nil { + return err + } + // Just copy all of the new commands that have occurred since we + // started the shrink process. + if _, err := io.Copy(f, aof); err != nil { + return err + } + // Close all files + if err := aof.Close(); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := db.file.Close(); err != nil { + return err + } + // Any failures below here is really bad. So just panic. + if err := os.Rename(tmpname, fname); err != nil { + panic(err) + } + db.file, err = os.OpenFile(fname, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + panic(err) + } + pos, err := db.file.Seek(0, 2) + if err != nil { + return err + } + db.lastaofsz = int(pos) + return nil + }() +} + +var errValidEOF = errors.New("valid eof") + +// readLoad reads from the reader and loads commands into the database. +// modTime is the modified time of the reader, should be no greater than +// the current time.Now(). +func (db *DB) readLoad(rd io.Reader, modTime time.Time) error { + data := make([]byte, 4096) + parts := make([]string, 0, 8) + r := bufio.NewReader(rd) + for { + // read a single command. + // first we should read the number of parts that the of the command + line, err := r.ReadBytes('\n') + if err != nil { + if len(line) > 0 { + // got an eof but also data. this should be an unexpected eof. 
+                return io.ErrUnexpectedEOF
+            }
+            if err == io.EOF {
+                break
+            }
+            return err
+        }
+        if line[0] != '*' {
+            return ErrInvalid
+        }
+        // convert the string number to an int
+        var n int
+        if len(line) == 4 && line[len(line)-2] == '\r' {
+            if line[1] < '0' || line[1] > '9' {
+                return ErrInvalid
+            }
+            n = int(line[1] - '0')
+        } else {
+            if len(line) < 5 || line[len(line)-2] != '\r' {
+                return ErrInvalid
+            }
+            for i := 1; i < len(line)-2; i++ {
+                if line[i] < '0' || line[i] > '9' {
+                    return ErrInvalid
+                }
+                n = n*10 + int(line[i]-'0')
+            }
+        }
+        // read each part of the command.
+        parts = parts[:0]
+        for i := 0; i < n; i++ {
+            // read the number of bytes of the part.
+            line, err := r.ReadBytes('\n')
+            if err != nil {
+                return err
+            }
+            if line[0] != '$' {
+                return ErrInvalid
+            }
+            // convert the string number to an int
+            var n int
+            if len(line) == 4 && line[len(line)-2] == '\r' {
+                if line[1] < '0' || line[1] > '9' {
+                    return ErrInvalid
+                }
+                n = int(line[1] - '0')
+            } else {
+                if len(line) < 5 || line[len(line)-2] != '\r' {
+                    return ErrInvalid
+                }
+                for i := 1; i < len(line)-2; i++ {
+                    if line[i] < '0' || line[i] > '9' {
+                        return ErrInvalid
+                    }
+                    n = n*10 + int(line[i]-'0')
+                }
+            }
+            // resize the read buffer
+            if len(data) < n+2 {
+                dataln := len(data)
+                for dataln < n+2 {
+                    dataln *= 2
+                }
+                data = make([]byte, dataln)
+            }
+            if _, err = io.ReadFull(r, data[:n+2]); err != nil {
+                return err
+            }
+            if data[n] != '\r' || data[n+1] != '\n' {
+                return ErrInvalid
+            }
+            // copy string
+            parts = append(parts, string(data[:n]))
+        }
+        // finished reading the command
+
+        if len(parts) == 0 {
+            continue
+        }
+        if (parts[0][0] == 's' || parts[0][0] == 'S') &&
+            (parts[0][1] == 'e' || parts[0][1] == 'E') &&
+            (parts[0][2] == 't' || parts[0][2] == 'T') {
+            // SET
+            if len(parts) < 3 || len(parts) == 4 || len(parts) > 5 {
+                return ErrInvalid
+            }
+            if len(parts) == 5 {
+                if strings.ToLower(parts[3]) != "ex" {
+                    return ErrInvalid
+                }
+                ex, err := strconv.ParseInt(parts[4], 10, 64)
+                if err != nil {
+                    return err
+                }
+                now := time.Now()
+                dur := (time.Duration(ex) * time.Second) - now.Sub(modTime)
+                if dur > 0 {
+                    db.insertIntoDatabase(&dbItem{
+                        key: parts[1],
+                        val: parts[2],
+                        opts: &dbItemOpts{
+                            ex:   true,
+                            exat: now.Add(dur),
+                        },
+                    })
+                }
+            } else {
+                db.insertIntoDatabase(&dbItem{key: parts[1], val: parts[2]})
+            }
+        } else if (parts[0][0] == 'd' || parts[0][0] == 'D') &&
+            (parts[0][1] == 'e' || parts[0][1] == 'E') &&
+            (parts[0][2] == 'l' || parts[0][2] == 'L') {
+            // DEL
+            if len(parts) != 2 {
+                return ErrInvalid
+            }
+            db.deleteFromDatabase(&dbItem{key: parts[1]})
+        } else if (parts[0][0] == 'f' || parts[0][0] == 'F') &&
+            strings.ToLower(parts[0]) == "flushdb" {
+            db.keys = btree.New(btreeDegrees, nil)
+            db.exps = btree.New(btreeDegrees, &exctx{db})
+            db.idxs = make(map[string]*index)
+        } else {
+            return ErrInvalid
+        }
+    }
+    return nil
+}
+
+// load reads entries from the append only database file and fills the database.
+// The file format uses the Redis append only file format, which is a series
+// of RESP commands. For more information on RESP please read
+// http://redis.io/topics/protocol. The only supported RESP commands are DEL and
+// SET.
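+//
+// As a sketch of the on-disk encoding, a `set key:1 value1` entry is stored as:
+//
+//     *3\r\n$3\r\nset\r\n$5\r\nkey:1\r\n$6\r\nvalue1\r\n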
+func (db *DB) load() error { + fi, err := db.file.Stat() + if err != nil { + return err + } + if err := db.readLoad(db.file, fi.ModTime()); err != nil { + return err + } + pos, err := db.file.Seek(0, 2) + if err != nil { + return err + } + db.lastaofsz = int(pos) + return nil +} + +// managed calls a block of code that is fully contained in a transaction. +// This method is intended to be wrapped by Update and View +func (db *DB) managed(writable bool, fn func(tx *Tx) error) (err error) { + var tx *Tx + tx, err = db.Begin(writable) + if err != nil { + return + } + defer func() { + if err != nil { + // The caller returned an error. We must rollback. + _ = tx.Rollback() + return + } + if writable { + // Everything went well. Lets Commit() + err = tx.Commit() + } else { + // read-only transaction can only roll back. + err = tx.Rollback() + } + }() + tx.funcd = true + defer func() { + tx.funcd = false + }() + err = fn(tx) + return +} + +// View executes a function within a managed read-only transaction. +// When a non-nil error is returned from the function that error will be return +// to the caller of View(). +// +// Executing a manual commit or rollback from inside the function will result +// in a panic. +func (db *DB) View(fn func(tx *Tx) error) error { + return db.managed(false, fn) +} + +// Update executes a function within a managed read/write transaction. +// The transaction has been committed when no error is returned. +// In the event that an error is returned, the transaction will be rolled back. +// When a non-nil error is returned from the function, the transaction will be +// rolled back and the that error will be return to the caller of Update(). +// +// Executing a manual commit or rollback from inside the function will result +// in a panic. +func (db *DB) Update(fn func(tx *Tx) error) error { + return db.managed(true, fn) +} + +// get return an item or nil if not found. +func (db *DB) get(key string) *dbItem { + item := db.keys.Get(&dbItem{key: key}) + if item != nil { + return item.(*dbItem) + } + return nil +} + +// Tx represents a transaction on the database. This transaction can either be +// read-only or read/write. Read-only transactions can be used for retrieving +// values for keys and iterating through keys and values. Read/write +// transactions can set and delete keys. +// +// All transactions must be committed or rolled-back when done. +type Tx struct { + db *DB // the underlying database. + writable bool // when false mutable operations fail. + funcd bool // when true Commit and Rollback panic. + wc *txWriteContext // context for writable transactions. +} + +type txWriteContext struct { + // rollback when deleteAll is called + rbkeys *btree.BTree // a tree of all item ordered by key + rbexps *btree.BTree // a tree of items ordered by expiration + rbidxs map[string]*index // the index trees. + + rollbackItems map[string]*dbItem // details for rolling back tx. + commitItems map[string]*dbItem // details for committing tx. + itercount int // stack of iterators + rollbackIndexes map[string]*index // details for dropped indexes. +} + +// DeleteAll deletes all items from the database. +func (tx *Tx) DeleteAll() error { + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return ErrTxIterating + } + + // check to see if we've already deleted everything + if tx.wc.rbkeys == nil { + // we need to backup the live data in case of a rollback. 
+ tx.wc.rbkeys = tx.db.keys + tx.wc.rbexps = tx.db.exps + tx.wc.rbidxs = tx.db.idxs + } + + // now reset the live database trees + tx.db.keys = btree.New(btreeDegrees, nil) + tx.db.exps = btree.New(btreeDegrees, &exctx{tx.db}) + tx.db.idxs = make(map[string]*index) + + // finally re-create the indexes + for name, idx := range tx.wc.rbidxs { + tx.db.idxs[name] = idx.clearCopy() + } + + // always clear out the commits + tx.wc.commitItems = make(map[string]*dbItem) + + return nil +} + +// Begin opens a new transaction. +// Multiple read-only transactions can be opened at the same time but there can +// only be one read/write transaction at a time. Attempting to open a read/write +// transactions while another one is in progress will result in blocking until +// the current read/write transaction is completed. +// +// All transactions must be closed by calling Commit() or Rollback() when done. +func (db *DB) Begin(writable bool) (*Tx, error) { + tx := &Tx{ + db: db, + writable: writable, + } + tx.lock() + if db.closed { + tx.unlock() + return nil, ErrDatabaseClosed + } + if writable { + // writable transactions have a writeContext object that + // contains information about changes to the database. + tx.wc = &txWriteContext{} + tx.wc.rollbackItems = make(map[string]*dbItem) + tx.wc.rollbackIndexes = make(map[string]*index) + if db.persist { + tx.wc.commitItems = make(map[string]*dbItem) + } + } + return tx, nil +} + +// lock locks the database based on the transaction type. +func (tx *Tx) lock() { + if tx.writable { + tx.db.mu.Lock() + } else { + tx.db.mu.RLock() + } +} + +// unlock unlocks the database based on the transaction type. +func (tx *Tx) unlock() { + if tx.writable { + tx.db.mu.Unlock() + } else { + tx.db.mu.RUnlock() + } +} + +// rollbackInner handles the underlying rollback logic. +// Intended to be called from Commit() and Rollback(). +func (tx *Tx) rollbackInner() { + // rollback the deleteAll if needed + if tx.wc.rbkeys != nil { + tx.db.keys = tx.wc.rbkeys + tx.db.idxs = tx.wc.rbidxs + tx.db.exps = tx.wc.rbexps + } + for key, item := range tx.wc.rollbackItems { + tx.db.deleteFromDatabase(&dbItem{key: key}) + if item != nil { + // When an item is not nil, we will need to reinsert that item + // into the database overwriting the current one. + tx.db.insertIntoDatabase(item) + } + } + for name, idx := range tx.wc.rollbackIndexes { + delete(tx.db.idxs, name) + if idx != nil { + // When an index is not nil, we will need to rebuilt that index + // this could be an expensive process if the database has many + // items or the index is complex. + tx.db.idxs[name] = idx + idx.rebuild() + } + } +} + +// Commit writes all changes to disk. +// An error is returned when a write error occurs, or when a Commit() is called +// from a read-only transaction. +func (tx *Tx) Commit() error { + if tx.funcd { + panic("managed tx commit not allowed") + } + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + var err error + if tx.db.persist && (len(tx.wc.commitItems) > 0 || tx.wc.rbkeys != nil) { + tx.db.buf = tx.db.buf[:0] + // write a flushdb if a deleteAll was called. + if tx.wc.rbkeys != nil { + tx.db.buf = append(tx.db.buf, "*1\r\n$7\r\nflushdb\r\n"...) + } + // Each committed record is written to disk + for key, item := range tx.wc.commitItems { + if item == nil { + tx.db.buf = (&dbItem{key: key}).writeDeleteTo(tx.db.buf) + } else { + tx.db.buf = item.writeSetTo(tx.db.buf) + } + } + // Flushing the buffer only once per transaction. 
+ // If this operation fails then the write did failed and we must + // rollback. + if _, err = tx.db.file.Write(tx.db.buf); err != nil { + tx.rollbackInner() + } + if tx.db.config.SyncPolicy == Always { + _ = tx.db.file.Sync() + } + // Increment the number of flushes. The background syncing uses this. + tx.db.flushes++ + } + // Unlock the database and allow for another writable transaction. + tx.unlock() + // Clear the db field to disable this transaction from future use. + tx.db = nil + return err +} + +// Rollback closes the transaction and reverts all mutable operations that +// were performed on the transaction such as Set() and Delete(). +// +// Read-only transactions can only be rolled back, not committed. +func (tx *Tx) Rollback() error { + if tx.funcd { + panic("managed tx rollback not allowed") + } + if tx.db == nil { + return ErrTxClosed + } + // The rollback func does the heavy lifting. + if tx.writable { + tx.rollbackInner() + } + // unlock the database for more transactions. + tx.unlock() + // Clear the db field to disable this transaction from future use. + tx.db = nil + return nil +} + +// dbItemOpts holds various meta information about an item. +type dbItemOpts struct { + ex bool // does this item expire? + exat time.Time // when does this item expire? +} +type dbItem struct { + key, val string // the binary key and value + opts *dbItemOpts // optional meta information + keyless bool // keyless item for scanning +} + +func appendArray(buf []byte, count int) []byte { + buf = append(buf, '*') + buf = append(buf, strconv.FormatInt(int64(count), 10)...) + buf = append(buf, '\r', '\n') + return buf +} + +func appendBulkString(buf []byte, s string) []byte { + buf = append(buf, '$') + buf = append(buf, strconv.FormatInt(int64(len(s)), 10)...) + buf = append(buf, '\r', '\n') + buf = append(buf, s...) + buf = append(buf, '\r', '\n') + return buf +} + +// writeSetTo writes an item as a single SET record to the a bufio Writer. +func (dbi *dbItem) writeSetTo(buf []byte) []byte { + if dbi.opts != nil && dbi.opts.ex { + ex := dbi.opts.exat.Sub(time.Now()) / time.Second + buf = appendArray(buf, 5) + buf = appendBulkString(buf, "set") + buf = appendBulkString(buf, dbi.key) + buf = appendBulkString(buf, dbi.val) + buf = appendBulkString(buf, "ex") + buf = appendBulkString(buf, strconv.FormatUint(uint64(ex), 10)) + } else { + buf = appendArray(buf, 3) + buf = appendBulkString(buf, "set") + buf = appendBulkString(buf, dbi.key) + buf = appendBulkString(buf, dbi.val) + } + return buf +} + +// writeSetTo writes an item as a single DEL record to the a bufio Writer. +func (dbi *dbItem) writeDeleteTo(buf []byte) []byte { + buf = appendArray(buf, 2) + buf = appendBulkString(buf, "del") + buf = appendBulkString(buf, dbi.key) + return buf +} + +// expired evaluates id the item has expired. This will always return false when +// the item does not have `opts.ex` set to true. +func (dbi *dbItem) expired() bool { + return dbi.opts != nil && dbi.opts.ex && time.Now().After(dbi.opts.exat) +} + +// MaxTime from http://stackoverflow.com/questions/25065055#32620397 +// This is a long time in the future. It's an imaginary number that is +// used for b-tree ordering. +var maxTime = time.Unix(1<<63-62135596801, 999999999) + +// expiresAt will return the time when the item will expire. When an item does +// not expire `maxTime` is used. 
+func (dbi *dbItem) expiresAt() time.Time { + if dbi.opts == nil || !dbi.opts.ex { + return maxTime + } + return dbi.opts.exat +} + +// Less determines if a b-tree item is less than another. This is required +// for ordering, inserting, and deleting items from a b-tree. It's important +// to note that the ctx parameter is used to help with determine which +// formula to use on an item. Each b-tree should use a different ctx when +// sharing the same item. +func (dbi *dbItem) Less(item btree.Item, ctx interface{}) bool { + dbi2 := item.(*dbItem) + switch ctx := ctx.(type) { + case *exctx: + // The expires b-tree formula + if dbi2.expiresAt().After(dbi.expiresAt()) { + return true + } + if dbi.expiresAt().After(dbi2.expiresAt()) { + return false + } + case *index: + if ctx.less != nil { + // Using an index + if ctx.less(dbi.val, dbi2.val) { + return true + } + if ctx.less(dbi2.val, dbi.val) { + return false + } + } + } + // Always fall back to the key comparison. This creates absolute uniqueness. + if dbi.keyless { + return false + } else if dbi2.keyless { + return true + } + return dbi.key < dbi2.key +} + +// Rect converts a string to a rectangle. +// An invalid rectangle will cause a panic. +func (dbi *dbItem) Rect(ctx interface{}) (min, max []float64) { + switch ctx := ctx.(type) { + case *index: + return ctx.rect(dbi.val) + } + return nil, nil +} + +// SetOptions represents options that may be included with the Set() command. +type SetOptions struct { + // Expires indicates that the Set() key-value will expire + Expires bool + // TTL is how much time the key-value will exist in the database + // before being evicted. The Expires field must also be set to true. + // TTL stands for Time-To-Live. + TTL time.Duration +} + +// GetLess returns the less function for an index. This is handy for +// doing ad-hoc compares inside a transaction. +// Returns ErrNotFound if the index is not found or there is no less +// function bound to the index +func (tx *Tx) GetLess(index string) (func(a, b string) bool, error) { + if tx.db == nil { + return nil, ErrTxClosed + } + idx, ok := tx.db.idxs[index] + if !ok || idx.less == nil { + return nil, ErrNotFound + } + return idx.less, nil +} + +// GetRect returns the rect function for an index. This is handy for +// doing ad-hoc searches inside a transaction. +// Returns ErrNotFound if the index is not found or there is no rect +// function bound to the index +func (tx *Tx) GetRect(index string) (func(s string) (min, max []float64), + error) { + if tx.db == nil { + return nil, ErrTxClosed + } + idx, ok := tx.db.idxs[index] + if !ok || idx.rect == nil { + return nil, ErrNotFound + } + return idx.rect, nil +} + +// Set inserts or replaces an item in the database based on the key. +// The opt params may be used for additional functionality such as forcing +// the item to be evicted at a specified time. When the return value +// for err is nil the operation succeeded. When the return value of +// replaced is true, then the operaton replaced an existing item whose +// value will be returned through the previousValue variable. +// The results of this operation will not be available to other +// transactions until the current transaction has successfully committed. +// +// Only a writable transaction can be used with this operation. +// This operation is not allowed during iterations such as Ascend* & Descend*. 
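+//
+// Example (illustrative sketch; the key, value, and TTL are arbitrary): store
+// a value that expires after one minute.
+//
+//	previous, replaced, err := tx.Set("session:1", "alice",
+//		&SetOptions{Expires: true, TTL: time.Minute})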
+func (tx *Tx) Set(key, value string, opts *SetOptions) (previousValue string, + replaced bool, err error) { + if tx.db == nil { + return "", false, ErrTxClosed + } else if !tx.writable { + return "", false, ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return "", false, ErrTxIterating + } + item := &dbItem{key: key, val: value} + if opts != nil { + if opts.Expires { + // The caller is requesting that this item expires. Convert the + // TTL to an absolute time and bind it to the item. + item.opts = &dbItemOpts{ex: true, exat: time.Now().Add(opts.TTL)} + } + } + // Insert the item into the keys tree. + prev := tx.db.insertIntoDatabase(item) + + // insert into the rollback map if there has not been a deleteAll. + if tx.wc.rbkeys == nil { + if prev == nil { + // An item with the same key did not previously exist. Let's + // create a rollback entry with a nil value. A nil value indicates + // that the entry should be deleted on rollback. When the value is + // *not* nil, that means the entry should be reverted. + tx.wc.rollbackItems[key] = nil + } else { + // A previous item already exists in the database. Let's create a + // rollback entry with the item as the value. We need to check the + // map to see if there isn't already an item that matches the + // same key. + if _, ok := tx.wc.rollbackItems[key]; !ok { + tx.wc.rollbackItems[key] = prev + } + if !prev.expired() { + previousValue, replaced = prev.val, true + } + } + } + // For commits we simply assign the item to the map. We use this map to + // write the entry to disk. + if tx.db.persist { + tx.wc.commitItems[key] = item + } + return previousValue, replaced, nil +} + +// Get returns a value for a key. If the item does not exist or if the item +// has expired then ErrNotFound is returned. If ignoreExpired is true, then +// the found value will be returned even if it is expired. +func (tx *Tx) Get(key string, ignoreExpired ...bool) (val string, err error) { + if tx.db == nil { + return "", ErrTxClosed + } + var ignore bool + if len(ignoreExpired) != 0 { + ignore = ignoreExpired[0] + } + item := tx.db.get(key) + if item == nil || (item.expired() && !ignore) { + // The item does not exists or has expired. Let's assume that + // the caller is only interested in items that have not expired. + return "", ErrNotFound + } + return item.val, nil +} + +// Delete removes an item from the database based on the item's key. If the item +// does not exist or if the item has expired then ErrNotFound is returned. +// +// Only a writable transaction can be used for this operation. +// This operation is not allowed during iterations such as Ascend* & Descend*. +func (tx *Tx) Delete(key string) (val string, err error) { + if tx.db == nil { + return "", ErrTxClosed + } else if !tx.writable { + return "", ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return "", ErrTxIterating + } + item := tx.db.deleteFromDatabase(&dbItem{key: key}) + if item == nil { + return "", ErrNotFound + } + // create a rollback entry if there has not been a deleteAll call. + if tx.wc.rbkeys == nil { + if _, ok := tx.wc.rollbackItems[key]; !ok { + tx.wc.rollbackItems[key] = item + } + } + if tx.db.persist { + tx.wc.commitItems[key] = nil + } + // Even though the item has been deleted, we still want to check + // if it has expired. An expired item should not be returned. + if item.expired() { + // The item exists in the tree, but has expired. Let's assume that + // the caller is only interested in items that have not expired. 
+ return "", ErrNotFound + } + return item.val, nil +} + +// TTL returns the remaining time-to-live for an item. +// A negative duration will be returned for items that do not have an +// expiration. +func (tx *Tx) TTL(key string) (time.Duration, error) { + if tx.db == nil { + return 0, ErrTxClosed + } + item := tx.db.get(key) + if item == nil { + return 0, ErrNotFound + } else if item.opts == nil || !item.opts.ex { + return -1, nil + } + dur := item.opts.exat.Sub(time.Now()) + if dur < 0 { + return 0, ErrNotFound + } + return dur, nil +} + +// scan iterates through a specified index and calls user-defined iterator +// function for each item encountered. +// The desc param indicates that the iterator should descend. +// The gt param indicates that there is a greaterThan limit. +// The lt param indicates that there is a lessThan limit. +// The index param tells the scanner to use the specified index tree. An +// empty string for the index means to scan the keys, not the values. +// The start and stop params are the greaterThan, lessThan limits. For +// descending order, these will be lessThan, greaterThan. +// An error will be returned if the tx is closed or the index is not found. +func (tx *Tx) scan(desc, gt, lt bool, index, start, stop string, + iterator func(key, value string) bool) error { + if tx.db == nil { + return ErrTxClosed + } + // wrap a btree specific iterator around the user-defined iterator. + iter := func(item btree.Item) bool { + dbi := item.(*dbItem) + return iterator(dbi.key, dbi.val) + } + var tr *btree.BTree + if index == "" { + // empty index means we will use the keys tree. + tr = tx.db.keys + } else { + idx := tx.db.idxs[index] + if idx == nil { + // index was not found. return error + return ErrNotFound + } + tr = idx.btr + if tr == nil { + return nil + } + } + // create some limit items + var itemA, itemB *dbItem + if gt || lt { + if index == "" { + itemA = &dbItem{key: start} + itemB = &dbItem{key: stop} + } else { + itemA = &dbItem{val: start} + itemB = &dbItem{val: stop} + if desc { + itemA.keyless = true + itemB.keyless = true + } + } + } + // execute the scan on the underlying tree. + if tx.wc != nil { + tx.wc.itercount++ + defer func() { + tx.wc.itercount-- + }() + } + if desc { + if gt { + if lt { + tr.DescendRange(itemA, itemB, iter) + } else { + tr.DescendGreaterThan(itemA, iter) + } + } else if lt { + tr.DescendLessOrEqual(itemA, iter) + } else { + tr.Descend(iter) + } + } else { + if gt { + if lt { + tr.AscendRange(itemA, itemB, iter) + } else { + tr.AscendGreaterOrEqual(itemA, iter) + } + } else if lt { + tr.AscendLessThan(itemA, iter) + } else { + tr.Ascend(iter) + } + } + return nil +} + +// Match returns true if the specified key matches the pattern. This is a very +// simple pattern matcher where '*' matches on any number characters and '?' +// matches on any one character. +func Match(key, pattern string) bool { + return match.Match(key, pattern) +} + +// AscendKeys allows for iterating through keys based on the specified pattern. 
+func (tx *Tx) AscendKeys(pattern string, + iterator func(key, value string) bool) error { + if pattern == "" { + return nil + } + if pattern[0] == '*' { + if pattern == "*" { + return tx.Ascend("", iterator) + } + return tx.Ascend("", func(key, value string) bool { + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) + } + min, max := match.Allowable(pattern) + return tx.AscendGreaterOrEqual("", min, func(key, value string) bool { + if key > max { + return false + } + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) +} + +// DescendKeys allows for iterating through keys based on the specified pattern. +func (tx *Tx) DescendKeys(pattern string, + iterator func(key, value string) bool) error { + if pattern == "" { + return nil + } + if pattern[0] == '*' { + if pattern == "*" { + return tx.Descend("", iterator) + } + return tx.Descend("", func(key, value string) bool { + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) + } + min, max := match.Allowable(pattern) + return tx.DescendLessOrEqual("", max, func(key, value string) bool { + if key < min { + return false + } + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) +} + +// Ascend calls the iterator for every item in the database within the range +// [first, last], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) Ascend(index string, + iterator func(key, value string) bool) error { + return tx.scan(false, false, false, index, "", "", iterator) +} + +// AscendGreaterOrEqual calls the iterator for every item in the database within +// the range [pivot, last], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) AscendGreaterOrEqual(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(false, true, false, index, pivot, "", iterator) +} + +// AscendLessThan calls the iterator for every item in the database within the +// range [first, pivot), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) AscendLessThan(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(false, false, true, index, pivot, "", iterator) +} + +// AscendRange calls the iterator for every item in the database within +// the range [greaterOrEqual, lessThan), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. 
+func (tx *Tx) AscendRange(index, greaterOrEqual, lessThan string, + iterator func(key, value string) bool) error { + return tx.scan( + false, true, true, index, greaterOrEqual, lessThan, iterator, + ) +} + +// Descend calls the iterator for every item in the database within the range +// [last, first], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) Descend(index string, + iterator func(key, value string) bool) error { + return tx.scan(true, false, false, index, "", "", iterator) +} + +// DescendGreaterThan calls the iterator for every item in the database within +// the range [last, pivot), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) DescendGreaterThan(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(true, true, false, index, pivot, "", iterator) +} + +// DescendLessOrEqual calls the iterator for every item in the database within +// the range [pivot, first], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) DescendLessOrEqual(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(true, false, true, index, pivot, "", iterator) +} + +// DescendRange calls the iterator for every item in the database within +// the range [lessOrEqual, greaterThan), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) DescendRange(index, lessOrEqual, greaterThan string, + iterator func(key, value string) bool) error { + return tx.scan( + true, true, true, index, lessOrEqual, greaterThan, iterator, + ) +} + +// AscendEqual calls the iterator for every item in the database that equals +// pivot, until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) AscendEqual(index, pivot string, + iterator func(key, value string) bool) error { + var err error + var less func(a, b string) bool + if index != "" { + less, err = tx.GetLess(index) + if err != nil { + return err + } + } + return tx.AscendGreaterOrEqual(index, pivot, func(key, value string) bool { + if less == nil { + if key != pivot { + return false + } + } else if less(pivot, value) { + return false + } + return iterator(key, value) + }) +} + +// DescendEqual calls the iterator for every item in the database that equals +// pivot, until iterator returns false. 
+// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) DescendEqual(index, pivot string, + iterator func(key, value string) bool) error { + var err error + var less func(a, b string) bool + if index != "" { + less, err = tx.GetLess(index) + if err != nil { + return err + } + } + return tx.DescendLessOrEqual(index, pivot, func(key, value string) bool { + if less == nil { + if key != pivot { + return false + } + } else if less(value, pivot) { + return false + } + return iterator(key, value) + }) +} + +// rect is used by Intersects and Nearby +type rect struct { + min, max []float64 +} + +func (r *rect) Rect(ctx interface{}) (min, max []float64) { + return r.min, r.max +} + +// Nearby searches for rectangle items that are nearby a target rect. +// All items belonging to the specified index will be returned in order of +// nearest to farthest. +// The specified index must have been created by AddIndex() and the target +// is represented by the rect string. This string will be processed by the +// same bounds function that was passed to the CreateSpatialIndex() function. +// An invalid index will return an error. +// The dist param is the distance of the bounding boxes. In the case of +// simple 2D points, it's the distance of the two 2D points squared. +func (tx *Tx) Nearby(index, bounds string, + iterator func(key, value string, dist float64) bool) error { + if tx.db == nil { + return ErrTxClosed + } + if index == "" { + // cannot search on keys tree. just return nil. + return nil + } + // // wrap a rtree specific iterator around the user-defined iterator. + iter := func(item rtree.Item, dist float64) bool { + dbi := item.(*dbItem) + return iterator(dbi.key, dbi.val, dist) + } + idx := tx.db.idxs[index] + if idx == nil { + // index was not found. return error + return ErrNotFound + } + if idx.rtr == nil { + // not an r-tree index. just return nil + return nil + } + // execute the nearby search + var min, max []float64 + if idx.rect != nil { + min, max = idx.rect(bounds) + } + // set the center param to false, which uses the box dist calc. + idx.rtr.KNN(&rect{min, max}, false, iter) + return nil +} + +// Intersects searches for rectangle items that intersect a target rect. +// The specified index must have been created by AddIndex() and the target +// is represented by the rect string. This string will be processed by the +// same bounds function that was passed to the CreateSpatialIndex() function. +// An invalid index will return an error. +func (tx *Tx) Intersects(index, bounds string, + iterator func(key, value string) bool) error { + if tx.db == nil { + return ErrTxClosed + } + if index == "" { + // cannot search on keys tree. just return nil. + return nil + } + // wrap a rtree specific iterator around the user-defined iterator. + iter := func(item rtree.Item) bool { + dbi := item.(*dbItem) + return iterator(dbi.key, dbi.val) + } + idx := tx.db.idxs[index] + if idx == nil { + // index was not found. return error + return ErrNotFound + } + if idx.rtr == nil { + // not an r-tree index. 
just return nil + return nil + } + // execute the search + var min, max []float64 + if idx.rect != nil { + min, max = idx.rect(bounds) + } + idx.rtr.Search(&rect{min, max}, iter) + return nil +} + +// Len returns the number of items in the database +func (tx *Tx) Len() (int, error) { + if tx.db == nil { + return 0, ErrTxClosed + } + return tx.db.keys.Len(), nil +} + +// IndexOptions provides an index with additional features or +// alternate functionality. +type IndexOptions struct { + // CaseInsensitiveKeyMatching allow for case-insensitive + // matching on keys when setting key/values. + CaseInsensitiveKeyMatching bool +} + +// CreateIndex builds a new index and populates it with items. +// The items are ordered in an b-tree and can be retrieved using the +// Ascend* and Descend* methods. +// An error will occur if an index with the same name already exists. +// +// When a pattern is provided, the index will be populated with +// keys that match the specified pattern. This is a very simple pattern +// match where '*' matches on any number characters and '?' matches on +// any one character. +// The less function compares if string 'a' is less than string 'b'. +// It allows for indexes to create custom ordering. It's possible +// that the strings may be textual or binary. It's up to the provided +// less function to handle the content format and comparison. +// There are some default less function that can be used such as +// IndexString, IndexBinary, etc. +func (tx *Tx) CreateIndex(name, pattern string, + less ...func(a, b string) bool) error { + return tx.createIndex(name, pattern, less, nil, nil) +} + +// CreateIndexOptions is the same as CreateIndex except that it allows +// for additional options. +func (tx *Tx) CreateIndexOptions(name, pattern string, + opts *IndexOptions, + less ...func(a, b string) bool) error { + return tx.createIndex(name, pattern, less, nil, opts) +} + +// CreateSpatialIndex builds a new index and populates it with items. +// The items are organized in an r-tree and can be retrieved using the +// Intersects method. +// An error will occur if an index with the same name already exists. +// +// The rect function converts a string to a rectangle. The rectangle is +// represented by two arrays, min and max. Both arrays may have a length +// between 1 and 20, and both arrays must match in length. A length of 1 is a +// one dimensional rectangle, and a length of 4 is a four dimension rectangle. +// There is support for up to 20 dimensions. +// The values of min must be less than the values of max at the same dimension. +// Thus min[0] must be less-than-or-equal-to max[0]. +// The IndexRect is a default function that can be used for the rect +// parameter. +func (tx *Tx) CreateSpatialIndex(name, pattern string, + rect func(item string) (min, max []float64)) error { + return tx.createIndex(name, pattern, nil, rect, nil) +} + +// CreateSpatialIndexOptions is the same as CreateSpatialIndex except that +// it allows for additional options. 
+func (tx *Tx) CreateSpatialIndexOptions(name, pattern string, + opts *IndexOptions, + rect func(item string) (min, max []float64)) error { + return tx.createIndex(name, pattern, nil, rect, nil) +} + +// createIndex is called by CreateIndex() and CreateSpatialIndex() +func (tx *Tx) createIndex(name string, pattern string, + lessers []func(a, b string) bool, + rect func(item string) (min, max []float64), + opts *IndexOptions, +) error { + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return ErrTxIterating + } + if name == "" { + // cannot create an index without a name. + // an empty name index is designated for the main "keys" tree. + return ErrIndexExists + } + // check if an index with that name already exists. + if _, ok := tx.db.idxs[name]; ok { + // index with name already exists. error. + return ErrIndexExists + } + // genreate a less function + var less func(a, b string) bool + switch len(lessers) { + default: + // multiple less functions specified. + // create a compound less function. + less = func(a, b string) bool { + for i := 0; i < len(lessers)-1; i++ { + if lessers[i](a, b) { + return true + } + if lessers[i](b, a) { + return false + } + } + return lessers[len(lessers)-1](a, b) + } + case 0: + // no less function + case 1: + less = lessers[0] + } + var sopts IndexOptions + if opts != nil { + sopts = *opts + } + if sopts.CaseInsensitiveKeyMatching { + pattern = strings.ToLower(pattern) + } + // intialize new index + idx := &index{ + name: name, + pattern: pattern, + less: less, + rect: rect, + db: tx.db, + opts: sopts, + } + idx.rebuild() + // save the index + tx.db.idxs[name] = idx + if tx.wc.rbkeys == nil { + // store the index in the rollback map. + if _, ok := tx.wc.rollbackIndexes[name]; !ok { + // we use nil to indicate that the index should be removed upon rollback. + tx.wc.rollbackIndexes[name] = nil + } + } + return nil +} + +// DropIndex removes an index. +func (tx *Tx) DropIndex(name string) error { + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return ErrTxIterating + } + if name == "" { + // cannot drop the default "keys" index + return ErrInvalidOperation + } + idx, ok := tx.db.idxs[name] + if !ok { + return ErrNotFound + } + // delete from the map. + // this is all that is needed to delete an index. + delete(tx.db.idxs, name) + if tx.wc.rbkeys == nil { + // store the index in the rollback map. + if _, ok := tx.wc.rollbackIndexes[name]; !ok { + // we use a non-nil copy of the index without the data to indicate that the + // index should be rebuilt upon rollback. + tx.wc.rollbackIndexes[name] = idx.clearCopy() + } + } + return nil +} + +// Indexes returns a list of index names. +func (tx *Tx) Indexes() ([]string, error) { + if tx.db == nil { + return nil, ErrTxClosed + } + names := make([]string, 0, len(tx.db.idxs)) + for name := range tx.db.idxs { + names = append(names, name) + } + sort.Strings(names) + return names, nil +} + +// Rect is helper function that returns a string representation +// of a rect. IndexRect() is the reverse function and can be used +// to generate a rect from a string. +func Rect(min, max []float64) string { + r := grect.Rect{Min: min, Max: max} + return r.String() +} + +// Point is a helper function that converts a series of float64s +// to a rectangle for a spatial index. 
+func Point(coords ...float64) string { + return Rect(coords, coords) +} + +// IndexRect is a helper function that converts string to a rect. +// Rect() is the reverse function and can be used to generate a string +// from a rect. +func IndexRect(a string) (min, max []float64) { + r := grect.Get(a) + return r.Min, r.Max +} + +// IndexString is a helper function that return true if 'a' is less than 'b'. +// This is a case-insensitive comparison. Use the IndexBinary() for comparing +// case-sensitive strings. +func IndexString(a, b string) bool { + for i := 0; i < len(a) && i < len(b); i++ { + if a[i] >= 'A' && a[i] <= 'Z' { + if b[i] >= 'A' && b[i] <= 'Z' { + // both are uppercase, do nothing + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } else { + // a is uppercase, convert a to lowercase + if a[i]+32 < b[i] { + return true + } else if a[i]+32 > b[i] { + return false + } + } + } else if b[i] >= 'A' && b[i] <= 'Z' { + // b is uppercase, convert b to lowercase + if a[i] < b[i]+32 { + return true + } else if a[i] > b[i]+32 { + return false + } + } else { + // neither are uppercase + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } + } + return len(a) < len(b) +} + +// IndexBinary is a helper function that returns true if 'a' is less than 'b'. +// This compares the raw binary of the string. +func IndexBinary(a, b string) bool { + return a < b +} + +// IndexInt is a helper function that returns true if 'a' is less than 'b'. +func IndexInt(a, b string) bool { + ia, _ := strconv.ParseInt(a, 10, 64) + ib, _ := strconv.ParseInt(b, 10, 64) + return ia < ib +} + +// IndexUint is a helper function that returns true if 'a' is less than 'b'. +// This compares uint64s that are added to the database using the +// Uint() conversion function. +func IndexUint(a, b string) bool { + ia, _ := strconv.ParseUint(a, 10, 64) + ib, _ := strconv.ParseUint(b, 10, 64) + return ia < ib +} + +// IndexFloat is a helper function that returns true if 'a' is less than 'b'. +// This compares float64s that are added to the database using the +// Float() conversion function. +func IndexFloat(a, b string) bool { + ia, _ := strconv.ParseFloat(a, 64) + ib, _ := strconv.ParseFloat(b, 64) + return ia < ib +} + +// IndexJSON provides for the ability to create an index on any JSON field. +// When the field is a string, the comparison will be case-insensitive. +// It returns a helper function used by CreateIndex. +func IndexJSON(path string) func(a, b string) bool { + return func(a, b string) bool { + return gjson.Get(a, path).Less(gjson.Get(b, path), false) + } +} + +// IndexJSONCaseSensitive provides for the ability to create an index on +// any JSON field. +// When the field is a string, the comparison will be case-sensitive. +// It returns a helper function used by CreateIndex. +func IndexJSONCaseSensitive(path string) func(a, b string) bool { + return func(a, b string) bool { + return gjson.Get(a, path).Less(gjson.Get(b, path), true) + } +} + +// Desc is a helper function that changes the order of an index. 
+func Desc(less func(a, b string) bool) func(a, b string) bool { + return func(a, b string) bool { return less(b, a) } +} diff --git a/vendor/github.com/tidwall/buntdb/go.mod b/vendor/github.com/tidwall/buntdb/go.mod new file mode 100644 index 00000000..f4597f10 --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/go.mod @@ -0,0 +1,12 @@ +module github.com/tidwall/buntdb + +go 1.13 + +require ( + github.com/tidwall/btree v0.0.0-20191029221954-400434d76274 + github.com/tidwall/gjson v1.3.4 + github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb + github.com/tidwall/match v1.0.1 + github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e + github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 // indirect +) diff --git a/vendor/github.com/tidwall/buntdb/go.sum b/vendor/github.com/tidwall/buntdb/go.sum new file mode 100644 index 00000000..fc5a01cd --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/go.sum @@ -0,0 +1,14 @@ +github.com/tidwall/btree v0.0.0-20191029221954-400434d76274 h1:G6Z6HvJuPjG6XfNGi/feOATzeJrfgTNJY+rGrHbA04E= +github.com/tidwall/btree v0.0.0-20191029221954-400434d76274/go.mod h1:huei1BkDWJ3/sLXmO+bsCNELL+Bp2Kks9OLyQFkzvA8= +github.com/tidwall/gjson v1.3.4 h1:On5waDnyKKk3SWE4EthbjjirAWXp43xx5cKCUZY1eZw= +github.com/tidwall/gjson v1.3.4/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb h1:5NSYaAdrnblKByzd7XByQEJVT8+9v0W/tIY0Oo4OwrE= +github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb/go.mod h1:lKYYLFIr9OIgdgrtgkZ9zgRxRdvPYsExnYBsEAd8W5M= +github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e h1:+NL1GDIUOKxVfbp2KoJQD9cTQ6dyP2co9q4yzmT9FZo= +github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e/go.mod h1:/h+UnNGt0IhNNJLkGikcdcJqm66zGD/uJGMRxK/9+Ao= +github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 h1:Otn9S136ELckZ3KKDyCkxapfufrqDqwmGjcHfAyXRrE= +github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563/go.mod h1:mLqSmt7Dv/CNneF2wfcChfN1rvapyQr01LGKnKex0DQ= diff --git a/vendor/github.com/tidwall/buntdb/logo.png b/vendor/github.com/tidwall/buntdb/logo.png new file mode 100644 index 00000000..01c6d75c Binary files /dev/null and b/vendor/github.com/tidwall/buntdb/logo.png differ diff --git a/vendor/github.com/tidwall/gjson/.travis.yml b/vendor/github.com/tidwall/gjson/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/tidwall/gjson/LICENSE b/vendor/github.com/tidwall/gjson/LICENSE new file mode 100644 index 00000000..58f5819a --- /dev/null +++ b/vendor/github.com/tidwall/gjson/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The 
above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md new file mode 100644 index 00000000..cab0f9fa --- /dev/null +++ b/vendor/github.com/tidwall/gjson/README.md @@ -0,0 +1,491 @@ +

+GJSON
+
+[badges: Build Status, GoDoc, GJSON Playground]
+
+*get json values quickly*
+ +GJSON is a Go package that provides a [fast](#performance) and [simple](#get-a-value) way to get values from a json document. +It has features such as [one line retrieval](#get-a-value), [dot notation paths](#path-syntax), [iteration](#iterate-through-an-object-or-array), and [parsing json lines](#json-lines). + +Also check out [SJSON](https://github.com/tidwall/sjson) for modifying json, and the [JJ](https://github.com/tidwall/jj) command line tool. + +Getting Started +=============== + +## Installing + +To start using GJSON, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/gjson +``` + +This will retrieve the library. + +## Get a value +Get searches json for the specified path. A path is in dot syntax, such as "name.last" or "age". When the value is found it's returned immediately. + +```go +package main + +import "github.com/tidwall/gjson" + +const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}` + +func main() { + value := gjson.Get(json, "name.last") + println(value.String()) +} +``` + +This will print: + +``` +Prichard +``` +*There's also the [GetMany](#get-multiple-values-at-once) function to get multiple values at once, and [GetBytes](#working-with-bytes) for working with JSON byte slices.* + +## Path Syntax + +Below is a quick overview of the path syntax, for more complete information please +check out [GJSON Syntax](SYNTAX.md). + +A path is a series of keys separated by a dot. +A key may contain special wildcard characters '\*' and '?'. +To access an array value use the index as the key. +To get the number of elements in an array or to access a child path, use the '#' character. +The dot and wildcard characters can be escaped with '\\'. + +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]}, + {"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]}, + {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]} + ] +} +``` +``` +"name.last" >> "Anderson" +"age" >> 37 +"children" >> ["Sara","Alex","Jack"] +"children.#" >> 3 +"children.1" >> "Alex" +"child*.2" >> "Jack" +"c?ildren.0" >> "Sara" +"fav\.movie" >> "Deer Hunter" +"friends.#.first" >> ["Dale","Roger","Jane"] +"friends.1.last" >> "Craig" +``` + +You can also query an array for the first match by using `#(...)`, or find all +matches with `#(...)#`. Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` +comparison operators and the simple pattern matching `%` (like) and `!%` +(not like) operators. + +``` +friends.#(last=="Murphy").first >> "Dale" +friends.#(last=="Murphy")#.first >> ["Dale","Jane"] +friends.#(age>45)#.last >> ["Craig","Murphy"] +friends.#(first%"D*").last >> "Murphy" +friends.#(first!%"D*").last >> "Craig" +friends.#(nets.#(=="fb"))#.first >> ["Dale","Roger"] +``` + +*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was +changed in v1.3.0 as to avoid confusion with the new +[multipath](SYNTAX.md#multipaths) syntax. For backwards compatibility, +`#[...]` will continue to work until the next major release.* + +## Result Type + +GJSON supports the json types `string`, `number`, `bool`, and `null`. +Arrays and Objects are returned as their raw json types. 
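+
+For instance (a minimal sketch, reusing the document from above), fetching an
+array yields a result whose type is `JSON` and whose `Raw` field carries the
+raw json text:
+
+```go
+value := gjson.Get(`{"children":["Sara","Alex","Jack"]}`, "children")
+println(value.Type.String()) // JSON
+println(value.Raw)           // ["Sara","Alex","Jack"]
+```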
+ +The `Result` type holds one of these: + +``` +bool, for JSON booleans +float64, for JSON numbers +string, for JSON string literals +nil, for JSON null +``` + +To directly access the value: + +```go +result.Type // can be String, Number, True, False, Null, or JSON +result.Str // holds the string +result.Num // holds the float64 number +result.Raw // holds the raw json +result.Index // index of raw value in original json, zero means index unknown +``` + +There are a variety of handy functions that work on a result: + +```go +result.Exists() bool +result.Value() interface{} +result.Int() int64 +result.Uint() uint64 +result.Float() float64 +result.String() string +result.Bool() bool +result.Time() time.Time +result.Array() []gjson.Result +result.Map() map[string]gjson.Result +result.Get(path string) Result +result.ForEach(iterator func(key, value Result) bool) +result.Less(token Result, caseSensitive bool) bool +``` + +The `result.Value()` function returns an `interface{}` which requires type assertion and is one of the following Go types: + +The `result.Array()` function returns back an array of values. +If the result represents a non-existent value, then an empty array will be returned. +If the result is not a JSON array, the return value will be an array containing one result. + +```go +boolean >> bool +number >> float64 +string >> string +null >> nil +array >> []interface{} +object >> map[string]interface{} +``` + +### 64-bit integers + +The `result.Int()` and `result.Uint()` calls are capable of reading all 64 bits, allowing for large JSON integers. + +```go +result.Int() int64 // -9223372036854775808 to 9223372036854775807 +result.Uint() int64 // 0 to 18446744073709551615 +``` + +## Modifiers and path chaining + +New in version 1.2 is support for modifier functions and path chaining. + +A modifier is a path component that performs custom processing on the +json. + +Multiple paths can be "chained" together using the pipe character. +This is useful for getting results from a modified query. + +For example, using the built-in `@reverse` modifier on the above json document, +we'll get `children` array and reverse the order: + +``` +"children|@reverse" >> ["Jack","Alex","Sara"] +"children|@reverse|0" >> "Jack" +``` + +There are currently three built-in modifiers: + +- `@reverse`: Reverse an array or the members of an object. +- `@ugly`: Remove all whitespace from a json document. +- `@pretty`: Make the json document more human readable. + +### Modifier arguments + +A modifier may accept an optional argument. The argument can be a valid JSON +document or just characters. + +For example, the `@pretty` modifier takes a json object as its argument. + +``` +@pretty:{"sortKeys":true} +``` + +Which makes the json pretty and orders all of its keys. + +```json +{ + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"age": 44, "first": "Dale", "last": "Murphy"}, + {"age": 68, "first": "Roger", "last": "Craig"}, + {"age": 47, "first": "Jane", "last": "Murphy"} + ], + "name": {"first": "Tom", "last": "Anderson"} +} +``` + +*The full list of `@pretty` options are `sortKeys`, `indent`, `prefix`, and `width`. +Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.* + +### Custom modifiers + +You can also add custom modifiers. + +For example, here we create a modifier that makes the entire json document upper +or lower case. 
+ +```go +gjson.AddModifier("case", func(json, arg string) string { + if arg == "upper" { + return strings.ToUpper(json) + } + if arg == "lower" { + return strings.ToLower(json) + } + return json +}) +``` + +``` +"children|@case:upper" >> ["SARA","ALEX","JACK"] +"children|@case:lower|@reverse" >> ["jack","alex","sara"] +``` + +## JSON Lines + +There's support for [JSON Lines](http://jsonlines.org/) using the `..` prefix, which treats a multilined document as an array. + +For example: + +``` +{"name": "Gilbert", "age": 61} +{"name": "Alexa", "age": 34} +{"name": "May", "age": 57} +{"name": "Deloise", "age": 44} +``` + +``` +..# >> 4 +..1 >> {"name": "Alexa", "age": 34} +..3 >> {"name": "Deloise", "age": 44} +..#.name >> ["Gilbert","Alexa","May","Deloise"] +..#(name="May").age >> 57 +``` + +The `ForEachLines` function will iterate through JSON lines. + +```go +gjson.ForEachLine(json, func(line gjson.Result) bool{ + println(line.String()) + return true +}) +``` + +## Get nested array values + +Suppose you want all the last names from the following json: + +```json +{ + "programmers": [ + { + "firstName": "Janet", + "lastName": "McLaughlin", + }, { + "firstName": "Elliotte", + "lastName": "Hunter", + }, { + "firstName": "Jason", + "lastName": "Harold", + } + ] +} +``` + +You would use the path "programmers.#.lastName" like such: + +```go +result := gjson.Get(json, "programmers.#.lastName") +for _, name := range result.Array() { + println(name.String()) +} +``` + +You can also query an object inside an array: + +```go +name := gjson.Get(json, `programmers.#(lastName="Hunter").firstName`) +println(name.String()) // prints "Elliotte" +``` + +## Iterate through an object or array + +The `ForEach` function allows for quickly iterating through an object or array. +The key and value are passed to the iterator function for objects. +Only the value is passed for arrays. +Returning `false` from an iterator will stop iteration. + +```go +result := gjson.Get(json, "programmers") +result.ForEach(func(key, value gjson.Result) bool { + println(value.String()) + return true // keep iterating +}) +``` + +## Simple Parse and Get + +There's a `Parse(json)` function that will do a simple parse, and `result.Get(path)` that will search a result. + +For example, all of these will return the same result: + +```go +gjson.Parse(json).Get("name").Get("last") +gjson.Get(json, "name").Get("last") +gjson.Get(json, "name.last") +``` + +## Check for the existence of a value + +Sometimes you just want to know if a value exists. + +```go +value := gjson.Get(json, "name.last") +if !value.Exists() { + println("no last name") +} else { + println(value.String()) +} + +// Or as one step +if gjson.Get(json, "name.last").Exists() { + println("has a last name") +} +``` + +## Validate JSON + +The `Get*` and `Parse*` functions expects that the json is well-formed. Bad json will not panic, but it may return back unexpected results. + +If you are consuming JSON from an unpredictable source then you may want to validate prior to using GJSON. + +```go +if !gjson.Valid(json) { + return errors.New("invalid json") +} +value := gjson.Get(json, "name.last") +``` + +## Unmarshal to a map + +To unmarshal to a `map[string]interface{}`: + +```go +m, ok := gjson.Parse(json).Value().(map[string]interface{}) +if !ok { + // not a map +} +``` + +## Working with Bytes + +If your JSON is contained in a `[]byte` slice, there's the [GetBytes](https://godoc.org/github.com/tidwall/gjson#GetBytes) function. This is preferred over `Get(string(data), path)`. 
+ +```go +var json []byte = ... +result := gjson.GetBytes(json, path) +``` + +If you are using the `gjson.GetBytes(json, path)` function and you want to avoid converting `result.Raw` to a `[]byte`, then you can use this pattern: + +```go +var json []byte = ... +result := gjson.GetBytes(json, path) +var raw []byte +if result.Index > 0 { + raw = json[result.Index:result.Index+len(result.Raw)] +} else { + raw = []byte(result.Raw) +} +``` + +This is a best-effort no allocation sub slice of the original json. This method utilizes the `result.Index` field, which is the position of the raw data in the original json. It's possible that the value of `result.Index` equals zero, in which case the `result.Raw` is converted to a `[]byte`. + +## Get multiple values at once + +The `GetMany` function can be used to get multiple values at the same time. + +```go +results := gjson.GetMany(json, "name.first", "name.last", "age") +``` + +The return value is a `[]Result`, which will always contain exactly the same number of items as the input paths. + +## Performance + +Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/), +[ffjson](https://github.com/pquerna/ffjson), +[EasyJSON](https://github.com/mailru/easyjson), +[jsonparser](https://github.com/buger/jsonparser), +and [json-iterator](https://github.com/json-iterator/go) + +``` +BenchmarkGJSONGet-8 3000000 372 ns/op 0 B/op 0 allocs/op +BenchmarkGJSONUnmarshalMap-8 900000 4154 ns/op 1920 B/op 26 allocs/op +BenchmarkJSONUnmarshalMap-8 600000 9019 ns/op 3048 B/op 69 allocs/op +BenchmarkJSONDecoder-8 300000 14120 ns/op 4224 B/op 184 allocs/op +BenchmarkFFJSONLexer-8 1500000 3111 ns/op 896 B/op 8 allocs/op +BenchmarkEasyJSONLexer-8 3000000 887 ns/op 613 B/op 6 allocs/op +BenchmarkJSONParserGet-8 3000000 499 ns/op 21 B/op 0 allocs/op +BenchmarkJSONIterator-8 3000000 812 ns/op 544 B/op 9 allocs/op +``` + +JSON document used: + +```json +{ + "widget": { + "debug": "on", + "window": { + "title": "Sample Konfabulator Widget", + "name": "main_window", + "width": 500, + "height": 500 + }, + "image": { + "src": "Images/Sun.png", + "hOffset": 250, + "vOffset": 250, + "alignment": "center" + }, + "text": { + "data": "Click Here", + "size": 36, + "style": "bold", + "vOffset": 100, + "alignment": "center", + "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" + } + } +} +``` + +Each operation was rotated though one of the following search paths: + +``` +widget.window.name +widget.image.hOffset +widget.text.onMouseUp +``` + +*These benchmarks were run on a MacBook Pro 15" 2.8 GHz Intel Core i7 using Go 1.8 and can be be found [here](https://github.com/tidwall/gjson-benchmarks).* + + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +GJSON source code is available under the MIT [License](/LICENSE). diff --git a/vendor/github.com/tidwall/gjson/SYNTAX.md b/vendor/github.com/tidwall/gjson/SYNTAX.md new file mode 100644 index 00000000..a57a4d66 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/SYNTAX.md @@ -0,0 +1,265 @@ +# GJSON Path Syntax + +A GJSON Path is a text string syntax that describes a search pattern for quickly retreiving values from a JSON payload. + +This document is designed to explain the structure of a GJSON Path through examples. 
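+
+As a quick taste (a minimal sketch using the Go package), a path is evaluated
+with `gjson.Get`:
+
+```go
+value := gjson.Get(`{"name":{"first":"Tom","last":"Anderson"}}`, "name.last")
+println(value.String()) // Anderson
+```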
+ +- [Path structure](#path-structure) +- [Basic](#basic) +- [Wildcards](#wildcards) +- [Escape Character](#escape-character) +- [Arrays](#arrays) +- [Queries](#queries) +- [Dot vs Pipe](#dot-vs-pipe) +- [Modifiers](#modifiers) +- [Multipaths](#multipaths) + +The definitive implemenation is [github.com/tidwall/gjson](https://github.com/tidwall/gjson). +Use the [GJSON Playground](https://gjson.dev) to experiment with the syntax online. + + +## Path structure + +A GJSON Path is intended to be easily expressed as a series of components seperated by a `.` character. + +Along with `.` character, there are a few more that have special meaning, including `|`, `#`, `@`, `\`, `*`, and `?`. + +## Example + +Given this JSON + +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]}, + {"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]}, + {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]} + ] +} +``` + +The following GJSON Paths evaluate to the accompanying values. + +### Basic + +In many cases you'll just want to retreive values by object name or array index. + +```go +name.last "Anderson" +name.first "Tom" +age 37 +children ["Sara","Alex","Jack"] +children.0 "Sara" +children.1 "Alex" +friends.1 {"first": "Roger", "last": "Craig", "age": 68} +friends.1.first "Roger" +``` + +### Wildcards + +A key may contain the special wildcard characters `*` and `?`. +The `*` will match on any zero+ characters, and `?` matches on any one character. + +```go +child*.2 "Jack" +c?ildren.0 "Sara" +``` + +### Escape character + +Special purpose characters, such as `.`, `*`, and `?` can be escaped with `\`. + +```go +fav\.movie "Deer Hunter" +``` + +### Arrays + +The `#` character allows for digging into JSON Arrays. + +To get the length of an array you'll just use the `#` all by itself. + +```go +friends.# 3 +friends.#.age [44,68,47] +``` + +### Queries + +You can also query an array for the first match by using `#(...)`, or find all matches with `#(...)#`. +Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` comparison operators, +and the simple pattern matching `%` (like) and `!%` (not like) operators. + +```go +friends.#(last=="Murphy").first "Dale" +friends.#(last=="Murphy")#.first ["Dale","Jane"] +friends.#(age>45)#.last ["Craig","Murphy"] +friends.#(first%"D*").last "Murphy" +friends.#(first!%"D*").last "Craig" +``` + +To query for a non-object value in an array, you can forgo the string to the right of the operator. + +```go +children.#(!%"*a*") "Alex" +children.#(%"*a*")# ["Sara","Jack"] +``` + +Nested queries are allowed. + +```go +friends.#(nets.#(=="fb"))#.first >> ["Dale","Roger"] +``` + +*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was +changed in v1.3.0 as to avoid confusion with the new [multipath](#multipaths) +syntax. For backwards compatibility, `#[...]` will continue to work until the +next major release.* + +### Dot vs Pipe + +The `.` is standard separator, but it's also possible to use a `|`. +In most cases they both end up returning the same results. +The cases where`|` differs from `.` is when it's used after the `#` for [Arrays](#arrays) and [Queries](#queries). 
+ +Here are some examples + +```go +friends.0.first "Dale" +friends|0.first "Dale" +friends.0|first "Dale" +friends|0|first "Dale" +friends|# 3 +friends.# 3 +friends.#(last="Murphy")# [{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}] +friends.#(last="Murphy")#.first ["Dale","Jane"] +friends.#(last="Murphy")#|first +friends.#(last="Murphy")#.0 [] +friends.#(last="Murphy")#|0 {"first": "Dale", "last": "Murphy", "age": 44} +friends.#(last="Murphy")#.# [] +friends.#(last="Murphy")#|# 2 +``` + +Let's break down a few of these. + +The path `friends.#(last="Murphy")#` all by itself results in + +```json +[{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}] +``` + +The `.first` suffix will process the `first` path on each array element *before* returning the results. Which becomes + +```json +["Dale","Jane"] +``` + +But the `|first` suffix actually processes the `first` path *after* the previous result. +Since the previous result is an array, not an object, it's not possible to process +because `first` does not exist. + +Yet, `|0` suffix returns + +```json +{"first": "Dale", "last": "Murphy", "age": 44} +``` + +Because `0` is the first index of the previous result. + +### Modifiers + +A modifier is a path component that performs custom processing on the JSON. + +For example, using the built-in `@reverse` modifier on the above JSON payload will reverse the `children` array: + +```go +children.@reverse ["Jack","Alex","Sara"] +children.@reverse.0 "Jack" +``` + +There are currently three built-in modifiers: + +- `@reverse`: Reverse an array or the members of an object. +- `@ugly`: Remove all whitespace from JSON. +- `@pretty`: Make the JSON more human readable. + +#### Modifier arguments + +A modifier may accept an optional argument. The argument can be a valid JSON payload or just characters. + +For example, the `@pretty` modifier takes a json object as its argument. + +``` +@pretty:{"sortKeys":true} +``` + +Which makes the json pretty and orders all of its keys. + +```json +{ + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"age": 44, "first": "Dale", "last": "Murphy"}, + {"age": 68, "first": "Roger", "last": "Craig"}, + {"age": 47, "first": "Jane", "last": "Murphy"} + ], + "name": {"first": "Tom", "last": "Anderson"} +} +``` + +*The full list of `@pretty` options are `sortKeys`, `indent`, `prefix`, and `width`. +Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.* + +#### Custom modifiers + +You can also add custom modifiers. + +For example, here we create a modifier which makes the entire JSON payload upper or lower case. + +```go +gjson.AddModifier("case", func(json, arg string) string { + if arg == "upper" { + return strings.ToUpper(json) + } + if arg == "lower" { + return strings.ToLower(json) + } + return json +}) +"children.@case:upper" ["SARA","ALEX","JACK"] +"children.@case:lower.@reverse" ["jack","alex","sara"] +``` + +### Multipaths + +Starting with v1.3.0, GJSON added the ability to join multiple paths together +to form new documents. Wrapping comma-separated paths between `{...}` or +`[...]` will result in a new array or object, respectively. + +For example, using the given multipath + +``` +{name.first,age,"the_murphys":friends.#(last="Murphy")#.first} +``` + +Here we selected the first name, age, and the first name for friends with the +last name "Murphy". 
+ +You'll notice that an optional key can be provided, in this case +"the_murphys", to force assign a key to a value. Otherwise, the name of the +actual field will be used, in this case "first". If a name cannot be +determined, then "_" is used. + +This results in + +``` +{"first":"Tom","age":37,"the_murphys":["Dale","Jane"]} +``` + + diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go new file mode 100644 index 00000000..787d3276 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/gjson.go @@ -0,0 +1,2826 @@ +// Package gjson provides searching for json strings. +package gjson + +import ( + "encoding/base64" + "encoding/json" + "errors" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf16" + "unicode/utf8" + + "github.com/tidwall/match" + "github.com/tidwall/pretty" +) + +// Type is Result type +type Type int + +const ( + // Null is a null json value + Null Type = iota + // False is a json false boolean + False + // Number is json number + Number + // String is a json string + String + // True is a json true boolean + True + // JSON is a raw block of JSON + JSON +) + +// String returns a string representation of the type. +func (t Type) String() string { + switch t { + default: + return "" + case Null: + return "Null" + case False: + return "False" + case Number: + return "Number" + case String: + return "String" + case True: + return "True" + case JSON: + return "JSON" + } +} + +// Result represents a json value that is returned from Get(). +type Result struct { + // Type is the json type + Type Type + // Raw is the raw json + Raw string + // Str is the json string + Str string + // Num is the json number + Num float64 + // Index of raw value in original json, zero means index unknown + Index int +} + +// String returns a string representation of the value. +func (t Result) String() string { + switch t.Type { + default: + return "" + case False: + return "false" + case Number: + if len(t.Raw) == 0 { + // calculated result + return strconv.FormatFloat(t.Num, 'f', -1, 64) + } + var i int + if t.Raw[0] == '-' { + i++ + } + for ; i < len(t.Raw); i++ { + if t.Raw[i] < '0' || t.Raw[i] > '9' { + return strconv.FormatFloat(t.Num, 'f', -1, 64) + } + } + return t.Raw + case String: + return t.Str + case JSON: + return t.Raw + case True: + return "true" + } +} + +// Bool returns an boolean representation. +func (t Result) Bool() bool { + switch t.Type { + default: + return false + case True: + return true + case String: + return t.Str != "" && t.Str != "0" && t.Str != "false" + case Number: + return t.Num != 0 + } +} + +// Int returns an integer representation. +func (t Result) Int() int64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := parseInt(t.Str) + return n + case Number: + // try to directly convert the float64 to int64 + n, ok := floatToInt(t.Num) + if !ok { + // now try to parse the raw string + n, ok = parseInt(t.Raw) + if !ok { + // fallback to a standard conversion + return int64(t.Num) + } + } + return n + } +} + +// Uint returns an unsigned integer representation. 
+func (t Result) Uint() uint64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := parseUint(t.Str) + return n + case Number: + // try to directly convert the float64 to uint64 + n, ok := floatToUint(t.Num) + if !ok { + // now try to parse the raw string + n, ok = parseUint(t.Raw) + if !ok { + // fallback to a standard conversion + return uint64(t.Num) + } + } + return n + } +} + +// Float returns an float64 representation. +func (t Result) Float() float64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := strconv.ParseFloat(t.Str, 64) + return n + case Number: + return t.Num + } +} + +// Time returns a time.Time representation. +func (t Result) Time() time.Time { + res, _ := time.Parse(time.RFC3339, t.String()) + return res +} + +// Array returns back an array of values. +// If the result represents a non-existent value, then an empty array will be +// returned. If the result is not a JSON array, the return value will be an +// array containing one result. +func (t Result) Array() []Result { + if t.Type == Null { + return []Result{} + } + if t.Type != JSON { + return []Result{t} + } + r := t.arrayOrMap('[', false) + return r.a +} + +// IsObject returns true if the result value is a JSON object. +func (t Result) IsObject() bool { + return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '{' +} + +// IsArray returns true if the result value is a JSON array. +func (t Result) IsArray() bool { + return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '[' +} + +// ForEach iterates through values. +// If the result represents a non-existent value, then no values will be +// iterated. If the result is an Object, the iterator will pass the key and +// value of each item. If the result is an Array, the iterator will only pass +// the value of each item. If the result is not a JSON array or object, the +// iterator will pass back one value equal to the result. +func (t Result) ForEach(iterator func(key, value Result) bool) { + if !t.Exists() { + return + } + if t.Type != JSON { + iterator(Result{}, t) + return + } + json := t.Raw + var keys bool + var i int + var key, value Result + for ; i < len(json); i++ { + if json[i] == '{' { + i++ + key.Type = String + keys = true + break + } else if json[i] == '[' { + i++ + break + } + if json[i] > ' ' { + return + } + } + var str string + var vesc bool + var ok bool + for ; i < len(json); i++ { + if keys { + if json[i] != '"' { + continue + } + s := i + i, str, vesc, ok = parseString(json, i+1) + if !ok { + return + } + if vesc { + key.Str = unescape(str[1 : len(str)-1]) + } else { + key.Str = str[1 : len(str)-1] + } + key.Raw = str + key.Index = s + } + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' { + continue + } + break + } + s := i + i, value, ok = parseAny(json, i, true) + if !ok { + return + } + value.Index = s + if !iterator(key, value) { + return + } + } +} + +// Map returns back an map of values. The result should be a JSON array. +func (t Result) Map() map[string]Result { + if t.Type != JSON { + return map[string]Result{} + } + r := t.arrayOrMap('{', false) + return r.o +} + +// Get searches result for the specified path. +// The result should be a JSON array or object. 
+func (t Result) Get(path string) Result { + return Get(t.Raw, path) +} + +type arrayOrMapResult struct { + a []Result + ai []interface{} + o map[string]Result + oi map[string]interface{} + vc byte +} + +func (t Result) arrayOrMap(vc byte, valueize bool) (r arrayOrMapResult) { + var json = t.Raw + var i int + var value Result + var count int + var key Result + if vc == 0 { + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + r.vc = json[i] + i++ + break + } + if json[i] > ' ' { + goto end + } + } + } else { + for ; i < len(json); i++ { + if json[i] == vc { + i++ + break + } + if json[i] > ' ' { + goto end + } + } + r.vc = vc + } + if r.vc == '{' { + if valueize { + r.oi = make(map[string]interface{}) + } else { + r.o = make(map[string]Result) + } + } else { + if valueize { + r.ai = make([]interface{}, 0) + } else { + r.a = make([]Result, 0) + } + } + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + // get next value + if json[i] == ']' || json[i] == '}' { + break + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + value.Str = "" + } else { + continue + } + case '{', '[': + value.Type = JSON + value.Raw = squash(json[i:]) + value.Str, value.Num = "", 0 + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + value.Num = 0 + } + i += len(value.Raw) - 1 + + if r.vc == '{' { + if count%2 == 0 { + key = value + } else { + if valueize { + if _, ok := r.oi[key.Str]; !ok { + r.oi[key.Str] = value.Value() + } + } else { + if _, ok := r.o[key.Str]; !ok { + r.o[key.Str] = value + } + } + } + count++ + } else { + if valueize { + r.ai = append(r.ai, value.Value()) + } else { + r.a = append(r.a, value) + } + } + } +end: + return +} + +// Parse parses the json and returns a result. +// +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// If you are consuming JSON from an unpredictable source then you may want to +// use the Valid function first. +func Parse(json string) Result { + var value Result + for i := 0; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + value.Type = JSON + value.Raw = json[i:] // just take the entire raw + break + } + if json[i] <= ' ' { + continue + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + } else { + return Result{} + } + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + } + break + } + return value +} + +// ParseBytes parses the json and returns a result. +// If working with bytes, this method preferred over Parse(string(data)) +func ParseBytes(json []byte) Result { + return Parse(string(json)) +} + +func squash(json string) string { + // expects that the lead character is a '[' or '{' or '(' + // squash the value, ignoring all nested arrays and objects. 
+ // the first '[' or '{' or '(', has already been read + depth := 1 + for i := 1; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + case '{', '[', '(': + depth++ + case '}', ']', ')': + depth-- + if depth == 0 { + return json[:i+1] + } + } + } + } + return json +} + +func tonum(json string) (raw string, num float64) { + for i := 1; i < len(json); i++ { + // less than dash might have valid characters + if json[i] <= '-' { + if json[i] <= ' ' || json[i] == ',' { + // break on whitespace and comma + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + // could be a '+' or '-'. let's assume so. + continue + } + if json[i] < ']' { + // probably a valid number + continue + } + if json[i] == 'e' || json[i] == 'E' { + // allow for exponential numbers + continue + } + // likely a ']' or '}' + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + raw = json + num, _ = strconv.ParseFloat(raw, 64) + return +} + +func tolit(json string) (raw string) { + for i := 1; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return json[:i] + } + } + return json +} + +func tostr(json string) (raw string, str string) { + // expects that the lead character is a '"' + for i := 1; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return json[:i+1], json[1:i] + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + var ret string + if i+1 < len(json) { + ret = json[:i+1] + } else { + ret = json[:i] + } + return ret, unescape(json[1:i]) + } + } + return json, json[1:] +} + +// Exists returns true if value exists. 
+// +// if gjson.Get(json, "name.last").Exists(){ +// println("value exists") +// } +func (t Result) Exists() bool { + return t.Type != Null || len(t.Raw) != 0 +} + +// Value returns one of these types: +// +// bool, for JSON booleans +// float64, for JSON numbers +// Number, for JSON numbers +// string, for JSON string literals +// nil, for JSON null +// map[string]interface{}, for JSON objects +// []interface{}, for JSON arrays +// +func (t Result) Value() interface{} { + if t.Type == String { + return t.Str + } + switch t.Type { + default: + return nil + case False: + return false + case Number: + return t.Num + case JSON: + r := t.arrayOrMap(0, true) + if r.vc == '{' { + return r.oi + } else if r.vc == '[' { + return r.ai + } + return nil + case True: + return true + } +} + +func parseString(json string, i int) (int, string, bool, bool) { + var s = i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return i + 1, json[s-1 : i+1], false, true + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + return i + 1, json[s-1 : i+1], true, true + } + } + break + } + } + return i, json[s-1:], false, false +} + +func parseNumber(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ']' || + json[i] == '}' { + return i, json[s:i] + } + } + return i, json[s:] +} + +func parseLiteral(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return i, json[s:i] + } + } + return i, json[s:] +} + +type arrayPathResult struct { + part string + path string + pipe string + piped bool + more bool + alogok bool + arrch bool + alogkey string + query struct { + on bool + path string + op string + value string + all bool + } +} + +func parseArrayPath(path string) (r arrayPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '|' { + r.part = path[:i] + r.pipe = path[i+1:] + r.piped = true + return + } + if path[i] == '.' { + r.part = path[:i] + r.path = path[i+1:] + r.more = true + return + } + if path[i] == '#' { + r.arrch = true + if i == 0 && len(path) > 1 { + if path[1] == '.' { + r.alogok = true + r.alogkey = path[2:] + r.path = path[:1] + } else if path[1] == '[' || path[1] == '(' { + // query + r.query.on = true + if true { + qpath, op, value, _, fi, ok := parseQuery(path[i:]) + if !ok { + // bad query, end now + break + } + r.query.path = qpath + r.query.op = op + r.query.value = value + i = fi - 1 + if i+1 < len(path) && path[i+1] == '#' { + r.query.all = true + } + } else { + var end byte + if path[1] == '[' { + end = ']' + } else { + end = ')' + } + i += 2 + // whitespace + for ; i < len(path); i++ { + if path[i] > ' ' { + break + } + } + s := i + for ; i < len(path); i++ { + if path[i] <= ' ' || + path[i] == '!' || + path[i] == '=' || + path[i] == '<' || + path[i] == '>' || + path[i] == '%' || + path[i] == end { + break + } + } + r.query.path = path[s:i] + // whitespace + for ; i < len(path); i++ { + if path[i] > ' ' { + break + } + } + if i < len(path) { + s = i + if path[i] == '!' 
{ + if i < len(path)-1 && (path[i+1] == '=' || + path[i+1] == '%') { + i++ + } + } else if path[i] == '<' || path[i] == '>' { + if i < len(path)-1 && path[i+1] == '=' { + i++ + } + } else if path[i] == '=' { + if i < len(path)-1 && path[i+1] == '=' { + s++ + i++ + } + } + i++ + r.query.op = path[s:i] + // whitespace + for ; i < len(path); i++ { + if path[i] > ' ' { + break + } + } + s = i + for ; i < len(path); i++ { + if path[i] == '"' { + i++ + s2 := i + for ; i < len(path); i++ { + if path[i] > '\\' { + continue + } + if path[i] == '"' { + // look for an escaped slash + if path[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if path[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + } else if path[i] == end { + if i+1 < len(path) && path[i+1] == '#' { + r.query.all = true + } + break + } + } + if i > len(path) { + i = len(path) + } + v := path[s:i] + for len(v) > 0 && v[len(v)-1] <= ' ' { + v = v[:len(v)-1] + } + r.query.value = v + } + } + } + } + continue + } + } + r.part = path + r.path = "" + return +} + +// splitQuery takes a query and splits it into three parts: +// path, op, middle, and right. +// So for this query: +// #(first_name=="Murphy").last +// Becomes +// first_name # path +// =="Murphy" # middle +// .last # right +// Or, +// #(service_roles.#(=="one")).cap +// Becomes +// service_roles.#(=="one") # path +// # middle +// .cap # right +func parseQuery(query string) ( + path, op, value, remain string, i int, ok bool, +) { + if len(query) < 2 || query[0] != '#' || + (query[1] != '(' && query[1] != '[') { + return "", "", "", "", i, false + } + i = 2 + j := 0 // start of value part + depth := 1 + for ; i < len(query); i++ { + if depth == 1 && j == 0 { + switch query[i] { + case '!', '=', '<', '>', '%': + // start of the value part + j = i + continue + } + } + if query[i] == '\\' { + i++ + } else if query[i] == '[' || query[i] == '(' { + depth++ + } else if query[i] == ']' || query[i] == ')' { + depth-- + if depth == 0 { + break + } + } else if query[i] == '"' { + // inside selector string, balance quotes + i++ + for ; i < len(query); i++ { + if query[i] == '\\' { + i++ + } else if query[i] == '"' { + break + } + } + } + } + if depth > 0 { + return "", "", "", "", i, false + } + if j > 0 { + path = trim(query[2:j]) + value = trim(query[j:i]) + remain = query[i+1:] + // parse the compare op from the value + var opsz int + switch { + case len(value) == 1: + opsz = 1 + case value[0] == '!' && value[1] == '=': + opsz = 2 + case value[0] == '!' 
&& value[1] == '%': + opsz = 2 + case value[0] == '<' && value[1] == '=': + opsz = 2 + case value[0] == '>' && value[1] == '=': + opsz = 2 + case value[0] == '=' && value[1] == '=': + value = value[1:] + opsz = 1 + case value[0] == '<': + opsz = 1 + case value[0] == '>': + opsz = 1 + case value[0] == '=': + opsz = 1 + case value[0] == '%': + opsz = 1 + } + op = value[:opsz] + value = trim(value[opsz:]) + } else { + path = trim(query[2:i]) + remain = query[i+1:] + } + return path, op, value, remain, i + 1, true +} + +func trim(s string) string { +left: + if len(s) > 0 && s[0] <= ' ' { + s = s[1:] + goto left + } +right: + if len(s) > 0 && s[len(s)-1] <= ' ' { + s = s[:len(s)-1] + goto right + } + return s +} + +type objectPathResult struct { + part string + path string + pipe string + piped bool + wild bool + more bool +} + +func parseObjectPath(path string) (r objectPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '|' { + r.part = path[:i] + r.pipe = path[i+1:] + r.piped = true + return + } + if path[i] == '.' { + // peek at the next byte and see if it's a '@', '[', or '{'. + r.part = path[:i] + if !DisableModifiers && + i < len(path)-1 && + (path[i+1] == '@' || + path[i+1] == '[' || path[i+1] == '{') { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + r.more = true + } + return + } + if path[i] == '*' || path[i] == '?' { + r.wild = true + continue + } + if path[i] == '\\' { + // go into escape mode. this is a slower path that + // strips off the escape character from the part. + epart := []byte(path[:i]) + i++ + if i < len(path) { + epart = append(epart, path[i]) + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + if i < len(path) { + epart = append(epart, path[i]) + } + continue + } else if path[i] == '.' { + r.part = string(epart) + // peek at the next byte and see if it's a '@' modifier + if !DisableModifiers && + i < len(path)-1 && path[i+1] == '@' { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + r.more = true + } + r.more = true + return + } else if path[i] == '|' { + r.part = string(epart) + r.pipe = path[i+1:] + r.piped = true + return + } else if path[i] == '*' || path[i] == '?' { + r.wild = true + } + epart = append(epart, path[i]) + } + } + // append the last part + r.part = string(epart) + return + } + } + r.part = path + return +} + +func parseSquash(json string, i int) (int, string) { + // expects that the lead character is a '[' or '{' or '(' + // squash the value, ignoring all nested arrays and objects. 
+ // the first '[' or '{' or '(' has already been read + s := i + i++ + depth := 1 + for ; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + case '{', '[', '(': + depth++ + case '}', ']', ')': + depth-- + if depth == 0 { + i++ + return i, json[s:i] + } + } + } + } + return i, json[s:] +} + +func parseObject(c *parseContext, i int, path string) (int, bool) { + var pmatch, kesc, vesc, ok, hit bool + var key, val string + rp := parseObjectPath(path) + if !rp.more && rp.piped { + c.pipe = rp.pipe + c.piped = true + } + for i < len(c.json) { + for ; i < len(c.json); i++ { + if c.json[i] == '"' { + // parse_key_string + // this is slightly different from getting s string value + // because we don't need the outer quotes. + i++ + var s = i + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + i, key, kesc, ok = i+1, c.json[s:i], false, true + goto parse_key_string_done + } + if c.json[i] == '\\' { + i++ + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + // look for an escaped slash + if c.json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if c.json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + i, key, kesc, ok = i+1, c.json[s:i], true, true + goto parse_key_string_done + } + } + break + } + } + key, kesc, ok = c.json[s:], false, false + parse_key_string_done: + break + } + if c.json[i] == '}' { + return i + 1, false + } + } + if !ok { + return i, false + } + if rp.wild { + if kesc { + pmatch = match.Match(unescape(key), rp.part) + } else { + pmatch = match.Match(key, rp.part) + } + } else { + if kesc { + pmatch = rp.part == unescape(key) + } else { + pmatch = rp.part == key + } + } + hit = pmatch && !rp.more + for ; i < len(c.json); i++ { + switch c.json[i] { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if hit { + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i, val = parseNumber(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + case 't', 'f', 'n': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if hit { + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + } + break + } + } + return i, false +} +func queryMatches(rp *arrayPathResult, value Result) bool { + rpv := rp.query.value + if len(rpv) > 2 && rpv[0] == '"' && rpv[len(rpv)-1] == '"' { + rpv = rpv[1 : 
len(rpv)-1] + } + if !value.Exists() { + return false + } + if rp.query.op == "" { + // the query is only looking for existence, such as: + // friends.#(name) + // which makes sure that the array "friends" has an element of + // "name" that exists + return true + } + switch value.Type { + case String: + switch rp.query.op { + case "=": + return value.Str == rpv + case "!=": + return value.Str != rpv + case "<": + return value.Str < rpv + case "<=": + return value.Str <= rpv + case ">": + return value.Str > rpv + case ">=": + return value.Str >= rpv + case "%": + return match.Match(value.Str, rpv) + case "!%": + return !match.Match(value.Str, rpv) + } + case Number: + rpvn, _ := strconv.ParseFloat(rpv, 64) + switch rp.query.op { + case "=": + return value.Num == rpvn + case "!=": + return value.Num != rpvn + case "<": + return value.Num < rpvn + case "<=": + return value.Num <= rpvn + case ">": + return value.Num > rpvn + case ">=": + return value.Num >= rpvn + } + case True: + switch rp.query.op { + case "=": + return rpv == "true" + case "!=": + return rpv != "true" + case ">": + return rpv == "false" + case ">=": + return true + } + case False: + switch rp.query.op { + case "=": + return rpv == "false" + case "!=": + return rpv != "false" + case "<": + return rpv == "true" + case "<=": + return true + } + } + return false +} +func parseArray(c *parseContext, i int, path string) (int, bool) { + var pmatch, vesc, ok, hit bool + var val string + var h int + var alog []int + var partidx int + var multires []byte + rp := parseArrayPath(path) + if !rp.arrch { + n, ok := parseUint(rp.part) + if !ok { + partidx = -1 + } else { + partidx = int(n) + } + } + if !rp.more && rp.piped { + c.pipe = rp.pipe + c.piped = true + } + + procQuery := func(qval Result) bool { + if rp.query.all { + if len(multires) == 0 { + multires = append(multires, '[') + } + } + var res Result + if qval.Type == JSON { + res = qval.Get(rp.query.path) + } else { + if rp.query.path != "" { + return false + } + res = qval + } + if queryMatches(&rp, res) { + if rp.more { + left, right, ok := splitPossiblePipe(rp.path) + if ok { + rp.path = left + c.pipe = right + c.piped = true + } + res = qval.Get(rp.path) + } else { + res = qval + } + if rp.query.all { + raw := res.Raw + if len(raw) == 0 { + raw = res.String() + } + if raw != "" { + if len(multires) > 1 { + multires = append(multires, ',') + } + multires = append(multires, raw...) 
+ } + } else { + c.value = res + return true + } + } + return false + } + + for i < len(c.json)+1 { + if !rp.arrch { + pmatch = partidx == h + hit = pmatch && !rp.more + } + h++ + if rp.alogok { + alog = append(alog, i) + } + for ; ; i++ { + var ch byte + if i > len(c.json) { + break + } else if i == len(c.json) { + ch = ']' + } else { + ch = c.json[i] + } + switch ch { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if rp.query.on { + var qval Result + if vesc { + qval.Str = unescape(val[1 : len(val)-1]) + } else { + qval.Str = val[1 : len(val)-1] + } + qval.Raw = val + qval.Type = String + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if rp.query.on { + if procQuery(Result{Raw: val, Type: JSON}) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if rp.query.on { + if procQuery(Result{Raw: val, Type: JSON}) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i, val = parseNumber(c.json, i) + if rp.query.on { + var qval Result + qval.Raw = val + qval.Type = Number + qval.Num, _ = strconv.ParseFloat(val, 64) + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + case 't', 'f', 'n': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if rp.query.on { + var qval Result + qval.Raw = val + switch vc { + case 't': + qval.Type = True + case 'f': + qval.Type = False + } + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + case ']': + if rp.arrch && rp.part == "#" { + if rp.alogok { + left, right, ok := splitPossiblePipe(rp.alogkey) + if ok { + rp.alogkey = left + c.pipe = right + c.piped = true + } + var jsons = make([]byte, 0, 64) + jsons = append(jsons, '[') + for j, k := 0, 0; j < len(alog); j++ { + idx := alog[j] + for idx < len(c.json) { + switch c.json[idx] { + case ' ', '\t', '\r', '\n': + idx++ + continue + } + break + } + if idx < len(c.json) && c.json[idx] != ']' { + _, res, ok := parseAny(c.json, idx, true) + if ok { + res := res.Get(rp.alogkey) + if res.Exists() { + if k > 0 { + jsons = append(jsons, ',') + } + raw := res.Raw + if len(raw) == 0 { + raw = res.String() + } + jsons = append(jsons, []byte(raw)...) 
+ k++ + } + } + } + } + jsons = append(jsons, ']') + c.value.Type = JSON + c.value.Raw = string(jsons) + return i + 1, true + } + if rp.alogok { + break + } + + c.value.Type = Number + c.value.Num = float64(h - 1) + c.value.Raw = strconv.Itoa(h - 1) + c.calcd = true + return i + 1, true + } + if len(multires) > 0 && !c.value.Exists() { + c.value = Result{ + Raw: string(append(multires, ']')), + Type: JSON, + } + } + return i + 1, false + } + break + } + } + return i, false +} + +func splitPossiblePipe(path string) (left, right string, ok bool) { + // take a quick peek for the pipe character. If found we'll split the piped + // part of the path into the c.pipe field and shorten the rp. + var possible bool + for i := 0; i < len(path); i++ { + if path[i] == '|' { + possible = true + break + } + } + if !possible { + return + } + + if len(path) > 0 && path[0] == '{' { + squashed := squash(path[1:]) + if len(squashed) < len(path)-1 { + squashed = path[:len(squashed)+1] + remain := path[len(squashed):] + if remain[0] == '|' { + return squashed, remain[1:], true + } + } + return + } + + // split the left and right side of the path with the pipe character as + // the delimiter. This is a little tricky because we'll need to basically + // parse the entire path. + for i := 0; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == '.' { + if i == len(path)-1 { + return + } + if path[i+1] == '#' { + i += 2 + if i == len(path) { + return + } + if path[i] == '[' || path[i] == '(' { + var start, end byte + if path[i] == '[' { + start, end = '[', ']' + } else { + start, end = '(', ')' + } + // inside selector, balance brackets + i++ + depth := 1 + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == start { + depth++ + } else if path[i] == end { + depth-- + if depth == 0 { + break + } + } else if path[i] == '"' { + // inside selector string, balance quotes + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == '"' { + break + } + } + } + } + } + } + } else if path[i] == '|' { + return path[:i], path[i+1:], true + } + } + return +} + +// ForEachLine iterates through lines of JSON as specified by the JSON Lines +// format (http://jsonlines.org/). +// Each line is returned as a GJSON Result. +func ForEachLine(json string, iterator func(line Result) bool) { + var res Result + var i int + for { + i, res, _ = parseAny(json, i, true) + if !res.Exists() { + break + } + if !iterator(res) { + return + } + } +} + +type subSelector struct { + name string + path string +} + +// parseSubSelectors returns the subselectors belonging to a '[path1,path2]' or +// '{"field1":path1,"field2":path2}' type subSelection. It's expected that the +// first character in path is either '[' or '{', and has already been checked +// prior to calling this function. +func parseSubSelectors(path string) (sels []subSelector, out string, ok bool) { + modifer := 0 + depth := 1 + colon := 0 + start := 1 + i := 1 + pushSel := func() { + var sel subSelector + if colon == 0 { + sel.path = path[start:i] + } else { + sel.name = path[start:colon] + sel.path = path[colon+1 : i] + } + sels = append(sels, sel) + colon = 0 + start = i + 1 + } + for ; i < len(path); i++ { + switch path[i] { + case '\\': + i++ + case '@': + if modifer == 0 && i > 0 && (path[i-1] == '.' 
|| path[i-1] == '|') { + modifer = i + } + case ':': + if modifer == 0 && colon == 0 && depth == 1 { + colon = i + } + case ',': + if depth == 1 { + pushSel() + } + case '"': + i++ + loop: + for ; i < len(path); i++ { + switch path[i] { + case '\\': + i++ + case '"': + break loop + } + } + case '[', '(', '{': + depth++ + case ']', ')', '}': + depth-- + if depth == 0 { + pushSel() + path = path[i+1:] + return sels, path, true + } + } + } + return +} + +// nameOfLast returns the name of the last component +func nameOfLast(path string) string { + for i := len(path) - 1; i >= 0; i-- { + if path[i] == '|' || path[i] == '.' { + if i > 0 { + if path[i-1] == '\\' { + continue + } + } + return path[i+1:] + } + } + return path +} + +func isSimpleName(component string) bool { + for i := 0; i < len(component); i++ { + if component[i] < ' ' { + return false + } + switch component[i] { + case '[', ']', '{', '}', '(', ')', '#', '|': + return false + } + } + return true +} + +func appendJSONString(dst []byte, s string) []byte { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] == '\\' || s[i] == '"' || s[i] > 126 { + d, _ := json.Marshal(s) + return append(dst, string(d)...) + } + } + dst = append(dst, '"') + dst = append(dst, s...) + dst = append(dst, '"') + return dst +} + +type parseContext struct { + json string + value Result + pipe string + piped bool + calcd bool + lines bool +} + +// Get searches json for the specified path. +// A path is in dot syntax, such as "name.last" or "age". +// When the value is found it's returned immediately. +// +// A path is a series of keys searated by a dot. +// A key may contain special wildcard characters '*' and '?'. +// To access an array value use the index as the key. +// To get the number of elements in an array or to access a child path, use +// the '#' character. +// The dot and wildcard character can be escaped with '\'. +// +// { +// "name": {"first": "Tom", "last": "Anderson"}, +// "age":37, +// "children": ["Sara","Alex","Jack"], +// "friends": [ +// {"first": "James", "last": "Murphy"}, +// {"first": "Roger", "last": "Craig"} +// ] +// } +// "name.last" >> "Anderson" +// "age" >> 37 +// "children" >> ["Sara","Alex","Jack"] +// "children.#" >> 3 +// "children.1" >> "Alex" +// "child*.2" >> "Jack" +// "c?ildren.0" >> "Sara" +// "friends.#.first" >> ["James","Roger"] +// +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// If you are consuming JSON from an unpredictable source then you may want to +// use the Valid function first. +func Get(json, path string) Result { + if len(path) > 1 { + if !DisableModifiers { + if path[0] == '@' { + // possible modifier + var ok bool + var npath string + var rjson string + npath, rjson, ok = execModifier(json, path) + if ok { + path = npath + if len(path) > 0 && (path[0] == '|' || path[0] == '.') { + res := Get(rjson, path[1:]) + res.Index = 0 + return res + } + return Parse(rjson) + } + } + } + if path[0] == '[' || path[0] == '{' { + // using a subselector path + kind := path[0] + var ok bool + var subs []subSelector + subs, path, ok = parseSubSelectors(path) + if ok { + if len(path) == 0 || (path[0] == '|' || path[0] == '.') { + var b []byte + b = append(b, kind) + var i int + for _, sub := range subs { + res := Get(json, sub.path) + if res.Exists() { + if i > 0 { + b = append(b, ',') + } + if kind == '{' { + if len(sub.name) > 0 { + if sub.name[0] == '"' && Valid(sub.name) { + b = append(b, sub.name...) 
+ } else { + b = appendJSONString(b, sub.name) + } + } else { + last := nameOfLast(sub.path) + if isSimpleName(last) { + b = appendJSONString(b, last) + } else { + b = appendJSONString(b, "_") + } + } + b = append(b, ':') + } + var raw string + if len(res.Raw) == 0 { + raw = res.String() + if len(raw) == 0 { + raw = "null" + } + } else { + raw = res.Raw + } + b = append(b, raw...) + i++ + } + } + b = append(b, kind+2) + var res Result + res.Raw = string(b) + res.Type = JSON + if len(path) > 0 { + res = res.Get(path[1:]) + } + res.Index = 0 + return res + } + } + } + } + + var i int + var c = &parseContext{json: json} + if len(path) >= 2 && path[0] == '.' && path[1] == '.' { + c.lines = true + parseArray(c, 0, path[2:]) + } else { + for ; i < len(c.json); i++ { + if c.json[i] == '{' { + i++ + parseObject(c, i, path) + break + } + if c.json[i] == '[' { + i++ + parseArray(c, i, path) + break + } + } + } + if c.piped { + res := c.value.Get(c.pipe) + res.Index = 0 + return res + } + fillIndex(json, c) + return c.value +} + +// GetBytes searches json for the specified path. +// If working with bytes, this method preferred over Get(string(data), path) +func GetBytes(json []byte, path string) Result { + return getBytes(json, path) +} + +// runeit returns the rune from the the \uXXXX +func runeit(json string) rune { + n, _ := strconv.ParseUint(json[:4], 16, 64) + return rune(n) +} + +// unescape unescapes a string +func unescape(json string) string { //, error) { + var str = make([]byte, 0, len(json)) + for i := 0; i < len(json); i++ { + switch { + default: + str = append(str, json[i]) + case json[i] < ' ': + return string(str) + case json[i] == '\\': + i++ + if i >= len(json) { + return string(str) + } + switch json[i] { + default: + return string(str) + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + case '"': + str = append(str, '"') + case 'u': + if i+5 > len(json) { + return string(str) + } + r := runeit(json[i+1:]) + i += 5 + if utf16.IsSurrogate(r) { + // need another code + if len(json[i:]) >= 6 && json[i] == '\\' && + json[i+1] == 'u' { + // we expect it to be correct so just consume it + r = utf16.DecodeRune(r, runeit(json[i+2:])) + i += 6 + } + } + // provide enough space to encode the largest utf8 possible + str = append(str, 0, 0, 0, 0, 0, 0, 0, 0) + n := utf8.EncodeRune(str[len(str)-8:], r) + str = str[:len(str)-8+n] + i-- // backtrack index by one + } + } + } + return string(str) +} + +// Less return true if a token is less than another token. +// The caseSensitive paramater is used when the tokens are Strings. 
+// The order when comparing two different type is: +// +// Null < False < Number < String < True < JSON +// +func (t Result) Less(token Result, caseSensitive bool) bool { + if t.Type < token.Type { + return true + } + if t.Type > token.Type { + return false + } + if t.Type == String { + if caseSensitive { + return t.Str < token.Str + } + return stringLessInsensitive(t.Str, token.Str) + } + if t.Type == Number { + return t.Num < token.Num + } + return t.Raw < token.Raw +} + +func stringLessInsensitive(a, b string) bool { + for i := 0; i < len(a) && i < len(b); i++ { + if a[i] >= 'A' && a[i] <= 'Z' { + if b[i] >= 'A' && b[i] <= 'Z' { + // both are uppercase, do nothing + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } else { + // a is uppercase, convert a to lowercase + if a[i]+32 < b[i] { + return true + } else if a[i]+32 > b[i] { + return false + } + } + } else if b[i] >= 'A' && b[i] <= 'Z' { + // b is uppercase, convert b to lowercase + if a[i] < b[i]+32 { + return true + } else if a[i] > b[i]+32 { + return false + } + } else { + // neither are uppercase + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } + } + return len(a) < len(b) +} + +// parseAny parses the next value from a json string. +// A Result is returned when the hit param is set. +// The return values are (i int, res Result, ok bool) +func parseAny(json string, i int, hit bool) (int, Result, bool) { + var res Result + var val string + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + i, val = parseSquash(json, i) + if hit { + res.Raw = val + res.Type = JSON + } + return i, res, true + } + if json[i] <= ' ' { + continue + } + switch json[i] { + case '"': + i++ + var vesc bool + var ok bool + i, val, vesc, ok = parseString(json, i) + if !ok { + return i, res, false + } + if hit { + res.Type = String + res.Raw = val + if vesc { + res.Str = unescape(val[1 : len(val)-1]) + } else { + res.Str = val[1 : len(val)-1] + } + } + return i, res, true + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i, val = parseNumber(json, i) + if hit { + res.Raw = val + res.Type = Number + res.Num, _ = strconv.ParseFloat(val, 64) + } + return i, res, true + case 't', 'f', 'n': + vc := json[i] + i, val = parseLiteral(json, i) + if hit { + res.Raw = val + switch vc { + case 't': + res.Type = True + case 'f': + res.Type = False + } + return i, res, true + } + } + } + return i, res, false +} + +var ( // used for testing + testWatchForFallback bool + testLastWasFallback bool +) + +// GetMany searches json for the multiple paths. +// The return value is a Result array where the number of items +// will be equal to the number of input paths. +func GetMany(json string, path ...string) []Result { + res := make([]Result, len(path)) + for i, path := range path { + res[i] = Get(json, path) + } + return res +} + +// GetManyBytes searches json for the multiple paths. +// The return value is a Result array where the number of items +// will be equal to the number of input paths. 
+func GetManyBytes(json []byte, path ...string) []Result { + res := make([]Result, len(path)) + for i, path := range path { + res[i] = GetBytes(json, path) + } + return res +} + +var fieldsmu sync.RWMutex +var fields = make(map[string]map[string]int) + +func assign(jsval Result, goval reflect.Value) { + if jsval.Type == Null { + return + } + switch goval.Kind() { + default: + case reflect.Ptr: + if !goval.IsNil() { + newval := reflect.New(goval.Elem().Type()) + assign(jsval, newval.Elem()) + goval.Elem().Set(newval.Elem()) + } else { + newval := reflect.New(goval.Type().Elem()) + assign(jsval, newval.Elem()) + goval.Set(newval) + } + case reflect.Struct: + fieldsmu.RLock() + sf := fields[goval.Type().String()] + fieldsmu.RUnlock() + if sf == nil { + fieldsmu.Lock() + sf = make(map[string]int) + for i := 0; i < goval.Type().NumField(); i++ { + f := goval.Type().Field(i) + tag := strings.Split(f.Tag.Get("json"), ",")[0] + if tag != "-" { + if tag != "" { + sf[tag] = i + sf[f.Name] = i + } else { + sf[f.Name] = i + } + } + } + fields[goval.Type().String()] = sf + fieldsmu.Unlock() + } + jsval.ForEach(func(key, value Result) bool { + if idx, ok := sf[key.Str]; ok { + f := goval.Field(idx) + if f.CanSet() { + assign(value, f) + } + } + return true + }) + case reflect.Slice: + if goval.Type().Elem().Kind() == reflect.Uint8 && + jsval.Type == String { + data, _ := base64.StdEncoding.DecodeString(jsval.String()) + goval.Set(reflect.ValueOf(data)) + } else { + jsvals := jsval.Array() + slice := reflect.MakeSlice(goval.Type(), len(jsvals), len(jsvals)) + for i := 0; i < len(jsvals); i++ { + assign(jsvals[i], slice.Index(i)) + } + goval.Set(slice) + } + case reflect.Array: + i, n := 0, goval.Len() + jsval.ForEach(func(_, value Result) bool { + if i == n { + return false + } + assign(value, goval.Index(i)) + i++ + return true + }) + case reflect.Map: + if goval.Type().Key().Kind() == reflect.String && + goval.Type().Elem().Kind() == reflect.Interface { + goval.Set(reflect.ValueOf(jsval.Value())) + } + case reflect.Interface: + goval.Set(reflect.ValueOf(jsval.Value())) + case reflect.Bool: + goval.SetBool(jsval.Bool()) + case reflect.Float32, reflect.Float64: + goval.SetFloat(jsval.Float()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + goval.SetInt(jsval.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + goval.SetUint(jsval.Uint()) + case reflect.String: + goval.SetString(jsval.String()) + } + if len(goval.Type().PkgPath()) > 0 { + v := goval.Addr() + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + u.UnmarshalJSON([]byte(jsval.Raw)) + } + } + } +} + +var validate uintptr = 1 + +// UnmarshalValidationEnabled provides the option to disable JSON validation +// during the Unmarshal routine. Validation is enabled by default. +// +// Deprecated: Use encoder/json.Unmarshal instead +func UnmarshalValidationEnabled(enabled bool) { + if enabled { + atomic.StoreUintptr(&validate, 1) + } else { + atomic.StoreUintptr(&validate, 0) + } +} + +// Unmarshal loads the JSON data into the value pointed to by v. +// +// This function works almost identically to json.Unmarshal except that +// gjson.Unmarshal will automatically attempt to convert JSON values to any Go +// type. For example, the JSON string "100" or the JSON number 100 can be +// equally assigned to Go string, int, byte, uint64, etc. This rule applies to +// all types. 
+// +// Deprecated: Use encoder/json.Unmarshal instead +func Unmarshal(data []byte, v interface{}) error { + if atomic.LoadUintptr(&validate) == 1 { + _, ok := validpayload(data, 0) + if !ok { + return errors.New("invalid json") + } + } + if v := reflect.ValueOf(v); v.Kind() == reflect.Ptr { + assign(ParseBytes(data), v) + } + return nil +} + +func validpayload(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + i, ok = validany(data, i) + if !ok { + return i, false + } + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + } + } + return i, true + case ' ', '\t', '\n', '\r': + continue + } + } + return i, false +} +func validany(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '{': + return validobject(data, i+1) + case '[': + return validarray(data, i+1) + case '"': + return validstring(data, i+1) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return validnumber(data, i+1) + case 't': + return validtrue(data, i+1) + case 'f': + return validfalse(data, i+1) + case 'n': + return validnull(data, i+1) + } + } + return i, false +} +func validobject(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '}': + return i + 1, true + case '"': + key: + if i, ok = validstring(data, i+1); !ok { + return i, false + } + if i, ok = validcolon(data, i); !ok { + return i, false + } + if i, ok = validany(data, i); !ok { + return i, false + } + if i, ok = validcomma(data, i, '}'); !ok { + return i, false + } + if data[i] == '}' { + return i + 1, true + } + i++ + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '"': + goto key + } + } + return i, false + } + } + return i, false +} +func validcolon(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case ':': + return i + 1, true + } + } + return i, false +} +func validcomma(data []byte, i int, end byte) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case ',': + return i, true + case end: + return i, true + } + } + return i, false +} +func validarray(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + for ; i < len(data); i++ { + if i, ok = validany(data, i); !ok { + return i, false + } + if i, ok = validcomma(data, i, ']'); !ok { + return i, false + } + if data[i] == ']' { + return i + 1, true + } + } + case ' ', '\t', '\n', '\r': + continue + case ']': + return i + 1, true + } + } + return i, false +} +func validstring(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + if data[i] < ' ' { + return i, false + } else if data[i] == '\\' { + i++ + if i == len(data) { + return i, false + } + switch data[i] { + default: + return i, false + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + case 'u': + for j := 0; j < 4; j++ { + i++ + if i >= len(data) { + return i, false + } + if !((data[i] >= '0' && data[i] <= '9') || + (data[i] >= 'a' && data[i] <= 'f') || + (data[i] >= 'A' && data[i] <= 'F')) { + return i, false + } + } + } + } else if data[i] == '"' { + return 
i + 1, true + } + } + return i, false +} +func validnumber(data []byte, i int) (outi int, ok bool) { + i-- + // sign + if data[i] == '-' { + i++ + } + // int + if i == len(data) { + return i, false + } + if data[i] == '0' { + i++ + } else { + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + // frac + if i == len(data) { + return i, true + } + if data[i] == '.' { + i++ + if i == len(data) { + return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + i++ + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + // exp + if i == len(data) { + return i, true + } + if data[i] == 'e' || data[i] == 'E' { + i++ + if i == len(data) { + return i, false + } + if data[i] == '+' || data[i] == '-' { + i++ + } + if i == len(data) { + return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + i++ + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + return i, true +} + +func validtrue(data []byte, i int) (outi int, ok bool) { + if i+3 <= len(data) && data[i] == 'r' && data[i+1] == 'u' && + data[i+2] == 'e' { + return i + 3, true + } + return i, false +} +func validfalse(data []byte, i int) (outi int, ok bool) { + if i+4 <= len(data) && data[i] == 'a' && data[i+1] == 'l' && + data[i+2] == 's' && data[i+3] == 'e' { + return i + 4, true + } + return i, false +} +func validnull(data []byte, i int) (outi int, ok bool) { + if i+3 <= len(data) && data[i] == 'u' && data[i+1] == 'l' && + data[i+2] == 'l' { + return i + 3, true + } + return i, false +} + +// Valid returns true if the input is valid json. +// +// if !gjson.Valid(json) { +// return errors.New("invalid json") +// } +// value := gjson.Get(json, "name.last") +// +func Valid(json string) bool { + _, ok := validpayload(stringBytes(json), 0) + return ok +} + +// ValidBytes returns true if the input is valid json. +// +// if !gjson.Valid(json) { +// return errors.New("invalid json") +// } +// value := gjson.Get(json, "name.last") +// +// If working with bytes, this method preferred over ValidBytes(string(data)) +// +func ValidBytes(json []byte) bool { + _, ok := validpayload(json, 0) + return ok +} + +func parseUint(s string) (n uint64, ok bool) { + var i int + if i == len(s) { + return 0, false + } + for ; i < len(s); i++ { + if s[i] >= '0' && s[i] <= '9' { + n = n*10 + uint64(s[i]-'0') + } else { + return 0, false + } + } + return n, true +} + +func parseInt(s string) (n int64, ok bool) { + var i int + var sign bool + if len(s) > 0 && s[0] == '-' { + sign = true + i++ + } + if i == len(s) { + return 0, false + } + for ; i < len(s); i++ { + if s[i] >= '0' && s[i] <= '9' { + n = n*10 + int64(s[i]-'0') + } else { + return 0, false + } + } + if sign { + return n * -1, true + } + return n, true +} + +const minUint53 = 0 +const maxUint53 = 4503599627370495 +const minInt53 = -2251799813685248 +const maxInt53 = 2251799813685247 + +func floatToUint(f float64) (n uint64, ok bool) { + n = uint64(f) + if float64(n) == f && n >= minUint53 && n <= maxUint53 { + return n, true + } + return 0, false +} + +func floatToInt(f float64) (n int64, ok bool) { + n = int64(f) + if float64(n) == f && n >= minInt53 && n <= maxInt53 { + return n, true + } + return 0, false +} + +// execModifier parses the path to find a matching modifier function. 
+// then input expects that the path already starts with a '@' +func execModifier(json, path string) (pathOut, res string, ok bool) { + name := path[1:] + var hasArgs bool + for i := 1; i < len(path); i++ { + if path[i] == ':' { + pathOut = path[i+1:] + name = path[1:i] + hasArgs = len(pathOut) > 0 + break + } + if path[i] == '|' { + pathOut = path[i:] + name = path[1:i] + break + } + if path[i] == '.' { + pathOut = path[i:] + name = path[1:i] + break + } + } + if fn, ok := modifiers[name]; ok { + var args string + if hasArgs { + var parsedArgs bool + switch pathOut[0] { + case '{', '[', '"': + res := Parse(pathOut) + if res.Exists() { + _, args = parseSquash(pathOut, 0) + pathOut = pathOut[len(args):] + parsedArgs = true + } + } + if !parsedArgs { + idx := strings.IndexByte(pathOut, '|') + if idx == -1 { + args = pathOut + pathOut = "" + } else { + args = pathOut[:idx] + pathOut = pathOut[idx:] + } + } + } + return pathOut, fn(json, args), true + } + return pathOut, res, false +} + +// DisableModifiers will disable the modifier syntax +var DisableModifiers = false + +var modifiers = map[string]func(json, arg string) string{ + "pretty": modPretty, + "ugly": modUgly, + "reverse": modReverse, +} + +// AddModifier binds a custom modifier command to the GJSON syntax. +// This operation is not thread safe and should be executed prior to +// using all other gjson function. +func AddModifier(name string, fn func(json, arg string) string) { + modifiers[name] = fn +} + +// ModifierExists returns true when the specified modifier exists. +func ModifierExists(name string, fn func(json, arg string) string) bool { + _, ok := modifiers[name] + return ok +} + +// @pretty modifier makes the json look nice. +func modPretty(json, arg string) string { + if len(arg) > 0 { + opts := *pretty.DefaultOptions + Parse(arg).ForEach(func(key, value Result) bool { + switch key.String() { + case "sortKeys": + opts.SortKeys = value.Bool() + case "indent": + opts.Indent = value.String() + case "prefix": + opts.Prefix = value.String() + case "width": + opts.Width = int(value.Int()) + } + return true + }) + return bytesString(pretty.PrettyOptions(stringBytes(json), &opts)) + } + return bytesString(pretty.Pretty(stringBytes(json))) +} + +// @ugly modifier removes all whitespace. +func modUgly(json, arg string) string { + return bytesString(pretty.Ugly(stringBytes(json))) +} + +// @reverse reverses array elements or root object members. +func modReverse(json, arg string) string { + res := Parse(json) + if res.IsArray() { + var values []Result + res.ForEach(func(_, value Result) bool { + values = append(values, value) + return true + }) + out := make([]byte, 0, len(json)) + out = append(out, '[') + for i, j := len(values)-1, 0; i >= 0; i, j = i-1, j+1 { + if j > 0 { + out = append(out, ',') + } + out = append(out, values[i].Raw...) + } + out = append(out, ']') + return bytesString(out) + } + if res.IsObject() { + var keyValues []Result + res.ForEach(func(key, value Result) bool { + keyValues = append(keyValues, key, value) + return true + }) + out := make([]byte, 0, len(json)) + out = append(out, '{') + for i, j := len(keyValues)-2, 0; i >= 0; i, j = i-2, j+1 { + if j > 0 { + out = append(out, ',') + } + out = append(out, keyValues[i+0].Raw...) + out = append(out, ':') + out = append(out, keyValues[i+1].Raw...) 
+ } + out = append(out, '}') + return bytesString(out) + } + return json +} diff --git a/vendor/github.com/tidwall/gjson/gjson_gae.go b/vendor/github.com/tidwall/gjson/gjson_gae.go new file mode 100644 index 00000000..95869039 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/gjson_gae.go @@ -0,0 +1,18 @@ +//+build appengine js + +package gjson + +func getBytes(json []byte, path string) Result { + return Get(string(json), path) +} +func fillIndex(json string, c *parseContext) { + // noop. Use zero for the Index value. +} + +func stringBytes(s string) []byte { + return []byte(s) +} + +func bytesString(b []byte) string { + return string(b) +} diff --git a/vendor/github.com/tidwall/gjson/gjson_ngae.go b/vendor/github.com/tidwall/gjson/gjson_ngae.go new file mode 100644 index 00000000..bc608b53 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/gjson_ngae.go @@ -0,0 +1,81 @@ +//+build !appengine +//+build !js + +package gjson + +import ( + "reflect" + "unsafe" +) + +// getBytes casts the input json bytes to a string and safely returns the +// results as uniquely allocated data. This operation is intended to minimize +// copies and allocations for the large json string->[]byte. +func getBytes(json []byte, path string) Result { + var result Result + if json != nil { + // unsafe cast to string + result = Get(*(*string)(unsafe.Pointer(&json)), path) + // safely get the string headers + rawhi := *(*reflect.StringHeader)(unsafe.Pointer(&result.Raw)) + strhi := *(*reflect.StringHeader)(unsafe.Pointer(&result.Str)) + // create byte slice headers + rawh := reflect.SliceHeader{Data: rawhi.Data, Len: rawhi.Len} + strh := reflect.SliceHeader{Data: strhi.Data, Len: strhi.Len} + if strh.Data == 0 { + // str is nil + if rawh.Data == 0 { + // raw is nil + result.Raw = "" + } else { + // raw has data, safely copy the slice header to a string + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + } + result.Str = "" + } else if rawh.Data == 0 { + // raw is nil + result.Raw = "" + // str has data, safely copy the slice header to a string + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } else if strh.Data >= rawh.Data && + int(strh.Data)+strh.Len <= int(rawh.Data)+rawh.Len { + // Str is a substring of Raw. + start := int(strh.Data - rawh.Data) + // safely copy the raw slice header + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + // substring the raw + result.Str = result.Raw[start : start+strh.Len] + } else { + // safely copy both the raw and str slice headers to strings + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } + } + return result +} + +// fillIndex finds the position of Raw data and assigns it to the Index field +// of the resulting value. If the position cannot be found then Index zero is +// used instead. 
+func fillIndex(json string, c *parseContext) { + if len(c.value.Raw) > 0 && !c.calcd { + jhdr := *(*reflect.StringHeader)(unsafe.Pointer(&json)) + rhdr := *(*reflect.StringHeader)(unsafe.Pointer(&(c.value.Raw))) + c.value.Index = int(rhdr.Data - jhdr.Data) + if c.value.Index < 0 || c.value.Index >= len(json) { + c.value.Index = 0 + } + } +} + +func stringBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: (*reflect.StringHeader)(unsafe.Pointer(&s)).Data, + Len: len(s), + Cap: len(s), + })) +} + +func bytesString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/vendor/github.com/tidwall/gjson/go.mod b/vendor/github.com/tidwall/gjson/go.mod new file mode 100644 index 00000000..d851688c --- /dev/null +++ b/vendor/github.com/tidwall/gjson/go.mod @@ -0,0 +1,8 @@ +module github.com/tidwall/gjson + +go 1.12 + +require ( + github.com/tidwall/match v1.0.1 + github.com/tidwall/pretty v1.0.0 +) diff --git a/vendor/github.com/tidwall/gjson/go.sum b/vendor/github.com/tidwall/gjson/go.sum new file mode 100644 index 00000000..a4a2d872 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/go.sum @@ -0,0 +1,4 @@ +github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= diff --git a/vendor/github.com/tidwall/gjson/logo.png b/vendor/github.com/tidwall/gjson/logo.png new file mode 100644 index 00000000..17a8bbe9 Binary files /dev/null and b/vendor/github.com/tidwall/gjson/logo.png differ diff --git a/vendor/github.com/tidwall/grect/LICENSE.md b/vendor/github.com/tidwall/grect/LICENSE.md new file mode 100644 index 00000000..58f5819a --- /dev/null +++ b/vendor/github.com/tidwall/grect/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/grect/README.md b/vendor/github.com/tidwall/grect/README.md new file mode 100644 index 00000000..04a8bf04 --- /dev/null +++ b/vendor/github.com/tidwall/grect/README.md @@ -0,0 +1,25 @@ +GRECT +==== + +Quickly get the outer rectangle for GeoJSON, WKT, WKB. 
+ +```go + r := grect.Get(`{ + "type": "Polygon", + "coordinates": [ + [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], + [100.0, 1.0], [100.0, 0.0] ] + ] + }`) + fmt.Printf("%v %v\n", r.Min, r.Max) + // Output: + // [100 0] [101 1] +``` + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +GRECT source code is available under the MIT [License](/LICENSE). + diff --git a/vendor/github.com/tidwall/grect/grect.go b/vendor/github.com/tidwall/grect/grect.go new file mode 100644 index 00000000..13eb761a --- /dev/null +++ b/vendor/github.com/tidwall/grect/grect.go @@ -0,0 +1,337 @@ +package grect + +import ( + "strconv" + "strings" + + "github.com/tidwall/gjson" +) + +type Rect struct { + Min, Max []float64 +} + +func (r Rect) String() string { + diff := len(r.Min) != len(r.Max) + if !diff { + for i := 0; i < len(r.Min); i++ { + if r.Min[i] != r.Max[i] { + diff = true + break + } + } + } + var buf []byte + buf = append(buf, '[') + for i, v := range r.Min { + if i > 0 { + buf = append(buf, ' ') + } + buf = append(buf, strconv.FormatFloat(v, 'f', -1, 64)...) + } + if diff { + buf = append(buf, ']', ',', '[') + for i, v := range r.Max { + if i > 0 { + buf = append(buf, ' ') + } + buf = append(buf, strconv.FormatFloat(v, 'f', -1, 64)...) + } + } + buf = append(buf, ']') + return string(buf) +} + +func normalize(min, max []float64) (nmin, nmax []float64) { + if len(max) == 0 { + return min, min + } else if len(max) != len(min) { + if len(max) < len(min) { + max = append(max, min[len(max):]...) + } else if len(min) < len(max) { + min = append(min, max[len(min):]...) + } + } + match := true + for i := 0; i < len(min); i++ { + if min[i] != max[i] { + if match { + match = false + } + if min[i] > max[i] { + min[i], max[i] = max[i], min[i] + } + } + } + if match { + return min, min + } + return min, max +} + +func Get(s string) Rect { + var i int + var ws bool + var min, max []float64 + for ; i < len(s); i++ { + switch s[i] { + default: + continue + case ' ', '\t', '\r', '\n': + ws = true + continue + case '[': + min, max, i = getRect(s, i) + case '{': + min, max, i = getGeoJSON(s, i) + case 0x00, 0x01: + if !ws { + // return parseWKB(s, i) + } + case 'p', 'P', 'l', 'L', 'm', 'M', 'g', 'G': + min, max, i = getWKT(s, i) + } + break + } + min, max = normalize(min, max) + return Rect{Min: min, Max: max} +} + +func getRect(s string, i int) (min, max []float64, ri int) { + a := s[i:] + parts := strings.Split(a, ",") + for i := 0; i < len(parts) && i < 2; i++ { + part := parts[i] + if len(part) > 0 && (part[0] <= ' ' || part[len(part)-1] <= ' ') { + part = strings.TrimSpace(part) + } + if len(part) >= 2 && part[0] == '[' && part[len(part)-1] == ']' { + pieces := strings.Split(part[1:len(part)-1], " ") + if i == 0 { + min = make([]float64, 0, len(pieces)) + } else { + max = make([]float64, 0, len(pieces)) + } + for j := 0; j < len(pieces); j++ { + piece := pieces[j] + if piece != "" { + n, _ := strconv.ParseFloat(piece, 64) + if i == 0 { + min = append(min, n) + } else { + max = append(max, n) + } + } + } + } + } + + // normalize + if len(parts) == 1 { + max = min + } else { + min, max = normalize(min, max) + } + + return min, max, len(s) +} + +func union(min1, max1, min2, max2 []float64) (umin, umax []float64) { + for i := 0; i < len(min1) || i < len(min2); i++ { + if i >= len(min1) { + // just copy min2 + umin = append(umin, min2[i]) + umax = append(umax, max2[i]) + } else if i >= len(min2) { + // just copy min1 + umin = append(umin, min1[i]) + umax = append(umax, max1[i]) + } else { + if 
min1[i] < min2[i] { + umin = append(umin, min1[i]) + } else { + umin = append(umin, min2[i]) + } + if max1[i] > max2[i] { + umax = append(umax, max1[i]) + } else { + umax = append(umax, max2[i]) + } + } + } + return umin, umax +} + +func getWKT(s string, i int) (min, max []float64, ri int) { + switch s[i] { + default: + for ; i < len(s); i++ { + if s[i] == ',' { + return nil, nil, i + } + if s[i] == '(' { + return getWKTAny(s, i) + } + } + return nil, nil, i + case 'g', 'G': + if len(s)-i < 18 { + return nil, nil, i + } + return getWKTGeometryCollection(s, i+18) + } +} + +func getWKTAny(s string, i int) (min, max []float64, ri int) { + min, max = make([]float64, 0, 4), make([]float64, 0, 4) + var depth int + var ni int + var idx int +loop: + for ; i < len(s); i++ { + switch s[i] { + default: + if ni == 0 { + ni = i + } + case '(': + depth++ + case ')', ' ', '\t', '\r', '\n', ',': + if ni != 0 { + n, _ := strconv.ParseFloat(s[ni:i], 64) + if idx >= len(min) { + min = append(min, n) + max = append(max, n) + } else { + if n < min[idx] { + min[idx] = n + } else if n > max[idx] { + max[idx] = n + } + } + idx++ + ni = 0 + } + switch s[i] { + case ')': + idx = 0 + depth-- + if depth == 0 { + i++ + break loop + } + case ',': + idx = 0 + } + } + } + return min, max, i +} + +func getWKTGeometryCollection(s string, i int) (min, max []float64, ri int) { + var depth int + for ; i < len(s); i++ { + if s[i] == ',' || s[i] == ')' { + // do not increment the index + return nil, nil, i + } + if s[i] == '(' { + depth++ + i++ + break + } + } +next: + for ; i < len(s); i++ { + switch s[i] { + case 'p', 'P', 'l', 'L', 'm', 'M', 'g', 'G': + var min2, max2 []float64 + min2, max2, i = getWKT(s, i) + min, max = union(min, max, min2, max2) + for ; i < len(s); i++ { + if s[i] == ',' { + i++ + goto next + } + if s[i] == ')' { + i++ + goto done + } + } + case ' ', '\t', '\r', '\n': + continue + default: + goto end_early + } + } +end_early: + // just balance the parens + for ; i < len(s); i++ { + if s[i] == '(' { + depth++ + } else if s[i] == ')' { + depth-- + if depth == 0 { + i++ + break + } + } + } +done: + return min, max, i +} +func getGeoJSON(s string, i int) (min, max []float64, ri int) { + json := s[i:] + switch gjson.Get(json, "type").String() { + default: + min, max = getMinMaxBrackets(gjson.Get(json, "coordinates").Raw) + case "Feature": + min, max, _ = getGeoJSON(gjson.Get(json, "geometry").String(), 0) + case "FeatureCollection": + for _, json := range gjson.Get(json, "features").Array() { + nmin, nmax, _ := getGeoJSON(json.String(), 0) + min, max = union(min, max, nmin, nmax) + } + case "GeometryCollection": + for _, json := range gjson.Get(json, "geometries").Array() { + nmin, nmax, _ := getGeoJSON(json.String(), 0) + min, max = union(min, max, nmin, nmax) + } + } + return min, max, len(json) +} + +func getMinMaxBrackets(s string) (min, max []float64) { + var ni int + var idx int + for i := 0; i < len(s); i++ { + switch s[i] { + default: + if ni == 0 { + ni = i + } + case '[', ',', ']', ' ', '\t', '\r', '\n': + if ni > 0 { + n, _ := strconv.ParseFloat(s[ni:i], 64) + if idx >= len(min) { + min = append(min, n) + max = append(max, n) + } else { + if n < min[idx] { + min[idx] = n + } else if n > max[idx] { + max[idx] = n + } + } + ni = 0 + idx++ + } + if s[i] == ']' { + idx = 0 + } + + } + } + + return +} diff --git a/vendor/github.com/tidwall/match/.travis.yml b/vendor/github.com/tidwall/match/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ 
b/vendor/github.com/tidwall/match/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/tidwall/match/LICENSE b/vendor/github.com/tidwall/match/LICENSE new file mode 100644 index 00000000..58f5819a --- /dev/null +++ b/vendor/github.com/tidwall/match/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/match/README.md b/vendor/github.com/tidwall/match/README.md new file mode 100644 index 00000000..2aa5bc38 --- /dev/null +++ b/vendor/github.com/tidwall/match/README.md @@ -0,0 +1,32 @@ +Match +===== +Build Status +GoDoc + +Match is a very simple pattern matcher where '*' matches on any +number characters and '?' matches on any one character. + +Installing +---------- + +``` +go get -u github.com/tidwall/match +``` + +Example +------- + +```go +match.Match("hello", "*llo") +match.Match("jello", "?ello") +match.Match("hello", "h*o") +``` + + +Contact +------- +Josh Baker [@tidwall](http://twitter.com/tidwall) + +License +------- +Redcon source code is available under the MIT [License](/LICENSE). diff --git a/vendor/github.com/tidwall/match/match.go b/vendor/github.com/tidwall/match/match.go new file mode 100644 index 00000000..fcfe998b --- /dev/null +++ b/vendor/github.com/tidwall/match/match.go @@ -0,0 +1,181 @@ +// Match provides a simple pattern matcher with unicode support. +package match + +import "unicode/utf8" + +// Match returns true if str matches pattern. This is a very +// simple wildcard match where '*' matches on any number characters +// and '?' matches on any one character. + +// pattern: +// { term } +// term: +// '*' matches any sequence of non-Separator characters +// '?' 
matches any single non-Separator character +// c matches character c (c != '*', '?', '\\') +// '\\' c matches character c +// +func Match(str, pattern string) bool { + if pattern == "*" { + return true + } + return deepMatch(str, pattern) +} +func deepMatch(str, pattern string) bool { + for len(pattern) > 0 { + if pattern[0] > 0x7f { + return deepMatchRune(str, pattern) + } + switch pattern[0] { + default: + if len(str) == 0 { + return false + } + if str[0] > 0x7f { + return deepMatchRune(str, pattern) + } + if str[0] != pattern[0] { + return false + } + case '?': + if len(str) == 0 { + return false + } + case '*': + return deepMatch(str, pattern[1:]) || + (len(str) > 0 && deepMatch(str[1:], pattern)) + } + str = str[1:] + pattern = pattern[1:] + } + return len(str) == 0 && len(pattern) == 0 +} + +func deepMatchRune(str, pattern string) bool { + var sr, pr rune + var srsz, prsz int + + // read the first rune ahead of time + if len(str) > 0 { + if str[0] > 0x7f { + sr, srsz = utf8.DecodeRuneInString(str) + } else { + sr, srsz = rune(str[0]), 1 + } + } else { + sr, srsz = utf8.RuneError, 0 + } + if len(pattern) > 0 { + if pattern[0] > 0x7f { + pr, prsz = utf8.DecodeRuneInString(pattern) + } else { + pr, prsz = rune(pattern[0]), 1 + } + } else { + pr, prsz = utf8.RuneError, 0 + } + // done reading + for pr != utf8.RuneError { + switch pr { + default: + if srsz == utf8.RuneError { + return false + } + if sr != pr { + return false + } + case '?': + if srsz == utf8.RuneError { + return false + } + case '*': + return deepMatchRune(str, pattern[prsz:]) || + (srsz > 0 && deepMatchRune(str[srsz:], pattern)) + } + str = str[srsz:] + pattern = pattern[prsz:] + // read the next runes + if len(str) > 0 { + if str[0] > 0x7f { + sr, srsz = utf8.DecodeRuneInString(str) + } else { + sr, srsz = rune(str[0]), 1 + } + } else { + sr, srsz = utf8.RuneError, 0 + } + if len(pattern) > 0 { + if pattern[0] > 0x7f { + pr, prsz = utf8.DecodeRuneInString(pattern) + } else { + pr, prsz = rune(pattern[0]), 1 + } + } else { + pr, prsz = utf8.RuneError, 0 + } + // done reading + } + + return srsz == 0 && prsz == 0 +} + +var maxRuneBytes = func() []byte { + b := make([]byte, 4) + if utf8.EncodeRune(b, '\U0010FFFF') != 4 { + panic("invalid rune encoding") + } + return b +}() + +// Allowable parses the pattern and determines the minimum and maximum allowable +// values that the pattern can represent. +// When the max cannot be determined, 'true' will be returned +// for infinite. +func Allowable(pattern string) (min, max string) { + if pattern == "" || pattern[0] == '*' { + return "", "" + } + + minb := make([]byte, 0, len(pattern)) + maxb := make([]byte, 0, len(pattern)) + var wild bool + for i := 0; i < len(pattern); i++ { + if pattern[i] == '*' { + wild = true + break + } + if pattern[i] == '?' { + minb = append(minb, 0) + maxb = append(maxb, maxRuneBytes...) + } else { + minb = append(minb, pattern[i]) + maxb = append(maxb, pattern[i]) + } + } + if wild { + r, n := utf8.DecodeLastRune(maxb) + if r != utf8.RuneError { + if r < utf8.MaxRune { + r++ + if r > 0x7f { + b := make([]byte, 4) + nn := utf8.EncodeRune(b, r) + maxb = append(maxb[:len(maxb)-n], b[:nn]...) + } else { + maxb = append(maxb[:len(maxb)-n], byte(r)) + } + } + } + } + return string(minb), string(maxb) +} + +// IsPattern returns true if the string is a pattern. +func IsPattern(str string) bool { + for i := 0; i < len(str); i++ { + if str[i] == '*' || str[i] == '?' 
{ + return true + } + } + return false +} diff --git a/vendor/github.com/tidwall/pretty/.travis.yml b/vendor/github.com/tidwall/pretty/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/vendor/github.com/tidwall/pretty/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/tidwall/pretty/LICENSE b/vendor/github.com/tidwall/pretty/LICENSE new file mode 100644 index 00000000..993b83f2 --- /dev/null +++ b/vendor/github.com/tidwall/pretty/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2017 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/pretty/README.md b/vendor/github.com/tidwall/pretty/README.md new file mode 100644 index 00000000..d2b8864d --- /dev/null +++ b/vendor/github.com/tidwall/pretty/README.md @@ -0,0 +1,124 @@ +# Pretty +[![Build Status](https://img.shields.io/travis/tidwall/pretty.svg?style=flat-square)](https://travis-ci.org/tidwall/prettty) +[![Coverage Status](https://img.shields.io/badge/coverage-100%25-brightgreen.svg?style=flat-square)](http://gocover.io/github.com/tidwall/pretty) +[![GoDoc](https://img.shields.io/badge/api-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/tidwall/pretty) + + +Pretty is a Go package that provides [fast](#performance) methods for formatting JSON for human readability, or to compact JSON for smaller payloads. + +Getting Started +=============== + +## Installing + +To start using Pretty, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/pretty +``` + +This will retrieve the library. + +## Pretty + +Using this example: + +```json +{"name": {"first":"Tom","last":"Anderson"}, "age":37, +"children": ["Sara","Alex","Jack"], +"fav.movie": "Deer Hunter", "friends": [ + {"first": "Janet", "last": "Murphy", "age": 44} + ]} +``` + +The following code: +```go +result = pretty.Pretty(example) +``` + +Will format the json to: + +```json +{ + "name": { + "first": "Tom", + "last": "Anderson" + }, + "age": 37, + "children": ["Sara", "Alex", "Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + { + "first": "Janet", + "last": "Murphy", + "age": 44 + } + ] +} +``` + +## Color + +Color will colorize the json for outputing to the screen. + +```json +result = pretty.Color(json, nil) +``` + +Will add color to the result for printing to the terminal. +The second param is used for a customizing the style, and passing nil will use the default `pretty.TerminalStyle`. 
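
A minimal combined sketch, assuming a standalone `main` package and an abbreviated form of the example document above:

```go
package main

import (
	"fmt"

	"github.com/tidwall/pretty"
)

func main() {
	// Abbreviated form of the example document above.
	example := []byte(`{"name":{"first":"Tom","last":"Anderson"},"age":37,"children":["Sara","Alex","Jack"]}`)

	// Indent the JSON, then colorize the result with the default terminal style.
	fmt.Println(string(pretty.Color(pretty.Pretty(example), nil)))
}
```
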
+ +## Ugly + +The following code: +```go +result = pretty.Ugly(example) +``` + +Will format the json to: + +```json +{"name":{"first":"Tom","last":"Anderson"},"age":37,"children":["Sara","Alex","Jack"],"fav.movie":"Deer Hunter","friends":[{"first":"Janet","last":"Murphy","age":44}]}``` +``` + + +## Customized output + +There's a `PrettyOptions(json, opts)` function which allows for customizing the output with the following options: + +```go +type Options struct { + // Width is an max column width for single line arrays + // Default is 80 + Width int + // Prefix is a prefix for all lines + // Default is an empty string + Prefix string + // Indent is the nested indentation + // Default is two spaces + Indent string + // SortKeys will sort the keys alphabetically + // Default is false + SortKeys bool +} +``` +## Performance + +Benchmarks of Pretty alongside the builtin `encoding/json` Indent/Compact methods. +``` +BenchmarkPretty-8 1000000 1283 ns/op 720 B/op 2 allocs/op +BenchmarkUgly-8 3000000 426 ns/op 240 B/op 1 allocs/op +BenchmarkUglyInPlace-8 5000000 340 ns/op 0 B/op 0 allocs/op +BenchmarkJSONIndent-8 300000 4628 ns/op 1069 B/op 4 allocs/op +BenchmarkJSONCompact-8 1000000 2469 ns/op 758 B/op 4 allocs/op +``` + +*These benchmarks were run on a MacBook Pro 15" 2.8 GHz Intel Core i7 using Go 1.7.* + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +Pretty source code is available under the MIT [License](/LICENSE). + diff --git a/vendor/github.com/tidwall/pretty/pretty.go b/vendor/github.com/tidwall/pretty/pretty.go new file mode 100644 index 00000000..0a922d03 --- /dev/null +++ b/vendor/github.com/tidwall/pretty/pretty.go @@ -0,0 +1,432 @@ +package pretty + +import ( + "sort" +) + +// Options is Pretty options +type Options struct { + // Width is an max column width for single line arrays + // Default is 80 + Width int + // Prefix is a prefix for all lines + // Default is an empty string + Prefix string + // Indent is the nested indentation + // Default is two spaces + Indent string + // SortKeys will sort the keys alphabetically + // Default is false + SortKeys bool +} + +// DefaultOptions is the default options for pretty formats. +var DefaultOptions = &Options{Width: 80, Prefix: "", Indent: " ", SortKeys: false} + +// Pretty converts the input json into a more human readable format where each +// element is on it's own line with clear indentation. +func Pretty(json []byte) []byte { return PrettyOptions(json, nil) } + +// PrettyOptions is like Pretty but with customized options. +func PrettyOptions(json []byte, opts *Options) []byte { + if opts == nil { + opts = DefaultOptions + } + buf := make([]byte, 0, len(json)) + if len(opts.Prefix) != 0 { + buf = append(buf, opts.Prefix...) + } + buf, _, _, _ = appendPrettyAny(buf, json, 0, true, + opts.Width, opts.Prefix, opts.Indent, opts.SortKeys, + 0, 0, -1) + if len(buf) > 0 { + buf = append(buf, '\n') + } + return buf +} + +// Ugly removes insignificant space characters from the input json byte slice +// and returns the compacted result. +func Ugly(json []byte) []byte { + buf := make([]byte, 0, len(json)) + return ugly(buf, json) +} + +// UglyInPlace removes insignificant space characters from the input json +// byte slice and returns the compacted result. This method reuses the +// input json buffer to avoid allocations. Do not use the original bytes +// slice upon return. 
+func UglyInPlace(json []byte) []byte { return ugly(json, json) } + +func ugly(dst, src []byte) []byte { + dst = dst[:0] + for i := 0; i < len(src); i++ { + if src[i] > ' ' { + dst = append(dst, src[i]) + if src[i] == '"' { + for i = i + 1; i < len(src); i++ { + dst = append(dst, src[i]) + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + } + } + } + return dst +} + +func appendPrettyAny(buf, json []byte, i int, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) { + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + if json[i] == '"' { + return appendPrettyString(buf, json, i, nl) + } + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + return appendPrettyNumber(buf, json, i, nl) + } + if json[i] == '{' { + return appendPrettyObject(buf, json, i, '{', '}', pretty, width, prefix, indent, sortkeys, tabs, nl, max) + } + if json[i] == '[' { + return appendPrettyObject(buf, json, i, '[', ']', pretty, width, prefix, indent, sortkeys, tabs, nl, max) + } + switch json[i] { + case 't': + return append(buf, 't', 'r', 'u', 'e'), i + 4, nl, true + case 'f': + return append(buf, 'f', 'a', 'l', 's', 'e'), i + 5, nl, true + case 'n': + return append(buf, 'n', 'u', 'l', 'l'), i + 4, nl, true + } + } + return buf, i, nl, true +} + +type pair struct { + kstart, kend int + vstart, vend int +} + +type byKey struct { + sorted bool + json []byte + pairs []pair +} + +func (arr *byKey) Len() int { + return len(arr.pairs) +} +func (arr *byKey) Less(i, j int) bool { + key1 := arr.json[arr.pairs[i].kstart+1 : arr.pairs[i].kend-1] + key2 := arr.json[arr.pairs[j].kstart+1 : arr.pairs[j].kend-1] + return string(key1) < string(key2) +} +func (arr *byKey) Swap(i, j int) { + arr.pairs[i], arr.pairs[j] = arr.pairs[j], arr.pairs[i] + arr.sorted = true +} + +func appendPrettyObject(buf, json []byte, i int, open, close byte, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) { + var ok bool + if width > 0 { + if pretty && open == '[' && max == -1 { + // here we try to create a single line array + max := width - (len(buf) - nl) + if max > 3 { + s1, s2 := len(buf), i + buf, i, _, ok = appendPrettyObject(buf, json, i, '[', ']', false, width, prefix, "", sortkeys, 0, 0, max) + if ok && len(buf)-s1 <= max { + return buf, i, nl, true + } + buf = buf[:s1] + i = s2 + } + } else if max != -1 && open == '{' { + return buf, i, nl, false + } + } + buf = append(buf, open) + i++ + var pairs []pair + if open == '{' && sortkeys { + pairs = make([]pair, 0, 8) + } + var n int + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + if json[i] == close { + if pretty { + if open == '{' && sortkeys { + buf = sortPairs(json, buf, pairs) + } + if n > 0 { + nl = len(buf) + buf = append(buf, '\n') + } + if buf[len(buf)-1] != open { + buf = appendTabs(buf, prefix, indent, tabs) + } + } + buf = append(buf, close) + return buf, i + 1, nl, open != '{' + } + if open == '[' || json[i] == '"' { + if n > 0 { + buf = append(buf, ',') + if width != -1 && open == '[' { + buf = append(buf, ' ') + } + } + var p pair + if pretty { + nl = len(buf) + buf = append(buf, '\n') + if open == '{' && sortkeys { + p.kstart = i + p.vstart = len(buf) + } + buf = appendTabs(buf, prefix, indent, tabs+1) + } + if open == '{' { + buf, i, nl, _ = appendPrettyString(buf, json, i, nl) + if sortkeys { + p.kend = i + } + buf = append(buf, ':') + if pretty { + buf = 
append(buf, ' ') + } + } + buf, i, nl, ok = appendPrettyAny(buf, json, i, pretty, width, prefix, indent, sortkeys, tabs+1, nl, max) + if max != -1 && !ok { + return buf, i, nl, false + } + if pretty && open == '{' && sortkeys { + p.vend = len(buf) + if p.kstart > p.kend || p.vstart > p.vend { + // bad data. disable sorting + sortkeys = false + } else { + pairs = append(pairs, p) + } + } + i-- + n++ + } + } + return buf, i, nl, open != '{' +} +func sortPairs(json, buf []byte, pairs []pair) []byte { + if len(pairs) == 0 { + return buf + } + vstart := pairs[0].vstart + vend := pairs[len(pairs)-1].vend + arr := byKey{false, json, pairs} + sort.Sort(&arr) + if !arr.sorted { + return buf + } + nbuf := make([]byte, 0, vend-vstart) + for i, p := range pairs { + nbuf = append(nbuf, buf[p.vstart:p.vend]...) + if i < len(pairs)-1 { + nbuf = append(nbuf, ',') + nbuf = append(nbuf, '\n') + } + } + return append(buf[:vstart], nbuf...) +} + +func appendPrettyString(buf, json []byte, i, nl int) ([]byte, int, int, bool) { + s := i + i++ + for ; i < len(json); i++ { + if json[i] == '"' { + var sc int + for j := i - 1; j > s; j-- { + if json[j] == '\\' { + sc++ + } else { + break + } + } + if sc%2 == 1 { + continue + } + i++ + break + } + } + return append(buf, json[s:i]...), i, nl, true +} + +func appendPrettyNumber(buf, json []byte, i, nl int) ([]byte, int, int, bool) { + s := i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' || json[i] == ']' || json[i] == '}' { + break + } + } + return append(buf, json[s:i]...), i, nl, true +} + +func appendTabs(buf []byte, prefix, indent string, tabs int) []byte { + if len(prefix) != 0 { + buf = append(buf, prefix...) + } + if len(indent) == 2 && indent[0] == ' ' && indent[1] == ' ' { + for i := 0; i < tabs; i++ { + buf = append(buf, ' ', ' ') + } + } else { + for i := 0; i < tabs; i++ { + buf = append(buf, indent...) + } + } + return buf +} + +// Style is the color style +type Style struct { + Key, String, Number [2]string + True, False, Null [2]string + Append func(dst []byte, c byte) []byte +} + +func hexp(p byte) byte { + switch { + case p < 10: + return p + '0' + default: + return (p - 10) + 'a' + } +} + +// TerminalStyle is for terminals +var TerminalStyle = &Style{ + Key: [2]string{"\x1B[94m", "\x1B[0m"}, + String: [2]string{"\x1B[92m", "\x1B[0m"}, + Number: [2]string{"\x1B[93m", "\x1B[0m"}, + True: [2]string{"\x1B[96m", "\x1B[0m"}, + False: [2]string{"\x1B[96m", "\x1B[0m"}, + Null: [2]string{"\x1B[91m", "\x1B[0m"}, + Append: func(dst []byte, c byte) []byte { + if c < ' ' && (c != '\r' && c != '\n' && c != '\t' && c != '\v') { + dst = append(dst, "\\u00"...) + dst = append(dst, hexp((c>>4)&0xF)) + return append(dst, hexp((c)&0xF)) + } + return append(dst, c) + }, +} + +// Color will colorize the json. The style parma is used for customizing +// the colors. Passing nil to the style param will use the default +// TerminalStyle. +func Color(src []byte, style *Style) []byte { + if style == nil { + style = TerminalStyle + } + apnd := style.Append + if apnd == nil { + apnd = func(dst []byte, c byte) []byte { + return append(dst, c) + } + } + type stackt struct { + kind byte + key bool + } + var dst []byte + var stack []stackt + for i := 0; i < len(src); i++ { + if src[i] == '"' { + key := len(stack) > 0 && stack[len(stack)-1].key + if key { + dst = append(dst, style.Key[0]...) + } else { + dst = append(dst, style.String[0]...) 
+ } + dst = apnd(dst, '"') + for i = i + 1; i < len(src); i++ { + dst = apnd(dst, src[i]) + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + if key { + dst = append(dst, style.Key[1]...) + } else { + dst = append(dst, style.String[1]...) + } + } else if src[i] == '{' || src[i] == '[' { + stack = append(stack, stackt{src[i], src[i] == '{'}) + dst = apnd(dst, src[i]) + } else if (src[i] == '}' || src[i] == ']') && len(stack) > 0 { + stack = stack[:len(stack)-1] + dst = apnd(dst, src[i]) + } else if (src[i] == ':' || src[i] == ',') && len(stack) > 0 && stack[len(stack)-1].kind == '{' { + stack[len(stack)-1].key = !stack[len(stack)-1].key + dst = apnd(dst, src[i]) + } else { + var kind byte + if (src[i] >= '0' && src[i] <= '9') || src[i] == '-' { + kind = '0' + dst = append(dst, style.Number[0]...) + } else if src[i] == 't' { + kind = 't' + dst = append(dst, style.True[0]...) + } else if src[i] == 'f' { + kind = 'f' + dst = append(dst, style.False[0]...) + } else if src[i] == 'n' { + kind = 'n' + dst = append(dst, style.Null[0]...) + } else { + dst = apnd(dst, src[i]) + } + if kind != 0 { + for ; i < len(src); i++ { + if src[i] <= ' ' || src[i] == ',' || src[i] == ':' || src[i] == ']' || src[i] == '}' { + i-- + break + } + dst = apnd(dst, src[i]) + } + if kind == '0' { + dst = append(dst, style.Number[1]...) + } else if kind == 't' { + dst = append(dst, style.True[1]...) + } else if kind == 'f' { + dst = append(dst, style.False[1]...) + } else if kind == 'n' { + dst = append(dst, style.Null[1]...) + } + } + } + } + return dst +} diff --git a/vendor/github.com/tidwall/rtree/.travis.yml b/vendor/github.com/tidwall/rtree/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/vendor/github.com/tidwall/rtree/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/tidwall/rtree/LICENSE b/vendor/github.com/tidwall/rtree/LICENSE new file mode 100644 index 00000000..1a6cb670 --- /dev/null +++ b/vendor/github.com/tidwall/rtree/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
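
The `Options` struct documented in the pretty README above is only shown as a type; a minimal sketch of `PrettyOptions` with alphabetical key sorting, using an input document invented purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/tidwall/pretty"
)

func main() {
	// Hypothetical input, chosen only to show the effect of SortKeys.
	raw := []byte(`{"b":2,"a":1,"c":[3,2,1]}`)

	// Indent with a tab and sort object keys alphabetically.
	opts := &pretty.Options{Width: 80, Indent: "\t", SortKeys: true}
	fmt.Printf("%s", pretty.PrettyOptions(raw, opts))
}
```
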
diff --git a/vendor/github.com/tidwall/rtree/README.md b/vendor/github.com/tidwall/rtree/README.md new file mode 100644 index 00000000..53a845d2 --- /dev/null +++ b/vendor/github.com/tidwall/rtree/README.md @@ -0,0 +1,22 @@ +RTree implementation for Go +=========================== + +[![Build Status](https://travis-ci.org/tidwall/rtree.svg?branch=master)](https://travis-ci.org/tidwall/rtree) +[![GoDoc](https://godoc.org/github.com/tidwall/rtree?status.svg)](https://godoc.org/github.com/tidwall/rtree) + +This package provides an in-memory R-Tree implementation for Go, useful as a spatial data structure. +It has support for 1-20 dimensions, and can store and search multidimensions interchangably in the same tree. + +Authors +------- +* 1983 Original algorithm and test code by Antonin Guttman and Michael Stonebraker, UC Berkely +* 1994 ANCI C ported from original test code by Melinda Green +* 1995 Sphere volume fix for degeneracy problem submitted by Paul Brook +* 2004 Templated C++ port by Greg Douglas +* 2016 Go port by Josh Baker +* 2018 Added kNN and merged in some of the RBush logic by Vladimir Agafonkin + +License +------- +RTree source code is available under the MIT License. + diff --git a/vendor/github.com/tidwall/rtree/base/knn.go b/vendor/github.com/tidwall/rtree/base/knn.go new file mode 100644 index 00000000..6b26df3f --- /dev/null +++ b/vendor/github.com/tidwall/rtree/base/knn.go @@ -0,0 +1,98 @@ +package base + +import ( + "github.com/tidwall/tinyqueue" +) + +type queueItem struct { + node *treeNode + isItem bool + dist float64 +} + +func (item *queueItem) Less(b tinyqueue.Item) bool { + return item.dist < b.(*queueItem).dist +} + +// KNN returns items nearest to farthest. The dist param is the "box distance". +func (tr *RTree) KNN(min, max []float64, center bool, iter func(item interface{}, dist float64) bool) bool { + var isBox bool + knnPoint := make([]float64, tr.dims) + + bbox := &treeNode{min: min, max: max} + + for i := 0; i < tr.dims; i++ { + knnPoint[i] = (bbox.min[i] + bbox.max[i]) / 2 + if !isBox && bbox.min[i] != bbox.max[i] { + isBox = true + } + } + node := tr.data + queue := tinyqueue.New(nil) + for node != nil { + for i := 0; i < node.count; i++ { + child := node.children[i] + var dist float64 + if isBox { + dist = boxDistRect(bbox, child) + } else { + dist = boxDistPoint(knnPoint, child) + } + queue.Push(&queueItem{node: child, isItem: node.leaf, dist: dist}) + } + for queue.Len() > 0 && queue.Peek().(*queueItem).isItem { + item := queue.Pop().(*queueItem) + if !iter(item.node.unsafeItem().item, item.dist) { + return false + } + } + last := queue.Pop() + if last != nil { + node = (*treeNode)(last.(*queueItem).node) + } else { + node = nil + } + } + return true +} + +func boxDistRect(a, b *treeNode) float64 { + var dist float64 + for i := 0; i < len(a.min); i++ { + var min, max float64 + if a.min[i] > b.min[i] { + min = a.min[i] + } else { + min = b.min[i] + } + if a.max[i] < b.max[i] { + max = a.max[i] + } else { + max = b.max[i] + } + squared := min - max + if squared > 0 { + dist += squared * squared + } + } + return dist +} + +func boxDistPoint(point []float64, childBox *treeNode) float64 { + var dist float64 + for i := 0; i < len(point); i++ { + d := axisDist(point[i], childBox.min[i], childBox.max[i]) + dist += d * d + } + return dist +} + +func axisDist(k, min, max float64) float64 { + if k < min { + return min - k + } + if k <= max { + return 0 + } + return k - max +} diff --git a/vendor/github.com/tidwall/rtree/base/load.go 
b/vendor/github.com/tidwall/rtree/base/load.go new file mode 100644 index 00000000..bf6954fd --- /dev/null +++ b/vendor/github.com/tidwall/rtree/base/load.go @@ -0,0 +1,97 @@ +package base + +import "math" + +// Load bulk load items into the R-tree. +func (tr *RTree) Load(mins, maxs [][]float64, items []interface{}) { + if len(items) < tr.minEntries { + for i := 0; i < len(items); i++ { + tr.Insert(mins[i], maxs[i], items[i]) + } + return + } + + // prefill the items + fitems := make([]*treeNode, len(items)) + for i := 0; i < len(items); i++ { + item := &treeItem{min: mins[i], max: maxs[i], item: items[i]} + fitems[i] = item.unsafeNode() + } + + // following equations are defined in the paper describing OMT + N := len(fitems) + M := tr.maxEntries + h := int(math.Ceil(math.Log(float64(N)) / math.Log(float64(M)))) + Nsubtree := int(math.Pow(float64(M), float64(h-1))) + S := int(math.Ceil(math.Sqrt(float64(N) / float64(Nsubtree)))) + + // sort by the initial axis + axis := 0 + sortByAxis(fitems, axis) + + // build the root node. it's split differently from the subtrees. + children := make([]*treeNode, 0, S) + for i := 0; i < S; i++ { + var part []*treeNode + if i == S-1 { + // last split + part = fitems[len(fitems)/S*i:] + } else { + part = fitems[len(fitems)/S*i : len(fitems)/S*(i+1)] + } + children = append(children, tr.omt(part, h-1, axis+1)) + } + + node := tr.createNode(children) + node.leaf = false + node.height = h + tr.calcBBox(node) + + if tr.data.count == 0 { + // save as is if tree is empty + tr.data = node + } else if tr.data.height == node.height { + // split root if trees have the same height + tr.splitRoot(tr.data, node) + } else { + if tr.data.height < node.height { + // swap trees if inserted one is bigger + tr.data, node = node, tr.data + } + + // insert the small tree into the large tree at appropriate level + tr.insert(node, nil, tr.data.height-node.height-1, true) + } +} + +func (tr *RTree) omt(fitems []*treeNode, h, axis int) *treeNode { + if len(fitems) <= tr.maxEntries { + // reached leaf level; return leaf + children := make([]*treeNode, len(fitems)) + copy(children, fitems) + node := tr.createNode(children) + node.height = h + tr.calcBBox(node) + return node + } + + // sort the items on a different axis than the previous level. 
+ sortByAxis(fitems, axis%tr.dims) + children := make([]*treeNode, 0, tr.maxEntries) + partsz := len(fitems) / tr.maxEntries + for i := 0; i < tr.maxEntries; i++ { + var part []*treeNode + if i == tr.maxEntries-1 { + // last part + part = fitems[partsz*i:] + } else { + part = fitems[partsz*i : partsz*(i+1)] + } + children = append(children, tr.omt(part, h-1, axis+1)) + } + node := tr.createNode(children) + node.height = h + node.leaf = false + tr.calcBBox(node) + return node +} diff --git a/vendor/github.com/tidwall/rtree/base/rtree.go b/vendor/github.com/tidwall/rtree/base/rtree.go new file mode 100644 index 00000000..4a66235f --- /dev/null +++ b/vendor/github.com/tidwall/rtree/base/rtree.go @@ -0,0 +1,673 @@ +package base + +import ( + "math" + "unsafe" +) + +// precalculate infinity +var mathInfNeg = math.Inf(-1) +var mathInfPos = math.Inf(+1) + +type treeNode struct { + min, max []float64 + children []*treeNode + count int + height int + leaf bool +} + +func (node *treeNode) unsafeItem() *treeItem { + return (*treeItem)(unsafe.Pointer(node)) +} + +func (tr *RTree) createNode(children []*treeNode) *treeNode { + n := &treeNode{ + height: 1, + leaf: true, + children: make([]*treeNode, tr.maxEntries+1), + } + if len(children) > 0 { + n.count = len(children) + copy(n.children[:n.count], children) + } + n.min = make([]float64, tr.dims) + n.max = make([]float64, tr.dims) + for i := 0; i < tr.dims; i++ { + n.min[i] = mathInfPos + n.max[i] = mathInfNeg + } + return n +} + +func (node *treeNode) extend(b *treeNode) { + for i := 0; i < len(node.min); i++ { + if b.min[i] < node.min[i] { + node.min[i] = b.min[i] + } + if b.max[i] > node.max[i] { + node.max[i] = b.max[i] + } + } +} + +func (node *treeNode) area() float64 { + area := node.max[0] - node.min[0] + for i := 1; i < len(node.min); i++ { + area *= node.max[i] - node.min[i] + } + return area +} + +func (node *treeNode) enlargedAreaAxis(b *treeNode, axis int) float64 { + var max, min float64 + if b.max[axis] > node.max[axis] { + max = b.max[axis] + } else { + max = node.max[axis] + } + if b.min[axis] < node.min[axis] { + min = b.min[axis] + } else { + min = node.min[axis] + } + return max - min +} + +func (node *treeNode) enlargedArea(b *treeNode) float64 { + area := node.enlargedAreaAxis(b, 0) + for i := 1; i < len(node.min); i++ { + area *= node.enlargedAreaAxis(b, i) + } + return area +} + +func (node *treeNode) intersectionAreaAxis(b *treeNode, axis int) float64 { + var max, min float64 + if node.max[axis] < b.max[axis] { + max = node.max[axis] + } else { + max = b.max[axis] + } + if node.min[axis] > b.min[axis] { + min = node.min[axis] + } else { + min = b.min[axis] + } + if max > min { + return max - min + } + return 0 +} +func (node *treeNode) intersectionArea(b *treeNode) float64 { + area := node.intersectionAreaAxis(b, 0) + for i := 1; i < len(node.min); i++ { + area *= node.intersectionAreaAxis(b, i) + } + return area +} +func (node *treeNode) margin() float64 { + margin := node.max[0] - node.min[0] + for i := 1; i < len(node.min); i++ { + margin += node.max[i] - node.min[i] + } + return margin +} + +type result int + +const ( + not result = 0 + intersects result = 1 + contains result = 2 +) + +func (node *treeNode) overlaps(b *treeNode) result { + for i := 0; i < len(node.min); i++ { + if b.min[i] > node.max[i] || b.max[i] < node.min[i] { + return not + } + if node.min[i] > b.min[i] || b.max[i] > node.max[i] { + i++ + for ; i < len(node.min); i++ { + if b.min[i] > node.max[i] || b.max[i] < node.min[i] { + return not + } + } + 
return intersects + } + } + return contains +} + +func (node *treeNode) intersects(b *treeNode) bool { + for i := 0; i < len(node.min); i++ { + if b.min[i] > node.max[i] || b.max[i] < node.min[i] { + return false + } + } + return true +} + +func (node *treeNode) findItem(item interface{}) int { + for i := 0; i < node.count; i++ { + if node.children[i].unsafeItem().item == item { + return i + } + } + return -1 +} + +func (node *treeNode) contains(b *treeNode) bool { + for i := 0; i < len(node.min); i++ { + if node.min[i] > b.min[i] || b.max[i] > node.max[i] { + return false + } + } + return true +} + +func (node *treeNode) childCount() int { + if node.leaf { + return node.count + } + var n int + for i := 0; i < node.count; i++ { + n += node.children[i].childCount() + } + return n +} + +type treeItem struct { + min, max []float64 + item interface{} +} + +func (item *treeItem) unsafeNode() *treeNode { + return (*treeNode)(unsafe.Pointer(item)) +} + +// RTree is an R-tree +type RTree struct { + dims int + maxEntries int + minEntries int + data *treeNode // root node + // resusable fields, these help performance of common mutable operations. + reuse struct { + path []*treeNode // for reinsertion path + indexes []int // for remove function + stack []int // for bulk loading + } +} + +// New creates a new R-tree +func New(dims, maxEntries int) *RTree { + if dims <= 0 { + panic("invalid dimensions") + } + + tr := &RTree{} + tr.dims = dims + tr.maxEntries = int(math.Max(4, float64(maxEntries))) + tr.minEntries = int(math.Max(2, math.Ceil(float64(tr.maxEntries)*0.4))) + tr.data = tr.createNode(nil) + return tr +} + +// Insert inserts an item +func (tr *RTree) Insert(min, max []float64, item interface{}) { + if len(min) != tr.dims || len(max) != tr.dims { + panic("invalid dimensions") + } + if item == nil { + panic("nil item") + } + bbox := treeNode{min: min, max: max} + tr.insert(&bbox, item, tr.data.height-1, false) +} + +func (tr *RTree) insert(bbox *treeNode, item interface{}, level int, isNode bool) { + tr.reuse.path = tr.reuse.path[:0] + node, insertPath := tr.chooseSubtree(bbox, tr.data, level, tr.reuse.path) + if item == nil { + // item is only nil when bulk loading a node + if node.leaf { + panic("loading node into leaf") + } + node.children[node.count] = bbox + node.count++ + } else { + ti := &treeItem{min: bbox.min, max: bbox.max, item: item} + node.children[node.count] = ti.unsafeNode() + node.count++ + } + node.extend(bbox) + for level >= 0 { + if insertPath[level].count > tr.maxEntries { + insertPath = tr.split(insertPath, level) + level-- + } else { + break + } + } + tr.adjustParentBBoxes(bbox, insertPath, level) + tr.reuse.path = insertPath +} + +func (tr *RTree) adjustParentBBoxes(bbox *treeNode, path []*treeNode, level int) { + // adjust bboxes along the given tree path + for i := level; i >= 0; i-- { + path[i].extend(bbox) + } +} + +func (tr *RTree) chooseSubtree(bbox, node *treeNode, level int, path []*treeNode) (*treeNode, []*treeNode) { + var targetNode *treeNode + var area, enlargement, minArea, minEnlargement float64 + for { + path = append(path, node) + if node.leaf || len(path)-1 == level { + break + } + minEnlargement = mathInfPos + minArea = minEnlargement + for i := 0; i < node.count; i++ { + child := node.children[i] + area = child.area() + enlargement = bbox.enlargedArea(child) - area + if enlargement < minEnlargement { + minEnlargement = enlargement + if area < minArea { + minArea = area + } + targetNode = child + } else if enlargement == minEnlargement { + if area < 
minArea { + minArea = area + targetNode = child + } + } + } + if targetNode != nil { + node = targetNode + } else if node.count > 0 { + node = (*treeNode)(node.children[0]) + } else { + node = nil + } + } + return node, path +} +func (tr *RTree) split(insertPath []*treeNode, level int) []*treeNode { + var node = insertPath[level] + var M = node.count + var m = tr.minEntries + + tr.chooseSplitAxis(node, m, M) + splitIndex := tr.chooseSplitIndex(node, m, M) + + spliced := make([]*treeNode, node.count-splitIndex) + copy(spliced, node.children[splitIndex:]) + node.count = splitIndex + + newNode := tr.createNode(spliced) + newNode.height = node.height + newNode.leaf = node.leaf + + tr.calcBBox(node) + tr.calcBBox(newNode) + + if level != 0 { + insertPath[level-1].children[insertPath[level-1].count] = newNode + insertPath[level-1].count++ + } else { + tr.splitRoot(node, newNode) + } + return insertPath +} +func (tr *RTree) chooseSplitIndex(node *treeNode, m, M int) int { + var i int + var bbox1, bbox2 *treeNode + var overlap, area, minOverlap, minArea float64 + var index int + + minArea = mathInfPos + minOverlap = minArea + + for i = m; i <= M-m; i++ { + bbox1 = tr.distBBox(node, 0, i, nil) + bbox2 = tr.distBBox(node, i, M, nil) + + overlap = bbox1.intersectionArea(bbox2) + area = bbox1.area() + bbox2.area() + + // choose distribution with minimum overlap + if overlap < minOverlap { + minOverlap = overlap + index = i + + if area < minArea { + minArea = area + } + } else if overlap == minOverlap { + // otherwise choose distribution with minimum area + if area < minArea { + minArea = area + index = i + } + } + } + return index +} +func (tr *RTree) calcBBox(node *treeNode) { + tr.distBBox(node, 0, node.count, node) +} +func (tr *RTree) chooseSplitAxis(node *treeNode, m, M int) { + minMargin := tr.allDistMargin(node, m, M, 0) + var minAxis int + for axis := 1; axis < tr.dims; axis++ { + margin := tr.allDistMargin(node, m, M, axis) + if margin < minMargin { + minMargin = margin + minAxis = axis + } + } + if minAxis < tr.dims { + tr.sortNodes(node, minAxis) + } +} +func (tr *RTree) splitRoot(node, newNode *treeNode) { + tr.data = tr.createNode([]*treeNode{node, newNode}) + tr.data.height = node.height + 1 + tr.data.leaf = false + tr.calcBBox(tr.data) +} +func (tr *RTree) distBBox(node *treeNode, k, p int, destNode *treeNode) *treeNode { + if destNode == nil { + destNode = tr.createNode(nil) + } else { + for i := 0; i < tr.dims; i++ { + destNode.min[i] = mathInfPos + destNode.max[i] = mathInfNeg + } + } + for i := k; i < p; i++ { + if node.leaf { + destNode.extend(node.children[i]) + } else { + destNode.extend((*treeNode)(node.children[i])) + } + } + return destNode +} +func (tr *RTree) allDistMargin(node *treeNode, m, M int, axis int) float64 { + tr.sortNodes(node, axis) + + var leftBBox = tr.distBBox(node, 0, m, nil) + var rightBBox = tr.distBBox(node, M-m, M, nil) + var margin = leftBBox.margin() + rightBBox.margin() + + var i int + + if node.leaf { + for i = m; i < M-m; i++ { + leftBBox.extend(node.children[i]) + margin += leftBBox.margin() + } + for i = M - m - 1; i >= m; i-- { + leftBBox.extend(node.children[i]) + margin += rightBBox.margin() + } + } else { + for i = m; i < M-m; i++ { + child := (*treeNode)(node.children[i]) + leftBBox.extend(child) + margin += leftBBox.margin() + } + for i = M - m - 1; i >= m; i-- { + child := (*treeNode)(node.children[i]) + leftBBox.extend(child) + margin += rightBBox.margin() + } + } + return margin +} +func (tr *RTree) sortNodes(node *treeNode, axis int) { + 
sortByAxis(node.children[:node.count], axis) +} + +func sortByAxis(items []*treeNode, axis int) { + if len(items) < 2 { + return + } + left, right := 0, len(items)-1 + pivotIndex := len(items) / 2 + items[pivotIndex], items[right] = items[right], items[pivotIndex] + for i := range items { + if items[i].min[axis] < items[right].min[axis] { + items[i], items[left] = items[left], items[i] + left++ + } + } + items[left], items[right] = items[right], items[left] + sortByAxis(items[:left], axis) + sortByAxis(items[left+1:], axis) +} + +// Search searches the tree for items in the input rectangle +func (tr *RTree) Search(min, max []float64, iter func(item interface{}) bool) bool { + bbox := &treeNode{min: min, max: max} + if !tr.data.intersects(bbox) { + return true + } + return tr.search(tr.data, bbox, iter) +} + +func (tr *RTree) search(node, bbox *treeNode, iter func(item interface{}) bool) bool { + if node.leaf { + for i := 0; i < node.count; i++ { + if bbox.intersects(node.children[i]) { + if !iter(node.children[i].unsafeItem().item) { + return false + } + } + } + } else { + for i := 0; i < node.count; i++ { + r := bbox.overlaps(node.children[i]) + if r == intersects { + if !tr.search(node.children[i], bbox, iter) { + return false + } + } else if r == contains { + if !scan(node.children[i], iter) { + return false + } + } + } + } + return true +} + +func (tr *RTree) IsEmpty() bool { + empty := true + tr.Scan(func(item interface{}) bool { + empty = false + return false + }) + return empty +} + +// Remove removes an item from the R-tree. +func (tr *RTree) Remove(min, max []float64, item interface{}) { + bbox := &treeNode{min: min, max: max} + tr.remove(bbox, item) +} + +func (tr *RTree) remove(bbox *treeNode, item interface{}) { + path := tr.reuse.path[:0] + indexes := tr.reuse.indexes[:0] + + var node = tr.data + var i int + var parent *treeNode + var index int + var goingUp bool + + for node != nil || len(path) != 0 { + if node == nil { + node = path[len(path)-1] + path = path[:len(path)-1] + if len(path) == 0 { + parent = nil + } else { + parent = path[len(path)-1] + } + i = indexes[len(indexes)-1] + indexes = indexes[:len(indexes)-1] + goingUp = true + } + + if node.leaf { + index = node.findItem(item) + if index != -1 { + // item found, remove the item and condense tree upwards + copy(node.children[index:], node.children[index+1:]) + node.children[node.count-1] = nil + node.count-- + path = append(path, node) + tr.condense(path) + goto done + } + } + if !goingUp && !node.leaf && node.contains(bbox) { // go down + path = append(path, node) + indexes = append(indexes, i) + i = 0 + parent = node + node = (*treeNode)(node.children[0]) + } else if parent != nil { // go right + i++ + if i == parent.count { + node = nil + } else { + node = (*treeNode)(parent.children[i]) + } + goingUp = false + } else { + node = nil + } + } +done: + tr.reuse.path = path + tr.reuse.indexes = indexes + return +} +func (tr *RTree) condense(path []*treeNode) { + // go through the path, removing empty nodes and updating bboxes + var siblings []*treeNode + for i := len(path) - 1; i >= 0; i-- { + if path[i].count == 0 { + if i > 0 { + siblings = path[i-1].children[:path[i-1].count] + index := -1 + for j := 0; j < len(siblings); j++ { + if siblings[j] == path[i] { + index = j + break + } + } + copy(siblings[index:], siblings[index+1:]) + siblings[len(siblings)-1] = nil + path[i-1].count-- + //siblings = siblings[:len(siblings)-1] + //path[i-1].children = siblings + } else { + tr.data = tr.createNode(nil) // clear tree + 
} + } else { + tr.calcBBox(path[i]) + } + } +} + +// Count returns the number of items in the R-tree. +func (tr *RTree) Count() int { + return tr.data.childCount() +} + +// Traverse iterates over the entire R-tree and includes all nodes and items. +func (tr *RTree) Traverse(iter func(min, max []float64, level int, item interface{}) bool) bool { + return tr.traverse(tr.data, iter) +} + +func (tr *RTree) traverse(node *treeNode, iter func(min, max []float64, level int, item interface{}) bool) bool { + if !iter(node.min, node.max, int(node.height), nil) { + return false + } + if node.leaf { + for i := 0; i < node.count; i++ { + child := node.children[i] + if !iter(child.min, child.max, 0, child.unsafeItem().item) { + return false + } + } + } else { + for i := 0; i < node.count; i++ { + child := node.children[i] + if !tr.traverse(child, iter) { + return false + } + } + } + return true +} + +// Scan iterates over the entire R-tree +func (tr *RTree) Scan(iter func(item interface{}) bool) bool { + return scan(tr.data, iter) +} + +func scan(node *treeNode, iter func(item interface{}) bool) bool { + if node.leaf { + for i := 0; i < node.count; i++ { + child := node.children[i] + if !iter(child.unsafeItem().item) { + return false + } + } + } else { + for i := 0; i < node.count; i++ { + child := node.children[i] + if !scan(child, iter) { + return false + } + } + } + return true +} + +// Bounds returns the bounding box of the entire R-tree +func (tr *RTree) Bounds() (min, max []float64) { + if tr.data.count > 0 { + return tr.data.min, tr.data.max + } + return make([]float64, tr.dims), make([]float64, tr.dims) +} + +// Complexity returns the complexity of the R-tree. The higher the value, the +// more complex the tree. The value of 1 is the lowest. +func (tr *RTree) Complexity() float64 { + var nodeCount int + var itemCount int + tr.Traverse(func(_, _ []float64, level int, _ interface{}) bool { + if level == 0 { + itemCount++ + } else { + nodeCount++ + } + return true + }) + return float64(tr.maxEntries*nodeCount) / float64(itemCount) +} diff --git a/vendor/github.com/tidwall/rtree/rtree.go b/vendor/github.com/tidwall/rtree/rtree.go new file mode 100644 index 00000000..bbbb1ffe --- /dev/null +++ b/vendor/github.com/tidwall/rtree/rtree.go @@ -0,0 +1,278 @@ +package rtree + +import ( + "math" + "sync" + + "github.com/tidwall/rtree/base" +) + +type Iterator func(item Item) bool +type Item interface { + Rect(ctx interface{}) (min []float64, max []float64) +} + +type RTree struct { + dims int + maxEntries int + ctx interface{} + trs []*base.RTree + used int +} + +func New(ctx interface{}) *RTree { + tr := &RTree{ + ctx: ctx, + dims: 20, + maxEntries: 13, + } + tr.trs = make([]*base.RTree, 20) + return tr +} + +func (tr *RTree) Insert(item Item) { + if item == nil { + panic("nil item") + } + min, max := item.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item rectangle") + } + if len(min) < 1 || len(min) > len(tr.trs) { + return // just return + panic("invalid dimension") + } + btr := tr.trs[len(min)-1] + if btr == nil { + btr = base.New(len(min), tr.maxEntries) + tr.trs[len(min)-1] = btr + tr.used++ + } + amin := make([]float64, len(min)) + amax := make([]float64, len(max)) + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + btr.Insert(amin, amax, item) +} + +func (tr *RTree) Remove(item Item) { + if item == nil { + panic("nil item") + } + min, max := item.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item 
rectangle") + } + if len(min) < 1 || len(min) > len(tr.trs) { + return // just return + panic("invalid dimension") + } + btr := tr.trs[len(min)-1] + if btr == nil { + return + } + amin := make([]float64, len(min)) + amax := make([]float64, len(max)) + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + btr.Remove(amin, amax, item) + if btr.IsEmpty() { + tr.trs[len(min)-1] = nil + tr.used-- + } +} +func (tr *RTree) Reset() { + for i := 0; i < len(tr.trs); i++ { + tr.trs[i] = nil + } + tr.used = 0 +} +func (tr *RTree) Count() int { + var count int + for _, btr := range tr.trs { + if btr != nil { + count += btr.Count() + } + } + return count +} + +func (tr *RTree) Search(bounds Item, iter Iterator) { + if bounds == nil { + panic("nil bounds being used for search") + } + min, max := bounds.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item rectangle") + } + if len(min) < 1 || len(min) > len(tr.trs) { + return // just return + panic("invalid dimension") + } + used := tr.used + for i, btr := range tr.trs { + if used == 0 { + break + } + if btr != nil { + if !search(btr, min, max, i+1, iter) { + return + } + used-- + } + } +} +func search(btr *base.RTree, min, max []float64, dims int, iter Iterator) bool { + amin := make([]float64, dims) + amax := make([]float64, dims) + for i := 0; i < dims; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + var ended bool + btr.Search(amin, amax, func(item interface{}) bool { + if !iter(item.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) KNN(bounds Item, center bool, iter func(item Item, dist float64) bool) { + if bounds == nil { + panic("nil bounds being used for search") + } + min, max := bounds.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item rectangle") + } + if len(min) < 1 || len(min) > len(tr.trs) { + return // just return + panic("invalid dimension") + } + + if tr.used == 0 { + return + } + if tr.used == 1 { + for i, btr := range tr.trs { + if btr != nil { + knn(btr, min, max, center, i+1, func(item interface{}, dist float64) bool { + return iter(item.(Item), dist) + }) + break + } + } + return + } + + type queueT struct { + done bool + step int + item Item + dist float64 + } + + var mu sync.Mutex + var ended bool + queues := make(map[int][]queueT) + cond := sync.NewCond(&mu) + for i, btr := range tr.trs { + if btr != nil { + dims := i + 1 + mu.Lock() + queues[dims] = []queueT{} + cond.Signal() + mu.Unlock() + go func(dims int, btr *base.RTree) { + knn(btr, min, max, center, dims, func(item interface{}, dist float64) bool { + mu.Lock() + if ended { + mu.Unlock() + return false + } + queues[dims] = append(queues[dims], queueT{item: item.(Item), dist: dist}) + cond.Signal() + mu.Unlock() + return true + }) + mu.Lock() + queues[dims] = append(queues[dims], queueT{done: true}) + cond.Signal() + mu.Unlock() + }(dims, btr) + } + } + mu.Lock() + for { + ready := true + for i := range queues { + if len(queues[i]) == 0 { + ready = false + break + } + if queues[i][0].done { + delete(queues, i) + } + } + if len(queues) == 0 { + break + } + if ready { + var j int + var minDist float64 + var minItem Item + var minQueue int + for i := range queues { + if j == 0 || queues[i][0].dist < minDist { + minDist = queues[i][0].dist + minItem = queues[i][0].item + minQueue = i + } + } + queues[minQueue] = queues[minQueue][1:] + if !iter(minItem, 
minDist) { + ended = true + break + } + continue + } + cond.Wait() + } + mu.Unlock() +} +func knn(btr *base.RTree, min, max []float64, center bool, dims int, iter func(item interface{}, dist float64) bool) bool { + amin := make([]float64, dims) + amax := make([]float64, dims) + for i := 0; i < dims; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + var ended bool + btr.KNN(amin, amax, center, func(item interface{}, dist float64) bool { + if !iter(item.(Item), dist) { + ended = true + return false + } + return true + }) + return !ended +} diff --git a/vendor/github.com/tidwall/tinyqueue/LICENSE b/vendor/github.com/tidwall/tinyqueue/LICENSE new file mode 100644 index 00000000..2b7cd9d3 --- /dev/null +++ b/vendor/github.com/tidwall/tinyqueue/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2017, Vladimir Agafonkin + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. diff --git a/vendor/github.com/tidwall/tinyqueue/README.md b/vendor/github.com/tidwall/tinyqueue/README.md new file mode 100644 index 00000000..f4edc916 --- /dev/null +++ b/vendor/github.com/tidwall/tinyqueue/README.md @@ -0,0 +1,7 @@ +# tinyqueue +GoDoc + +tinyqueue is a Go package for binary heap priority queues. +Ported from the [tinyqueue](https://github.com/mourner/tinyqueue) Javascript library. 
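The tinyqueue package vendored just below is a plain binary min-heap over an `Item` interface with a single `Less` method. A minimal usage sketch (the `intItem` type is illustrative and not part of the package):

```go
package main

import (
	"fmt"

	"github.com/tidwall/tinyqueue"
)

// intItem is a hypothetical Item implementation; anything with a
// Less(tinyqueue.Item) bool method can sit on the queue.
type intItem int

func (a intItem) Less(b tinyqueue.Item) bool { return a < b.(intItem) }

func main() {
	q := tinyqueue.New(nil) // start with an empty heap
	q.Push(intItem(5))
	q.Push(intItem(1))
	q.Push(intItem(3))

	fmt.Println(q.Pop())  // 1: the smallest item comes out first
	fmt.Println(q.Peek()) // 3: next smallest, without removing it
	fmt.Println(q.Len())  // 2
}
```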
+ + diff --git a/vendor/github.com/tidwall/tinyqueue/tinyqueue.go b/vendor/github.com/tidwall/tinyqueue/tinyqueue.go new file mode 100644 index 00000000..4a06258a --- /dev/null +++ b/vendor/github.com/tidwall/tinyqueue/tinyqueue.go @@ -0,0 +1,86 @@ +package tinyqueue + +type Queue struct { + length int + data []Item +} + +type Item interface { + Less(Item) bool +} + +func New(data []Item) *Queue { + q := &Queue{} + q.data = data + q.length = len(data) + if q.length > 0 { + i := q.length >> 1 + for ; i >= 0; i-- { + q.down(i) + } + } + return q +} + +func (q *Queue) Push(item Item) { + q.data = append(q.data, item) + q.length++ + q.up(q.length - 1) +} +func (q *Queue) Pop() Item { + if q.length == 0 { + return nil + } + top := q.data[0] + q.length-- + if q.length > 0 { + q.data[0] = q.data[q.length] + q.down(0) + } + q.data = q.data[:len(q.data)-1] + return top +} +func (q *Queue) Peek() Item { + if q.length == 0 { + return nil + } + return q.data[0] +} +func (q *Queue) Len() int { + return q.length +} +func (q *Queue) down(pos int) { + data := q.data + halfLength := q.length >> 1 + item := data[pos] + for pos < halfLength { + left := (pos << 1) + 1 + right := left + 1 + best := data[left] + if right < q.length && data[right].Less(best) { + left = right + best = data[right] + } + if !best.Less(item) { + break + } + data[pos] = best + pos = left + } + data[pos] = item +} + +func (q *Queue) up(pos int) { + data := q.data + item := data[pos] + for pos > 0 { + parent := (pos - 1) >> 1 + current := data[parent] + if !item.Less(current) { + break + } + data[pos] = current + pos = parent + } + data[pos] = item +} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS new file mode 100644 index 00000000..2b00ddba --- /dev/null +++ b/vendor/golang.org/x/crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS new file mode 100644 index 00000000..1fbd3e97 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 00000000..fc311609 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
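The bcrypt files added next expose `GenerateFromPassword`, `CompareHashAndPassword`, and `Cost`. A minimal sketch of hashing and then verifying a password with the vendored package (the password value is illustrative):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash at the package's DefaultCost (10); costs below MinCost are bumped up to it.
	hash, err := bcrypt.GenerateFromPassword([]byte("hunter2"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}

	// CompareHashAndPassword returns nil on a match and
	// ErrMismatchedHashAndPassword otherwise.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("hunter2")); err == nil {
		fmt.Println("password accepted")
	}

	// Cost recovers the work factor stored in the hash, useful when deciding
	// whether an old hash should be upgraded.
	if c, err := bcrypt.Cost(hash); err == nil {
		fmt.Println("stored cost:", c) // 10
	}
}
```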
+ +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 00000000..aeb73f81 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,295 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "golang.org/x/crypto/blowfish" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. +type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. 
+var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. +func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). 
+ p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + // We copy the key to prevent changing the underlying array. + ckey := append(key[:len(key):len(key)], 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n++ + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n++ + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. +func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 00000000..9d80f195 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. 
+func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. +func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ 
c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 00000000..213bf204 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,99 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +// +// Blowfish is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). +package blowfish // import "golang.org/x/crypto/blowfish" + +// The code is a port of Bruce Schneier's C implementation. +// See https://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. 
+const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. +type Cipher struct { + p [18]uint32 + s0, s1, s2, s3 [256]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Blowfish key, from 1 to 56 bytes. +func NewCipher(key []byte) (*Cipher, error) { + var result Cipher + if k := len(key); k < 1 || k > 56 { + return nil, KeySizeError(k) + } + initCipher(&result) + ExpandKey(key, &result) + return &result, nil +} + +// NewSaltedCipher creates a returns a Cipher that folds a salt into its key +// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is +// sufficient and desirable. For bcrypt compatibility, the key can be over 56 +// bytes. +func NewSaltedCipher(key, salt []byte) (*Cipher, error) { + if len(salt) == 0 { + return NewCipher(key) + } + var result Cipher + if k := len(key); k < 1 { + return nil, KeySizeError(k) + } + initCipher(&result) + expandKeyWithSalt(key, salt, &result) + return &result, nil +} + +// BlockSize returns the Blowfish block size, 8 bytes. +// It is necessary to satisfy the Block interface in the +// package "crypto/cipher". +func (c *Cipher) BlockSize() int { return BlockSize } + +// Encrypt encrypts the 8-byte buffer src using the key k +// and stores the result in dst. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 00000000..d0407759 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// https://www.schneier.com/code/constants.txt. 
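The Blowfish cipher is vendored here because bcrypt builds its key schedule on it; the package comment above marks it as a legacy algorithm. A minimal single-block round-trip sketch using the API shown above (key and plaintext are illustrative):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/blowfish"
)

func main() {
	// NewCipher accepts keys from 1 to 56 bytes.
	c, err := blowfish.NewCipher([]byte("an example key"))
	if err != nil {
		panic(err)
	}

	src := []byte("8bytes!!") // exactly one BlockSize (8-byte) block
	dst := make([]byte, blowfish.BlockSize)
	c.Encrypt(dst, src)

	out := make([]byte, blowfish.BlockSize)
	c.Decrypt(out, dst)
	fmt.Printf("%x -> %s\n", dst, out) // round-trips back to "8bytes!!"
}
```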
+ +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go new file mode 100644 index 00000000..c2fef30a --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -0,0 +1,66 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha3 implements the SHA-3 fixed-output-length hash functions and +// the SHAKE variable-output-length hash functions defined by FIPS-202. +// +// Both types of hash function use the "sponge" construction and the Keccak +// permutation. For a detailed specification see http://keccak.noekeon.org/ +// +// +// Guidance +// +// If you aren't sure what function you need, use SHAKE256 with at least 64 +// bytes of output. The SHAKE instances are faster than the SHA3 instances; +// the latter have to allocate memory to conform to the hash.Hash interface. +// +// If you need a secret-key MAC (message authentication code), prepend the +// secret key to the input, hash with SHAKE256 and read at least 32 bytes of +// output. 
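The keyed-MAC guidance in the sha3 package comment (prepend the secret key to the input, hash with SHAKE256, and read at least 32 bytes of output) looks roughly like this in code. This sketch assumes the package's `NewShake256` constructor from its shake.go, which is not part of this hunk:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	key := []byte("secret key")
	msg := []byte("message to authenticate")

	// Absorb key || message, then squeeze at least 32 bytes, per the guidance above.
	h := sha3.NewShake256()
	h.Write(key)
	h.Write(msg)

	mac := make([]byte, 32)
	h.Read(mac)
	fmt.Printf("%x\n", mac)
}
```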
+// +// +// Security strengths +// +// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security +// strength against preimage attacks of x bits. Since they only produce "x" +// bits of output, their collision-resistance is only "x/2" bits. +// +// The SHAKE-256 and -128 functions have a generic security strength of 256 and +// 128 bits against all attacks, provided that at least 2x bits of their output +// is used. Requesting more than 64 or 32 bytes of output, respectively, does +// not increase the collision-resistance of the SHAKE functions. +// +// +// The sponge construction +// +// A sponge builds a pseudo-random function from a public pseudo-random +// permutation, by applying the permutation to a state of "rate + capacity" +// bytes, but hiding "capacity" of the bytes. +// +// A sponge starts out with a zero state. To hash an input using a sponge, up +// to "rate" bytes of the input are XORed into the sponge's state. The sponge +// is then "full" and the permutation is applied to "empty" it. This process is +// repeated until all the input has been "absorbed". The input is then padded. +// The digest is "squeezed" from the sponge in the same way, except that output +// is copied out instead of input being XORed in. +// +// A sponge is parameterized by its generic security strength, which is equal +// to half its capacity; capacity + rate is equal to the permutation's width. +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. +// +// +// Recommendations +// +// The SHAKE functions are recommended for most new uses. They can produce +// output of arbitrary length. SHAKE256, with an output length of at least +// 64 bytes, provides 256-bit security against all attacks. The Keccak team +// recommends it for most applications upgrading from SHA2-512. (NIST chose a +// much stronger, but much slower, sponge instance for SHA3-512.) +// +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. +// They produce output of the same length, with the same security strengths +// against all attacks. This means, in particular, that SHA3-256 only has +// 128-bit collision resistance, because its output length is 32 bytes. +package sha3 // import "golang.org/x/crypto/sha3" diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go new file mode 100644 index 00000000..0d8043fd --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -0,0 +1,97 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file provides functions for creating instances of the SHA-3 +// and SHAKE hash functions, as well as utility functions for hashing +// bytes. + +import ( + "hash" +) + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. +func New224() hash.Hash { + if h := new224Asm(); h != nil { + return h + } + return &state{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +func New256() hash.Hash { + if h := new256Asm(); h != nil { + return h + } + return &state{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +// New384 creates a new SHA3-384 hash. 
+// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. +func New384() hash.Hash { + if h := new384Asm(); h != nil { + return h + } + return &state{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. +func New512() hash.Hash { + if h := new512Asm(); h != nil { + return h + } + return &state{rate: 72, outputLen: 64, dsbyte: 0x06} +} + +// NewLegacyKeccak256 creates a new Keccak-256 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New256 instead. +func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} } + +// NewLegacyKeccak512 creates a new Keccak-512 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New512 instead. +func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} } + +// Sum224 returns the SHA3-224 digest of the data. +func Sum224(data []byte) (digest [28]byte) { + h := New224() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum256 returns the SHA3-256 digest of the data. +func Sum256(data []byte) (digest [32]byte) { + h := New256() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum384 returns the SHA3-384 digest of the data. +func Sum384(data []byte) (digest [48]byte) { + h := New384() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum512 returns the SHA3-512 digest of the data. +func Sum512(data []byte) (digest [64]byte) { + h := New512() + h.Write(data) + h.Sum(digest[:0]) + return +} diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go new file mode 100644 index 00000000..f455147d --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo appengine !s390x + +package sha3 + +import ( + "hash" +) + +// new224Asm returns an assembly implementation of SHA3-224 if available, +// otherwise it returns nil. +func new224Asm() hash.Hash { return nil } + +// new256Asm returns an assembly implementation of SHA3-256 if available, +// otherwise it returns nil. +func new256Asm() hash.Hash { return nil } + +// new384Asm returns an assembly implementation of SHA3-384 if available, +// otherwise it returns nil. +func new384Asm() hash.Hash { return nil } + +// new512Asm returns an assembly implementation of SHA3-512 if available, +// otherwise it returns nil. +func new512Asm() hash.Hash { return nil } diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go new file mode 100644 index 00000000..46d03ed3 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -0,0 +1,412 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package sha3 + +// rc stores the round constants for use in the ι step. 
+var rc = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} + +// keccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +func keccakF1600(a *[25]uint64) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + for i := 0; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. + + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[12] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[18] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[24] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[16] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[22] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[3] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[1] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[7] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[19] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[11] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[23] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[4] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[2] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[8] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[14] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // Round 2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + 
d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[7] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[23] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[14] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[11] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[2] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[18] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[6] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[22] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[4] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[1] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[8] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[24] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[12] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[3] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[19] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[22] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[8] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[19] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[1] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[12] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[23] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[16] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[2] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[24] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[5] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[6] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[3] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[14] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ 
bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[7] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[18] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[4] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[2] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[3] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[4] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[6] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[7] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[8] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[11] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[12] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[14] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[16] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[18] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[19] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[22] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[23] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[24] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go new file mode 100644 index 00000000..78867958 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +package sha3 + +// This function is implemented in keccakf_amd64.s. + +//go:noescape + +func keccakF1600(a *[25]uint64) diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s new file mode 100644 index 00000000..f88533ac --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -0,0 +1,390 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +// This code was translated into a form compatible with 6a from the public +// domain sources at https://github.com/gvanas/KeccakCodePackage + +// Offsets in state +#define _ba (0*8) +#define _be (1*8) +#define _bi (2*8) +#define _bo (3*8) +#define _bu (4*8) +#define _ga (5*8) +#define _ge (6*8) +#define _gi (7*8) +#define _go (8*8) +#define _gu (9*8) +#define _ka (10*8) +#define _ke (11*8) +#define _ki (12*8) +#define _ko (13*8) +#define _ku (14*8) +#define _ma (15*8) +#define _me (16*8) +#define _mi (17*8) +#define _mo (18*8) +#define _mu (19*8) +#define _sa (20*8) +#define _se (21*8) +#define _si (22*8) +#define _so (23*8) +#define _su (24*8) + +// Temporary registers +#define rT1 AX + +// Round vars +#define rpState DI +#define rpStack SP + +#define rDa BX +#define rDe CX +#define rDi DX +#define rDo R8 +#define rDu R9 + +#define rBa R10 +#define rBe R11 +#define rBi R12 +#define rBo R13 +#define rBu R14 + +#define rCa SI +#define rCe BP +#define rCi rBi +#define rCo rBo +#define rCu R15 + +#define MOVQ_RBI_RCE MOVQ rBi, rCe +#define XORQ_RT1_RCA XORQ rT1, rCa +#define XORQ_RT1_RCE XORQ rT1, rCe +#define XORQ_RBA_RCU XORQ rBa, rCu +#define XORQ_RBE_RCU XORQ rBe, rCu +#define XORQ_RDU_RCU XORQ rDu, rCu +#define XORQ_RDA_RCA XORQ rDa, rCa +#define XORQ_RDE_RCE XORQ rDe, rCe + +#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \ + /* Prepare round */ \ + MOVQ rCe, rDa; \ + ROLQ $1, rDa; \ + \ + MOVQ _bi(iState), rCi; \ + XORQ _gi(iState), rDi; \ + XORQ rCu, rDa; \ + XORQ _ki(iState), rCi; \ + XORQ _mi(iState), rDi; \ + XORQ rDi, rCi; \ + \ + MOVQ rCi, rDe; \ + ROLQ $1, rDe; \ + \ + MOVQ _bo(iState), rCo; \ + XORQ _go(iState), rDo; \ + XORQ rCa, rDe; \ + XORQ _ko(iState), rCo; \ + XORQ _mo(iState), rDo; \ + XORQ rDo, rCo; \ + \ + MOVQ rCo, rDi; \ + ROLQ $1, rDi; \ + \ + MOVQ rCu, rDo; \ + XORQ rCe, rDi; \ + ROLQ $1, rDo; \ + \ + MOVQ rCa, rDu; \ + XORQ rCi, rDo; \ + ROLQ $1, rDu; \ + \ + /* Result b */ \ + MOVQ _ba(iState), rBa; \ + MOVQ _ge(iState), rBe; \ + XORQ rCo, rDu; \ + MOVQ _ki(iState), rBi; \ + MOVQ _mo(iState), rBo; \ + MOVQ _su(iState), rBu; \ + XORQ rDe, rBe; \ + ROLQ $44, rBe; \ + XORQ rDi, rBi; \ + XORQ rDa, rBa; \ + ROLQ $43, rBi; \ + \ + MOVQ rBe, rCa; \ + MOVQ rc, rT1; \ + ORQ rBi, rCa; \ + XORQ rBa, rT1; \ + XORQ rT1, rCa; \ + MOVQ rCa, _ba(oState); \ + \ + XORQ rDu, rBu; \ + ROLQ $14, rBu; \ + MOVQ rBa, rCu; \ + ANDQ rBe, rCu; \ + XORQ rBu, rCu; \ + MOVQ rCu, _bu(oState); \ + \ + XORQ rDo, rBo; \ + ROLQ $21, rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _bi(oState); \ + \ + NOTQ rBi; \ + ORQ rBa, rBu; \ + ORQ rBo, rBi; \ + XORQ rBo, rBu; \ + XORQ rBe, rBi; \ + MOVQ rBu, _bo(oState); \ + MOVQ rBi, _be(oState); \ + B_RBI_RCE; \ + \ + /* Result g */ \ + MOVQ _gu(iState), rBe; \ + XORQ rDu, rBe; \ + MOVQ _ka(iState), rBi; \ + ROLQ $20, rBe; \ + XORQ rDa, rBi; \ + ROLQ $3, rBi; \ + MOVQ _bo(iState), rBa; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDo, rBa; \ + MOVQ _me(iState), rBo; \ + MOVQ _si(iState), rBu; \ + ROLQ $28, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ga(oState); \ + G_RT1_RCA; \ + \ + XORQ rDe, rBo; \ + ROLQ $45, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ge(oState); \ + G_RT1_RCE; \ + \ + XORQ rDi, rBu; \ + ROLQ $61, rBu; 
\ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _go(oState); \ + \ + ANDQ rBe, rBa; \ + XORQ rBu, rBa; \ + MOVQ rBa, _gu(oState); \ + NOTQ rBu; \ + G_RBA_RCU; \ + \ + ORQ rBu, rBo; \ + XORQ rBi, rBo; \ + MOVQ rBo, _gi(oState); \ + \ + /* Result k */ \ + MOVQ _be(iState), rBa; \ + MOVQ _gi(iState), rBe; \ + MOVQ _ko(iState), rBi; \ + MOVQ _mu(iState), rBo; \ + MOVQ _sa(iState), rBu; \ + XORQ rDi, rBe; \ + ROLQ $6, rBe; \ + XORQ rDo, rBi; \ + ROLQ $25, rBi; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDe, rBa; \ + ROLQ $1, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ka(oState); \ + K_RT1_RCA; \ + \ + XORQ rDu, rBo; \ + ROLQ $8, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ke(oState); \ + K_RT1_RCE; \ + \ + XORQ rDa, rBu; \ + ROLQ $18, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _ki(oState); \ + \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _ko(oState); \ + \ + ANDQ rBe, rBa; \ + XORQ rBu, rBa; \ + MOVQ rBa, _ku(oState); \ + K_RBA_RCU; \ + \ + /* Result m */ \ + MOVQ _ga(iState), rBe; \ + XORQ rDa, rBe; \ + MOVQ _ke(iState), rBi; \ + ROLQ $36, rBe; \ + XORQ rDe, rBi; \ + MOVQ _bu(iState), rBa; \ + ROLQ $10, rBi; \ + MOVQ rBe, rT1; \ + MOVQ _mi(iState), rBo; \ + ANDQ rBi, rT1; \ + XORQ rDu, rBa; \ + MOVQ _so(iState), rBu; \ + ROLQ $27, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ma(oState); \ + M_RT1_RCA; \ + \ + XORQ rDi, rBo; \ + ROLQ $15, rBo; \ + MOVQ rBi, rT1; \ + ORQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _me(oState); \ + M_RT1_RCE; \ + \ + XORQ rDo, rBu; \ + ROLQ $56, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ORQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _mi(oState); \ + \ + ORQ rBa, rBe; \ + XORQ rBu, rBe; \ + MOVQ rBe, _mu(oState); \ + \ + ANDQ rBa, rBu; \ + XORQ rBo, rBu; \ + MOVQ rBu, _mo(oState); \ + M_RBE_RCU; \ + \ + /* Result s */ \ + MOVQ _bi(iState), rBa; \ + MOVQ _go(iState), rBe; \ + MOVQ _ku(iState), rBi; \ + XORQ rDi, rBa; \ + MOVQ _ma(iState), rBo; \ + ROLQ $62, rBa; \ + XORQ rDo, rBe; \ + MOVQ _se(iState), rBu; \ + ROLQ $55, rBe; \ + \ + XORQ rDu, rBi; \ + MOVQ rBa, rDu; \ + XORQ rDe, rBu; \ + ROLQ $2, rBu; \ + ANDQ rBe, rDu; \ + XORQ rBu, rDu; \ + MOVQ rDu, _su(oState); \ + \ + ROLQ $39, rBi; \ + S_RDU_RCU; \ + NOTQ rBe; \ + XORQ rDa, rBo; \ + MOVQ rBe, rDa; \ + ANDQ rBi, rDa; \ + XORQ rBa, rDa; \ + MOVQ rDa, _sa(oState); \ + S_RDA_RCA; \ + \ + ROLQ $41, rBo; \ + MOVQ rBi, rDe; \ + ORQ rBo, rDe; \ + XORQ rBe, rDe; \ + MOVQ rDe, _se(oState); \ + S_RDE_RCE; \ + \ + MOVQ rBo, rDi; \ + MOVQ rBu, rDo; \ + ANDQ rBu, rDi; \ + ORQ rBa, rDo; \ + XORQ rBi, rDi; \ + XORQ rBo, rDo; \ + MOVQ rDi, _si(oState); \ + MOVQ rDo, _so(oState) \ + +// func keccakF1600(state *[25]uint64) +TEXT ·keccakF1600(SB), 0, $200-8 + MOVQ state+0(FP), rpState + + // Convert the user state into an internal state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + // Execute the KeccakF permutation + MOVQ _ba(rpState), rCa + MOVQ _be(rpState), rCe + MOVQ _bu(rpState), rCu + + XORQ _ga(rpState), rCa + XORQ _ge(rpState), rCe + XORQ _gu(rpState), rCu + + XORQ _ka(rpState), rCa + XORQ _ke(rpState), rCe + XORQ _ku(rpState), rCu + + XORQ _ma(rpState), rCa + XORQ _me(rpState), rCe + XORQ _mu(rpState), rCu + + XORQ _sa(rpState), rCa + XORQ _se(rpState), rCe + MOVQ _si(rpState), rDi + MOVQ _so(rpState), rDo + XORQ _su(rpState), rCu + + mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, 
XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, 
XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP) + + // Revert the internal state to the user state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + RET diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go new file mode 100644 index 00000000..3cf6a22e --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/register.go @@ -0,0 +1,18 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.4 + +package sha3 + +import ( + "crypto" +) + +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go new file mode 100644 index 00000000..ba269a07 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -0,0 +1,193 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +const ( + // maxRate is the maximum size of the internal buffer. 
SHAKE-256 + // currently needs the largest buffer. + maxRate = 168 +) + +type state struct { + // Generic sponge components. + a [25]uint64 // main state of the hash + buf []byte // points into storage + rate int // the number of bytes of state to use + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + storage storageBuf + + // Specific to SHA-3 and SHAKE. + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *state) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *state) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the byte buffer, and setting Sponge.state to absorbing. +func (d *state) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.buf = d.storage.asBytes()[:0] +} + +func (d *state) clone() *state { + ret := *d + if ret.state == spongeAbsorbing { + ret.buf = ret.storage.asBytes()[:len(ret.buf)] + } else { + ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate] + } + + return &ret +} + +// permute applies the KeccakF-1600 permutation. It handles +// any input-output buffering. +func (d *state) permute() { + switch d.state { + case spongeAbsorbing: + // If we're absorbing, we need to xor the input into the state + // before applying the permutation. + xorIn(d, d.buf) + d.buf = d.storage.asBytes()[:0] + keccakF1600(&d.a) + case spongeSqueezing: + // If we're squeezing, we need to apply the permutatin before + // copying more output. + keccakF1600(&d.a) + d.buf = d.storage.asBytes()[:d.rate] + copyOut(d, d.buf) + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *state) padAndPermute(dsbyte byte) { + if d.buf == nil { + d.buf = d.storage.asBytes()[:0] + } + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in d.buf because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. + d.buf = append(d.buf, dsbyte) + zerosStart := len(d.buf) + d.buf = d.storage.asBytes()[:d.rate] + for i := zerosStart; i < d.rate; i++ { + d.buf[i] = 0 + } + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. 
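+	// For example, with SHA3-256 (rate = 136, dsbyte = 0x06) the bytes +	// appended after the message are 0x06, zero or more 0x00 bytes and a +	// final 0x80; when only a single byte of space is left, dsbyte and +	// this final bit share that byte, giving 0x86.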
+ d.buf[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing + d.buf = d.storage.asBytes()[:d.rate] + copyOut(d, d.buf) +} + +// Write absorbs more data into the hash's state. It produces an error +// if more data is written to the ShakeHash after writing +func (d *state) Write(p []byte) (written int, err error) { + if d.state != spongeAbsorbing { + panic("sha3: write to sponge after read") + } + if d.buf == nil { + d.buf = d.storage.asBytes()[:0] + } + written = len(p) + + for len(p) > 0 { + if len(d.buf) == 0 && len(p) >= d.rate { + // The fast path; absorb a full "rate" bytes of input and apply the permutation. + xorIn(d, p[:d.rate]) + p = p[d.rate:] + keccakF1600(&d.a) + } else { + // The slow path; buffer the input until we can fill the sponge, and then xor it in. + todo := d.rate - len(d.buf) + if todo > len(p) { + todo = len(p) + } + d.buf = append(d.buf, p[:todo]...) + p = p[todo:] + + // If the sponge is full, apply the permutation. + if len(d.buf) == d.rate { + d.permute() + } + } + } + + return +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (d *state) Read(out []byte) (n int, err error) { + // If we're still absorbing, pad and apply the permutation. + if d.state == spongeAbsorbing { + d.padAndPermute(d.dsbyte) + } + + n = len(out) + + // Now, do the squeezing. + for len(out) > 0 { + n := copy(out, d.buf) + d.buf = d.buf[n:] + out = out[n:] + + // Apply the permutation if we've squeezed the sponge dry. + if len(d.buf) == 0 { + d.permute() + } + } + + return +} + +// Sum applies padding to the hash state and then squeezes out the desired +// number of output bytes. +func (d *state) Sum(in []byte) []byte { + // Make a copy of the original hash so that caller can keep writing + // and summing. + dup := d.clone() + hash := make([]byte, dup.outputLen) + dup.Read(hash) + return append(in, hash...) +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go new file mode 100644 index 00000000..c13ec85b --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -0,0 +1,284 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!appengine + +package sha3 + +// This file contains code for using the 'compute intermediate +// message digest' (KIMD) and 'compute last message digest' (KLMD) +// instructions to compute SHA-3 and SHAKE hashes on IBM Z. + +import ( + "hash" + + "golang.org/x/sys/cpu" +) + +// codes represent 7-bit KIMD/KLMD function codes as defined in +// the Principles of Operation. +type code uint64 + +const ( + // function codes for KIMD/KLMD + sha3_224 code = 32 + sha3_256 = 33 + sha3_384 = 34 + sha3_512 = 35 + shake_128 = 36 + shake_256 = 37 + nopad = 0x100 +) + +// kimd is a wrapper for the 'compute intermediate message digest' instruction. +// src must be a multiple of the rate for the given function code. +//go:noescape +func kimd(function code, chain *[200]byte, src []byte) + +// klmd is a wrapper for the 'compute last message digest' instruction. +// src padding is handled by the instruction. 
+//go:noescape +func klmd(function code, chain *[200]byte, dst, src []byte) + +type asmState struct { + a [200]byte // 1600 bit state + buf []byte // care must be taken to ensure cap(buf) is a multiple of rate + rate int // equivalent to block size + storage [3072]byte // underlying storage for buf + outputLen int // output length if fixed, 0 if not + function code // KIMD/KLMD function code + state spongeDirection // whether the sponge is absorbing or squeezing +} + +func newAsmState(function code) *asmState { + var s asmState + s.function = function + switch function { + case sha3_224: + s.rate = 144 + s.outputLen = 28 + case sha3_256: + s.rate = 136 + s.outputLen = 32 + case sha3_384: + s.rate = 104 + s.outputLen = 48 + case sha3_512: + s.rate = 72 + s.outputLen = 64 + case shake_128: + s.rate = 168 + case shake_256: + s.rate = 136 + default: + panic("sha3: unrecognized function code") + } + + // limit s.buf size to a multiple of s.rate + s.resetBuf() + return &s +} + +func (s *asmState) clone() *asmState { + c := *s + c.buf = c.storage[:len(s.buf):cap(s.buf)] + return &c +} + +// copyIntoBuf copies b into buf. It will panic if there is not enough space to +// store all of b. +func (s *asmState) copyIntoBuf(b []byte) { + bufLen := len(s.buf) + s.buf = s.buf[:len(s.buf)+len(b)] + copy(s.buf[bufLen:], b) +} + +// resetBuf points buf at storage, sets the length to 0 and sets cap to be a +// multiple of the rate. +func (s *asmState) resetBuf() { + max := (cap(s.storage) / s.rate) * s.rate + s.buf = s.storage[:0:max] +} + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (s *asmState) Write(b []byte) (int, error) { + if s.state != spongeAbsorbing { + panic("sha3: write to sponge after read") + } + length := len(b) + for len(b) > 0 { + if len(s.buf) == 0 && len(b) >= cap(s.buf) { + // Hash the data directly and push any remaining bytes + // into the buffer. + remainder := len(s.buf) % s.rate + kimd(s.function, &s.a, b[:len(b)-remainder]) + if remainder != 0 { + s.copyIntoBuf(b[len(b)-remainder:]) + } + return length, nil + } + + if len(s.buf) == cap(s.buf) { + // flush the buffer + kimd(s.function, &s.a, s.buf) + s.buf = s.buf[:0] + } + + // copy as much as we can into the buffer + n := len(b) + if len(b) > cap(s.buf)-len(s.buf) { + n = cap(s.buf) - len(s.buf) + } + s.copyIntoBuf(b[:n]) + b = b[n:] + } + return length, nil +} + +// Read squeezes an arbitrary number of bytes from the sponge. 
+func (s *asmState) Read(out []byte) (n int, err error) { + n = len(out) + + // need to pad if we were absorbing + if s.state == spongeAbsorbing { + s.state = spongeSqueezing + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function, &s.a, out, s.buf) // len(out) may be 0 + s.buf = s.buf[:0] + return + } + + // write hash into buffer + max := cap(s.buf) + if max > len(out) { + max = (len(out)/s.rate)*s.rate + s.rate + } + klmd(s.function, &s.a, s.buf[:max], s.buf) + s.buf = s.buf[:max] + } + + for len(out) > 0 { + // flush the buffer + if len(s.buf) != 0 { + c := copy(out, s.buf) + out = out[c:] + s.buf = s.buf[c:] + continue + } + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function|nopad, &s.a, out, nil) + return + } + + // write hash into buffer + s.resetBuf() + if cap(s.buf) > len(out) { + s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate] + } + klmd(s.function|nopad, &s.a, s.buf, nil) + } + return +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (s *asmState) Sum(b []byte) []byte { + if s.outputLen == 0 { + panic("sha3: cannot call Sum on SHAKE functions") + } + + // Copy the state to preserve the original. + a := s.a + + // Hash the buffer. Note that we don't clear it because we + // aren't updating the state. + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) +} + +// Reset resets the Hash to its initial state. +func (s *asmState) Reset() { + for i := range s.a { + s.a[i] = 0 + } + s.resetBuf() + s.state = spongeAbsorbing +} + +// Size returns the number of bytes Sum will return. +func (s *asmState) Size() int { + return s.outputLen +} + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (s *asmState) BlockSize() int { + return s.rate +} + +// Clone returns a copy of the ShakeHash in its current state. +func (s *asmState) Clone() ShakeHash { + return s.clone() +} + +// new224Asm returns an assembly implementation of SHA3-224 if available, +// otherwise it returns nil. +func new224Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_224) + } + return nil +} + +// new256Asm returns an assembly implementation of SHA3-256 if available, +// otherwise it returns nil. +func new256Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_256) + } + return nil +} + +// new384Asm returns an assembly implementation of SHA3-384 if available, +// otherwise it returns nil. +func new384Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_384) + } + return nil +} + +// new512Asm returns an assembly implementation of SHA3-512 if available, +// otherwise it returns nil. +func new512Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_512) + } + return nil +} + +// newShake128Asm returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns nil. +func newShake128Asm() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_128) + } + return nil +} + +// newShake256Asm returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns nil. 
+func newShake256Asm() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_256) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s new file mode 100644 index 00000000..8a4458f6 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!appengine + +#include "textflag.h" + +// func kimd(function code, chain *[200]byte, src []byte) +TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG src+16(FP), R2, R3 // R2=base, R3=len + +continue: + WORD $0xB93E0002 // KIMD --, R2 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET + +// func klmd(function code, chain *[200]byte, dst, src []byte) +TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 + // TODO: SHAKE support + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG dst+16(FP), R2, R3 // R2=base, R3=len + LMG src+40(FP), R4, R5 // R4=base, R5=len + +continue: + WORD $0xB93F0024 // KLMD R2, R4 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go new file mode 100644 index 00000000..d7be2954 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file defines the ShakeHash interface, and provides +// functions for creating SHAKE and cSHAKE instances, as well as utility +// functions for hashing bytes to arbitrary-length output. +// +// +// SHAKE implementation is based on FIPS PUB 202 [1] +// cSHAKE implementations is based on NIST SP 800-185 [2] +// +// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// [2] https://doi.org/10.6028/NIST.SP.800-185 + +import ( + "encoding/binary" + "io" +) + +// ShakeHash defines the interface to hash functions that +// support arbitrary-length output. +type ShakeHash interface { + // Write absorbs more data into the hash's state. It panics if input is + // written to it after output has been read from it. + io.Writer + + // Read reads more output from the hash; reading affects the hash's + // state. (ShakeHash.Read is thus very different from Hash.Sum) + // It never returns an error. + io.Reader + + // Clone returns a copy of the ShakeHash in its current state. + Clone() ShakeHash + + // Reset resets the ShakeHash to its initial state. + Reset() +} + +// cSHAKE specific context +type cshakeState struct { + *state // SHA-3 state context and Read/Write operations + + // initBlock is the cSHAKE specific initialization set of bytes. It is initialized + // by newCShake function and stores concatenation of N followed by S, encoded + // by the method specified in 3.3 of [1]. + // It is stored here in order for Reset() to be able to put context into + // initial state. + initBlock []byte +} + +// Consts for configuring initial SHA-3 state +const ( + dsbyteShake = 0x1f + dsbyteCShake = 0x04 + rate128 = 168 + rate256 = 136 +) + +func bytepad(input []byte, w int) []byte { + // leftEncode always returns max 9 bytes + buf := make([]byte, 0, 9+len(input)+w) + buf = append(buf, leftEncode(uint64(w))...) 
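+	// leftEncode (defined below) is the left_encode function from NIST SP 800-185: +	// the minimal big-endian encoding of its argument prefixed by that encoding's +	// length in bytes, so for example leftEncode(136) yields 0x01 0x88.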
+ buf = append(buf, input...) + padlen := w - (len(buf) % w) + return append(buf, make([]byte, padlen)...) +} + +func leftEncode(value uint64) []byte { + var b [9]byte + binary.BigEndian.PutUint64(b[1:], value) + // Trim all but last leading zero bytes + i := byte(1) + for i < 8 && b[i] == 0 { + i++ + } + // Prepend number of encoded bytes + b[i-1] = 9 - i + return b[i-1:] +} + +func newCShake(N, S []byte, rate int, dsbyte byte) ShakeHash { + c := cshakeState{state: &state{rate: rate, dsbyte: dsbyte}} + + // leftEncode returns max 9 bytes + c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) + c.initBlock = append(c.initBlock, N...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) + c.initBlock = append(c.initBlock, S...) + c.Write(bytepad(c.initBlock, c.rate)) + return &c +} + +// Reset resets the hash to initial state. +func (c *cshakeState) Reset() { + c.state.Reset() + c.Write(bytepad(c.initBlock, c.rate)) +} + +// Clone returns copy of a cSHAKE context within its current state. +func (c *cshakeState) Clone() ShakeHash { + b := make([]byte, len(c.initBlock)) + copy(b, c.initBlock) + return &cshakeState{state: c.clone(), initBlock: b} +} + +// Clone returns copy of SHAKE context within its current state. +func (c *state) Clone() ShakeHash { + return c.clone() +} + +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +func NewShake128() ShakeHash { + if h := newShake128Asm(); h != nil { + return h + } + return &state{rate: rate128, dsbyte: dsbyteShake} +} + +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +func NewShake256() ShakeHash { + if h := newShake256Asm(); h != nil { + return h + } + return &state{rate: rate256, dsbyte: dsbyteShake} +} + +// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, +// a customizable variant of SHAKE128. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake128. +func NewCShake128(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake128() + } + return newCShake(N, S, rate128, dsbyteCShake) +} + +// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, +// a customizable variant of SHAKE256. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake256. +func NewCShake256(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake256() + } + return newCShake(N, S, rate256, dsbyteCShake) +} + +// ShakeSum128 writes an arbitrary-length digest of data into hash. +func ShakeSum128(hash, data []byte) { + h := NewShake128() + h.Write(data) + h.Read(hash) +} + +// ShakeSum256 writes an arbitrary-length digest of data into hash. 
+func ShakeSum256(hash, data []byte) { + h := NewShake256() + h.Write(data) + h.Read(hash) +} diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go new file mode 100644 index 00000000..add4e733 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo appengine !s390x + +package sha3 + +// newShake128Asm returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns nil. +func newShake128Asm() ShakeHash { + return nil +} + +// newShake256Asm returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns nil. +func newShake256Asm() ShakeHash { + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go new file mode 100644 index 00000000..079b6501 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!386,!ppc64le appengine + +package sha3 + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate]byte + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(b) +} + +var ( + xorIn = xorInGeneric + copyOut = copyOutGeneric + xorInUnaligned = xorInGeneric + copyOutUnaligned = copyOutGeneric +) + +const xorImplementationUnaligned = "generic" diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go new file mode 100644 index 00000000..fd35f02e --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor_generic.go @@ -0,0 +1,28 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +import "encoding/binary" + +// xorInGeneric xors the bytes in buf into the state; it +// makes no non-portable assumptions about memory layout +// or alignment. +func xorInGeneric(d *state, buf []byte) { + n := len(buf) / 8 + + for i := 0; i < n; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } +} + +// copyOutGeneric copies ulint64s to a byte buffer. +func copyOutGeneric(d *state, b []byte) { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } +} diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go new file mode 100644 index 00000000..a3d06863 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go @@ -0,0 +1,65 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 386 ppc64le +// +build !appengine + +package sha3 + +import "unsafe" + +// A storageBuf is an aligned array of maxRate bytes. 
+type storageBuf [maxRate / 8]uint64 + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(unsafe.Pointer(b)) +} + +func xorInUnaligned(d *state, buf []byte) { + n := len(buf) + bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] + if n >= 72 { + d.a[0] ^= bw[0] + d.a[1] ^= bw[1] + d.a[2] ^= bw[2] + d.a[3] ^= bw[3] + d.a[4] ^= bw[4] + d.a[5] ^= bw[5] + d.a[6] ^= bw[6] + d.a[7] ^= bw[7] + d.a[8] ^= bw[8] + } + if n >= 104 { + d.a[9] ^= bw[9] + d.a[10] ^= bw[10] + d.a[11] ^= bw[11] + d.a[12] ^= bw[12] + } + if n >= 136 { + d.a[13] ^= bw[13] + d.a[14] ^= bw[14] + d.a[15] ^= bw[15] + d.a[16] ^= bw[16] + } + if n >= 144 { + d.a[17] ^= bw[17] + } + if n >= 168 { + d.a[18] ^= bw[18] + d.a[19] ^= bw[19] + d.a[20] ^= bw[20] + } +} + +func copyOutUnaligned(d *state, buf []byte) { + ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) + copy(buf, ab[:]) +} + +var ( + xorIn = xorInUnaligned + copyOut = copyOutUnaligned +) + +const xorImplementationUnaligned = "unaligned" diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go new file mode 100644 index 00000000..2f04ee5b --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -0,0 +1,966 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import ( + "bytes" + "io" + "strconv" + "sync" + "unicode/utf8" +) + +// EscapeCodes contains escape sequences that can be written to the terminal in +// order to achieve different styles of text. +type EscapeCodes struct { + // Foreground colors + Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte + + // Reset all attributes + Reset []byte +} + +var vt100EscapeCodes = EscapeCodes{ + Black: []byte{keyEscape, '[', '3', '0', 'm'}, + Red: []byte{keyEscape, '[', '3', '1', 'm'}, + Green: []byte{keyEscape, '[', '3', '2', 'm'}, + Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, + Blue: []byte{keyEscape, '[', '3', '4', 'm'}, + Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, + Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, + White: []byte{keyEscape, '[', '3', '7', 'm'}, + + Reset: []byte{keyEscape, '[', '0', 'm'}, +} + +// Terminal contains the state for running a VT100 terminal that is capable of +// reading lines of input. +type Terminal struct { + // AutoCompleteCallback, if non-null, is called for each keypress with + // the full input line and the current position of the cursor (in + // bytes, as an index into |line|). If it returns ok=false, the key + // press is processed normally. Otherwise it returns a replacement line + // and the new cursor position. + AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) + + // Escape contains a pointer to the escape codes for this terminal. + // It's always a valid pointer, although the escape codes themselves + // may be empty if the terminal doesn't support them. + Escape *EscapeCodes + + // lock protects the terminal and the state in this object from + // concurrent processing of a key press and a Write() call. + lock sync.Mutex + + c io.ReadWriter + prompt []rune + + // line is the current line being entered. + line []rune + // pos is the logical position of the cursor in line + pos int + // echo is true if local echo is enabled + echo bool + // pasteActive is true iff there is a bracketed paste operation in + // progress. 
+ pasteActive bool + + // cursorX contains the current X value of the cursor where the left + // edge is 0. cursorY contains the row number where the first row of + // the current line is 0. + cursorX, cursorY int + // maxLine is the greatest value of cursorY so far. + maxLine int + + termWidth, termHeight int + + // outBuf contains the terminal data to be sent. + outBuf []byte + // remainder contains the remainder of any partial key sequences after + // a read. It aliases into inBuf. + remainder []byte + inBuf [256]byte + + // history contains previously entered commands so that they can be + // accessed with the up and down keys. + history stRingBuffer + // historyIndex stores the currently accessed history entry, where zero + // means the immediately previous entry. + historyIndex int + // When navigating up and down the history it's possible to return to + // the incomplete, initial line. That value is stored in + // historyPending. + historyPending string +} + +// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is +// a local terminal, that terminal must first have been put into raw mode. +// prompt is a string that is written at the start of each input line (i.e. +// "> "). +func NewTerminal(c io.ReadWriter, prompt string) *Terminal { + return &Terminal{ + Escape: &vt100EscapeCodes, + c: c, + prompt: []rune(prompt), + termWidth: 80, + termHeight: 24, + echo: true, + historyIndex: -1, + } +} + +const ( + keyCtrlD = 4 + keyCtrlU = 21 + keyEnter = '\r' + keyEscape = 27 + keyBackspace = 127 + keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota + keyUp + keyDown + keyLeft + keyRight + keyAltLeft + keyAltRight + keyHome + keyEnd + keyDeleteWord + keyDeleteLine + keyClearScreen + keyPasteStart + keyPasteEnd +) + +var ( + crlf = []byte{'\r', '\n'} + pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} + pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} +) + +// bytesToKey tries to parse a key sequence from b. If successful, it returns +// the key and the remainder of the input. Otherwise it returns utf8.RuneError. 
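+// For example, outside of a bracketed paste the VT100 up-arrow sequence +// 0x1b '[' 'A' is returned as keyUp, with any bytes that follow it returned +// as the remainder.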
+func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { + if len(b) == 0 { + return utf8.RuneError, nil + } + + if !pasteActive { + switch b[0] { + case 1: // ^A + return keyHome, b[1:] + case 5: // ^E + return keyEnd, b[1:] + case 8: // ^H + return keyBackspace, b[1:] + case 11: // ^K + return keyDeleteLine, b[1:] + case 12: // ^L + return keyClearScreen, b[1:] + case 23: // ^W + return keyDeleteWord, b[1:] + case 14: // ^N + return keyDown, b[1:] + case 16: // ^P + return keyUp, b[1:] + } + } + + if b[0] != keyEscape { + if !utf8.FullRune(b) { + return utf8.RuneError, b + } + r, l := utf8.DecodeRune(b) + return r, b[l:] + } + + if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { + switch b[2] { + case 'A': + return keyUp, b[3:] + case 'B': + return keyDown, b[3:] + case 'C': + return keyRight, b[3:] + case 'D': + return keyLeft, b[3:] + case 'H': + return keyHome, b[3:] + case 'F': + return keyEnd, b[3:] + } + } + + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { + switch b[5] { + case 'C': + return keyAltRight, b[6:] + case 'D': + return keyAltLeft, b[6:] + } + } + + if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { + return keyPasteStart, b[6:] + } + + if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { + return keyPasteEnd, b[6:] + } + + // If we get here then we have a key that we don't recognise, or a + // partial sequence. It's not clear how one should find the end of a + // sequence without knowing them all, but it seems that [a-zA-Z~] only + // appears at the end of a sequence. + for i, c := range b[0:] { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { + return keyUnknown, b[i+1:] + } + } + + return utf8.RuneError, b +} + +// queue appends data to the end of t.outBuf +func (t *Terminal) queue(data []rune) { + t.outBuf = append(t.outBuf, []byte(string(data))...) +} + +var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} +var space = []rune{' '} + +func isPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// moveCursorToPos appends data to t.outBuf which will move the cursor to the +// given, logical position in the text. +func (t *Terminal) moveCursorToPos(pos int) { + if !t.echo { + return + } + + x := visualLength(t.prompt) + pos + y := x / t.termWidth + x = x % t.termWidth + + up := 0 + if y < t.cursorY { + up = t.cursorY - y + } + + down := 0 + if y > t.cursorY { + down = y - t.cursorY + } + + left := 0 + if x < t.cursorX { + left = t.cursorX - x + } + + right := 0 + if x > t.cursorX { + right = x - t.cursorX + } + + t.cursorX = x + t.cursorY = y + t.move(up, down, left, right) +} + +func (t *Terminal) move(up, down, left, right int) { + m := []rune{} + + // 1 unit up can be expressed as ^[[A or ^[A + // 5 units up can be expressed as ^[[5A + + if up == 1 { + m = append(m, keyEscape, '[', 'A') + } else if up > 1 { + m = append(m, keyEscape, '[') + m = append(m, []rune(strconv.Itoa(up))...) + m = append(m, 'A') + } + + if down == 1 { + m = append(m, keyEscape, '[', 'B') + } else if down > 1 { + m = append(m, keyEscape, '[') + m = append(m, []rune(strconv.Itoa(down))...) + m = append(m, 'B') + } + + if right == 1 { + m = append(m, keyEscape, '[', 'C') + } else if right > 1 { + m = append(m, keyEscape, '[') + m = append(m, []rune(strconv.Itoa(right))...) 
+ m = append(m, 'C') + } + + if left == 1 { + m = append(m, keyEscape, '[', 'D') + } else if left > 1 { + m = append(m, keyEscape, '[') + m = append(m, []rune(strconv.Itoa(left))...) + m = append(m, 'D') + } + + t.queue(m) +} + +func (t *Terminal) clearLineToRight() { + op := []rune{keyEscape, '[', 'K'} + t.queue(op) +} + +const maxLineLength = 4096 + +func (t *Terminal) setLine(newLine []rune, newPos int) { + if t.echo { + t.moveCursorToPos(0) + t.writeLine(newLine) + for i := len(newLine); i < len(t.line); i++ { + t.writeLine(space) + } + t.moveCursorToPos(newPos) + } + t.line = newLine + t.pos = newPos +} + +func (t *Terminal) advanceCursor(places int) { + t.cursorX += places + t.cursorY += t.cursorX / t.termWidth + if t.cursorY > t.maxLine { + t.maxLine = t.cursorY + } + t.cursorX = t.cursorX % t.termWidth + + if places > 0 && t.cursorX == 0 { + // Normally terminals will advance the current position + // when writing a character. But that doesn't happen + // for the last character in a line. However, when + // writing a character (except a new line) that causes + // a line wrap, the position will be advanced two + // places. + // + // So, if we are stopping at the end of a line, we + // need to write a newline so that our cursor can be + // advanced to the next line. + t.outBuf = append(t.outBuf, '\r', '\n') + } +} + +func (t *Terminal) eraseNPreviousChars(n int) { + if n == 0 { + return + } + + if t.pos < n { + n = t.pos + } + t.pos -= n + t.moveCursorToPos(t.pos) + + copy(t.line[t.pos:], t.line[n+t.pos:]) + t.line = t.line[:len(t.line)-n] + if t.echo { + t.writeLine(t.line[t.pos:]) + for i := 0; i < n; i++ { + t.queue(space) + } + t.advanceCursor(n) + t.moveCursorToPos(t.pos) + } +} + +// countToLeftWord returns then number of characters from the cursor to the +// start of the previous word. +func (t *Terminal) countToLeftWord() int { + if t.pos == 0 { + return 0 + } + + pos := t.pos - 1 + for pos > 0 { + if t.line[pos] != ' ' { + break + } + pos-- + } + for pos > 0 { + if t.line[pos] == ' ' { + pos++ + break + } + pos-- + } + + return t.pos - pos +} + +// countToRightWord returns then number of characters from the cursor to the +// start of the next word. +func (t *Terminal) countToRightWord() int { + pos := t.pos + for pos < len(t.line) { + if t.line[pos] == ' ' { + break + } + pos++ + } + for pos < len(t.line) { + if t.line[pos] != ' ' { + break + } + pos++ + } + return pos - t.pos +} + +// visualLength returns the number of visible glyphs in s. +func visualLength(runes []rune) int { + inEscapeSeq := false + length := 0 + + for _, r := range runes { + switch { + case inEscapeSeq: + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { + inEscapeSeq = false + } + case r == '\x1b': + inEscapeSeq = true + default: + length++ + } + } + + return length +} + +// handleKey processes the given key and, optionally, returns a line of text +// that the user has entered. +func (t *Terminal) handleKey(key rune) (line string, ok bool) { + if t.pasteActive && key != keyEnter { + t.addKeyToLine(key) + return + } + + switch key { + case keyBackspace: + if t.pos == 0 { + return + } + t.eraseNPreviousChars(1) + case keyAltLeft: + // move left by a word. + t.pos -= t.countToLeftWord() + t.moveCursorToPos(t.pos) + case keyAltRight: + // move right by a word. 
+ t.pos += t.countToRightWord() + t.moveCursorToPos(t.pos) + case keyLeft: + if t.pos == 0 { + return + } + t.pos-- + t.moveCursorToPos(t.pos) + case keyRight: + if t.pos == len(t.line) { + return + } + t.pos++ + t.moveCursorToPos(t.pos) + case keyHome: + if t.pos == 0 { + return + } + t.pos = 0 + t.moveCursorToPos(t.pos) + case keyEnd: + if t.pos == len(t.line) { + return + } + t.pos = len(t.line) + t.moveCursorToPos(t.pos) + case keyUp: + entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + if !ok { + return "", false + } + if t.historyIndex == -1 { + t.historyPending = string(t.line) + } + t.historyIndex++ + runes := []rune(entry) + t.setLine(runes, len(runes)) + case keyDown: + switch t.historyIndex { + case -1: + return + case 0: + runes := []rune(t.historyPending) + t.setLine(runes, len(runes)) + t.historyIndex-- + default: + entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + if ok { + t.historyIndex-- + runes := []rune(entry) + t.setLine(runes, len(runes)) + } + } + case keyEnter: + t.moveCursorToPos(len(t.line)) + t.queue([]rune("\r\n")) + line = string(t.line) + ok = true + t.line = t.line[:0] + t.pos = 0 + t.cursorX = 0 + t.cursorY = 0 + t.maxLine = 0 + case keyDeleteWord: + // Delete zero or more spaces and then one or more characters. + t.eraseNPreviousChars(t.countToLeftWord()) + case keyDeleteLine: + // Delete everything from the current cursor position to the + // end of line. + for i := t.pos; i < len(t.line); i++ { + t.queue(space) + t.advanceCursor(1) + } + t.line = t.line[:t.pos] + t.moveCursorToPos(t.pos) + case keyCtrlD: + // Erase the character under the current position. + // The EOF case when the line is empty is handled in + // readLine(). + if t.pos < len(t.line) { + t.pos++ + t.eraseNPreviousChars(1) + } + case keyCtrlU: + t.eraseNPreviousChars(t.pos) + case keyClearScreen: + // Erases the screen and moves the cursor to the home position. + t.queue([]rune("\x1b[2J\x1b[H")) + t.queue(t.prompt) + t.cursorX, t.cursorY = 0, 0 + t.advanceCursor(visualLength(t.prompt)) + t.setLine(t.line, t.pos) + default: + if t.AutoCompleteCallback != nil { + prefix := string(t.line[:t.pos]) + suffix := string(t.line[t.pos:]) + + t.lock.Unlock() + newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) + t.lock.Lock() + + if completeOk { + t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) + return + } + } + if !isPrintable(key) { + return + } + if len(t.line) == maxLineLength { + return + } + t.addKeyToLine(key) + } + return +} + +// addKeyToLine inserts the given key at the current position in the current +// line. +func (t *Terminal) addKeyToLine(key rune) { + if len(t.line) == cap(t.line) { + newLine := make([]rune, len(t.line), 2*(1+len(t.line))) + copy(newLine, t.line) + t.line = newLine + } + t.line = t.line[:len(t.line)+1] + copy(t.line[t.pos+1:], t.line[t.pos:]) + t.line[t.pos] = key + if t.echo { + t.writeLine(t.line[t.pos:]) + } + t.pos++ + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) writeLine(line []rune) { + for len(line) != 0 { + remainingOnLine := t.termWidth - t.cursorX + todo := len(line) + if todo > remainingOnLine { + todo = remainingOnLine + } + t.queue(line[:todo]) + t.advanceCursor(visualLength(line[:todo])) + line = line[todo:] + } +} + +// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n. 
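+// For example, writing []byte("a\nb") sends "a", "\r\n" and then "b" to w, +// and reports n = 3, the length of the original buffer.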
+func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) { + for len(buf) > 0 { + i := bytes.IndexByte(buf, '\n') + todo := len(buf) + if i >= 0 { + todo = i + } + + var nn int + nn, err = w.Write(buf[:todo]) + n += nn + if err != nil { + return n, err + } + buf = buf[todo:] + + if i >= 0 { + if _, err = w.Write(crlf); err != nil { + return n, err + } + n++ + buf = buf[1:] + } + } + + return n, nil +} + +func (t *Terminal) Write(buf []byte) (n int, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + if t.cursorX == 0 && t.cursorY == 0 { + // This is the easy case: there's nothing on the screen that we + // have to move out of the way. + return writeWithCRLF(t.c, buf) + } + + // We have a prompt and possibly user input on the screen. We + // have to clear it first. + t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) + t.cursorX = 0 + t.clearLineToRight() + + for t.cursorY > 0 { + t.move(1 /* up */, 0, 0, 0) + t.cursorY-- + t.clearLineToRight() + } + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + + if n, err = writeWithCRLF(t.c, buf); err != nil { + return + } + + t.writeLine(t.prompt) + if t.echo { + t.writeLine(t.line) + } + + t.moveCursorToPos(t.pos) + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + return +} + +// ReadPassword temporarily changes the prompt and reads a password, without +// echo, from the terminal. +func (t *Terminal) ReadPassword(prompt string) (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + oldPrompt := t.prompt + t.prompt = []rune(prompt) + t.echo = false + + line, err = t.readLine() + + t.prompt = oldPrompt + t.echo = true + + return +} + +// ReadLine returns a line of input from the terminal. +func (t *Terminal) ReadLine() (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + return t.readLine() +} + +func (t *Terminal) readLine() (line string, err error) { + // t.lock must be held at this point + + if t.cursorX == 0 && t.cursorY == 0 { + t.writeLine(t.prompt) + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + } + + lineIsPasted := t.pasteActive + + for { + rest := t.remainder + lineOk := false + for !lineOk { + var key rune + key, rest = bytesToKey(rest, t.pasteActive) + if key == utf8.RuneError { + break + } + if !t.pasteActive { + if key == keyCtrlD { + if len(t.line) == 0 { + return "", io.EOF + } + } + if key == keyPasteStart { + t.pasteActive = true + if len(t.line) == 0 { + lineIsPasted = true + } + continue + } + } else if key == keyPasteEnd { + t.pasteActive = false + continue + } + if !t.pasteActive { + lineIsPasted = false + } + line, lineOk = t.handleKey(key) + } + if len(rest) > 0 { + n := copy(t.inBuf[:], rest) + t.remainder = t.inBuf[:n] + } else { + t.remainder = nil + } + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + if lineOk { + if t.echo { + t.historyIndex = -1 + t.history.Add(line) + } + if lineIsPasted { + err = ErrPasteIndicator + } + return + } + + // t.remainder is a slice at the beginning of t.inBuf + // containing a partial key sequence + readBuf := t.inBuf[len(t.remainder):] + var n int + + t.lock.Unlock() + n, err = t.c.Read(readBuf) + t.lock.Lock() + + if err != nil { + return + } + + t.remainder = t.inBuf[:n+len(t.remainder)] + } +} + +// SetPrompt sets the prompt to be used when reading subsequent lines. 
+func (t *Terminal) SetPrompt(prompt string) { + t.lock.Lock() + defer t.lock.Unlock() + + t.prompt = []rune(prompt) +} + +func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { + // Move cursor to column zero at the start of the line. + t.move(t.cursorY, 0, t.cursorX, 0) + t.cursorX, t.cursorY = 0, 0 + t.clearLineToRight() + for t.cursorY < numPrevLines { + // Move down a line + t.move(0, 1, 0, 0) + t.cursorY++ + t.clearLineToRight() + } + // Move back to beginning. + t.move(t.cursorY, 0, 0, 0) + t.cursorX, t.cursorY = 0, 0 + + t.queue(t.prompt) + t.advanceCursor(visualLength(t.prompt)) + t.writeLine(t.line) + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) SetSize(width, height int) error { + t.lock.Lock() + defer t.lock.Unlock() + + if width == 0 { + width = 1 + } + + oldWidth := t.termWidth + t.termWidth, t.termHeight = width, height + + switch { + case width == oldWidth: + // If the width didn't change then nothing else needs to be + // done. + return nil + case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: + // If there is nothing on current line and no prompt printed, + // just do nothing + return nil + case width < oldWidth: + // Some terminals (e.g. xterm) will truncate lines that were + // too long when shinking. Others, (e.g. gnome-terminal) will + // attempt to wrap them. For the former, repainting t.maxLine + // works great, but that behaviour goes badly wrong in the case + // of the latter because they have doubled every full line. + + // We assume that we are working on a terminal that wraps lines + // and adjust the cursor position based on every previous line + // wrapping and turning into two. This causes the prompt on + // xterms to move upwards, which isn't great, but it avoids a + // huge mess with gnome-terminal. + if t.cursorX >= t.termWidth { + t.cursorX = t.termWidth - 1 + } + t.cursorY *= 2 + t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) + case width > oldWidth: + // If the terminal expands then our position calculations will + // be wrong in the future because we think the cursor is + // |t.pos| chars into the string, but there will be a gap at + // the end of any wrapped line. + // + // But the position will actually be correct until we move, so + // we can move back to the beginning and repaint everything. + t.clearAndRepaintLinePlusNPrevious(t.maxLine) + } + + _, err := t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + return err +} + +type pasteIndicatorError struct{} + +func (pasteIndicatorError) Error() string { + return "terminal: ErrPasteIndicator not correctly handled" +} + +// ErrPasteIndicator may be returned from ReadLine as the error, in addition +// to valid line data. It indicates that bracketed paste mode is enabled and +// that the returned line consists only of pasted data. Programs may wish to +// interpret pasted data more literally than typed data. +var ErrPasteIndicator = pasteIndicatorError{} + +// SetBracketedPasteMode requests that the terminal bracket paste operations +// with markers. Not all terminals support this but, if it is supported, then +// enabling this mode will stop any autocomplete callback from running due to +// pastes. Additionally, any lines that are completely pasted will be returned +// from ReadLine with the error set to ErrPasteIndicator. +func (t *Terminal) SetBracketedPasteMode(on bool) { + if on { + io.WriteString(t.c, "\x1b[?2004h") + } else { + io.WriteString(t.c, "\x1b[?2004l") + } +} + +// stRingBuffer is a ring buffer of strings. 
+type stRingBuffer struct { + // entries contains max elements. + entries []string + max int + // head contains the index of the element most recently added to the ring. + head int + // size contains the number of elements in the ring. + size int +} + +func (s *stRingBuffer) Add(a string) { + if s.entries == nil { + const defaultNumEntries = 100 + s.entries = make([]string, defaultNumEntries) + s.max = defaultNumEntries + } + + s.head = (s.head + 1) % s.max + s.entries[s.head] = a + if s.size < s.max { + s.size++ + } +} + +// NthPreviousEntry returns the value passed to the nth previous call to Add. +// If n is zero then the immediately prior value is returned, if one, then the +// next most recent, and so on. If such an element doesn't exist then ok is +// false. +func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { + if n >= s.size { + return "", false + } + index := s.head - n + if index < 0 { + index += s.max + } + return s.entries[index], true +} + +// readPasswordLine reads from reader until it finds \n or io.EOF. +// The slice returned does not include the \n. +// readPasswordLine also ignores any \r it finds. +func readPasswordLine(reader io.Reader) ([]byte, error) { + var buf [1]byte + var ret []byte + + for { + n, err := reader.Read(buf[:]) + if n > 0 { + switch buf[0] { + case '\n': + return ret, nil + case '\r': + // remove \r from passwords on Windows + default: + ret = append(ret, buf[0]) + } + continue + } + if err != nil { + if err == io.EOF && len(ret) > 0 { + return ret, nil + } + return ret, err + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go new file mode 100644 index 00000000..39110408 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -0,0 +1,114 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" +) + +// State contains the state of a terminal. +type State struct { + termios unix.Termios +} + +// IsTerminal returns whether the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. 
+ termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil { + return nil, err + } + + return &oldState, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return -1, -1, err + } + return int(ws.Col), int(ws.Row), nil +} + +// passwordReader is an io.Reader that reads from a specific file descriptor. +type passwordReader int + +func (r passwordReader) Read(buf []byte) (int, error) { + return unix.Read(int(r), buf) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + newState := *termios + newState.Lflag &^= unix.ECHO + newState.Lflag |= unix.ICANON | unix.ISIG + newState.Iflag |= unix.ICRNL + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newState); err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) + + return readPasswordLine(passwordReader(fd)) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go new file mode 100644 index 00000000..dfcd6278 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go new file mode 100644 index 00000000..cb23a590 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA +const ioctlWriteTermios = unix.TIOCSETA diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go new file mode 100644 index 00000000..5fadfe8a --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go @@ -0,0 +1,10 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go new file mode 100644 index 00000000..9317ac7e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go @@ -0,0 +1,58 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "fmt" + "runtime" +) + +type State struct{} + +// IsTerminal returns whether the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + return false +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go new file mode 100644 index 00000000..3d5f06a9 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -0,0 +1,124 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + termios unix.Termios +} + +// IsTerminal returns whether the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c + val, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + oldState := *val + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState) + if err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState) + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +// see http://cr.illumos.org/~webrev/andy_js/1060/ +func MakeRaw(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil { + return nil, err + } + + return &oldState, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, oldState *State) error { + return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return 0, 0, err + } + return int(ws.Col), int(ws.Row), nil +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go new file mode 100644 index 00000000..5cfdf8f3 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -0,0 +1,105 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "os" + + "golang.org/x/sys/windows" +) + +type State struct { + mode uint32 +} + +// IsTerminal returns whether the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var st uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &st) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { + return nil, err + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return windows.SetConsoleMode(windows.Handle(fd), state.mode) +} + +// GetSize returns the visible dimensions of the given terminal. +// +// These dimensions don't include any scrollback buffer height. +func GetSize(fd int) (width, height int, err error) { + var info windows.ConsoleScreenBufferInfo + if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { + return 0, 0, err + } + return int(info.Window.Right - info.Window.Left + 1), int(info.Window.Bottom - info.Window.Top + 1), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + old := st + + st &^= (windows.ENABLE_ECHO_INPUT) + st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil { + return nil, err + } + + defer windows.SetConsoleMode(windows.Handle(fd), old) + + var h windows.Handle + p, _ := windows.GetCurrentProcess() + if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil { + return nil, err + } + + f := os.NewFile(uintptr(h), "stdin") + defer f.Close() + return readPasswordLine(f) +} diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/sys/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. 
+# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/sys/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/sys/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/sys/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 00000000..06f84b85 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 00000000..ed8da8de --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,60 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "runtime" +) + +// byteOrder is a subset of encoding/binary.ByteOrder. +type byteOrder interface { + Uint32([]byte) uint32 + Uint64([]byte) uint64 +} + +type littleEndian struct{} +type bigEndian struct{} + +func (littleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (bigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +// hostByteOrder returns binary.LittleEndian on little-endian machines and +// binary.BigEndian on big-endian machines. +func hostByteOrder() byteOrder { + switch runtime.GOARCH { + case "386", "amd64", "amd64p32", + "arm", "arm64", + "mipsle", "mips64le", "mips64p32le", + "ppc64le", + "riscv", "riscv64": + return littleEndian{} + case "armbe", "arm64be", + "mips", "mips64", "mips64p32", + "ppc", "ppc64", + "s390", "s390x", + "sparc", "sparc64": + return bigEndian{} + } + panic("unknown architecture") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go new file mode 100644 index 00000000..b4e6ecb2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -0,0 +1,162 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +// Initialized reports whether the CPU features were initialized. +// +// For some GOOS/GOARCH combinations initialization of the CPU features depends +// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm +// Initialized will report false if reading the file fails. +var Initialized bool + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. +var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. + HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. 
+var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + _ CacheLinePad +} + +// ARM contains the supported CPU features of the current ARM (32-bit) platform. +// All feature flags are false if: +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. +var ARM struct { + _ CacheLinePad + HasSWP bool // SWP instruction support + HasHALF bool // Half-word load and store support + HasTHUMB bool // ARM Thumb instruction set + Has26BIT bool // Address space limited to 26-bits + HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support + HasFPA bool // Floating point arithmetic support + HasVFP bool // Vector floating point support + HasEDSP bool // DSP Extensions support + HasJAVA bool // Java instruction set + HasIWMMXT bool // Intel Wireless MMX technology support + HasCRUNCH bool // MaverickCrunch context switching and handling + HasTHUMBEE bool // Thumb EE instruction set + HasNEON bool // NEON instruction set + HasVFPv3 bool // Vector floating point version 3 support + HasVFPv3D16 bool // Vector floating point version 3 D8-D15 + HasTLS bool // Thread local storage support + HasVFPv4 bool // Vector floating point version 4 support + HasIDIVA bool // Integer divide instruction support in ARM mode + HasIDIVT bool // Integer divide instruction support in Thumb mode + HasVFPD32 bool // Vector floating point version 3 D15-D31 + HasLPAE bool // Large Physical Address Extensions + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. +// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. 
The minimum processor requirement is POWER8 (ISA 2.07). +// The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9) + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. +var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go b/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go new file mode 100644 index 00000000..be602722 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix,ppc64 + +package cpu + +const cacheLineSize = 128 + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func init() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 00000000..981af681 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,40 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +// HWCAP/HWCAP2 bits. +// These are specific to Linux. 
+const ( + hwcap_SWP = 1 << 0 + hwcap_HALF = 1 << 1 + hwcap_THUMB = 1 << 2 + hwcap_26BIT = 1 << 3 + hwcap_FAST_MULT = 1 << 4 + hwcap_FPA = 1 << 5 + hwcap_VFP = 1 << 6 + hwcap_EDSP = 1 << 7 + hwcap_JAVA = 1 << 8 + hwcap_IWMMXT = 1 << 9 + hwcap_CRUNCH = 1 << 10 + hwcap_THUMBEE = 1 << 11 + hwcap_NEON = 1 << 12 + hwcap_VFPv3 = 1 << 13 + hwcap_VFPv3D16 = 1 << 14 + hwcap_TLS = 1 << 15 + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_IDIVT = 1 << 18 + hwcap_VFPD32 = 1 << 19 + hwcap_LPAE = 1 << 20 + hwcap_EVTSTRM = 1 << 21 + + hwcap2_AES = 1 << 0 + hwcap2_PMULL = 1 << 1 + hwcap2_SHA1 = 1 << 2 + hwcap2_SHA2 = 1 << 3 + hwcap2_CRC32 = 1 << 4 +) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 00000000..568bcd03 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 00000000..f7cb4697 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build !gccgo + +package cpu + +// cpuid is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c new file mode 100644 index 00000000..e363c7d1 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c @@ -0,0 +1,43 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build gccgo + +#include <cpuid.h> +#include <stdint.h> + +// Need to wrap __get_cpuid_count because it's declared as static. +int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0. +// +// TODO: Replace with a better alternative: +// +// #include +// +// #pragma GCC target("xsave") +// +// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { +// unsigned long long x = _xgetbv(0); +// *eax = x & 0xffffffff; +// *edx = (x >> 32) & 0xffffffff; +// } +// +// Note that _xgetbv is defined starting with GCC 8.
+void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + __asm(" xorl %%ecx, %%ecx\n" + " xgetbv" + : "=a"(*eax), "=d"(*edx)); +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go new file mode 100644 index 00000000..ba49b91b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go new file mode 100644 index 00000000..aa986f77 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return false } + +// TODO(mundaym): the following feature detection functions are currently +// stubs. See https://golang.org/cl/162887 for how to fix this. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList { panic("not implemented for gccgo") } +func kmQuery() queryResult { panic("not implemented for gccgo") } +func kmcQuery() queryResult { panic("not implemented for gccgo") } +func kmctrQuery() queryResult { panic("not implemented for gccgo") } +func kmaQuery() queryResult { panic("not implemented for gccgo") } +func kimdQuery() queryResult { panic("not implemented for gccgo") } +func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 00000000..10e712dc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!amd64p32,!386 + +package cpu + +import ( + "io/ioutil" +) + +const ( + _AT_HWCAP = 16 + _AT_HWCAP2 = 26 + + procAuxv = "/proc/self/auxv" + + uintSize = int(32 << (^uint(0) >> 63)) +) + +// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 +// These are initialized in cpu_$GOARCH.go +// and should not be changed after they are initialized. +var hwCap uint +var hwCap2 uint + +func init() { + buf, err := ioutil.ReadFile(procAuxv) + if err != nil { + // e.g. 
on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false + return + } + + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + doinit() + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go new file mode 100644 index 00000000..2057006d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 00000000..fa7fb1bd --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 64 + +// HWCAP/HWCAP2 bits. These are exposed by Linux. 
+const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func doinit() { + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go new file mode 100644 index 00000000..f65134f6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!arm,!arm64,!ppc64,!ppc64le,!s390x + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 00000000..6c8d975d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. 
+const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 00000000..d579eaef --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,161 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 256 + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // cryptography facilities + msa4 facility = 77 // message-security-assist extension 4 + msa8 facility = 146 // message-security-assist extension 8 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. 
+func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) + } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 00000000..6165f121 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips64 mips64le + +package cpu + +const cacheLineSize = 32 diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 00000000..1269eee8 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips mipsle + +package cpu + +const cacheLineSize = 32 diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 00000000..e1f31dd2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,arm64 + +package cpu + +const cacheLineSize = 64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 00000000..efe2b7a8 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build riscv64 + +package cpu + +const cacheLineSize = 32 diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 00000000..e5037d92 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 00000000..8681e876 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. + +const cacheLineSize = 0 diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 00000000..d70d317f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build 386 amd64 amd64p32 + +package cpu + +const cacheLineSize = 64 + +func init() { + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + osSupportsAVX := false + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + } + + X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, _, _ := cpuid(7, 0) + X86.HasBMI1 = isSet(3, ebx7) + X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX + X86.HasBMI2 = isSet(8, ebx7) + X86.HasERMS = isSet(9, ebx7) + X86.HasRDSEED = isSet(18, ebx7) + X86.HasADX = isSet(19, ebx7) +} + +func isSet(bitpos uint, value uint32) bool { + return value&(1<