Mirror of https://github.com/ergochat/ergo.git (synced 2024-11-22 11:59:40 +01:00)

fix #782 (bring vendor into the main tree)

commit d0aa7cc860 (parent 702c7b1e7c)

3   .gitmodules (vendored)
@@ -1,3 +0,0 @@
-[submodule "vendor"]
-    path = vendor
-    url = https://github.com/oragono/oragono-vendor.git
@@ -7,9 +7,7 @@ This is just a bunch of tips and tricks we keep in mind while developing Oragono
You should use the [latest distribution of the Go language for your OS and architecture](https://golang.org/dl/). (If `uname -m` on your Raspberry Pi reports `armv7l`, use the `armv6l` distribution of Go; if it reports v8, you may be able to use the `arm64` distribution.)

-Oragono vendors all its dependencies. The vendored code is tracked via a git submodule: `vendor/` is a submodule pointing to the [oragono-vendor](https://github.com/oragono/oragono-vendor) repository. As long as you're not modifying the vendored dependencies, `make` should take care of everything for you --- but if you are, see the "vendor" section below.
-
-Because of this, Oragono is self-contained and you should not need to fetch any dependencies with `go get`. Doing so is not recommended, since it may fetch incompatible versions of the dependencies. If you're having trouble building the code, it's very likely because your clone of the repository is in the wrong place: Go is very opinionated about where you should keep your code. Take a look at the [go workspaces documentation](https://golang.org/doc/code.html) if you're having trouble.
+Oragono vendors all its dependencies. Because of this, Oragono is self-contained and you should not need to fetch any dependencies with `go get`. Doing so is not recommended, since it may fetch incompatible versions of the dependencies.

## Branches
@@ -59,23 +57,6 @@ New release of Oragono!


-## Updating `vendor/`
-
-The `vendor/` directory holds our dependencies. When we import new repos, we need to update this folder to contain these new deps. This is something that I'll mostly be handling.
-
-To update this folder:
-
-1. Install https://github.com/golang/dep
-2. `cd` to the Oragono folder
-3. `dep ensure -update`
-4. `cd vendor`
-5. Commit the changes with the message `"Updated packages"`
-6. `cd ..`
-7. Commit the result with the message `"vendor: Updated submodules"`
-
-This will make sure things stay nice and up-to-date for users.
-
-
## Fuzzing and Testing

Fuzzing can be useful. We don't have testing done inside the IRCd itself, but this fuzzer I've written works alright and has helped shake out various bugs: [irc_fuzz.py](https://gist.github.com/DanielOaks/63ae611039cdf591dfa4).
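For reference, the same dumb-fuzzing idea can be sketched in Go with nothing but the standard library. This is purely illustrative and not part of the commit: it assumes an ircd listening on plaintext `localhost:6667`, and the Python gist above remains the tool actually used.

```go
package main

import (
    "bufio"
    "fmt"
    "math/rand"
    "net"
    "time"
)

func main() {
    // Register a throwaway client, then fire semi-random lines at the server
    // and watch for dropped connections or panics in the server logs.
    conn, err := net.Dial("tcp", "localhost:6667")
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    fmt.Fprint(conn, "NICK fuzzer\r\nUSER fuzzer 0 * :fuzzer\r\n")

    // Drain server responses so the connection doesn't stall on a full buffer.
    go func() {
        r := bufio.NewReader(conn)
        for {
            if _, err := r.ReadString('\n'); err != nil {
                return
            }
        }
    }()

    alphabet := []byte("ABCxyz0123 :!@#,*")
    for i := 0; i < 1000; i++ {
        line := make([]byte, rand.Intn(400))
        for j := range line {
            line[j] = alphabet[rand.Intn(len(alphabet))]
        }
        if _, err := conn.Write(append(line, '\r', '\n')); err != nil {
            fmt.Println("connection closed:", err)
            return
        }
        time.Sleep(time.Millisecond)
    }
}
```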
11   Makefile
@@ -1,24 +1,21 @@
-.PHONY: all install build release capdefs deps test
+.PHONY: all install build release capdefs test

capdef_file = ./irc/caps/defs.go

all: install

-install: deps
+install:
	go install -v

-build: deps
+build:
	go build -v

-release: deps
+release:
	goreleaser --skip-publish --rm-dist

capdefs:
	python3 ./gencapdefs.py > ${capdef_file}

-deps:
-	git submodule update --init
-
test:
	python3 ./gencapdefs.py | diff - ${capdef_file}
	cd irc && go test . && go vet .
1   vendor
@@ -1 +0,0 @@
-Subproject commit 6e49b8a260f1ba3351c17876c2e2d0044c315078
201   vendor/code.cloudfoundry.org/bytefmt/LICENSE (generated, vendored, new file)
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
20   vendor/code.cloudfoundry.org/bytefmt/NOTICE (generated, vendored, new file)
@@ -0,0 +1,20 @@
Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved.

This project contains software that is Copyright (c) 2013-2015 Pivotal Software, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

This project may include a number of subcomponents with separate
copyright notices and license terms. Your use of these subcomponents
is subject to the terms and conditions of each subcomponent's license,
as noted in the LICENSE file.
15   vendor/code.cloudfoundry.org/bytefmt/README.md (generated, vendored, new file)
@@ -0,0 +1,15 @@
bytefmt
=======

**Note**: This repository should be imported as `code.cloudfoundry.org/bytefmt`.

Human-readable byte formatter.

Example:

```go
bytefmt.ByteSize(100.5*bytefmt.MEGABYTE) // returns "100.5M"
bytefmt.ByteSize(uint64(1024)) // returns "1K"
```

For documentation, please see http://godoc.org/code.cloudfoundry.org/bytefmt
121   vendor/code.cloudfoundry.org/bytefmt/bytes.go (generated, vendored, new file)
@@ -0,0 +1,121 @@
// Package bytefmt contains helper methods and constants for converting to and from a human-readable byte format.
//
//   bytefmt.ByteSize(100.5*bytefmt.MEGABYTE) // "100.5M"
//   bytefmt.ByteSize(uint64(1024)) // "1K"
//
package bytefmt

import (
    "errors"
    "strconv"
    "strings"
    "unicode"
)

const (
    BYTE = 1 << (10 * iota)
    KILOBYTE
    MEGABYTE
    GIGABYTE
    TERABYTE
    PETABYTE
    EXABYTE
)

var invalidByteQuantityError = errors.New("byte quantity must be a positive integer with a unit of measurement like M, MB, MiB, G, GiB, or GB")

// ByteSize returns a human-readable byte string of the form 10M, 12.5K, and so forth. The following units are available:
//   E: Exabyte
//   P: Petabyte
//   T: Terabyte
//   G: Gigabyte
//   M: Megabyte
//   K: Kilobyte
//   B: Byte
// The unit that results in the smallest number greater than or equal to 1 is always chosen.
func ByteSize(bytes uint64) string {
    unit := ""
    value := float64(bytes)

    switch {
    case bytes >= EXABYTE:
        unit = "E"
        value = value / EXABYTE
    case bytes >= PETABYTE:
        unit = "P"
        value = value / PETABYTE
    case bytes >= TERABYTE:
        unit = "T"
        value = value / TERABYTE
    case bytes >= GIGABYTE:
        unit = "G"
        value = value / GIGABYTE
    case bytes >= MEGABYTE:
        unit = "M"
        value = value / MEGABYTE
    case bytes >= KILOBYTE:
        unit = "K"
        value = value / KILOBYTE
    case bytes >= BYTE:
        unit = "B"
    case bytes == 0:
        return "0B"
    }

    result := strconv.FormatFloat(value, 'f', 1, 64)
    result = strings.TrimSuffix(result, ".0")
    return result + unit
}

// ToMegabytes parses a string formatted by ByteSize as megabytes.
func ToMegabytes(s string) (uint64, error) {
    bytes, err := ToBytes(s)
    if err != nil {
        return 0, err
    }

    return bytes / MEGABYTE, nil
}

// ToBytes parses a string formatted by ByteSize as bytes. Note binary-prefixed and SI prefixed units both mean a base-2 units
// KB = K = KiB = 1024
// MB = M = MiB = 1024 * K
// GB = G = GiB = 1024 * M
// TB = T = TiB = 1024 * G
// PB = P = PiB = 1024 * T
// EB = E = EiB = 1024 * P
func ToBytes(s string) (uint64, error) {
    s = strings.TrimSpace(s)
    s = strings.ToUpper(s)

    i := strings.IndexFunc(s, unicode.IsLetter)

    if i == -1 {
        return 0, invalidByteQuantityError
    }

    bytesString, multiple := s[:i], s[i:]
    bytes, err := strconv.ParseFloat(bytesString, 64)
    if err != nil || bytes < 0 {
        return 0, invalidByteQuantityError
    }

    switch multiple {
    case "E", "EB", "EIB":
        return uint64(bytes * EXABYTE), nil
    case "P", "PB", "PIB":
        return uint64(bytes * PETABYTE), nil
    case "T", "TB", "TIB":
        return uint64(bytes * TERABYTE), nil
    case "G", "GB", "GIB":
        return uint64(bytes * GIGABYTE), nil
    case "M", "MB", "MIB":
        return uint64(bytes * MEGABYTE), nil
    case "K", "KB", "KIB":
        return uint64(bytes * KILOBYTE), nil
    case "B":
        return uint64(bytes), nil
    default:
        return 0, invalidByteQuantityError
    }
}
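As a quick usage sketch (illustrative only, not part of the vendored file above), the round trip through the `ByteSize` and `ToBytes` helpers defined here looks like this:

```go
package main

import (
    "fmt"

    "code.cloudfoundry.org/bytefmt"
)

func main() {
    // Format raw byte counts with the vendored helpers above.
    fmt.Println(bytefmt.ByteSize(1024))                     // "1K"
    fmt.Println(bytefmt.ByteSize(100.5 * bytefmt.MEGABYTE)) // "100.5M"

    // Parse them back; binary and SI suffixes are both treated as base-2.
    n, err := bytefmt.ToBytes("2.5GB")
    if err != nil {
        fmt.Println("parse error:", err)
        return
    }
    fmt.Println(n) // 2684354560
}
```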
1   vendor/code.cloudfoundry.org/bytefmt/package.go (generated, vendored, new file)
@@ -0,0 +1 @@
package bytefmt // import "code.cloudfoundry.org/bytefmt"
25   vendor/github.com/docopt/docopt-go/.gitignore (generated, vendored, new file)
@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

# coverage droppings
profile.cov
32   vendor/github.com/docopt/docopt-go/.travis.yml (generated, vendored, new file)
@@ -0,0 +1,32 @@
# Travis CI (http://travis-ci.org/) is a continuous integration
# service for open source projects. This file configures it
# to run unit tests for docopt-go.

language: go

go:
  - 1.4
  - 1.5
  - 1.6
  - 1.7
  - 1.8
  - 1.9
  - tip

matrix:
  fast_finish: true

before_install:
  - go get golang.org/x/tools/cmd/cover
  - go get github.com/mattn/goveralls

install:
  - go get -d -v ./... && go build -v ./...

script:
  - go vet -x ./...
  - go test -v ./...
  - go test -covermode=count -coverprofile=profile.cov .

after_script:
  - $HOME/gopath/bin/goveralls -coverprofile=profile.cov -service=travis-ci
21   vendor/github.com/docopt/docopt-go/LICENSE (generated, vendored, new file)
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2013 Keith Batten
Copyright (c) 2016 David Irvine

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
116   vendor/github.com/docopt/docopt-go/README.md (generated, vendored, new file)
@@ -0,0 +1,116 @@
docopt-go
=========

[![Build Status](https://travis-ci.org/docopt/docopt.go.svg?branch=master)](https://travis-ci.org/docopt/docopt.go)
[![Coverage Status](https://coveralls.io/repos/github/docopt/docopt.go/badge.svg)](https://coveralls.io/github/docopt/docopt.go)
[![GoDoc](https://godoc.org/github.com/docopt/docopt.go?status.svg)](https://godoc.org/github.com/docopt/docopt.go)

An implementation of [docopt](http://docopt.org/) in the [Go](http://golang.org/) programming language.

**docopt** helps you create *beautiful* command-line interfaces easily:

```go
package main

import (
    "fmt"
    "github.com/docopt/docopt-go"
)

func main() {
    usage := `Naval Fate.

Usage:
  naval_fate ship new <name>...
  naval_fate ship <name> move <x> <y> [--speed=<kn>]
  naval_fate ship shoot <x> <y>
  naval_fate mine (set|remove) <x> <y> [--moored|--drifting]
  naval_fate -h | --help
  naval_fate --version

Options:
  -h --help     Show this screen.
  --version     Show version.
  --speed=<kn>  Speed in knots [default: 10].
  --moored      Moored (anchored) mine.
  --drifting    Drifting mine.`

    arguments, _ := docopt.ParseDoc(usage)
    fmt.Println(arguments)
}
```

**docopt** parses command-line arguments based on a help message. Don't write parser code: a good help message already has all the necessary information in it.

## Installation

⚠ Use the alias "docopt-go". To use docopt in your Go code:

```go
import "github.com/docopt/docopt-go"
```

To install docopt in your `$GOPATH`:

```console
$ go get github.com/docopt/docopt-go
```

## API

Given a conventional command-line help message, docopt processes the arguments. See https://github.com/docopt/docopt#help-message-format for a description of the help message format.

This package exposes three different APIs, depending on the level of control required. The first, simplest way to parse your docopt usage is to just call:

```go
docopt.ParseDoc(usage)
```

This will use `os.Args[1:]` as the argv slice, and use the default parser options. If you want to provide your own version string and args, then use:

```go
docopt.ParseArgs(usage, argv, "1.2.3")
```

If the last parameter (version) is a non-empty string, it will be printed when `--version` is given in the argv slice. Finally, we can instantiate our own `docopt.Parser` which gives us control over how things like help messages are printed and whether to exit after displaying usage messages, etc.

```go
parser := &docopt.Parser{
    HelpHandler:  docopt.PrintHelpOnly,
    OptionsFirst: true,
}
opts, err := parser.ParseArgs(usage, argv, "")
```

In particular, setting your own custom `HelpHandler` function makes unit testing your own docs with example command line invocations much more enjoyable.

All three of these return a map of option names to the values parsed from argv, and an error or nil. You can get the values using the helpers, or just treat it as a regular map:

```go
flag, _ := opts.Bool("--flag")
secs, _ := opts.Int("<seconds>")
```

Additionally, you can `Bind` these to a struct, assigning option values to the exported fields of that struct, all at once.

```go
var config struct {
    Command string `docopt:"<cmd>"`
    Tries   int    `docopt:"-n"`
    Force   bool   // Gets the value of --force
}
opts.Bind(&config)
```

More documentation is available at [godoc.org](https://godoc.org/github.com/docopt/docopt-go).

## Unit Testing

Unit testing your own usage docs is recommended, so you can be sure that for a given command line invocation, the expected options are set. An example of how to do this is [in the examples folder](examples/unit_test/unit_test.go).

## Tests

All tests from the Python version are implemented and passing at [Travis CI](https://travis-ci.org/docopt/docopt-go). New language-agnostic tests have been added to [test_golang.docopt](test_golang.docopt).

To run tests for docopt-go, use `go test`.
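Pulling the README's pieces together, here is a self-contained sketch (illustrative only, with a made-up `counter` usage string; not part of the vendored file) that parses arguments and then binds them onto a struct:

```go
package main

import (
    "fmt"

    "github.com/docopt/docopt-go"
)

const usage = `Counter.

Usage:
  counter [--count=<n>] <word>

Options:
  --count=<n>  How many times to repeat [default: 2].`

func main() {
    // Parse a fixed argv slice; the empty version string disables --version.
    opts, err := docopt.ParseArgs(usage, []string{"--count=3", "hello"}, "")
    if err != nil {
        panic(err)
    }

    // Bind the parsed options onto a struct, as described above.
    var conf struct {
        Word  string `docopt:"<word>"`
        Count int    `docopt:"--count"`
    }
    if err := opts.Bind(&conf); err != nil {
        panic(err)
    }

    for i := 0; i < conf.Count; i++ {
        fmt.Println(conf.Word) // prints "hello" three times
    }
}
```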
49   vendor/github.com/docopt/docopt-go/doc.go (generated, vendored, new file)
@@ -0,0 +1,49 @@
/*
Package docopt parses command-line arguments based on a help message.

Given a conventional command-line help message, docopt processes the arguments.
See https://github.com/docopt/docopt#help-message-format for a description of
the help message format.

This package exposes three different APIs, depending on the level of control
required. The first, simplest way to parse your docopt usage is to just call:

    docopt.ParseDoc(usage)

This will use os.Args[1:] as the argv slice, and use the default parser
options. If you want to provide your own version string and args, then use:

    docopt.ParseArgs(usage, argv, "1.2.3")

If the last parameter (version) is a non-empty string, it will be printed when
--version is given in the argv slice. Finally, we can instantiate our own
docopt.Parser which gives us control over how things like help messages are
printed and whether to exit after displaying usage messages, etc.

    parser := &docopt.Parser{
        HelpHandler:  docopt.PrintHelpOnly,
        OptionsFirst: true,
    }
    opts, err := parser.ParseArgs(usage, argv, "")

In particular, setting your own custom HelpHandler function makes unit testing
your own docs with example command line invocations much more enjoyable.

All three of these return a map of option names to the values parsed from argv,
and an error or nil. You can get the values using the helpers, or just treat it
as a regular map:

    flag, _ := opts.Bool("--flag")
    secs, _ := opts.Int("<seconds>")

Additionally, you can `Bind` these to a struct, assigning option values to the
exported fields of that struct, all at once.

    var config struct {
        Command string `docopt:"<cmd>"`
        Tries   int    `docopt:"-n"`
        Force   bool   // Gets the value of --force
    }
    opts.Bind(&config)
*/
package docopt
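The point above about custom `HelpHandler`s and unit testing can be made concrete with a small sketch (again illustrative, not from the vendored code): a handler that records output instead of exiting lets bad invocations be checked from an ordinary test.

```go
package main

import (
    "fmt"

    "github.com/docopt/docopt-go"
)

func main() {
    usage := "Usage: prog [--verbose] <path>"

    var captured string
    parser := &docopt.Parser{
        // Instead of PrintHelpAndExit (the default), record what would have
        // been printed so a test can assert on it without the process exiting.
        HelpHandler: func(_ error, out string) {
            captured = out
        },
        OptionsFirst: true,
    }

    // "--bogus" with no <path> doesn't match the usage, so err is a UserError
    // and the handler receives the usage text instead of printing it.
    opts, err := parser.ParseArgs(usage, []string{"--bogus"}, "")
    fmt.Println(len(opts), err != nil, captured != "") // 0 true true
}
```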
575   vendor/github.com/docopt/docopt-go/docopt.go (generated, vendored, new file)
@@ -0,0 +1,575 @@
|
||||
// Licensed under terms of MIT license (see LICENSE-MIT)
|
||||
// Copyright (c) 2013 Keith Batten, kbatten@gmail.com
|
||||
// Copyright (c) 2016 David Irvine
|
||||
|
||||
package docopt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Parser struct {
|
||||
// HelpHandler is called when we encounter bad user input, or when the user
|
||||
// asks for help.
|
||||
// By default, this calls os.Exit(0) if it handled a built-in option such
|
||||
// as -h, --help or --version. If the user errored with a wrong command or
|
||||
// options, we exit with a return code of 1.
|
||||
HelpHandler func(err error, usage string)
|
||||
// OptionsFirst requires that option flags always come before positional
|
||||
// arguments; otherwise they can overlap.
|
||||
OptionsFirst bool
|
||||
// SkipHelpFlags tells the parser not to look for -h and --help flags and
|
||||
// call the HelpHandler.
|
||||
SkipHelpFlags bool
|
||||
}
|
||||
|
||||
var PrintHelpAndExit = func(err error, usage string) {
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, usage)
|
||||
os.Exit(1)
|
||||
} else {
|
||||
fmt.Println(usage)
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
|
||||
var PrintHelpOnly = func(err error, usage string) {
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, usage)
|
||||
} else {
|
||||
fmt.Println(usage)
|
||||
}
|
||||
}
|
||||
|
||||
var NoHelpHandler = func(err error, usage string) {}
|
||||
|
||||
var DefaultParser = &Parser{
|
||||
HelpHandler: PrintHelpAndExit,
|
||||
OptionsFirst: false,
|
||||
SkipHelpFlags: false,
|
||||
}
|
||||
|
||||
// ParseDoc parses os.Args[1:] based on the interface described in doc, using the default parser options.
|
||||
func ParseDoc(doc string) (Opts, error) {
|
||||
return ParseArgs(doc, nil, "")
|
||||
}
|
||||
|
||||
// ParseArgs parses custom arguments based on the interface described in doc. If you provide a non-empty version
|
||||
// string, then this will be displayed when the --version flag is found. This method uses the default parser options.
|
||||
func ParseArgs(doc string, argv []string, version string) (Opts, error) {
|
||||
return DefaultParser.ParseArgs(doc, argv, version)
|
||||
}
|
||||
|
||||
// ParseArgs parses custom arguments based on the interface described in doc. If you provide a non-empty version
|
||||
// string, then this will be displayed when the --version flag is found.
|
||||
func (p *Parser) ParseArgs(doc string, argv []string, version string) (Opts, error) {
|
||||
return p.parse(doc, argv, version)
|
||||
}
|
||||
|
||||
// Deprecated: Parse is provided for backward compatibility with the original docopt.go package.
|
||||
// Please rather make use of ParseDoc, ParseArgs, or use your own custom Parser.
|
||||
func Parse(doc string, argv []string, help bool, version string, optionsFirst bool, exit ...bool) (map[string]interface{}, error) {
|
||||
exitOk := true
|
||||
if len(exit) > 0 {
|
||||
exitOk = exit[0]
|
||||
}
|
||||
p := &Parser{
|
||||
OptionsFirst: optionsFirst,
|
||||
SkipHelpFlags: !help,
|
||||
}
|
||||
if exitOk {
|
||||
p.HelpHandler = PrintHelpAndExit
|
||||
} else {
|
||||
p.HelpHandler = PrintHelpOnly
|
||||
}
|
||||
return p.parse(doc, argv, version)
|
||||
}
|
||||
|
||||
func (p *Parser) parse(doc string, argv []string, version string) (map[string]interface{}, error) {
|
||||
if argv == nil {
|
||||
argv = os.Args[1:]
|
||||
}
|
||||
if p.HelpHandler == nil {
|
||||
p.HelpHandler = DefaultParser.HelpHandler
|
||||
}
|
||||
args, output, err := parse(doc, argv, !p.SkipHelpFlags, version, p.OptionsFirst)
|
||||
if _, ok := err.(*UserError); ok {
|
||||
// the user gave us bad input
|
||||
p.HelpHandler(err, output)
|
||||
} else if len(output) > 0 && err == nil {
|
||||
// the user asked for help or --version
|
||||
p.HelpHandler(err, output)
|
||||
}
|
||||
return args, err
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// parse and return a map of args, output and all errors
|
||||
func parse(doc string, argv []string, help bool, version string, optionsFirst bool) (args map[string]interface{}, output string, err error) {
|
||||
if argv == nil && len(os.Args) > 1 {
|
||||
argv = os.Args[1:]
|
||||
}
|
||||
|
||||
usageSections := parseSection("usage:", doc)
|
||||
|
||||
if len(usageSections) == 0 {
|
||||
err = newLanguageError("\"usage:\" (case-insensitive) not found.")
|
||||
return
|
||||
}
|
||||
if len(usageSections) > 1 {
|
||||
err = newLanguageError("More than one \"usage:\" (case-insensitive).")
|
||||
return
|
||||
}
|
||||
usage := usageSections[0]
|
||||
|
||||
options := parseDefaults(doc)
|
||||
formal, err := formalUsage(usage)
|
||||
if err != nil {
|
||||
output = handleError(err, usage)
|
||||
return
|
||||
}
|
||||
|
||||
pat, err := parsePattern(formal, &options)
|
||||
if err != nil {
|
||||
output = handleError(err, usage)
|
||||
return
|
||||
}
|
||||
|
||||
patternArgv, err := parseArgv(newTokenList(argv, errorUser), &options, optionsFirst)
|
||||
if err != nil {
|
||||
output = handleError(err, usage)
|
||||
return
|
||||
}
|
||||
patFlat, err := pat.flat(patternOption)
|
||||
if err != nil {
|
||||
output = handleError(err, usage)
|
||||
return
|
||||
}
|
||||
patternOptions := patFlat.unique()
|
||||
|
||||
patFlat, err = pat.flat(patternOptionSSHORTCUT)
|
||||
if err != nil {
|
||||
output = handleError(err, usage)
|
||||
return
|
||||
}
|
||||
for _, optionsShortcut := range patFlat {
|
||||
docOptions := parseDefaults(doc)
|
||||
optionsShortcut.children = docOptions.unique().diff(patternOptions)
|
||||
}
|
||||
|
||||
if output = extras(help, version, patternArgv, doc); len(output) > 0 {
|
||||
return
|
||||
}
|
||||
|
||||
err = pat.fix()
|
||||
if err != nil {
|
||||
output = handleError(err, usage)
|
||||
return
|
||||
}
|
||||
matched, left, collected := pat.match(&patternArgv, nil)
|
||||
if matched && len(*left) == 0 {
|
||||
patFlat, err = pat.flat(patternDefault)
|
||||
if err != nil {
|
||||
output = handleError(err, usage)
|
||||
return
|
||||
}
|
||||
args = append(patFlat, *collected...).dictionary()
|
||||
return
|
||||
}
|
||||
|
||||
err = newUserError("")
|
||||
output = handleError(err, usage)
|
||||
return
|
||||
}
|
||||
|
||||
func handleError(err error, usage string) string {
|
||||
if _, ok := err.(*UserError); ok {
|
||||
return strings.TrimSpace(fmt.Sprintf("%s\n%s", err, usage))
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func parseSection(name, source string) []string {
|
||||
p := regexp.MustCompile(`(?im)^([^\n]*` + name + `[^\n]*\n?(?:[ \t].*?(?:\n|$))*)`)
|
||||
s := p.FindAllString(source, -1)
|
||||
if s == nil {
|
||||
s = []string{}
|
||||
}
|
||||
for i, v := range s {
|
||||
s[i] = strings.TrimSpace(v)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func parseDefaults(doc string) patternList {
|
||||
defaults := patternList{}
|
||||
p := regexp.MustCompile(`\n[ \t]*(-\S+?)`)
|
||||
for _, s := range parseSection("options:", doc) {
|
||||
// FIXME corner case "bla: options: --foo"
|
||||
_, _, s = stringPartition(s, ":") // get rid of "options:"
|
||||
split := p.Split("\n"+s, -1)[1:]
|
||||
match := p.FindAllStringSubmatch("\n"+s, -1)
|
||||
for i := range split {
|
||||
optionDescription := match[i][1] + split[i]
|
||||
if strings.HasPrefix(optionDescription, "-") {
|
||||
defaults = append(defaults, parseOption(optionDescription))
|
||||
}
|
||||
}
|
||||
}
|
||||
return defaults
|
||||
}
|
||||
|
||||
func parsePattern(source string, options *patternList) (*pattern, error) {
|
||||
tokens := tokenListFromPattern(source)
|
||||
result, err := parseExpr(tokens, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tokens.current() != nil {
|
||||
return nil, tokens.errorFunc("unexpected ending: %s" + strings.Join(tokens.tokens, " "))
|
||||
}
|
||||
return newRequired(result...), nil
|
||||
}
|
||||
|
||||
func parseArgv(tokens *tokenList, options *patternList, optionsFirst bool) (patternList, error) {
|
||||
/*
|
||||
Parse command-line argument vector.
|
||||
|
||||
If options_first:
|
||||
argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ;
|
||||
else:
|
||||
argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ;
|
||||
*/
|
||||
parsed := patternList{}
|
||||
for tokens.current() != nil {
|
||||
if tokens.current().eq("--") {
|
||||
for _, v := range tokens.tokens {
|
||||
parsed = append(parsed, newArgument("", v))
|
||||
}
|
||||
return parsed, nil
|
||||
} else if tokens.current().hasPrefix("--") {
|
||||
pl, err := parseLong(tokens, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parsed = append(parsed, pl...)
|
||||
} else if tokens.current().hasPrefix("-") && !tokens.current().eq("-") {
|
||||
ps, err := parseShorts(tokens, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parsed = append(parsed, ps...)
|
||||
} else if optionsFirst {
|
||||
for _, v := range tokens.tokens {
|
||||
parsed = append(parsed, newArgument("", v))
|
||||
}
|
||||
return parsed, nil
|
||||
} else {
|
||||
parsed = append(parsed, newArgument("", tokens.move().String()))
|
||||
}
|
||||
}
|
||||
return parsed, nil
|
||||
}
|
||||
|
||||
func parseOption(optionDescription string) *pattern {
|
||||
optionDescription = strings.TrimSpace(optionDescription)
|
||||
options, _, description := stringPartition(optionDescription, " ")
|
||||
options = strings.Replace(options, ",", " ", -1)
|
||||
options = strings.Replace(options, "=", " ", -1)
|
||||
|
||||
short := ""
|
||||
long := ""
|
||||
argcount := 0
|
||||
var value interface{}
|
||||
value = false
|
||||
|
||||
reDefault := regexp.MustCompile(`(?i)\[default: (.*)\]`)
|
||||
for _, s := range strings.Fields(options) {
|
||||
if strings.HasPrefix(s, "--") {
|
||||
long = s
|
||||
} else if strings.HasPrefix(s, "-") {
|
||||
short = s
|
||||
} else {
|
||||
argcount = 1
|
||||
}
|
||||
if argcount > 0 {
|
||||
matched := reDefault.FindAllStringSubmatch(description, -1)
|
||||
if len(matched) > 0 {
|
||||
value = matched[0][1]
|
||||
} else {
|
||||
value = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return newOption(short, long, argcount, value)
|
||||
}
|
||||
|
||||
func parseExpr(tokens *tokenList, options *patternList) (patternList, error) {
|
||||
// expr ::= seq ( '|' seq )* ;
|
||||
seq, err := parseSeq(tokens, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !tokens.current().eq("|") {
|
||||
return seq, nil
|
||||
}
|
||||
var result patternList
|
||||
if len(seq) > 1 {
|
||||
result = patternList{newRequired(seq...)}
|
||||
} else {
|
||||
result = seq
|
||||
}
|
||||
for tokens.current().eq("|") {
|
||||
tokens.move()
|
||||
seq, err = parseSeq(tokens, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(seq) > 1 {
|
||||
result = append(result, newRequired(seq...))
|
||||
} else {
|
||||
result = append(result, seq...)
|
||||
}
|
||||
}
|
||||
if len(result) > 1 {
|
||||
return patternList{newEither(result...)}, nil
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func parseSeq(tokens *tokenList, options *patternList) (patternList, error) {
|
||||
// seq ::= ( atom [ '...' ] )* ;
|
||||
result := patternList{}
|
||||
for !tokens.current().match(true, "]", ")", "|") {
|
||||
atom, err := parseAtom(tokens, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tokens.current().eq("...") {
|
||||
atom = patternList{newOneOrMore(atom...)}
|
||||
tokens.move()
|
||||
}
|
||||
result = append(result, atom...)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func parseAtom(tokens *tokenList, options *patternList) (patternList, error) {
|
||||
// atom ::= '(' expr ')' | '[' expr ']' | 'options' | long | shorts | argument | command ;
|
||||
tok := tokens.current()
|
||||
result := patternList{}
|
||||
if tokens.current().match(false, "(", "[") {
|
||||
tokens.move()
|
||||
var matching string
|
||||
pl, err := parseExpr(tokens, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tok.eq("(") {
|
||||
matching = ")"
|
||||
result = patternList{newRequired(pl...)}
|
||||
} else if tok.eq("[") {
|
||||
matching = "]"
|
||||
result = patternList{newOptional(pl...)}
|
||||
}
|
||||
moved := tokens.move()
|
||||
if !moved.eq(matching) {
|
||||
return nil, tokens.errorFunc("unmatched '%s', expected: '%s' got: '%s'", tok, matching, moved)
|
||||
}
|
||||
return result, nil
|
||||
} else if tok.eq("options") {
|
||||
tokens.move()
|
||||
return patternList{newOptionsShortcut()}, nil
|
||||
} else if tok.hasPrefix("--") && !tok.eq("--") {
|
||||
return parseLong(tokens, options)
|
||||
} else if tok.hasPrefix("-") && !tok.eq("-") && !tok.eq("--") {
|
||||
return parseShorts(tokens, options)
|
||||
} else if tok.hasPrefix("<") && tok.hasSuffix(">") || tok.isUpper() {
|
||||
return patternList{newArgument(tokens.move().String(), nil)}, nil
|
||||
}
|
||||
return patternList{newCommand(tokens.move().String(), false)}, nil
|
||||
}
|
||||
|
||||
func parseLong(tokens *tokenList, options *patternList) (patternList, error) {
|
||||
// long ::= '--' chars [ ( ' ' | '=' ) chars ] ;
|
||||
long, eq, v := stringPartition(tokens.move().String(), "=")
|
||||
var value interface{}
|
||||
var opt *pattern
|
||||
if eq == "" && v == "" {
|
||||
value = nil
|
||||
} else {
|
||||
value = v
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(long, "--") {
|
||||
return nil, newError("long option '%s' doesn't start with --", long)
|
||||
}
|
||||
similar := patternList{}
|
||||
for _, o := range *options {
|
||||
if o.long == long {
|
||||
similar = append(similar, o)
|
||||
}
|
||||
}
|
||||
if tokens.err == errorUser && len(similar) == 0 { // if no exact match
|
||||
similar = patternList{}
|
||||
for _, o := range *options {
|
||||
if strings.HasPrefix(o.long, long) {
|
||||
similar = append(similar, o)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(similar) > 1 { // might be simply specified ambiguously 2+ times?
|
||||
similarLong := make([]string, len(similar))
|
||||
for i, s := range similar {
|
||||
similarLong[i] = s.long
|
||||
}
|
||||
return nil, tokens.errorFunc("%s is not a unique prefix: %s?", long, strings.Join(similarLong, ", "))
|
||||
} else if len(similar) < 1 {
|
||||
argcount := 0
|
||||
if eq == "=" {
|
||||
argcount = 1
|
||||
}
|
||||
opt = newOption("", long, argcount, false)
|
||||
*options = append(*options, opt)
|
||||
if tokens.err == errorUser {
|
||||
var val interface{}
|
||||
if argcount > 0 {
|
||||
val = value
|
||||
} else {
|
||||
val = true
|
||||
}
|
||||
opt = newOption("", long, argcount, val)
|
||||
}
|
||||
} else {
|
||||
opt = newOption(similar[0].short, similar[0].long, similar[0].argcount, similar[0].value)
|
||||
if opt.argcount == 0 {
|
||||
if value != nil {
|
||||
return nil, tokens.errorFunc("%s must not have an argument", opt.long)
|
||||
}
|
||||
} else {
|
||||
if value == nil {
|
||||
if tokens.current().match(true, "--") {
|
||||
return nil, tokens.errorFunc("%s requires argument", opt.long)
|
||||
}
|
||||
moved := tokens.move()
|
||||
if moved != nil {
|
||||
value = moved.String() // only set as string if not nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if tokens.err == errorUser {
|
||||
if value != nil {
|
||||
opt.value = value
|
||||
} else {
|
||||
opt.value = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return patternList{opt}, nil
|
||||
}
|
||||
|
||||
func parseShorts(tokens *tokenList, options *patternList) (patternList, error) {
|
||||
// shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;
|
||||
tok := tokens.move()
|
||||
if !tok.hasPrefix("-") || tok.hasPrefix("--") {
|
||||
return nil, newError("short option '%s' doesn't start with -", tok)
|
||||
}
|
||||
left := strings.TrimLeft(tok.String(), "-")
|
||||
parsed := patternList{}
|
||||
for left != "" {
|
||||
var opt *pattern
|
||||
short := "-" + left[0:1]
|
||||
left = left[1:]
|
||||
similar := patternList{}
|
||||
for _, o := range *options {
|
||||
if o.short == short {
|
||||
similar = append(similar, o)
|
||||
}
|
||||
}
|
||||
if len(similar) > 1 {
|
||||
return nil, tokens.errorFunc("%s is specified ambiguously %d times", short, len(similar))
|
||||
} else if len(similar) < 1 {
|
||||
opt = newOption(short, "", 0, false)
|
||||
*options = append(*options, opt)
|
||||
if tokens.err == errorUser {
|
||||
opt = newOption(short, "", 0, true)
|
||||
}
|
||||
} else { // why copying is necessary here?
|
||||
opt = newOption(short, similar[0].long, similar[0].argcount, similar[0].value)
|
||||
var value interface{}
|
||||
if opt.argcount > 0 {
|
||||
if left == "" {
|
||||
if tokens.current().match(true, "--") {
|
||||
return nil, tokens.errorFunc("%s requires argument", short)
|
||||
}
|
||||
value = tokens.move().String()
|
||||
} else {
|
||||
value = left
|
||||
left = ""
|
||||
}
|
||||
}
|
||||
if tokens.err == errorUser {
|
||||
if value != nil {
|
||||
opt.value = value
|
||||
} else {
|
||||
opt.value = true
|
||||
}
|
||||
}
|
||||
}
|
||||
parsed = append(parsed, opt)
|
||||
}
|
||||
return parsed, nil
|
||||
}
|
||||
|
||||
func formalUsage(section string) (string, error) {
|
||||
_, _, section = stringPartition(section, ":") // drop "usage:"
|
||||
pu := strings.Fields(section)
|
||||
|
||||
if len(pu) == 0 {
|
||||
return "", newLanguageError("no fields found in usage (perhaps a spacing error).")
|
||||
}
|
||||
|
||||
result := "( "
|
||||
for _, s := range pu[1:] {
|
||||
if s == pu[0] {
|
||||
result += ") | ( "
|
||||
} else {
|
||||
result += s + " "
|
||||
}
|
||||
}
|
||||
result += ")"
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func extras(help bool, version string, options patternList, doc string) string {
|
||||
if help {
|
||||
for _, o := range options {
|
||||
if (o.name == "-h" || o.name == "--help") && o.value == true {
|
||||
return strings.Trim(doc, "\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
if version != "" {
|
||||
for _, o := range options {
|
||||
if (o.name == "--version") && o.value == true {
|
||||
return version
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func stringPartition(s, sep string) (string, string, string) {
|
||||
sepPos := strings.Index(s, sep)
|
||||
if sepPos == -1 { // no seperator found
|
||||
return s, "", ""
|
||||
}
|
||||
split := strings.SplitN(s, sep, 2)
|
||||
return split[0], sep, split[1]
|
||||
}
|
49   vendor/github.com/docopt/docopt-go/error.go (generated, vendored, new file)
@@ -0,0 +1,49 @@
package docopt

import (
    "fmt"
)

type errorType int

const (
    errorUser errorType = iota
    errorLanguage
)

func (e errorType) String() string {
    switch e {
    case errorUser:
        return "errorUser"
    case errorLanguage:
        return "errorLanguage"
    }
    return ""
}

// UserError records an error with program arguments.
type UserError struct {
    msg   string
    Usage string
}

func (e UserError) Error() string {
    return e.msg
}
func newUserError(msg string, f ...interface{}) error {
    return &UserError{fmt.Sprintf(msg, f...), ""}
}

// LanguageError records an error with the doc string.
type LanguageError struct {
    msg string
}

func (e LanguageError) Error() string {
    return e.msg
}
func newLanguageError(msg string, f ...interface{}) error {
    return &LanguageError{fmt.Sprintf(msg, f...)}
}

var newError = fmt.Errorf
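These two error types let callers tell bad user input apart from a malformed usage string; a minimal illustrative sketch (not part of the vendored code):

```go
package main

import (
    "fmt"

    "github.com/docopt/docopt-go"
)

func main() {
    // "Usage" is misspelled below, so docopt reports a LanguageError rather
    // than a UserError; a type switch distinguishes the two cases.
    _, err := docopt.ParseArgs("Usge: prog <x>", []string{"1"}, "")
    switch err.(type) {
    case *docopt.UserError:
        fmt.Println("bad command-line arguments")
    case *docopt.LanguageError:
        fmt.Println("bad usage string:", err)
    default:
        fmt.Println("ok")
    }
}
```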
264   vendor/github.com/docopt/docopt-go/opts.go (generated, vendored, new file)
@@ -0,0 +1,264 @@
|
||||
package docopt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
func errKey(key string) error {
|
||||
return fmt.Errorf("no such key: %q", key)
|
||||
}
|
||||
func errType(key string) error {
|
||||
return fmt.Errorf("key: %q failed type conversion", key)
|
||||
}
|
||||
func errStrconv(key string, convErr error) error {
|
||||
return fmt.Errorf("key: %q failed type conversion: %s", key, convErr)
|
||||
}
|
||||
|
||||
// Opts is a map of command line options to their values, with some convenience
|
||||
// methods for value type conversion (bool, float64, int, string). For example,
|
||||
// to get an option value as an int:
|
||||
//
|
||||
// opts, _ := docopt.ParseDoc("Usage: sleep <seconds>")
|
||||
// secs, _ := opts.Int("<seconds>")
|
||||
//
|
||||
// Additionally, Opts.Bind allows you easily populate a struct's fields with the
|
||||
// values of each option value. See below for examples.
|
||||
//
|
||||
// Lastly, you can still treat Opts as a regular map, and do any type checking
|
||||
// and conversion that you want to yourself. For example:
|
||||
//
|
||||
// if s, ok := opts["<binary>"].(string); ok {
|
||||
// if val, err := strconv.ParseUint(s, 2, 64); err != nil { ... }
|
||||
// }
|
||||
//
|
||||
// Note that any non-boolean option / flag will have a string value in the
|
||||
// underlying map.
|
||||
type Opts map[string]interface{}
|
||||
|
||||
func (o Opts) String(key string) (s string, err error) {
|
||||
v, ok := o[key]
|
||||
if !ok {
|
||||
err = errKey(key)
|
||||
return
|
||||
}
|
||||
s, ok = v.(string)
|
||||
if !ok {
|
||||
err = errType(key)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (o Opts) Bool(key string) (b bool, err error) {
|
||||
v, ok := o[key]
|
||||
if !ok {
|
||||
err = errKey(key)
|
||||
return
|
||||
}
|
||||
b, ok = v.(bool)
|
||||
if !ok {
|
||||
err = errType(key)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (o Opts) Int(key string) (i int, err error) {
|
||||
s, err := o.String(key)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
i, err = strconv.Atoi(s)
|
||||
if err != nil {
|
||||
err = errStrconv(key, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (o Opts) Float64(key string) (f float64, err error) {
|
||||
s, err := o.String(key)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
f, err = strconv.ParseFloat(s, 64)
|
||||
if err != nil {
|
||||
err = errStrconv(key, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Bind populates the fields of a given struct with matching option values.
|
||||
// Each key in Opts will be mapped to an exported field of the struct pointed
|
||||
// to by `v`, as follows:
|
||||
//
|
||||
// abc int // Unexported field, ignored
|
||||
// Abc string // Mapped from `--abc`, `<abc>`, or `abc`
|
||||
// // (case insensitive)
|
||||
// A string // Mapped from `-a`, `<a>` or `a`
|
||||
// // (case insensitive)
|
||||
// Abc int `docopt:"XYZ"` // Mapped from `XYZ`
|
||||
// Abc bool `docopt:"-"` // Mapped from `-`
|
||||
// Abc bool `docopt:"-x,--xyz"` // Mapped from `-x` or `--xyz`
|
||||
// // (first non-zero value found)
|
||||
//
|
||||
// Tagged (annotated) fields will always be mapped first. If no field is tagged
|
||||
// with an option's key, Bind will try to map the option to an appropriately
|
||||
// named field (as above).
|
||||
//
|
||||
// Bind also handles conversion to bool, float, int or string types.
|
||||
func (o Opts) Bind(v interface{}) error {
|
||||
structVal := reflect.ValueOf(v)
|
||||
if structVal.Kind() != reflect.Ptr {
|
||||
return newError("'v' argument is not pointer to struct type")
|
||||
}
|
||||
for structVal.Kind() == reflect.Ptr {
|
||||
structVal = structVal.Elem()
|
||||
}
|
||||
if structVal.Kind() != reflect.Struct {
|
||||
return newError("'v' argument is not pointer to struct type")
|
||||
}
|
||||
structType := structVal.Type()
|
||||
|
||||
tagged := make(map[string]int) // Tagged field tags
|
||||
untagged := make(map[string]int) // Untagged field names
|
||||
|
||||
for i := 0; i < structType.NumField(); i++ {
|
||||
field := structType.Field(i)
|
||||
if isUnexportedField(field) || field.Anonymous {
|
||||
continue
|
||||
}
|
||||
tag := field.Tag.Get("docopt")
|
||||
if tag == "" {
|
||||
untagged[field.Name] = i
|
||||
continue
|
||||
}
|
||||
for _, t := range strings.Split(tag, ",") {
|
||||
tagged[t] = i
|
||||
}
|
||||
}
|
||||
|
||||
// Get the index of the struct field to use, based on the option key.
|
||||
// Second argument is true/false on whether something was matched.
|
||||
getFieldIndex := func(key string) (int, bool) {
|
||||
if i, ok := tagged[key]; ok {
|
||||
return i, true
|
||||
}
|
||||
if i, ok := untagged[guessUntaggedField(key)]; ok {
|
||||
return i, true
|
||||
}
|
||||
return -1, false
|
||||
}
|
||||
|
||||
indexMap := make(map[string]int) // Option keys to field index
|
||||
|
||||
// Pre-check that option keys are mapped to fields and fields are zero valued, before populating them.
|
||||
for k := range o {
|
||||
i, ok := getFieldIndex(k)
|
||||
if !ok {
|
||||
if k == "--help" || k == "--version" { // Don't require these to be mapped.
|
||||
continue
|
||||
}
|
||||
return newError("mapping of %q is not found in given struct, or is an unexported field", k)
|
||||
}
|
||||
fieldVal := structVal.Field(i)
|
||||
zeroVal := reflect.Zero(fieldVal.Type())
|
||||
if !reflect.DeepEqual(fieldVal.Interface(), zeroVal.Interface()) {
|
||||
return newError("%q field is non-zero, will be overwritten by value of %q", structType.Field(i).Name, k)
|
||||
}
|
||||
indexMap[k] = i
|
||||
}
|
||||
|
||||
// Populate fields with option values.
|
||||
for k, v := range o {
|
||||
i, ok := indexMap[k]
|
||||
if !ok {
|
||||
continue // Not mapped.
|
||||
}
|
||||
field := structVal.Field(i)
|
||||
if !reflect.DeepEqual(field.Interface(), reflect.Zero(field.Type()).Interface()) {
|
||||
// The struct's field is already non-zero (by our doing), so don't change it.
|
||||
// This happens with comma separated tags, e.g. `docopt:"-h,--help"` which is a
|
||||
// convenient way of checking if one of multiple boolean flags are set.
|
||||
continue
|
||||
}
|
||||
optVal := reflect.ValueOf(v)
|
||||
// Option value is the zero Value, so we can't get its .Type(). No need to assign anyway, so move along.
|
||||
if !optVal.IsValid() {
|
||||
continue
|
||||
}
|
||||
if !field.CanSet() {
|
||||
return newError("%q field cannot be set", structType.Field(i).Name)
|
||||
}
|
||||
// Try to assign now if able. bool and string values should be assignable already.
|
||||
if optVal.Type().AssignableTo(field.Type()) {
|
||||
field.Set(optVal)
|
||||
continue
|
||||
}
|
||||
// Try to convert the value and assign if able.
|
||||
switch field.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
if x, err := o.Int(k); err == nil {
|
||||
field.SetInt(int64(x))
|
||||
continue
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
if x, err := o.Float64(k); err == nil {
|
||||
field.SetFloat(x)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// TODO: Something clever (recursive?) with non-string slices.
|
||||
// case reflect.Slice:
|
||||
// if optVal.Kind() == reflect.Slice {
|
||||
// for i := 0; i < optVal.Len(); i++ {
|
||||
// sliceVal := optVal.Index(i)
|
||||
// fmt.Printf("%v", sliceVal)
|
||||
// }
|
||||
// fmt.Printf("\n")
|
||||
// }
|
||||
return newError("value of %q is not assignable to %q field", k, structType.Field(i).Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
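// Editor's sketch (not part of the vendored docopt source): a minimal example
// of how Bind is typically used. It assumes the package-level helper ParseDoc,
// defined elsewhere in this package, which parses os.Args[1:] against a usage
// string and returns an Opts map.
func exampleBindUsage() error {
	usage := `Usage: prog [--verbose] <name>`
	var conf struct {
		Verbose bool   // filled from --verbose
		Name    string // filled from <name>
	}
	opts, err := ParseDoc(usage) // assumed signature: ParseDoc(doc string) (Opts, error)
	if err != nil {
		return err
	}
	return opts.Bind(&conf) // maps --verbose -> Verbose, <name> -> Name
}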
|
||||
|
||||
// isUnexportedField returns whether the field is unexported.
|
||||
// isUnexportedField works around a bug in Go versions older than 1.3.
|
||||
// See following links:
|
||||
// https://code.google.com/p/go/issues/detail?id=7247
|
||||
// http://golang.org/ref/spec#Exported_identifiers
|
||||
func isUnexportedField(field reflect.StructField) bool {
|
||||
return !(field.PkgPath == "" && unicode.IsUpper(rune(field.Name[0])))
|
||||
}
|
||||
|
||||
// Convert a string like "--my-special-flag" to "MySpecialFlag".
|
||||
func titleCaseDashes(key string) string {
|
||||
nextToUpper := true
|
||||
mapFn := func(r rune) rune {
|
||||
if r == '-' {
|
||||
nextToUpper = true
|
||||
return -1
|
||||
}
|
||||
if nextToUpper {
|
||||
nextToUpper = false
|
||||
return unicode.ToUpper(r)
|
||||
}
|
||||
return r
|
||||
}
|
||||
return strings.Map(mapFn, key)
|
||||
}
|
||||
|
||||
// Best guess which field.Name in a struct to assign for an option key.
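// For example (illustrative): "--special-flag" -> "SpecialFlag", "-x" -> "X",
// "<file>" -> "File", and "NAME" -> "Name".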
|
||||
func guessUntaggedField(key string) string {
|
||||
switch {
|
||||
case strings.HasPrefix(key, "--") && len(key[2:]) > 1:
|
||||
return titleCaseDashes(key[2:])
|
||||
case strings.HasPrefix(key, "-") && len(key[1:]) == 1:
|
||||
return titleCaseDashes(key[1:])
|
||||
case strings.HasPrefix(key, "<") && strings.HasSuffix(key, ">"):
|
||||
key = key[1 : len(key)-1]
|
||||
}
|
||||
return strings.Title(strings.ToLower(key))
|
||||
}
|
550
vendor/github.com/docopt/docopt-go/pattern.go
generated
vendored
Normal file
@ -0,0 +1,550 @@
|
||||
package docopt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type patternType uint
|
||||
|
||||
const (
|
||||
// leaf
|
||||
patternArgument patternType = 1 << iota
|
||||
patternCommand
|
||||
patternOption
|
||||
|
||||
// branch
|
||||
patternRequired
|
||||
patternOptionAL
|
||||
patternOptionSSHORTCUT // Marker/placeholder for [options] shortcut.
|
||||
patternOneOrMore
|
||||
patternEither
|
||||
|
||||
patternLeaf = patternArgument +
|
||||
patternCommand +
|
||||
patternOption
|
||||
patternBranch = patternRequired +
|
||||
patternOptionAL +
|
||||
patternOptionSSHORTCUT +
|
||||
patternOneOrMore +
|
||||
patternEither
|
||||
patternAll = patternLeaf + patternBranch
|
||||
patternDefault = 0
|
||||
)
|
||||
|
||||
func (pt patternType) String() string {
|
||||
switch pt {
|
||||
case patternArgument:
|
||||
return "argument"
|
||||
case patternCommand:
|
||||
return "command"
|
||||
case patternOption:
|
||||
return "option"
|
||||
case patternRequired:
|
||||
return "required"
|
||||
case patternOptionAL:
|
||||
return "optional"
|
||||
case patternOptionSSHORTCUT:
|
||||
return "optionsshortcut"
|
||||
case patternOneOrMore:
|
||||
return "oneormore"
|
||||
case patternEither:
|
||||
return "either"
|
||||
case patternLeaf:
|
||||
return "leaf"
|
||||
case patternBranch:
|
||||
return "branch"
|
||||
case patternAll:
|
||||
return "all"
|
||||
case patternDefault:
|
||||
return "default"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type pattern struct {
|
||||
t patternType
|
||||
|
||||
children patternList
|
||||
|
||||
name string
|
||||
value interface{}
|
||||
|
||||
short string
|
||||
long string
|
||||
argcount int
|
||||
}
|
||||
|
||||
type patternList []*pattern
|
||||
|
||||
func newBranchPattern(t patternType, pl ...*pattern) *pattern {
|
||||
var p pattern
|
||||
p.t = t
|
||||
p.children = make(patternList, len(pl))
|
||||
copy(p.children, pl)
|
||||
return &p
|
||||
}
|
||||
|
||||
func newRequired(pl ...*pattern) *pattern {
|
||||
return newBranchPattern(patternRequired, pl...)
|
||||
}
|
||||
|
||||
func newEither(pl ...*pattern) *pattern {
|
||||
return newBranchPattern(patternEither, pl...)
|
||||
}
|
||||
|
||||
func newOneOrMore(pl ...*pattern) *pattern {
|
||||
return newBranchPattern(patternOneOrMore, pl...)
|
||||
}
|
||||
|
||||
func newOptional(pl ...*pattern) *pattern {
|
||||
return newBranchPattern(patternOptionAL, pl...)
|
||||
}
|
||||
|
||||
func newOptionsShortcut() *pattern {
|
||||
var p pattern
|
||||
p.t = patternOptionSSHORTCUT
|
||||
return &p
|
||||
}
|
||||
|
||||
func newLeafPattern(t patternType, name string, value interface{}) *pattern {
|
||||
// default: value=nil
|
||||
var p pattern
|
||||
p.t = t
|
||||
p.name = name
|
||||
p.value = value
|
||||
return &p
|
||||
}
|
||||
|
||||
func newArgument(name string, value interface{}) *pattern {
|
||||
// default: value=nil
|
||||
return newLeafPattern(patternArgument, name, value)
|
||||
}
|
||||
|
||||
func newCommand(name string, value interface{}) *pattern {
|
||||
// default: value=false
|
||||
var p pattern
|
||||
p.t = patternCommand
|
||||
p.name = name
|
||||
p.value = value
|
||||
return &p
|
||||
}
|
||||
|
||||
func newOption(short, long string, argcount int, value interface{}) *pattern {
|
||||
// default: "", "", 0, false
|
||||
var p pattern
|
||||
p.t = patternOption
|
||||
p.short = short
|
||||
p.long = long
|
||||
if long != "" {
|
||||
p.name = long
|
||||
} else {
|
||||
p.name = short
|
||||
}
|
||||
p.argcount = argcount
|
||||
if value == false && argcount > 0 {
|
||||
p.value = nil
|
||||
} else {
|
||||
p.value = value
|
||||
}
|
||||
return &p
|
||||
}
|
||||
|
||||
func (p *pattern) flat(types patternType) (patternList, error) {
|
||||
if p.t&patternLeaf != 0 {
|
||||
if types == patternDefault {
|
||||
types = patternAll
|
||||
}
|
||||
if p.t&types != 0 {
|
||||
return patternList{p}, nil
|
||||
}
|
||||
return patternList{}, nil
|
||||
}
|
||||
|
||||
if p.t&patternBranch != 0 {
|
||||
if p.t&types != 0 {
|
||||
return patternList{p}, nil
|
||||
}
|
||||
result := patternList{}
|
||||
for _, child := range p.children {
|
||||
childFlat, err := child.flat(types)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result = append(result, childFlat...)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
return nil, newError("unknown pattern type: %d, %d", p.t, types)
|
||||
}
|
||||
|
||||
func (p *pattern) fix() error {
|
||||
err := p.fixIdentities(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.fixRepeatingArguments()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *pattern) fixIdentities(uniq patternList) error {
|
||||
// Make pattern-tree tips point to same object if they are equal.
|
||||
if p.t&patternBranch == 0 {
|
||||
return nil
|
||||
}
|
||||
if uniq == nil {
|
||||
pFlat, err := p.flat(patternDefault)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uniq = pFlat.unique()
|
||||
}
|
||||
for i, child := range p.children {
|
||||
if child.t&patternBranch == 0 {
|
||||
ind, err := uniq.index(child)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.children[i] = uniq[ind]
|
||||
} else {
|
||||
err := child.fixIdentities(uniq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *pattern) fixRepeatingArguments() {
|
||||
// Fix elements that should accumulate/increment values.
|
||||
var either []patternList
|
||||
|
||||
for _, child := range p.transform().children {
|
||||
either = append(either, child.children)
|
||||
}
|
||||
for _, cas := range either {
|
||||
casMultiple := patternList{}
|
||||
for _, e := range cas {
|
||||
if cas.count(e) > 1 {
|
||||
casMultiple = append(casMultiple, e)
|
||||
}
|
||||
}
|
||||
for _, e := range casMultiple {
|
||||
if e.t == patternArgument || e.t == patternOption && e.argcount > 0 {
|
||||
switch e.value.(type) {
|
||||
case string:
|
||||
e.value = strings.Fields(e.value.(string))
|
||||
case []string:
|
||||
default:
|
||||
e.value = []string{}
|
||||
}
|
||||
}
|
||||
if e.t == patternCommand || e.t == patternOption && e.argcount == 0 {
|
||||
e.value = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *pattern) match(left *patternList, collected *patternList) (bool, *patternList, *patternList) {
|
||||
if collected == nil {
|
||||
collected = &patternList{}
|
||||
}
|
||||
if p.t&patternRequired != 0 {
|
||||
l := left
|
||||
c := collected
|
||||
for _, p := range p.children {
|
||||
var matched bool
|
||||
matched, l, c = p.match(l, c)
|
||||
if !matched {
|
||||
return false, left, collected
|
||||
}
|
||||
}
|
||||
return true, l, c
|
||||
} else if p.t&patternOptionAL != 0 || p.t&patternOptionSSHORTCUT != 0 {
|
||||
for _, p := range p.children {
|
||||
_, left, collected = p.match(left, collected)
|
||||
}
|
||||
return true, left, collected
|
||||
} else if p.t&patternOneOrMore != 0 {
|
||||
if len(p.children) != 1 {
|
||||
panic("OneOrMore.match(): assert len(p.children) == 1")
|
||||
}
|
||||
l := left
|
||||
c := collected
|
||||
var lAlt *patternList
|
||||
matched := true
|
||||
times := 0
|
||||
for matched {
|
||||
// could it be that something didn't match but changed l or c?
|
||||
matched, l, c = p.children[0].match(l, c)
|
||||
if matched {
|
||||
times++
|
||||
}
|
||||
if lAlt == l {
|
||||
break
|
||||
}
|
||||
lAlt = l
|
||||
}
|
||||
if times >= 1 {
|
||||
return true, l, c
|
||||
}
|
||||
return false, left, collected
|
||||
} else if p.t&patternEither != 0 {
|
||||
type outcomeStruct struct {
|
||||
matched bool
|
||||
left *patternList
|
||||
collected *patternList
|
||||
length int
|
||||
}
|
||||
outcomes := []outcomeStruct{}
|
||||
for _, p := range p.children {
|
||||
matched, l, c := p.match(left, collected)
|
||||
outcome := outcomeStruct{matched, l, c, len(*l)}
|
||||
if matched {
|
||||
outcomes = append(outcomes, outcome)
|
||||
}
|
||||
}
|
||||
if len(outcomes) > 0 {
|
||||
minLen := outcomes[0].length
|
||||
minIndex := 0
|
||||
for i, v := range outcomes {
|
||||
if v.length < minLen {
|
||||
minIndex = i
|
||||
}
|
||||
}
|
||||
return outcomes[minIndex].matched, outcomes[minIndex].left, outcomes[minIndex].collected
|
||||
}
|
||||
return false, left, collected
|
||||
} else if p.t&patternLeaf != 0 {
|
||||
pos, match := p.singleMatch(left)
|
||||
var increment interface{}
|
||||
if match == nil {
|
||||
return false, left, collected
|
||||
}
|
||||
leftAlt := make(patternList, len((*left)[:pos]), len((*left)[:pos])+len((*left)[pos+1:]))
|
||||
copy(leftAlt, (*left)[:pos])
|
||||
leftAlt = append(leftAlt, (*left)[pos+1:]...)
|
||||
sameName := patternList{}
|
||||
for _, a := range *collected {
|
||||
if a.name == p.name {
|
||||
sameName = append(sameName, a)
|
||||
}
|
||||
}
|
||||
|
||||
switch p.value.(type) {
|
||||
case int, []string:
|
||||
switch p.value.(type) {
|
||||
case int:
|
||||
increment = 1
|
||||
case []string:
|
||||
switch match.value.(type) {
|
||||
case string:
|
||||
increment = []string{match.value.(string)}
|
||||
default:
|
||||
increment = match.value
|
||||
}
|
||||
}
|
||||
if len(sameName) == 0 {
|
||||
match.value = increment
|
||||
collectedMatch := make(patternList, len(*collected), len(*collected)+1)
|
||||
copy(collectedMatch, *collected)
|
||||
collectedMatch = append(collectedMatch, match)
|
||||
return true, &leftAlt, &collectedMatch
|
||||
}
|
||||
switch sameName[0].value.(type) {
|
||||
case int:
|
||||
sameName[0].value = sameName[0].value.(int) + increment.(int)
|
||||
case []string:
|
||||
sameName[0].value = append(sameName[0].value.([]string), increment.([]string)...)
|
||||
}
|
||||
return true, &leftAlt, collected
|
||||
}
|
||||
collectedMatch := make(patternList, len(*collected), len(*collected)+1)
|
||||
copy(collectedMatch, *collected)
|
||||
collectedMatch = append(collectedMatch, match)
|
||||
return true, &leftAlt, &collectedMatch
|
||||
}
|
||||
panic("unmatched type")
|
||||
}
|
||||
|
||||
func (p *pattern) singleMatch(left *patternList) (int, *pattern) {
|
||||
if p.t&patternArgument != 0 {
|
||||
for n, pat := range *left {
|
||||
if pat.t&patternArgument != 0 {
|
||||
return n, newArgument(p.name, pat.value)
|
||||
}
|
||||
}
|
||||
return -1, nil
|
||||
} else if p.t&patternCommand != 0 {
|
||||
for n, pat := range *left {
|
||||
if pat.t&patternArgument != 0 {
|
||||
if pat.value == p.name {
|
||||
return n, newCommand(p.name, true)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return -1, nil
|
||||
} else if p.t&patternOption != 0 {
|
||||
for n, pat := range *left {
|
||||
if p.name == pat.name {
|
||||
return n, pat
|
||||
}
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
panic("unmatched type")
|
||||
}
|
||||
|
||||
func (p *pattern) String() string {
|
||||
if p.t&patternOption != 0 {
|
||||
return fmt.Sprintf("%s(%s, %s, %d, %+v)", p.t, p.short, p.long, p.argcount, p.value)
|
||||
} else if p.t&patternLeaf != 0 {
|
||||
return fmt.Sprintf("%s(%s, %+v)", p.t, p.name, p.value)
|
||||
} else if p.t&patternBranch != 0 {
|
||||
result := ""
|
||||
for i, child := range p.children {
|
||||
if i > 0 {
|
||||
result += ", "
|
||||
}
|
||||
result += child.String()
|
||||
}
|
||||
return fmt.Sprintf("%s(%s)", p.t, result)
|
||||
}
|
||||
panic("unmatched type")
|
||||
}
|
||||
|
||||
func (p *pattern) transform() *pattern {
|
||||
/*
|
||||
Expand pattern into an (almost) equivalent one, but with single Either.
|
||||
|
||||
Example: ((-a | -b) (-c | -d)) => (-a -c | -a -d | -b -c | -b -d)
|
||||
Quirks: [-a] => (-a), (-a...) => (-a -a)
|
||||
*/
|
||||
result := []patternList{}
|
||||
groups := []patternList{patternList{p}}
|
||||
parents := patternRequired +
|
||||
patternOptionAL +
|
||||
patternOptionSSHORTCUT +
|
||||
patternEither +
|
||||
patternOneOrMore
|
||||
for len(groups) > 0 {
|
||||
children := groups[0]
|
||||
groups = groups[1:]
|
||||
var child *pattern
|
||||
for _, c := range children {
|
||||
if c.t&parents != 0 {
|
||||
child = c
|
||||
break
|
||||
}
|
||||
}
|
||||
if child != nil {
|
||||
children.remove(child)
|
||||
if child.t&patternEither != 0 {
|
||||
for _, c := range child.children {
|
||||
r := patternList{}
|
||||
r = append(r, c)
|
||||
r = append(r, children...)
|
||||
groups = append(groups, r)
|
||||
}
|
||||
} else if child.t&patternOneOrMore != 0 {
|
||||
r := patternList{}
|
||||
r = append(r, child.children.double()...)
|
||||
r = append(r, children...)
|
||||
groups = append(groups, r)
|
||||
} else {
|
||||
r := patternList{}
|
||||
r = append(r, child.children...)
|
||||
r = append(r, children...)
|
||||
groups = append(groups, r)
|
||||
}
|
||||
} else {
|
||||
result = append(result, children)
|
||||
}
|
||||
}
|
||||
either := patternList{}
|
||||
for _, e := range result {
|
||||
either = append(either, newRequired(e...))
|
||||
}
|
||||
return newEither(either...)
|
||||
}
|
||||
|
||||
func (p *pattern) eq(other *pattern) bool {
|
||||
return reflect.DeepEqual(p, other)
|
||||
}
|
||||
|
||||
func (pl patternList) unique() patternList {
|
||||
table := make(map[string]bool)
|
||||
result := patternList{}
|
||||
for _, v := range pl {
|
||||
if !table[v.String()] {
|
||||
table[v.String()] = true
|
||||
result = append(result, v)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (pl patternList) index(p *pattern) (int, error) {
|
||||
for i, c := range pl {
|
||||
if c.eq(p) {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
return -1, newError("%s not in list", p)
|
||||
}
|
||||
|
||||
func (pl patternList) count(p *pattern) int {
|
||||
count := 0
|
||||
for _, c := range pl {
|
||||
if c.eq(p) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func (pl patternList) diff(l patternList) patternList {
|
||||
lAlt := make(patternList, len(l))
|
||||
copy(lAlt, l)
|
||||
result := make(patternList, 0, len(pl))
|
||||
for _, v := range pl {
|
||||
if v != nil {
|
||||
match := false
|
||||
for i, w := range lAlt {
|
||||
if w.eq(v) {
|
||||
match = true
|
||||
lAlt[i] = nil
|
||||
break
|
||||
}
|
||||
}
|
||||
if match == false {
|
||||
result = append(result, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (pl patternList) double() patternList {
|
||||
l := len(pl)
|
||||
result := make(patternList, l*2)
|
||||
copy(result, pl)
|
||||
copy(result[l:2*l], pl)
|
||||
return result
|
||||
}
|
||||
|
||||
func (pl *patternList) remove(p *pattern) {
|
||||
(*pl) = pl.diff(patternList{p})
|
||||
}
|
||||
|
||||
func (pl patternList) dictionary() map[string]interface{} {
|
||||
dict := make(map[string]interface{})
|
||||
for _, a := range pl {
|
||||
dict[a.name] = a.value
|
||||
}
|
||||
return dict
|
||||
}
|
9
vendor/github.com/docopt/docopt-go/test_golang.docopt
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
r"""usage: prog [NAME_-2]..."""
|
||||
$ prog 10 20
|
||||
{"NAME_-2": ["10", "20"]}
|
||||
|
||||
$ prog 10
|
||||
{"NAME_-2": ["10"]}
|
||||
|
||||
$ prog
|
||||
{"NAME_-2": []}
|
957
vendor/github.com/docopt/docopt-go/testcases.docopt
generated
vendored
Normal file
@ -0,0 +1,957 @@
|
||||
r"""Usage: prog
|
||||
|
||||
"""
|
||||
$ prog
|
||||
{}
|
||||
|
||||
$ prog --xxx
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""Usage: prog [options]
|
||||
|
||||
Options: -a All.
|
||||
|
||||
"""
|
||||
$ prog
|
||||
{"-a": false}
|
||||
|
||||
$ prog -a
|
||||
{"-a": true}
|
||||
|
||||
$ prog -x
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""Usage: prog [options]
|
||||
|
||||
Options: --all All.
|
||||
|
||||
"""
|
||||
$ prog
|
||||
{"--all": false}
|
||||
|
||||
$ prog --all
|
||||
{"--all": true}
|
||||
|
||||
$ prog --xxx
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""Usage: prog [options]
|
||||
|
||||
Options: -v, --verbose Verbose.
|
||||
|
||||
"""
|
||||
$ prog --verbose
|
||||
{"--verbose": true}
|
||||
|
||||
$ prog --ver
|
||||
{"--verbose": true}
|
||||
|
||||
$ prog -v
|
||||
{"--verbose": true}
|
||||
|
||||
|
||||
r"""Usage: prog [options]
|
||||
|
||||
Options: -p PATH
|
||||
|
||||
"""
|
||||
$ prog -p home/
|
||||
{"-p": "home/"}
|
||||
|
||||
$ prog -phome/
|
||||
{"-p": "home/"}
|
||||
|
||||
$ prog -p
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""Usage: prog [options]
|
||||
|
||||
Options: --path <path>
|
||||
|
||||
"""
|
||||
$ prog --path home/
|
||||
{"--path": "home/"}
|
||||
|
||||
$ prog --path=home/
|
||||
{"--path": "home/"}
|
||||
|
||||
$ prog --pa home/
|
||||
{"--path": "home/"}
|
||||
|
||||
$ prog --pa=home/
|
||||
{"--path": "home/"}
|
||||
|
||||
$ prog --path
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""Usage: prog [options]
|
||||
|
||||
Options: -p PATH, --path=<path> Path to files.
|
||||
|
||||
"""
|
||||
$ prog -proot
|
||||
{"--path": "root"}
|
||||
|
||||
|
||||
r"""Usage: prog [options]
|
||||
|
||||
Options: -p --path PATH Path to files.
|
||||
|
||||
"""
|
||||
$ prog -p root
|
||||
{"--path": "root"}
|
||||
|
||||
$ prog --path root
|
||||
{"--path": "root"}
|
||||
|
||||
|
||||
r"""Usage: prog [options]
|
||||
|
||||
Options:
|
||||
-p PATH Path to files [default: ./]
|
||||
|
||||
"""
|
||||
$ prog
|
||||
{"-p": "./"}
|
||||
|
||||
$ prog -phome
|
||||
{"-p": "home"}
|
||||
|
||||
|
||||
r"""UsAgE: prog [options]
|
||||
|
||||
OpTiOnS: --path=<files> Path to files
|
||||
[dEfAuLt: /root]
|
||||
|
||||
"""
|
||||
$ prog
|
||||
{"--path": "/root"}
|
||||
|
||||
$ prog --path=home
|
||||
{"--path": "home"}
|
||||
|
||||
|
||||
r"""usage: prog [options]
|
||||
|
||||
options:
|
||||
-a Add
|
||||
-r Remote
|
||||
-m <msg> Message
|
||||
|
||||
"""
|
||||
$ prog -a -r -m Hello
|
||||
{"-a": true,
|
||||
"-r": true,
|
||||
"-m": "Hello"}
|
||||
|
||||
$ prog -armyourass
|
||||
{"-a": true,
|
||||
"-r": true,
|
||||
"-m": "yourass"}
|
||||
|
||||
$ prog -a -r
|
||||
{"-a": true,
|
||||
"-r": true,
|
||||
"-m": null}
|
||||
|
||||
|
||||
r"""Usage: prog [options]
|
||||
|
||||
Options: --version
|
||||
--verbose
|
||||
|
||||
"""
|
||||
$ prog --version
|
||||
{"--version": true,
|
||||
"--verbose": false}
|
||||
|
||||
$ prog --verbose
|
||||
{"--version": false,
|
||||
"--verbose": true}
|
||||
|
||||
$ prog --ver
|
||||
"user-error"
|
||||
|
||||
$ prog --verb
|
||||
{"--version": false,
|
||||
"--verbose": true}
|
||||
|
||||
|
||||
r"""usage: prog [-a -r -m <msg>]
|
||||
|
||||
options:
|
||||
-a Add
|
||||
-r Remote
|
||||
-m <msg> Message
|
||||
|
||||
"""
|
||||
$ prog -armyourass
|
||||
{"-a": true,
|
||||
"-r": true,
|
||||
"-m": "yourass"}
|
||||
|
||||
|
||||
r"""usage: prog [-armmsg]
|
||||
|
||||
options: -a Add
|
||||
-r Remote
|
||||
-m <msg> Message
|
||||
|
||||
"""
|
||||
$ prog -a -r -m Hello
|
||||
{"-a": true,
|
||||
"-r": true,
|
||||
"-m": "Hello"}
|
||||
|
||||
|
||||
r"""usage: prog -a -b
|
||||
|
||||
options:
|
||||
-a
|
||||
-b
|
||||
|
||||
"""
|
||||
$ prog -a -b
|
||||
{"-a": true, "-b": true}
|
||||
|
||||
$ prog -b -a
|
||||
{"-a": true, "-b": true}
|
||||
|
||||
$ prog -a
|
||||
"user-error"
|
||||
|
||||
$ prog
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""usage: prog (-a -b)
|
||||
|
||||
options: -a
|
||||
-b
|
||||
|
||||
"""
|
||||
$ prog -a -b
|
||||
{"-a": true, "-b": true}
|
||||
|
||||
$ prog -b -a
|
||||
{"-a": true, "-b": true}
|
||||
|
||||
$ prog -a
|
||||
"user-error"
|
||||
|
||||
$ prog
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""usage: prog [-a] -b
|
||||
|
||||
options: -a
|
||||
-b
|
||||
|
||||
"""
|
||||
$ prog -a -b
|
||||
{"-a": true, "-b": true}
|
||||
|
||||
$ prog -b -a
|
||||
{"-a": true, "-b": true}
|
||||
|
||||
$ prog -a
|
||||
"user-error"
|
||||
|
||||
$ prog -b
|
||||
{"-a": false, "-b": true}
|
||||
|
||||
$ prog
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""usage: prog [(-a -b)]
|
||||
|
||||
options: -a
|
||||
-b
|
||||
|
||||
"""
|
||||
$ prog -a -b
|
||||
{"-a": true, "-b": true}
|
||||
|
||||
$ prog -b -a
|
||||
{"-a": true, "-b": true}
|
||||
|
||||
$ prog -a
|
||||
"user-error"
|
||||
|
||||
$ prog -b
|
||||
"user-error"
|
||||
|
||||
$ prog
|
||||
{"-a": false, "-b": false}
|
||||
|
||||
|
||||
r"""usage: prog (-a|-b)
|
||||
|
||||
options: -a
|
||||
-b
|
||||
|
||||
"""
|
||||
$ prog -a -b
|
||||
"user-error"
|
||||
|
||||
$ prog
|
||||
"user-error"
|
||||
|
||||
$ prog -a
|
||||
{"-a": true, "-b": false}
|
||||
|
||||
$ prog -b
|
||||
{"-a": false, "-b": true}
|
||||
|
||||
|
||||
r"""usage: prog [ -a | -b ]
|
||||
|
||||
options: -a
|
||||
-b
|
||||
|
||||
"""
|
||||
$ prog -a -b
|
||||
"user-error"
|
||||
|
||||
$ prog
|
||||
{"-a": false, "-b": false}
|
||||
|
||||
$ prog -a
|
||||
{"-a": true, "-b": false}
|
||||
|
||||
$ prog -b
|
||||
{"-a": false, "-b": true}
|
||||
|
||||
|
||||
r"""usage: prog <arg>"""
|
||||
$ prog 10
|
||||
{"<arg>": "10"}
|
||||
|
||||
$ prog 10 20
|
||||
"user-error"
|
||||
|
||||
$ prog
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""usage: prog [<arg>]"""
|
||||
$ prog 10
|
||||
{"<arg>": "10"}
|
||||
|
||||
$ prog 10 20
|
||||
"user-error"
|
||||
|
||||
$ prog
|
||||
{"<arg>": null}
|
||||
|
||||
|
||||
r"""usage: prog <kind> <name> <type>"""
|
||||
$ prog 10 20 40
|
||||
{"<kind>": "10", "<name>": "20", "<type>": "40"}
|
||||
|
||||
$ prog 10 20
|
||||
"user-error"
|
||||
|
||||
$ prog
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""usage: prog <kind> [<name> <type>]"""
|
||||
$ prog 10 20 40
|
||||
{"<kind>": "10", "<name>": "20", "<type>": "40"}
|
||||
|
||||
$ prog 10 20
|
||||
{"<kind>": "10", "<name>": "20", "<type>": null}
|
||||
|
||||
$ prog
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""usage: prog [<kind> | <name> <type>]"""
|
||||
$ prog 10 20 40
|
||||
"user-error"
|
||||
|
||||
$ prog 20 40
|
||||
{"<kind>": null, "<name>": "20", "<type>": "40"}
|
||||
|
||||
$ prog
|
||||
{"<kind>": null, "<name>": null, "<type>": null}
|
||||
|
||||
|
||||
r"""usage: prog (<kind> --all | <name>)
|
||||
|
||||
options:
|
||||
--all
|
||||
|
||||
"""
|
||||
$ prog 10 --all
|
||||
{"<kind>": "10", "--all": true, "<name>": null}
|
||||
|
||||
$ prog 10
|
||||
{"<kind>": null, "--all": false, "<name>": "10"}
|
||||
|
||||
$ prog
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""usage: prog [<name> <name>]"""
|
||||
$ prog 10 20
|
||||
{"<name>": ["10", "20"]}
|
||||
|
||||
$ prog 10
|
||||
{"<name>": ["10"]}
|
||||
|
||||
$ prog
|
||||
{"<name>": []}
|
||||
|
||||
|
||||
r"""usage: prog [(<name> <name>)]"""
|
||||
$ prog 10 20
|
||||
{"<name>": ["10", "20"]}
|
||||
|
||||
$ prog 10
|
||||
"user-error"
|
||||
|
||||
$ prog
|
||||
{"<name>": []}
|
||||
|
||||
|
||||
r"""usage: prog NAME..."""
|
||||
$ prog 10 20
|
||||
{"NAME": ["10", "20"]}
|
||||
|
||||
$ prog 10
|
||||
{"NAME": ["10"]}
|
||||
|
||||
$ prog
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""usage: prog [NAME]..."""
|
||||
$ prog 10 20
|
||||
{"NAME": ["10", "20"]}
|
||||
|
||||
$ prog 10
|
||||
{"NAME": ["10"]}
|
||||
|
||||
$ prog
|
||||
{"NAME": []}
|
||||
|
||||
|
||||
r"""usage: prog [NAME...]"""
|
||||
$ prog 10 20
|
||||
{"NAME": ["10", "20"]}
|
||||
|
||||
$ prog 10
|
||||
{"NAME": ["10"]}
|
||||
|
||||
$ prog
|
||||
{"NAME": []}
|
||||
|
||||
|
||||
r"""usage: prog [NAME [NAME ...]]"""
|
||||
$ prog 10 20
|
||||
{"NAME": ["10", "20"]}
|
||||
|
||||
$ prog 10
|
||||
{"NAME": ["10"]}
|
||||
|
||||
$ prog
|
||||
{"NAME": []}
|
||||
|
||||
|
||||
r"""usage: prog (NAME | --foo NAME)
|
||||
|
||||
options: --foo
|
||||
|
||||
"""
|
||||
$ prog 10
|
||||
{"NAME": "10", "--foo": false}
|
||||
|
||||
$ prog --foo 10
|
||||
{"NAME": "10", "--foo": true}
|
||||
|
||||
$ prog --foo=10
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""usage: prog (NAME | --foo) [--bar | NAME]
|
||||
|
||||
options: --foo
|
||||
options: --bar
|
||||
|
||||
"""
|
||||
$ prog 10
|
||||
{"NAME": ["10"], "--foo": false, "--bar": false}
|
||||
|
||||
$ prog 10 20
|
||||
{"NAME": ["10", "20"], "--foo": false, "--bar": false}
|
||||
|
||||
$ prog --foo --bar
|
||||
{"NAME": [], "--foo": true, "--bar": true}
|
||||
|
||||
|
||||
r"""Naval Fate.
|
||||
|
||||
Usage:
|
||||
prog ship new <name>...
|
||||
prog ship [<name>] move <x> <y> [--speed=<kn>]
|
||||
prog ship shoot <x> <y>
|
||||
prog mine (set|remove) <x> <y> [--moored|--drifting]
|
||||
prog -h | --help
|
||||
prog --version
|
||||
|
||||
Options:
|
||||
-h --help Show this screen.
|
||||
--version Show version.
|
||||
--speed=<kn> Speed in knots [default: 10].
|
||||
--moored Moored (anchored) mine.
|
||||
--drifting Drifting mine.
|
||||
|
||||
"""
|
||||
$ prog ship Guardian move 150 300 --speed=20
|
||||
{"--drifting": false,
|
||||
"--help": false,
|
||||
"--moored": false,
|
||||
"--speed": "20",
|
||||
"--version": false,
|
||||
"<name>": ["Guardian"],
|
||||
"<x>": "150",
|
||||
"<y>": "300",
|
||||
"mine": false,
|
||||
"move": true,
|
||||
"new": false,
|
||||
"remove": false,
|
||||
"set": false,
|
||||
"ship": true,
|
||||
"shoot": false}
|
||||
|
||||
|
||||
r"""usage: prog --hello"""
|
||||
$ prog --hello
|
||||
{"--hello": true}
|
||||
|
||||
|
||||
r"""usage: prog [--hello=<world>]"""
|
||||
$ prog
|
||||
{"--hello": null}
|
||||
|
||||
$ prog --hello wrld
|
||||
{"--hello": "wrld"}
|
||||
|
||||
|
||||
r"""usage: prog [-o]"""
|
||||
$ prog
|
||||
{"-o": false}
|
||||
|
||||
$ prog -o
|
||||
{"-o": true}
|
||||
|
||||
|
||||
r"""usage: prog [-opr]"""
|
||||
$ prog -op
|
||||
{"-o": true, "-p": true, "-r": false}
|
||||
|
||||
|
||||
r"""usage: prog --aabb | --aa"""
|
||||
$ prog --aa
|
||||
{"--aabb": false, "--aa": true}
|
||||
|
||||
$ prog --a
|
||||
"user-error" # not a unique prefix
|
||||
|
||||
#
|
||||
# Counting number of flags
|
||||
#
|
||||
|
||||
r"""Usage: prog -v"""
|
||||
$ prog -v
|
||||
{"-v": true}
|
||||
|
||||
|
||||
r"""Usage: prog [-v -v]"""
|
||||
$ prog
|
||||
{"-v": 0}
|
||||
|
||||
$ prog -v
|
||||
{"-v": 1}
|
||||
|
||||
$ prog -vv
|
||||
{"-v": 2}
|
||||
|
||||
|
||||
r"""Usage: prog -v ..."""
|
||||
$ prog
|
||||
"user-error"
|
||||
|
||||
$ prog -v
|
||||
{"-v": 1}
|
||||
|
||||
$ prog -vv
|
||||
{"-v": 2}
|
||||
|
||||
$ prog -vvvvvv
|
||||
{"-v": 6}
|
||||
|
||||
|
||||
r"""Usage: prog [-v | -vv | -vvv]
|
||||
|
||||
This one is probably the most readable, user-friendly variant.
|
||||
|
||||
"""
|
||||
$ prog
|
||||
{"-v": 0}
|
||||
|
||||
$ prog -v
|
||||
{"-v": 1}
|
||||
|
||||
$ prog -vv
|
||||
{"-v": 2}
|
||||
|
||||
$ prog -vvvv
|
||||
"user-error"
|
||||
|
||||
|
||||
r"""usage: prog [--ver --ver]"""
|
||||
$ prog --ver --ver
|
||||
{"--ver": 2}
|
||||
|
||||
|
||||
#
|
||||
# Counting commands
|
||||
#
|
||||
|
||||
r"""usage: prog [go]"""
|
||||
$ prog go
|
||||
{"go": true}
|
||||
|
||||
|
||||
r"""usage: prog [go go]"""
|
||||
$ prog
|
||||
{"go": 0}
|
||||
|
||||
$ prog go
|
||||
{"go": 1}
|
||||
|
||||
$ prog go go
|
||||
{"go": 2}
|
||||
|
||||
$ prog go go go
|
||||
"user-error"
|
||||
|
||||
r"""usage: prog go..."""
|
||||
$ prog go go go go go
|
||||
{"go": 5}
|
||||
|
||||
#
|
||||
# [options] does not include options from usage-pattern
|
||||
#
|
||||
r"""usage: prog [options] [-a]
|
||||
|
||||
options: -a
|
||||
-b
|
||||
"""
|
||||
$ prog -a
|
||||
{"-a": true, "-b": false}
|
||||
|
||||
$ prog -aa
|
||||
"user-error"
|
||||
|
||||
#
|
||||
# Test [options] shortcut
|
||||
#
|
||||
|
||||
r"""Usage: prog [options] A
|
||||
Options:
|
||||
-q Be quiet
|
||||
-v Be verbose.
|
||||
|
||||
"""
|
||||
$ prog arg
|
||||
{"A": "arg", "-v": false, "-q": false}
|
||||
|
||||
$ prog -v arg
|
||||
{"A": "arg", "-v": true, "-q": false}
|
||||
|
||||
$ prog -q arg
|
||||
{"A": "arg", "-v": false, "-q": true}
|
||||
|
||||
#
|
||||
# Test single dash
|
||||
#
|
||||
|
||||
r"""usage: prog [-]"""
|
||||
|
||||
$ prog -
|
||||
{"-": true}
|
||||
|
||||
$ prog
|
||||
{"-": false}
|
||||
|
||||
#
|
||||
# If argument is repeated, its value should always be a list
|
||||
#
|
||||
|
||||
r"""usage: prog [NAME [NAME ...]]"""
|
||||
|
||||
$ prog a b
|
||||
{"NAME": ["a", "b"]}
|
||||
|
||||
$ prog
|
||||
{"NAME": []}
|
||||
|
||||
#
|
||||
# Option's argument defaults to null/None
|
||||
#
|
||||
|
||||
r"""usage: prog [options]
|
||||
options:
|
||||
-a Add
|
||||
-m <msg> Message
|
||||
|
||||
"""
|
||||
$ prog -a
|
||||
{"-m": null, "-a": true}
|
||||
|
||||
#
|
||||
# Test options without description
|
||||
#
|
||||
|
||||
r"""usage: prog --hello"""
|
||||
$ prog --hello
|
||||
{"--hello": true}
|
||||
|
||||
r"""usage: prog [--hello=<world>]"""
|
||||
$ prog
|
||||
{"--hello": null}
|
||||
|
||||
$ prog --hello wrld
|
||||
{"--hello": "wrld"}
|
||||
|
||||
r"""usage: prog [-o]"""
|
||||
$ prog
|
||||
{"-o": false}
|
||||
|
||||
$ prog -o
|
||||
{"-o": true}
|
||||
|
||||
r"""usage: prog [-opr]"""
|
||||
$ prog -op
|
||||
{"-o": true, "-p": true, "-r": false}
|
||||
|
||||
r"""usage: git [-v | --verbose]"""
|
||||
$ prog -v
|
||||
{"-v": true, "--verbose": false}
|
||||
|
||||
r"""usage: git remote [-v | --verbose]"""
|
||||
$ prog remote -v
|
||||
{"remote": true, "-v": true, "--verbose": false}
|
||||
|
||||
#
|
||||
# Test empty usage pattern
|
||||
#
|
||||
|
||||
r"""usage: prog"""
|
||||
$ prog
|
||||
{}
|
||||
|
||||
r"""usage: prog
|
||||
prog <a> <b>
|
||||
"""
|
||||
$ prog 1 2
|
||||
{"<a>": "1", "<b>": "2"}
|
||||
|
||||
$ prog
|
||||
{"<a>": null, "<b>": null}
|
||||
|
||||
r"""usage: prog <a> <b>
|
||||
prog
|
||||
"""
|
||||
$ prog
|
||||
{"<a>": null, "<b>": null}
|
||||
|
||||
#
|
||||
# Option's argument should not capture default value from usage pattern
|
||||
#
|
||||
|
||||
r"""usage: prog [--file=<f>]"""
|
||||
$ prog
|
||||
{"--file": null}
|
||||
|
||||
r"""usage: prog [--file=<f>]
|
||||
|
||||
options: --file <a>
|
||||
|
||||
"""
|
||||
$ prog
|
||||
{"--file": null}
|
||||
|
||||
r"""Usage: prog [-a <host:port>]
|
||||
|
||||
Options: -a, --address <host:port> TCP address [default: localhost:6283].
|
||||
|
||||
"""
|
||||
$ prog
|
||||
{"--address": "localhost:6283"}
|
||||
|
||||
#
|
||||
# If option with argument could be repeated,
|
||||
# its arguments should be accumulated into a list
|
||||
#
|
||||
|
||||
r"""usage: prog --long=<arg> ..."""
|
||||
|
||||
$ prog --long one
|
||||
{"--long": ["one"]}
|
||||
|
||||
$ prog --long one --long two
|
||||
{"--long": ["one", "two"]}
|
||||
|
||||
#
|
||||
# Test multiple elements repeated at once
|
||||
#
|
||||
|
||||
r"""usage: prog (go <direction> --speed=<km/h>)..."""
|
||||
$ prog go left --speed=5 go right --speed=9
|
||||
{"go": 2, "<direction>": ["left", "right"], "--speed": ["5", "9"]}
|
||||
|
||||
#
|
||||
# Required options should work with option shortcut
|
||||
#
|
||||
|
||||
r"""usage: prog [options] -a
|
||||
|
||||
options: -a
|
||||
|
||||
"""
|
||||
$ prog -a
|
||||
{"-a": true}
|
||||
|
||||
#
|
||||
# If option could be repeated its defaults should be split into a list
|
||||
#
|
||||
|
||||
r"""usage: prog [-o <o>]...
|
||||
|
||||
options: -o <o> [default: x]
|
||||
|
||||
"""
|
||||
$ prog -o this -o that
|
||||
{"-o": ["this", "that"]}
|
||||
|
||||
$ prog
|
||||
{"-o": ["x"]}
|
||||
|
||||
r"""usage: prog [-o <o>]...
|
||||
|
||||
options: -o <o> [default: x y]
|
||||
|
||||
"""
|
||||
$ prog -o this
|
||||
{"-o": ["this"]}
|
||||
|
||||
$ prog
|
||||
{"-o": ["x", "y"]}
|
||||
|
||||
#
|
||||
# Test stacked option's argument
|
||||
#
|
||||
|
||||
r"""usage: prog -pPATH
|
||||
|
||||
options: -p PATH
|
||||
|
||||
"""
|
||||
$ prog -pHOME
|
||||
{"-p": "HOME"}
|
||||
|
||||
#
|
||||
# Issue 56: Repeated mutually exclusive args give nested lists sometimes
|
||||
#
|
||||
|
||||
r"""Usage: foo (--xx=x|--yy=y)..."""
|
||||
$ prog --xx=1 --yy=2
|
||||
{"--xx": ["1"], "--yy": ["2"]}
|
||||
|
||||
#
|
||||
# POSIXly correct tokenization
|
||||
#
|
||||
|
||||
r"""usage: prog [<input file>]"""
|
||||
$ prog f.txt
|
||||
{"<input file>": "f.txt"}
|
||||
|
||||
r"""usage: prog [--input=<file name>]..."""
|
||||
$ prog --input a.txt --input=b.txt
|
||||
{"--input": ["a.txt", "b.txt"]}
|
||||
|
||||
#
|
||||
# Issue 85: `[options]` shortcut with multiple subcommands
|
||||
#
|
||||
|
||||
r"""usage: prog good [options]
|
||||
prog fail [options]
|
||||
|
||||
options: --loglevel=N
|
||||
|
||||
"""
|
||||
$ prog fail --loglevel 5
|
||||
{"--loglevel": "5", "fail": true, "good": false}
|
||||
|
||||
#
|
||||
# Usage-section syntax
|
||||
#
|
||||
|
||||
r"""usage:prog --foo"""
|
||||
$ prog --foo
|
||||
{"--foo": true}
|
||||
|
||||
r"""PROGRAM USAGE: prog --foo"""
|
||||
$ prog --foo
|
||||
{"--foo": true}
|
||||
|
||||
r"""Usage: prog --foo
|
||||
prog --bar
|
||||
NOT PART OF SECTION"""
|
||||
$ prog --foo
|
||||
{"--foo": true, "--bar": false}
|
||||
|
||||
r"""Usage:
|
||||
prog --foo
|
||||
prog --bar
|
||||
|
||||
NOT PART OF SECTION"""
|
||||
$ prog --foo
|
||||
{"--foo": true, "--bar": false}
|
||||
|
||||
r"""Usage:
|
||||
prog --foo
|
||||
prog --bar
|
||||
NOT PART OF SECTION"""
|
||||
$ prog --foo
|
||||
{"--foo": true, "--bar": false}
|
||||
|
||||
#
|
||||
# Options-section syntax
|
||||
#
|
||||
|
||||
r"""Usage: prog [options]
|
||||
|
||||
global options: --foo
|
||||
local options: --baz
|
||||
--bar
|
||||
other options:
|
||||
--egg
|
||||
--spam
|
||||
-not-an-option-
|
||||
|
||||
"""
|
||||
$ prog --baz --egg
|
||||
{"--foo": false, "--baz": true, "--bar": false, "--egg": true, "--spam": false}
|
126
vendor/github.com/docopt/docopt-go/token.go
generated
vendored
Normal file
@ -0,0 +1,126 @@
|
||||
package docopt
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type tokenList struct {
|
||||
tokens []string
|
||||
errorFunc func(string, ...interface{}) error
|
||||
err errorType
|
||||
}
|
||||
type token string
|
||||
|
||||
func newTokenList(source []string, err errorType) *tokenList {
|
||||
errorFunc := newError
|
||||
if err == errorUser {
|
||||
errorFunc = newUserError
|
||||
} else if err == errorLanguage {
|
||||
errorFunc = newLanguageError
|
||||
}
|
||||
return &tokenList{source, errorFunc, err}
|
||||
}
|
||||
|
||||
func tokenListFromString(source string) *tokenList {
|
||||
return newTokenList(strings.Fields(source), errorUser)
|
||||
}
|
||||
|
||||
func tokenListFromPattern(source string) *tokenList {
|
||||
p := regexp.MustCompile(`([\[\]\(\)\|]|\.\.\.)`)
|
||||
source = p.ReplaceAllString(source, ` $1 `)
|
||||
p = regexp.MustCompile(`\s+|(\S*<.*?>)`)
|
||||
split := p.Split(source, -1)
|
||||
match := p.FindAllStringSubmatch(source, -1)
|
||||
var result []string
|
||||
l := len(split)
|
||||
for i := 0; i < l; i++ {
|
||||
if len(split[i]) > 0 {
|
||||
result = append(result, split[i])
|
||||
}
|
||||
if i < l-1 && len(match[i][1]) > 0 {
|
||||
result = append(result, match[i][1])
|
||||
}
|
||||
}
|
||||
return newTokenList(result, errorLanguage)
|
||||
}
|
||||
|
||||
func (t *token) eq(s string) bool {
|
||||
if t == nil {
|
||||
return false
|
||||
}
|
||||
return string(*t) == s
|
||||
}
|
||||
func (t *token) match(matchNil bool, tokenStrings ...string) bool {
|
||||
if t == nil && matchNil {
|
||||
return true
|
||||
} else if t == nil && !matchNil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, tok := range tokenStrings {
|
||||
if tok == string(*t) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (t *token) hasPrefix(prefix string) bool {
|
||||
if t == nil {
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(string(*t), prefix)
|
||||
}
|
||||
func (t *token) hasSuffix(suffix string) bool {
|
||||
if t == nil {
|
||||
return false
|
||||
}
|
||||
return strings.HasSuffix(string(*t), suffix)
|
||||
}
|
||||
func (t *token) isUpper() bool {
|
||||
if t == nil {
|
||||
return false
|
||||
}
|
||||
return isStringUppercase(string(*t))
|
||||
}
|
||||
func (t *token) String() string {
|
||||
if t == nil {
|
||||
return ""
|
||||
}
|
||||
return string(*t)
|
||||
}
|
||||
|
||||
func (tl *tokenList) current() *token {
|
||||
if len(tl.tokens) > 0 {
|
||||
return (*token)(&(tl.tokens[0]))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tl *tokenList) length() int {
|
||||
return len(tl.tokens)
|
||||
}
|
||||
|
||||
func (tl *tokenList) move() *token {
|
||||
if len(tl.tokens) > 0 {
|
||||
t := tl.tokens[0]
|
||||
tl.tokens = tl.tokens[1:]
|
||||
return (*token)(&t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// returns true if all cased characters in the string are uppercase
|
||||
// and there is at least one cased character
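// For example (illustrative): isStringUppercase("FILE") is true, while
// isStringUppercase("--file") and isStringUppercase("123") are false.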
|
||||
func isStringUppercase(s string) bool {
|
||||
if strings.ToUpper(s) != s {
|
||||
return false
|
||||
}
|
||||
for _, c := range []rune(s) {
|
||||
if unicode.IsUpper(c) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
38
vendor/github.com/go-asn1-ber/asn1-ber/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
language: go
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.2.x
|
||||
env: GOOS=linux GOARCH=amd64
|
||||
- go: 1.2.x
|
||||
env: GOOS=linux GOARCH=386
|
||||
- go: 1.2.x
|
||||
env: GOOS=windows GOARCH=amd64
|
||||
- go: 1.2.x
|
||||
env: GOOS=windows GOARCH=386
|
||||
- go: 1.3.x
|
||||
- go: 1.4.x
|
||||
- go: 1.5.x
|
||||
- go: 1.6.x
|
||||
- go: 1.7.x
|
||||
- go: 1.8.x
|
||||
- go: 1.9.x
|
||||
- go: 1.10.x
|
||||
- go: 1.11.x
|
||||
- go: 1.12.x
|
||||
- go: 1.13.x
|
||||
env: GOOS=linux GOARCH=amd64
|
||||
- go: 1.13.x
|
||||
env: GOOS=linux GOARCH=386
|
||||
- go: 1.13.x
|
||||
env: GOOS=windows GOARCH=amd64
|
||||
- go: 1.13.x
|
||||
env: GOOS=windows GOARCH=386
|
||||
- go: tip
|
||||
go_import_path: gopkg.in/asn-ber.v1
|
||||
install:
|
||||
- go list -f '{{range .Imports}}{{.}} {{end}}' ./... | xargs go get -v
|
||||
- go list -f '{{range .TestImports}}{{.}} {{end}}' ./... | xargs go get -v
|
||||
- go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover
|
||||
- go build -v ./...
|
||||
script:
|
||||
- go test -v -cover ./... || go test -v ./...
|
22
vendor/github.com/go-asn1-ber/asn1-ber/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
|
||||
Portions copyright (c) 2015-2016 go-asn1-ber Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
24
vendor/github.com/go-asn1-ber/asn1-ber/README.md
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
[![GoDoc](https://godoc.org/gopkg.in/asn1-ber.v1?status.svg)](https://godoc.org/gopkg.in/asn1-ber.v1) [![Build Status](https://travis-ci.org/go-asn1-ber/asn1-ber.svg)](https://travis-ci.org/go-asn1-ber/asn1-ber)
|
||||
|
||||
|
||||
ASN1 BER Encoding / Decoding Library for the GO programming language.
|
||||
---------------------------------------------------------------------
|
||||
|
||||
Required libraries:
|
||||
None
|
||||
|
||||
Working:
|
||||
Very basic encoding / decoding needed for LDAP protocol
|
||||
|
||||
Tests Implemented:
|
||||
A few
|
||||
|
||||
TODO:
|
||||
Fix all encoding / decoding to conform to ASN1 BER spec
|
||||
Implement Tests / Benchmarks
|
||||
|
||||
---
|
||||
|
||||
The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
|
||||
The design is licensed under the Creative Commons 3.0 Attributions license.
|
||||
Read this article for more details: http://blog.golang.org/gopher
|
512
vendor/github.com/go-asn1-ber/asn1-ber/ber.go
generated
vendored
Normal file
@ -0,0 +1,512 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// MaxPacketLengthBytes specifies the maximum allowed packet size when calling ReadPacket or DecodePacket. Set to 0 for
|
||||
// no limit.
|
||||
var MaxPacketLengthBytes int64 = math.MaxInt32
|
||||
|
||||
type Packet struct {
|
||||
Identifier
|
||||
Value interface{}
|
||||
ByteValue []byte
|
||||
Data *bytes.Buffer
|
||||
Children []*Packet
|
||||
Description string
|
||||
}
|
||||
|
||||
type Identifier struct {
|
||||
ClassType Class
|
||||
TagType Type
|
||||
Tag Tag
|
||||
}
|
||||
|
||||
type Tag uint64
|
||||
|
||||
const (
|
||||
TagEOC Tag = 0x00
|
||||
TagBoolean Tag = 0x01
|
||||
TagInteger Tag = 0x02
|
||||
TagBitString Tag = 0x03
|
||||
TagOctetString Tag = 0x04
|
||||
TagNULL Tag = 0x05
|
||||
TagObjectIdentifier Tag = 0x06
|
||||
TagObjectDescriptor Tag = 0x07
|
||||
TagExternal Tag = 0x08
|
||||
TagRealFloat Tag = 0x09
|
||||
TagEnumerated Tag = 0x0a
|
||||
TagEmbeddedPDV Tag = 0x0b
|
||||
TagUTF8String Tag = 0x0c
|
||||
TagRelativeOID Tag = 0x0d
|
||||
TagSequence Tag = 0x10
|
||||
TagSet Tag = 0x11
|
||||
TagNumericString Tag = 0x12
|
||||
TagPrintableString Tag = 0x13
|
||||
TagT61String Tag = 0x14
|
||||
TagVideotexString Tag = 0x15
|
||||
TagIA5String Tag = 0x16
|
||||
TagUTCTime Tag = 0x17
|
||||
TagGeneralizedTime Tag = 0x18
|
||||
TagGraphicString Tag = 0x19
|
||||
TagVisibleString Tag = 0x1a
|
||||
TagGeneralString Tag = 0x1b
|
||||
TagUniversalString Tag = 0x1c
|
||||
TagCharacterString Tag = 0x1d
|
||||
TagBMPString Tag = 0x1e
|
||||
TagBitmask Tag = 0x1f // xxx11111b
|
||||
|
||||
// HighTag indicates the start of a high-tag byte sequence
|
||||
HighTag Tag = 0x1f // xxx11111b
|
||||
// HighTagContinueBitmask indicates the high-tag byte sequence should continue
|
||||
HighTagContinueBitmask Tag = 0x80 // 10000000b
|
||||
// HighTagValueBitmask obtains the tag value from a high-tag byte sequence byte
|
||||
HighTagValueBitmask Tag = 0x7f // 01111111b
|
||||
)
|
||||
|
||||
const (
|
||||
// LengthLongFormBitmask is the mask to apply to the length byte to see if a long-form byte sequence is used
|
||||
LengthLongFormBitmask = 0x80
|
||||
// LengthValueBitmask is the mask to apply to the length byte to get the number of bytes in the long-form byte sequence
|
||||
LengthValueBitmask = 0x7f
|
||||
|
||||
// LengthIndefinite is returned from readLength to indicate an indefinite length
|
||||
LengthIndefinite = -1
|
||||
)
|
||||
|
||||
var tagMap = map[Tag]string{
|
||||
TagEOC: "EOC (End-of-Content)",
|
||||
TagBoolean: "Boolean",
|
||||
TagInteger: "Integer",
|
||||
TagBitString: "Bit String",
|
||||
TagOctetString: "Octet String",
|
||||
TagNULL: "NULL",
|
||||
TagObjectIdentifier: "Object Identifier",
|
||||
TagObjectDescriptor: "Object Descriptor",
|
||||
TagExternal: "External",
|
||||
TagRealFloat: "Real (float)",
|
||||
TagEnumerated: "Enumerated",
|
||||
TagEmbeddedPDV: "Embedded PDV",
|
||||
TagUTF8String: "UTF8 String",
|
||||
TagRelativeOID: "Relative-OID",
|
||||
TagSequence: "Sequence and Sequence of",
|
||||
TagSet: "Set and Set OF",
|
||||
TagNumericString: "Numeric String",
|
||||
TagPrintableString: "Printable String",
|
||||
TagT61String: "T61 String",
|
||||
TagVideotexString: "Videotex String",
|
||||
TagIA5String: "IA5 String",
|
||||
TagUTCTime: "UTC Time",
|
||||
TagGeneralizedTime: "Generalized Time",
|
||||
TagGraphicString: "Graphic String",
|
||||
TagVisibleString: "Visible String",
|
||||
TagGeneralString: "General String",
|
||||
TagUniversalString: "Universal String",
|
||||
TagCharacterString: "Character String",
|
||||
TagBMPString: "BMP String",
|
||||
}
|
||||
|
||||
type Class uint8
|
||||
|
||||
const (
|
||||
ClassUniversal Class = 0 // 00xxxxxxb
|
||||
ClassApplication Class = 64 // 01xxxxxxb
|
||||
ClassContext Class = 128 // 10xxxxxxb
|
||||
ClassPrivate Class = 192 // 11xxxxxxb
|
||||
ClassBitmask Class = 192 // 11xxxxxxb
|
||||
)
|
||||
|
||||
var ClassMap = map[Class]string{
|
||||
ClassUniversal: "Universal",
|
||||
ClassApplication: "Application",
|
||||
ClassContext: "Context",
|
||||
ClassPrivate: "Private",
|
||||
}
|
||||
|
||||
type Type uint8
|
||||
|
||||
const (
|
||||
TypePrimitive Type = 0 // xx0xxxxxb
|
||||
TypeConstructed Type = 32 // xx1xxxxxb
|
||||
TypeBitmask Type = 32 // xx1xxxxxb
|
||||
)
|
||||
|
||||
var TypeMap = map[Type]string{
|
||||
TypePrimitive: "Primitive",
|
||||
TypeConstructed: "Constructed",
|
||||
}
|
||||
|
||||
var Debug bool = false
|
||||
|
||||
func PrintBytes(out io.Writer, buf []byte, indent string) {
|
||||
data_lines := make([]string, (len(buf)/30)+1)
|
||||
num_lines := make([]string, (len(buf)/30)+1)
|
||||
|
||||
for i, b := range buf {
|
||||
data_lines[i/30] += fmt.Sprintf("%02x ", b)
|
||||
num_lines[i/30] += fmt.Sprintf("%02d ", (i+1)%100)
|
||||
}
|
||||
|
||||
for i := 0; i < len(data_lines); i++ {
|
||||
out.Write([]byte(indent + data_lines[i] + "\n"))
|
||||
out.Write([]byte(indent + num_lines[i] + "\n\n"))
|
||||
}
|
||||
}
|
||||
|
||||
func PrintPacket(p *Packet) {
|
||||
printPacket(os.Stdout, p, 0, false)
|
||||
}
|
||||
|
||||
func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) {
|
||||
indent_str := ""
|
||||
|
||||
for len(indent_str) != indent {
|
||||
indent_str += " "
|
||||
}
|
||||
|
||||
class_str := ClassMap[p.ClassType]
|
||||
|
||||
tagtype_str := TypeMap[p.TagType]
|
||||
|
||||
tag_str := fmt.Sprintf("0x%02X", p.Tag)
|
||||
|
||||
if p.ClassType == ClassUniversal {
|
||||
tag_str = tagMap[p.Tag]
|
||||
}
|
||||
|
||||
value := fmt.Sprint(p.Value)
|
||||
description := ""
|
||||
|
||||
if p.Description != "" {
|
||||
description = p.Description + ": "
|
||||
}
|
||||
|
||||
fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indent_str, description, class_str, tagtype_str, tag_str, p.Data.Len(), value)
|
||||
|
||||
if printBytes {
|
||||
PrintBytes(out, p.Bytes(), indent_str)
|
||||
}
|
||||
|
||||
for _, child := range p.Children {
|
||||
printPacket(out, child, indent+1, printBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// ReadPacket reads a single Packet from the reader
|
||||
func ReadPacket(reader io.Reader) (*Packet, error) {
|
||||
p, _, err := readPacket(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func DecodeString(data []byte) string {
|
||||
return string(data)
|
||||
}
|
||||
|
||||
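// ParseInt64 interprets bytes as a big-endian two's complement integer and
// reports an error for inputs longer than 8 bytes. For example (illustrative):
// ParseInt64([]byte{0xff}) yields -1, while ParseInt64([]byte{0x00, 0xff})
// yields 255.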
func ParseInt64(bytes []byte) (ret int64, err error) {
|
||||
if len(bytes) > 8 {
|
||||
// We'll overflow an int64 in this case.
|
||||
err = fmt.Errorf("integer too large")
|
||||
return
|
||||
}
|
||||
for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
|
||||
ret <<= 8
|
||||
ret |= int64(bytes[bytesRead])
|
||||
}
|
||||
|
||||
// Shift up and down in order to sign extend the result.
|
||||
ret <<= 64 - uint8(len(bytes))*8
|
||||
ret >>= 64 - uint8(len(bytes))*8
|
||||
return
|
||||
}
|
||||
|
||||
func encodeInteger(i int64) []byte {
|
||||
n := int64Length(i)
|
||||
out := make([]byte, n)
|
||||
|
||||
var j int
|
||||
for ; n > 0; n-- {
|
||||
out[j] = (byte(i >> uint((n-1)*8)))
|
||||
j++
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
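// int64Length returns the minimum number of bytes needed to represent i as a
// signed big-endian integer, e.g. (illustrative) 127 -> 1, 128 -> 2,
// -128 -> 1, -129 -> 2.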
func int64Length(i int64) (numBytes int) {
|
||||
numBytes = 1
|
||||
|
||||
for i > 127 {
|
||||
numBytes++
|
||||
i >>= 8
|
||||
}
|
||||
|
||||
for i < -128 {
|
||||
numBytes++
|
||||
i >>= 8
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DecodePacket decodes the given bytes into a single Packet
|
||||
// If a decode error is encountered, nil is returned.
|
||||
func DecodePacket(data []byte) *Packet {
|
||||
p, _, _ := readPacket(bytes.NewBuffer(data))
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// DecodePacketErr decodes the given bytes into a single Packet
|
||||
// If a decode error is encountered, nil is returned
|
||||
func DecodePacketErr(data []byte) (*Packet, error) {
|
||||
p, _, err := readPacket(bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// readPacket reads a single Packet from the reader, returning the number of bytes read
|
||||
func readPacket(reader io.Reader) (*Packet, int, error) {
|
||||
identifier, length, read, err := readHeader(reader)
|
||||
if err != nil {
|
||||
return nil, read, err
|
||||
}
|
||||
|
||||
p := &Packet{
|
||||
Identifier: identifier,
|
||||
}
|
||||
|
||||
p.Data = new(bytes.Buffer)
|
||||
p.Children = make([]*Packet, 0, 2)
|
||||
p.Value = nil
|
||||
|
||||
if p.TagType == TypeConstructed {
|
||||
// TODO: if universal, ensure tag type is allowed to be constructed
|
||||
|
||||
// Track how much content we've read
|
||||
contentRead := 0
|
||||
for {
|
||||
if length != LengthIndefinite {
|
||||
// End if we've read what we've been told to
|
||||
if contentRead == length {
|
||||
break
|
||||
}
|
||||
// Detect if a packet boundary didn't fall on the expected length
|
||||
if contentRead > length {
|
||||
return nil, read, fmt.Errorf("expected to read %d bytes, read %d", length, contentRead)
|
||||
}
|
||||
}
|
||||
|
||||
// Read the next packet
|
||||
child, r, err := readPacket(reader)
|
||||
if err != nil {
|
||||
return nil, read, err
|
||||
}
|
||||
contentRead += r
|
||||
read += r
|
||||
|
||||
// Test if this is the EOC marker for our packet
|
||||
if isEOCPacket(child) {
|
||||
if length == LengthIndefinite {
|
||||
break
|
||||
}
|
||||
return nil, read, errors.New("eoc child not allowed with definite length")
|
||||
}
|
||||
|
||||
// Append and continue
|
||||
p.AppendChild(child)
|
||||
}
|
||||
return p, read, nil
|
||||
}
|
||||
|
||||
if length == LengthIndefinite {
|
||||
return nil, read, errors.New("indefinite length used with primitive type")
|
||||
}
|
||||
|
||||
// Read definite-length content
|
||||
if MaxPacketLengthBytes > 0 && int64(length) > MaxPacketLengthBytes {
|
||||
return nil, read, fmt.Errorf("length %d greater than maximum %d", length, MaxPacketLengthBytes)
|
||||
}
|
||||
content := make([]byte, length, length)
|
||||
if length > 0 {
|
||||
_, err := io.ReadFull(reader, content)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return nil, read, io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil, read, err
|
||||
}
|
||||
read += length
|
||||
}
|
||||
|
||||
if p.ClassType == ClassUniversal {
|
||||
p.Data.Write(content)
|
||||
p.ByteValue = content
|
||||
|
||||
switch p.Tag {
|
||||
case TagEOC:
|
||||
case TagBoolean:
|
||||
val, _ := ParseInt64(content)
|
||||
|
||||
p.Value = val != 0
|
||||
case TagInteger:
|
||||
p.Value, _ = ParseInt64(content)
|
||||
case TagBitString:
|
||||
case TagOctetString:
|
||||
// the actual string encoding is not known here
|
||||
// (e.g. for LDAP the content is already a UTF-8-encoded
|
||||
// string). Return the data without further processing
|
||||
p.Value = DecodeString(content)
|
||||
case TagNULL:
|
||||
case TagObjectIdentifier:
|
||||
case TagObjectDescriptor:
|
||||
case TagExternal:
|
||||
case TagRealFloat:
|
||||
case TagEnumerated:
|
||||
p.Value, _ = ParseInt64(content)
|
||||
case TagEmbeddedPDV:
|
||||
case TagUTF8String:
|
||||
p.Value = DecodeString(content)
|
||||
case TagRelativeOID:
|
||||
case TagSequence:
|
||||
case TagSet:
|
||||
case TagNumericString:
|
||||
case TagPrintableString:
|
||||
p.Value = DecodeString(content)
|
||||
case TagT61String:
|
||||
case TagVideotexString:
|
||||
case TagIA5String:
|
||||
case TagUTCTime:
|
||||
case TagGeneralizedTime:
|
||||
case TagGraphicString:
|
||||
case TagVisibleString:
|
||||
case TagGeneralString:
|
||||
case TagUniversalString:
|
||||
case TagCharacterString:
|
||||
case TagBMPString:
|
||||
}
|
||||
} else {
|
||||
p.Data.Write(content)
|
||||
}
|
||||
|
||||
return p, read, nil
|
||||
}
|
||||
|
||||
func (p *Packet) Bytes() []byte {
|
||||
var out bytes.Buffer
|
||||
|
||||
out.Write(encodeIdentifier(p.Identifier))
|
||||
out.Write(encodeLength(p.Data.Len()))
|
||||
out.Write(p.Data.Bytes())
|
||||
|
||||
return out.Bytes()
|
||||
}
|
||||
|
||||
func (p *Packet) AppendChild(child *Packet) {
|
||||
p.Data.Write(child.Bytes())
|
||||
p.Children = append(p.Children, child)
|
||||
}
|
||||
|
||||
func Encode(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
|
||||
p := new(Packet)
|
||||
|
||||
p.ClassType = ClassType
|
||||
p.TagType = TagType
|
||||
p.Tag = Tag
|
||||
p.Data = new(bytes.Buffer)
|
||||
|
||||
p.Children = make([]*Packet, 0, 2)
|
||||
|
||||
p.Value = Value
|
||||
p.Description = Description
|
||||
|
||||
if Value != nil {
|
||||
v := reflect.ValueOf(Value)
|
||||
|
||||
if ClassType == ClassUniversal {
|
||||
switch Tag {
|
||||
case TagOctetString:
|
||||
sv, ok := v.Interface().(string)
|
||||
|
||||
if ok {
|
||||
p.Data.Write([]byte(sv))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func NewSequence(Description string) *Packet {
|
||||
return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, Description)
|
||||
}
|
||||
|
||||
func NewBoolean(ClassType Class, TagType Type, Tag Tag, Value bool, Description string) *Packet {
|
||||
intValue := int64(0)
|
||||
|
||||
if Value {
|
||||
intValue = 1
|
||||
}
|
||||
|
||||
p := Encode(ClassType, TagType, Tag, nil, Description)
|
||||
|
||||
p.Value = Value
|
||||
p.Data.Write(encodeInteger(intValue))
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
|
||||
p := Encode(ClassType, TagType, Tag, nil, Description)
|
||||
|
||||
p.Value = Value
|
||||
switch v := Value.(type) {
|
||||
case int:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case uint:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case int64:
|
||||
p.Data.Write(encodeInteger(v))
|
||||
case uint64:
|
||||
// TODO : check range or add encodeUInt...
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case int32:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case uint32:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case int16:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case uint16:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case int8:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case uint8:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
default:
|
||||
// TODO : add support for big.Int ?
|
||||
panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v))
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func NewString(ClassType Class, TagType Type, Tag Tag, Value, Description string) *Packet {
|
||||
p := Encode(ClassType, TagType, Tag, nil, Description)
|
||||
|
||||
p.Value = Value
|
||||
p.Data.Write([]byte(Value))
|
||||
|
||||
return p
|
||||
}
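
To tie the constructors above to the decoder earlier in this file, here is a hedged usage sketch from outside the package: it builds a small SEQUENCE, serialises it with Bytes(), and re-parses the wire bytes with ber.ReadPacket (the same entry point the LDAP connection code later in this diff uses). Only APIs visible in this diff are used.

package main

import (
	"bytes"
	"fmt"

	ber "github.com/go-asn1-ber/asn1-ber"
)

func main() {
	// Build: SEQUENCE { INTEGER 3, OCTET STRING "hello" }
	seq := ber.NewSequence("demo sequence")
	seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "version"))
	seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "hello", "greeting"))

	// Serialise, then parse the same bytes back into a Packet tree.
	parsed, err := ber.ReadPacket(bytes.NewReader(seq.Bytes()))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(parsed.Children), parsed.Children[1].Value) // 2 hello
}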
|
25
vendor/github.com/go-asn1-ber/asn1-ber/content_int.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
package ber
|
||||
|
||||
func encodeUnsignedInteger(i uint64) []byte {
|
||||
n := uint64Length(i)
|
||||
out := make([]byte, n)
|
||||
|
||||
var j int
|
||||
for ; n > 0; n-- {
|
||||
out[j] = (byte(i >> uint((n-1)*8)))
|
||||
j++
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func uint64Length(i uint64) (numBytes int) {
|
||||
numBytes = 1
|
||||
|
||||
for i > 255 {
|
||||
numBytes++
|
||||
i >>= 8
|
||||
}
|
||||
|
||||
return
|
||||
}
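
A quick worked example of the big-endian helpers above, written as a test-style sketch that would have to live inside the ber package because both functions are unexported:

package ber

import (
	"bytes"
	"testing"
)

func TestEncodeUnsignedIntegerSketch(t *testing.T) {
	// 258 = 0x0102: two bytes, most significant first.
	if got := uint64Length(258); got != 2 {
		t.Fatalf("uint64Length(258) = %d, want 2", got)
	}
	if got := encodeUnsignedInteger(258); !bytes.Equal(got, []byte{0x01, 0x02}) {
		t.Fatalf("encodeUnsignedInteger(258) = %#v, want [0x01 0x02]", got)
	}
}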
|
3
vendor/github.com/go-asn1-ber/asn1-ber/go.mod
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
module github.com/go-asn1-ber/asn1-ber
|
||||
|
||||
go 1.13
|
35
vendor/github.com/go-asn1-ber/asn1-ber/header.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) {
|
||||
if i, c, err := readIdentifier(reader); err != nil {
|
||||
return Identifier{}, 0, read, err
|
||||
} else {
|
||||
identifier = i
|
||||
read += c
|
||||
}
|
||||
|
||||
if l, c, err := readLength(reader); err != nil {
|
||||
return Identifier{}, 0, read, err
|
||||
} else {
|
||||
length = l
|
||||
read += c
|
||||
}
|
||||
|
||||
// Validate length type with identifier (x.690, 8.1.3.2.a)
|
||||
if length == LengthIndefinite && identifier.TagType == TypePrimitive {
|
||||
return Identifier{}, 0, read, errors.New("indefinite length used with primitive type")
|
||||
}
|
||||
|
||||
if length < LengthIndefinite {
|
||||
err = fmt.Errorf("length cannot be less than %d", LengthIndefinite)
|
||||
return
|
||||
}
|
||||
|
||||
return identifier, length, read, nil
|
||||
}
|
112
vendor/github.com/go-asn1-ber/asn1-ber/identifier.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func readIdentifier(reader io.Reader) (Identifier, int, error) {
|
||||
identifier := Identifier{}
|
||||
read := 0
|
||||
|
||||
// identifier byte
|
||||
b, err := readByte(reader)
|
||||
if err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("error reading identifier byte: %v\n", err)
|
||||
}
|
||||
return Identifier{}, read, err
|
||||
}
|
||||
read++
|
||||
|
||||
identifier.ClassType = Class(b) & ClassBitmask
|
||||
identifier.TagType = Type(b) & TypeBitmask
|
||||
|
||||
if tag := Tag(b) & TagBitmask; tag != HighTag {
|
||||
// short-form tag
|
||||
identifier.Tag = tag
|
||||
return identifier, read, nil
|
||||
}
|
||||
|
||||
// high-tag-number tag
|
||||
tagBytes := 0
|
||||
for {
|
||||
b, err := readByte(reader)
|
||||
if err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err)
|
||||
}
|
||||
return Identifier{}, read, err
|
||||
}
|
||||
tagBytes++
|
||||
read++
|
||||
|
||||
// Lowest 7 bits get appended to the tag value (x.690, 8.1.2.4.2.b)
|
||||
identifier.Tag <<= 7
|
||||
identifier.Tag |= Tag(b) & HighTagValueBitmask
|
||||
|
||||
// First byte may not be all zeros (x.690, 8.1.2.4.2.c)
|
||||
if tagBytes == 1 && identifier.Tag == 0 {
|
||||
return Identifier{}, read, errors.New("invalid first high-tag-number tag byte")
|
||||
}
|
||||
// Overflow of int64
|
||||
// TODO: support big int tags?
|
||||
if tagBytes > 9 {
|
||||
return Identifier{}, read, errors.New("high-tag-number tag overflow")
|
||||
}
|
||||
|
||||
// Top bit of 0 means this is the last byte in the high-tag-number tag (x.690, 8.1.2.4.2.a)
|
||||
if Tag(b)&HighTagContinueBitmask == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return identifier, read, nil
|
||||
}
|
||||
|
||||
func encodeIdentifier(identifier Identifier) []byte {
|
||||
b := []byte{0x0}
|
||||
b[0] |= byte(identifier.ClassType)
|
||||
b[0] |= byte(identifier.TagType)
|
||||
|
||||
if identifier.Tag < HighTag {
|
||||
// Short-form
|
||||
b[0] |= byte(identifier.Tag)
|
||||
} else {
|
||||
// high-tag-number
|
||||
b[0] |= byte(HighTag)
|
||||
|
||||
tag := identifier.Tag
|
||||
|
||||
b = append(b, encodeHighTag(tag)...)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func encodeHighTag(tag Tag) []byte {
|
||||
// set cap=4 to hopefully avoid additional allocations
|
||||
b := make([]byte, 0, 4)
|
||||
for tag != 0 {
|
||||
// t := last 7 bits of tag (HighTagValueBitmask = 0x7F)
|
||||
t := tag & HighTagValueBitmask
|
||||
|
||||
// right shift tag 7 to remove what was just pulled off
|
||||
tag >>= 7
|
||||
|
||||
// if b already has entries this entry needs a continuation bit (0x80)
|
||||
if len(b) != 0 {
|
||||
t |= HighTagContinueBitmask
|
||||
}
|
||||
|
||||
b = append(b, byte(t))
|
||||
}
|
||||
// reverse
|
||||
// since bits were pulled off 'tag' small to high the byte slice is in reverse order.
|
||||
// example: tag = 0xFF results in {0x7F, 0x01 + 0x80 (continuation bit)}
|
||||
// this needs to be reversed into 0x81 0x7F
|
||||
for i, j := 0, len(b)-1; i < len(b)/2; i++ {
|
||||
b[i], b[j-i] = b[j-i], b[i]
|
||||
}
|
||||
return b
|
||||
}
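
The comment above already walks through tag 0xFF; the same walkthrough as a test-style sketch (again inside the ber package, since encodeHighTag is unexported):

package ber

import (
	"bytes"
	"testing"
)

func TestEncodeHighTagSketch(t *testing.T) {
	// 0xFF: the low 7 bits (0x7F) come last on the wire, the remaining
	// bit becomes 0x01 with the continuation bit (0x80) set, giving
	// 0x81 0x7F after the reversal at the end of encodeHighTag.
	if got := encodeHighTag(Tag(0xFF)); !bytes.Equal(got, []byte{0x81, 0x7F}) {
		t.Fatalf("encodeHighTag(0xFF) = %#v, want [0x81 0x7F]", got)
	}
}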
|
81
vendor/github.com/go-asn1-ber/asn1-ber/length.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func readLength(reader io.Reader) (length int, read int, err error) {
|
||||
// length byte
|
||||
b, err := readByte(reader)
|
||||
if err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("error reading length byte: %v\n", err)
|
||||
}
|
||||
return 0, 0, err
|
||||
}
|
||||
read++
|
||||
|
||||
switch {
|
||||
case b == 0xFF:
|
||||
// Invalid 0xFF (x.690, 8.1.3.5.c)
|
||||
return 0, read, errors.New("invalid length byte 0xff")
|
||||
|
||||
case b == LengthLongFormBitmask:
|
||||
// Indefinite form, we have to decode packets until we encounter an EOC packet (x.690, 8.1.3.6)
|
||||
length = LengthIndefinite
|
||||
|
||||
case b&LengthLongFormBitmask == 0:
|
||||
// Short definite form, extract the length from the bottom 7 bits (x.690, 8.1.3.4)
|
||||
length = int(b) & LengthValueBitmask
|
||||
|
||||
case b&LengthLongFormBitmask != 0:
|
||||
// Long definite form, extract the number of length bytes to follow from the bottom 7 bits (x.690, 8.1.3.5.b)
|
||||
lengthBytes := int(b) & LengthValueBitmask
|
||||
// Protect against overflow
|
||||
// TODO: support big int length?
|
||||
if lengthBytes > 8 {
|
||||
return 0, read, errors.New("long-form length overflow")
|
||||
}
|
||||
|
||||
// Accumulate into a 64-bit variable
|
||||
var length64 int64
|
||||
for i := 0; i < lengthBytes; i++ {
|
||||
b, err = readByte(reader)
|
||||
if err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("error reading long-form length byte %d: %v\n", i, err)
|
||||
}
|
||||
return 0, read, err
|
||||
}
|
||||
read++
|
||||
|
||||
// x.690, 8.1.3.5
|
||||
length64 <<= 8
|
||||
length64 |= int64(b)
|
||||
}
|
||||
|
||||
// Cast to a platform-specific integer
|
||||
length = int(length64)
|
||||
// Ensure we didn't overflow
|
||||
if int64(length) != length64 {
|
||||
return 0, read, errors.New("long-form length overflow")
|
||||
}
|
||||
|
||||
default:
|
||||
return 0, read, errors.New("invalid length byte")
|
||||
}
|
||||
|
||||
return length, read, nil
|
||||
}
|
||||
|
||||
func encodeLength(length int) []byte {
|
||||
length_bytes := encodeUnsignedInteger(uint64(length))
|
||||
if length > 127 || len(length_bytes) > 1 {
|
||||
longFormBytes := []byte{(LengthLongFormBitmask | byte(len(length_bytes)))}
|
||||
longFormBytes = append(longFormBytes, length_bytes...)
|
||||
length_bytes = longFormBytes
|
||||
}
|
||||
return length_bytes
|
||||
}
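
A short sketch of the short-form/long-form split handled by encodeLength, as a test inside the ber package; it assumes LengthLongFormBitmask carries the standard X.690 value 0x80 (the constant itself is defined in a file not shown in this excerpt):

package ber

import (
	"bytes"
	"testing"
)

func TestEncodeLengthSketch(t *testing.T) {
	// 5 fits in 7 bits, so the short definite form is a single byte.
	if got := encodeLength(5); !bytes.Equal(got, []byte{0x05}) {
		t.Fatalf("encodeLength(5) = %#v, want [0x05]", got)
	}
	// 200 does not, so a long-form prefix (0x80 | number of length bytes) is prepended.
	if got := encodeLength(200); !bytes.Equal(got, []byte{0x81, 0xC8}) {
		t.Fatalf("encodeLength(200) = %#v, want [0x81 0xC8]", got)
	}
}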
|
24
vendor/github.com/go-asn1-ber/asn1-ber/util.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
package ber
|
||||
|
||||
import "io"
|
||||
|
||||
func readByte(reader io.Reader) (byte, error) {
|
||||
bytes := make([]byte, 1, 1)
|
||||
_, err := io.ReadFull(reader, bytes)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
return bytes[0], nil
|
||||
}
|
||||
|
||||
func isEOCPacket(p *Packet) bool {
|
||||
return p != nil &&
|
||||
p.Tag == TagEOC &&
|
||||
p.ClassType == ClassUniversal &&
|
||||
p.TagType == TypePrimitive &&
|
||||
len(p.ByteValue) == 0 &&
|
||||
len(p.Children) == 0
|
||||
}
|
22
vendor/github.com/go-ldap/ldap/v3/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
|
||||
Portions copyright (c) 2015-2016 go-ldap Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
91
vendor/github.com/go-ldap/ldap/v3/add.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// Attribute represents an LDAP attribute
|
||||
type Attribute struct {
|
||||
// Type is the name of the LDAP attribute
|
||||
Type string
|
||||
// Vals are the LDAP attribute values
|
||||
Vals []string
|
||||
}
|
||||
|
||||
func (a *Attribute) encode() *ber.Packet {
|
||||
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute")
|
||||
seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type"))
|
||||
set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
|
||||
for _, value := range a.Vals {
|
||||
set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
|
||||
}
|
||||
seq.AppendChild(set)
|
||||
return seq
|
||||
}
|
||||
|
||||
// AddRequest represents an LDAP AddRequest operation
|
||||
type AddRequest struct {
|
||||
// DN identifies the entry being added
|
||||
DN string
|
||||
// Attributes list the attributes of the new entry
|
||||
Attributes []Attribute
|
||||
// Controls hold optional controls to send with the request
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
func (req *AddRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request")
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
|
||||
attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
|
||||
for _, attribute := range req.Attributes {
|
||||
attributes.AppendChild(attribute.encode())
|
||||
}
|
||||
pkt.AppendChild(attributes)
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
if len(req.Controls) > 0 {
|
||||
envelope.AppendChild(encodeControls(req.Controls))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Attribute adds an attribute with the given type and values
|
||||
func (req *AddRequest) Attribute(attrType string, attrVals []string) {
|
||||
req.Attributes = append(req.Attributes, Attribute{Type: attrType, Vals: attrVals})
|
||||
}
|
||||
|
||||
// NewAddRequest returns an AddRequest for the given DN, with no attributes
|
||||
func NewAddRequest(dn string, controls []Control) *AddRequest {
|
||||
return &AddRequest{
|
||||
DN: dn,
|
||||
Controls: controls,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Add performs the given AddRequest
|
||||
func (l *Conn) Add(addRequest *AddRequest) error {
|
||||
msgCtx, err := l.doRequest(addRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationAddResponse {
|
||||
err := GetLDAPError(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
|
||||
}
|
||||
return nil
|
||||
}
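
Putting the AddRequest pieces together, a hedged usage sketch; the DN and attribute values are made up for illustration, and conn is an already-bound *ldap.Conn (see bind.go and conn.go below):

package example

import (
	ldap "github.com/go-ldap/ldap/v3"
)

// addExampleEntry is a hypothetical helper that adds one entry over an
// already-bound connection using the AddRequest API above.
func addExampleEntry(conn *ldap.Conn) error {
	req := ldap.NewAddRequest("uid=jdoe,ou=people,dc=example,dc=com", nil)
	req.Attribute("objectClass", []string{"top", "inetOrgPerson"})
	req.Attribute("cn", []string{"Jane Doe"})
	req.Attribute("sn", []string{"Doe"})
	return conn.Add(req)
}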
|
152
vendor/github.com/go-ldap/ldap/v3/bind.go
generated
vendored
Normal file
@ -0,0 +1,152 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// SimpleBindRequest represents a username/password bind operation
|
||||
type SimpleBindRequest struct {
|
||||
// Username is the name of the Directory object that the client wishes to bind as
|
||||
Username string
|
||||
// Password is the credentials to bind with
|
||||
Password string
|
||||
// Controls are optional controls to send with the bind request
|
||||
Controls []Control
|
||||
// AllowEmptyPassword sets whether the client allows binding with an empty password
|
||||
// (normally used for unauthenticated bind).
|
||||
AllowEmptyPassword bool
|
||||
}
|
||||
|
||||
// SimpleBindResult contains the response from the server
|
||||
type SimpleBindResult struct {
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
// NewSimpleBindRequest returns a bind request
|
||||
func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest {
|
||||
return &SimpleBindRequest{
|
||||
Username: username,
|
||||
Password: password,
|
||||
Controls: controls,
|
||||
AllowEmptyPassword: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (req *SimpleBindRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
|
||||
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Username, "User Name"))
|
||||
pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.Password, "Password"))
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
if len(req.Controls) > 0 {
|
||||
envelope.AppendChild(encodeControls(req.Controls))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SimpleBind performs the simple bind operation defined in the given request
|
||||
func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) {
|
||||
if simpleBindRequest.Password == "" && !simpleBindRequest.AllowEmptyPassword {
|
||||
return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client"))
|
||||
}
|
||||
|
||||
msgCtx, err := l.doRequest(simpleBindRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &SimpleBindResult{
|
||||
Controls: make([]Control, 0),
|
||||
}
|
||||
|
||||
if len(packet.Children) == 3 {
|
||||
for _, child := range packet.Children[2].Children {
|
||||
decodedChild, decodeErr := DecodeControl(child)
|
||||
if decodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to decode child control: %s", decodeErr)
|
||||
}
|
||||
result.Controls = append(result.Controls, decodedChild)
|
||||
}
|
||||
}
|
||||
|
||||
err = GetLDAPError(packet)
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Bind performs a bind with the given username and password.
|
||||
//
|
||||
// It does not allow unauthenticated bind (i.e. empty password). Use the UnauthenticatedBind method
|
||||
// for that.
|
||||
func (l *Conn) Bind(username, password string) error {
|
||||
req := &SimpleBindRequest{
|
||||
Username: username,
|
||||
Password: password,
|
||||
AllowEmptyPassword: false,
|
||||
}
|
||||
_, err := l.SimpleBind(req)
|
||||
return err
|
||||
}
|
||||
|
||||
// UnauthenticatedBind performs an unauthenticated bind.
|
||||
//
|
||||
// A username may be provided for trace (e.g. logging) purposes only, but it is normally not
|
||||
// authenticated or otherwise validated by the LDAP server.
|
||||
//
|
||||
// See https://tools.ietf.org/html/rfc4513#section-5.1.2 .
|
||||
// See https://tools.ietf.org/html/rfc4513#section-6.3.1 .
|
||||
func (l *Conn) UnauthenticatedBind(username string) error {
|
||||
req := &SimpleBindRequest{
|
||||
Username: username,
|
||||
Password: "",
|
||||
AllowEmptyPassword: true,
|
||||
}
|
||||
_, err := l.SimpleBind(req)
|
||||
return err
|
||||
}
|
||||
|
||||
var externalBindRequest = requestFunc(func(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
|
||||
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
|
||||
|
||||
saslAuth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication")
|
||||
saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "EXTERNAL", "SASL Mech"))
|
||||
saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "SASL Cred"))
|
||||
|
||||
pkt.AppendChild(saslAuth)
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
// ExternalBind performs SASL/EXTERNAL authentication.
|
||||
//
|
||||
// Use ldap.DialURL("ldapi://") to connect to the Unix socket before ExternalBind.
|
||||
//
|
||||
// See https://tools.ietf.org/html/rfc4422#appendix-A
|
||||
func (l *Conn) ExternalBind() error {
|
||||
msgCtx, err := l.doRequest(externalBindRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return GetLDAPError(packet)
|
||||
}
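
End to end, the bind API above is typically used like this (a hedged sketch; the URL and credentials are placeholders, and DialURL is defined in conn.go below):

package main

import (
	"log"

	ldap "github.com/go-ldap/ldap/v3"
)

func main() {
	conn, err := ldap.DialURL("ldaps://ldap.example.com:636")
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Simple bind; this fails fast on an empty password unless
	// AllowEmptyPassword is set via a SimpleBindRequest.
	if err := conn.Bind("cn=service,dc=example,dc=com", "hunter2"); err != nil {
		log.Fatalf("bind: %v", err)
	}
}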
|
30
vendor/github.com/go-ldap/ldap/v3/client.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Client knows how to interact with an LDAP server
|
||||
type Client interface {
|
||||
Start()
|
||||
StartTLS(*tls.Config) error
|
||||
Close()
|
||||
SetTimeout(time.Duration)
|
||||
|
||||
Bind(username, password string) error
|
||||
UnauthenticatedBind(username string) error
|
||||
SimpleBind(*SimpleBindRequest) (*SimpleBindResult, error)
|
||||
ExternalBind() error
|
||||
|
||||
Add(*AddRequest) error
|
||||
Del(*DelRequest) error
|
||||
Modify(*ModifyRequest) error
|
||||
ModifyDN(*ModifyDNRequest) error
|
||||
|
||||
Compare(dn, attribute, value string) (bool, error)
|
||||
PasswordModify(*PasswordModifyRequest) (*PasswordModifyResult, error)
|
||||
|
||||
Search(*SearchRequest) (*SearchResult, error)
|
||||
SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error)
|
||||
}
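
Because *Conn satisfies this interface (conn.go below asserts var _ Client = &Conn{}), application code can depend on Client rather than *Conn, which makes it easy to swap in a fake for tests. A hedged sketch with a hypothetical application type:

package directory

import ldap "github.com/go-ldap/ldap/v3"

// Authenticator only needs bind behaviour, so it is written against the
// Client interface instead of the concrete *ldap.Conn.
type Authenticator struct {
	Conn ldap.Client
}

// Check attempts a simple bind with the supplied credentials.
func (a *Authenticator) Check(dn, password string) error {
	return a.Conn.Bind(dn, password)
}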
|
61
vendor/github.com/go-ldap/ldap/v3/compare.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// CompareRequest represents an LDAP CompareRequest operation.
|
||||
type CompareRequest struct {
|
||||
DN string
|
||||
Attribute string
|
||||
Value string
|
||||
}
|
||||
|
||||
func (req *CompareRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request")
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
|
||||
|
||||
ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion")
|
||||
ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Attribute, "AttributeDesc"))
|
||||
ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Value, "AssertionValue"))
|
||||
|
||||
pkt.AppendChild(ava)
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compare checks to see if the attribute of the dn matches value. Returns true if it does, otherwise
|
||||
// false, along with any error encountered.
|
||||
func (l *Conn) Compare(dn, attribute, value string) (bool, error) {
|
||||
msgCtx, err := l.doRequest(&CompareRequest{
|
||||
DN: dn,
|
||||
Attribute: attribute,
|
||||
Value: value})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationCompareResponse {
|
||||
err := GetLDAPError(packet)
|
||||
|
||||
switch {
|
||||
case IsErrorWithCode(err, LDAPResultCompareTrue):
|
||||
return true, nil
|
||||
case IsErrorWithCode(err, LDAPResultCompareFalse):
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return false, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag)
|
||||
}
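
Compare is handy for checking a single attribute without running a full search; a hedged usage sketch (the DN and attribute are placeholders):

package example

import (
	"fmt"

	ldap "github.com/go-ldap/ldap/v3"
)

// hasMail reports whether the entry's mail attribute equals the given value.
func hasMail(conn *ldap.Conn, dn, mail string) (bool, error) {
	ok, err := conn.Compare(dn, "mail", mail)
	if err != nil {
		return false, fmt.Errorf("compare %s: %w", dn, err)
	}
	return ok, nil
}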
|
565
vendor/github.com/go-ldap/ldap/v3/conn.go
generated
vendored
Normal file
@ -0,0 +1,565 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
const (
|
||||
// MessageQuit causes the processMessages loop to exit
|
||||
MessageQuit = 0
|
||||
// MessageRequest sends a request to the server
|
||||
MessageRequest = 1
|
||||
// MessageResponse receives a response from the server
|
||||
MessageResponse = 2
|
||||
// MessageFinish indicates the client considers a particular message ID to be finished
|
||||
MessageFinish = 3
|
||||
// MessageTimeout indicates the client-specified timeout for a particular message ID has been reached
|
||||
MessageTimeout = 4
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultLdapPort default ldap port for pure TCP connection
|
||||
DefaultLdapPort = "389"
|
||||
// DefaultLdapsPort default ldap port for SSL connection
|
||||
DefaultLdapsPort = "636"
|
||||
)
|
||||
|
||||
// PacketResponse contains the packet or error encountered reading a response
|
||||
type PacketResponse struct {
|
||||
// Packet is the packet read from the server
|
||||
Packet *ber.Packet
|
||||
// Error is an error encountered while reading
|
||||
Error error
|
||||
}
|
||||
|
||||
// ReadPacket returns the packet or an error
|
||||
func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) {
|
||||
if (pr == nil) || (pr.Packet == nil && pr.Error == nil) {
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response"))
|
||||
}
|
||||
return pr.Packet, pr.Error
|
||||
}
|
||||
|
||||
type messageContext struct {
|
||||
id int64
|
||||
// close(done) should only be called from finishMessage()
|
||||
done chan struct{}
|
||||
// close(responses) should only be called from processMessages(), and only sent to from sendResponse()
|
||||
responses chan *PacketResponse
|
||||
}
|
||||
|
||||
// sendResponse should only be called within the processMessages() loop which
|
||||
// is also responsible for closing the responses channel.
|
||||
func (msgCtx *messageContext) sendResponse(packet *PacketResponse) {
|
||||
select {
|
||||
case msgCtx.responses <- packet:
|
||||
// Successfully sent packet to message handler.
|
||||
case <-msgCtx.done:
|
||||
// The request handler is done and will not receive more
|
||||
// packets.
|
||||
}
|
||||
}
|
||||
|
||||
type messagePacket struct {
|
||||
Op int
|
||||
MessageID int64
|
||||
Packet *ber.Packet
|
||||
Context *messageContext
|
||||
}
|
||||
|
||||
type sendMessageFlags uint
|
||||
|
||||
const (
|
||||
startTLS sendMessageFlags = 1 << iota
|
||||
)
|
||||
|
||||
// Conn represents an LDAP Connection
|
||||
type Conn struct {
|
||||
// requestTimeout is loaded atomically
|
||||
// so we need to ensure 64-bit alignment on 32-bit platforms.
|
||||
requestTimeout int64
|
||||
conn net.Conn
|
||||
isTLS bool
|
||||
closing uint32
|
||||
closeErr atomic.Value
|
||||
isStartingTLS bool
|
||||
Debug debugging
|
||||
chanConfirm chan struct{}
|
||||
messageContexts map[int64]*messageContext
|
||||
chanMessage chan *messagePacket
|
||||
chanMessageID chan int64
|
||||
wgClose sync.WaitGroup
|
||||
outstandingRequests uint
|
||||
messageMutex sync.Mutex
|
||||
}
|
||||
|
||||
var _ Client = &Conn{}
|
||||
|
||||
// DefaultTimeout is a package-level variable that sets the timeout value
|
||||
// used for the Dial and DialTLS methods.
|
||||
//
|
||||
// WARNING: since this is a package-level variable, setting this value from
|
||||
// multiple places will probably result in undesired behaviour.
|
||||
var DefaultTimeout = 60 * time.Second
|
||||
|
||||
// DialOpt configures DialContext.
|
||||
type DialOpt func(*DialContext)
|
||||
|
||||
// DialWithDialer updates net.Dialer in DialContext.
|
||||
func DialWithDialer(d *net.Dialer) DialOpt {
|
||||
return func(dc *DialContext) {
|
||||
dc.d = d
|
||||
}
|
||||
}
|
||||
|
||||
// DialWithTLSConfig updates tls.Config in DialContext.
|
||||
func DialWithTLSConfig(tc *tls.Config) DialOpt {
|
||||
return func(dc *DialContext) {
|
||||
dc.tc = tc
|
||||
}
|
||||
}
|
||||
|
||||
// DialContext contains necessary parameters to dial the given ldap URL.
|
||||
type DialContext struct {
|
||||
d *net.Dialer
|
||||
tc *tls.Config
|
||||
}
|
||||
|
||||
func (dc *DialContext) dial(u *url.URL) (net.Conn, error) {
|
||||
if u.Scheme == "ldapi" {
|
||||
if u.Path == "" || u.Path == "/" {
|
||||
u.Path = "/var/run/slapd/ldapi"
|
||||
}
|
||||
return dc.d.Dial("unix", u.Path)
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
// we assume that the error is due to a missing port
|
||||
host = u.Host
|
||||
port = ""
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "ldap":
|
||||
if port == "" {
|
||||
port = DefaultLdapPort
|
||||
}
|
||||
return dc.d.Dial("tcp", net.JoinHostPort(host, port))
|
||||
case "ldaps":
|
||||
if port == "" {
|
||||
port = DefaultLdapsPort
|
||||
}
|
||||
return tls.DialWithDialer(dc.d, "tcp", net.JoinHostPort(host, port), dc.tc)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Unknown scheme '%s'", u.Scheme)
|
||||
}
|
||||
|
||||
// Dial connects to the given address on the given network using net.Dial
|
||||
// and then returns a new Conn for the connection.
|
||||
// @deprecated Use DialURL instead.
|
||||
func Dial(network, addr string) (*Conn, error) {
|
||||
c, err := net.DialTimeout(network, addr, DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorNetwork, err)
|
||||
}
|
||||
conn := NewConn(c, false)
|
||||
conn.Start()
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// DialTLS connects to the given address on the given network using tls.Dial
|
||||
// and then returns a new Conn for the connection.
|
||||
// @deprecated Use DialURL instead.
|
||||
func DialTLS(network, addr string, config *tls.Config) (*Conn, error) {
|
||||
c, err := tls.DialWithDialer(&net.Dialer{Timeout: DefaultTimeout}, network, addr, config)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorNetwork, err)
|
||||
}
|
||||
conn := NewConn(c, true)
|
||||
conn.Start()
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// DialURL connects to the given ldap URL.
|
||||
// The following schemas are supported: ldap://, ldaps://, ldapi://.
|
||||
// On success a new Conn for the connection is returned.
|
||||
func DialURL(addr string, opts ...DialOpt) (*Conn, error) {
|
||||
u, err := url.Parse(addr)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorNetwork, err)
|
||||
}
|
||||
|
||||
var dc DialContext
|
||||
for _, opt := range opts {
|
||||
opt(&dc)
|
||||
}
|
||||
if dc.d == nil {
|
||||
dc.d = &net.Dialer{Timeout: DefaultTimeout}
|
||||
}
|
||||
|
||||
c, err := dc.dial(u)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorNetwork, err)
|
||||
}
|
||||
|
||||
conn := NewConn(c, u.Scheme == "ldaps")
|
||||
conn.Start()
|
||||
return conn, nil
|
||||
}
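
DialURL can be tuned through the functional options above; a hedged sketch supplying both a custom dialer and a TLS configuration (the host name is a placeholder):

package main

import (
	"crypto/tls"
	"log"
	"net"
	"time"

	ldap "github.com/go-ldap/ldap/v3"
)

func main() {
	// The port is omitted on purpose: dial() fills in DefaultLdapsPort.
	conn, err := ldap.DialURL(
		"ldaps://ldap.example.com",
		ldap.DialWithDialer(&net.Dialer{Timeout: 10 * time.Second}),
		ldap.DialWithTLSConfig(&tls.Config{ServerName: "ldap.example.com"}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}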
|
||||
|
||||
// NewConn returns a new Conn using conn for network I/O.
|
||||
func NewConn(conn net.Conn, isTLS bool) *Conn {
|
||||
return &Conn{
|
||||
conn: conn,
|
||||
chanConfirm: make(chan struct{}),
|
||||
chanMessageID: make(chan int64),
|
||||
chanMessage: make(chan *messagePacket, 10),
|
||||
messageContexts: map[int64]*messageContext{},
|
||||
requestTimeout: 0,
|
||||
isTLS: isTLS,
|
||||
}
|
||||
}
|
||||
|
||||
// Start initializes goroutines to read responses and process messages
|
||||
func (l *Conn) Start() {
|
||||
l.wgClose.Add(1)
|
||||
go l.reader()
|
||||
go l.processMessages()
|
||||
}
|
||||
|
||||
// IsClosing returns whether or not we're currently closing.
|
||||
func (l *Conn) IsClosing() bool {
|
||||
return atomic.LoadUint32(&l.closing) == 1
|
||||
}
|
||||
|
||||
// setClosing sets the closing value to true
|
||||
func (l *Conn) setClosing() bool {
|
||||
return atomic.CompareAndSwapUint32(&l.closing, 0, 1)
|
||||
}
|
||||
|
||||
// Close closes the connection.
|
||||
func (l *Conn) Close() {
|
||||
l.messageMutex.Lock()
|
||||
defer l.messageMutex.Unlock()
|
||||
|
||||
if l.setClosing() {
|
||||
l.Debug.Printf("Sending quit message and waiting for confirmation")
|
||||
l.chanMessage <- &messagePacket{Op: MessageQuit}
|
||||
<-l.chanConfirm
|
||||
close(l.chanMessage)
|
||||
|
||||
l.Debug.Printf("Closing network connection")
|
||||
if err := l.conn.Close(); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
l.wgClose.Done()
|
||||
}
|
||||
l.wgClose.Wait()
|
||||
}
|
||||
|
||||
// SetTimeout sets the time after a request is sent that a MessageTimeout triggers
|
||||
func (l *Conn) SetTimeout(timeout time.Duration) {
|
||||
if timeout > 0 {
|
||||
atomic.StoreInt64(&l.requestTimeout, int64(timeout))
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the next available messageID
|
||||
func (l *Conn) nextMessageID() int64 {
|
||||
if messageID, ok := <-l.chanMessageID; ok {
|
||||
return messageID
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// StartTLS sends the command to start a TLS session and then creates a new TLS Client
|
||||
func (l *Conn) StartTLS(config *tls.Config) error {
|
||||
if l.isTLS {
|
||||
return NewError(ErrorNetwork, errors.New("ldap: already encrypted"))
|
||||
}
|
||||
|
||||
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
|
||||
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
|
||||
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS")
|
||||
request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command"))
|
||||
packet.AppendChild(request)
|
||||
l.Debug.PrintPacket(packet)
|
||||
|
||||
msgCtx, err := l.sendMessageWithFlags(packet, startTLS)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
l.Debug.Printf("%d: waiting for response", msgCtx.id)
|
||||
|
||||
packetResponse, ok := <-msgCtx.responses
|
||||
if !ok {
|
||||
return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
|
||||
}
|
||||
packet, err = packetResponse.ReadPacket()
|
||||
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if l.Debug {
|
||||
if err := addLDAPDescriptions(packet); err != nil {
|
||||
l.Close()
|
||||
return err
|
||||
}
|
||||
l.Debug.PrintPacket(packet)
|
||||
}
|
||||
|
||||
if err := GetLDAPError(packet); err == nil {
|
||||
conn := tls.Client(l.conn, config)
|
||||
|
||||
if connErr := conn.Handshake(); connErr != nil {
|
||||
l.Close()
|
||||
return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", connErr))
|
||||
}
|
||||
|
||||
l.isTLS = true
|
||||
l.conn = conn
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
go l.reader()
|
||||
|
||||
return nil
|
||||
}
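
For servers that expose plain ldap:// with STARTTLS, the upgrade path looks like this (hedged sketch; host and tls.Config are placeholders, and TLSConnectionState is defined just below):

package main

import (
	"crypto/tls"
	"log"

	ldap "github.com/go-ldap/ldap/v3"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.com:389")
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Upgrade the existing connection before sending any credentials.
	if err := conn.StartTLS(&tls.Config{ServerName: "ldap.example.com"}); err != nil {
		log.Fatalf("starttls: %v", err)
	}
	if state, ok := conn.TLSConnectionState(); ok {
		log.Printf("negotiated TLS version 0x%x", state.Version)
	}
}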
|
||||
|
||||
// TLSConnectionState returns the client's TLS connection state.
|
||||
// The return values are their zero values if StartTLS did
|
||||
// not succeed.
|
||||
func (l *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) {
|
||||
tc, ok := l.conn.(*tls.Conn)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
return tc.ConnectionState(), true
|
||||
}
|
||||
|
||||
func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) {
|
||||
return l.sendMessageWithFlags(packet, 0)
|
||||
}
|
||||
|
||||
func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) {
|
||||
if l.IsClosing() {
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
|
||||
}
|
||||
l.messageMutex.Lock()
|
||||
l.Debug.Printf("flags&startTLS = %d", flags&startTLS)
|
||||
if l.isStartingTLS {
|
||||
l.messageMutex.Unlock()
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in starttls phase"))
|
||||
}
|
||||
if flags&startTLS != 0 {
|
||||
if l.outstandingRequests != 0 {
|
||||
l.messageMutex.Unlock()
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests"))
|
||||
}
|
||||
l.isStartingTLS = true
|
||||
}
|
||||
l.outstandingRequests++
|
||||
|
||||
l.messageMutex.Unlock()
|
||||
|
||||
responses := make(chan *PacketResponse)
|
||||
messageID := packet.Children[0].Value.(int64)
|
||||
message := &messagePacket{
|
||||
Op: MessageRequest,
|
||||
MessageID: messageID,
|
||||
Packet: packet,
|
||||
Context: &messageContext{
|
||||
id: messageID,
|
||||
done: make(chan struct{}),
|
||||
responses: responses,
|
||||
},
|
||||
}
|
||||
l.sendProcessMessage(message)
|
||||
return message.Context, nil
|
||||
}
|
||||
|
||||
func (l *Conn) finishMessage(msgCtx *messageContext) {
|
||||
close(msgCtx.done)
|
||||
|
||||
if l.IsClosing() {
|
||||
return
|
||||
}
|
||||
|
||||
l.messageMutex.Lock()
|
||||
l.outstandingRequests--
|
||||
if l.isStartingTLS {
|
||||
l.isStartingTLS = false
|
||||
}
|
||||
l.messageMutex.Unlock()
|
||||
|
||||
message := &messagePacket{
|
||||
Op: MessageFinish,
|
||||
MessageID: msgCtx.id,
|
||||
}
|
||||
l.sendProcessMessage(message)
|
||||
}
|
||||
|
||||
func (l *Conn) sendProcessMessage(message *messagePacket) bool {
|
||||
l.messageMutex.Lock()
|
||||
defer l.messageMutex.Unlock()
|
||||
if l.IsClosing() {
|
||||
return false
|
||||
}
|
||||
l.chanMessage <- message
|
||||
return true
|
||||
}
|
||||
|
||||
func (l *Conn) processMessages() {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
log.Printf("ldap: recovered panic in processMessages: %v", err)
|
||||
}
|
||||
for messageID, msgCtx := range l.messageContexts {
|
||||
// If we are closing due to an error, inform anyone who
|
||||
// is waiting about the error.
|
||||
if l.IsClosing() && l.closeErr.Load() != nil {
|
||||
msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)})
|
||||
}
|
||||
l.Debug.Printf("Closing channel for MessageID %d", messageID)
|
||||
close(msgCtx.responses)
|
||||
delete(l.messageContexts, messageID)
|
||||
}
|
||||
close(l.chanMessageID)
|
||||
close(l.chanConfirm)
|
||||
}()
|
||||
|
||||
var messageID int64 = 1
|
||||
for {
|
||||
select {
|
||||
case l.chanMessageID <- messageID:
|
||||
messageID++
|
||||
case message := <-l.chanMessage:
|
||||
switch message.Op {
|
||||
case MessageQuit:
|
||||
l.Debug.Printf("Shutting down - quit message received")
|
||||
return
|
||||
case MessageRequest:
|
||||
// Add to message list and write to network
|
||||
l.Debug.Printf("Sending message %d", message.MessageID)
|
||||
|
||||
buf := message.Packet.Bytes()
|
||||
_, err := l.conn.Write(buf)
|
||||
if err != nil {
|
||||
l.Debug.Printf("Error Sending Message: %s", err.Error())
|
||||
message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)})
|
||||
close(message.Context.responses)
|
||||
break
|
||||
}
|
||||
|
||||
// Only add to messageContexts if we were able to
|
||||
// successfully write the message.
|
||||
l.messageContexts[message.MessageID] = message.Context
|
||||
|
||||
// Add timeout if defined
|
||||
requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout))
|
||||
if requestTimeout > 0 {
|
||||
go func() {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
log.Printf("ldap: recovered panic in RequestTimeout: %v", err)
|
||||
}
|
||||
}()
|
||||
time.Sleep(requestTimeout)
|
||||
timeoutMessage := &messagePacket{
|
||||
Op: MessageTimeout,
|
||||
MessageID: message.MessageID,
|
||||
}
|
||||
l.sendProcessMessage(timeoutMessage)
|
||||
}()
|
||||
}
|
||||
case MessageResponse:
|
||||
l.Debug.Printf("Receiving message %d", message.MessageID)
|
||||
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
|
||||
msgCtx.sendResponse(&PacketResponse{message.Packet, nil})
|
||||
} else {
|
||||
log.Printf("Received unexpected message %d, %v", message.MessageID, l.IsClosing())
|
||||
l.Debug.PrintPacket(message.Packet)
|
||||
}
|
||||
case MessageTimeout:
|
||||
// Handle the timeout by closing the channel
|
||||
// All reads will return immediately
|
||||
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
|
||||
l.Debug.Printf("Receiving message timeout for %d", message.MessageID)
|
||||
msgCtx.sendResponse(&PacketResponse{message.Packet, errors.New("ldap: connection timed out")})
|
||||
delete(l.messageContexts, message.MessageID)
|
||||
close(msgCtx.responses)
|
||||
}
|
||||
case MessageFinish:
|
||||
l.Debug.Printf("Finished message %d", message.MessageID)
|
||||
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
|
||||
delete(l.messageContexts, message.MessageID)
|
||||
close(msgCtx.responses)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Conn) reader() {
|
||||
cleanstop := false
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
log.Printf("ldap: recovered panic in reader: %v", err)
|
||||
}
|
||||
if !cleanstop {
|
||||
l.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
if cleanstop {
|
||||
l.Debug.Printf("reader clean stopping (without closing the connection)")
|
||||
return
|
||||
}
|
||||
packet, err := ber.ReadPacket(l.conn)
|
||||
if err != nil {
|
||||
// A read error is expected here if we are closing the connection...
|
||||
if !l.IsClosing() {
|
||||
l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err))
|
||||
l.Debug.Printf("reader error: %s", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if err := addLDAPDescriptions(packet); err != nil {
|
||||
l.Debug.Printf("descriptions error: %s", err)
|
||||
}
|
||||
if len(packet.Children) == 0 {
|
||||
l.Debug.Printf("Received bad ldap packet")
|
||||
continue
|
||||
}
|
||||
l.messageMutex.Lock()
|
||||
if l.isStartingTLS {
|
||||
cleanstop = true
|
||||
}
|
||||
l.messageMutex.Unlock()
|
||||
message := &messagePacket{
|
||||
Op: MessageResponse,
|
||||
MessageID: packet.Children[0].Value.(int64),
|
||||
Packet: packet,
|
||||
}
|
||||
if !l.sendProcessMessage(message) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
499
vendor/github.com/go-ldap/ldap/v3/control.go
generated
vendored
Normal file
@ -0,0 +1,499 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
const (
|
||||
// ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt
|
||||
ControlTypePaging = "1.2.840.113556.1.4.319"
|
||||
// ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
|
||||
ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1"
|
||||
// ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
|
||||
ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4"
|
||||
// ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
|
||||
ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5"
|
||||
// ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296
|
||||
ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2"
|
||||
|
||||
// ControlTypeMicrosoftNotification - https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx
|
||||
ControlTypeMicrosoftNotification = "1.2.840.113556.1.4.528"
|
||||
// ControlTypeMicrosoftShowDeleted - https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx
|
||||
ControlTypeMicrosoftShowDeleted = "1.2.840.113556.1.4.417"
|
||||
)
|
||||
|
||||
// ControlTypeMap maps controls to text descriptions
|
||||
var ControlTypeMap = map[string]string{
|
||||
ControlTypePaging: "Paging",
|
||||
ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft",
|
||||
ControlTypeManageDsaIT: "Manage DSA IT",
|
||||
ControlTypeMicrosoftNotification: "Change Notification - Microsoft",
|
||||
ControlTypeMicrosoftShowDeleted: "Show Deleted Objects - Microsoft",
|
||||
}
|
||||
|
||||
// Control defines an interface controls provide to encode and describe themselves
|
||||
type Control interface {
|
||||
// GetControlType returns the OID
|
||||
GetControlType() string
|
||||
// Encode returns the ber packet representation
|
||||
Encode() *ber.Packet
|
||||
// String returns a human-readable description
|
||||
String() string
|
||||
}
|
||||
|
||||
// ControlString implements the Control interface for simple controls
|
||||
type ControlString struct {
|
||||
ControlType string
|
||||
Criticality bool
|
||||
ControlValue string
|
||||
}
|
||||
|
||||
// GetControlType returns the OID
|
||||
func (c *ControlString) GetControlType() string {
|
||||
return c.ControlType
|
||||
}
|
||||
|
||||
// Encode returns the ber packet representation
|
||||
func (c *ControlString) Encode() *ber.Packet {
|
||||
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")"))
|
||||
if c.Criticality {
|
||||
packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
|
||||
}
|
||||
if c.ControlValue != "" {
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(c.ControlValue), "Control Value"))
|
||||
}
|
||||
return packet
|
||||
}
|
||||
|
||||
// String returns a human-readable description
|
||||
func (c *ControlString) String() string {
|
||||
return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue)
|
||||
}
|
||||
|
||||
// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt
|
||||
type ControlPaging struct {
|
||||
// PagingSize indicates the page size
|
||||
PagingSize uint32
|
||||
// Cookie is an opaque value returned by the server to track a paging cursor
|
||||
Cookie []byte
|
||||
}
|
||||
|
||||
// GetControlType returns the OID
|
||||
func (c *ControlPaging) GetControlType() string {
|
||||
return ControlTypePaging
|
||||
}
|
||||
|
||||
// Encode returns the ber packet representation
|
||||
func (c *ControlPaging) Encode() *ber.Packet {
|
||||
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")"))
|
||||
|
||||
p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)")
|
||||
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value")
|
||||
seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size"))
|
||||
cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie")
|
||||
cookie.Value = c.Cookie
|
||||
cookie.Data.Write(c.Cookie)
|
||||
seq.AppendChild(cookie)
|
||||
p2.AppendChild(seq)
|
||||
|
||||
packet.AppendChild(p2)
|
||||
return packet
|
||||
}
|
||||
|
||||
// String returns a human-readable description
|
||||
func (c *ControlPaging) String() string {
|
||||
return fmt.Sprintf(
|
||||
"Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q",
|
||||
ControlTypeMap[ControlTypePaging],
|
||||
ControlTypePaging,
|
||||
false,
|
||||
c.PagingSize,
|
||||
c.Cookie)
|
||||
}
|
||||
|
||||
// SetCookie stores the given cookie in the paging control
|
||||
func (c *ControlPaging) SetCookie(cookie []byte) {
|
||||
c.Cookie = cookie
|
||||
}
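
ControlPaging is normally driven for you by Conn.SearchWithPaging (declared in the Client interface earlier in this diff). A hedged sketch; note that ldap.NewSearchRequest and the scope/deref constants come from search.go, which is not part of the excerpt shown here, so their exact signatures are assumed:

package example

import (
	"fmt"

	ldap "github.com/go-ldap/ldap/v3"
)

// listPeople pages through a subtree 100 entries at a time, letting
// SearchWithPaging manage the paging control and cookie for us.
func listPeople(conn *ldap.Conn) error {
	req := ldap.NewSearchRequest(
		"ou=people,dc=example,dc=com",
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		"(objectClass=inetOrgPerson)",
		[]string{"dn", "cn"},
		nil,
	)
	res, err := conn.SearchWithPaging(req, 100)
	if err != nil {
		return err
	}
	for _, entry := range res.Entries {
		fmt.Println(entry.DN)
	}
	return nil
}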
|
||||
|
||||
// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
|
||||
type ControlBeheraPasswordPolicy struct {
|
||||
// Expire contains the number of seconds before a password will expire
|
||||
Expire int64
|
||||
// Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password
|
||||
Grace int64
|
||||
// Error indicates the error code
|
||||
Error int8
|
||||
// ErrorString is a human readable error
|
||||
ErrorString string
|
||||
}
|
||||
|
||||
// GetControlType returns the OID
|
||||
func (c *ControlBeheraPasswordPolicy) GetControlType() string {
|
||||
return ControlTypeBeheraPasswordPolicy
|
||||
}
|
||||
|
||||
// Encode returns the ber packet representation
|
||||
func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet {
|
||||
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")"))
|
||||
|
||||
return packet
|
||||
}
|
||||
|
||||
// String returns a human-readable description
|
||||
func (c *ControlBeheraPasswordPolicy) String() string {
|
||||
return fmt.Sprintf(
|
||||
"Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s",
|
||||
ControlTypeMap[ControlTypeBeheraPasswordPolicy],
|
||||
ControlTypeBeheraPasswordPolicy,
|
||||
false,
|
||||
c.Expire,
|
||||
c.Grace,
|
||||
c.Error,
|
||||
c.ErrorString)
|
||||
}
|
||||
|
||||
// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
|
||||
type ControlVChuPasswordMustChange struct {
|
||||
// MustChange indicates if the password is required to be changed
|
||||
MustChange bool
|
||||
}
|
||||
|
||||
// GetControlType returns the OID
|
||||
func (c *ControlVChuPasswordMustChange) GetControlType() string {
|
||||
return ControlTypeVChuPasswordMustChange
|
||||
}
|
||||
|
||||
// Encode returns the ber packet representation
|
||||
func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet {
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns a human-readable description
|
||||
func (c *ControlVChuPasswordMustChange) String() string {
|
||||
return fmt.Sprintf(
|
||||
"Control Type: %s (%q) Criticality: %t MustChange: %v",
|
||||
ControlTypeMap[ControlTypeVChuPasswordMustChange],
|
||||
ControlTypeVChuPasswordMustChange,
|
||||
false,
|
||||
c.MustChange)
|
||||
}
|
||||
|
||||
// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
|
||||
type ControlVChuPasswordWarning struct {
|
||||
// Expire indicates the time in seconds until the password expires
|
||||
Expire int64
|
||||
}
|
||||
|
||||
// GetControlType returns the OID
|
||||
func (c *ControlVChuPasswordWarning) GetControlType() string {
|
||||
return ControlTypeVChuPasswordWarning
|
||||
}
|
||||
|
||||
// Encode returns the ber packet representation
|
||||
func (c *ControlVChuPasswordWarning) Encode() *ber.Packet {
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns a human-readable description
|
||||
func (c *ControlVChuPasswordWarning) String() string {
|
||||
return fmt.Sprintf(
|
||||
"Control Type: %s (%q) Criticality: %t Expire: %b",
|
||||
ControlTypeMap[ControlTypeVChuPasswordWarning],
|
||||
ControlTypeVChuPasswordWarning,
|
||||
false,
|
||||
c.Expire)
|
||||
}
|
||||
|
||||
// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296
|
||||
type ControlManageDsaIT struct {
|
||||
// Criticality indicates if this control is required
|
||||
Criticality bool
|
||||
}
|
||||
|
||||
// GetControlType returns the OID
|
||||
func (c *ControlManageDsaIT) GetControlType() string {
	return ControlTypeManageDsaIT
}

// Encode returns the ber packet representation
func (c *ControlManageDsaIT) Encode() *ber.Packet {
	//FIXME
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")"))
	if c.Criticality {
		packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
	}
	return packet
}

// String returns a human-readable description
func (c *ControlManageDsaIT) String() string {
	return fmt.Sprintf(
		"Control Type: %s (%q) Criticality: %t",
		ControlTypeMap[ControlTypeManageDsaIT],
		ControlTypeManageDsaIT,
		c.Criticality)
}

// NewControlManageDsaIT returns a ControlManageDsaIT control
func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT {
	return &ControlManageDsaIT{Criticality: Criticality}
}

// ControlMicrosoftNotification implements the control described in https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx
type ControlMicrosoftNotification struct{}

// GetControlType returns the OID
func (c *ControlMicrosoftNotification) GetControlType() string {
	return ControlTypeMicrosoftNotification
}

// Encode returns the ber packet representation
func (c *ControlMicrosoftNotification) Encode() *ber.Packet {
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftNotification, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftNotification]+")"))

	return packet
}

// String returns a human-readable description
func (c *ControlMicrosoftNotification) String() string {
	return fmt.Sprintf(
		"Control Type: %s (%q)",
		ControlTypeMap[ControlTypeMicrosoftNotification],
		ControlTypeMicrosoftNotification)
}

// NewControlMicrosoftNotification returns a ControlMicrosoftNotification control
func NewControlMicrosoftNotification() *ControlMicrosoftNotification {
	return &ControlMicrosoftNotification{}
}

// ControlMicrosoftShowDeleted implements the control described in https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx
type ControlMicrosoftShowDeleted struct{}

// GetControlType returns the OID
func (c *ControlMicrosoftShowDeleted) GetControlType() string {
	return ControlTypeMicrosoftShowDeleted
}

// Encode returns the ber packet representation
func (c *ControlMicrosoftShowDeleted) Encode() *ber.Packet {
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftShowDeleted, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftShowDeleted]+")"))

	return packet
}

// String returns a human-readable description
func (c *ControlMicrosoftShowDeleted) String() string {
	return fmt.Sprintf(
		"Control Type: %s (%q)",
		ControlTypeMap[ControlTypeMicrosoftShowDeleted],
		ControlTypeMicrosoftShowDeleted)
}

// NewControlMicrosoftShowDeleted returns a ControlMicrosoftShowDeleted control
func NewControlMicrosoftShowDeleted() *ControlMicrosoftShowDeleted {
	return &ControlMicrosoftShowDeleted{}
}

// FindControl returns the first control of the given type in the list, or nil
func FindControl(controls []Control, controlType string) Control {
	for _, c := range controls {
		if c.GetControlType() == controlType {
			return c
		}
	}
	return nil
}

// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made
func DecodeControl(packet *ber.Packet) (Control, error) {
	var (
		ControlType = ""
		Criticality = false
		value *ber.Packet
	)

	switch len(packet.Children) {
	case 0:
		// at least one child is required for control type
		return nil, fmt.Errorf("at least one child is required for control type")

	case 1:
		// just type, no criticality or value
		packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
		ControlType = packet.Children[0].Value.(string)

	case 2:
		packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
		ControlType = packet.Children[0].Value.(string)

		// Children[1] could be criticality or value (both are optional)
		// duck-type on whether this is a boolean
		if _, ok := packet.Children[1].Value.(bool); ok {
			packet.Children[1].Description = "Criticality"
			Criticality = packet.Children[1].Value.(bool)
		} else {
			packet.Children[1].Description = "Control Value"
			value = packet.Children[1]
		}

	case 3:
		packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
		ControlType = packet.Children[0].Value.(string)

		packet.Children[1].Description = "Criticality"
		Criticality = packet.Children[1].Value.(bool)

		packet.Children[2].Description = "Control Value"
		value = packet.Children[2]

	default:
		// more than 3 children is invalid
		return nil, fmt.Errorf("more than 3 children is invalid for controls")
	}

	switch ControlType {
	case ControlTypeManageDsaIT:
		return NewControlManageDsaIT(Criticality), nil
	case ControlTypePaging:
		value.Description += " (Paging)"
		c := new(ControlPaging)
		if value.Value != nil {
			valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
			if err != nil {
				return nil, fmt.Errorf("failed to decode data bytes: %s", err)
			}
			value.Data.Truncate(0)
			value.Value = nil
			value.AppendChild(valueChildren)
		}
		value = value.Children[0]
		value.Description = "Search Control Value"
		value.Children[0].Description = "Paging Size"
		value.Children[1].Description = "Cookie"
		c.PagingSize = uint32(value.Children[0].Value.(int64))
		c.Cookie = value.Children[1].Data.Bytes()
		value.Children[1].Value = c.Cookie
		return c, nil
	case ControlTypeBeheraPasswordPolicy:
		value.Description += " (Password Policy - Behera)"
		c := NewControlBeheraPasswordPolicy()
		if value.Value != nil {
			valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
			if err != nil {
				return nil, fmt.Errorf("failed to decode data bytes: %s", err)
			}
			value.Data.Truncate(0)
			value.Value = nil
			value.AppendChild(valueChildren)
		}

		sequence := value.Children[0]

		for _, child := range sequence.Children {
			if child.Tag == 0 {
				//Warning
				warningPacket := child.Children[0]
				packet, err := ber.DecodePacketErr(warningPacket.Data.Bytes())
				if err != nil {
					return nil, fmt.Errorf("failed to decode data bytes: %s", err)
				}
				val, ok := packet.Value.(int64)
				if ok {
					if warningPacket.Tag == 0 {
						//timeBeforeExpiration
						c.Expire = val
						warningPacket.Value = c.Expire
					} else if warningPacket.Tag == 1 {
						//graceAuthNsRemaining
						c.Grace = val
						warningPacket.Value = c.Grace
					}
				}
			} else if child.Tag == 1 {
				// Error
				packet, err := ber.DecodePacketErr(child.Data.Bytes())
				if err != nil {
					return nil, fmt.Errorf("failed to decode data bytes: %s", err)
				}
				val, ok := packet.Value.(int8)
				if !ok {
					// what to do?
					val = -1
				}
				c.Error = val
				child.Value = c.Error
				c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error]
			}
		}
		return c, nil
	case ControlTypeVChuPasswordMustChange:
		c := &ControlVChuPasswordMustChange{MustChange: true}
		return c, nil
	case ControlTypeVChuPasswordWarning:
		c := &ControlVChuPasswordWarning{Expire: -1}
		expireStr := ber.DecodeString(value.Data.Bytes())

		expire, err := strconv.ParseInt(expireStr, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("failed to parse value as int: %s", err)
		}
		c.Expire = expire
		value.Value = c.Expire

		return c, nil
	case ControlTypeMicrosoftNotification:
		return NewControlMicrosoftNotification(), nil
	case ControlTypeMicrosoftShowDeleted:
		return NewControlMicrosoftShowDeleted(), nil
	default:
		c := new(ControlString)
		c.ControlType = ControlType
		c.Criticality = Criticality
		if value != nil {
			c.ControlValue = value.Value.(string)
		}
		return c, nil
	}
}

// NewControlString returns a generic control
func NewControlString(controlType string, criticality bool, controlValue string) *ControlString {
	return &ControlString{
		ControlType: controlType,
		Criticality: criticality,
		ControlValue: controlValue,
	}
}

// NewControlPaging returns a paging control
func NewControlPaging(pagingSize uint32) *ControlPaging {
	return &ControlPaging{PagingSize: pagingSize}
}

// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy
func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy {
	return &ControlBeheraPasswordPolicy{
		Expire: -1,
		Grace: -1,
		Error: -1,
	}
}

func encodeControls(controls []Control) *ber.Packet {
	packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls")
	for _, control := range controls {
		packet.AppendChild(control.Encode())
	}
	return packet
}
30
vendor/github.com/go-ldap/ldap/v3/debug.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
package ldap

import (
	"log"

	ber "github.com/go-asn1-ber/asn1-ber"
)

// debugging type
// - has a Printf method to write the debug output
type debugging bool

// Enable controls debugging mode.
func (debug *debugging) Enable(b bool) {
	*debug = debugging(b)
}

// Printf writes debug output.
func (debug debugging) Printf(format string, args ...interface{}) {
	if debug {
		log.Printf(format, args...)
	}
}

// PrintPacket dumps a packet.
func (debug debugging) PrintPacket(packet *ber.Packet) {
	if debug {
		ber.PrintPacket(packet)
	}
}
59
vendor/github.com/go-ldap/ldap/v3/del.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
package ldap

import (
	"log"

	ber "github.com/go-asn1-ber/asn1-ber"
)

// DelRequest implements an LDAP deletion request
type DelRequest struct {
	// DN is the name of the directory entry to delete
	DN string
	// Controls hold optional controls to send with the request
	Controls []Control
}

func (req *DelRequest) appendTo(envelope *ber.Packet) error {
	pkt := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, req.DN, "Del Request")
	pkt.Data.Write([]byte(req.DN))

	envelope.AppendChild(pkt)
	if len(req.Controls) > 0 {
		envelope.AppendChild(encodeControls(req.Controls))
	}

	return nil
}

// NewDelRequest creates a delete request for the given DN and controls
func NewDelRequest(DN string, Controls []Control) *DelRequest {
	return &DelRequest{
		DN: DN,
		Controls: Controls,
	}
}

// Del executes the given delete request
func (l *Conn) Del(delRequest *DelRequest) error {
	msgCtx, err := l.doRequest(delRequest)
	if err != nil {
		return err
	}
	defer l.finishMessage(msgCtx)

	packet, err := l.readPacket(msgCtx)
	if err != nil {
		return err
	}

	if packet.Children[1].Tag == ApplicationDelResponse {
		err := GetLDAPError(packet)
		if err != nil {
			return err
		}
	} else {
		log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
	}
	return nil
}
207
vendor/github.com/go-ldap/ldap/v3/dn.go
generated
vendored
Normal file
@ -0,0 +1,207 @@
package ldap

import (
	"bytes"
	enchex "encoding/hex"
	"errors"
	"fmt"
	"strings"

	ber "github.com/go-asn1-ber/asn1-ber"
)

// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514
type AttributeTypeAndValue struct {
	// Type is the attribute type
	Type string
	// Value is the attribute value
	Value string
}

// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514
type RelativeDN struct {
	Attributes []*AttributeTypeAndValue
}

// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514
type DN struct {
	RDNs []*RelativeDN
}

// ParseDN returns a distinguishedName or an error.
// The function respects https://tools.ietf.org/html/rfc4514
func ParseDN(str string) (*DN, error) {
	dn := new(DN)
	dn.RDNs = make([]*RelativeDN, 0)
	rdn := new(RelativeDN)
	rdn.Attributes = make([]*AttributeTypeAndValue, 0)
	buffer := bytes.Buffer{}
	attribute := new(AttributeTypeAndValue)
	escaping := false

	unescapedTrailingSpaces := 0
	stringFromBuffer := func() string {
		s := buffer.String()
		s = s[0 : len(s)-unescapedTrailingSpaces]
		buffer.Reset()
		unescapedTrailingSpaces = 0
		return s
	}

	for i := 0; i < len(str); i++ {
		char := str[i]
		switch {
		case escaping:
			unescapedTrailingSpaces = 0
			escaping = false
			switch char {
			case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\':
				buffer.WriteByte(char)
				continue
			}
			// Not a special character, assume hex encoded octet
			if len(str) == i+1 {
				return nil, errors.New("got corrupted escaped character")
			}

			dst := []byte{0}
			n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2]))
			if err != nil {
				return nil, fmt.Errorf("failed to decode escaped character: %s", err)
			} else if n != 1 {
				return nil, fmt.Errorf("expected 1 byte when un-escaping, got %d", n)
			}
			buffer.WriteByte(dst[0])
			i++
		case char == '\\':
			unescapedTrailingSpaces = 0
			escaping = true
		case char == '=':
			attribute.Type = stringFromBuffer()
			// Special case: If the first character in the value is # the
			// following data is BER encoded so we can just fast forward
			// and decode.
			if len(str) > i+1 && str[i+1] == '#' {
				i += 2
				index := strings.IndexAny(str[i:], ",+")
				data := str
				if index > 0 {
					data = str[i : i+index]
				} else {
					data = str[i:]
				}
				rawBER, err := enchex.DecodeString(data)
				if err != nil {
					return nil, fmt.Errorf("failed to decode BER encoding: %s", err)
				}
				packet, err := ber.DecodePacketErr(rawBER)
				if err != nil {
					return nil, fmt.Errorf("failed to decode BER packet: %s", err)
				}
				buffer.WriteString(packet.Data.String())
				i += len(data) - 1
			}
		case char == ',' || char == '+':
			// We're done with this RDN or value, push it
			if len(attribute.Type) == 0 {
				return nil, errors.New("incomplete type, value pair")
			}
			attribute.Value = stringFromBuffer()
			rdn.Attributes = append(rdn.Attributes, attribute)
			attribute = new(AttributeTypeAndValue)
			if char == ',' {
				dn.RDNs = append(dn.RDNs, rdn)
				rdn = new(RelativeDN)
				rdn.Attributes = make([]*AttributeTypeAndValue, 0)
			}
		case char == ' ' && buffer.Len() == 0:
			// ignore unescaped leading spaces
			continue
		default:
			if char == ' ' {
				// Track unescaped spaces in case they are trailing and we need to remove them
				unescapedTrailingSpaces++
			} else {
				// Reset if we see a non-space char
				unescapedTrailingSpaces = 0
			}
			buffer.WriteByte(char)
		}
	}
	if buffer.Len() > 0 {
		if len(attribute.Type) == 0 {
			return nil, errors.New("DN ended with incomplete type, value pair")
		}
		attribute.Value = stringFromBuffer()
		rdn.Attributes = append(rdn.Attributes, attribute)
		dn.RDNs = append(dn.RDNs, rdn)
	}
	return dn, nil
}

// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
// Returns true if they have the same number of relative distinguished names
// and corresponding relative distinguished names (by position) are the same.
func (d *DN) Equal(other *DN) bool {
	if len(d.RDNs) != len(other.RDNs) {
		return false
	}
	for i := range d.RDNs {
		if !d.RDNs[i].Equal(other.RDNs[i]) {
			return false
		}
	}
	return true
}

// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com"
// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com"
// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com"
func (d *DN) AncestorOf(other *DN) bool {
	if len(d.RDNs) >= len(other.RDNs) {
		return false
	}
	// Take the last `len(d.RDNs)` RDNs from the other DN to compare against
	otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
	for i := range d.RDNs {
		if !d.RDNs[i].Equal(otherRDNs[i]) {
			return false
		}
	}
	return true
}

// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues
// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type.
// The order of attributes is not significant.
// Case of attribute types is not significant.
func (r *RelativeDN) Equal(other *RelativeDN) bool {
	if len(r.Attributes) != len(other.Attributes) {
		return false
	}
	return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes)
}

func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool {
	for _, attr := range attrs {
		found := false
		for _, myattr := range r.Attributes {
			if myattr.Equal(attr) {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
// Case of the attribute type is not significant
func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool {
	return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value
}
4
vendor/github.com/go-ldap/ldap/v3/doc.go
generated
vendored
Normal file
@ -0,0 +1,4 @@
/*
Package ldap provides basic LDAP v3 functionality.
*/
package ldap
236
vendor/github.com/go-ldap/ldap/v3/error.go
generated
vendored
Normal file
@ -0,0 +1,236 @@
package ldap

import (
	"fmt"

	ber "github.com/go-asn1-ber/asn1-ber"
)

// LDAP Result Codes
const (
	LDAPResultSuccess = 0
	LDAPResultOperationsError = 1
	LDAPResultProtocolError = 2
	LDAPResultTimeLimitExceeded = 3
	LDAPResultSizeLimitExceeded = 4
	LDAPResultCompareFalse = 5
	LDAPResultCompareTrue = 6
	LDAPResultAuthMethodNotSupported = 7
	LDAPResultStrongAuthRequired = 8
	LDAPResultReferral = 10
	LDAPResultAdminLimitExceeded = 11
	LDAPResultUnavailableCriticalExtension = 12
	LDAPResultConfidentialityRequired = 13
	LDAPResultSaslBindInProgress = 14
	LDAPResultNoSuchAttribute = 16
	LDAPResultUndefinedAttributeType = 17
	LDAPResultInappropriateMatching = 18
	LDAPResultConstraintViolation = 19
	LDAPResultAttributeOrValueExists = 20
	LDAPResultInvalidAttributeSyntax = 21
	LDAPResultNoSuchObject = 32
	LDAPResultAliasProblem = 33
	LDAPResultInvalidDNSyntax = 34
	LDAPResultIsLeaf = 35
	LDAPResultAliasDereferencingProblem = 36
	LDAPResultInappropriateAuthentication = 48
	LDAPResultInvalidCredentials = 49
	LDAPResultInsufficientAccessRights = 50
	LDAPResultBusy = 51
	LDAPResultUnavailable = 52
	LDAPResultUnwillingToPerform = 53
	LDAPResultLoopDetect = 54
	LDAPResultSortControlMissing = 60
	LDAPResultOffsetRangeError = 61
	LDAPResultNamingViolation = 64
	LDAPResultObjectClassViolation = 65
	LDAPResultNotAllowedOnNonLeaf = 66
	LDAPResultNotAllowedOnRDN = 67
	LDAPResultEntryAlreadyExists = 68
	LDAPResultObjectClassModsProhibited = 69
	LDAPResultResultsTooLarge = 70
	LDAPResultAffectsMultipleDSAs = 71
	LDAPResultVirtualListViewErrorOrControlError = 76
	LDAPResultOther = 80
	LDAPResultServerDown = 81
	LDAPResultLocalError = 82
	LDAPResultEncodingError = 83
	LDAPResultDecodingError = 84
	LDAPResultTimeout = 85
	LDAPResultAuthUnknown = 86
	LDAPResultFilterError = 87
	LDAPResultUserCanceled = 88
	LDAPResultParamError = 89
	LDAPResultNoMemory = 90
	LDAPResultConnectError = 91
	LDAPResultNotSupported = 92
	LDAPResultControlNotFound = 93
	LDAPResultNoResultsReturned = 94
	LDAPResultMoreResultsToReturn = 95
	LDAPResultClientLoop = 96
	LDAPResultReferralLimitExceeded = 97
	LDAPResultInvalidResponse = 100
	LDAPResultAmbiguousResponse = 101
	LDAPResultTLSNotSupported = 112
	LDAPResultIntermediateResponse = 113
	LDAPResultUnknownType = 114
	LDAPResultCanceled = 118
	LDAPResultNoSuchOperation = 119
	LDAPResultTooLate = 120
	LDAPResultCannotCancel = 121
	LDAPResultAssertionFailed = 122
	LDAPResultAuthorizationDenied = 123
	LDAPResultSyncRefreshRequired = 4096

	ErrorNetwork = 200
	ErrorFilterCompile = 201
	ErrorFilterDecompile = 202
	ErrorDebugging = 203
	ErrorUnexpectedMessage = 204
	ErrorUnexpectedResponse = 205
	ErrorEmptyPassword = 206
)

// LDAPResultCodeMap contains string descriptions for LDAP error codes
var LDAPResultCodeMap = map[uint16]string{
	LDAPResultSuccess: "Success",
	LDAPResultOperationsError: "Operations Error",
	LDAPResultProtocolError: "Protocol Error",
	LDAPResultTimeLimitExceeded: "Time Limit Exceeded",
	LDAPResultSizeLimitExceeded: "Size Limit Exceeded",
	LDAPResultCompareFalse: "Compare False",
	LDAPResultCompareTrue: "Compare True",
	LDAPResultAuthMethodNotSupported: "Auth Method Not Supported",
	LDAPResultStrongAuthRequired: "Strong Auth Required",
	LDAPResultReferral: "Referral",
	LDAPResultAdminLimitExceeded: "Admin Limit Exceeded",
	LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension",
	LDAPResultConfidentialityRequired: "Confidentiality Required",
	LDAPResultSaslBindInProgress: "Sasl Bind In Progress",
	LDAPResultNoSuchAttribute: "No Such Attribute",
	LDAPResultUndefinedAttributeType: "Undefined Attribute Type",
	LDAPResultInappropriateMatching: "Inappropriate Matching",
	LDAPResultConstraintViolation: "Constraint Violation",
	LDAPResultAttributeOrValueExists: "Attribute Or Value Exists",
	LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax",
	LDAPResultNoSuchObject: "No Such Object",
	LDAPResultAliasProblem: "Alias Problem",
	LDAPResultInvalidDNSyntax: "Invalid DN Syntax",
	LDAPResultIsLeaf: "Is Leaf",
	LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem",
	LDAPResultInappropriateAuthentication: "Inappropriate Authentication",
	LDAPResultInvalidCredentials: "Invalid Credentials",
	LDAPResultInsufficientAccessRights: "Insufficient Access Rights",
	LDAPResultBusy: "Busy",
	LDAPResultUnavailable: "Unavailable",
	LDAPResultUnwillingToPerform: "Unwilling To Perform",
	LDAPResultLoopDetect: "Loop Detect",
	LDAPResultSortControlMissing: "Sort Control Missing",
	LDAPResultOffsetRangeError: "Result Offset Range Error",
	LDAPResultNamingViolation: "Naming Violation",
	LDAPResultObjectClassViolation: "Object Class Violation",
	LDAPResultResultsTooLarge: "Results Too Large",
	LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf",
	LDAPResultNotAllowedOnRDN: "Not Allowed On RDN",
	LDAPResultEntryAlreadyExists: "Entry Already Exists",
	LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited",
	LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs",
	LDAPResultVirtualListViewErrorOrControlError: "Failed because of a problem related to the virtual list view",
	LDAPResultOther: "Other",
	LDAPResultServerDown: "Cannot establish a connection",
	LDAPResultLocalError: "An error occurred",
	LDAPResultEncodingError: "LDAP encountered an error while encoding",
	LDAPResultDecodingError: "LDAP encountered an error while decoding",
	LDAPResultTimeout: "LDAP timeout while waiting for a response from the server",
	LDAPResultAuthUnknown: "The auth method requested in a bind request is unknown",
	LDAPResultFilterError: "An error occurred while encoding the given search filter",
	LDAPResultUserCanceled: "The user canceled the operation",
	LDAPResultParamError: "An invalid parameter was specified",
	LDAPResultNoMemory: "Out of memory error",
	LDAPResultConnectError: "A connection to the server could not be established",
	LDAPResultNotSupported: "An attempt has been made to use a feature not supported LDAP",
	LDAPResultControlNotFound: "The controls required to perform the requested operation were not found",
	LDAPResultNoResultsReturned: "No results were returned from the server",
	LDAPResultMoreResultsToReturn: "There are more results in the chain of results",
	LDAPResultClientLoop: "A loop has been detected. For example when following referrals",
	LDAPResultReferralLimitExceeded: "The referral hop limit has been exceeded",
	LDAPResultCanceled: "Operation was canceled",
	LDAPResultNoSuchOperation: "Server has no knowledge of the operation requested for cancellation",
	LDAPResultTooLate: "Too late to cancel the outstanding operation",
	LDAPResultCannotCancel: "The identified operation does not support cancellation or the cancel operation cannot be performed",
	LDAPResultAssertionFailed: "An assertion control given in the LDAP operation evaluated to false causing the operation to not be performed",
	LDAPResultSyncRefreshRequired: "Refresh Required",
	LDAPResultInvalidResponse: "Invalid Response",
	LDAPResultAmbiguousResponse: "Ambiguous Response",
	LDAPResultTLSNotSupported: "Tls Not Supported",
	LDAPResultIntermediateResponse: "Intermediate Response",
	LDAPResultUnknownType: "Unknown Type",
	LDAPResultAuthorizationDenied: "Authorization Denied",

	ErrorNetwork: "Network Error",
	ErrorFilterCompile: "Filter Compile Error",
	ErrorFilterDecompile: "Filter Decompile Error",
	ErrorDebugging: "Debugging Error",
	ErrorUnexpectedMessage: "Unexpected Message",
	ErrorUnexpectedResponse: "Unexpected Response",
	ErrorEmptyPassword: "Empty password not allowed by the client",
}

// Error holds LDAP error information
type Error struct {
	// Err is the underlying error
	Err error
	// ResultCode is the LDAP error code
	ResultCode uint16
	// MatchedDN is the matchedDN returned if any
	MatchedDN string
}

func (e *Error) Error() string {
	return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
}

// GetLDAPError creates an Error out of a BER packet representing a LDAPResult
// The return is an error object. It can be casted to a Error structure.
// This function returns nil if resultCode in the LDAPResult sequence is success(0).
func GetLDAPError(packet *ber.Packet) error {
	if packet == nil {
		return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty packet")}
	}

	if len(packet.Children) >= 2 {
		response := packet.Children[1]
		if response == nil {
			return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty response in packet")}
		}
		if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 {
			resultCode := uint16(response.Children[0].Value.(int64))
			if resultCode == 0 { // No error
				return nil
			}
			return &Error{ResultCode: resultCode, MatchedDN: response.Children[1].Value.(string),
				Err: fmt.Errorf("%s", response.Children[2].Value.(string))}
		}
	}

	return &Error{ResultCode: ErrorNetwork, Err: fmt.Errorf("Invalid packet format")}
}

// NewError creates an LDAP error with the given code and underlying error
func NewError(resultCode uint16, err error) error {
	return &Error{ResultCode: resultCode, Err: err}
}

// IsErrorWithCode returns true if the given error is an LDAP error with the given result code
func IsErrorWithCode(err error, desiredResultCode uint16) bool {
	if err == nil {
		return false
	}

	serverError, ok := err.(*Error)
	if !ok {
		return false
	}

	return serverError.ResultCode == desiredResultCode
}
487
vendor/github.com/go-ldap/ldap/v3/filter.go
generated
vendored
Normal file
@ -0,0 +1,487 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
hexpac "encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// Filter choices
|
||||
const (
|
||||
FilterAnd = 0
|
||||
FilterOr = 1
|
||||
FilterNot = 2
|
||||
FilterEqualityMatch = 3
|
||||
FilterSubstrings = 4
|
||||
FilterGreaterOrEqual = 5
|
||||
FilterLessOrEqual = 6
|
||||
FilterPresent = 7
|
||||
FilterApproxMatch = 8
|
||||
FilterExtensibleMatch = 9
|
||||
)
|
||||
|
||||
// FilterMap contains human readable descriptions of Filter choices
|
||||
var FilterMap = map[uint64]string{
|
||||
FilterAnd: "And",
|
||||
FilterOr: "Or",
|
||||
FilterNot: "Not",
|
||||
FilterEqualityMatch: "Equality Match",
|
||||
FilterSubstrings: "Substrings",
|
||||
FilterGreaterOrEqual: "Greater Or Equal",
|
||||
FilterLessOrEqual: "Less Or Equal",
|
||||
FilterPresent: "Present",
|
||||
FilterApproxMatch: "Approx Match",
|
||||
FilterExtensibleMatch: "Extensible Match",
|
||||
}
|
||||
|
||||
// SubstringFilter options
|
||||
const (
|
||||
FilterSubstringsInitial = 0
|
||||
FilterSubstringsAny = 1
|
||||
FilterSubstringsFinal = 2
|
||||
)
|
||||
|
||||
// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices
|
||||
var FilterSubstringsMap = map[uint64]string{
|
||||
FilterSubstringsInitial: "Substrings Initial",
|
||||
FilterSubstringsAny: "Substrings Any",
|
||||
FilterSubstringsFinal: "Substrings Final",
|
||||
}
|
||||
|
||||
// MatchingRuleAssertion choices
|
||||
const (
|
||||
MatchingRuleAssertionMatchingRule = 1
|
||||
MatchingRuleAssertionType = 2
|
||||
MatchingRuleAssertionMatchValue = 3
|
||||
MatchingRuleAssertionDNAttributes = 4
|
||||
)
|
||||
|
||||
// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices
|
||||
var MatchingRuleAssertionMap = map[uint64]string{
|
||||
MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule",
|
||||
MatchingRuleAssertionType: "Matching Rule Assertion Type",
|
||||
MatchingRuleAssertionMatchValue: "Matching Rule Assertion Match Value",
|
||||
MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes",
|
||||
}
|
||||
|
||||
var _SymbolAny = []byte{'*'}
|
||||
|
||||
// CompileFilter converts a string representation of a filter into a BER-encoded packet
|
||||
func CompileFilter(filter string) (*ber.Packet, error) {
|
||||
if len(filter) == 0 || filter[0] != '(' {
|
||||
return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('"))
|
||||
}
|
||||
packet, pos, err := compileFilter(filter, 1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch {
|
||||
case pos > len(filter):
|
||||
return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
|
||||
case pos < len(filter):
|
||||
return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:])))
|
||||
}
|
||||
return packet, nil
|
||||
}
|
||||
|
||||
// DecompileFilter converts a packet representation of a filter into a string representation
|
||||
func DecompileFilter(packet *ber.Packet) (_ string, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter"))
|
||||
}
|
||||
}()
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
buf.WriteByte('(')
|
||||
childStr := ""
|
||||
|
||||
switch packet.Tag {
|
||||
case FilterAnd:
|
||||
buf.WriteByte('&')
|
||||
for _, child := range packet.Children {
|
||||
childStr, err = DecompileFilter(child)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
buf.WriteString(childStr)
|
||||
}
|
||||
case FilterOr:
|
||||
buf.WriteByte('|')
|
||||
for _, child := range packet.Children {
|
||||
childStr, err = DecompileFilter(child)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
buf.WriteString(childStr)
|
||||
}
|
||||
case FilterNot:
|
||||
buf.WriteByte('!')
|
||||
childStr, err = DecompileFilter(packet.Children[0])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
buf.WriteString(childStr)
|
||||
|
||||
case FilterSubstrings:
|
||||
buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
|
||||
buf.WriteByte('=')
|
||||
for i, child := range packet.Children[1].Children {
|
||||
if i == 0 && child.Tag != FilterSubstringsInitial {
|
||||
buf.Write(_SymbolAny)
|
||||
}
|
||||
buf.WriteString(EscapeFilter(ber.DecodeString(child.Data.Bytes())))
|
||||
if child.Tag != FilterSubstringsFinal {
|
||||
buf.Write(_SymbolAny)
|
||||
}
|
||||
}
|
||||
case FilterEqualityMatch:
|
||||
buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
|
||||
buf.WriteByte('=')
|
||||
buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())))
|
||||
case FilterGreaterOrEqual:
|
||||
buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
|
||||
buf.WriteString(">=")
|
||||
buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())))
|
||||
case FilterLessOrEqual:
|
||||
buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
|
||||
buf.WriteString("<=")
|
||||
buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())))
|
||||
case FilterPresent:
|
||||
buf.WriteString(ber.DecodeString(packet.Data.Bytes()))
|
||||
buf.WriteString("=*")
|
||||
case FilterApproxMatch:
|
||||
buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
|
||||
buf.WriteString("~=")
|
||||
buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())))
|
||||
case FilterExtensibleMatch:
|
||||
attr := ""
|
||||
dnAttributes := false
|
||||
matchingRule := ""
|
||||
value := ""
|
||||
|
||||
for _, child := range packet.Children {
|
||||
switch child.Tag {
|
||||
case MatchingRuleAssertionMatchingRule:
|
||||
matchingRule = ber.DecodeString(child.Data.Bytes())
|
||||
case MatchingRuleAssertionType:
|
||||
attr = ber.DecodeString(child.Data.Bytes())
|
||||
case MatchingRuleAssertionMatchValue:
|
||||
value = ber.DecodeString(child.Data.Bytes())
|
||||
case MatchingRuleAssertionDNAttributes:
|
||||
dnAttributes = child.Value.(bool)
|
||||
}
|
||||
}
|
||||
|
||||
if len(attr) > 0 {
|
||||
buf.WriteString(attr)
|
||||
}
|
||||
if dnAttributes {
|
||||
buf.WriteString(":dn")
|
||||
}
|
||||
if len(matchingRule) > 0 {
|
||||
buf.WriteString(":")
|
||||
buf.WriteString(matchingRule)
|
||||
}
|
||||
buf.WriteString(":=")
|
||||
buf.WriteString(EscapeFilter(value))
|
||||
}
|
||||
|
||||
buf.WriteByte(')')
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) {
|
||||
for pos < len(filter) && filter[pos] == '(' {
|
||||
child, newPos, err := compileFilter(filter, pos+1)
|
||||
if err != nil {
|
||||
return pos, err
|
||||
}
|
||||
pos = newPos
|
||||
parent.AppendChild(child)
|
||||
}
|
||||
if pos == len(filter) {
|
||||
return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
|
||||
}
|
||||
|
||||
return pos + 1, nil
|
||||
}
|
||||
|
||||
func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
|
||||
var (
|
||||
packet *ber.Packet
|
||||
err error
|
||||
)
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter"))
|
||||
}
|
||||
}()
|
||||
newPos := pos
|
||||
|
||||
currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:])
|
||||
|
||||
switch currentRune {
|
||||
case utf8.RuneError:
|
||||
return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
|
||||
case '(':
|
||||
packet, newPos, err = compileFilter(filter, pos+currentWidth)
|
||||
newPos++
|
||||
return packet, newPos, err
|
||||
case '&':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd])
|
||||
newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
|
||||
return packet, newPos, err
|
||||
case '|':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr])
|
||||
newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
|
||||
return packet, newPos, err
|
||||
case '!':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot])
|
||||
var child *ber.Packet
|
||||
child, newPos, err = compileFilter(filter, pos+currentWidth)
|
||||
packet.AppendChild(child)
|
||||
return packet, newPos, err
|
||||
default:
|
||||
const (
|
||||
stateReadingAttr = 0
|
||||
stateReadingExtensibleMatchingRule = 1
|
||||
stateReadingCondition = 2
|
||||
)
|
||||
|
||||
state := stateReadingAttr
|
||||
attribute := bytes.NewBuffer(nil)
|
||||
extensibleDNAttributes := false
|
||||
extensibleMatchingRule := bytes.NewBuffer(nil)
|
||||
condition := bytes.NewBuffer(nil)
|
||||
|
||||
for newPos < len(filter) {
|
||||
remainingFilter := filter[newPos:]
|
||||
currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter)
|
||||
if currentRune == ')' {
|
||||
break
|
||||
}
|
||||
if currentRune == utf8.RuneError {
|
||||
return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
|
||||
}
|
||||
|
||||
switch state {
|
||||
case stateReadingAttr:
|
||||
switch {
|
||||
// Extensible rule, with only DN-matching
|
||||
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
|
||||
extensibleDNAttributes = true
|
||||
state = stateReadingCondition
|
||||
newPos += 5
|
||||
|
||||
// Extensible rule, with DN-matching and a matching OID
|
||||
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
|
||||
extensibleDNAttributes = true
|
||||
state = stateReadingExtensibleMatchingRule
|
||||
newPos += 4
|
||||
|
||||
// Extensible rule, with attr only
|
||||
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Extensible rule, with no DN attribute matching
|
||||
case currentRune == ':':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
|
||||
state = stateReadingExtensibleMatchingRule
|
||||
newPos++
|
||||
|
||||
// Equality condition
|
||||
case currentRune == '=':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch])
|
||||
state = stateReadingCondition
|
||||
newPos++
|
||||
|
||||
// Greater-than or equal
|
||||
case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual])
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Less-than or equal
|
||||
case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual])
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Approx
|
||||
case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch])
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Still reading the attribute name
|
||||
default:
|
||||
attribute.WriteRune(currentRune)
|
||||
newPos += currentWidth
|
||||
}
|
||||
|
||||
case stateReadingExtensibleMatchingRule:
|
||||
switch {
|
||||
|
||||
// Matching rule OID is done
|
||||
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Still reading the matching rule oid
|
||||
default:
|
||||
extensibleMatchingRule.WriteRune(currentRune)
|
||||
newPos += currentWidth
|
||||
}
|
||||
|
||||
case stateReadingCondition:
|
||||
// append to the condition
|
||||
condition.WriteRune(currentRune)
|
||||
newPos += currentWidth
|
||||
}
|
||||
}
|
||||
|
||||
if newPos == len(filter) {
|
||||
err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
|
||||
return packet, newPos, err
|
||||
}
|
||||
if packet == nil {
|
||||
err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter"))
|
||||
return packet, newPos, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case packet.Tag == FilterExtensibleMatch:
|
||||
// MatchingRuleAssertion ::= SEQUENCE {
|
||||
// matchingRule [1] MatchingRuleID OPTIONAL,
|
||||
// type [2] AttributeDescription OPTIONAL,
|
||||
// matchValue [3] AssertionValue,
|
||||
// dnAttributes [4] BOOLEAN DEFAULT FALSE
|
||||
// }
|
||||
|
||||
// Include the matching rule oid, if specified
|
||||
if extensibleMatchingRule.Len() > 0 {
|
||||
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule.String(), MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule]))
|
||||
}
|
||||
|
||||
// Include the attribute, if specified
|
||||
if attribute.Len() > 0 {
|
||||
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute.String(), MatchingRuleAssertionMap[MatchingRuleAssertionType]))
|
||||
}
|
||||
|
||||
// Add the value (only required child)
|
||||
encodedString, encodeErr := decodeEscapedSymbols(condition.Bytes())
|
||||
if encodeErr != nil {
|
||||
return packet, newPos, encodeErr
|
||||
}
|
||||
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue]))
|
||||
|
||||
// Defaults to false, so only include in the sequence if true
|
||||
if extensibleDNAttributes {
|
||||
packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes]))
|
||||
}
|
||||
|
||||
case packet.Tag == FilterEqualityMatch && bytes.Equal(condition.Bytes(), _SymbolAny):
|
||||
packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute.String(), FilterMap[FilterPresent])
|
||||
case packet.Tag == FilterEqualityMatch && bytes.Index(condition.Bytes(), _SymbolAny) > -1:
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute.String(), "Attribute"))
|
||||
packet.Tag = FilterSubstrings
|
||||
packet.Description = FilterMap[uint64(packet.Tag)]
|
||||
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings")
|
||||
parts := bytes.Split(condition.Bytes(), _SymbolAny)
|
||||
for i, part := range parts {
|
||||
if len(part) == 0 {
|
||||
continue
|
||||
}
|
||||
var tag ber.Tag
|
||||
switch i {
|
||||
case 0:
|
||||
tag = FilterSubstringsInitial
|
||||
case len(parts) - 1:
|
||||
tag = FilterSubstringsFinal
|
||||
default:
|
||||
tag = FilterSubstringsAny
|
||||
}
|
||||
encodedString, encodeErr := decodeEscapedSymbols(part)
|
||||
if encodeErr != nil {
|
||||
return packet, newPos, encodeErr
|
||||
}
|
||||
seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)]))
|
||||
}
|
||||
packet.AppendChild(seq)
|
||||
default:
|
||||
encodedString, encodeErr := decodeEscapedSymbols(condition.Bytes())
|
||||
if encodeErr != nil {
|
||||
return packet, newPos, encodeErr
|
||||
}
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute.String(), "Attribute"))
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition"))
|
||||
}
|
||||
|
||||
newPos += currentWidth
|
||||
return packet, newPos, err
|
||||
}
|
||||
}
|
||||
|
||||
// Convert from "ABC\xx\xx\xx" form to literal bytes for transport
|
||||
func decodeEscapedSymbols(src []byte) (string, error) {
|
||||
|
||||
var (
|
||||
buffer bytes.Buffer
|
||||
offset int
|
||||
reader = bytes.NewReader(src)
|
||||
byteHex []byte
|
||||
byteVal []byte
|
||||
)
|
||||
|
||||
for {
|
||||
runeVal, runeSize, err := reader.ReadRune()
|
||||
if err == io.EOF {
|
||||
return buffer.String(), nil
|
||||
} else if err != nil {
|
||||
return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: failed to read filter: %v", err))
|
||||
} else if runeVal == unicode.ReplacementChar {
|
||||
return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", offset))
|
||||
}
|
||||
|
||||
if runeVal == '\\' {
|
||||
// http://tools.ietf.org/search/rfc4515
|
||||
// \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not
|
||||
// being a member of UTF1SUBSET.
|
||||
if byteHex == nil {
|
||||
byteHex = make([]byte, 2)
|
||||
byteVal = make([]byte, 1)
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(reader, byteHex); err != nil {
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter"))
|
||||
}
|
||||
return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: invalid characters for escape in filter: %v", err))
|
||||
}
|
||||
|
||||
if _, err := hexpac.Decode(byteVal, byteHex); err != nil {
|
||||
return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: invalid characters for escape in filter: %v", err))
|
||||
}
|
||||
|
||||
buffer.Write(byteVal)
|
||||
} else {
|
||||
buffer.WriteRune(runeVal)
|
||||
}
|
||||
|
||||
offset += runeSize
|
||||
}
|
||||
}
|
5
vendor/github.com/go-ldap/ldap/v3/go.mod
generated
vendored
Normal file
@ -0,0 +1,5 @@
module github.com/go-ldap/ldap/v3

go 1.13

require github.com/go-asn1-ber/asn1-ber v1.3.1
2
vendor/github.com/go-ldap/ldap/v3/go.sum
generated
vendored
Normal file
@ -0,0 +1,2 @@
github.com/go-asn1-ber/asn1-ber v1.3.1 h1:gvPdv/Hr++TRFCl0UbPFHC54P9N9jgsRPnmnr419Uck=
github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
340
vendor/github.com/go-ldap/ldap/v3/ldap.go
generated
vendored
Normal file
@ -0,0 +1,340 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// LDAP Application Codes
|
||||
const (
|
||||
ApplicationBindRequest = 0
|
||||
ApplicationBindResponse = 1
|
||||
ApplicationUnbindRequest = 2
|
||||
ApplicationSearchRequest = 3
|
||||
ApplicationSearchResultEntry = 4
|
||||
ApplicationSearchResultDone = 5
|
||||
ApplicationModifyRequest = 6
|
||||
ApplicationModifyResponse = 7
|
||||
ApplicationAddRequest = 8
|
||||
ApplicationAddResponse = 9
|
||||
ApplicationDelRequest = 10
|
||||
ApplicationDelResponse = 11
|
||||
ApplicationModifyDNRequest = 12
|
||||
ApplicationModifyDNResponse = 13
|
||||
ApplicationCompareRequest = 14
|
||||
ApplicationCompareResponse = 15
|
||||
ApplicationAbandonRequest = 16
|
||||
ApplicationSearchResultReference = 19
|
||||
ApplicationExtendedRequest = 23
|
||||
ApplicationExtendedResponse = 24
|
||||
)
|
||||
|
||||
// ApplicationMap contains human readable descriptions of LDAP Application Codes
|
||||
var ApplicationMap = map[uint8]string{
|
||||
ApplicationBindRequest: "Bind Request",
|
||||
ApplicationBindResponse: "Bind Response",
|
||||
ApplicationUnbindRequest: "Unbind Request",
|
||||
ApplicationSearchRequest: "Search Request",
|
||||
ApplicationSearchResultEntry: "Search Result Entry",
|
||||
ApplicationSearchResultDone: "Search Result Done",
|
||||
ApplicationModifyRequest: "Modify Request",
|
||||
ApplicationModifyResponse: "Modify Response",
|
||||
ApplicationAddRequest: "Add Request",
|
||||
ApplicationAddResponse: "Add Response",
|
||||
ApplicationDelRequest: "Del Request",
|
||||
ApplicationDelResponse: "Del Response",
|
||||
ApplicationModifyDNRequest: "Modify DN Request",
|
||||
ApplicationModifyDNResponse: "Modify DN Response",
|
||||
ApplicationCompareRequest: "Compare Request",
|
||||
ApplicationCompareResponse: "Compare Response",
|
||||
ApplicationAbandonRequest: "Abandon Request",
|
||||
ApplicationSearchResultReference: "Search Result Reference",
|
||||
ApplicationExtendedRequest: "Extended Request",
|
||||
ApplicationExtendedResponse: "Extended Response",
|
||||
}
|
||||
|
||||
// Ldap Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10)
|
||||
const (
|
||||
BeheraPasswordExpired = 0
|
||||
BeheraAccountLocked = 1
|
||||
BeheraChangeAfterReset = 2
|
||||
BeheraPasswordModNotAllowed = 3
|
||||
BeheraMustSupplyOldPassword = 4
|
||||
BeheraInsufficientPasswordQuality = 5
|
||||
BeheraPasswordTooShort = 6
|
||||
BeheraPasswordTooYoung = 7
|
||||
BeheraPasswordInHistory = 8
|
||||
)
|
||||
|
||||
// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes
|
||||
var BeheraPasswordPolicyErrorMap = map[int8]string{
|
||||
BeheraPasswordExpired: "Password expired",
|
||||
BeheraAccountLocked: "Account locked",
|
||||
BeheraChangeAfterReset: "Password must be changed",
|
||||
BeheraPasswordModNotAllowed: "Policy prevents password modification",
|
||||
BeheraMustSupplyOldPassword: "Policy requires old password in order to change password",
|
||||
BeheraInsufficientPasswordQuality: "Password fails quality checks",
|
||||
BeheraPasswordTooShort: "Password is too short for policy",
|
||||
BeheraPasswordTooYoung: "Password has been changed too recently",
|
||||
BeheraPasswordInHistory: "New password is in list of old passwords",
|
||||
}
|
||||
|
||||
// Adds descriptions to an LDAP Response packet for debugging
|
||||
func addLDAPDescriptions(packet *ber.Packet) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = NewError(ErrorDebugging, fmt.Errorf("ldap: cannot process packet to add descriptions: %s", r))
|
||||
}
|
||||
}()
|
||||
packet.Description = "LDAP Response"
|
||||
packet.Children[0].Description = "Message ID"
|
||||
|
||||
application := uint8(packet.Children[1].Tag)
|
||||
packet.Children[1].Description = ApplicationMap[application]
|
||||
|
||||
switch application {
|
||||
case ApplicationBindRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationBindResponse:
|
||||
err = addDefaultLDAPResponseDescriptions(packet)
|
||||
case ApplicationUnbindRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationSearchRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationSearchResultEntry:
|
||||
packet.Children[1].Children[0].Description = "Object Name"
|
||||
packet.Children[1].Children[1].Description = "Attributes"
|
||||
for _, child := range packet.Children[1].Children[1].Children {
|
||||
child.Description = "Attribute"
|
||||
child.Children[0].Description = "Attribute Name"
|
||||
child.Children[1].Description = "Attribute Values"
|
||||
for _, grandchild := range child.Children[1].Children {
|
||||
grandchild.Description = "Attribute Value"
|
||||
}
|
||||
}
|
||||
if len(packet.Children) == 3 {
|
||||
err = addControlDescriptions(packet.Children[2])
|
||||
}
|
||||
case ApplicationSearchResultDone:
|
||||
err = addDefaultLDAPResponseDescriptions(packet)
|
||||
case ApplicationModifyRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationModifyResponse:
|
||||
case ApplicationAddRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationAddResponse:
|
||||
case ApplicationDelRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationDelResponse:
|
||||
case ApplicationModifyDNRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationModifyDNResponse:
|
||||
case ApplicationCompareRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationCompareResponse:
|
||||
case ApplicationAbandonRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationSearchResultReference:
|
||||
case ApplicationExtendedRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationExtendedResponse:
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func addControlDescriptions(packet *ber.Packet) error {
|
||||
packet.Description = "Controls"
|
||||
for _, child := range packet.Children {
|
||||
var value *ber.Packet
|
||||
controlType := ""
|
||||
child.Description = "Control"
|
||||
switch len(child.Children) {
|
||||
case 0:
|
||||
// at least one child is required for control type
|
||||
return fmt.Errorf("at least one child is required for control type")
|
||||
|
||||
case 1:
|
||||
// just type, no criticality or value
|
||||
controlType = child.Children[0].Value.(string)
|
||||
child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
|
||||
|
||||
case 2:
|
||||
controlType = child.Children[0].Value.(string)
|
||||
child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
|
||||
// Children[1] could be criticality or value (both are optional)
|
||||
// duck-type on whether this is a boolean
|
||||
if _, ok := child.Children[1].Value.(bool); ok {
|
||||
child.Children[1].Description = "Criticality"
|
||||
} else {
|
||||
child.Children[1].Description = "Control Value"
|
||||
value = child.Children[1]
|
||||
}
|
||||
|
||||
case 3:
|
||||
// criticality and value present
|
||||
controlType = child.Children[0].Value.(string)
|
||||
child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
|
||||
child.Children[1].Description = "Criticality"
|
||||
child.Children[2].Description = "Control Value"
|
||||
value = child.Children[2]
|
||||
|
||||
default:
|
||||
// more than 3 children is invalid
|
||||
return fmt.Errorf("more than 3 children for control packet found")
|
||||
}
|
||||
|
||||
if value == nil {
|
||||
continue
|
||||
}
|
||||
switch controlType {
|
||||
case ControlTypePaging:
|
||||
value.Description += " (Paging)"
|
||||
if value.Value != nil {
|
||||
valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode data bytes: %s", err)
|
||||
}
|
||||
value.Data.Truncate(0)
|
||||
value.Value = nil
|
||||
valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes()
|
||||
value.AppendChild(valueChildren)
|
||||
}
|
||||
value.Children[0].Description = "Real Search Control Value"
|
||||
value.Children[0].Children[0].Description = "Paging Size"
|
||||
value.Children[0].Children[1].Description = "Cookie"
|
||||
|
||||
case ControlTypeBeheraPasswordPolicy:
|
||||
value.Description += " (Password Policy - Behera Draft)"
|
||||
if value.Value != nil {
|
||||
valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode data bytes: %s", err)
|
||||
}
|
||||
value.Data.Truncate(0)
|
||||
value.Value = nil
|
||||
value.AppendChild(valueChildren)
|
||||
}
|
||||
sequence := value.Children[0]
|
||||
for _, child := range sequence.Children {
|
||||
if child.Tag == 0 {
|
||||
//Warning
|
||||
warningPacket := child.Children[0]
|
||||
packet, err := ber.DecodePacketErr(warningPacket.Data.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode data bytes: %s", err)
|
||||
}
|
||||
val, ok := packet.Value.(int64)
|
||||
if ok {
|
||||
if warningPacket.Tag == 0 {
|
||||
//timeBeforeExpiration
|
||||
value.Description += " (TimeBeforeExpiration)"
|
||||
warningPacket.Value = val
|
||||
} else if warningPacket.Tag == 1 {
|
||||
//graceAuthNsRemaining
|
||||
value.Description += " (GraceAuthNsRemaining)"
|
||||
warningPacket.Value = val
|
||||
}
|
||||
}
|
||||
} else if child.Tag == 1 {
|
||||
// Error
|
||||
packet, err := ber.DecodePacketErr(child.Data.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode data bytes: %s", err)
|
||||
}
|
||||
val, ok := packet.Value.(int8)
|
||||
if !ok {
|
||||
val = -1
|
||||
}
|
||||
child.Description = "Error"
|
||||
child.Value = val
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func addRequestDescriptions(packet *ber.Packet) error {
|
||||
packet.Description = "LDAP Request"
|
||||
packet.Children[0].Description = "Message ID"
|
||||
packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)]
|
||||
if len(packet.Children) == 3 {
|
||||
return addControlDescriptions(packet.Children[2])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func addDefaultLDAPResponseDescriptions(packet *ber.Packet) error {
|
||||
err := GetLDAPError(packet)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[err.(*Error).ResultCode] + ")"
|
||||
packet.Children[1].Children[1].Description = "Matched DN (" + err.(*Error).MatchedDN + ")"
|
||||
packet.Children[1].Children[2].Description = "Error Message"
|
||||
if len(packet.Children[1].Children) > 3 {
|
||||
packet.Children[1].Children[3].Description = "Referral"
|
||||
}
|
||||
if len(packet.Children) == 3 {
|
||||
return addControlDescriptions(packet.Children[2])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DebugBinaryFile reads and prints packets from the given filename
|
||||
func DebugBinaryFile(fileName string) error {
|
||||
file, err := ioutil.ReadFile(fileName)
|
||||
if err != nil {
|
||||
return NewError(ErrorDebugging, err)
|
||||
}
|
||||
ber.PrintBytes(os.Stdout, file, "")
|
||||
packet, err := ber.DecodePacketErr(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode packet: %s", err)
|
||||
}
|
||||
if err := addLDAPDescriptions(packet); err != nil {
|
||||
return err
|
||||
}
|
||||
ber.PrintPacket(packet)
|
||||
|
||||
return nil
|
||||
}
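A minimal sketch (not part of the vendored file) of how this debugging helper might be invoked; the capture file name is a made-up placeholder:

```go
package main

import (
	"log"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	// Print the raw BER bytes and the annotated packet tree for a captured
	// LDAP message (the file name is a placeholder).
	if err := ldap.DebugBinaryFile("ldap-capture.bin"); err != nil {
		log.Fatal(err)
	}
}
```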
|
||||
|
||||
var hex = "0123456789abcdef"
|
||||
|
||||
func mustEscape(c byte) bool {
|
||||
return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0
|
||||
}
|
||||
|
||||
// EscapeFilter escapes from the provided LDAP filter string the special
|
||||
// characters in the set `()*\` and those out of the range 0 < c < 0x80,
|
||||
// as defined in RFC4515.
|
||||
func EscapeFilter(filter string) string {
|
||||
escape := 0
|
||||
for i := 0; i < len(filter); i++ {
|
||||
if mustEscape(filter[i]) {
|
||||
escape++
|
||||
}
|
||||
}
|
||||
if escape == 0 {
|
||||
return filter
|
||||
}
|
||||
buf := make([]byte, len(filter)+escape*2)
|
||||
for i, j := 0, 0; i < len(filter); i++ {
|
||||
c := filter[i]
|
||||
if mustEscape(c) {
|
||||
buf[j+0] = '\\'
|
||||
buf[j+1] = hex[c>>4]
|
||||
buf[j+2] = hex[c&0xf]
|
||||
j += 3
|
||||
} else {
|
||||
buf[j] = c
|
||||
j++
|
||||
}
|
||||
}
|
||||
return string(buf)
|
||||
}
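To illustrate the escaping rules above, a short sketch (illustrative only, not part of the vendored file; the attribute name and user input are made up):

```go
package main

import (
	"fmt"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	// Untrusted input containing filter metacharacters.
	userInput := "*admin)(objectClass=*"

	// EscapeFilter hex-escapes '(', ')', '*', '\', NUL and bytes >= 0x80,
	// as described above, so the input cannot alter the filter structure.
	filter := fmt.Sprintf("(cn=%s)", ldap.EscapeFilter(userInput))
	fmt.Println(filter) // (cn=\2aadmin\29\28objectClass=\2a)
}
```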
|
75
vendor/github.com/go-ldap/ldap/v3/moddn.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// ModifyDNRequest holds the request to modify a DN
|
||||
type ModifyDNRequest struct {
|
||||
DN string
|
||||
NewRDN string
|
||||
DeleteOldRDN bool
|
||||
NewSuperior string
|
||||
}
|
||||
|
||||
// NewModifyDNRequest creates a new request which can be passed to ModifyDN().
|
||||
//
|
||||
// To move an object in the tree, set the "newSup" to the new parent entry DN. Use an
|
||||
// empty string for just changing the object's RDN.
|
||||
//
|
||||
// For moving the object without renaming, the "rdn" must be the first
|
||||
// RDN of the given DN.
|
||||
//
|
||||
// A call like
|
||||
// mdnReq := NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
|
||||
// will set up the request to just rename uid=someone,dc=example,dc=org to
|
||||
// uid=newname,dc=example,dc=org.
|
||||
func NewModifyDNRequest(dn string, rdn string, delOld bool, newSup string) *ModifyDNRequest {
|
||||
return &ModifyDNRequest{
|
||||
DN: dn,
|
||||
NewRDN: rdn,
|
||||
DeleteOldRDN: delOld,
|
||||
NewSuperior: newSup,
|
||||
}
|
||||
}
|
||||
|
||||
func (req *ModifyDNRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyDNRequest, nil, "Modify DN Request")
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.NewRDN, "New RDN"))
|
||||
pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.DeleteOldRDN, "Delete old RDN"))
|
||||
if req.NewSuperior != "" {
|
||||
pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.NewSuperior, "New Superior"))
|
||||
}
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ModifyDN renames the given DN and optionally moves it to another base (when the "newSup" argument
|
||||
// to NewModifyDNRequest() is not "").
|
||||
func (l *Conn) ModifyDN(m *ModifyDNRequest) error {
|
||||
msgCtx, err := l.doRequest(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationModifyDNResponse {
|
||||
err := GetLDAPError(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
|
||||
}
|
||||
return nil
|
||||
}
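A usage sketch for the ModifyDN operation above (illustrative only, not part of the vendored diff; the server URL, credentials and DNs are placeholders, and DialURL/Bind come from the same package's connection API, which is not shown in this hunk):

```go
package main

import (
	"log"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Bind("cn=admin,dc=example,dc=org", "secret"); err != nil {
		log.Fatal(err)
	}

	// Rename the entry in place: drop the old RDN and keep the same parent
	// (an empty newSup means no move).
	req := ldap.NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
	if err := conn.ModifyDN(req); err != nil {
		log.Fatalf("ModifyDN failed: %v", err)
	}
}
```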
|
132
vendor/github.com/go-ldap/ldap/v3/modify.go
generated
vendored
Normal file
@ -0,0 +1,132 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// Change operation choices
|
||||
const (
|
||||
AddAttribute = 0
|
||||
DeleteAttribute = 1
|
||||
ReplaceAttribute = 2
|
||||
IncrementAttribute = 3 // (https://tools.ietf.org/html/rfc4525)
|
||||
)
|
||||
|
||||
// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
|
||||
type PartialAttribute struct {
|
||||
// Type is the type of the partial attribute
|
||||
Type string
|
||||
// Vals are the values of the partial attribute
|
||||
Vals []string
|
||||
}
|
||||
|
||||
func (p *PartialAttribute) encode() *ber.Packet {
|
||||
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute")
|
||||
seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type"))
|
||||
set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
|
||||
for _, value := range p.Vals {
|
||||
set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
|
||||
}
|
||||
seq.AppendChild(set)
|
||||
return seq
|
||||
}
|
||||
|
||||
// Change for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
|
||||
type Change struct {
|
||||
// Operation is the type of change to be made
|
||||
Operation uint
|
||||
// Modification is the attribute to be modified
|
||||
Modification PartialAttribute
|
||||
}
|
||||
|
||||
func (c *Change) encode() *ber.Packet {
|
||||
change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
|
||||
change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(c.Operation), "Operation"))
|
||||
change.AppendChild(c.Modification.encode())
|
||||
return change
|
||||
}
|
||||
|
||||
// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
|
||||
type ModifyRequest struct {
|
||||
// DN is the distinguishedName of the directory entry to modify
|
||||
DN string
|
||||
// Changes contain the attributes to modify
|
||||
Changes []Change
|
||||
// Controls hold optional controls to send with the request
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
// Add appends the given attribute to the list of changes to be made
|
||||
func (req *ModifyRequest) Add(attrType string, attrVals []string) {
|
||||
req.appendChange(AddAttribute, attrType, attrVals)
|
||||
}
|
||||
|
||||
// Delete appends the given attribute to the list of changes to be made
|
||||
func (req *ModifyRequest) Delete(attrType string, attrVals []string) {
|
||||
req.appendChange(DeleteAttribute, attrType, attrVals)
|
||||
}
|
||||
|
||||
// Replace appends the given attribute to the list of changes to be made
|
||||
func (req *ModifyRequest) Replace(attrType string, attrVals []string) {
|
||||
req.appendChange(ReplaceAttribute, attrType, attrVals)
|
||||
}
|
||||
|
||||
// Increment appends the given attribute to the list of changes to be made
|
||||
func (req *ModifyRequest) Increment(attrType string, attrVal string) {
|
||||
req.appendChange(IncrementAttribute, attrType, []string{attrVal})
|
||||
}
|
||||
|
||||
func (req *ModifyRequest) appendChange(operation uint, attrType string, attrVals []string) {
|
||||
req.Changes = append(req.Changes, Change{operation, PartialAttribute{Type: attrType, Vals: attrVals}})
|
||||
}
|
||||
|
||||
func (req *ModifyRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request")
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
|
||||
changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes")
|
||||
for _, change := range req.Changes {
|
||||
changes.AppendChild(change.encode())
|
||||
}
|
||||
pkt.AppendChild(changes)
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
if len(req.Controls) > 0 {
|
||||
envelope.AppendChild(encodeControls(req.Controls))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewModifyRequest creates a modify request for the given DN
|
||||
func NewModifyRequest(dn string, controls []Control) *ModifyRequest {
|
||||
return &ModifyRequest{
|
||||
DN: dn,
|
||||
Controls: controls,
|
||||
}
|
||||
}
|
||||
|
||||
// Modify performs the ModifyRequest
|
||||
func (l *Conn) Modify(modifyRequest *ModifyRequest) error {
|
||||
msgCtx, err := l.doRequest(modifyRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationModifyResponse {
|
||||
err := GetLDAPError(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
|
||||
}
|
||||
return nil
|
||||
}
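A usage sketch for the ModifyRequest API above (illustrative only; server URL, credentials, DN and attribute values are placeholders, and DialURL/Bind come from the package's connection API outside this hunk):

```go
package main

import (
	"log"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Bind("cn=admin,dc=example,dc=org", "secret"); err != nil {
		log.Fatal(err)
	}

	// Replace one attribute and add another in a single ModifyRequest.
	req := ldap.NewModifyRequest("uid=someone,dc=example,dc=org", nil)
	req.Replace("mail", []string{"someone@example.org"})
	req.Add("description", []string{"updated via go-ldap"})

	if err := conn.Modify(req); err != nil {
		log.Fatalf("Modify failed: %v", err)
	}
}
```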
|
126
vendor/github.com/go-ldap/ldap/v3/passwdmodify.go
generated
vendored
Normal file
@ -0,0 +1,126 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
const (
|
||||
passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1"
|
||||
)
|
||||
|
||||
// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt
|
||||
type PasswordModifyRequest struct {
|
||||
// UserIdentity is an optional string representation of the user associated with the request.
|
||||
// This string may or may not be an LDAPDN [RFC2253].
|
||||
// If no UserIdentity field is present, the request acts upon the password of the user currently associated with the LDAP session
|
||||
UserIdentity string
|
||||
// OldPassword, if present, contains the user's current password
|
||||
OldPassword string
|
||||
// NewPassword, if present, contains the desired password for this user
|
||||
NewPassword string
|
||||
}
|
||||
|
||||
// PasswordModifyResult holds the server response to a PasswordModifyRequest
|
||||
type PasswordModifyResult struct {
|
||||
// GeneratedPassword holds a password generated by the server, if present
|
||||
GeneratedPassword string
|
||||
// Referral is the returned referral, if any
|
||||
Referral string
|
||||
}
|
||||
|
||||
func (req *PasswordModifyRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation")
|
||||
pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID"))
|
||||
|
||||
extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request")
|
||||
passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request")
|
||||
if req.UserIdentity != "" {
|
||||
passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.UserIdentity, "User Identity"))
|
||||
}
|
||||
if req.OldPassword != "" {
|
||||
passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, req.OldPassword, "Old Password"))
|
||||
}
|
||||
if req.NewPassword != "" {
|
||||
passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, req.NewPassword, "New Password"))
|
||||
}
|
||||
extendedRequestValue.AppendChild(passwordModifyRequestValue)
|
||||
|
||||
pkt.AppendChild(extendedRequestValue)
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewPasswordModifyRequest creates a new PasswordModifyRequest
|
||||
//
|
||||
// According to RFC 3062 (https://tools.ietf.org/html/rfc3062):
|
||||
// userIdentity is a string representing the user associated with the request.
|
||||
// This string may or may not be an LDAPDN (RFC 2253).
|
||||
// If userIdentity is empty then the operation will act on the user associated
|
||||
// with the session.
|
||||
//
|
||||
// oldPassword is the user's current password; it can be empty or it can be
|
||||
// needed depending on the session user access rights (usually an administrator
|
||||
// can change a user's password without knowing the current one) and the
|
||||
// password policy (see pwdSafeModify password policy's attribute)
|
||||
//
|
||||
// newPassword is the user's desired password. If empty, the server can return
|
||||
// an error or generate a new password that will be available in the
|
||||
// PasswordModifyResult.GeneratedPassword
|
||||
//
|
||||
func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest {
|
||||
return &PasswordModifyRequest{
|
||||
UserIdentity: userIdentity,
|
||||
OldPassword: oldPassword,
|
||||
NewPassword: newPassword,
|
||||
}
|
||||
}
|
||||
|
||||
// PasswordModify performs the modification request
|
||||
func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) {
|
||||
msgCtx, err := l.doRequest(passwordModifyRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &PasswordModifyResult{}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationExtendedResponse {
|
||||
err := GetLDAPError(packet)
|
||||
if err != nil {
|
||||
if IsErrorWithCode(err, LDAPResultReferral) {
|
||||
for _, child := range packet.Children[1].Children {
|
||||
if child.Tag == 3 {
|
||||
result.Referral = child.Children[0].Value.(string)
|
||||
}
|
||||
}
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
} else {
|
||||
return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag))
|
||||
}
|
||||
|
||||
extendedResponse := packet.Children[1]
|
||||
for _, child := range extendedResponse.Children {
|
||||
if child.Tag == 11 {
|
||||
passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes())
|
||||
if len(passwordModifyResponseValue.Children) == 1 {
|
||||
if passwordModifyResponseValue.Children[0].Tag == 0 {
|
||||
result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
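A usage sketch for the Password Modify extended operation above (illustrative only; server URL, DN and passwords are placeholders, and DialURL/Bind come from the package's connection API outside this hunk):

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Bind("uid=someone,dc=example,dc=org", "oldsecret"); err != nil {
		log.Fatal(err)
	}

	// Empty userIdentity: operate on the user bound to this session.
	// Empty newPassword: ask the server to generate one for us.
	req := ldap.NewPasswordModifyRequest("", "oldsecret", "")
	res, err := conn.PasswordModify(req)
	if err != nil {
		log.Fatalf("password modify failed: %v", err)
	}
	fmt.Println("generated password:", res.GeneratedPassword)
}
```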
|
66
vendor/github.com/go-ldap/ldap/v3/request.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
var (
|
||||
errRespChanClosed = errors.New("ldap: response channel closed")
|
||||
errCouldNotRetMsg = errors.New("ldap: could not retrieve message")
|
||||
)
|
||||
|
||||
type request interface {
|
||||
appendTo(*ber.Packet) error
|
||||
}
|
||||
|
||||
type requestFunc func(*ber.Packet) error
|
||||
|
||||
func (f requestFunc) appendTo(p *ber.Packet) error {
|
||||
return f(p)
|
||||
}
|
||||
|
||||
func (l *Conn) doRequest(req request) (*messageContext, error) {
|
||||
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
|
||||
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
|
||||
if err := req.appendTo(packet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if l.Debug {
|
||||
l.Debug.PrintPacket(packet)
|
||||
}
|
||||
|
||||
msgCtx, err := l.sendMessage(packet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.Debug.Printf("%d: returning", msgCtx.id)
|
||||
return msgCtx, nil
|
||||
}
|
||||
|
||||
func (l *Conn) readPacket(msgCtx *messageContext) (*ber.Packet, error) {
|
||||
l.Debug.Printf("%d: waiting for response", msgCtx.id)
|
||||
packetResponse, ok := <-msgCtx.responses
|
||||
if !ok {
|
||||
return nil, NewError(ErrorNetwork, errRespChanClosed)
|
||||
}
|
||||
packet, err := packetResponse.ReadPacket()
|
||||
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if packet == nil {
|
||||
return nil, NewError(ErrorNetwork, errCouldNotRetMsg)
|
||||
}
|
||||
|
||||
if l.Debug {
|
||||
if err = addLDAPDescriptions(packet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.Debug.PrintPacket(packet)
|
||||
}
|
||||
return packet, nil
|
||||
}
|
370
vendor/github.com/go-ldap/ldap/v3/search.go
generated
vendored
Normal file
@ -0,0 +1,370 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// scope choices
|
||||
const (
|
||||
ScopeBaseObject = 0
|
||||
ScopeSingleLevel = 1
|
||||
ScopeWholeSubtree = 2
|
||||
)
|
||||
|
||||
// ScopeMap contains human readable descriptions of scope choices
|
||||
var ScopeMap = map[int]string{
|
||||
ScopeBaseObject: "Base Object",
|
||||
ScopeSingleLevel: "Single Level",
|
||||
ScopeWholeSubtree: "Whole Subtree",
|
||||
}
|
||||
|
||||
// derefAliases
|
||||
const (
|
||||
NeverDerefAliases = 0
|
||||
DerefInSearching = 1
|
||||
DerefFindingBaseObj = 2
|
||||
DerefAlways = 3
|
||||
)
|
||||
|
||||
// DerefMap contains human readable descriptions of derefAliases choices
|
||||
var DerefMap = map[int]string{
|
||||
NeverDerefAliases: "NeverDerefAliases",
|
||||
DerefInSearching: "DerefInSearching",
|
||||
DerefFindingBaseObj: "DerefFindingBaseObj",
|
||||
DerefAlways: "DerefAlways",
|
||||
}
|
||||
|
||||
// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs.
|
||||
// The map of attributes is accessed in alphabetical order of the keys in order to ensure that, for the
|
||||
// same input map of attributes, the output entry will contain the same order of attributes
|
||||
func NewEntry(dn string, attributes map[string][]string) *Entry {
|
||||
var attributeNames []string
|
||||
for attributeName := range attributes {
|
||||
attributeNames = append(attributeNames, attributeName)
|
||||
}
|
||||
sort.Strings(attributeNames)
|
||||
|
||||
var encodedAttributes []*EntryAttribute
|
||||
for _, attributeName := range attributeNames {
|
||||
encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName]))
|
||||
}
|
||||
return &Entry{
|
||||
DN: dn,
|
||||
Attributes: encodedAttributes,
|
||||
}
|
||||
}
|
||||
|
||||
// Entry represents a single search result entry
|
||||
type Entry struct {
|
||||
// DN is the distinguished name of the entry
|
||||
DN string
|
||||
// Attributes are the returned attributes for the entry
|
||||
Attributes []*EntryAttribute
|
||||
}
|
||||
|
||||
// GetAttributeValues returns the values for the named attribute, or an empty list
|
||||
func (e *Entry) GetAttributeValues(attribute string) []string {
|
||||
for _, attr := range e.Attributes {
|
||||
if attr.Name == attribute {
|
||||
return attr.Values
|
||||
}
|
||||
}
|
||||
return []string{}
|
||||
}
|
||||
|
||||
// GetRawAttributeValues returns the byte values for the named attribute, or an empty list
|
||||
func (e *Entry) GetRawAttributeValues(attribute string) [][]byte {
|
||||
for _, attr := range e.Attributes {
|
||||
if attr.Name == attribute {
|
||||
return attr.ByteValues
|
||||
}
|
||||
}
|
||||
return [][]byte{}
|
||||
}
|
||||
|
||||
// GetAttributeValue returns the first value for the named attribute, or ""
|
||||
func (e *Entry) GetAttributeValue(attribute string) string {
|
||||
values := e.GetAttributeValues(attribute)
|
||||
if len(values) == 0 {
|
||||
return ""
|
||||
}
|
||||
return values[0]
|
||||
}
|
||||
|
||||
// GetRawAttributeValue returns the first value for the named attribute, or an empty slice
|
||||
func (e *Entry) GetRawAttributeValue(attribute string) []byte {
|
||||
values := e.GetRawAttributeValues(attribute)
|
||||
if len(values) == 0 {
|
||||
return []byte{}
|
||||
}
|
||||
return values[0]
|
||||
}
|
||||
|
||||
// Print outputs a human-readable description
|
||||
func (e *Entry) Print() {
|
||||
fmt.Printf("DN: %s\n", e.DN)
|
||||
for _, attr := range e.Attributes {
|
||||
attr.Print()
|
||||
}
|
||||
}
|
||||
|
||||
// PrettyPrint outputs a human-readable description indenting
|
||||
func (e *Entry) PrettyPrint(indent int) {
|
||||
fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN)
|
||||
for _, attr := range e.Attributes {
|
||||
attr.PrettyPrint(indent + 2)
|
||||
}
|
||||
}
|
||||
|
||||
// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair
|
||||
func NewEntryAttribute(name string, values []string) *EntryAttribute {
|
||||
var bytes [][]byte
|
||||
for _, value := range values {
|
||||
bytes = append(bytes, []byte(value))
|
||||
}
|
||||
return &EntryAttribute{
|
||||
Name: name,
|
||||
Values: values,
|
||||
ByteValues: bytes,
|
||||
}
|
||||
}
|
||||
|
||||
// EntryAttribute holds a single attribute
|
||||
type EntryAttribute struct {
|
||||
// Name is the name of the attribute
|
||||
Name string
|
||||
// Values contain the string values of the attribute
|
||||
Values []string
|
||||
// ByteValues contain the raw values of the attribute
|
||||
ByteValues [][]byte
|
||||
}
|
||||
|
||||
// Print outputs a human-readable description
|
||||
func (e *EntryAttribute) Print() {
|
||||
fmt.Printf("%s: %s\n", e.Name, e.Values)
|
||||
}
|
||||
|
||||
// PrettyPrint outputs a human-readable description with indenting
|
||||
func (e *EntryAttribute) PrettyPrint(indent int) {
|
||||
fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values)
|
||||
}
|
||||
|
||||
// SearchResult holds the server's response to a search request
|
||||
type SearchResult struct {
|
||||
// Entries are the returned entries
|
||||
Entries []*Entry
|
||||
// Referrals are the returned referrals
|
||||
Referrals []string
|
||||
// Controls are the returned controls
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
// Print outputs a human-readable description
|
||||
func (s *SearchResult) Print() {
|
||||
for _, entry := range s.Entries {
|
||||
entry.Print()
|
||||
}
|
||||
}
|
||||
|
||||
// PrettyPrint outputs a human-readable description with indenting
|
||||
func (s *SearchResult) PrettyPrint(indent int) {
|
||||
for _, entry := range s.Entries {
|
||||
entry.PrettyPrint(indent)
|
||||
}
|
||||
}
|
||||
|
||||
// SearchRequest represents a search request to send to the server
|
||||
type SearchRequest struct {
|
||||
BaseDN string
|
||||
Scope int
|
||||
DerefAliases int
|
||||
SizeLimit int
|
||||
TimeLimit int
|
||||
TypesOnly bool
|
||||
Filter string
|
||||
Attributes []string
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
func (req *SearchRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request")
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.BaseDN, "Base DN"))
|
||||
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.Scope), "Scope"))
|
||||
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.DerefAliases), "Deref Aliases"))
|
||||
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.SizeLimit), "Size Limit"))
|
||||
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.TimeLimit), "Time Limit"))
|
||||
pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.TypesOnly, "Types Only"))
|
||||
// compile and encode filter
|
||||
filterPacket, err := CompileFilter(req.Filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pkt.AppendChild(filterPacket)
|
||||
// encode attributes
|
||||
attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
|
||||
for _, attribute := range req.Attributes {
|
||||
attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
|
||||
}
|
||||
pkt.AppendChild(attributesPacket)
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
if len(req.Controls) > 0 {
|
||||
envelope.AppendChild(encodeControls(req.Controls))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewSearchRequest creates a new search request
|
||||
func NewSearchRequest(
|
||||
BaseDN string,
|
||||
Scope, DerefAliases, SizeLimit, TimeLimit int,
|
||||
TypesOnly bool,
|
||||
Filter string,
|
||||
Attributes []string,
|
||||
Controls []Control,
|
||||
) *SearchRequest {
|
||||
return &SearchRequest{
|
||||
BaseDN: BaseDN,
|
||||
Scope: Scope,
|
||||
DerefAliases: DerefAliases,
|
||||
SizeLimit: SizeLimit,
|
||||
TimeLimit: TimeLimit,
|
||||
TypesOnly: TypesOnly,
|
||||
Filter: Filter,
|
||||
Attributes: Attributes,
|
||||
Controls: Controls,
|
||||
}
|
||||
}
|
||||
|
||||
// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the
|
||||
// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically.
|
||||
// The following four cases are possible given the arguments:
|
||||
// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size
|
||||
// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries
|
||||
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request
|
||||
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries
|
||||
// A requested pagingSize of 0 is interpreted as no limit by LDAP servers.
|
||||
func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) {
|
||||
var pagingControl *ControlPaging
|
||||
|
||||
control := FindControl(searchRequest.Controls, ControlTypePaging)
|
||||
if control == nil {
|
||||
pagingControl = NewControlPaging(pagingSize)
|
||||
searchRequest.Controls = append(searchRequest.Controls, pagingControl)
|
||||
} else {
|
||||
castControl, ok := control.(*ControlPaging)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected paging control to be of type *ControlPaging, got %v", control)
|
||||
}
|
||||
if castControl.PagingSize != pagingSize {
|
||||
return nil, fmt.Errorf("paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize)
|
||||
}
|
||||
pagingControl = castControl
|
||||
}
|
||||
|
||||
searchResult := new(SearchResult)
|
||||
for {
|
||||
result, err := l.Search(searchRequest)
|
||||
l.Debug.Printf("Looking for Paging Control...")
|
||||
if err != nil {
|
||||
return searchResult, err
|
||||
}
|
||||
if result == nil {
|
||||
return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received"))
|
||||
}
|
||||
|
||||
for _, entry := range result.Entries {
|
||||
searchResult.Entries = append(searchResult.Entries, entry)
|
||||
}
|
||||
for _, referral := range result.Referrals {
|
||||
searchResult.Referrals = append(searchResult.Referrals, referral)
|
||||
}
|
||||
for _, control := range result.Controls {
|
||||
searchResult.Controls = append(searchResult.Controls, control)
|
||||
}
|
||||
|
||||
l.Debug.Printf("Looking for Paging Control...")
|
||||
pagingResult := FindControl(result.Controls, ControlTypePaging)
|
||||
if pagingResult == nil {
|
||||
pagingControl = nil
|
||||
l.Debug.Printf("Could not find paging control. Breaking...")
|
||||
break
|
||||
}
|
||||
|
||||
cookie := pagingResult.(*ControlPaging).Cookie
|
||||
if len(cookie) == 0 {
|
||||
pagingControl = nil
|
||||
l.Debug.Printf("Could not find cookie. Breaking...")
|
||||
break
|
||||
}
|
||||
pagingControl.SetCookie(cookie)
|
||||
}
|
||||
|
||||
if pagingControl != nil {
|
||||
l.Debug.Printf("Abandoning Paging...")
|
||||
pagingControl.PagingSize = 0
|
||||
l.Search(searchRequest)
|
||||
}
|
||||
|
||||
return searchResult, nil
|
||||
}
|
||||
|
||||
// Search performs the given search request
|
||||
func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) {
|
||||
msgCtx, err := l.doRequest(searchRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
result := &SearchResult{
|
||||
Entries: make([]*Entry, 0),
|
||||
Referrals: make([]string, 0),
|
||||
Controls: make([]Control, 0)}
|
||||
|
||||
for {
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch packet.Children[1].Tag {
|
||||
case 4:
|
||||
entry := new(Entry)
|
||||
entry.DN = packet.Children[1].Children[0].Value.(string)
|
||||
for _, child := range packet.Children[1].Children[1].Children {
|
||||
attr := new(EntryAttribute)
|
||||
attr.Name = child.Children[0].Value.(string)
|
||||
for _, value := range child.Children[1].Children {
|
||||
attr.Values = append(attr.Values, value.Value.(string))
|
||||
attr.ByteValues = append(attr.ByteValues, value.ByteValue)
|
||||
}
|
||||
entry.Attributes = append(entry.Attributes, attr)
|
||||
}
|
||||
result.Entries = append(result.Entries, entry)
|
||||
case 5:
|
||||
err := GetLDAPError(packet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(packet.Children) == 3 {
|
||||
for _, child := range packet.Children[2].Children {
|
||||
decodedChild, err := DecodeControl(child)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode child control: %s", err)
|
||||
}
|
||||
result.Controls = append(result.Controls, decodedChild)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
case 19:
|
||||
result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string))
|
||||
}
|
||||
}
|
||||
}
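A usage sketch tying together NewSearchRequest, SearchWithPaging and the Entry accessors above (illustrative only; server URL, credentials, base DN, filter and page size are placeholders, and DialURL/Bind come from the package's connection API outside this hunk):

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Bind("cn=admin,dc=example,dc=org", "secret"); err != nil {
		log.Fatal(err)
	}

	req := ldap.NewSearchRequest(
		"dc=example,dc=org",
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		"(objectClass=inetOrgPerson)",
		[]string{"cn", "mail"},
		nil,
	)

	// SearchWithPaging adds the paging control for us (the first case listed
	// above) and returns all pages buffered into a single SearchResult.
	res, err := conn.SearchWithPaging(req, 50)
	if err != nil {
		log.Fatalf("search failed: %v", err)
	}
	for _, entry := range res.Entries {
		fmt.Printf("%s: %s\n", entry.DN, entry.GetAttributeValue("mail"))
	}
}
```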
|
19
vendor/github.com/goshuirc/e-nfa/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
- tip
|
||||
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get golang.org/x/tools/cmd/vet
|
||||
- go get golang.org/x/tools/cmd/goimports
|
||||
- go get github.com/golang/lint/golint
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
script:
|
||||
- go vet ./...
|
||||
# - $HOME/gopath/bin/goveralls -coverprofile=coverage.cov -service=travis-ci
|
||||
# - bash <(curl -s https://codecov.io/bash)
|
||||
- go test -bench=. -benchmem ./...
|
||||
#- sh ./install_all_cmd.sh
|
122
vendor/github.com/goshuirc/e-nfa/README.md
generated
vendored
Normal file
@ -0,0 +1,122 @@
|
||||
ε-NFA: Epsilon-Nondeterministic finite automaton
|
||||
==============
|
||||
|
||||
[![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://raw.githubusercontent.com/kkdai/e-nfa/master/LICENSE) [![GoDoc](https://godoc.org/github.com/kkdai/e-nfa?status.svg)](https://godoc.org/github.com/kkdai/e-nfa) [![Build Status](https://travis-ci.org/kkdai/e-nfa.svg?branch=master)](https://travis-ci.org/kkdai/e-nfa)
|
||||
|
||||
|
||||
|
||||
![image](https://upload.wikimedia.org/wikipedia/commons/thumb/0/0e/NFAexample.svg/250px-NFAexample.svg.png)
|
||||
|
||||
|
||||
|
||||
What is an Epsilon-Nondeterministic finite automaton?
|
||||
=============
|
||||
|
||||
`ε-NFA`: Epsilon-Nondeterministic finite automaton (also called: nondeterministic finite automaton with ε-moves)
|
||||
|
||||
In automata theory, a nondeterministic finite automaton with ε-moves (NFA-ε, also known as NFA-λ) is an extension of the nondeterministic finite automaton (NFA) that allows a transition to a new state without consuming any input symbols. Transitions that consume no input symbol are called ε-transitions or λ-transitions. In state diagrams, they are usually labeled with the Greek letter ε or λ.
|
||||
|
||||
(cited from [here](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton))
|
||||
|
||||
|
||||
Looking for a DFA implementation?
|
||||
=============
|
||||
|
||||
I also wrote a DFA implementation in Go here: [https://github.com/kkdai/dfa](https://github.com/kkdai/dfa)
|
||||
|
||||
Looking for an NFA implementation?
|
||||
=============
|
||||
|
||||
I also wrote an NFA implementation in Go here: [https://github.com/kkdai/nfa](https://github.com/kkdai/nfa)
|
||||
|
||||
|
||||
Installation and Usage
|
||||
=============
|
||||
|
||||
|
||||
Install
|
||||
---------------
|
||||
|
||||
go get github.com/kkdai/e-nfa
|
||||
|
||||
|
||||
|
||||
Usage
|
||||
---------------
|
||||
|
||||
The following sample code implements the epsilon-NFA automaton shown in this diagram:
|
||||
|
||||
![image](image/eNFA.png)
|
||||
|
||||
|
||||
|
||||
```go
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/kkdai/enfa"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
nfa := NewENFA(0, false)
|
||||
nfa.AddState(1, false)
|
||||
nfa.AddState(2, false)
|
||||
nfa.AddState(3, true)
|
||||
nfa.AddState(4, false)
|
||||
nfa.AddState(5, false)
|
||||
|
||||
nfa.AddTransition(0, "1", 1)
|
||||
nfa.AddTransition(0, "0", 4)
|
||||
|
||||
nfa.AddTransition(1, "1", 2)
|
||||
nfa.AddTransition(1, "", 3) //epsilon
|
||||
nfa.AddTransition(2, "1", 3)
|
||||
nfa.AddTransition(4, "0", 5)
|
||||
nfa.AddTransition(4, "", 1, 2) //E -> epsilon B C
|
||||
nfa.AddTransition(5, "0", 3)
|
||||
|
||||
nfa.PrintTransitionTable()
|
||||
|
||||
if !nfa.VerifyInputs([]string{"1"}) {
|
||||
fmt.Printf("Verify inputs is failed")
|
||||
}
|
||||
|
||||
nfa.Reset()
|
||||
|
||||
if !nfa.VerifyInputs([]string{"1", "1", "1"}) {
|
||||
fmt.Printf("Verify inputs is failed")
|
||||
}
|
||||
|
||||
nfa.Reset()
|
||||
|
||||
if !nfa.VerifyInputs([]string{"0", "1"}) {
|
||||
fmt.Printf("Verify inputs is failed")
|
||||
}
|
||||
|
||||
nfa.Reset()
|
||||
if !nfa.VerifyInputs([]string{"0", "0", "0"}) {
|
||||
fmt.Printf("Verify inputs is failed")
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Inspired By
|
||||
=============
|
||||
|
||||
- [ε-NFA: Wiki](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton_with_%CE%B5-moves)
|
||||
- [Coursera: Automata](https://class.coursera.org/automata-004/)
|
||||
|
||||
Project52
|
||||
---------------
|
||||
|
||||
It is one of my [project 52](https://github.com/kkdai/project52).
|
||||
|
||||
|
||||
License
|
||||
---------------
|
||||
|
||||
This package is licensed under MIT license. See LICENSE for details.
|
185
vendor/github.com/goshuirc/e-nfa/enfa.go
generated
vendored
Normal file
@ -0,0 +1,185 @@
|
||||
package enfa
|
||||
|
||||
import "fmt"
|
||||
|
||||
type transitionInput struct {
|
||||
srcState int
|
||||
input string
|
||||
}
|
||||
|
||||
type destState map[int]bool
|
||||
|
||||
type ENFA struct {
|
||||
initState int
|
||||
currentState map[int]bool
|
||||
totalStates []int
|
||||
finalStates []int
|
||||
transition map[transitionInput]destState
|
||||
inputMap map[string]bool
|
||||
}
|
||||
|
||||
//NewENFA creates a new ε-NFA with the given initial state; isFinal marks it as an accepting state
|
||||
func NewENFA(initState int, isFinal bool) *ENFA {
|
||||
|
||||
retNFA := &ENFA{
|
||||
transition: make(map[transitionInput]destState),
|
||||
inputMap: make(map[string]bool),
|
||||
initState: initState}
|
||||
|
||||
retNFA.currentState = make(map[int]bool)
|
||||
retNFA.currentState[initState] = true
|
||||
retNFA.AddState(initState, isFinal)
|
||||
return retNFA
|
||||
}
|
||||
|
||||
//AddState adds a new state to this NFA; isFinal marks it as an accepting state
|
||||
func (d *ENFA) AddState(state int, isFinal bool) {
|
||||
if state == -1 {
|
||||
fmt.Println("Cannot add state as -1, it is dead state")
|
||||
return
|
||||
}
|
||||
|
||||
d.totalStates = append(d.totalStates, state)
|
||||
if isFinal {
|
||||
d.finalStates = append(d.finalStates, state)
|
||||
}
|
||||
}
|
||||
|
||||
//AddTransition adds a new transition (srcState, input) -> dstStateList to the NFA
|
||||
func (d *ENFA) AddTransition(srcState int, input string, dstStateList ...int) {
|
||||
find := false
|
||||
|
||||
//check whether this input already exists in the NFA's input list
|
||||
if _, ok := d.inputMap[input]; !ok {
|
||||
//not seen before: register the new input in this NFA
|
||||
d.inputMap[input] = true
|
||||
}
|
||||
|
||||
for _, v := range d.totalStates {
|
||||
if v == srcState {
|
||||
find = true
|
||||
}
|
||||
}
|
||||
|
||||
if !find {
|
||||
fmt.Println("No such state:", srcState, " in current NFA")
|
||||
return
|
||||
}
|
||||
|
||||
dstMap := make(map[int]bool)
|
||||
for _, destState := range dstStateList {
|
||||
dstMap[destState] = true
|
||||
}
|
||||
|
||||
targetTrans := transitionInput{srcState: srcState, input: input}
|
||||
d.transition[targetTrans] = dstMap
|
||||
}
|
||||
|
||||
func (d *ENFA) CheckPathExist(src int, input string, dst int) bool {
|
||||
retMap, _ := d.transition[transitionInput{srcState: src, input: input}]
|
||||
if _, ok := retMap[dst]; ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (d *ENFA) Input(testInput string) []int {
|
||||
updateCurrentState := make(map[int]bool)
|
||||
for current, _ := range d.currentState {
|
||||
for _, realTestInput := range []string{testInput, "*", "?"} {
|
||||
intputTrans := transitionInput{srcState: current, input: realTestInput}
|
||||
valMap, ok := d.transition[intputTrans]
|
||||
if ok {
|
||||
for dst, _ := range valMap {
|
||||
updateCurrentState[dst] = true
|
||||
|
||||
//also follow epsilon transitions out of the destination state, if any
|
||||
epsilonTrans := transitionInput{srcState: dst}
|
||||
if eMap, ok := d.transition[epsilonTrans]; ok {
|
||||
for eDst, _ := range eMap {
|
||||
updateCurrentState[eDst] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
//dead state: no transition for this input, so it is dropped from the current state set
|
||||
//do nothing.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//update current state
|
||||
d.currentState = updateCurrentState
|
||||
|
||||
//return result
|
||||
var ret []int
|
||||
for state, _ := range updateCurrentState {
|
||||
ret = append(ret, state)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
//Verify reports whether any of the current states is a final state
|
||||
func (d *ENFA) Verify() bool {
|
||||
for _, val := range d.finalStates {
|
||||
for cState, _ := range d.currentState {
|
||||
if val == cState {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
//Reset returns the NFA to its initial state; all states and transition functions are retained
|
||||
func (d *ENFA) Reset() {
|
||||
initState := make(map[int]bool)
|
||||
initState[d.initState] = true
|
||||
d.currentState = initState
|
||||
}
|
||||
|
||||
//VerifyInputs reports whether the given list of inputs is accepted by the NFA
|
||||
func (d *ENFA) VerifyInputs(inputs []string) bool {
|
||||
for _, v := range inputs {
|
||||
d.Input(v)
|
||||
}
|
||||
return d.Verify()
|
||||
}
|
||||
|
||||
//PrintTransitionTable prints the detailed transition table of the current NFA
|
||||
func (d *ENFA) PrintTransitionTable() {
|
||||
fmt.Println("===================================================")
|
||||
//list all inputs
|
||||
var inputList []string
|
||||
for key, _ := range d.inputMap {
|
||||
if key == "" {
|
||||
fmt.Printf("\tε|")
|
||||
} else {
|
||||
fmt.Printf("\t%s|", key)
|
||||
}
|
||||
inputList = append(inputList, key)
|
||||
}
|
||||
|
||||
fmt.Printf("\n")
|
||||
fmt.Println("---------------------------------------------------")
|
||||
|
||||
for _, state := range d.totalStates {
|
||||
fmt.Printf("%d |", state)
|
||||
for _, key := range inputList {
|
||||
checkInput := transitionInput{srcState: state, input: key}
|
||||
if dstState, ok := d.transition[checkInput]; ok {
|
||||
fmt.Printf("\t")
|
||||
for val, _ := range dstState {
|
||||
fmt.Printf("%d,", val)
|
||||
}
|
||||
fmt.Printf("|")
|
||||
} else {
|
||||
fmt.Printf("\tNA|")
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
|
||||
fmt.Println("---------------------------------------------------")
|
||||
fmt.Println("===================================================")
|
||||
}
|
13
vendor/github.com/goshuirc/irc-go/LICENSE
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
Copyright (c) 2016-2017 Daniel Oaks
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||||
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
||||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
PERFORMANCE OF THIS SOFTWARE.
|
86
vendor/github.com/goshuirc/irc-go/ircfmt/doc.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
|
||||
// written by Daniel Oaks <daniel@danieloaks.net>
|
||||
// released under the ISC license
|
||||
|
||||
/*
|
||||
Package ircfmt handles IRC formatting codes, escaping and unescaping.
|
||||
|
||||
This allows for a simpler representation of strings that contain colour codes,
|
||||
bold codes, and such, without having to write and handle raw bytes when
|
||||
assembling outgoing messages.
|
||||
|
||||
This lets you turn raw IRC messages into our escaped versions, and turn escaped
|
||||
versions back into raw messages suitable for sending on IRC connections. This
|
||||
is designed to be used on things like PRIVMSG / NOTICE commands, MOTD blocks,
|
||||
and such.
|
||||
|
||||
The escape character we use in this library is the dollar sign ("$"), along
|
||||
with the given escape characters:
|
||||
|
||||
--------------------------------
|
||||
Name | Escape | Raw
|
||||
--------------------------------
|
||||
Dollarsign | $$ | $
|
||||
Bold | $b | 0x02
|
||||
Colour | $c | 0x03
|
||||
Monospace | $m | 0x11
|
||||
Reverse Colour | $v | 0x16
|
||||
Italic | $i | 0x1d
|
||||
Strikethrough | $s | 0x1e
|
||||
Underscore | $u | 0x1f
|
||||
Reset | $r | 0x0f
|
||||
--------------------------------
|
||||
|
||||
Colours are escaped in a slightly different way, using the actual names of them
|
||||
rather than just the raw numbers.
|
||||
|
||||
In our escaped format, the colours for the fore and background are contained in
|
||||
square brackets after the colour ("$c") escape. For example:
|
||||
|
||||
Red foreground:
|
||||
Escaped: This is a $c[red]cool message!
|
||||
Raw: This is a 0x034cool message!
|
||||
|
||||
Blue foreground, green background:
|
||||
Escaped: This is a $c[blue,green]rad message!
|
||||
Raw: This is a 0x032,3rad message!
|
||||
|
||||
When assembling a raw message, we make sure to use the full colour code
|
||||
("02" vs just "2") when it could become confused due to numbers just after the
|
||||
colour escape code. For instance, lines like this will be unescaped correctly:
|
||||
|
||||
No number after colour escape:
|
||||
Escaped: This is a $c[red]cool message!
|
||||
Raw: This is a 0x034cool message!
|
||||
|
||||
Number after colour escape:
|
||||
Escaped: This is $c[blue]20% cooler!
|
||||
Raw: This is 0x030220% cooler
|
||||
|
||||
Here are the colour names and codes we recognise:
|
||||
|
||||
--------------------
|
||||
Code | Name
|
||||
--------------------
|
||||
00 | white
|
||||
01 | black
|
||||
02 | blue
|
||||
03 | green
|
||||
04 | red
|
||||
05 | brown
|
||||
06 | magenta
|
||||
07 | orange
|
||||
08 | yellow
|
||||
09 | light green
|
||||
10 | cyan
|
||||
11 | light cyan
|
||||
12 | light blue
|
||||
13 | pink
|
||||
14 | grey
|
||||
15 | light grey
|
||||
99 | default
|
||||
--------------------
|
||||
|
||||
These other colours aren't given names:
|
||||
https://modern.ircdocs.horse/formatting.html#colors-16-98
|
||||
*/
|
||||
package ircfmt
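An illustrative round trip with the escapes documented above (not part of the vendored file); Escape, Unescape and Strip are the functions defined in ircfmt.go below:

```go
package main

import (
	"fmt"

	"github.com/goshuirc/irc-go/ircfmt"
)

func main() {
	// An escaped message using the $b and $c[...] notation described above.
	escaped := "This is a $bcool$b, $c[red]red$r message!"

	raw := ircfmt.Unescape(escaped) // expands to the raw \x02 / \x03 / \x0f codes
	fmt.Printf("%q\n", raw)

	fmt.Printf("%q\n", ircfmt.Escape(raw)) // back to the escaped form
	fmt.Printf("%q\n", ircfmt.Strip(raw))  // all formatting codes removed
}
```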
|
330
vendor/github.com/goshuirc/irc-go/ircfmt/ircfmt.go
generated
vendored
Normal file
@ -0,0 +1,330 @@
|
||||
// written by Daniel Oaks <daniel@danieloaks.net>
|
||||
// released under the ISC license
|
||||
|
||||
package ircfmt
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// raw bytes and strings to do replacing with
|
||||
bold string = "\x02"
|
||||
colour string = "\x03"
|
||||
monospace string = "\x11"
|
||||
reverseColour string = "\x16"
|
||||
italic string = "\x1d"
|
||||
strikethrough string = "\x1e"
|
||||
underline string = "\x1f"
|
||||
reset string = "\x0f"
|
||||
|
||||
runecolour rune = '\x03'
|
||||
runebold rune = '\x02'
|
||||
runemonospace rune = '\x11'
|
||||
runereverseColour rune = '\x16'
|
||||
runeitalic rune = '\x1d'
|
||||
runestrikethrough rune = '\x1e'
|
||||
runereset rune = '\x0f'
|
||||
runeunderline rune = '\x1f'
|
||||
|
||||
// valid characters in a colour code character, for speed
|
||||
colours1 string = "0123456789"
|
||||
)
|
||||
|
||||
var (
|
||||
// valtoescape replaces most of IRC characters with our escapes.
|
||||
valtoescape = strings.NewReplacer("$", "$$", colour, "$c", reverseColour, "$v", bold, "$b", italic, "$i", strikethrough, "$s", underline, "$u", monospace, "$m", reset, "$r")
|
||||
// valToStrip replaces most of the IRC characters with nothing
|
||||
valToStrip = strings.NewReplacer(colour, "$c", reverseColour, "", bold, "", italic, "", strikethrough, "", underline, "", monospace, "", reset, "")
|
||||
|
||||
// escapetoval contains most of our escapes and how they map to real IRC characters.
|
||||
// intentionally skips colour, since that's handled elsewhere.
|
||||
escapetoval = map[rune]string{
|
||||
'$': "$",
|
||||
'b': bold,
|
||||
'i': italic,
|
||||
'v': reverseColour,
|
||||
's': strikethrough,
|
||||
'u': underline,
|
||||
'm': monospace,
|
||||
'r': reset,
|
||||
}
|
||||
|
||||
// valid colour codes
|
||||
numtocolour = map[string]string{
|
||||
"99": "default",
|
||||
"15": "light grey",
|
||||
"14": "grey",
|
||||
"13": "pink",
|
||||
"12": "light blue",
|
||||
"11": "light cyan",
|
||||
"10": "cyan",
|
||||
"09": "light green",
|
||||
"08": "yellow",
|
||||
"07": "orange",
|
||||
"06": "magenta",
|
||||
"05": "brown",
|
||||
"04": "red",
|
||||
"03": "green",
|
||||
"02": "blue",
|
||||
"01": "black",
|
||||
"00": "white",
|
||||
"9": "light green",
|
||||
"8": "yellow",
|
||||
"7": "orange",
|
||||
"6": "magenta",
|
||||
"5": "brown",
|
||||
"4": "red",
|
||||
"3": "green",
|
||||
"2": "blue",
|
||||
"1": "black",
|
||||
"0": "white",
|
||||
}
|
||||
|
||||
// full and truncated colour codes
|
||||
colourcodesFull = map[string]string{
|
||||
"white": "00",
|
||||
"black": "01",
|
||||
"blue": "02",
|
||||
"green": "03",
|
||||
"red": "04",
|
||||
"brown": "05",
|
||||
"magenta": "06",
|
||||
"orange": "07",
|
||||
"yellow": "08",
|
||||
"light green": "09",
|
||||
"cyan": "10",
|
||||
"light cyan": "11",
|
||||
"light blue": "12",
|
||||
"pink": "13",
|
||||
"grey": "14",
|
||||
"light grey": "15",
|
||||
"default": "99",
|
||||
}
|
||||
colourcodesTruncated = map[string]string{
|
||||
"white": "0",
|
||||
"black": "1",
|
||||
"blue": "2",
|
||||
"green": "3",
|
||||
"red": "4",
|
||||
"brown": "5",
|
||||
"magenta": "6",
|
||||
"orange": "7",
|
||||
"yellow": "8",
|
||||
"light green": "9",
|
||||
"cyan": "10",
|
||||
"light cyan": "11",
|
||||
"light blue": "12",
|
||||
"pink": "13",
|
||||
"grey": "14",
|
||||
"light grey": "15",
|
||||
"default": "99",
|
||||
}
|
||||
)
|
||||
|
||||
// Escape takes a raw IRC string and returns it with our escapes.
|
||||
//
|
||||
// IE, it turns this: "This is a \x02cool\x02, \x034red\x0f message!"
|
||||
// into: "This is a $bcool$b, $c[red]red$r message!"
|
||||
func Escape(in string) string {
|
||||
// replace all our usual escapes
|
||||
in = valtoescape.Replace(in)
|
||||
|
||||
inRunes := []rune(in)
|
||||
//var out string
|
||||
out := strings.Builder{}
|
||||
for 0 < len(inRunes) {
|
||||
if 1 < len(inRunes) && inRunes[0] == '$' && inRunes[1] == 'c' {
|
||||
// handle colours
|
||||
out.WriteString("$c")
|
||||
inRunes = inRunes[2:] // strip colour code chars
|
||||
|
||||
if len(inRunes) < 1 || !strings.Contains(colours1, string(inRunes[0])) {
|
||||
out.WriteString("[]")
|
||||
continue
|
||||
}
|
||||
|
||||
var foreBuffer, backBuffer string
|
||||
foreBuffer += string(inRunes[0])
|
||||
inRunes = inRunes[1:]
|
||||
if 0 < len(inRunes) && strings.Contains(colours1, string(inRunes[0])) {
|
||||
foreBuffer += string(inRunes[0])
|
||||
inRunes = inRunes[1:]
|
||||
}
|
||||
if 1 < len(inRunes) && inRunes[0] == ',' && strings.Contains(colours1, string(inRunes[1])) {
|
||||
backBuffer += string(inRunes[1])
|
||||
inRunes = inRunes[2:]
|
||||
if 0 < len(inRunes) && strings.Contains(colours1, string(inRunes[0])) {
|
||||
backBuffer += string(inRunes[0])
|
||||
inRunes = inRunes[1:]
|
||||
}
|
||||
}
|
||||
|
||||
foreName, exists := numtocolour[foreBuffer]
|
||||
if !exists {
|
||||
foreName = foreBuffer
|
||||
}
|
||||
backName, exists := numtocolour[backBuffer]
|
||||
if !exists {
|
||||
backName = backBuffer
|
||||
}
|
||||
|
||||
out.WriteRune('[')
|
||||
out.WriteString(foreName)
|
||||
if backName != "" {
|
||||
out.WriteRune(',')
|
||||
out.WriteString(backName)
|
||||
}
|
||||
out.WriteRune(']')
|
||||
|
||||
} else {
|
||||
// special case for $$c
|
||||
if len(inRunes) > 2 && inRunes[0] == '$' && inRunes[1] == '$' && inRunes[2] == 'c' {
|
||||
out.WriteRune(inRunes[0])
|
||||
out.WriteRune(inRunes[1])
|
||||
out.WriteRune(inRunes[2])
|
||||
inRunes = inRunes[3:]
|
||||
} else {
|
||||
out.WriteRune(inRunes[0])
|
||||
inRunes = inRunes[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return out.String()
|
||||
}
|
||||
|
||||
// Strip takes a raw IRC string and returns it with all formatting codes removed
|
||||
// IE, it turns this: "This is a \x02cool\x02, \x034red\x0f message!"
|
||||
// into: "This is a cool, red message!"
|
||||
func Strip(in string) string {
	out := strings.Builder{}
	runes := []rune(in)
	if out.Len() < len(runes) { // Reduce allocations where needed
		out.Grow(len(in) - out.Len())
	}
	for len(runes) > 0 {
		switch runes[0] {
		case runebold, runemonospace, runereverseColour, runeitalic, runestrikethrough, runeunderline, runereset:
			runes = runes[1:]
		case runecolour:
			runes = removeColour(runes)
		default:
			out.WriteRune(runes[0])
			runes = runes[1:]
		}
	}
	return out.String()
}

func removeNumber(runes []rune) []rune {
	if len(runes) > 0 && runes[0] >= '0' && runes[0] <= '9' {
		runes = runes[1:]
	}
	return runes
}

func removeColour(runes []rune) []rune {
	if runes[0] != runecolour {
		return runes
	}

	runes = runes[1:]
	runes = removeNumber(runes)
	runes = removeNumber(runes)

	if len(runes) > 1 && runes[0] == ',' && runes[1] >= '0' && runes[1] <= '9' {
		runes = runes[2:]
	} else {
		return runes // Nothing else because we dont have a comma
	}
	runes = removeNumber(runes)
	return runes
}

// Unescape takes our escaped string and returns a raw IRC string.
//
// IE, it turns this: "This is a $bcool$b, $c[red]red$r message!"
// into this: "This is a \x02cool\x02, \x034red\x0f message!"
func Unescape(in string) string {
	out := strings.Builder{}

	remaining := []rune(in)
	for 0 < len(remaining) {
		char := remaining[0]
		remaining = remaining[1:]

		if char == '$' && 0 < len(remaining) {
			char = remaining[0]
			remaining = remaining[1:]

			val, exists := escapetoval[char]
			if exists {
				out.WriteString(val)
			} else if char == 'c' {
				out.WriteString(colour)

				if len(remaining) < 2 || remaining[0] != '[' {
					continue
				}

				// get colour names
				var coloursBuffer string
				remaining = remaining[1:]
				for remaining[0] != ']' {
					coloursBuffer += string(remaining[0])
					remaining = remaining[1:]
				}
				remaining = remaining[1:] // strip final ']'

				colours := strings.Split(coloursBuffer, ",")
				var foreColour, backColour string
				foreColour = colours[0]
				if 1 < len(colours) {
					backColour = colours[1]
				}

				// decide whether we can use truncated colour codes
				canUseTruncated := len(remaining) < 1 || !strings.Contains(colours1, string(remaining[0]))

				// turn colour names into real codes
				var foreColourCode, backColourCode string
				var exists bool

				if backColour != "" || canUseTruncated {
					foreColourCode, exists = colourcodesTruncated[foreColour]
				} else {
					foreColourCode, exists = colourcodesFull[foreColour]
				}
				if exists {
					foreColour = foreColourCode
				}

				if backColour != "" {
					if canUseTruncated {
						backColourCode, exists = colourcodesTruncated[backColour]
					} else {
						backColourCode, exists = colourcodesFull[backColour]
					}
					if exists {
						backColour = backColourCode
					}
				}

				// output colour codes
				out.WriteString(foreColour)
				if backColour != "" {
					out.WriteRune(',')
					out.WriteString(backColour)
				}
			} else {
				// unknown char
				out.WriteRune(char)
			}
		} else {
			out.WriteRune(char)
		}
	}

	return out.String()
}
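For reference, a minimal sketch of how the Strip and Unescape helpers above might be used together. The import path `github.com/goshuirc/irc-go/ircfmt` is assumed from the vendor layout in this commit, and the expected Unescape output is taken from its doc comment:

```go
package main

import (
	"fmt"

	"github.com/goshuirc/irc-go/ircfmt"
)

func main() {
	// $-escapes -> raw IRC control codes (example taken from the Unescape doc comment).
	raw := ircfmt.Unescape("This is a $bcool$b, $c[red]red$r message!")
	fmt.Printf("%q\n", raw) // "This is a \x02cool\x02, \x034red\x0f message!"

	// Strip removes all formatting/colour codes again, e.g. for logging.
	fmt.Println(ircfmt.Strip(raw)) // This is a cool, red message!
}
```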
7
vendor/github.com/goshuirc/irc-go/ircmatch/doc.go
generated
vendored
Normal file
7
vendor/github.com/goshuirc/irc-go/ircmatch/doc.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
// written by Daniel Oaks <daniel@danieloaks.net>
// released under the ISC license

/*
Package ircmatch handles matching IRC strings with the traditional glob-like syntax.
*/
package ircmatch
57
vendor/github.com/goshuirc/irc-go/ircmatch/ircmatch.go
generated
vendored
Normal file
57
vendor/github.com/goshuirc/irc-go/ircmatch/ircmatch.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
package ircmatch

import enfa "github.com/goshuirc/e-nfa"

// Matcher represents an object that can match IRC strings.
type Matcher struct {
	internalENFA *enfa.ENFA
}

// MakeMatch creates a Matcher.
func MakeMatch(globTemplate string) Matcher {
	var newmatch Matcher

	// assemble internal enfa
	newmatch.internalENFA = enfa.NewENFA(0, false)

	var currentState int
	var lastWasStar bool
	for _, char := range globTemplate {
		if char == '*' {
			if lastWasStar {
				continue
			}
			newmatch.internalENFA.AddTransition(currentState, "*", currentState)
			lastWasStar = true
			continue
		} else if char == '?' {
			newmatch.internalENFA.AddState(currentState+1, false)
			newmatch.internalENFA.AddTransition(currentState, "?", currentState+1)
			currentState++
		} else {
			newmatch.internalENFA.AddState(currentState+1, false)
			newmatch.internalENFA.AddTransition(currentState, string(char), currentState+1)
			currentState++
		}

		lastWasStar = false
	}

	// create end state
	newmatch.internalENFA.AddState(currentState+1, true)
	newmatch.internalENFA.AddTransition(currentState, "", currentState+1)

	return newmatch
}

// Match returns true if the given string matches this glob.
func (menfa *Matcher) Match(search string) bool {
	var searchChars []string
	for _, char := range search {
		searchChars = append(searchChars, string(char))
	}

	isMatch := menfa.internalENFA.VerifyInputs(searchChars)
	menfa.internalENFA.Reset()
	return isMatch
}
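MakeMatch compiles the glob into an ε-NFA ('*' becomes a self-loop, '?' and literal runes each become a single transition into a fresh state) and Match replays the search string through it. A minimal usage sketch, assuming the vendored import path `github.com/goshuirc/irc-go/ircmatch` and a hypothetical hostmask:

```go
package main

import (
	"fmt"

	"github.com/goshuirc/irc-go/ircmatch"
)

func main() {
	// Hypothetical hostmask glob: '*' matches any run of characters, '?' exactly one.
	matcher := ircmatch.MakeMatch("nick!*@*.example.com")

	fmt.Println(matcher.Match("nick!user@irc.example.com"))  // true
	fmt.Println(matcher.Match("other!user@irc.example.com")) // false
}
```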
7
vendor/github.com/goshuirc/irc-go/ircmsg/doc.go
generated
vendored
Normal file
7
vendor/github.com/goshuirc/irc-go/ircmsg/doc.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
// written by Daniel Oaks <daniel@danieloaks.net>
// released under the ISC license

/*
Package ircmsg helps parse and create lines for IRC connections.
*/
package ircmsg
401
vendor/github.com/goshuirc/irc-go/ircmsg/message.go
generated
vendored
Normal file
401
vendor/github.com/goshuirc/irc-go/ircmsg/message.go
generated
vendored
Normal file
@ -0,0 +1,401 @@
|
||||
// Copyright (c) 2016-2019 Daniel Oaks <daniel@danieloaks.net>
|
||||
// Copyright (c) 2018-2019 Shivaram Lingamneni <slingamn@cs.stanford.edu>
|
||||
|
||||
// released under the ISC license
|
||||
|
||||
package ircmsg
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// "The size limit for message tags is 8191 bytes, including the leading
|
||||
// '@' (0x40) and trailing space ' ' (0x20) characters."
|
||||
MaxlenTags = 8191
|
||||
|
||||
// MaxlenTags - ('@' + ' ')
|
||||
MaxlenTagData = MaxlenTags - 2
|
||||
|
||||
// "Clients MUST NOT send messages with tag data exceeding 4094 bytes,
|
||||
// this includes tags with or without the client-only prefix."
|
||||
MaxlenClientTagData = 4094
|
||||
|
||||
// "Servers MUST NOT add tag data exceeding 4094 bytes to messages."
|
||||
MaxlenServerTagData = 4094
|
||||
|
||||
// '@' + MaxlenClientTagData + ' '
|
||||
// this is the analogue of MaxlenTags when the source of the message is a client
|
||||
MaxlenTagsFromClient = MaxlenClientTagData + 2
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorLineIsEmpty indicates that the given IRC line was empty.
|
||||
ErrorLineIsEmpty = errors.New("Line is empty")
|
||||
// ErrorLineContainsBadChar indicates that the line contained invalid characters
|
||||
ErrorLineContainsBadChar = errors.New("Line contains invalid characters")
|
||||
// ErrorLineTooLong indicates that the message exceeded the maximum tag length
|
||||
// (the name references 417 ERR_INPUTTOOLONG; we reserve the right to return it
|
||||
// for messages that exceed the non-tag length limit)
|
||||
ErrorLineTooLong = errors.New("Line could not be parsed because a specified length limit was exceeded")
|
||||
|
||||
ErrorCommandMissing = errors.New("IRC messages MUST have a command")
|
||||
ErrorBadParam = errors.New("Cannot have an empty param, a param with spaces, or a param that starts with ':' before the last parameter")
|
||||
)
|
||||
|
||||
// IrcMessage represents an IRC message, as defined by the RFCs and as
|
||||
// extended by the IRCv3 Message Tags specification with the introduction
|
||||
// of message tags.
|
||||
type IrcMessage struct {
|
||||
Prefix string
|
||||
Command string
|
||||
Params []string
|
||||
tags map[string]string
|
||||
clientOnlyTags map[string]string
|
||||
}
|
||||
|
||||
// GetTag returns whether a tag is present, and if so, what its value is.
|
||||
func (msg *IrcMessage) GetTag(tagName string) (present bool, value string) {
|
||||
if len(tagName) == 0 {
|
||||
return
|
||||
} else if tagName[0] == '+' {
|
||||
value, present = msg.clientOnlyTags[tagName]
|
||||
return
|
||||
} else {
|
||||
value, present = msg.tags[tagName]
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// HasTag returns whether a tag is present.
|
||||
func (msg *IrcMessage) HasTag(tagName string) (present bool) {
|
||||
present, _ = msg.GetTag(tagName)
|
||||
return
|
||||
}
|
||||
|
||||
// SetTag sets a tag.
|
||||
func (msg *IrcMessage) SetTag(tagName, tagValue string) {
|
||||
if len(tagName) == 0 {
|
||||
return
|
||||
} else if tagName[0] == '+' {
|
||||
if msg.clientOnlyTags == nil {
|
||||
msg.clientOnlyTags = make(map[string]string)
|
||||
}
|
||||
msg.clientOnlyTags[tagName] = tagValue
|
||||
} else {
|
||||
if msg.tags == nil {
|
||||
msg.tags = make(map[string]string)
|
||||
}
|
||||
msg.tags[tagName] = tagValue
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteTag deletes a tag.
|
||||
func (msg *IrcMessage) DeleteTag(tagName string) {
|
||||
if len(tagName) == 0 {
|
||||
return
|
||||
} else if tagName[0] == '+' {
|
||||
delete(msg.clientOnlyTags, tagName)
|
||||
} else {
|
||||
delete(msg.tags, tagName)
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateTags is a convenience to set multiple tags at once.
|
||||
func (msg *IrcMessage) UpdateTags(tags map[string]string) {
|
||||
for name, value := range tags {
|
||||
msg.SetTag(name, value)
|
||||
}
|
||||
}
|
||||
|
||||
// AllTags returns all tags as a single map.
|
||||
func (msg *IrcMessage) AllTags() (result map[string]string) {
|
||||
result = make(map[string]string, len(msg.tags)+len(msg.clientOnlyTags))
|
||||
for name, value := range msg.tags {
|
||||
result[name] = value
|
||||
}
|
||||
for name, value := range msg.clientOnlyTags {
|
||||
result[name] = value
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ClientOnlyTags returns the client-only tags (the tags with the + prefix).
|
||||
// The returned map may be internal storage of the IrcMessage object and
|
||||
// should not be modified.
|
||||
func (msg *IrcMessage) ClientOnlyTags() map[string]string {
|
||||
return msg.clientOnlyTags
|
||||
}
|
||||
|
||||
// ParseLine creates and returns a message from the given IRC line.
|
||||
func ParseLine(line string) (ircmsg IrcMessage, err error) {
|
||||
return parseLine(line, 0, 0)
|
||||
}
|
||||
|
||||
// ParseLineStrict creates and returns an IrcMessage from the given IRC line,
|
||||
// taking the maximum length into account and truncating the message as appropriate.
|
||||
// If fromClient is true, it enforces the client limit on tag data length (4094 bytes),
|
||||
// allowing the server to return ERR_INPUTTOOLONG as appropriate. If truncateLen is
|
||||
// nonzero, it is the length at which the non-tag portion of the message is truncated.
|
||||
func ParseLineStrict(line string, fromClient bool, truncateLen int) (ircmsg IrcMessage, err error) {
|
||||
maxTagDataLength := MaxlenTagData
|
||||
if fromClient {
|
||||
maxTagDataLength = MaxlenClientTagData
|
||||
}
|
||||
return parseLine(line, maxTagDataLength, truncateLen)
|
||||
}
|
||||
|
||||
// slice off any amount of ' ' from the front of the string
|
||||
func trimInitialSpaces(str string) string {
|
||||
var i int
|
||||
for i = 0; i < len(str) && str[i] == ' '; i += 1 {
|
||||
}
|
||||
return str[i:]
|
||||
}
|
||||
|
||||
func parseLine(line string, maxTagDataLength int, truncateLen int) (ircmsg IrcMessage, err error) {
|
||||
if strings.IndexByte(line, '\x00') != -1 {
|
||||
err = ErrorLineContainsBadChar
|
||||
return
|
||||
}
|
||||
|
||||
// trim to the first appearance of either '\r' or '\n':
|
||||
lineEnd := strings.IndexByte(line, '\r')
|
||||
newlineIndex := strings.IndexByte(line, '\n')
|
||||
if newlineIndex != -1 && (lineEnd == -1 || newlineIndex < lineEnd) {
|
||||
lineEnd = newlineIndex
|
||||
}
|
||||
if lineEnd != -1 {
|
||||
line = line[:lineEnd]
|
||||
}
|
||||
|
||||
if len(line) < 1 {
|
||||
return ircmsg, ErrorLineIsEmpty
|
||||
}
|
||||
|
||||
// tags
|
||||
if line[0] == '@' {
|
||||
tagEnd := strings.IndexByte(line, ' ')
|
||||
if tagEnd == -1 {
|
||||
return ircmsg, ErrorLineIsEmpty
|
||||
}
|
||||
tags := line[1:tagEnd]
|
||||
if 0 < maxTagDataLength && maxTagDataLength < len(tags) {
|
||||
return ircmsg, ErrorLineTooLong
|
||||
}
|
||||
err = ircmsg.parseTags(tags)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// skip over the tags and the separating space
|
||||
line = line[tagEnd+1:]
|
||||
}
|
||||
|
||||
// truncate if desired
|
||||
if 0 < truncateLen && truncateLen < len(line) {
|
||||
line = line[:truncateLen]
|
||||
}
|
||||
|
||||
// modern: "These message parts, and parameters themselves, are separated
|
||||
// by one or more ASCII SPACE characters"
|
||||
line = trimInitialSpaces(line)
|
||||
|
||||
// prefix
|
||||
if 0 < len(line) && line[0] == ':' {
|
||||
prefixEnd := strings.IndexByte(line, ' ')
|
||||
if prefixEnd == -1 {
|
||||
return ircmsg, ErrorLineIsEmpty
|
||||
}
|
||||
ircmsg.Prefix = line[1:prefixEnd]
|
||||
// skip over the prefix and the separating space
|
||||
line = line[prefixEnd+1:]
|
||||
}
|
||||
|
||||
line = trimInitialSpaces(line)
|
||||
|
||||
// command
|
||||
commandEnd := strings.IndexByte(line, ' ')
|
||||
paramStart := commandEnd + 1
|
||||
if commandEnd == -1 {
|
||||
commandEnd = len(line)
|
||||
paramStart = len(line)
|
||||
}
|
||||
// normalize command to uppercase:
|
||||
ircmsg.Command = strings.ToUpper(line[:commandEnd])
|
||||
if len(ircmsg.Command) == 0 {
|
||||
return ircmsg, ErrorLineIsEmpty
|
||||
}
|
||||
line = line[paramStart:]
|
||||
|
||||
for {
|
||||
line = trimInitialSpaces(line)
|
||||
if len(line) == 0 {
|
||||
break
|
||||
}
|
||||
// handle trailing
|
||||
if line[0] == ':' {
|
||||
ircmsg.Params = append(ircmsg.Params, line[1:])
|
||||
break
|
||||
}
|
||||
paramEnd := strings.IndexByte(line, ' ')
|
||||
if paramEnd == -1 {
|
||||
ircmsg.Params = append(ircmsg.Params, line)
|
||||
break
|
||||
}
|
||||
ircmsg.Params = append(ircmsg.Params, line[:paramEnd])
|
||||
line = line[paramEnd+1:]
|
||||
}
|
||||
|
||||
return ircmsg, nil
|
||||
}
|
||||
|
||||
// helper to parse tags
|
||||
func (ircmsg *IrcMessage) parseTags(tags string) (err error) {
|
||||
for 0 < len(tags) {
|
||||
tagEnd := strings.IndexByte(tags, ';')
|
||||
endPos := tagEnd
|
||||
nextPos := tagEnd + 1
|
||||
if tagEnd == -1 {
|
||||
endPos = len(tags)
|
||||
nextPos = len(tags)
|
||||
}
|
||||
tagPair := tags[:endPos]
|
||||
equalsIndex := strings.IndexByte(tagPair, '=')
|
||||
var tagName, tagValue string
|
||||
if equalsIndex == -1 {
|
||||
// tag with no value
|
||||
tagName = tagPair
|
||||
} else {
|
||||
tagName, tagValue = tagPair[:equalsIndex], tagPair[equalsIndex+1:]
|
||||
}
|
||||
ircmsg.SetTag(tagName, UnescapeTagValue(tagValue))
|
||||
// skip over the tag just processed, plus the delimiting ; if any
|
||||
tags = tags[nextPos:]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MakeMessage provides a simple way to create a new IrcMessage.
|
||||
func MakeMessage(tags map[string]string, prefix string, command string, params ...string) (ircmsg IrcMessage) {
|
||||
ircmsg.Prefix = prefix
|
||||
ircmsg.Command = command
|
||||
ircmsg.Params = params
|
||||
ircmsg.UpdateTags(tags)
|
||||
return ircmsg
|
||||
}
|
||||
|
||||
// Line returns a sendable line created from an IrcMessage.
|
||||
func (ircmsg *IrcMessage) Line() (result string, err error) {
|
||||
bytes, err := ircmsg.line(0, 0, 0, 0)
|
||||
if err == nil {
|
||||
result = string(bytes)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// LineBytes returns a sendable line created from an IrcMessage.
|
||||
func (ircmsg *IrcMessage) LineBytes() (result []byte, err error) {
|
||||
result, err = ircmsg.line(0, 0, 0, 0)
|
||||
return
|
||||
}
|
||||
|
||||
// LineBytesStrict returns a sendable line, as a []byte, created from an IrcMessage.
|
||||
// fromClient controls whether the server-side or client-side tag length limit
|
||||
// is enforced. If truncateLen is nonzero, it is the length at which the
|
||||
// non-tag portion of the message is truncated.
|
||||
func (ircmsg *IrcMessage) LineBytesStrict(fromClient bool, truncateLen int) ([]byte, error) {
|
||||
var tagLimit, clientOnlyTagDataLimit, serverAddedTagDataLimit int
|
||||
if fromClient {
|
||||
// enforce client max tags:
|
||||
// <client_max> (4096) :: '@' <tag_data 4094> ' '
|
||||
tagLimit = MaxlenTagsFromClient
|
||||
} else {
|
||||
// on the server side, enforce separate client-only and server-added tag budgets:
|
||||
// "Servers MUST NOT add tag data exceeding 4094 bytes to messages."
|
||||
// <combined_max> (8191) :: '@' <tag_data 4094> ';' <tag_data 4094> ' '
|
||||
clientOnlyTagDataLimit = MaxlenClientTagData
|
||||
serverAddedTagDataLimit = MaxlenServerTagData
|
||||
}
|
||||
return ircmsg.line(tagLimit, clientOnlyTagDataLimit, serverAddedTagDataLimit, truncateLen)
|
||||
}
|
||||
|
||||
// line returns a sendable line created from an IrcMessage.
|
||||
func (ircmsg *IrcMessage) line(tagLimit, clientOnlyTagDataLimit, serverAddedTagDataLimit, truncateLen int) ([]byte, error) {
|
||||
if len(ircmsg.Command) < 1 {
|
||||
return nil, ErrorCommandMissing
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
// write the tags, computing the budgets for client-only tags and regular tags
|
||||
var lenRegularTags, lenClientOnlyTags, lenTags int
|
||||
if 0 < len(ircmsg.tags) || 0 < len(ircmsg.clientOnlyTags) {
|
||||
buf.WriteByte('@')
|
||||
firstTag := true
|
||||
writeTags := func(tags map[string]string) {
|
||||
for tag, val := range tags {
|
||||
if !firstTag {
|
||||
buf.WriteByte(';') // delimiter
|
||||
}
|
||||
buf.WriteString(tag)
|
||||
if val != "" {
|
||||
buf.WriteByte('=')
|
||||
buf.WriteString(EscapeTagValue(val))
|
||||
}
|
||||
firstTag = false
|
||||
}
|
||||
}
|
||||
writeTags(ircmsg.tags)
|
||||
lenRegularTags = buf.Len() - 1 // '@' is not counted
|
||||
writeTags(ircmsg.clientOnlyTags)
|
||||
lenClientOnlyTags = (buf.Len() - 1) - lenRegularTags // '@' is not counted
|
||||
if lenRegularTags != 0 {
|
||||
// semicolon between regular and client-only tags is not counted
|
||||
lenClientOnlyTags -= 1
|
||||
}
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
lenTags = buf.Len()
|
||||
|
||||
if 0 < tagLimit && tagLimit < buf.Len() {
|
||||
return nil, ErrorLineTooLong
|
||||
}
|
||||
if (0 < clientOnlyTagDataLimit && clientOnlyTagDataLimit < lenClientOnlyTags) || (0 < serverAddedTagDataLimit && serverAddedTagDataLimit < lenRegularTags) {
|
||||
return nil, ErrorLineTooLong
|
||||
}
|
||||
|
||||
if len(ircmsg.Prefix) > 0 {
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(ircmsg.Prefix)
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
|
||||
buf.WriteString(ircmsg.Command)
|
||||
|
||||
for i, param := range ircmsg.Params {
|
||||
buf.WriteByte(' ')
|
||||
if len(param) < 1 || strings.IndexByte(param, ' ') != -1 || param[0] == ':' {
|
||||
if i != len(ircmsg.Params)-1 {
|
||||
return nil, ErrorBadParam
|
||||
}
|
||||
buf.WriteByte(':')
|
||||
}
|
||||
buf.WriteString(param)
|
||||
}
|
||||
|
||||
// truncate if desired
|
||||
// -2 for \r\n
|
||||
restLen := buf.Len() - lenTags
|
||||
if 0 < truncateLen && (truncateLen-2) < restLen {
|
||||
buf.Truncate(lenTags + (truncateLen - 2))
|
||||
}
|
||||
buf.WriteString("\r\n")
|
||||
|
||||
result := buf.Bytes()
|
||||
if bytes.IndexByte(result, '\x00') != -1 {
|
||||
return nil, ErrorLineContainsBadChar
|
||||
}
|
||||
return result, nil
|
||||
}
|
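ParseLine/ParseLineStrict and MakeMessage/LineBytesStrict form the two halves of the round trip: split an incoming line into tags, prefix, command and params, or assemble and length-check an outgoing one. A hedged sketch, assuming the vendored import path `github.com/goshuirc/irc-go/ircmsg` and illustrative nick/channel names:

```go
package main

import (
	"fmt"

	"github.com/goshuirc/irc-go/ircmsg"
)

func main() {
	// Parse an incoming line: tags, prefix, command and params are split out,
	// and tag values are unescaped automatically.
	msg, err := ircmsg.ParseLine("@time=2019-01-04T14:33:26.123Z :nick!u@h PRIVMSG #chan :hello there")
	if err != nil {
		panic(err)
	}
	fmt.Println(msg.Command, msg.Params) // PRIVMSG [#chan hello there]

	// Assemble an outgoing line; LineBytesStrict(false, 512) applies the server-side
	// tag budgets and truncates the non-tag portion to 512 bytes (including \r\n).
	reply := ircmsg.MakeMessage(nil, "server.example", "PRIVMSG", "#chan", "hello yourself")
	line, err := reply.LineBytesStrict(false, 512)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", line) // :server.example PRIVMSG #chan :hello yourself\r\n
}
```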
75
vendor/github.com/goshuirc/irc-go/ircmsg/tags.go
generated
vendored
Normal file
75
vendor/github.com/goshuirc/irc-go/ircmsg/tags.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
// written by Daniel Oaks <daniel@danieloaks.net>
|
||||
// released under the ISC license
|
||||
|
||||
package ircmsg
|
||||
|
||||
import "bytes"
|
||||
import "strings"
|
||||
|
||||
var (
|
||||
// valtoescape replaces real characters with message tag escapes.
|
||||
valtoescape = strings.NewReplacer("\\", "\\\\", ";", "\\:", " ", "\\s", "\r", "\\r", "\n", "\\n")
|
||||
|
||||
escapedCharLookupTable [256]byte
|
||||
)
|
||||
|
||||
func init() {
|
||||
// most chars escape to themselves
|
||||
for i := 0; i < 256; i += 1 {
|
||||
escapedCharLookupTable[i] = byte(i)
|
||||
}
|
||||
// these are the exceptions
|
||||
escapedCharLookupTable[':'] = ';'
|
||||
escapedCharLookupTable['s'] = ' '
|
||||
escapedCharLookupTable['r'] = '\r'
|
||||
escapedCharLookupTable['n'] = '\n'
|
||||
}
|
||||
|
||||
// EscapeTagValue takes a value, and returns an escaped message tag value.
|
||||
//
|
||||
// This function is automatically used when lines are created from an
|
||||
// IrcMessage, so you don't need to call it yourself before creating a line.
|
||||
func EscapeTagValue(inString string) string {
|
||||
return valtoescape.Replace(inString)
|
||||
}
|
||||
|
||||
// UnescapeTagValue takes an escaped message tag value, and returns the raw value.
|
||||
//
|
||||
// This function is automatically used when lines are interpreted by ParseLine,
|
||||
// so you don't need to call it yourself after parsing a line.
|
||||
func UnescapeTagValue(inString string) string {
|
||||
// buf.Len() == 0 is the fastpath where we have not needed to unescape any chars
|
||||
var buf bytes.Buffer
|
||||
remainder := inString
|
||||
for {
|
||||
backslashPos := strings.IndexByte(remainder, '\\')
|
||||
|
||||
if backslashPos == -1 {
|
||||
if buf.Len() == 0 {
|
||||
return inString
|
||||
} else {
|
||||
buf.WriteString(remainder)
|
||||
break
|
||||
}
|
||||
} else if backslashPos == len(remainder)-1 {
|
||||
// trailing backslash, which we strip
|
||||
if buf.Len() == 0 {
|
||||
return inString[:len(inString)-1]
|
||||
} else {
|
||||
buf.WriteString(remainder[:len(remainder)-1])
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// non-trailing backslash detected; we're now on the slowpath
|
||||
// where we modify the string
|
||||
if buf.Len() == 0 {
|
||||
buf.Grow(len(inString)) // just an optimization
|
||||
}
|
||||
buf.WriteString(remainder[:backslashPos])
|
||||
buf.WriteByte(escapedCharLookupTable[remainder[backslashPos+1]])
|
||||
remainder = remainder[backslashPos+2:]
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
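EscapeTagValue and UnescapeTagValue apply the tag-value escaping defined by the replacer above (backslash, semicolon, space, CR and LF). A small round-trip example, under the same import-path assumption as the sketches above:

```go
package main

import (
	"fmt"

	"github.com/goshuirc/irc-go/ircmsg"
)

func main() {
	escaped := ircmsg.EscapeTagValue("hello; world")
	fmt.Println(escaped) // hello\:\sworld

	fmt.Println(ircmsg.UnescapeTagValue(escaped)) // hello; world
}
```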
9
vendor/github.com/mattn/go-colorable/.travis.yml
generated
vendored
Normal file
9
vendor/github.com/mattn/go-colorable/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
language: go
|
||||
go:
|
||||
- tip
|
||||
|
||||
before_install:
|
||||
- go get github.com/mattn/goveralls
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
script:
|
||||
- $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw
|
21
vendor/github.com/mattn/go-colorable/LICENSE
generated
vendored
Normal file
21
vendor/github.com/mattn/go-colorable/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Yasuhiro Matsumoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
48
vendor/github.com/mattn/go-colorable/README.md
generated
vendored
Normal file
48
vendor/github.com/mattn/go-colorable/README.md
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
# go-colorable
|
||||
|
||||
[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
|
||||
[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable)
|
||||
[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master)
|
||||
[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
|
||||
|
||||
Colorable writer for windows.
|
||||
|
||||
For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.)
|
||||
This package is possible to handle escape sequence for ansi color on windows.
|
||||
|
||||
## Too Bad!
|
||||
|
||||
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
|
||||
|
||||
|
||||
## So Good!
|
||||
|
||||
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
|
||||
logrus.SetOutput(colorable.NewColorableStdout())
|
||||
|
||||
logrus.Info("succeeded")
|
||||
logrus.Warn("not correct")
|
||||
logrus.Error("something error")
|
||||
logrus.Fatal("panic")
|
||||
```
|
||||
|
||||
You can compile above code on non-windows OSs.
|
||||
|
||||
## Installation
|
||||
|
||||
```
|
||||
$ go get github.com/mattn/go-colorable
|
||||
```
|
||||
|
||||
# License
|
||||
|
||||
MIT
|
||||
|
||||
# Author
|
||||
|
||||
Yasuhiro Matsumoto (a.k.a mattn)
|
29
vendor/github.com/mattn/go-colorable/colorable_appengine.go
generated
vendored
Normal file
29
vendor/github.com/mattn/go-colorable/colorable_appengine.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// +build appengine
|
||||
|
||||
package colorable
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
_ "github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
// NewColorable returns new instance of Writer which handles escape sequence.
|
||||
func NewColorable(file *os.File) io.Writer {
|
||||
if file == nil {
|
||||
panic("nil passed instead of *os.File to NewColorable()")
|
||||
}
|
||||
|
||||
return file
|
||||
}
|
||||
|
||||
// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
|
||||
func NewColorableStdout() io.Writer {
|
||||
return os.Stdout
|
||||
}
|
||||
|
||||
// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
|
||||
func NewColorableStderr() io.Writer {
|
||||
return os.Stderr
|
||||
}
|
30
vendor/github.com/mattn/go-colorable/colorable_others.go
generated
vendored
Normal file
30
vendor/github.com/mattn/go-colorable/colorable_others.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
// +build !windows
|
||||
// +build !appengine
|
||||
|
||||
package colorable
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
_ "github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
// NewColorable returns new instance of Writer which handles escape sequence.
|
||||
func NewColorable(file *os.File) io.Writer {
|
||||
if file == nil {
|
||||
panic("nil passed instead of *os.File to NewColorable()")
|
||||
}
|
||||
|
||||
return file
|
||||
}
|
||||
|
||||
// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
|
||||
func NewColorableStdout() io.Writer {
|
||||
return os.Stdout
|
||||
}
|
||||
|
||||
// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
|
||||
func NewColorableStderr() io.Writer {
|
||||
return os.Stderr
|
||||
}
|
1005
vendor/github.com/mattn/go-colorable/colorable_windows.go
generated
vendored
Normal file
1005
vendor/github.com/mattn/go-colorable/colorable_windows.go
generated
vendored
Normal file
File diff suppressed because it is too large
3
vendor/github.com/mattn/go-colorable/go.mod
generated
vendored
Normal file
3
vendor/github.com/mattn/go-colorable/go.mod
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
module github.com/mattn/go-colorable
|
||||
|
||||
require github.com/mattn/go-isatty v0.0.8
|
4
vendor/github.com/mattn/go-colorable/go.sum
generated
vendored
Normal file
4
vendor/github.com/mattn/go-colorable/go.sum
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
55
vendor/github.com/mattn/go-colorable/noncolorable.go
generated
vendored
Normal file
55
vendor/github.com/mattn/go-colorable/noncolorable.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
||||
package colorable
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// NonColorable holds writer but removes escape sequence.
|
||||
type NonColorable struct {
|
||||
out io.Writer
|
||||
}
|
||||
|
||||
// NewNonColorable returns new instance of Writer which removes escape sequence from Writer.
|
||||
func NewNonColorable(w io.Writer) io.Writer {
|
||||
return &NonColorable{out: w}
|
||||
}
|
||||
|
||||
// Write writes data on console
|
||||
func (w *NonColorable) Write(data []byte) (n int, err error) {
|
||||
er := bytes.NewReader(data)
|
||||
var bw [1]byte
|
||||
loop:
|
||||
for {
|
||||
c1, err := er.ReadByte()
|
||||
if err != nil {
|
||||
break loop
|
||||
}
|
||||
if c1 != 0x1b {
|
||||
bw[0] = c1
|
||||
w.out.Write(bw[:])
|
||||
continue
|
||||
}
|
||||
c2, err := er.ReadByte()
|
||||
if err != nil {
|
||||
break loop
|
||||
}
|
||||
if c2 != 0x5b {
|
||||
continue
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
for {
|
||||
c, err := er.ReadByte()
|
||||
if err != nil {
|
||||
break loop
|
||||
}
|
||||
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
|
||||
break
|
||||
}
|
||||
buf.Write([]byte(string(c)))
|
||||
}
|
||||
}
|
||||
|
||||
return len(data), nil
|
||||
}
|
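NonColorable consumes ESC '[' ... sequences up to their final letter (or '@') and forwards every other byte unchanged. A minimal sketch of wrapping stdout with it:

```go
package main

import (
	"fmt"
	"os"

	colorable "github.com/mattn/go-colorable"
)

func main() {
	// Wrap stdout so ANSI colour escapes are stripped instead of printed raw.
	w := colorable.NewNonColorable(os.Stdout)
	fmt.Fprintln(w, "\x1b[31mred text\x1b[0m") // prints: red text
}
```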
13
vendor/github.com/mattn/go-isatty/.travis.yml
generated
vendored
Normal file
13
vendor/github.com/mattn/go-isatty/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
language: go
|
||||
go:
|
||||
- tip
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
|
||||
before_install:
|
||||
- go get github.com/mattn/goveralls
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
script:
|
||||
- $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5
|
9
vendor/github.com/mattn/go-isatty/LICENSE
generated
vendored
Normal file
9
vendor/github.com/mattn/go-isatty/LICENSE
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
|
||||
|
||||
MIT License (Expat)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
50
vendor/github.com/mattn/go-isatty/README.md
generated
vendored
Normal file
50
vendor/github.com/mattn/go-isatty/README.md
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
# go-isatty
|
||||
|
||||
[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
|
||||
[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty)
|
||||
[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
|
||||
[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
|
||||
|
||||
isatty for golang
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mattn/go-isatty"
|
||||
"os"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if isatty.IsTerminal(os.Stdout.Fd()) {
|
||||
fmt.Println("Is Terminal")
|
||||
} else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
|
||||
fmt.Println("Is Cygwin/MSYS2 Terminal")
|
||||
} else {
|
||||
fmt.Println("Is Not Terminal")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
```
|
||||
$ go get github.com/mattn/go-isatty
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
|
||||
## Author
|
||||
|
||||
Yasuhiro Matsumoto (a.k.a mattn)
|
||||
|
||||
## Thanks
|
||||
|
||||
* k-takata: base idea for IsCygwinTerminal
|
||||
|
||||
https://github.com/k-takata/go-iscygpty
|
2
vendor/github.com/mattn/go-isatty/doc.go
generated
vendored
Normal file
2
vendor/github.com/mattn/go-isatty/doc.go
generated
vendored
Normal file
@ -0,0 +1,2 @@
// Package isatty implements interface to isatty
package isatty
5
vendor/github.com/mattn/go-isatty/go.mod
generated
vendored
Normal file
5
vendor/github.com/mattn/go-isatty/go.mod
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
module github.com/mattn/go-isatty
|
||||
|
||||
require golang.org/x/sys v0.0.0-20191008105621-543471e840be
|
||||
|
||||
go 1.14
|
4
vendor/github.com/mattn/go-isatty/go.sum
generated
vendored
Normal file
4
vendor/github.com/mattn/go-isatty/go.sum
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
23
vendor/github.com/mattn/go-isatty/isatty_android.go
generated
vendored
Normal file
23
vendor/github.com/mattn/go-isatty/isatty_android.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
// +build android
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const ioctlReadTermios = syscall.TCGETS
|
||||
|
||||
// IsTerminal return true if the file descriptor is terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
var termios syscall.Termios
|
||||
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
|
||||
return err == 0
|
||||
}
|
||||
|
||||
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
|
||||
// terminal. This is also always false on this environment.
|
||||
func IsCygwinTerminal(fd uintptr) bool {
|
||||
return false
|
||||
}
|
24
vendor/github.com/mattn/go-isatty/isatty_bsd.go
generated
vendored
Normal file
24
vendor/github.com/mattn/go-isatty/isatty_bsd.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
// +build darwin freebsd openbsd netbsd dragonfly
|
||||
// +build !appengine
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const ioctlReadTermios = syscall.TIOCGETA
|
||||
|
||||
// IsTerminal return true if the file descriptor is terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
var termios syscall.Termios
|
||||
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
|
||||
return err == 0
|
||||
}
|
||||
|
||||
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
|
||||
// terminal. This is also always false on this environment.
|
||||
func IsCygwinTerminal(fd uintptr) bool {
|
||||
return false
|
||||
}
|
15
vendor/github.com/mattn/go-isatty/isatty_others.go
generated
vendored
Normal file
15
vendor/github.com/mattn/go-isatty/isatty_others.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
// +build appengine js nacl
|
||||
|
||||
package isatty
|
||||
|
||||
// IsTerminal returns true if the file descriptor is terminal which
|
||||
// is always false on js and appengine classic which is a sandboxed PaaS.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
|
||||
// terminal. This is also always false on this environment.
|
||||
func IsCygwinTerminal(fd uintptr) bool {
|
||||
return false
|
||||
}
|
22
vendor/github.com/mattn/go-isatty/isatty_plan9.go
generated
vendored
Normal file
22
vendor/github.com/mattn/go-isatty/isatty_plan9.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
// +build plan9
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// IsTerminal returns true if the given file descriptor is a terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
path, err := syscall.Fd2path(fd)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return path == "/dev/cons" || path == "/mnt/term/dev/cons"
|
||||
}
|
||||
|
||||
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
|
||||
// terminal. This is also always false on this environment.
|
||||
func IsCygwinTerminal(fd uintptr) bool {
|
||||
return false
|
||||
}
|
22
vendor/github.com/mattn/go-isatty/isatty_solaris.go
generated
vendored
Normal file
22
vendor/github.com/mattn/go-isatty/isatty_solaris.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
// +build solaris
|
||||
// +build !appengine
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// IsTerminal returns true if the given file descriptor is a terminal.
|
||||
// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
var termio unix.Termio
|
||||
err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
|
||||
// terminal. This is also always false on this environment.
|
||||
func IsCygwinTerminal(fd uintptr) bool {
|
||||
return false
|
||||
}
|
19
vendor/github.com/mattn/go-isatty/isatty_tcgets.go
generated
vendored
Normal file
19
vendor/github.com/mattn/go-isatty/isatty_tcgets.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
// +build linux aix
|
||||
// +build !appengine
|
||||
// +build !android
|
||||
|
||||
package isatty
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
// IsTerminal return true if the file descriptor is terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
_, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
|
||||
// terminal. This is also always false on this environment.
|
||||
func IsCygwinTerminal(fd uintptr) bool {
|
||||
return false
|
||||
}
|
125
vendor/github.com/mattn/go-isatty/isatty_windows.go
generated
vendored
Normal file
125
vendor/github.com/mattn/go-isatty/isatty_windows.go
generated
vendored
Normal file
@ -0,0 +1,125 @@
|
||||
// +build windows
|
||||
// +build !appengine
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
objectNameInfo uintptr = 1
|
||||
fileNameInfo = 2
|
||||
fileTypePipe = 3
|
||||
)
|
||||
|
||||
var (
|
||||
kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
ntdll = syscall.NewLazyDLL("ntdll.dll")
|
||||
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
|
||||
procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
|
||||
procGetFileType = kernel32.NewProc("GetFileType")
|
||||
procNtQueryObject = ntdll.NewProc("NtQueryObject")
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Check if GetFileInformationByHandleEx is available.
|
||||
if procGetFileInformationByHandleEx.Find() != nil {
|
||||
procGetFileInformationByHandleEx = nil
|
||||
}
|
||||
}
|
||||
|
||||
// IsTerminal return true if the file descriptor is terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
var st uint32
|
||||
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
|
||||
return r != 0 && e == 0
|
||||
}
|
||||
|
||||
// Check pipe name is used for cygwin/msys2 pty.
|
||||
// Cygwin/MSYS2 PTY has a name like:
|
||||
// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
|
||||
func isCygwinPipeName(name string) bool {
|
||||
token := strings.Split(name, "-")
|
||||
if len(token) < 5 {
|
||||
return false
|
||||
}
|
||||
|
||||
if token[0] != `\msys` &&
|
||||
token[0] != `\cygwin` &&
|
||||
token[0] != `\Device\NamedPipe\msys` &&
|
||||
token[0] != `\Device\NamedPipe\cygwin` {
|
||||
return false
|
||||
}
|
||||
|
||||
if token[1] == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(token[2], "pty") {
|
||||
return false
|
||||
}
|
||||
|
||||
if token[3] != `from` && token[3] != `to` {
|
||||
return false
|
||||
}
|
||||
|
||||
if token[4] != "master" {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler
|
||||
// since GetFileInformationByHandleEx is not avilable under windows Vista and still some old fashion
|
||||
// guys are using Windows XP, this is a workaround for those guys, it will also work on system from
|
||||
// Windows vista to 10
|
||||
// see https://stackoverflow.com/a/18792477 for details
|
||||
func getFileNameByHandle(fd uintptr) (string, error) {
|
||||
if procNtQueryObject == nil {
|
||||
return "", errors.New("ntdll.dll: NtQueryObject not supported")
|
||||
}
|
||||
|
||||
var buf [4 + syscall.MAX_PATH]uint16
|
||||
var result int
|
||||
r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
|
||||
fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
|
||||
if r != 0 {
|
||||
return "", e
|
||||
}
|
||||
return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
|
||||
}
|
||||
|
||||
// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
|
||||
// terminal.
|
||||
func IsCygwinTerminal(fd uintptr) bool {
|
||||
if procGetFileInformationByHandleEx == nil {
|
||||
name, err := getFileNameByHandle(fd)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return isCygwinPipeName(name)
|
||||
}
|
||||
|
||||
// Cygwin/msys's pty is a pipe.
|
||||
ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
|
||||
if ft != fileTypePipe || e != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var buf [2 + syscall.MAX_PATH]uint16
|
||||
r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
|
||||
4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
|
||||
uintptr(len(buf)*2), 0, 0)
|
||||
if r == 0 || e != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
l := *(*uint32)(unsafe.Pointer(&buf))
|
||||
return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
|
||||
}
|
1
vendor/github.com/mgutz/ansi/.gitignore
generated
vendored
Normal file
1
vendor/github.com/mgutz/ansi/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
*.test
|
9
vendor/github.com/mgutz/ansi/LICENSE
generated
vendored
Normal file
9
vendor/github.com/mgutz/ansi/LICENSE
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
The MIT License (MIT)
|
||||
Copyright (c) 2013 Mario L. Gutierrez
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
121
vendor/github.com/mgutz/ansi/README.md
generated
vendored
Normal file
121
vendor/github.com/mgutz/ansi/README.md
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
||||
# ansi
|
||||
|
||||
Package ansi is a small, fast library to create ANSI colored strings and codes.
|
||||
|
||||
## Install
|
||||
|
||||
Get it
|
||||
|
||||
```sh
|
||||
go get -u github.com/mgutz/ansi
|
||||
```
|
||||
|
||||
## Example
|
||||
|
||||
```go
|
||||
import "github.com/mgutz/ansi"
|
||||
|
||||
// colorize a string, SLOW
|
||||
msg := ansi.Color("foo", "red+b:white")
|
||||
|
||||
// create a FAST closure function to avoid computation of ANSI code
|
||||
phosphorize := ansi.ColorFunc("green+h:black")
|
||||
msg = phosphorize("Bring back the 80s!")
|
||||
msg2 := phospohorize("Look, I'm a CRT!")
|
||||
|
||||
// cache escape codes and build strings manually
|
||||
lime := ansi.ColorCode("green+h:black")
|
||||
reset := ansi.ColorCode("reset")
|
||||
|
||||
fmt.Println(lime, "Bring back the 80s!", reset)
|
||||
```
|
||||
|
||||
Other examples
|
||||
|
||||
```go
|
||||
Color(s, "red") // red
|
||||
Color(s, "red+b") // red bold
|
||||
Color(s, "red+B") // red blinking
|
||||
Color(s, "red+u") // red underline
|
||||
Color(s, "red+bh") // red bold bright
|
||||
Color(s, "red:white") // red on white
|
||||
Color(s, "red+b:white+h") // red bold on white bright
|
||||
Color(s, "red+B:white+h") // red blink on white bright
|
||||
Color(s, "off") // turn off ansi codes
|
||||
```
|
||||
|
||||
To view color combinations, from project directory in terminal.
|
||||
|
||||
```sh
|
||||
go test
|
||||
```
|
||||
|
||||
## Style format
|
||||
|
||||
```go
|
||||
"foregroundColor+attributes:backgroundColor+attributes"
|
||||
```
|
||||
|
||||
Colors
|
||||
|
||||
* black
|
||||
* red
|
||||
* green
|
||||
* yellow
|
||||
* blue
|
||||
* magenta
|
||||
* cyan
|
||||
* white
|
||||
* 0...255 (256 colors)
|
||||
|
||||
Foreground Attributes
|
||||
|
||||
* B = Blink
|
||||
* b = bold
|
||||
* h = high intensity (bright)
|
||||
* i = inverse
|
||||
* s = strikethrough
|
||||
* u = underline
|
||||
|
||||
Background Attributes
|
||||
|
||||
* h = high intensity (bright)
|
||||
|
||||
## Constants
|
||||
|
||||
* ansi.Reset
|
||||
* ansi.DefaultBG
|
||||
* ansi.DefaultFG
|
||||
* ansi.Black
|
||||
* ansi.Red
|
||||
* ansi.Green
|
||||
* ansi.Yellow
|
||||
* ansi.Blue
|
||||
* ansi.Magenta
|
||||
* ansi.Cyan
|
||||
* ansi.White
|
||||
* ansi.LightBlack
|
||||
* ansi.LightRed
|
||||
* ansi.LightGreen
|
||||
* ansi.LightYellow
|
||||
* ansi.LightBlue
|
||||
* ansi.LightMagenta
|
||||
* ansi.LightCyan
|
||||
* ansi.LightWhite
|
||||
|
||||
## References
|
||||
|
||||
Wikipedia ANSI escape codes [Colors](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors)
|
||||
|
||||
General [tips and formatting](http://misc.flogisoft.com/bash/tip_colors_and_formatting)
|
||||
|
||||
What about support on Windows? Use [colorable by mattn](https://github.com/mattn/go-colorable).
|
||||
Ansi and colorable are used by [logxi](https://github.com/mgutz/logxi) to support logging in
|
||||
color on Windows.
|
||||
|
||||
## MIT License
|
||||
|
||||
Copyright (c) 2013 Mario Gutierrez mario@mgutz.com
|
||||
|
||||
See the file LICENSE for copying permission.
|
||||
|
285
vendor/github.com/mgutz/ansi/ansi.go
generated
vendored
Normal file
285
vendor/github.com/mgutz/ansi/ansi.go
generated
vendored
Normal file
@ -0,0 +1,285 @@
|
||||
package ansi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
black = iota
|
||||
red
|
||||
green
|
||||
yellow
|
||||
blue
|
||||
magenta
|
||||
cyan
|
||||
white
|
||||
defaultt = 9
|
||||
|
||||
normalIntensityFG = 30
|
||||
highIntensityFG = 90
|
||||
normalIntensityBG = 40
|
||||
highIntensityBG = 100
|
||||
|
||||
start = "\033["
|
||||
bold = "1;"
|
||||
blink = "5;"
|
||||
underline = "4;"
|
||||
inverse = "7;"
|
||||
strikethrough = "9;"
|
||||
|
||||
// Reset is the ANSI reset escape sequence
|
||||
Reset = "\033[0m"
|
||||
// DefaultBG is the default background
|
||||
DefaultBG = "\033[49m"
|
||||
// DefaultFG is the default foreground
|
||||
DefaultFG = "\033[39m"
|
||||
)
|
||||
|
||||
// Black FG
|
||||
var Black string
|
||||
|
||||
// Red FG
|
||||
var Red string
|
||||
|
||||
// Green FG
|
||||
var Green string
|
||||
|
||||
// Yellow FG
|
||||
var Yellow string
|
||||
|
||||
// Blue FG
|
||||
var Blue string
|
||||
|
||||
// Magenta FG
|
||||
var Magenta string
|
||||
|
||||
// Cyan FG
|
||||
var Cyan string
|
||||
|
||||
// White FG
|
||||
var White string
|
||||
|
||||
// LightBlack FG
|
||||
var LightBlack string
|
||||
|
||||
// LightRed FG
|
||||
var LightRed string
|
||||
|
||||
// LightGreen FG
|
||||
var LightGreen string
|
||||
|
||||
// LightYellow FG
|
||||
var LightYellow string
|
||||
|
||||
// LightBlue FG
|
||||
var LightBlue string
|
||||
|
||||
// LightMagenta FG
|
||||
var LightMagenta string
|
||||
|
||||
// LightCyan FG
|
||||
var LightCyan string
|
||||
|
||||
// LightWhite FG
|
||||
var LightWhite string
|
||||
|
||||
var (
|
||||
plain = false
|
||||
// Colors maps common color names to their ANSI color code.
|
||||
Colors = map[string]int{
|
||||
"black": black,
|
||||
"red": red,
|
||||
"green": green,
|
||||
"yellow": yellow,
|
||||
"blue": blue,
|
||||
"magenta": magenta,
|
||||
"cyan": cyan,
|
||||
"white": white,
|
||||
"default": defaultt,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
for i := 0; i < 256; i++ {
|
||||
Colors[strconv.Itoa(i)] = i
|
||||
}
|
||||
|
||||
Black = ColorCode("black")
|
||||
Red = ColorCode("red")
|
||||
Green = ColorCode("green")
|
||||
Yellow = ColorCode("yellow")
|
||||
Blue = ColorCode("blue")
|
||||
Magenta = ColorCode("magenta")
|
||||
Cyan = ColorCode("cyan")
|
||||
White = ColorCode("white")
|
||||
LightBlack = ColorCode("black+h")
|
||||
LightRed = ColorCode("red+h")
|
||||
LightGreen = ColorCode("green+h")
|
||||
LightYellow = ColorCode("yellow+h")
|
||||
LightBlue = ColorCode("blue+h")
|
||||
LightMagenta = ColorCode("magenta+h")
|
||||
LightCyan = ColorCode("cyan+h")
|
||||
LightWhite = ColorCode("white+h")
|
||||
}
|
||||
|
||||
// ColorCode returns the ANSI color color code for style.
|
||||
func ColorCode(style string) string {
|
||||
return colorCode(style).String()
|
||||
}
|
||||
|
||||
// Gets the ANSI color code for a style.
|
||||
func colorCode(style string) *bytes.Buffer {
|
||||
buf := bytes.NewBufferString("")
|
||||
if plain || style == "" {
|
||||
return buf
|
||||
}
|
||||
if style == "reset" {
|
||||
buf.WriteString(Reset)
|
||||
return buf
|
||||
} else if style == "off" {
|
||||
return buf
|
||||
}
|
||||
|
||||
foregroundBackground := strings.Split(style, ":")
|
||||
foreground := strings.Split(foregroundBackground[0], "+")
|
||||
fgKey := foreground[0]
|
||||
fg := Colors[fgKey]
|
||||
fgStyle := ""
|
||||
if len(foreground) > 1 {
|
||||
fgStyle = foreground[1]
|
||||
}
|
||||
|
||||
bg, bgStyle := "", ""
|
||||
|
||||
if len(foregroundBackground) > 1 {
|
||||
background := strings.Split(foregroundBackground[1], "+")
|
||||
bg = background[0]
|
||||
if len(background) > 1 {
|
||||
bgStyle = background[1]
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString(start)
|
||||
base := normalIntensityFG
|
||||
if len(fgStyle) > 0 {
|
||||
if strings.Contains(fgStyle, "b") {
|
||||
buf.WriteString(bold)
|
||||
}
|
||||
if strings.Contains(fgStyle, "B") {
|
||||
buf.WriteString(blink)
|
||||
}
|
||||
if strings.Contains(fgStyle, "u") {
|
||||
buf.WriteString(underline)
|
||||
}
|
||||
if strings.Contains(fgStyle, "i") {
|
||||
buf.WriteString(inverse)
|
||||
}
|
||||
if strings.Contains(fgStyle, "s") {
|
||||
buf.WriteString(strikethrough)
|
||||
}
|
||||
if strings.Contains(fgStyle, "h") {
|
||||
base = highIntensityFG
|
||||
}
|
||||
}
|
||||
|
||||
// if 256-color
|
||||
n, err := strconv.Atoi(fgKey)
|
||||
if err == nil {
|
||||
fmt.Fprintf(buf, "38;5;%d;", n)
|
||||
} else {
|
||||
fmt.Fprintf(buf, "%d;", base+fg)
|
||||
}
|
||||
|
||||
base = normalIntensityBG
|
||||
if len(bg) > 0 {
|
||||
if strings.Contains(bgStyle, "h") {
|
||||
base = highIntensityBG
|
||||
}
|
||||
// if 256-color
|
||||
n, err := strconv.Atoi(bg)
|
||||
if err == nil {
|
||||
fmt.Fprintf(buf, "48;5;%d;", n)
|
||||
} else {
|
||||
fmt.Fprintf(buf, "%d;", base+Colors[bg])
|
||||
}
|
||||
}
|
||||
|
||||
// remove last ";"
|
||||
buf.Truncate(buf.Len() - 1)
|
||||
buf.WriteRune('m')
|
||||
return buf
|
||||
}
|
||||
|
||||
// Color colors a string based on the ANSI color code for style.
|
||||
func Color(s, style string) string {
|
||||
if plain || len(style) < 1 {
|
||||
return s
|
||||
}
|
||||
buf := colorCode(style)
|
||||
buf.WriteString(s)
|
||||
buf.WriteString(Reset)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// ColorFunc creates a closure to avoid computation ANSI color code.
|
||||
func ColorFunc(style string) func(string) string {
|
||||
if style == "" {
|
||||
return func(s string) string {
|
||||
return s
|
||||
}
|
||||
}
|
||||
color := ColorCode(style)
|
||||
return func(s string) string {
|
||||
if plain || s == "" {
|
||||
return s
|
||||
}
|
||||
buf := bytes.NewBufferString(color)
|
||||
buf.WriteString(s)
|
||||
buf.WriteString(Reset)
|
||||
result := buf.String()
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
// DisableColors disables ANSI color codes. The default is false (colors are on).
|
||||
func DisableColors(disable bool) {
|
||||
plain = disable
|
||||
if plain {
|
||||
Black = ""
|
||||
Red = ""
|
||||
Green = ""
|
||||
Yellow = ""
|
||||
Blue = ""
|
||||
Magenta = ""
|
||||
Cyan = ""
|
||||
White = ""
|
||||
LightBlack = ""
|
||||
LightRed = ""
|
||||
LightGreen = ""
|
||||
LightYellow = ""
|
||||
LightBlue = ""
|
||||
LightMagenta = ""
|
||||
LightCyan = ""
|
||||
LightWhite = ""
|
||||
} else {
|
||||
Black = ColorCode("black")
|
||||
Red = ColorCode("red")
|
||||
Green = ColorCode("green")
|
||||
Yellow = ColorCode("yellow")
|
||||
Blue = ColorCode("blue")
|
||||
Magenta = ColorCode("magenta")
|
||||
Cyan = ColorCode("cyan")
|
||||
White = ColorCode("white")
|
||||
LightBlack = ColorCode("black+h")
|
||||
LightRed = ColorCode("red+h")
|
||||
LightGreen = ColorCode("green+h")
|
||||
LightYellow = ColorCode("yellow+h")
|
||||
LightBlue = ColorCode("blue+h")
|
||||
LightMagenta = ColorCode("magenta+h")
|
||||
LightCyan = ColorCode("cyan+h")
|
||||
LightWhite = ColorCode("white+h")
|
||||
}
|
||||
}
|
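colorCode assembles the escape as "\033[" + attribute codes + foreground + optional background + "m"; for example "red+b:white" yields bold ("1;"), red foreground (30+1 = 31) and white background (40+7 = 47). A short sketch checking that:

```go
package main

import (
	"fmt"

	"github.com/mgutz/ansi"
)

func main() {
	// "red+b:white": bold ("1;"), red FG (30+1 = 31), white BG (40+7 = 47).
	code := ansi.ColorCode("red+b:white")
	fmt.Printf("%q\n", code) // "\x1b[1;31;47m"

	// Color wraps the string in the code plus ansi.Reset.
	fmt.Println(ansi.Color("warning", "red+b:white"))
}
```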
65
vendor/github.com/mgutz/ansi/doc.go
generated
vendored
Normal file
65
vendor/github.com/mgutz/ansi/doc.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
/*
|
||||
Package ansi is a small, fast library to create ANSI colored strings and codes.
|
||||
|
||||
Installation
|
||||
|
||||
# this installs the color viewer and the package
|
||||
go get -u github.com/mgutz/ansi/cmd/ansi-mgutz
|
||||
|
||||
Example
|
||||
|
||||
// colorize a string, SLOW
|
||||
msg := ansi.Color("foo", "red+b:white")
|
||||
|
||||
// create a closure to avoid recalculating ANSI code compilation
|
||||
phosphorize := ansi.ColorFunc("green+h:black")
|
||||
msg = phosphorize("Bring back the 80s!")
|
||||
msg2 := phospohorize("Look, I'm a CRT!")
|
||||
|
||||
// cache escape codes and build strings manually
|
||||
lime := ansi.ColorCode("green+h:black")
|
||||
reset := ansi.ColorCode("reset")
|
||||
|
||||
fmt.Println(lime, "Bring back the 80s!", reset)
|
||||
|
||||
Other examples
|
||||
|
||||
Color(s, "red") // red
|
||||
Color(s, "red+b") // red bold
|
||||
Color(s, "red+B") // red blinking
|
||||
Color(s, "red+u") // red underline
|
||||
Color(s, "red+bh") // red bold bright
|
||||
Color(s, "red:white") // red on white
|
||||
Color(s, "red+b:white+h") // red bold on white bright
|
||||
Color(s, "red+B:white+h") // red blink on white bright
|
||||
|
||||
To view color combinations, from terminal
|
||||
|
||||
ansi-mgutz
|
||||
|
||||
Style format
|
||||
|
||||
"foregroundColor+attributes:backgroundColor+attributes"
|
||||
|
||||
Colors
|
||||
|
||||
black
|
||||
red
|
||||
green
|
||||
yellow
|
||||
blue
|
||||
magenta
|
||||
cyan
|
||||
white
|
||||
|
||||
Attributes
|
||||
|
||||
b = bold foreground
|
||||
B = Blink foreground
|
||||
u = underline foreground
|
||||
h = high intensity (bright) foreground, background
|
||||
i = inverse
|
||||
|
||||
Wikipedia ANSI escape codes [Colors](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors)
|
||||
*/
|
||||
package ansi
|
57
vendor/github.com/mgutz/ansi/print.go
generated
vendored
Normal file
57
vendor/github.com/mgutz/ansi/print.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
package ansi

import (
	"fmt"
	"sort"

	colorable "github.com/mattn/go-colorable"
)

// PrintStyles prints all style combinations to the terminal.
func PrintStyles() {
	// for compatibility with Windows, not needed for *nix
	stdout := colorable.NewColorableStdout()

	bgColors := []string{
		"",
		":black",
		":red",
		":green",
		":yellow",
		":blue",
		":magenta",
		":cyan",
		":white",
	}

	keys := make([]string, 0, len(Colors))
	for k := range Colors {
		keys = append(keys, k)
	}

	sort.Sort(sort.StringSlice(keys))

	for _, fg := range keys {
		for _, bg := range bgColors {
			fmt.Fprintln(stdout, padColor(fg, []string{"" + bg, "+b" + bg, "+bh" + bg, "+u" + bg}))
			fmt.Fprintln(stdout, padColor(fg, []string{"+s" + bg, "+i" + bg}))
			fmt.Fprintln(stdout, padColor(fg, []string{"+uh" + bg, "+B" + bg, "+Bb" + bg /* backgrounds */, "" + bg + "+h"}))
			fmt.Fprintln(stdout, padColor(fg, []string{"+b" + bg + "+h", "+bh" + bg + "+h", "+u" + bg + "+h", "+uh" + bg + "+h"}))
		}
	}
}

func pad(s string, length int) string {
	for len(s) < length {
		s += " "
	}
	return s
}

func padColor(color string, styles []string) string {
	buffer := ""
	for _, style := range styles {
		buffer += Color(pad(color+style, 20), color+style)
	}
	return buffer
}
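To see every combination the style grammar can produce, the package's own `PrintStyles` can be called directly; a minimal sketch (not part of the diff):

```go
package main

import "github.com/mgutz/ansi"

func main() {
	// Dump every foreground/background/attribute combination to the terminal.
	ansi.PrintStyles()
}
```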
2 vendor/github.com/oragono/confusables/.gitignore generated vendored Normal file
@ -0,0 +1,2 @@
/maketables
confusables.txt
28 vendor/github.com/oragono/confusables/LICENSE generated vendored Normal file
@ -0,0 +1,28 @@
Copyright (c) 2013 Michael Tibben. All rights reserved.
Copyright (c) 2014 Filippo Valsorda. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
17 vendor/github.com/oragono/confusables/README.md generated vendored Normal file
@ -0,0 +1,17 @@
# Unicode confusables

This Go library implements the `Skeleton` algorithm from Unicode TR39.

See http://www.unicode.org/reports/tr39/

### Examples
```
import "github.com/mtibben/confusables"

confusables.Skeleton("𝔭𝒶ỿ𝕡𝕒ℓ")            # "paypal"
confusables.Confusable("𝔭𝒶ỿ𝕡𝕒ℓ", "paypal") # true
```

*Note on the use of `Skeleton`, from TR39:*

> A skeleton is intended only for internal use for testing confusability of strings; the resulting text is not suitable for display to users, because it will appear to be a hodgepodge of different scripts. In particular, the result of mapping an identifier will not necessarily be an identifier. Thus the confusability mappings can be used to test whether two identifiers are confusable (if their skeletons are the same), but should definitely not be used as a "normalization" of identifiers.
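The vendored copy is imported as `github.com/oragono/confusables` rather than the upstream path shown in the README; a minimal runnable sketch against the vendored path (illustrative only):

```go
package main

import (
	"fmt"

	"github.com/oragono/confusables"
)

func main() {
	// Both calls use only the exported API added by this commit.
	fmt.Println(confusables.Skeleton("𝔭𝒶ỿ𝕡𝕒ℓ"))             // "paypal"
	fmt.Println(confusables.Confusable("𝔭𝒶ỿ𝕡𝕒ℓ", "paypal")) // true
}
```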
82 vendor/github.com/oragono/confusables/confusables.go generated vendored Normal file
@ -0,0 +1,82 @@
//go:generate go run maketables.go > tables.go

package confusables

import (
	"bytes"

	"golang.org/x/text/unicode/norm"
)

// TODO: document casefolding approaches
// (suggest to force casefold strings; explain how to catch paypal - pAypal)
// TODO: DOC you might want to store the Skeleton and check against it later
// TODO: implement xidmodifications.txt restricted characters

type lookupFunc func(rune) string

func lookupReplacement(r rune) string {
	return confusablesMap[r]
}

func lookupReplacementTweaked(r rune) string {
	if replacement, ok := tweaksMap[r]; ok {
		return replacement
	}
	return confusablesMap[r]
}

func skeletonBase(s string, lookup lookupFunc) string {

	// 1. Converting X to NFD format
	s = norm.NFD.String(s)

	// 2. Successively mapping each source character in X to the target string
	// according to the specified data table
	var buf bytes.Buffer
	changed := false // fast path: if this remains false, keep s intact
	prevPos := 0
	var replacement string
	for i, r := range s {
		if changed && replacement == "" {
			buf.WriteString(s[prevPos:i])
		}
		prevPos = i
		replacement = lookup(r)
		if replacement != "" {
			if !changed {
				changed = true
				// first replacement: copy over the previously unmodified text
				buf.WriteString(s[:i])
			}
			buf.WriteString(replacement)
		}
	}
	if changed && replacement == "" {
		buf.WriteString(s[prevPos:]) // loop-and-a-half
	}
	if changed {
		s = buf.String()
	}

	// 3. Reapplying NFD
	s = norm.NFD.String(s)

	return s
}

// Skeleton converts a string to its "skeleton" form
// as described in http://www.unicode.org/reports/tr39/#Confusable_Detection
func Skeleton(s string) string {
	return skeletonBase(s, lookupReplacement)
}

// SkeletonTweaked is like Skeleton, but it implements some custom overrides
// to the confusables table (currently it removes the m -> rn mapping):
func SkeletonTweaked(s string) string {
	return skeletonBase(s, lookupReplacementTweaked)
}

func Confusable(x, y string) bool {
	return Skeleton(x) == Skeleton(y)
}
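The TODO above suggests storing a name's skeleton and checking against it later rather than recomputing it; a hedged sketch of what such a caller might look like (the `registry` type and its method names are hypothetical, not part of the library):

```go
package main

import (
	"fmt"

	"github.com/oragono/confusables"
)

// registry is a hypothetical store that remembers the skeleton of every
// registered name, so later confusability checks are a single map lookup.
type registry struct {
	bySkeleton map[string]string // skeleton -> original name
}

func newRegistry() *registry {
	return &registry{bySkeleton: make(map[string]string)}
}

// Register refuses names that are confusable with an existing one.
func (r *registry) Register(name string) error {
	skel := confusables.SkeletonTweaked(name)
	if existing, ok := r.bySkeleton[skel]; ok {
		return fmt.Errorf("%q is confusable with already-registered %q", name, existing)
	}
	r.bySkeleton[skel] = name
	return nil
}

func main() {
	r := newRegistry()
	fmt.Println(r.Register("paypal"))   // <nil>
	fmt.Println(r.Register("𝔭𝒶ỿ𝕡𝕒ℓ")) // error: confusable with "paypal"
}
```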
6317 vendor/github.com/oragono/confusables/tables.go generated vendored Normal file
File diff suppressed because it is too large
38 vendor/github.com/oragono/confusables/tweaks.go generated vendored Normal file
@ -0,0 +1,38 @@
package confusables

// these are overrides for the standard confusables table:
// a mapping to "" means "don't map", a mapping to a replacement means
// "replace with this", no entry means "defer to the standard table"

var tweaksMap = map[rune]string{
	// ASCII-to-ASCII mapping that we are removing:
	0x6d: "", // m -> rn
	// these characters are confusable with m, hence the official table
	// maps them to rn (`grep "LATIN SMALL LETTER R, LATIN SMALL LETTER N" confusables.txt`)
	0x118E3: "m", // 118E3 ; 0072 006E ; MA # ( 𑣣 → rn ) WARANG CITI DIGIT THREE → LATIN SMALL LETTER R, LATIN SMALL LETTER N
	0x11700: "m", // 11700 ; 0072 006E ; MA # ( 𑜀 → rn ) AHOM LETTER KA → LATIN SMALL LETTER R, LATIN SMALL LETTER N
	// the table thinks this is confusable with m̦ but I think it's confusable with m:
	0x0271: "m", // 0271 ; 0072 006E 0326 ; MA # ( ɱ → rn̦ ) LATIN SMALL LETTER M WITH HOOK → LATIN SMALL LETTER R, LATIN SMALL LETTER N, COMBINING COMMA BELOW # →m̡→

	/*
		// ASCII-to-ASCII mapping that we are removing:
		0x49: "", // I -> l
		// these characters are confusable with I, hence the official table
		// maps them to l (`grep "LATIN SMALL LETTER L" confusables.txt`)
		0x0399: "I", // 0399 ; 006C ; MA # ( Ι → l ) GREEK CAPITAL LETTER IOTA → LATIN SMALL LETTER L #
		0x0406: "I", // 0406 ; 006C ; MA # ( І → l ) CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I → LATIN SMALL LETTER L #
		0x04C0: "I", // 04C0 ; 006C ; MA # ( Ӏ → l ) CYRILLIC LETTER PALOCHKA → LATIN SMALL LETTER L #

		// ASCII-to-ASCII mapping that we are removing:
		0x31: "", // 1 -> l
		// these characters are confusable with 1, hence the official table
		// maps them to l (`grep "LATIN SMALL LETTER L" confusables.txt`)
		// [nothing yet]

		// ASCII-to-ASCII mapping that we are removing:
		0x30: "", // 0 -> O
		// these characters are confusable with 0, hence the official table
		// maps them to O (`grep "LATIN CAPITAL LETTER O\>" confusables.txt`)
		// [nothing yet]
	*/
}
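To make the effect of these overrides concrete, a small illustrative comparison of `Skeleton` and `SkeletonTweaked` (the expected outputs follow from the standard table's `m → rn` entry and the overrides above; this snippet is not part of the vendored code):

```go
package main

import (
	"fmt"

	"github.com/oragono/confusables"
)

func main() {
	// The stock table maps ASCII 'm' to "rn", so "rnouse" and "mouse" collide.
	fmt.Println(confusables.Skeleton("mouse"))         // "rnouse"
	// The tweaked table keeps 'm' intact while still folding exotic
	// m-lookalikes such as U+0271 (ɱ) down to plain "m".
	fmt.Println(confusables.SkeletonTweaked("mouse"))  // "mouse"
	fmt.Println(confusables.SkeletonTweaked("ɱouse")) // "mouse"
}
```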
20 vendor/github.com/oragono/go-ident/LICENSE generated vendored Normal file
@ -0,0 +1,20 @@
Copyright (c) 2013 Dominik Honnef

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 vendor/github.com/oragono/go-ident/README.md generated vendored Normal file
@ -0,0 +1,19 @@
# RFC 1413 (Identification Protocol) client

This package provides a client for the [Identification Protocol](https://tools.ietf.org/html/rfc1413).

---

[![GoDoc](https://godoc.org/github.com/DanielOaks/go-ident?status.svg)](https://godoc.org/github.com/DanielOaks/go-ident) [![Go Report Card](https://goreportcard.com/badge/github.com/DanielOaks/go-ident)](https://goreportcard.com/report/github.com/DanielOaks/go-ident)

---

## Installation

```sh
go get github.com/DanielOaks/go-ident
```

## Documentation

Documentation can be found at [godoc.org](http://godoc.org/github.com/DanielOaks/go-ident).
108 vendor/github.com/oragono/go-ident/client.go generated vendored Normal file
@ -0,0 +1,108 @@
// Package ident implements an RFC 1413 client
package ident

import (
	"bufio"
	"fmt"
	"net"
	"strings"
	"time"
)

// Response is a successful answer to our query to the identd server.
type Response struct {
	OS         string
	Charset    string
	Identifier string
}

// ResponseError indicates that the identd server returned an error rather than an
// identifying string.
type ResponseError struct {
	Type string
}

func (e ResponseError) Error() string {
	return fmt.Sprintf("Ident error: %s", e.Type)
}

// ProtocolError indicates that an error occurred with the protocol itself, that the response
// could not be successfully parsed or was malformed.
type ProtocolError struct {
	Line string
}

func (e ProtocolError) Error() string {
	return fmt.Sprintf("Unexpected response from server: %s", e.Line)
}

// Query makes an Ident query; if timeout is >0, the query times out after that many seconds.
func Query(ip string, portOnServer, portOnClient int, timeout float64) (Response, error) {
	var (
		conn   net.Conn
		err    error
		fields []string
		r      *bufio.Reader
		resp   string
	)

	if timeout > 0 {
		conn, err = net.DialTimeout("tcp", net.JoinHostPort(ip, "113"), time.Duration(timeout)*time.Second)
	} else {
		conn, err = net.Dial("tcp", net.JoinHostPort(ip, "113"))
	}
	if err != nil {
		return Response{}, err
	}

	// stop the ident read after <timeout> seconds
	if timeout > 0 {
		conn.SetDeadline(time.Now().Add(time.Second * time.Duration(timeout)))
	}

	_, err = conn.Write([]byte(fmt.Sprintf("%d, %d", portOnClient, portOnServer) + "\r\n"))
	if err != nil {
		return Response{}, err
	}

	r = bufio.NewReader(conn)
	resp, err = r.ReadString('\n')
	if err != nil {
		return Response{}, err
	}

	fields = strings.SplitN(strings.TrimSpace(resp), " : ", 4)
	if len(fields) < 3 {
		return Response{}, ProtocolError{resp}
	}

	switch fields[1] {
	case "USERID":
		if len(fields) != 4 {
			return Response{}, ProtocolError{resp}
		}

		var os, charset string
		osAndCharset := strings.SplitN(fields[2], ",", 2)
		if len(osAndCharset) == 2 {
			os = osAndCharset[0]
			charset = osAndCharset[1]
		} else {
			os = osAndCharset[0]
			charset = "US-ASCII"
		}

		return Response{
			OS:         os,
			Charset:    charset,
			Identifier: fields[3],
		}, nil
	case "ERROR":
		if len(fields) != 3 {
			return Response{}, ProtocolError{resp}
		}

		return Response{}, ResponseError{fields[2]}
	}
	return Response{}, err
}
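A minimal sketch of calling `Query` from an IRC-server-style lookup (the address and port numbers are placeholders, and the vendored import path is assumed):

```go
package main

import (
	"fmt"
	"log"

	ident "github.com/oragono/go-ident"
)

func main() {
	// Ask the identd on the remote host who owns the connection from
	// remote port 51234 to our local port 6667, with a 5-second timeout.
	resp, err := ident.Query("192.0.2.10", 6667, 51234, 5)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("user=%s os=%s charset=%s\n", resp.Identifier, resp.OS, resp.Charset)
}
```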
Some files were not shown because too many files have changed in this diff.