Mirror of https://github.com/42wim/matterbridge.git (synced 2024-12-23 19:52:41 +01:00)

Add vendor github.com/dfordsoft/golib/ic

parent 406a54b597
commit bf0b9959d1
vendor/github.com/dfordsoft/golib/ic/LICENSE (generated, vendored; new file, 21 lines)
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 DForD Software
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
vendor/github.com/dfordsoft/golib/ic/convutf8.go (generated, vendored; new file, 72 lines)
@@ -0,0 +1,72 @@
+// Package ic convert text between CJK and UTF-8 in pure Go way
+package ic
+
+import (
+    "bytes"
+    "errors"
+    "io/ioutil"
+    "strings"
+
+    "golang.org/x/text/encoding"
+    "golang.org/x/text/encoding/japanese"
+    "golang.org/x/text/encoding/korean"
+    "golang.org/x/text/encoding/simplifiedchinese"
+    "golang.org/x/text/encoding/traditionalchinese"
+    "golang.org/x/text/transform"
+)
+
+var (
+    transformers = map[string]encoding.Encoding{
+        "gbk":         simplifiedchinese.GBK,
+        "cp936":       simplifiedchinese.GBK,
+        "windows-936": simplifiedchinese.GBK,
+        "gb18030":     simplifiedchinese.GB18030,
+        "gb2312":      simplifiedchinese.HZGB2312,
+        "big5":        traditionalchinese.Big5,
+        "big-5":       traditionalchinese.Big5,
+        "cp950":       traditionalchinese.Big5,
+        "euc-kr":      korean.EUCKR,
+        "euckr":       korean.EUCKR,
+        "cp949":       korean.EUCKR,
+        "euc-jp":      japanese.EUCJP,
+        "eucjp":       japanese.EUCJP,
+        "shift-jis":   japanese.ShiftJIS,
+        "iso-2022-jp": japanese.ISO2022JP,
+        "cp932":       japanese.ISO2022JP,
+        "windows-31j": japanese.ISO2022JP,
+    }
+)
+
+// ToUTF8 convert from CJK encoding to UTF-8
+func ToUTF8(from string, s []byte) ([]byte, error) {
+    var reader *transform.Reader
+
+    transformer, ok := transformers[strings.ToLower(from)]
+    if !ok {
+        return s, errors.New("Unsupported encoding " + from)
+    }
+    reader = transform.NewReader(bytes.NewReader(s), transformer.NewDecoder())
+
+    d, e := ioutil.ReadAll(reader)
+    if e != nil {
+        return nil, e
+    }
+    return d, nil
+}
+
+// FromUTF8 convert from UTF-8 encoding to CJK encoding
+func FromUTF8(to string, s []byte) ([]byte, error) {
+    var reader *transform.Reader
+
+    transformer, ok := transformers[strings.ToLower(to)]
+    if !ok {
+        return s, errors.New("Unsupported encoding " + to)
+    }
+    reader = transform.NewReader(bytes.NewReader(s), transformer.NewEncoder())
+
+    d, e := ioutil.ReadAll(reader)
+    if e != nil {
+        return nil, e
+    }
+    return d, nil
+}
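A minimal usage sketch of the two converters above, against the vendored import path github.com/dfordsoft/golib/ic (illustrative example, not part of the vendored code; the GBK byte values are just a sample):

package main

import (
    "fmt"
    "log"

    "github.com/dfordsoft/golib/ic"
)

func main() {
    // 0xC4E3 0xBAC3 is "你好" in GBK.
    gbk := []byte{0xc4, 0xe3, 0xba, 0xc3}

    utf8Text, err := ic.ToUTF8("gbk", gbk)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(utf8Text)) // 你好

    // Round-trip back to GBK bytes.
    back, err := ic.FromUTF8("gbk", utf8Text)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("% x\n", back) // c4 e3 ba c3
}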
vendor/github.com/dfordsoft/golib/ic/ic.go (generated, vendored; new file, 31 lines)
@@ -0,0 +1,31 @@
+package ic
+
+import "log"
+
+// Convert convert bytes from CJK or UTF-8 to UTF-8 or CJK
+func Convert(from string, to string, src []byte) []byte {
+    if to == "utf-8" {
+        out, e := ToUTF8(from, src)
+        if e == nil {
+            return out
+        }
+        log.Printf("converting from %s to UTF-8 failed: %v", from, e)
+        return src
+    }
+
+    if from == "utf-8" {
+        out, e := FromUTF8(to, src)
+        if e == nil {
+            return out
+        }
+        log.Printf("converting from UTF-8 to %s failed: %v", to, e)
+        return src
+    }
+    log.Println("only converting between CJK encodings and UTF-8 is supported")
+    return src
+}
+
+// ConvertString convert string from CJK or UTF-8 to UTF-8 or CJK
+func ConvertString(from string, to string, src string) string {
+    return string(Convert(from, to, []byte(src)))
+}
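For callers that only know the two encoding names, the wrappers above can be used directly; note that Convert and ConvertString fall back to returning the input unchanged (after logging) when the conversion fails or is unsupported. A short sketch (illustrative, using the same sample GBK bytes as before):

package main

import (
    "fmt"

    "github.com/dfordsoft/golib/ic"
)

func main() {
    gbk := string([]byte{0xc4, 0xe3, 0xba, 0xc3}) // "你好" in GBK

    // "to" is UTF-8, so ConvertString decodes the GBK input.
    s := ic.ConvertString("gbk", "utf-8", gbk)
    fmt.Println(s) // 你好
}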
vendor/golang.org/x/text/encoding/charmap/charmap.go (generated, vendored; 84 lines changed)
@@ -33,32 +33,32 @@ var (
     ISO8859_8I encoding.Encoding = &iso8859_8I
 
     iso8859_6E = internal.Encoding{
-        ISO8859_6,
-        "ISO-8859-6E",
-        identifier.ISO88596E,
+        Encoding: ISO8859_6,
+        Name:     "ISO-8859-6E",
+        MIB:      identifier.ISO88596E,
     }
 
     iso8859_6I = internal.Encoding{
-        ISO8859_6,
-        "ISO-8859-6I",
-        identifier.ISO88596I,
+        Encoding: ISO8859_6,
+        Name:     "ISO-8859-6I",
+        MIB:      identifier.ISO88596I,
     }
 
     iso8859_8E = internal.Encoding{
-        ISO8859_8,
-        "ISO-8859-8E",
-        identifier.ISO88598E,
+        Encoding: ISO8859_8,
+        Name:     "ISO-8859-8E",
+        MIB:      identifier.ISO88598E,
     }
 
     iso8859_8I = internal.Encoding{
-        ISO8859_8,
-        "ISO-8859-8I",
-        identifier.ISO88598I,
+        Encoding: ISO8859_8,
+        Name:     "ISO-8859-8I",
+        MIB:      identifier.ISO88598I,
     }
 )
 
 // All is a list of all defined encodings in this package.
-var All = listAll
+var All []encoding.Encoding = listAll
 
 // TODO: implement these encodings, in order of importance.
 // ASCII, ISO8859_1: Rather common. Close to Windows 1252.
@@ -70,8 +70,8 @@ type utf8Enc struct {
     data [3]byte
 }
 
-// charmap describes an 8-bit character set encoding.
-type charmap struct {
+// Charmap is an 8-bit character set encoding.
+type Charmap struct {
     // name is the encoding's name.
     name string
     // mib is the encoding type of this encoder.
@@ -79,7 +79,7 @@ type charmap struct {
     // asciiSuperset states whether the encoding is a superset of ASCII.
     asciiSuperset bool
     // low is the lower bound of the encoded byte for a non-ASCII rune. If
-    // charmap.asciiSuperset is true then this will be 0x80, otherwise 0x00.
+    // Charmap.asciiSuperset is true then this will be 0x80, otherwise 0x00.
     low uint8
     // replacement is the encoded replacement character.
     replacement byte
@@ -91,26 +91,30 @@ type charmap struct {
     encode [256]uint32
 }
 
-func (m *charmap) NewDecoder() *encoding.Decoder {
+// NewDecoder implements the encoding.Encoding interface.
+func (m *Charmap) NewDecoder() *encoding.Decoder {
     return &encoding.Decoder{Transformer: charmapDecoder{charmap: m}}
 }
 
-func (m *charmap) NewEncoder() *encoding.Encoder {
+// NewEncoder implements the encoding.Encoding interface.
+func (m *Charmap) NewEncoder() *encoding.Encoder {
     return &encoding.Encoder{Transformer: charmapEncoder{charmap: m}}
 }
 
-func (m *charmap) String() string {
+// String returns the Charmap's name.
+func (m *Charmap) String() string {
     return m.name
 }
 
-func (m *charmap) ID() (mib identifier.MIB, other string) {
+// ID implements an internal interface.
+func (m *Charmap) ID() (mib identifier.MIB, other string) {
     return m.mib, ""
 }
 
 // charmapDecoder implements transform.Transformer by decoding to UTF-8.
 type charmapDecoder struct {
     transform.NopResetter
-    charmap *charmap
+    charmap *Charmap
 }
 
 func (m charmapDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
@@ -142,10 +146,22 @@ func (m charmapDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int,
     return nDst, nSrc, err
 }
 
+// DecodeByte returns the Charmap's rune decoding of the byte b.
+func (m *Charmap) DecodeByte(b byte) rune {
+    switch x := &m.decode[b]; x.len {
+    case 1:
+        return rune(x.data[0])
+    case 2:
+        return rune(x.data[0]&0x1f)<<6 | rune(x.data[1]&0x3f)
+    default:
+        return rune(x.data[0]&0x0f)<<12 | rune(x.data[1]&0x3f)<<6 | rune(x.data[2]&0x3f)
+    }
+}
+
 // charmapEncoder implements transform.Transformer by encoding from UTF-8.
 type charmapEncoder struct {
     transform.NopResetter
-    charmap *charmap
+    charmap *Charmap
 }
 
 func (m charmapEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
@@ -207,3 +223,27 @@ loop:
     }
     return nDst, nSrc, err
 }
+
+// EncodeRune returns the Charmap's byte encoding of the rune r. ok is whether
+// r is in the Charmap's repertoire. If not, b is set to the Charmap's
+// replacement byte. This is often the ASCII substitute character '\x1a'.
+func (m *Charmap) EncodeRune(r rune) (b byte, ok bool) {
+    if r < utf8.RuneSelf && m.asciiSuperset {
+        return byte(r), true
+    }
+    for low, high := int(m.low), 0x100; ; {
+        if low >= high {
+            return m.replacement, false
+        }
+        mid := (low + high) / 2
+        got := m.encode[mid]
+        gotRune := rune(got & (1<<24 - 1))
+        if gotRune < r {
+            low = mid + 1
+        } else if gotRune > r {
+            high = mid
+        } else {
+            return byte(got >> 24), true
+        }
+    }
+}
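A small sketch of the newly exported API in golang.org/x/text/encoding/charmap (illustrative example, not part of the diff): the table variables are now typed *Charmap, and EncodeRune/DecodeByte allow single-byte lookups without going through a Transformer.

package main

import (
    "fmt"

    "golang.org/x/text/encoding/charmap"
)

func main() {
    // Exported table variables are now *charmap.Charmap values.
    var cm *charmap.Charmap = charmap.Windows1252

    // EncodeRune reports whether the rune is representable; if not it
    // returns the charmap's replacement byte.
    if b, ok := cm.EncodeRune('€'); ok {
        fmt.Printf("0x%02x\n", b) // 0x80 in Windows-1252
    }

    // DecodeByte maps a single byte back to a rune.
    fmt.Printf("%q\n", cm.DecodeByte(0xe9)) // 'é'
}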
vendor/golang.org/x/text/encoding/charmap/maketables.go (generated, vendored; 4 lines changed)
@@ -494,7 +494,7 @@ func main() {
         if e.comment != "" {
             printf("//\n// %s\n", e.comment)
         }
-        printf("var %s encoding.Encoding = &%s\n\nvar %s = charmap{\nname: %q,\n",
+        printf("var %s *Charmap = &%s\n\nvar %s = Charmap{\nname: %q,\n",
             varName, lowerVarName, lowerVarName, e.name)
         if mibs[e.mib] {
             log.Fatalf("MIB type %q declared multiple times.", e.mib)
@@ -540,7 +540,7 @@ func main() {
     }
     printf("},\n}\n")
 
-    // Add an estimate of the size of a single charmap{} struct value, which
+    // Add an estimate of the size of a single Charmap{} struct value, which
     // includes two 256 elem arrays of 4 bytes and some extra fields, which
     // align to 3 uint64s on 64-bit architectures.
     w.Size += 2*4*256 + 3*8
vendor/golang.org/x/text/encoding/charmap/tables.go (generated, vendored; 190 lines changed)
@@ -1,4 +1,4 @@
-// This file was generated by go generate; DO NOT EDIT
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 package charmap
 
@@ -8,9 +8,9 @@ import (
 )
 
 // CodePage037 is the IBM Code Page 037 encoding.
-var CodePage037 encoding.Encoding = &codePage037
+var CodePage037 *Charmap = &codePage037
 
-var codePage037 = charmap{
+var codePage037 = Charmap{
     name: "IBM Code Page 037",
     mib: identifier.IBM037,
     asciiSuperset: false,
@@ -183,9 +183,9 @@ var codePage037 = charmap{
 }
 
 // CodePage437 is the IBM Code Page 437 encoding.
-var CodePage437 encoding.Encoding = &codePage437
+var CodePage437 *Charmap = &codePage437
 
-var codePage437 = charmap{
+var codePage437 = Charmap{
     name: "IBM Code Page 437",
     mib: identifier.PC8CodePage437,
     asciiSuperset: true,
@@ -358,9 +358,9 @@ var codePage437 = charmap{
 }
 
 // CodePage850 is the IBM Code Page 850 encoding.
-var CodePage850 encoding.Encoding = &codePage850
+var CodePage850 *Charmap = &codePage850
 
-var codePage850 = charmap{
+var codePage850 = Charmap{
     name: "IBM Code Page 850",
     mib: identifier.PC850Multilingual,
     asciiSuperset: true,
@@ -533,9 +533,9 @@ var codePage850 = charmap{
 }
 
 // CodePage852 is the IBM Code Page 852 encoding.
-var CodePage852 encoding.Encoding = &codePage852
+var CodePage852 *Charmap = &codePage852
 
-var codePage852 = charmap{
+var codePage852 = Charmap{
     name: "IBM Code Page 852",
     mib: identifier.PCp852,
     asciiSuperset: true,
@@ -708,9 +708,9 @@ var codePage852 = charmap{
 }
 
 // CodePage855 is the IBM Code Page 855 encoding.
-var CodePage855 encoding.Encoding = &codePage855
+var CodePage855 *Charmap = &codePage855
 
-var codePage855 = charmap{
+var codePage855 = Charmap{
     name: "IBM Code Page 855",
     mib: identifier.IBM855,
     asciiSuperset: true,
@@ -883,9 +883,9 @@ var codePage855 = charmap{
 }
 
 // CodePage858 is the Windows Code Page 858 encoding.
-var CodePage858 encoding.Encoding = &codePage858
+var CodePage858 *Charmap = &codePage858
 
-var codePage858 = charmap{
+var codePage858 = Charmap{
     name: "Windows Code Page 858",
     mib: identifier.IBM00858,
     asciiSuperset: true,
@@ -1058,9 +1058,9 @@ var codePage858 = charmap{
 }
 
 // CodePage860 is the IBM Code Page 860 encoding.
-var CodePage860 encoding.Encoding = &codePage860
+var CodePage860 *Charmap = &codePage860
 
-var codePage860 = charmap{
+var codePage860 = Charmap{
     name: "IBM Code Page 860",
     mib: identifier.IBM860,
     asciiSuperset: true,
@@ -1233,9 +1233,9 @@ var codePage860 = charmap{
 }
 
 // CodePage862 is the IBM Code Page 862 encoding.
-var CodePage862 encoding.Encoding = &codePage862
+var CodePage862 *Charmap = &codePage862
 
-var codePage862 = charmap{
+var codePage862 = Charmap{
     name: "IBM Code Page 862",
     mib: identifier.PC862LatinHebrew,
     asciiSuperset: true,
@@ -1408,9 +1408,9 @@ var codePage862 = charmap{
 }
 
 // CodePage863 is the IBM Code Page 863 encoding.
-var CodePage863 encoding.Encoding = &codePage863
+var CodePage863 *Charmap = &codePage863
 
-var codePage863 = charmap{
+var codePage863 = Charmap{
     name: "IBM Code Page 863",
     mib: identifier.IBM863,
     asciiSuperset: true,
@@ -1583,9 +1583,9 @@ var codePage863 = charmap{
 }
 
 // CodePage865 is the IBM Code Page 865 encoding.
-var CodePage865 encoding.Encoding = &codePage865
+var CodePage865 *Charmap = &codePage865
 
-var codePage865 = charmap{
+var codePage865 = Charmap{
     name: "IBM Code Page 865",
     mib: identifier.IBM865,
     asciiSuperset: true,
@@ -1758,9 +1758,9 @@ var codePage865 = charmap{
 }
 
 // CodePage866 is the IBM Code Page 866 encoding.
-var CodePage866 encoding.Encoding = &codePage866
+var CodePage866 *Charmap = &codePage866
 
-var codePage866 = charmap{
+var codePage866 = Charmap{
     name: "IBM Code Page 866",
     mib: identifier.IBM866,
     asciiSuperset: true,
@@ -1933,9 +1933,9 @@ var codePage866 = charmap{
 }
 
 // CodePage1047 is the IBM Code Page 1047 encoding.
-var CodePage1047 encoding.Encoding = &codePage1047
+var CodePage1047 *Charmap = &codePage1047
 
-var codePage1047 = charmap{
+var codePage1047 = Charmap{
     name: "IBM Code Page 1047",
     mib: identifier.IBM1047,
     asciiSuperset: false,
@@ -2108,9 +2108,9 @@ var codePage1047 = charmap{
 }
 
 // CodePage1140 is the IBM Code Page 1140 encoding.
-var CodePage1140 encoding.Encoding = &codePage1140
+var CodePage1140 *Charmap = &codePage1140
 
-var codePage1140 = charmap{
+var codePage1140 = Charmap{
     name: "IBM Code Page 1140",
     mib: identifier.IBM01140,
     asciiSuperset: false,
@@ -2283,9 +2283,9 @@ var codePage1140 = charmap{
 }
 
 // ISO8859_1 is the ISO 8859-1 encoding.
-var ISO8859_1 encoding.Encoding = &iso8859_1
+var ISO8859_1 *Charmap = &iso8859_1
 
-var iso8859_1 = charmap{
+var iso8859_1 = Charmap{
     name: "ISO 8859-1",
     mib: identifier.ISOLatin1,
     asciiSuperset: true,
@@ -2458,9 +2458,9 @@ var iso8859_1 = charmap{
 }
 
 // ISO8859_2 is the ISO 8859-2 encoding.
-var ISO8859_2 encoding.Encoding = &iso8859_2
+var ISO8859_2 *Charmap = &iso8859_2
 
-var iso8859_2 = charmap{
+var iso8859_2 = Charmap{
     name: "ISO 8859-2",
     mib: identifier.ISOLatin2,
     asciiSuperset: true,
@@ -2633,9 +2633,9 @@ var iso8859_2 = charmap{
 }
 
 // ISO8859_3 is the ISO 8859-3 encoding.
-var ISO8859_3 encoding.Encoding = &iso8859_3
+var ISO8859_3 *Charmap = &iso8859_3
 
-var iso8859_3 = charmap{
+var iso8859_3 = Charmap{
     name: "ISO 8859-3",
     mib: identifier.ISOLatin3,
     asciiSuperset: true,
@@ -2808,9 +2808,9 @@ var iso8859_3 = charmap{
 }
 
 // ISO8859_4 is the ISO 8859-4 encoding.
-var ISO8859_4 encoding.Encoding = &iso8859_4
+var ISO8859_4 *Charmap = &iso8859_4
 
-var iso8859_4 = charmap{
+var iso8859_4 = Charmap{
     name: "ISO 8859-4",
     mib: identifier.ISOLatin4,
     asciiSuperset: true,
@@ -2983,9 +2983,9 @@ var iso8859_4 = charmap{
 }
 
 // ISO8859_5 is the ISO 8859-5 encoding.
-var ISO8859_5 encoding.Encoding = &iso8859_5
+var ISO8859_5 *Charmap = &iso8859_5
 
-var iso8859_5 = charmap{
+var iso8859_5 = Charmap{
     name: "ISO 8859-5",
     mib: identifier.ISOLatinCyrillic,
     asciiSuperset: true,
@@ -3158,9 +3158,9 @@ var iso8859_5 = charmap{
 }
 
 // ISO8859_6 is the ISO 8859-6 encoding.
-var ISO8859_6 encoding.Encoding = &iso8859_6
+var ISO8859_6 *Charmap = &iso8859_6
 
-var iso8859_6 = charmap{
+var iso8859_6 = Charmap{
     name: "ISO 8859-6",
     mib: identifier.ISOLatinArabic,
     asciiSuperset: true,
@@ -3333,9 +3333,9 @@ var iso8859_6 = charmap{
 }
 
 // ISO8859_7 is the ISO 8859-7 encoding.
-var ISO8859_7 encoding.Encoding = &iso8859_7
+var ISO8859_7 *Charmap = &iso8859_7
 
-var iso8859_7 = charmap{
+var iso8859_7 = Charmap{
     name: "ISO 8859-7",
     mib: identifier.ISOLatinGreek,
     asciiSuperset: true,
@@ -3508,9 +3508,9 @@ var iso8859_7 = charmap{
 }
 
 // ISO8859_8 is the ISO 8859-8 encoding.
-var ISO8859_8 encoding.Encoding = &iso8859_8
+var ISO8859_8 *Charmap = &iso8859_8
 
-var iso8859_8 = charmap{
+var iso8859_8 = Charmap{
     name: "ISO 8859-8",
     mib: identifier.ISOLatinHebrew,
     asciiSuperset: true,
@@ -3683,9 +3683,9 @@ var iso8859_8 = charmap{
 }
 
 // ISO8859_9 is the ISO 8859-9 encoding.
-var ISO8859_9 encoding.Encoding = &iso8859_9
+var ISO8859_9 *Charmap = &iso8859_9
 
-var iso8859_9 = charmap{
+var iso8859_9 = Charmap{
     name: "ISO 8859-9",
     mib: identifier.ISOLatin5,
     asciiSuperset: true,
@@ -3858,9 +3858,9 @@ var iso8859_9 = charmap{
 }
 
 // ISO8859_10 is the ISO 8859-10 encoding.
-var ISO8859_10 encoding.Encoding = &iso8859_10
+var ISO8859_10 *Charmap = &iso8859_10
 
-var iso8859_10 = charmap{
+var iso8859_10 = Charmap{
     name: "ISO 8859-10",
     mib: identifier.ISOLatin6,
     asciiSuperset: true,
@@ -4033,9 +4033,9 @@ var iso8859_10 = charmap{
 }
 
 // ISO8859_13 is the ISO 8859-13 encoding.
-var ISO8859_13 encoding.Encoding = &iso8859_13
+var ISO8859_13 *Charmap = &iso8859_13
 
-var iso8859_13 = charmap{
+var iso8859_13 = Charmap{
     name: "ISO 8859-13",
     mib: identifier.ISO885913,
     asciiSuperset: true,
@@ -4208,9 +4208,9 @@ var iso8859_13 = charmap{
 }
 
 // ISO8859_14 is the ISO 8859-14 encoding.
-var ISO8859_14 encoding.Encoding = &iso8859_14
+var ISO8859_14 *Charmap = &iso8859_14
 
-var iso8859_14 = charmap{
+var iso8859_14 = Charmap{
     name: "ISO 8859-14",
     mib: identifier.ISO885914,
     asciiSuperset: true,
@@ -4383,9 +4383,9 @@ var iso8859_14 = charmap{
 }
 
 // ISO8859_15 is the ISO 8859-15 encoding.
-var ISO8859_15 encoding.Encoding = &iso8859_15
+var ISO8859_15 *Charmap = &iso8859_15
 
-var iso8859_15 = charmap{
+var iso8859_15 = Charmap{
     name: "ISO 8859-15",
     mib: identifier.ISO885915,
     asciiSuperset: true,
@@ -4558,9 +4558,9 @@ var iso8859_15 = charmap{
 }
 
 // ISO8859_16 is the ISO 8859-16 encoding.
-var ISO8859_16 encoding.Encoding = &iso8859_16
+var ISO8859_16 *Charmap = &iso8859_16
 
-var iso8859_16 = charmap{
+var iso8859_16 = Charmap{
     name: "ISO 8859-16",
     mib: identifier.ISO885916,
     asciiSuperset: true,
@@ -4733,9 +4733,9 @@ var iso8859_16 = charmap{
 }
 
 // KOI8R is the KOI8-R encoding.
-var KOI8R encoding.Encoding = &koi8R
+var KOI8R *Charmap = &koi8R
 
-var koi8R = charmap{
+var koi8R = Charmap{
     name: "KOI8-R",
     mib: identifier.KOI8R,
     asciiSuperset: true,
@@ -4908,9 +4908,9 @@ var koi8R = charmap{
 }
 
 // KOI8U is the KOI8-U encoding.
-var KOI8U encoding.Encoding = &koi8U
+var KOI8U *Charmap = &koi8U
 
-var koi8U = charmap{
+var koi8U = Charmap{
     name: "KOI8-U",
     mib: identifier.KOI8U,
     asciiSuperset: true,
@@ -5083,9 +5083,9 @@ var koi8U = charmap{
 }
 
 // Macintosh is the Macintosh encoding.
-var Macintosh encoding.Encoding = &macintosh
+var Macintosh *Charmap = &macintosh
 
-var macintosh = charmap{
+var macintosh = Charmap{
     name: "Macintosh",
     mib: identifier.Macintosh,
     asciiSuperset: true,
@@ -5258,9 +5258,9 @@ var macintosh = charmap{
 }
 
 // MacintoshCyrillic is the Macintosh Cyrillic encoding.
-var MacintoshCyrillic encoding.Encoding = &macintoshCyrillic
+var MacintoshCyrillic *Charmap = &macintoshCyrillic
 
-var macintoshCyrillic = charmap{
+var macintoshCyrillic = Charmap{
     name: "Macintosh Cyrillic",
     mib: identifier.MacintoshCyrillic,
     asciiSuperset: true,
@@ -5433,9 +5433,9 @@ var macintoshCyrillic = charmap{
 }
 
 // Windows874 is the Windows 874 encoding.
-var Windows874 encoding.Encoding = &windows874
+var Windows874 *Charmap = &windows874
 
-var windows874 = charmap{
+var windows874 = Charmap{
     name: "Windows 874",
     mib: identifier.Windows874,
     asciiSuperset: true,
@@ -5608,9 +5608,9 @@ var windows874 = charmap{
 }
 
 // Windows1250 is the Windows 1250 encoding.
-var Windows1250 encoding.Encoding = &windows1250
+var Windows1250 *Charmap = &windows1250
 
-var windows1250 = charmap{
+var windows1250 = Charmap{
     name: "Windows 1250",
     mib: identifier.Windows1250,
     asciiSuperset: true,
@@ -5783,9 +5783,9 @@ var windows1250 = charmap{
 }
 
 // Windows1251 is the Windows 1251 encoding.
-var Windows1251 encoding.Encoding = &windows1251
+var Windows1251 *Charmap = &windows1251
 
-var windows1251 = charmap{
+var windows1251 = Charmap{
     name: "Windows 1251",
     mib: identifier.Windows1251,
     asciiSuperset: true,
@@ -5958,9 +5958,9 @@ var windows1251 = charmap{
 }
 
 // Windows1252 is the Windows 1252 encoding.
-var Windows1252 encoding.Encoding = &windows1252
+var Windows1252 *Charmap = &windows1252
 
-var windows1252 = charmap{
+var windows1252 = Charmap{
     name: "Windows 1252",
     mib: identifier.Windows1252,
     asciiSuperset: true,
@@ -6133,9 +6133,9 @@ var windows1252 = charmap{
 }
 
 // Windows1253 is the Windows 1253 encoding.
-var Windows1253 encoding.Encoding = &windows1253
+var Windows1253 *Charmap = &windows1253
 
-var windows1253 = charmap{
+var windows1253 = Charmap{
     name: "Windows 1253",
     mib: identifier.Windows1253,
     asciiSuperset: true,
@@ -6308,9 +6308,9 @@ var windows1253 = charmap{
 }
 
 // Windows1254 is the Windows 1254 encoding.
-var Windows1254 encoding.Encoding = &windows1254
+var Windows1254 *Charmap = &windows1254
 
-var windows1254 = charmap{
+var windows1254 = Charmap{
     name: "Windows 1254",
     mib: identifier.Windows1254,
     asciiSuperset: true,
@@ -6483,9 +6483,9 @@ var windows1254 = charmap{
 }
 
 // Windows1255 is the Windows 1255 encoding.
-var Windows1255 encoding.Encoding = &windows1255
+var Windows1255 *Charmap = &windows1255
 
-var windows1255 = charmap{
+var windows1255 = Charmap{
     name: "Windows 1255",
     mib: identifier.Windows1255,
     asciiSuperset: true,
@@ -6593,7 +6593,7 @@ var windows1255 = charmap{
         {2, [3]byte{0xd6, 0xb4, 0x00}}, {2, [3]byte{0xd6, 0xb5, 0x00}},
         {2, [3]byte{0xd6, 0xb6, 0x00}}, {2, [3]byte{0xd6, 0xb7, 0x00}},
         {2, [3]byte{0xd6, 0xb8, 0x00}}, {2, [3]byte{0xd6, 0xb9, 0x00}},
-        {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xd6, 0xbb, 0x00}},
+        {2, [3]byte{0xd6, 0xba, 0x00}}, {2, [3]byte{0xd6, 0xbb, 0x00}},
         {2, [3]byte{0xd6, 0xbc, 0x00}}, {2, [3]byte{0xd6, 0xbd, 0x00}},
         {2, [3]byte{0xd6, 0xbe, 0x00}}, {2, [3]byte{0xd6, 0xbf, 0x00}},
         {2, [3]byte{0xd7, 0x80, 0x00}}, {2, [3]byte{0xd7, 0x81, 0x00}},
@@ -6643,24 +6643,24 @@ var windows1255 = charmap{
         0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xb80000b8, 0xb90000b9,
         0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, 0xaa0000d7, 0xba0000f7, 0x83000192,
         0x880002c6, 0x980002dc, 0xc00005b0, 0xc10005b1, 0xc20005b2, 0xc30005b3, 0xc40005b4, 0xc50005b5,
-        0xc60005b6, 0xc70005b7, 0xc80005b8, 0xc90005b9, 0xcb0005bb, 0xcc0005bc, 0xcd0005bd, 0xce0005be,
-        0xcf0005bf, 0xd00005c0, 0xd10005c1, 0xd20005c2, 0xd30005c3, 0xe00005d0, 0xe10005d1, 0xe20005d2,
-        0xe30005d3, 0xe40005d4, 0xe50005d5, 0xe60005d6, 0xe70005d7, 0xe80005d8, 0xe90005d9, 0xea0005da,
-        0xeb0005db, 0xec0005dc, 0xed0005dd, 0xee0005de, 0xef0005df, 0xf00005e0, 0xf10005e1, 0xf20005e2,
-        0xf30005e3, 0xf40005e4, 0xf50005e5, 0xf60005e6, 0xf70005e7, 0xf80005e8, 0xf90005e9, 0xfa0005ea,
-        0xd40005f0, 0xd50005f1, 0xd60005f2, 0xd70005f3, 0xd80005f4, 0xfd00200e, 0xfe00200f, 0x96002013,
-        0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, 0x86002020,
-        0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0xa40020aa, 0x800020ac,
-        0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122,
+        0xc60005b6, 0xc70005b7, 0xc80005b8, 0xc90005b9, 0xca0005ba, 0xcb0005bb, 0xcc0005bc, 0xcd0005bd,
+        0xce0005be, 0xcf0005bf, 0xd00005c0, 0xd10005c1, 0xd20005c2, 0xd30005c3, 0xe00005d0, 0xe10005d1,
+        0xe20005d2, 0xe30005d3, 0xe40005d4, 0xe50005d5, 0xe60005d6, 0xe70005d7, 0xe80005d8, 0xe90005d9,
+        0xea0005da, 0xeb0005db, 0xec0005dc, 0xed0005dd, 0xee0005de, 0xef0005df, 0xf00005e0, 0xf10005e1,
+        0xf20005e2, 0xf30005e3, 0xf40005e4, 0xf50005e5, 0xf60005e6, 0xf70005e7, 0xf80005e8, 0xf90005e9,
+        0xfa0005ea, 0xd40005f0, 0xd50005f1, 0xd60005f2, 0xd70005f3, 0xd80005f4, 0xfd00200e, 0xfe00200f,
+        0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e,
+        0x86002020, 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0xa40020aa,
+        0x800020ac, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122,
         0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122,
         0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122,
     },
 }
 
 // Windows1256 is the Windows 1256 encoding.
-var Windows1256 encoding.Encoding = &windows1256
+var Windows1256 *Charmap = &windows1256
 
-var windows1256 = charmap{
+var windows1256 = Charmap{
     name: "Windows 1256",
     mib: identifier.Windows1256,
     asciiSuperset: true,
@@ -6833,9 +6833,9 @@ var windows1256 = charmap{
 }
 
 // Windows1257 is the Windows 1257 encoding.
-var Windows1257 encoding.Encoding = &windows1257
+var Windows1257 *Charmap = &windows1257
 
-var windows1257 = charmap{
+var windows1257 = Charmap{
     name: "Windows 1257",
     mib: identifier.Windows1257,
     asciiSuperset: true,
@@ -7008,9 +7008,9 @@ var windows1257 = charmap{
 }
 
 // Windows1258 is the Windows 1258 encoding.
-var Windows1258 encoding.Encoding = &windows1258
+var Windows1258 *Charmap = &windows1258
 
-var windows1258 = charmap{
+var windows1258 = Charmap{
     name: "Windows 1258",
     mib: identifier.Windows1258,
     asciiSuperset: true,
@@ -7185,9 +7185,9 @@ var windows1258 = charmap{
 // XUserDefined is the X-User-Defined encoding.
 //
 // It is defined at http://encoding.spec.whatwg.org/#x-user-defined
-var XUserDefined encoding.Encoding = &xUserDefined
+var XUserDefined *Charmap = &xUserDefined
 
-var xUserDefined = charmap{
+var xUserDefined = Charmap{
     name: "X-User-Defined",
     mib: identifier.XUserDefined,
     asciiSuperset: true,
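The Windows-1255 table change above maps byte 0xCA to U+05BA (previously decoded as the replacement character U+FFFD) and adds the matching encode entry. A quick check against the updated tables (illustrative example, not part of the diff):

package main

import (
    "fmt"

    "golang.org/x/text/encoding/charmap"
)

func main() {
    // Byte 0xCA now decodes to U+05BA instead of U+FFFD.
    r := charmap.Windows1255.DecodeByte(0xca)
    fmt.Printf("U+%04X\n", r) // U+05BA

    // ...and U+05BA round-trips back to byte 0xCA.
    if b, ok := charmap.Windows1255.EncodeRune('\u05ba'); ok {
        fmt.Printf("0x%02X\n", b) // 0xCA
    }
}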
vendor/golang.org/x/text/encoding/htmlindex/gen.go (generated, vendored; 5 lines changed)
@@ -133,7 +133,10 @@ var consts = map[string]string{
 // locales is taken from
 // https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm.
 var locales = []struct{ tag, name string }{
-    {"und", "windows-1252"}, // The default value.
+    // The default value. Explicitly state latin to benefit from the exact
+    // script option, while still making 1252 the default encoding for languages
+    // written in Latin script.
+    {"und_Latn", "windows-1252"},
     {"ar", "windows-1256"},
     {"ba", "windows-1251"},
     {"be", "windows-1251"},
vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go (generated, vendored; 2 lines changed)
@@ -50,7 +50,7 @@ func LanguageDefault(tag language.Tag) string {
         for _, t := range strings.Split(locales, " ") {
             tags = append(tags, language.MustParse(t))
         }
-        matcher = language.NewMatcher(tags)
+        matcher = language.NewMatcher(tags, language.PreferSameScript(true))
     })
     _, i, _ := matcher.Match(tag)
     return canonical[localeMap[i]] // Default is Windows-1252.
vendor/golang.org/x/text/encoding/htmlindex/tables.go (generated, vendored; 6 lines changed)
@@ -1,4 +1,4 @@
-// This file was generated by go generate; DO NOT EDIT
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 package htmlindex
 
@@ -313,7 +313,7 @@ var nameMap = map[string]htmlEncoding{
 }
 
 var localeMap = []htmlEncoding{
-    windows1252, // und
+    windows1252, // und_Latn
     windows1256, // ar
     windows1251, // ba
     windows1251, // be
@@ -349,4 +349,4 @@ var localeMap = []htmlEncoding{
     big5, // zh-hant
 }
 
-const locales = "und ar ba be bg cs el et fa he hr hu ja kk ko ku ky lt lv mk pl ru sah sk sl sr tg th tr tt uk vi zh-hans zh-hant"
+const locales = "und_Latn ar ba be bg cs el et fa he hr hu ja kk ko ku ky lt lv mk pl ru sah sk sl sr tg th tr tt uk vi zh-hans zh-hant"
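A sketch of how the updated locale matching behaves (illustrative example, not part of the diff; the expected outputs are assumptions based on the WHATWG table and the und_Latn default described above):

package main

import (
    "fmt"

    "golang.org/x/text/encoding/htmlindex"
    "golang.org/x/text/language"
)

func main() {
    // Russian is listed in the locale table; expected default: windows-1251.
    fmt.Println(htmlindex.LanguageDefault(language.MustParse("ru")))

    // A Latin-script language not listed in the table should now match the
    // und_Latn entry via PreferSameScript and default to windows-1252.
    fmt.Println(htmlindex.LanguageDefault(language.MustParse("nl")))
}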
vendor/golang.org/x/text/encoding/ianaindex/gen.go (generated, vendored; new file, 192 lines)
@@ -0,0 +1,192 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+    "encoding/xml"
+    "fmt"
+    "io"
+    "log"
+    "sort"
+    "strconv"
+    "strings"
+
+    "golang.org/x/text/encoding/internal/identifier"
+    "golang.org/x/text/internal/gen"
+)
+
+type registry struct {
+    XMLName  xml.Name `xml:"registry"`
+    Updated  string   `xml:"updated"`
+    Registry []struct {
+        ID     string `xml:"id,attr"`
+        Record []struct {
+            Name string `xml:"name"`
+            Xref []struct {
+                Type string `xml:"type,attr"`
+                Data string `xml:"data,attr"`
+            } `xml:"xref"`
+            Desc struct {
+                Data string `xml:",innerxml"`
+            } `xml:"description,"`
+            MIB   string   `xml:"value"`
+            Alias []string `xml:"alias"`
+            MIME  string   `xml:"preferred_alias"`
+        } `xml:"record"`
+    } `xml:"registry"`
+}
+
+func main() {
+    r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml")
+    reg := &registry{}
+    if err := xml.NewDecoder(r).Decode(&reg); err != nil && err != io.EOF {
+        log.Fatalf("Error decoding charset registry: %v", err)
+    }
+    if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" {
+        log.Fatalf("Unexpected ID %s", reg.Registry[0].ID)
+    }
+
+    x := &indexInfo{}
+
+    for _, rec := range reg.Registry[0].Record {
+        mib := identifier.MIB(parseInt(rec.MIB))
+        x.addEntry(mib, rec.Name)
+        for _, a := range rec.Alias {
+            a = strings.Split(a, " ")[0] // strip comments.
+            x.addAlias(a, mib)
+            // MIB name aliases are prefixed with a "cs" (character set) in the
+            // registry to identify them as display names and to ensure that
+            // the name starts with a lowercase letter in case it is used as
+            // an identifier. We remove it to be left with a nice clean name.
+            if strings.HasPrefix(a, "cs") {
+                x.setName(2, a[2:])
+            }
+        }
+        if rec.MIME != "" {
+            x.addAlias(rec.MIME, mib)
+            x.setName(1, rec.MIME)
+        }
+    }
+
+    w := gen.NewCodeWriter()
+
+    fmt.Fprintln(w, `import "golang.org/x/text/encoding/internal/identifier"`)
+
+    writeIndex(w, x)
+
+    w.WriteGoFile("tables.go", "ianaindex")
+}
+
+type alias struct {
+    name string
+    mib  identifier.MIB
+}
+
+type indexInfo struct {
+    // compacted index from code to MIB
+    codeToMIB []identifier.MIB
+    alias     []alias
+    names     [][3]string
+}
+
+func (ii *indexInfo) Len() int {
+    return len(ii.codeToMIB)
+}
+
+func (ii *indexInfo) Less(a, b int) bool {
+    return ii.codeToMIB[a] < ii.codeToMIB[b]
+}
+
+func (ii *indexInfo) Swap(a, b int) {
+    ii.codeToMIB[a], ii.codeToMIB[b] = ii.codeToMIB[b], ii.codeToMIB[a]
+    // Co-sort the names.
+    ii.names[a], ii.names[b] = ii.names[b], ii.names[a]
+}
+
+func (ii *indexInfo) setName(i int, name string) {
+    ii.names[len(ii.names)-1][i] = name
+}
+
+func (ii *indexInfo) addEntry(mib identifier.MIB, name string) {
+    ii.names = append(ii.names, [3]string{name, name, name})
+    ii.addAlias(name, mib)
+    ii.codeToMIB = append(ii.codeToMIB, mib)
+}
+
+func (ii *indexInfo) addAlias(name string, mib identifier.MIB) {
+    // Don't add duplicates for the same mib. Adding duplicate aliases for
+    // different MIBs will cause the compiler to barf on an invalid map: great!.
+    for i := len(ii.alias) - 1; i >= 0 && ii.alias[i].mib == mib; i-- {
+        if ii.alias[i].name == name {
+            return
+        }
+    }
+    ii.alias = append(ii.alias, alias{name, mib})
+    lower := strings.ToLower(name)
+    if lower != name {
+        ii.addAlias(lower, mib)
+    }
+}
+
+const maxMIMENameLen = '0' - 1 // officially 40, but we leave some buffer.
+
+func writeIndex(w *gen.CodeWriter, x *indexInfo) {
+    sort.Stable(x)
+
+    // Write constants.
+    fmt.Fprintln(w, "const (")
+    for i, m := range x.codeToMIB {
+        if i == 0 {
+            fmt.Fprintf(w, "enc%d = iota\n", m)
+        } else {
+            fmt.Fprintf(w, "enc%d\n", m)
+        }
+    }
+    fmt.Fprintln(w, "numIANA")
+    fmt.Fprintln(w, ")")
+
+    w.WriteVar("ianaToMIB", x.codeToMIB)
+
+    var ianaNames, mibNames []string
+    for _, names := range x.names {
+        n := names[0]
+        if names[0] != names[1] {
+            // MIME names are mostly identical to IANA names. We share the
+            // tables by setting the first byte of the string to an index into
+            // the string itself (< maxMIMENameLen) to the IANA name. The MIME
+            // name immediately follows the index.
+            x := len(names[1]) + 1
+            if x > maxMIMENameLen {
+                log.Fatalf("MIME name length (%d) > %d", x, maxMIMENameLen)
+            }
+            n = string(x) + names[1] + names[0]
+        }
+        ianaNames = append(ianaNames, n)
+        mibNames = append(mibNames, names[2])
+    }
+
+    w.WriteVar("ianaNames", ianaNames)
+    w.WriteVar("mibNames", mibNames)
+
+    w.WriteComment(`
+    TODO: Instead of using a map, we could use binary search strings doing
+    on-the fly lower-casing per character. This allows to always avoid
+    allocation and will be considerably more compact.`)
+    fmt.Fprintln(w, "var ianaAliases = map[string]int{")
+    for _, a := range x.alias {
+        fmt.Fprintf(w, "%q: enc%d,\n", a.name, a.mib)
+    }
+    fmt.Fprintln(w, "}")
+}
+
+func parseInt(s string) int {
+    x, err := strconv.ParseInt(s, 10, 64)
+    if err != nil {
+        log.Fatalf("Could not parse integer: %v", err)
+    }
+    return int(x)
+}
vendor/golang.org/x/text/encoding/ianaindex/ianaindex.go (generated, vendored; 160 lines changed)
@@ -2,17 +2,28 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:generate go run gen.go
+
 // Package ianaindex maps names to Encodings as specified by the IANA registry.
 // This includes both the MIME and IANA names.
 //
-// Status: this package is an incomplete API sketch, and isn't usable yet.
-//
 // See http://www.iana.org/assignments/character-sets/character-sets.xhtml for
 // more details.
 package ianaindex
 
 import (
+    "errors"
+    "sort"
+    "strings"
+
     "golang.org/x/text/encoding"
+    "golang.org/x/text/encoding/charmap"
+    "golang.org/x/text/encoding/internal/identifier"
+    "golang.org/x/text/encoding/japanese"
+    "golang.org/x/text/encoding/korean"
+    "golang.org/x/text/encoding/simplifiedchinese"
+    "golang.org/x/text/encoding/traditionalchinese"
+    "golang.org/x/text/encoding/unicode"
 )
 
 // TODO: remove the "Status... incomplete" in the package doc comment.
@@ -25,28 +36,68 @@ import (
 // support MIME otherwise.
 
 var (
-    // MIME is an index to map MIME names. It does not support aliases.
-    MIME *Index
+    // MIME is an index to map MIME names.
+    MIME *Index = mime
 
     // IANA is an index that supports all names and aliases using IANA names as
     // the canonical identifier.
-    IANA *Index
+    IANA *Index = iana
+
+    // MIB is an index that associates the MIB display name with an Encoding.
+    MIB *Index = mib
+
+    mime = &Index{mimeName, ianaToMIB, ianaAliases, encodings[:]}
+    iana = &Index{ianaName, ianaToMIB, ianaAliases, encodings[:]}
+    mib  = &Index{mibName, ianaToMIB, ianaAliases, encodings[:]}
 )
 
 // Index maps names registered by IANA to Encodings.
+// Currently different Indexes only differ in the names they return for
+// encodings. In the future they may also differ in supported aliases.
 type Index struct {
+    names func(i int) string
+    toMIB []identifier.MIB // Sorted slice of supported MIBs
+    alias map[string]int
+    enc   []encoding.Encoding
 }
 
-// Get returns an Encoding for IANA-registered names. Matching is
+var (
+    errInvalidName = errors.New("ianaindex: invalid encoding name")
+    errUnknown     = errors.New("ianaindex: unknown Encoding")
+    errUnsupported = errors.New("ianaindex: unsupported Encoding")
+)
+
+// Encoding returns an Encoding for IANA-registered names. Matching is
 // case-insensitive.
-func (x *Index) Get(name string) (encoding.Encoding, error) {
-    panic("TODO: implement")
+func (x *Index) Encoding(name string) (encoding.Encoding, error) {
+    name = strings.TrimSpace(name)
+    // First try without lowercasing (possibly creating an allocation).
+    i, ok := x.alias[name]
+    if !ok {
+        i, ok = x.alias[strings.ToLower(name)]
+        if !ok {
+            return nil, errInvalidName
+        }
+    }
+    return x.enc[i], nil
 }
 
 // Name reports the canonical name of the given Encoding. It will return an
 // error if the e is not associated with a known encoding scheme.
 func (x *Index) Name(e encoding.Encoding) (string, error) {
-    panic("TODO: implement")
+    id, ok := e.(identifier.Interface)
+    if !ok {
+        return "", errUnknown
+    }
+    mib, _ := id.ID()
+    if mib == 0 {
+        return "", errUnknown
+    }
+    v := findMIB(x.toMIB, mib)
+    if v == -1 {
+        return "", errUnsupported
+    }
+    return x.names(v), nil
 }
 
 // TODO: the coverage of this index is rather spotty. Allowing users to set
@@ -65,3 +116,94 @@ func (x *Index) Name(e encoding.Encoding) (string, error) {
 // func (x *Index) Set(name string, e encoding.Encoding) error {
 // 	panic("TODO: implement")
 // }
+
+func findMIB(x []identifier.MIB, mib identifier.MIB) int {
+    i := sort.Search(len(x), func(i int) bool { return x[i] >= mib })
+    if i < len(x) && x[i] == mib {
+        return i
+    }
+    return -1
+}
+
+const maxMIMENameLen = '0' - 1 // officially 40, but we leave some buffer.
+
+func mimeName(x int) string {
+    n := ianaNames[x]
+    // See gen.go for a description of the encoding.
+    if n[0] <= maxMIMENameLen {
+        return n[1:n[0]]
+    }
+    return n
+}
+
+func ianaName(x int) string {
+    n := ianaNames[x]
+    // See gen.go for a description of the encoding.
+    if n[0] <= maxMIMENameLen {
+        return n[n[0]:]
+    }
+    return n
+}
+
+func mibName(x int) string {
+    return mibNames[x]
+}
+
+var encodings = [numIANA]encoding.Encoding{
+    enc106:  unicode.UTF8,
+    enc1015: unicode.UTF16(unicode.BigEndian, unicode.UseBOM),
+    enc1013: unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM),
+    enc1014: unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM),
+    enc2028: charmap.CodePage037,
+    enc2011: charmap.CodePage437,
+    enc2009: charmap.CodePage850,
+    enc2010: charmap.CodePage852,
+    enc2046: charmap.CodePage855,
+    enc2089: charmap.CodePage858,
+    enc2048: charmap.CodePage860,
+    enc2013: charmap.CodePage862,
+    enc2050: charmap.CodePage863,
+    enc2052: charmap.CodePage865,
+    enc2086: charmap.CodePage866,
+    enc2102: charmap.CodePage1047,
+    enc2091: charmap.CodePage1140,
+    enc4:    charmap.ISO8859_1,
+    enc5:    charmap.ISO8859_2,
+    enc6:    charmap.ISO8859_3,
+    enc7:    charmap.ISO8859_4,
+    enc8:    charmap.ISO8859_5,
+    enc9:    charmap.ISO8859_6,
+    enc81:   charmap.ISO8859_6E,
+    enc82:   charmap.ISO8859_6I,
+    enc10:   charmap.ISO8859_7,
+    enc11:   charmap.ISO8859_8,
+    enc84:   charmap.ISO8859_8E,
+    enc85:   charmap.ISO8859_8I,
+    enc12:   charmap.ISO8859_9,
+    enc13:   charmap.ISO8859_10,
+    enc109:  charmap.ISO8859_13,
+    enc110:  charmap.ISO8859_14,
+    enc111:  charmap.ISO8859_15,
+    enc112:  charmap.ISO8859_16,
+    enc2084: charmap.KOI8R,
+    enc2088: charmap.KOI8U,
+    enc2027: charmap.Macintosh,
+    enc2109: charmap.Windows874,
+    enc2250: charmap.Windows1250,
+    enc2251: charmap.Windows1251,
+    enc2252: charmap.Windows1252,
+    enc2253: charmap.Windows1253,
+    enc2254: charmap.Windows1254,
+    enc2255: charmap.Windows1255,
+    enc2256: charmap.Windows1256,
+    enc2257: charmap.Windows1257,
+    enc2258: charmap.Windows1258,
+    enc18:   japanese.EUCJP,
+    enc39:   japanese.ISO2022JP,
+    enc17:   japanese.ShiftJIS,
+    enc38:   korean.EUCKR,
+    enc114:  simplifiedchinese.GB18030,
+    enc113:  simplifiedchinese.GBK,
+    enc2085: simplifiedchinese.HZGB2312,
+    enc2026: traditionalchinese.Big5,
+}
2348
vendor/golang.org/x/text/encoding/ianaindex/tables.go
generated
vendored
Normal file
2348
vendor/golang.org/x/text/encoding/ianaindex/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
180
vendor/golang.org/x/text/encoding/internal/enctest/enctest.go
generated
vendored
Normal file
180
vendor/golang.org/x/text/encoding/internal/enctest/enctest.go
generated
vendored
Normal file
@ -0,0 +1,180 @@
|
|||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package enctest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Encoder or Decoder
|
||||||
|
type Transcoder interface {
|
||||||
|
transform.Transformer
|
||||||
|
Bytes([]byte) ([]byte, error)
|
||||||
|
String(string) (string, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEncoding(t *testing.T, e encoding.Encoding, encoded, utf8, prefix, suffix string) {
|
||||||
|
for _, direction := range []string{"Decode", "Encode"} {
|
||||||
|
t.Run(fmt.Sprintf("%v/%s", e, direction), func(t *testing.T) {
|
||||||
|
|
||||||
|
var coder Transcoder
|
||||||
|
var want, src, wPrefix, sPrefix, wSuffix, sSuffix string
|
||||||
|
if direction == "Decode" {
|
||||||
|
coder, want, src = e.NewDecoder(), utf8, encoded
|
||||||
|
wPrefix, sPrefix, wSuffix, sSuffix = "", prefix, "", suffix
|
||||||
|
} else {
|
||||||
|
coder, want, src = e.NewEncoder(), encoded, utf8
|
||||||
|
wPrefix, sPrefix, wSuffix, sSuffix = prefix, "", suffix, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
dst := make([]byte, len(wPrefix)+len(want)+len(wSuffix))
|
||||||
|
nDst, nSrc, err := coder.Transform(dst, []byte(sPrefix+src+sSuffix), true)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if nDst != len(wPrefix)+len(want)+len(wSuffix) {
|
||||||
|
t.Fatalf("nDst got %d, want %d",
|
||||||
|
nDst, len(wPrefix)+len(want)+len(wSuffix))
|
||||||
|
}
|
||||||
|
if nSrc != len(sPrefix)+len(src)+len(sSuffix) {
|
||||||
|
t.Fatalf("nSrc got %d, want %d",
|
||||||
|
nSrc, len(sPrefix)+len(src)+len(sSuffix))
|
||||||
|
}
|
||||||
|
if got := string(dst); got != wPrefix+want+wSuffix {
|
||||||
|
t.Fatalf("\ngot %q\nwant %q", got, wPrefix+want+wSuffix)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, n := range []int{0, 1, 2, 10, 123, 4567} {
|
||||||
|
input := sPrefix + strings.Repeat(src, n) + sSuffix
|
||||||
|
g, err := coder.String(input)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Bytes: n=%d: %v", n, err)
|
||||||
|
}
|
||||||
|
if len(g) == 0 && len(input) == 0 {
|
||||||
|
// If the input is empty then the output can be empty,
|
||||||
|
// regardless of whatever wPrefix is.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
got1, want1 := string(g), wPrefix+strings.Repeat(want, n)+wSuffix
|
||||||
|
if got1 != want1 {
|
||||||
|
t.Fatalf("ReadAll: n=%d\ngot %q\nwant %q",
|
||||||
|
n, trim(got1), trim(want1))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFile(t *testing.T, e encoding.Encoding) {
|
||||||
|
for _, dir := range []string{"Decode", "Encode"} {
|
||||||
|
t.Run(fmt.Sprintf("%s/%s", e, dir), func(t *testing.T) {
|
||||||
|
dst, src, transformer, err := load(dir, e)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("load: %v", err)
|
||||||
|
}
|
||||||
|
buf, err := transformer.Bytes(src)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("transform: %v", err)
|
||||||
|
}
|
||||||
|
if !bytes.Equal(buf, dst) {
|
||||||
|
t.Error("transformed bytes did not match golden file")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Benchmark(b *testing.B, enc encoding.Encoding) {
|
||||||
|
for _, direction := range []string{"Decode", "Encode"} {
|
||||||
|
b.Run(fmt.Sprintf("%s/%s", enc, direction), func(b *testing.B) {
|
||||||
|
_, src, transformer, err := load(direction, enc)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
b.SetBytes(int64(len(src)))
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
r := transform.NewReader(bytes.NewReader(src), transformer)
|
||||||
|
io.Copy(ioutil.Discard, r)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// testdataFiles are files in testdata/*.txt.
|
||||||
|
var testdataFiles = []struct {
|
||||||
|
mib identifier.MIB
|
||||||
|
basename, ext string
|
||||||
|
}{
|
||||||
|
{identifier.Windows1252, "candide", "windows-1252"},
|
||||||
|
{identifier.EUCPkdFmtJapanese, "rashomon", "euc-jp"},
|
||||||
|
{identifier.ISO2022JP, "rashomon", "iso-2022-jp"},
|
||||||
|
{identifier.ShiftJIS, "rashomon", "shift-jis"},
|
||||||
|
{identifier.EUCKR, "unsu-joh-eun-nal", "euc-kr"},
|
||||||
|
{identifier.GBK, "sunzi-bingfa-simplified", "gbk"},
|
||||||
|
{identifier.HZGB2312, "sunzi-bingfa-gb-levels-1-and-2", "hz-gb2312"},
|
||||||
|
{identifier.Big5, "sunzi-bingfa-traditional", "big5"},
|
||||||
|
{identifier.UTF16LE, "candide", "utf-16le"},
|
||||||
|
{identifier.UTF8, "candide", "utf-8"},
|
||||||
|
{identifier.UTF32BE, "candide", "utf-32be"},
|
||||||
|
|
||||||
|
// GB18030 is a superset of GBK and is nominally a Simplified Chinese
|
||||||
|
// encoding, but it can also represent the entire Basic Multilingual
|
||||||
|
// Plane, including codepoints like 'â' that aren't encodable by GBK.
|
||||||
|
// GB18030 on Simplified Chinese should perform similarly to GBK on
|
||||||
|
// Simplified Chinese. GB18030 on "candide" is more interesting.
|
||||||
|
{identifier.GB18030, "candide", "gb18030"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func load(direction string, enc encoding.Encoding) ([]byte, []byte, Transcoder, error) {
|
||||||
|
basename, ext, count := "", "", 0
|
||||||
|
for _, tf := range testdataFiles {
|
||||||
|
if mib, _ := enc.(identifier.Interface).ID(); tf.mib == mib {
|
||||||
|
basename, ext = tf.basename, tf.ext
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if count != 1 {
|
||||||
|
if count == 0 {
|
||||||
|
return nil, nil, nil, fmt.Errorf("no testdataFiles for %s", enc)
|
||||||
|
}
|
||||||
|
return nil, nil, nil, fmt.Errorf("too many testdataFiles for %s", enc)
|
||||||
|
}
|
||||||
|
dstFile := fmt.Sprintf("../testdata/%s-%s.txt", basename, ext)
|
||||||
|
srcFile := fmt.Sprintf("../testdata/%s-utf-8.txt", basename)
|
||||||
|
var coder Transcoder = encoding.ReplaceUnsupported(enc.NewEncoder())
|
||||||
|
if direction == "Decode" {
|
||||||
|
dstFile, srcFile = srcFile, dstFile
|
||||||
|
coder = enc.NewDecoder()
|
||||||
|
}
|
||||||
|
dst, err := ioutil.ReadFile(dstFile)
|
||||||
|
if err != nil {
|
||||||
|
if dst, err = ioutil.ReadFile("../" + dstFile); err != nil {
|
||||||
|
return nil, nil, nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
src, err := ioutil.ReadFile(srcFile)
|
||||||
|
if err != nil {
|
||||||
|
if src, err = ioutil.ReadFile("../" + srcFile); err != nil {
|
||||||
|
return nil, nil, nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst, src, coder, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func trim(s string) string {
|
||||||
|
if len(s) < 120 {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
return s[:50] + "..." + s[len(s)-50:]
|
||||||
|
}
|
2
vendor/golang.org/x/text/encoding/internal/identifier/mib.go
generated
vendored
2
vendor/golang.org/x/text/encoding/internal/identifier/mib.go
generated
vendored
@ -1,4 +1,4 @@
|
|||||||
// This file was generated by go generate; DO NOT EDIT
|
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||||
|
|
||||||
package identifier
|
package identifier
|
||||||
|
|
||||||
|
58
vendor/golang.org/x/text/encoding/japanese/eucjp.go
generated
vendored
58
vendor/golang.org/x/text/encoding/japanese/eucjp.go
generated
vendored
@ -5,7 +5,6 @@
|
|||||||
package japanese
|
package japanese
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"golang.org/x/text/encoding"
|
"golang.org/x/text/encoding"
|
||||||
@ -23,10 +22,9 @@ var eucJP = internal.Encoding{
|
|||||||
identifier.EUCPkdFmtJapanese,
|
identifier.EUCPkdFmtJapanese,
|
||||||
}
|
}
|
||||||
|
|
||||||
var errInvalidEUCJP = errors.New("japanese: invalid EUC-JP encoding")
|
|
||||||
|
|
||||||
type eucJPDecoder struct{ transform.NopResetter }
|
type eucJPDecoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
// See https://encoding.spec.whatwg.org/#euc-jp-decoder.
|
||||||
func (eucJPDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (eucJPDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
r, size := rune(0), 0
|
r, size := rune(0), 0
|
||||||
loop:
|
loop:
|
||||||
@ -37,60 +35,79 @@ loop:
|
|||||||
|
|
||||||
case c0 == 0x8e:
|
case c0 == 0x8e:
|
||||||
if nSrc+1 >= len(src) {
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
err = transform.ErrShortSrc
|
err = transform.ErrShortSrc
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
break
|
||||||
|
}
|
||||||
c1 := src[nSrc+1]
|
c1 := src[nSrc+1]
|
||||||
if c1 < 0xa1 || 0xdf < c1 {
|
switch {
|
||||||
err = errInvalidEUCJP
|
case c1 < 0xa1:
|
||||||
break loop
|
r, size = utf8.RuneError, 1
|
||||||
|
case c1 > 0xdf:
|
||||||
|
r, size = utf8.RuneError, 2
|
||||||
|
if c1 == 0xff {
|
||||||
|
size = 1
|
||||||
}
|
}
|
||||||
|
default:
|
||||||
r, size = rune(c1)+(0xff61-0xa1), 2
|
r, size = rune(c1)+(0xff61-0xa1), 2
|
||||||
|
}
|
||||||
case c0 == 0x8f:
|
case c0 == 0x8f:
|
||||||
if nSrc+2 >= len(src) {
|
if nSrc+2 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
err = transform.ErrShortSrc
|
err = transform.ErrShortSrc
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
if p := nSrc + 1; p < len(src) && 0xa1 <= src[p] && src[p] < 0xfe {
|
||||||
|
size = 2
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
c1 := src[nSrc+1]
|
c1 := src[nSrc+1]
|
||||||
if c1 < 0xa1 || 0xfe < c1 {
|
if c1 < 0xa1 || 0xfe < c1 {
|
||||||
err = errInvalidEUCJP
|
r, size = utf8.RuneError, 1
|
||||||
break loop
|
break
|
||||||
}
|
}
|
||||||
c2 := src[nSrc+2]
|
c2 := src[nSrc+2]
|
||||||
if c2 < 0xa1 || 0xfe < c2 {
|
if c2 < 0xa1 || 0xfe < c2 {
|
||||||
err = errInvalidEUCJP
|
r, size = utf8.RuneError, 2
|
||||||
break loop
|
break
|
||||||
}
|
}
|
||||||
r, size = '\ufffd', 3
|
r, size = utf8.RuneError, 3
|
||||||
if i := int(c1-0xa1)*94 + int(c2-0xa1); i < len(jis0212Decode) {
|
if i := int(c1-0xa1)*94 + int(c2-0xa1); i < len(jis0212Decode) {
|
||||||
r = rune(jis0212Decode[i])
|
r = rune(jis0212Decode[i])
|
||||||
if r == 0 {
|
if r == 0 {
|
||||||
r = '\ufffd'
|
r = utf8.RuneError
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
case 0xa1 <= c0 && c0 <= 0xfe:
|
case 0xa1 <= c0 && c0 <= 0xfe:
|
||||||
if nSrc+1 >= len(src) {
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
err = transform.ErrShortSrc
|
err = transform.ErrShortSrc
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
break
|
||||||
|
}
|
||||||
c1 := src[nSrc+1]
|
c1 := src[nSrc+1]
|
||||||
if c1 < 0xa1 || 0xfe < c1 {
|
if c1 < 0xa1 || 0xfe < c1 {
|
||||||
err = errInvalidEUCJP
|
r, size = utf8.RuneError, 1
|
||||||
break loop
|
break
|
||||||
}
|
}
|
||||||
r, size = '\ufffd', 2
|
r, size = utf8.RuneError, 2
|
||||||
if i := int(c0-0xa1)*94 + int(c1-0xa1); i < len(jis0208Decode) {
|
if i := int(c0-0xa1)*94 + int(c1-0xa1); i < len(jis0208Decode) {
|
||||||
r = rune(jis0208Decode[i])
|
r = rune(jis0208Decode[i])
|
||||||
if r == 0 {
|
if r == 0 {
|
||||||
r = '\ufffd'
|
r = utf8.RuneError
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
err = errInvalidEUCJP
|
r, size = utf8.RuneError, 1
|
||||||
break loop
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if nDst+utf8.RuneLen(r) > len(dst) {
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
@ -99,9 +116,6 @@ loop:
|
|||||||
}
|
}
|
||||||
nDst += utf8.EncodeRune(dst[nDst:], r)
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
}
|
}
|
||||||
if atEOF && err == transform.ErrShortSrc {
|
|
||||||
err = errInvalidEUCJP
|
|
||||||
}
|
|
||||||
return nDst, nSrc, err
|
return nDst, nSrc, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
59
vendor/golang.org/x/text/encoding/japanese/iso2022jp.go
generated
vendored
59
vendor/golang.org/x/text/encoding/japanese/iso2022jp.go
generated
vendored
@ -5,7 +5,6 @@
|
|||||||
package japanese
|
package japanese
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"golang.org/x/text/encoding"
|
"golang.org/x/text/encoding"
|
||||||
@ -31,8 +30,6 @@ func iso2022JPNewEncoder() transform.Transformer {
|
|||||||
return new(iso2022JPEncoder)
|
return new(iso2022JPEncoder)
|
||||||
}
|
}
|
||||||
|
|
||||||
var errInvalidISO2022JP = errors.New("japanese: invalid ISO-2022-JP encoding")
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
asciiState = iota
|
asciiState = iota
|
||||||
katakanaState
|
katakanaState
|
||||||
@ -50,45 +47,51 @@ func (d *iso2022JPDecoder) Reset() {
|
|||||||
|
|
||||||
func (d *iso2022JPDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (d *iso2022JPDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
r, size := rune(0), 0
|
r, size := rune(0), 0
|
||||||
loop:
|
|
||||||
for ; nSrc < len(src); nSrc += size {
|
for ; nSrc < len(src); nSrc += size {
|
||||||
c0 := src[nSrc]
|
c0 := src[nSrc]
|
||||||
if c0 >= utf8.RuneSelf {
|
if c0 >= utf8.RuneSelf {
|
||||||
err = errInvalidISO2022JP
|
r, size = '\ufffd', 1
|
||||||
break loop
|
goto write
|
||||||
}
|
}
|
||||||
|
|
||||||
if c0 == asciiEsc {
|
if c0 == asciiEsc {
|
||||||
if nSrc+2 >= len(src) {
|
if nSrc+2 >= len(src) {
|
||||||
err = transform.ErrShortSrc
|
if !atEOF {
|
||||||
break loop
|
return nDst, nSrc, transform.ErrShortSrc
|
||||||
|
}
|
||||||
|
// TODO: is it correct to only skip 1??
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
goto write
|
||||||
}
|
}
|
||||||
size = 3
|
size = 3
|
||||||
c1 := src[nSrc+1]
|
c1 := src[nSrc+1]
|
||||||
c2 := src[nSrc+2]
|
c2 := src[nSrc+2]
|
||||||
switch {
|
switch {
|
||||||
case c1 == '$' && (c2 == '@' || c2 == 'B'):
|
case c1 == '$' && (c2 == '@' || c2 == 'B'): // 0x24 {0x40, 0x42}
|
||||||
*d = jis0208State
|
*d = jis0208State
|
||||||
continue
|
continue
|
||||||
case c1 == '$' && c2 == '(':
|
case c1 == '$' && c2 == '(': // 0x24 0x28
|
||||||
if nSrc+3 >= len(src) {
|
if nSrc+3 >= len(src) {
|
||||||
err = transform.ErrShortSrc
|
if !atEOF {
|
||||||
break loop
|
return nDst, nSrc, transform.ErrShortSrc
|
||||||
|
}
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
goto write
|
||||||
}
|
}
|
||||||
size = 4
|
size = 4
|
||||||
if src[nSrc]+3 == 'D' {
|
if src[nSrc+3] == 'D' {
|
||||||
*d = jis0212State
|
*d = jis0212State
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
case c1 == '(' && (c2 == 'B' || c2 == 'J'):
|
case c1 == '(' && (c2 == 'B' || c2 == 'J'): // 0x28 {0x42, 0x4A}
|
||||||
*d = asciiState
|
*d = asciiState
|
||||||
continue
|
continue
|
||||||
case c1 == '(' && c2 == 'I':
|
case c1 == '(' && c2 == 'I': // 0x28 0x49
|
||||||
*d = katakanaState
|
*d = katakanaState
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
err = errInvalidISO2022JP
|
r, size = '\ufffd', 1
|
||||||
break loop
|
goto write
|
||||||
}
|
}
|
||||||
|
|
||||||
switch *d {
|
switch *d {
|
||||||
@ -97,8 +100,8 @@ loop:
|
|||||||
|
|
||||||
case katakanaState:
|
case katakanaState:
|
||||||
if c0 < 0x21 || 0x60 <= c0 {
|
if c0 < 0x21 || 0x60 <= c0 {
|
||||||
err = errInvalidISO2022JP
|
r, size = '\ufffd', 1
|
||||||
break loop
|
goto write
|
||||||
}
|
}
|
||||||
r, size = rune(c0)+(0xff61-0x21), 1
|
r, size = rune(c0)+(0xff61-0x21), 1
|
||||||
|
|
||||||
@ -106,11 +109,14 @@ loop:
|
|||||||
if c0 == 0x0a {
|
if c0 == 0x0a {
|
||||||
*d = asciiState
|
*d = asciiState
|
||||||
r, size = rune(c0), 1
|
r, size = rune(c0), 1
|
||||||
break
|
goto write
|
||||||
}
|
}
|
||||||
if nSrc+1 >= len(src) {
|
if nSrc+1 >= len(src) {
|
||||||
err = transform.ErrShortSrc
|
if !atEOF {
|
||||||
break loop
|
return nDst, nSrc, transform.ErrShortSrc
|
||||||
|
}
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
goto write
|
||||||
}
|
}
|
||||||
size = 2
|
size = 2
|
||||||
c1 := src[nSrc+1]
|
c1 := src[nSrc+1]
|
||||||
@ -121,22 +127,19 @@ loop:
|
|||||||
r = rune(jis0212Decode[i])
|
r = rune(jis0212Decode[i])
|
||||||
} else {
|
} else {
|
||||||
r = '\ufffd'
|
r = '\ufffd'
|
||||||
break
|
goto write
|
||||||
}
|
}
|
||||||
if r == 0 {
|
if r == 0 {
|
||||||
r = '\ufffd'
|
r = '\ufffd'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
write:
|
||||||
if nDst+utf8.RuneLen(r) > len(dst) {
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
err = transform.ErrShortDst
|
return nDst, nSrc, transform.ErrShortDst
|
||||||
break loop
|
|
||||||
}
|
}
|
||||||
nDst += utf8.EncodeRune(dst[nDst:], r)
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
}
|
}
|
||||||
if atEOF && err == transform.ErrShortSrc {
|
|
||||||
err = errInvalidISO2022JP
|
|
||||||
}
|
|
||||||
return nDst, nSrc, err
|
return nDst, nSrc, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
32
vendor/golang.org/x/text/encoding/japanese/shiftjis.go
generated
vendored
32
vendor/golang.org/x/text/encoding/japanese/shiftjis.go
generated
vendored
@ -5,7 +5,6 @@
|
|||||||
package japanese
|
package japanese
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"golang.org/x/text/encoding"
|
"golang.org/x/text/encoding"
|
||||||
@ -24,8 +23,6 @@ var shiftJIS = internal.Encoding{
|
|||||||
identifier.ShiftJIS,
|
identifier.ShiftJIS,
|
||||||
}
|
}
|
||||||
|
|
||||||
var errInvalidShiftJIS = errors.New("japanese: invalid Shift JIS encoding")
|
|
||||||
|
|
||||||
type shiftJISDecoder struct{ transform.NopResetter }
|
type shiftJISDecoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
func (shiftJISDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (shiftJISDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
@ -48,28 +45,32 @@ loop:
|
|||||||
c0 = 2*c0 - 0x21
|
c0 = 2*c0 - 0x21
|
||||||
|
|
||||||
if nSrc+1 >= len(src) {
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
err = transform.ErrShortSrc
|
err = transform.ErrShortSrc
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
c1 := src[nSrc+1]
|
c1 := src[nSrc+1]
|
||||||
switch {
|
switch {
|
||||||
case c1 < 0x40:
|
case c1 < 0x40:
|
||||||
err = errInvalidShiftJIS
|
r, size = '\ufffd', 1 // c1 is ASCII so output on next round
|
||||||
break loop
|
goto write
|
||||||
case c1 < 0x7f:
|
case c1 < 0x7f:
|
||||||
c0--
|
c0--
|
||||||
c1 -= 0x40
|
c1 -= 0x40
|
||||||
case c1 == 0x7f:
|
case c1 == 0x7f:
|
||||||
err = errInvalidShiftJIS
|
r, size = '\ufffd', 1 // c1 is ASCII so output on next round
|
||||||
break loop
|
goto write
|
||||||
case c1 < 0x9f:
|
case c1 < 0x9f:
|
||||||
c0--
|
c0--
|
||||||
c1 -= 0x41
|
c1 -= 0x41
|
||||||
case c1 < 0xfd:
|
case c1 < 0xfd:
|
||||||
c1 -= 0x9f
|
c1 -= 0x9f
|
||||||
default:
|
default:
|
||||||
err = errInvalidShiftJIS
|
r, size = '\ufffd', 2
|
||||||
break loop
|
goto write
|
||||||
}
|
}
|
||||||
r, size = '\ufffd', 2
|
r, size = '\ufffd', 2
|
||||||
if i := int(c0)*94 + int(c1); i < len(jis0208Decode) {
|
if i := int(c0)*94 + int(c1); i < len(jis0208Decode) {
|
||||||
@ -79,20 +80,19 @@ loop:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
default:
|
case c0 == 0x80:
|
||||||
err = errInvalidShiftJIS
|
r, size = 0x80, 1
|
||||||
break loop
|
|
||||||
}
|
|
||||||
|
|
||||||
|
default:
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
}
|
||||||
|
write:
|
||||||
if nDst+utf8.RuneLen(r) > len(dst) {
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
err = transform.ErrShortDst
|
err = transform.ErrShortDst
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
nDst += utf8.EncodeRune(dst[nDst:], r)
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
}
|
}
|
||||||
if atEOF && err == transform.ErrShortSrc {
|
|
||||||
err = errInvalidShiftJIS
|
|
||||||
}
|
|
||||||
return nDst, nSrc, err
|
return nDst, nSrc, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
35
vendor/golang.org/x/text/encoding/korean/euckr.go
generated
vendored
35
vendor/golang.org/x/text/encoding/korean/euckr.go
generated
vendored
@ -5,7 +5,6 @@
|
|||||||
package korean
|
package korean
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"golang.org/x/text/encoding"
|
"golang.org/x/text/encoding"
|
||||||
@ -26,8 +25,6 @@ var eucKR = internal.Encoding{
|
|||||||
identifier.EUCKR,
|
identifier.EUCKR,
|
||||||
}
|
}
|
||||||
|
|
||||||
var errInvalidEUCKR = errors.New("korean: invalid EUC-KR encoding")
|
|
||||||
|
|
||||||
type eucKRDecoder struct{ transform.NopResetter }
|
type eucKRDecoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
func (eucKRDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (eucKRDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
@ -40,10 +37,15 @@ loop:
|
|||||||
|
|
||||||
case 0x81 <= c0 && c0 < 0xff:
|
case 0x81 <= c0 && c0 < 0xff:
|
||||||
if nSrc+1 >= len(src) {
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
err = transform.ErrShortSrc
|
err = transform.ErrShortSrc
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
break
|
||||||
|
}
|
||||||
c1 := src[nSrc+1]
|
c1 := src[nSrc+1]
|
||||||
|
size = 2
|
||||||
if c0 < 0xc7 {
|
if c0 < 0xc7 {
|
||||||
r = 178 * rune(c0-0x81)
|
r = 178 * rune(c0-0x81)
|
||||||
switch {
|
switch {
|
||||||
@ -54,39 +56,36 @@ loop:
|
|||||||
case 0x81 <= c1 && c1 < 0xff:
|
case 0x81 <= c1 && c1 < 0xff:
|
||||||
r += rune(c1) - (0x81 - 2*26)
|
r += rune(c1) - (0x81 - 2*26)
|
||||||
default:
|
default:
|
||||||
err = errInvalidEUCKR
|
goto decError
|
||||||
break loop
|
|
||||||
}
|
}
|
||||||
} else if 0xa1 <= c1 && c1 < 0xff {
|
} else if 0xa1 <= c1 && c1 < 0xff {
|
||||||
r = 178*(0xc7-0x81) + rune(c0-0xc7)*94 + rune(c1-0xa1)
|
r = 178*(0xc7-0x81) + rune(c0-0xc7)*94 + rune(c1-0xa1)
|
||||||
} else {
|
} else {
|
||||||
err = errInvalidEUCKR
|
goto decError
|
||||||
break loop
|
|
||||||
}
|
}
|
||||||
if int(r) < len(decode) {
|
if int(r) < len(decode) {
|
||||||
r = rune(decode[r])
|
r = rune(decode[r])
|
||||||
if r == 0 {
|
if r != 0 {
|
||||||
r = '\ufffd'
|
break
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
r = '\ufffd'
|
|
||||||
}
|
}
|
||||||
size = 2
|
decError:
|
||||||
|
r = utf8.RuneError
|
||||||
|
if c1 < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
err = errInvalidEUCKR
|
r, size = utf8.RuneError, 1
|
||||||
break loop
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
if nDst+utf8.RuneLen(r) > len(dst) {
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
err = transform.ErrShortDst
|
err = transform.ErrShortDst
|
||||||
break loop
|
break
|
||||||
}
|
}
|
||||||
nDst += utf8.EncodeRune(dst[nDst:], r)
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
}
|
}
|
||||||
if atEOF && err == transform.ErrShortSrc {
|
|
||||||
err = errInvalidEUCKR
|
|
||||||
}
|
|
||||||
return nDst, nSrc, err
|
return nDst, nSrc, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
52
vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go
generated
vendored
52
vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go
generated
vendored
@ -5,7 +5,6 @@
|
|||||||
package simplifiedchinese
|
package simplifiedchinese
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"golang.org/x/text/encoding"
|
"golang.org/x/text/encoding"
|
||||||
@ -40,11 +39,6 @@ var gbk18030 = internal.Encoding{
|
|||||||
identifier.GB18030,
|
identifier.GB18030,
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
|
||||||
errInvalidGB18030 = errors.New("simplifiedchinese: invalid GB18030 encoding")
|
|
||||||
errInvalidGBK = errors.New("simplifiedchinese: invalid GBK encoding")
|
|
||||||
)
|
|
||||||
|
|
||||||
type gbkDecoder struct {
|
type gbkDecoder struct {
|
||||||
transform.NopResetter
|
transform.NopResetter
|
||||||
gb18030 bool
|
gb18030 bool
|
||||||
@ -66,9 +60,13 @@ loop:
|
|||||||
|
|
||||||
case c0 < 0xff:
|
case c0 < 0xff:
|
||||||
if nSrc+1 >= len(src) {
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
err = transform.ErrShortSrc
|
err = transform.ErrShortSrc
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
c1 := src[nSrc+1]
|
c1 := src[nSrc+1]
|
||||||
switch {
|
switch {
|
||||||
case 0x40 <= c1 && c1 < 0x7f:
|
case 0x40 <= c1 && c1 < 0x7f:
|
||||||
@ -77,18 +75,24 @@ loop:
|
|||||||
c1 -= 0x41
|
c1 -= 0x41
|
||||||
case d.gb18030 && 0x30 <= c1 && c1 < 0x40:
|
case d.gb18030 && 0x30 <= c1 && c1 < 0x40:
|
||||||
if nSrc+3 >= len(src) {
|
if nSrc+3 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
err = transform.ErrShortSrc
|
err = transform.ErrShortSrc
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
|
// The second byte here is always ASCII, so we can set size
|
||||||
|
// to 1 in all cases.
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
c2 := src[nSrc+2]
|
c2 := src[nSrc+2]
|
||||||
if c2 < 0x81 || 0xff <= c2 {
|
if c2 < 0x81 || 0xff <= c2 {
|
||||||
err = errInvalidGB18030
|
r, size = utf8.RuneError, 1
|
||||||
break loop
|
goto write
|
||||||
}
|
}
|
||||||
c3 := src[nSrc+3]
|
c3 := src[nSrc+3]
|
||||||
if c3 < 0x30 || 0x3a <= c3 {
|
if c3 < 0x30 || 0x3a <= c3 {
|
||||||
err = errInvalidGB18030
|
r, size = utf8.RuneError, 1
|
||||||
break loop
|
goto write
|
||||||
}
|
}
|
||||||
size = 4
|
size = 4
|
||||||
r = ((rune(c0-0x81)*10+rune(c1-0x30))*126+rune(c2-0x81))*10 + rune(c3-0x30)
|
r = ((rune(c0-0x81)*10+rune(c1-0x30))*126+rune(c2-0x81))*10 + rune(c3-0x30)
|
||||||
@ -109,17 +113,13 @@ loop:
|
|||||||
r -= 189000
|
r -= 189000
|
||||||
if 0 <= r && r < 0x100000 {
|
if 0 <= r && r < 0x100000 {
|
||||||
r += 0x10000
|
r += 0x10000
|
||||||
goto write
|
|
||||||
}
|
|
||||||
err = errInvalidGB18030
|
|
||||||
break loop
|
|
||||||
default:
|
|
||||||
if d.gb18030 {
|
|
||||||
err = errInvalidGB18030
|
|
||||||
} else {
|
} else {
|
||||||
err = errInvalidGBK
|
r, size = utf8.RuneError, 1
|
||||||
}
|
}
|
||||||
break loop
|
goto write
|
||||||
|
default:
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
}
|
}
|
||||||
r, size = '\ufffd', 2
|
r, size = '\ufffd', 2
|
||||||
if i := int(c0-0x81)*190 + int(c1); i < len(decode) {
|
if i := int(c0-0x81)*190 + int(c1); i < len(decode) {
|
||||||
@ -130,12 +130,7 @@ loop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
if d.gb18030 {
|
r, size = utf8.RuneError, 1
|
||||||
err = errInvalidGB18030
|
|
||||||
} else {
|
|
||||||
err = errInvalidGBK
|
|
||||||
}
|
|
||||||
break loop
|
|
||||||
}
|
}
|
||||||
|
|
||||||
write:
|
write:
|
||||||
@ -145,13 +140,6 @@ loop:
|
|||||||
}
|
}
|
||||||
nDst += utf8.EncodeRune(dst[nDst:], r)
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
}
|
}
|
||||||
if atEOF && err == transform.ErrShortSrc {
|
|
||||||
if d.gb18030 {
|
|
||||||
err = errInvalidGB18030
|
|
||||||
} else {
|
|
||||||
err = errInvalidGBK
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nDst, nSrc, err
|
return nDst, nSrc, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
41
vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go
generated
vendored
41
vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go
generated
vendored
@ -5,7 +5,6 @@
|
|||||||
package simplifiedchinese
|
package simplifiedchinese
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"golang.org/x/text/encoding"
|
"golang.org/x/text/encoding"
|
||||||
@ -31,8 +30,6 @@ func hzGB2312NewEncoder() transform.Transformer {
|
|||||||
return new(hzGB2312Encoder)
|
return new(hzGB2312Encoder)
|
||||||
}
|
}
|
||||||
|
|
||||||
var errInvalidHZGB2312 = errors.New("simplifiedchinese: invalid HZ-GB2312 encoding")
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
asciiState = iota
|
asciiState = iota
|
||||||
gbState
|
gbState
|
||||||
@ -50,15 +47,19 @@ loop:
|
|||||||
for ; nSrc < len(src); nSrc += size {
|
for ; nSrc < len(src); nSrc += size {
|
||||||
c0 := src[nSrc]
|
c0 := src[nSrc]
|
||||||
if c0 >= utf8.RuneSelf {
|
if c0 >= utf8.RuneSelf {
|
||||||
err = errInvalidHZGB2312
|
r, size = utf8.RuneError, 1
|
||||||
break loop
|
goto write
|
||||||
}
|
}
|
||||||
|
|
||||||
if c0 == '~' {
|
if c0 == '~' {
|
||||||
if nSrc+1 >= len(src) {
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
err = transform.ErrShortSrc
|
err = transform.ErrShortSrc
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
|
r = utf8.RuneError
|
||||||
|
goto write
|
||||||
|
}
|
||||||
size = 2
|
size = 2
|
||||||
switch src[nSrc+1] {
|
switch src[nSrc+1] {
|
||||||
case '{':
|
case '{':
|
||||||
@ -78,8 +79,8 @@ loop:
|
|||||||
case '\n':
|
case '\n':
|
||||||
continue
|
continue
|
||||||
default:
|
default:
|
||||||
err = errInvalidHZGB2312
|
r = utf8.RuneError
|
||||||
break loop
|
goto write
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -87,33 +88,37 @@ loop:
|
|||||||
r, size = rune(c0), 1
|
r, size = rune(c0), 1
|
||||||
} else {
|
} else {
|
||||||
if nSrc+1 >= len(src) {
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
err = transform.ErrShortSrc
|
err = transform.ErrShortSrc
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
size = 2
|
||||||
c1 := src[nSrc+1]
|
c1 := src[nSrc+1]
|
||||||
if c0 < 0x21 || 0x7e <= c0 || c1 < 0x21 || 0x7f <= c1 {
|
if c0 < 0x21 || 0x7e <= c0 || c1 < 0x21 || 0x7f <= c1 {
|
||||||
err = errInvalidHZGB2312
|
// error
|
||||||
break loop
|
} else if i := int(c0-0x01)*190 + int(c1+0x3f); i < len(decode) {
|
||||||
}
|
|
||||||
|
|
||||||
r, size = '\ufffd', 2
|
|
||||||
if i := int(c0-0x01)*190 + int(c1+0x3f); i < len(decode) {
|
|
||||||
r = rune(decode[i])
|
r = rune(decode[i])
|
||||||
if r == 0 {
|
if r != 0 {
|
||||||
r = '\ufffd'
|
goto write
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if c1 > utf8.RuneSelf {
|
||||||
|
// Be consistent and always treat non-ASCII as a single error.
|
||||||
|
size = 1
|
||||||
|
}
|
||||||
|
r = utf8.RuneError
|
||||||
}
|
}
|
||||||
|
|
||||||
|
write:
|
||||||
if nDst+utf8.RuneLen(r) > len(dst) {
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
err = transform.ErrShortDst
|
err = transform.ErrShortDst
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
nDst += utf8.EncodeRune(dst[nDst:], r)
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
}
|
}
|
||||||
if atEOF && err == transform.ErrShortSrc {
|
|
||||||
err = errInvalidHZGB2312
|
|
||||||
}
|
|
||||||
return nDst, nSrc, err
|
return nDst, nSrc, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
21
vendor/golang.org/x/text/encoding/traditionalchinese/big5.go
generated
vendored
21
vendor/golang.org/x/text/encoding/traditionalchinese/big5.go
generated
vendored
@ -5,7 +5,6 @@
|
|||||||
package traditionalchinese
|
package traditionalchinese
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"golang.org/x/text/encoding"
|
"golang.org/x/text/encoding"
|
||||||
@ -26,8 +25,6 @@ var big5 = internal.Encoding{
|
|||||||
identifier.Big5,
|
identifier.Big5,
|
||||||
}
|
}
|
||||||
|
|
||||||
var errInvalidBig5 = errors.New("traditionalchinese: invalid Big5 encoding")
|
|
||||||
|
|
||||||
type big5Decoder struct{ transform.NopResetter }
|
type big5Decoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
func (big5Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
func (big5Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
@ -40,18 +37,25 @@ loop:
|
|||||||
|
|
||||||
case 0x81 <= c0 && c0 < 0xff:
|
case 0x81 <= c0 && c0 < 0xff:
|
||||||
if nSrc+1 >= len(src) {
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
err = transform.ErrShortSrc
|
err = transform.ErrShortSrc
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
c1 := src[nSrc+1]
|
c1 := src[nSrc+1]
|
||||||
switch {
|
switch {
|
||||||
case 0x40 <= c1 && c1 < 0x7f:
|
case 0x40 <= c1 && c1 < 0x7f:
|
||||||
c1 -= 0x40
|
c1 -= 0x40
|
||||||
case 0xa1 <= c1 && c1 < 0xff:
|
case 0xa1 <= c1 && c1 < 0xff:
|
||||||
c1 -= 0x62
|
c1 -= 0x62
|
||||||
|
case c1 < 0x40:
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
default:
|
default:
|
||||||
err = errInvalidBig5
|
r, size = utf8.RuneError, 2
|
||||||
break loop
|
goto write
|
||||||
}
|
}
|
||||||
r, size = '\ufffd', 2
|
r, size = '\ufffd', 2
|
||||||
if i := int(c0-0x81)*157 + int(c1); i < len(decode) {
|
if i := int(c0-0x81)*157 + int(c1); i < len(decode) {
|
||||||
@ -80,10 +84,10 @@ loop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
err = errInvalidBig5
|
r, size = utf8.RuneError, 1
|
||||||
break loop
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
write:
|
||||||
if nDst+utf8.RuneLen(r) > len(dst) {
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
err = transform.ErrShortDst
|
err = transform.ErrShortDst
|
||||||
break loop
|
break loop
|
||||||
@ -99,9 +103,6 @@ loop:
|
|||||||
nDst += copy(dst[nDst:], s)
|
nDst += copy(dst[nDst:], s)
|
||||||
continue loop
|
continue loop
|
||||||
}
|
}
|
||||||
if atEOF && err == transform.ErrShortSrc {
|
|
||||||
err = errInvalidBig5
|
|
||||||
}
|
|
||||||
return nDst, nSrc, err
|
return nDst, nSrc, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
27
vendor/golang.org/x/text/internal/format/LICENSE
generated
vendored
Normal file
27
vendor/golang.org/x/text/internal/format/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
41
vendor/golang.org/x/text/internal/format/format.go
generated
vendored
Normal file
41
vendor/golang.org/x/text/internal/format/format.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package format contains types for defining language-specific formatting of
|
||||||
|
// values.
|
||||||
|
//
|
||||||
|
// This package is internal now, but will eventually be exposed after the API
|
||||||
|
// settles.
|
||||||
|
package format // import "golang.org/x/text/internal/format"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"golang.org/x/text/language"
|
||||||
|
)
|
||||||
|
|
||||||
|
// State represents the printer state passed to custom formatters. It provides
|
||||||
|
// access to the fmt.State interface and the sentence and language-related
|
||||||
|
// context.
|
||||||
|
type State interface {
|
||||||
|
fmt.State
|
||||||
|
|
||||||
|
// Language reports the requested language in which to render a message.
|
||||||
|
Language() language.Tag
|
||||||
|
|
||||||
|
// TODO: consider this and removing rune from the Format method in the
|
||||||
|
// Formatter interface.
|
||||||
|
//
|
||||||
|
// Verb returns the format variant to render, analogous to the types used
|
||||||
|
// in fmt. Use 'v' for the default or only variant.
|
||||||
|
// Verb() rune
|
||||||
|
|
||||||
|
// TODO: more info:
|
||||||
|
// - sentence context such as linguistic features passed by the translator.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Formatter is analogous to fmt.Formatter.
|
||||||
|
type Formatter interface {
|
||||||
|
Format(state State, verb rune)
|
||||||
|
}
|
358
vendor/golang.org/x/text/internal/format/parser.go
generated
vendored
Normal file
358
vendor/golang.org/x/text/internal/format/parser.go
generated
vendored
Normal file
@ -0,0 +1,358 @@
|
|||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package format
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Parser parses a format string. The result from the parse are set in the
|
||||||
|
// struct fields.
|
||||||
|
type Parser struct {
|
||||||
|
Verb rune
|
||||||
|
|
||||||
|
WidthPresent bool
|
||||||
|
PrecPresent bool
|
||||||
|
Minus bool
|
||||||
|
Plus bool
|
||||||
|
Sharp bool
|
||||||
|
Space bool
|
||||||
|
Zero bool
|
||||||
|
|
||||||
|
// For the formats %+v %#v, we set the plusV/sharpV flags
|
||||||
|
// and clear the plus/sharp flags since %+v and %#v are in effect
|
||||||
|
// different, flagless formats set at the top level.
|
||||||
|
PlusV bool
|
||||||
|
SharpV bool
|
||||||
|
|
||||||
|
HasIndex bool
|
||||||
|
|
||||||
|
Width int
|
||||||
|
Prec int // precision
|
||||||
|
|
||||||
|
// retain arguments across calls.
|
||||||
|
Args []interface{}
|
||||||
|
// retain current argument number across calls
|
||||||
|
ArgNum int
|
||||||
|
|
||||||
|
// reordered records whether the format string used argument reordering.
|
||||||
|
Reordered bool
|
||||||
|
// goodArgNum records whether the most recent reordering directive was valid.
|
||||||
|
goodArgNum bool
|
||||||
|
|
||||||
|
// position info
|
||||||
|
format string
|
||||||
|
startPos int
|
||||||
|
endPos int
|
||||||
|
Status Status
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset initializes a parser to scan format strings for the given args.
|
||||||
|
func (p *Parser) Reset(args []interface{}) {
|
||||||
|
p.Args = args
|
||||||
|
p.ArgNum = 0
|
||||||
|
p.startPos = 0
|
||||||
|
p.Reordered = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Text returns the part of the format string that was parsed by the last call
|
||||||
|
// to Scan. It returns the original substitution clause if the current scan
|
||||||
|
// parsed a substitution.
|
||||||
|
func (p *Parser) Text() string { return p.format[p.startPos:p.endPos] }
|
||||||
|
|
||||||
|
// SetFormat sets a new format string to parse. It does not reset the argument
|
||||||
|
// count.
|
||||||
|
func (p *Parser) SetFormat(format string) {
|
||||||
|
p.format = format
|
||||||
|
p.startPos = 0
|
||||||
|
p.endPos = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status indicates the result type of a call to Scan.
|
||||||
|
type Status int
|
||||||
|
|
||||||
|
const (
|
||||||
|
StatusText Status = iota
|
||||||
|
StatusSubstitution
|
||||||
|
StatusBadWidthSubstitution
|
||||||
|
StatusBadPrecSubstitution
|
||||||
|
StatusNoVerb
|
||||||
|
StatusBadArgNum
|
||||||
|
StatusMissingArg
|
||||||
|
)
|
||||||
|
|
||||||
|
// ClearFlags reset the parser to default behavior.
|
||||||
|
func (p *Parser) ClearFlags() {
|
||||||
|
p.WidthPresent = false
|
||||||
|
p.PrecPresent = false
|
||||||
|
p.Minus = false
|
||||||
|
p.Plus = false
|
||||||
|
p.Sharp = false
|
||||||
|
p.Space = false
|
||||||
|
p.Zero = false
|
||||||
|
|
||||||
|
p.PlusV = false
|
||||||
|
p.SharpV = false
|
||||||
|
|
||||||
|
p.HasIndex = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan scans the next part of the format string and sets the status to
|
||||||
|
// indicate whether it scanned a string literal, substitution or error.
|
||||||
|
func (p *Parser) Scan() bool {
|
||||||
|
p.Status = StatusText
|
||||||
|
format := p.format
|
||||||
|
end := len(format)
|
||||||
|
if p.endPos >= end {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
afterIndex := false // previous item in format was an index like [3].
|
||||||
|
|
||||||
|
p.startPos = p.endPos
|
||||||
|
p.goodArgNum = true
|
||||||
|
i := p.startPos
|
||||||
|
for i < end && format[i] != '%' {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if i > p.startPos {
|
||||||
|
p.endPos = i
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// Process one verb
|
||||||
|
i++
|
||||||
|
|
||||||
|
p.Status = StatusSubstitution
|
||||||
|
|
||||||
|
// Do we have flags?
|
||||||
|
p.ClearFlags()
|
||||||
|
|
||||||
|
simpleFormat:
|
||||||
|
for ; i < end; i++ {
|
||||||
|
c := p.format[i]
|
||||||
|
switch c {
|
||||||
|
case '#':
|
||||||
|
p.Sharp = true
|
||||||
|
case '0':
|
||||||
|
p.Zero = !p.Minus // Only allow zero padding to the left.
|
||||||
|
case '+':
|
||||||
|
p.Plus = true
|
||||||
|
case '-':
|
||||||
|
p.Minus = true
|
||||||
|
p.Zero = false // Do not pad with zeros to the right.
|
||||||
|
case ' ':
|
||||||
|
p.Space = true
|
||||||
|
default:
|
||||||
|
// Fast path for common case of ascii lower case simple verbs
|
||||||
|
// without precision or width or argument indices.
|
||||||
|
if 'a' <= c && c <= 'z' && p.ArgNum < len(p.Args) {
|
||||||
|
if c == 'v' {
|
||||||
|
// Go syntax
|
||||||
|
p.SharpV = p.Sharp
|
||||||
|
p.Sharp = false
|
||||||
|
// Struct-field syntax
|
||||||
|
p.PlusV = p.Plus
|
||||||
|
p.Plus = false
|
||||||
|
}
|
||||||
|
p.Verb = rune(c)
|
||||||
|
p.ArgNum++
|
||||||
|
p.endPos = i + 1
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// Format is more complex than simple flags and a verb or is malformed.
|
||||||
|
break simpleFormat
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do we have an explicit argument index?
|
||||||
|
i, afterIndex = p.updateArgNumber(format, i)
|
||||||
|
|
||||||
|
// Do we have width?
|
||||||
|
if i < end && format[i] == '*' {
|
||||||
|
i++
|
||||||
|
p.Width, p.WidthPresent = p.intFromArg()
|
||||||
|
|
||||||
|
if !p.WidthPresent {
|
||||||
|
p.Status = StatusBadWidthSubstitution
|
||||||
|
}
|
||||||
|
|
||||||
|
// We have a negative width, so take its value and ensure
|
||||||
|
// that the minus flag is set
|
||||||
|
if p.Width < 0 {
|
||||||
|
p.Width = -p.Width
|
||||||
|
p.Minus = true
|
||||||
|
p.Zero = false // Do not pad with zeros to the right.
|
||||||
|
}
|
||||||
|
afterIndex = false
|
||||||
|
} else {
|
||||||
|
p.Width, p.WidthPresent, i = parsenum(format, i, end)
|
||||||
|
if afterIndex && p.WidthPresent { // "%[3]2d"
|
||||||
|
p.goodArgNum = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do we have precision?
|
||||||
|
if i+1 < end && format[i] == '.' {
|
||||||
|
i++
|
||||||
|
if afterIndex { // "%[3].2d"
|
||||||
|
p.goodArgNum = false
|
||||||
|
}
|
||||||
|
i, afterIndex = p.updateArgNumber(format, i)
|
||||||
|
if i < end && format[i] == '*' {
|
||||||
|
i++
|
||||||
|
p.Prec, p.PrecPresent = p.intFromArg()
|
||||||
|
// Negative precision arguments don't make sense
|
||||||
|
if p.Prec < 0 {
|
||||||
|
p.Prec = 0
|
||||||
|
p.PrecPresent = false
|
||||||
|
}
|
||||||
|
if !p.PrecPresent {
|
||||||
|
p.Status = StatusBadPrecSubstitution
|
||||||
|
}
|
||||||
|
afterIndex = false
|
||||||
|
} else {
|
||||||
|
p.Prec, p.PrecPresent, i = parsenum(format, i, end)
|
||||||
|
if !p.PrecPresent {
|
||||||
|
p.Prec = 0
|
||||||
|
p.PrecPresent = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !afterIndex {
|
||||||
|
i, afterIndex = p.updateArgNumber(format, i)
|
||||||
|
}
|
||||||
|
p.HasIndex = afterIndex
|
||||||
|
|
||||||
|
if i >= end {
|
||||||
|
p.endPos = i
|
||||||
|
p.Status = StatusNoVerb
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
verb, w := utf8.DecodeRuneInString(format[i:])
|
||||||
|
p.endPos = i + w
|
||||||
|
p.Verb = verb
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case verb == '%': // Percent does not absorb operands and ignores f.wid and f.prec.
|
||||||
|
p.startPos = p.endPos - 1
|
||||||
|
p.Status = StatusText
|
||||||
|
case !p.goodArgNum:
|
||||||
|
p.Status = StatusBadArgNum
|
||||||
|
case p.ArgNum >= len(p.Args): // No argument left over to print for the current verb.
|
||||||
|
p.Status = StatusMissingArg
|
||||||
|
p.ArgNum++
|
||||||
|
case verb == 'v':
|
||||||
|
// Go syntax
|
||||||
|
p.SharpV = p.Sharp
|
||||||
|
p.Sharp = false
|
||||||
|
// Struct-field syntax
|
||||||
|
p.PlusV = p.Plus
|
||||||
|
p.Plus = false
|
||||||
|
fallthrough
|
||||||
|
default:
|
||||||
|
p.ArgNum++
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// intFromArg gets the ArgNumth element of Args. On return, isInt reports
|
||||||
|
// whether the argument has integer type.
|
||||||
|
func (p *Parser) intFromArg() (num int, isInt bool) {
|
||||||
|
if p.ArgNum < len(p.Args) {
|
||||||
|
arg := p.Args[p.ArgNum]
|
||||||
|
num, isInt = arg.(int) // Almost always OK.
|
||||||
|
if !isInt {
|
||||||
|
// Work harder.
|
||||||
|
switch v := reflect.ValueOf(arg); v.Kind() {
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
n := v.Int()
|
||||||
|
if int64(int(n)) == n {
|
||||||
|
num = int(n)
|
||||||
|
isInt = true
|
||||||
|
}
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
n := v.Uint()
|
||||||
|
if int64(n) >= 0 && uint64(int(n)) == n {
|
||||||
|
num = int(n)
|
||||||
|
isInt = true
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// Already 0, false.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.ArgNum++
|
||||||
|
if tooLarge(num) {
|
||||||
|
num = 0
|
||||||
|
isInt = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseArgNumber returns the value of the bracketed number, minus 1
|
||||||
|
// (explicit argument numbers are one-indexed but we want zero-indexed).
|
||||||
|
// The opening bracket is known to be present at format[0].
|
||||||
|
// The returned values are the index, the number of bytes to consume
|
||||||
|
// up to the closing paren, if present, and whether the number parsed
|
||||||
|
// ok. The bytes to consume will be 1 if no closing paren is present.
|
||||||
|
func parseArgNumber(format string) (index int, wid int, ok bool) {
|
||||||
|
// There must be at least 3 bytes: [n].
|
||||||
|
if len(format) < 3 {
|
||||||
|
return 0, 1, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find closing bracket.
|
||||||
|
for i := 1; i < len(format); i++ {
|
||||||
|
if format[i] == ']' {
|
||||||
|
width, ok, newi := parsenum(format, 1, i)
|
||||||
|
if !ok || newi != i {
|
||||||
|
return 0, i + 1, false
|
||||||
|
}
|
||||||
|
return width - 1, i + 1, true // arg numbers are one-indexed and skip paren.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, 1, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateArgNumber returns the next argument to evaluate, which is either the value of the passed-in
|
||||||
|
// argNum or the value of the bracketed integer that begins format[i:]. It also returns
|
||||||
|
// the new value of i, that is, the index of the next byte of the format to process.
|
||||||
|
func (p *Parser) updateArgNumber(format string, i int) (newi int, found bool) {
|
||||||
|
if len(format) <= i || format[i] != '[' {
|
||||||
|
return i, false
|
||||||
|
}
|
||||||
|
p.Reordered = true
|
||||||
|
index, wid, ok := parseArgNumber(format[i:])
|
||||||
|
if ok && 0 <= index && index < len(p.Args) {
|
||||||
|
p.ArgNum = index
|
||||||
|
return i + wid, true
|
||||||
|
}
|
||||||
|
p.goodArgNum = false
|
||||||
|
return i + wid, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// tooLarge reports whether the magnitude of the integer is
|
||||||
|
// too large to be used as a formatting width or precision.
|
||||||
|
func tooLarge(x int) bool {
|
||||||
|
const max int = 1e6
|
||||||
|
return x > max || x < -max
|
||||||
|
}
|
||||||
|
|
||||||
|
// parsenum converts ASCII to integer. num is 0 (and isnum is false) if no number present.
|
||||||
|
func parsenum(s string, start, end int) (num int, isnum bool, newi int) {
|
||||||
|
if start >= end {
|
||||||
|
return 0, false, end
|
||||||
|
}
|
||||||
|
for newi = start; newi < end && '0' <= s[newi] && s[newi] <= '9'; newi++ {
|
||||||
|
if tooLarge(num) {
|
||||||
|
return 0, false, end // Overflow; crazy long number most likely.
|
||||||
|
}
|
||||||
|
num = num*10 + int(s[newi]-'0')
|
||||||
|
isnum = true
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
27
vendor/golang.org/x/text/internal/language/LICENSE
generated
vendored
Normal file
27
vendor/golang.org/x/text/internal/language/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
16
vendor/golang.org/x/text/internal/language/common.go
generated
vendored
Normal file
16
vendor/golang.org/x/text/internal/language/common.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
// This file contains code common to the maketables.go and the package code.
|
||||||
|
|
||||||
|
// AliasType is the type of an alias in AliasMap.
|
||||||
|
type AliasType int8
|
||||||
|
|
||||||
|
const (
|
||||||
|
Deprecated AliasType = iota
|
||||||
|
Macro
|
||||||
|
Legacy
|
||||||
|
|
||||||
|
AliasTypeUnknown AliasType = -1
|
||||||
|
)
|
29
vendor/golang.org/x/text/internal/language/compact.go
generated
vendored
Normal file
29
vendor/golang.org/x/text/internal/language/compact.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
// CompactCoreInfo is a compact integer with the three core tags encoded.
|
||||||
|
type CompactCoreInfo uint32
|
||||||
|
|
||||||
|
// GetCompactCore generates a uint32 value that is guaranteed to be unique for
|
||||||
|
// different language, region, and script values.
|
||||||
|
func GetCompactCore(t Tag) (cci CompactCoreInfo, ok bool) {
|
||||||
|
if t.LangID > langNoIndexOffset {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
cci |= CompactCoreInfo(t.LangID) << (8 + 12)
|
||||||
|
cci |= CompactCoreInfo(t.ScriptID) << 12
|
||||||
|
cci |= CompactCoreInfo(t.RegionID)
|
||||||
|
return cci, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tag generates a tag from c.
|
||||||
|
func (c CompactCoreInfo) Tag() Tag {
|
||||||
|
return Tag{
|
||||||
|
LangID: Language(c >> 20),
|
||||||
|
RegionID: Region(c & 0x3ff),
|
||||||
|
ScriptID: Script(c>>12) & 0xff,
|
||||||
|
}
|
||||||
|
}
|
61
vendor/golang.org/x/text/internal/language/compact/compact.go
generated
vendored
Normal file
61
vendor/golang.org/x/text/internal/language/compact/compact.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package compact defines a compact representation of language tags.
|
||||||
|
//
|
||||||
|
// Common language tags (at least all for which locale information is defined
|
||||||
|
// in CLDR) are assigned a unique index. Each Tag is associated with such an
|
||||||
|
// ID for selecting language-related resources (such as translations) as well
|
||||||
|
// as one for selecting regional defaults (currency, number formatting, etc.)
|
||||||
|
//
|
||||||
|
// It may want to export this functionality at some point, but at this point
|
||||||
|
// this is only available for use within x/text.
|
||||||
|
package compact // import "golang.org/x/text/internal/language/compact"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/text/internal/language"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID is an integer identifying a single tag.
|
||||||
|
type ID uint16
|
||||||
|
|
||||||
|
func getCoreIndex(t language.Tag) (id ID, ok bool) {
|
||||||
|
cci, ok := language.GetCompactCore(t)
|
||||||
|
if !ok {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
i := sort.Search(len(coreTags), func(i int) bool {
|
||||||
|
return cci <= coreTags[i]
|
||||||
|
})
|
||||||
|
if i == len(coreTags) || coreTags[i] != cci {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
return ID(i), true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parent returns the ID of the parent or the root ID if id is already the root.
|
||||||
|
func (id ID) Parent() ID {
|
||||||
|
return parents[id]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tag converts id to an internal language Tag.
|
||||||
|
func (id ID) Tag() language.Tag {
|
||||||
|
if int(id) >= len(coreTags) {
|
||||||
|
return specialTags[int(id)-len(coreTags)]
|
||||||
|
}
|
||||||
|
return coreTags[id].Tag()
|
||||||
|
}
|
||||||
|
|
||||||
|
var specialTags []language.Tag
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
tags := strings.Split(specialTagsStr, " ")
|
||||||
|
specialTags = make([]language.Tag, len(tags))
|
||||||
|
for i, t := range tags {
|
||||||
|
specialTags[i] = language.MustParse(t)
|
||||||
|
}
|
||||||
|
}
|
64
vendor/golang.org/x/text/internal/language/compact/gen.go
generated
vendored
Normal file
64
vendor/golang.org/x/text/internal/language/compact/gen.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// Language tag table generator.
|
||||||
|
// Data read from the web.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"golang.org/x/text/internal/gen"
|
||||||
|
"golang.org/x/text/unicode/cldr"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
test = flag.Bool("test",
|
||||||
|
false,
|
||||||
|
"test existing tables; can be used to compare web data with package data.")
|
||||||
|
outputFile = flag.String("output",
|
||||||
|
"tables.go",
|
||||||
|
"output file for generated tables")
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
gen.Init()
|
||||||
|
|
||||||
|
w := gen.NewCodeWriter()
|
||||||
|
defer w.WriteGoFile("tables.go", "compact")
|
||||||
|
|
||||||
|
fmt.Fprintln(w, `import "golang.org/x/text/internal/language"`)
|
||||||
|
|
||||||
|
b := newBuilder(w)
|
||||||
|
gen.WriteCLDRVersion(w)
|
||||||
|
|
||||||
|
b.writeCompactIndex()
|
||||||
|
}
|
||||||
|
|
||||||
|
type builder struct {
|
||||||
|
w *gen.CodeWriter
|
||||||
|
data *cldr.CLDR
|
||||||
|
supp *cldr.SupplementalData
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBuilder(w *gen.CodeWriter) *builder {
|
||||||
|
r := gen.OpenCLDRCoreZip()
|
||||||
|
defer r.Close()
|
||||||
|
d := &cldr.Decoder{}
|
||||||
|
data, err := d.DecodeZip(r)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
b := builder{
|
||||||
|
w: w,
|
||||||
|
data: data,
|
||||||
|
supp: data.Supplemental(),
|
||||||
|
}
|
||||||
|
return &b
|
||||||
|
}
|
113
vendor/golang.org/x/text/internal/language/compact/gen_index.go
generated
vendored
Normal file
113
vendor/golang.org/x/text/internal/language/compact/gen_index.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
// This file generates derivative tables based on the language package itself.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/text/internal/language"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Compact indices:
|
||||||
|
// Note -va-X variants only apply to localization variants.
|
||||||
|
// BCP variants only ever apply to language.
|
||||||
|
// The only ambiguity between tags is with regions.
|
||||||
|
|
||||||
|
func (b *builder) writeCompactIndex() {
|
||||||
|
// Collect all language tags for which we have any data in CLDR.
|
||||||
|
m := map[language.Tag]bool{}
|
||||||
|
for _, lang := range b.data.Locales() {
|
||||||
|
// We include all locales unconditionally to be consistent with en_US.
|
||||||
|
// We want en_US, even though it has no data associated with it.
|
||||||
|
|
||||||
|
// TODO: put any of the languages for which no data exists at the end
|
||||||
|
// of the index. This allows all components based on ICU to use that
|
||||||
|
// as the cutoff point.
|
||||||
|
// if x := data.RawLDML(lang); false ||
|
||||||
|
// x.LocaleDisplayNames != nil ||
|
||||||
|
// x.Characters != nil ||
|
||||||
|
// x.Delimiters != nil ||
|
||||||
|
// x.Measurement != nil ||
|
||||||
|
// x.Dates != nil ||
|
||||||
|
// x.Numbers != nil ||
|
||||||
|
// x.Units != nil ||
|
||||||
|
// x.ListPatterns != nil ||
|
||||||
|
// x.Collations != nil ||
|
||||||
|
// x.Segmentations != nil ||
|
||||||
|
// x.Rbnf != nil ||
|
||||||
|
// x.Annotations != nil ||
|
||||||
|
// x.Metadata != nil {
|
||||||
|
|
||||||
|
// TODO: support POSIX natively, albeit non-standard.
|
||||||
|
tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1))
|
||||||
|
m[tag] = true
|
||||||
|
// }
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: plural rules are also defined for the deprecated tags:
|
||||||
|
// iw mo sh tl
|
||||||
|
// Consider removing these as compact tags.
|
||||||
|
|
||||||
|
// Include locales for plural rules, which uses a different structure.
|
||||||
|
for _, plurals := range b.supp.Plurals {
|
||||||
|
for _, rules := range plurals.PluralRules {
|
||||||
|
for _, lang := range strings.Split(rules.Locales, " ") {
|
||||||
|
m[language.Make(lang)] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var coreTags []language.CompactCoreInfo
|
||||||
|
var special []string
|
||||||
|
|
||||||
|
for t := range m {
|
||||||
|
if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" {
|
||||||
|
log.Fatalf("Unexpected extension %v in %v", x, t)
|
||||||
|
}
|
||||||
|
if len(t.Variants()) == 0 && len(t.Extensions()) == 0 {
|
||||||
|
cci, ok := language.GetCompactCore(t)
|
||||||
|
if !ok {
|
||||||
|
log.Fatalf("Locale for non-basic language %q", t)
|
||||||
|
}
|
||||||
|
coreTags = append(coreTags, cci)
|
||||||
|
} else {
|
||||||
|
special = append(special, t.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
w := b.w
|
||||||
|
|
||||||
|
sort.Slice(coreTags, func(i, j int) bool { return coreTags[i] < coreTags[j] })
|
||||||
|
sort.Strings(special)
|
||||||
|
|
||||||
|
w.WriteComment(`
|
||||||
|
NumCompactTags is the number of common tags. The maximum tag is
|
||||||
|
NumCompactTags-1.`)
|
||||||
|
w.WriteConst("NumCompactTags", len(m))
|
||||||
|
|
||||||
|
fmt.Fprintln(w, "const (")
|
||||||
|
for i, t := range coreTags {
|
||||||
|
fmt.Fprintf(w, "%s ID = %d\n", ident(t.Tag().String()), i)
|
||||||
|
}
|
||||||
|
for i, t := range special {
|
||||||
|
fmt.Fprintf(w, "%s ID = %d\n", ident(t), i+len(coreTags))
|
||||||
|
}
|
||||||
|
fmt.Fprintln(w, ")")
|
||||||
|
|
||||||
|
w.WriteVar("coreTags", coreTags)
|
||||||
|
|
||||||
|
w.WriteConst("specialTagsStr", strings.Join(special, " "))
|
||||||
|
}
|
||||||
|
|
||||||
|
func ident(s string) string {
|
||||||
|
return strings.Replace(s, "-", "", -1) + "Index"
|
||||||
|
}
|
54
vendor/golang.org/x/text/internal/language/compact/gen_parents.go
generated
vendored
Normal file
54
vendor/golang.org/x/text/internal/language/compact/gen_parents.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"golang.org/x/text/internal/gen"
|
||||||
|
"golang.org/x/text/internal/language"
|
||||||
|
"golang.org/x/text/internal/language/compact"
|
||||||
|
"golang.org/x/text/unicode/cldr"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
r := gen.OpenCLDRCoreZip()
|
||||||
|
defer r.Close()
|
||||||
|
|
||||||
|
d := &cldr.Decoder{}
|
||||||
|
data, err := d.DecodeZip(r)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("DecodeZip: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w := gen.NewCodeWriter()
|
||||||
|
defer w.WriteGoFile("parents.go", "compact")
|
||||||
|
|
||||||
|
// Create parents table.
|
||||||
|
type ID uint16
|
||||||
|
parents := make([]ID, compact.NumCompactTags)
|
||||||
|
for _, loc := range data.Locales() {
|
||||||
|
tag := language.MustParse(loc)
|
||||||
|
index, ok := compact.FromTag(tag)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
parentIndex := compact.ID(0) // und
|
||||||
|
for p := tag.Parent(); p != language.Und; p = p.Parent() {
|
||||||
|
if x, ok := compact.FromTag(p); ok {
|
||||||
|
parentIndex = x
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
parents[index] = ID(parentIndex)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteComment(`
|
||||||
|
parents maps a compact index of a tag to the compact index of the parent of
|
||||||
|
this tag.`)
|
||||||
|
w.WriteVar("parents", parents)
|
||||||
|
}
|
260
vendor/golang.org/x/text/internal/language/compact/language.go
generated
vendored
Normal file
260
vendor/golang.org/x/text/internal/language/compact/language.go
generated
vendored
Normal file
@ -0,0 +1,260 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
//go:generate go run gen.go gen_index.go -output tables.go
|
||||||
|
//go:generate go run gen_parents.go
|
||||||
|
|
||||||
|
package compact
|
||||||
|
|
||||||
|
// TODO: Remove above NOTE after:
|
||||||
|
// - verifying that tables are dropped correctly (most notably matcher tables).
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/text/internal/language"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tag represents a BCP 47 language tag. It is used to specify an instance of a
|
||||||
|
// specific language or locale. All language tag values are guaranteed to be
|
||||||
|
// well-formed.
|
||||||
|
type Tag struct {
|
||||||
|
// NOTE: exported tags will become part of the public API.
|
||||||
|
language ID
|
||||||
|
locale ID
|
||||||
|
full fullTag // always a language.Tag for now.
|
||||||
|
}
|
||||||
|
|
||||||
|
const _und = 0
|
||||||
|
|
||||||
|
type fullTag interface {
|
||||||
|
IsRoot() bool
|
||||||
|
Parent() language.Tag
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make a compact Tag from a fully specified internal language Tag.
|
||||||
|
func Make(t language.Tag) (tag Tag) {
|
||||||
|
if region := t.TypeForKey("rg"); len(region) == 6 && region[2:] == "zzzz" {
|
||||||
|
if r, err := language.ParseRegion(region[:2]); err == nil {
|
||||||
|
tFull := t
|
||||||
|
t, _ = t.SetTypeForKey("rg", "")
|
||||||
|
// TODO: should we not consider "va" for the language tag?
|
||||||
|
var exact1, exact2 bool
|
||||||
|
tag.language, exact1 = FromTag(t)
|
||||||
|
t.RegionID = r
|
||||||
|
tag.locale, exact2 = FromTag(t)
|
||||||
|
if !exact1 || !exact2 {
|
||||||
|
tag.full = tFull
|
||||||
|
}
|
||||||
|
return tag
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lang, ok := FromTag(t)
|
||||||
|
tag.language = lang
|
||||||
|
tag.locale = lang
|
||||||
|
if !ok {
|
||||||
|
tag.full = t
|
||||||
|
}
|
||||||
|
return tag
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tag returns an internal language Tag version of this tag.
|
||||||
|
func (t Tag) Tag() language.Tag {
|
||||||
|
if t.full != nil {
|
||||||
|
return t.full.(language.Tag)
|
||||||
|
}
|
||||||
|
tag := t.language.Tag()
|
||||||
|
if t.language != t.locale {
|
||||||
|
loc := t.locale.Tag()
|
||||||
|
tag, _ = tag.SetTypeForKey("rg", strings.ToLower(loc.RegionID.String())+"zzzz")
|
||||||
|
}
|
||||||
|
return tag
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCompact reports whether this tag is fully defined in terms of ID.
|
||||||
|
func (t *Tag) IsCompact() bool {
|
||||||
|
return t.full == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MayHaveVariants reports whether a tag may have variants. If it returns false
|
||||||
|
// it is guaranteed the tag does not have variants.
|
||||||
|
func (t Tag) MayHaveVariants() bool {
|
||||||
|
return t.full != nil || int(t.language) >= len(coreTags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MayHaveExtensions reports whether a tag may have extensions. If it returns
|
||||||
|
// false it is guaranteed the tag does not have them.
|
||||||
|
func (t Tag) MayHaveExtensions() bool {
|
||||||
|
return t.full != nil ||
|
||||||
|
int(t.language) >= len(coreTags) ||
|
||||||
|
t.language != t.locale
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsRoot returns true if t is equal to language "und".
|
||||||
|
func (t Tag) IsRoot() bool {
|
||||||
|
if t.full != nil {
|
||||||
|
return t.full.IsRoot()
|
||||||
|
}
|
||||||
|
return t.language == _und
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
|
||||||
|
// specific language are substituted with fields from the parent language.
|
||||||
|
// The parent for a language may change for newer versions of CLDR.
|
||||||
|
func (t Tag) Parent() Tag {
|
||||||
|
if t.full != nil {
|
||||||
|
return Make(t.full.Parent())
|
||||||
|
}
|
||||||
|
if t.language != t.locale {
|
||||||
|
// Simulate stripping -u-rg-xxxxxx
|
||||||
|
return Tag{language: t.language, locale: t.language}
|
||||||
|
}
|
||||||
|
// TODO: use parent lookup table once cycle from internal package is
|
||||||
|
// removed. Probably by internalizing the table and declaring this fast
|
||||||
|
// enough.
|
||||||
|
// lang := compactID(internal.Parent(uint16(t.language)))
|
||||||
|
lang, _ := FromTag(t.language.Tag().Parent())
|
||||||
|
return Tag{language: lang, locale: lang}
|
||||||
|
}
|
||||||
|
|
||||||
|
// returns token t and the rest of the string.
|
||||||
|
func nextToken(s string) (t, tail string) {
|
||||||
|
p := strings.Index(s[1:], "-")
|
||||||
|
if p == -1 {
|
||||||
|
return s[1:], ""
|
||||||
|
}
|
||||||
|
p++
|
||||||
|
return s[1:p], s[p:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// LanguageID returns an index, where 0 <= index < NumCompactTags, for tags
|
||||||
|
// for which data exists in the text repository.The index will change over time
|
||||||
|
// and should not be stored in persistent storage. If t does not match a compact
|
||||||
|
// index, exact will be false and the compact index will be returned for the
|
||||||
|
// first match after repeatedly taking the Parent of t.
|
||||||
|
func LanguageID(t Tag) (id ID, exact bool) {
|
||||||
|
return t.language, t.full == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegionalID returns the ID for the regional variant of this tag. This index is
|
||||||
|
// used to indicate region-specific overrides, such as default currency, default
|
||||||
|
// calendar and week data, default time cycle, and default measurement system
|
||||||
|
// and unit preferences.
|
||||||
|
//
|
||||||
|
// For instance, the tag en-GB-u-rg-uszzzz specifies British English with US
|
||||||
|
// settings for currency, number formatting, etc. The CompactIndex for this tag
|
||||||
|
// will be that for en-GB, while the RegionalID will be the one corresponding to
|
||||||
|
// en-US.
|
||||||
|
func RegionalID(t Tag) (id ID, exact bool) {
|
||||||
|
return t.locale, t.full == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LanguageTag returns t stripped of regional variant indicators.
|
||||||
|
//
|
||||||
|
// At the moment this means it is stripped of a regional and variant subtag "rg"
|
||||||
|
// and "va" in the "u" extension.
|
||||||
|
func (t Tag) LanguageTag() Tag {
|
||||||
|
if t.full == nil {
|
||||||
|
return Tag{language: t.language, locale: t.language}
|
||||||
|
}
|
||||||
|
tt := t.Tag()
|
||||||
|
tt.SetTypeForKey("rg", "")
|
||||||
|
tt.SetTypeForKey("va", "")
|
||||||
|
return Make(tt)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegionalTag returns the regional variant of the tag.
|
||||||
|
//
|
||||||
|
// At the moment this means that the region is set from the regional subtag
|
||||||
|
// "rg" in the "u" extension.
|
||||||
|
func (t Tag) RegionalTag() Tag {
|
||||||
|
rt := Tag{language: t.locale, locale: t.locale}
|
||||||
|
if t.full == nil {
|
||||||
|
return rt
|
||||||
|
}
|
||||||
|
b := language.Builder{}
|
||||||
|
tag := t.Tag()
|
||||||
|
// tag, _ = tag.SetTypeForKey("rg", "")
|
||||||
|
b.SetTag(t.locale.Tag())
|
||||||
|
if v := tag.Variants(); v != "" {
|
||||||
|
for _, v := range strings.Split(v, "-") {
|
||||||
|
b.AddVariant(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, e := range tag.Extensions() {
|
||||||
|
b.AddExt(e)
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromTag reports closest matching ID for an internal language Tag.
|
||||||
|
func FromTag(t language.Tag) (id ID, exact bool) {
|
||||||
|
// TODO: perhaps give more frequent tags a lower index.
|
||||||
|
// TODO: we could make the indexes stable. This will excluded some
|
||||||
|
// possibilities for optimization, so don't do this quite yet.
|
||||||
|
exact = true
|
||||||
|
|
||||||
|
b, s, r := t.Raw()
|
||||||
|
if t.HasString() {
|
||||||
|
if t.IsPrivateUse() {
|
||||||
|
// We have no entries for user-defined tags.
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
hasExtra := false
|
||||||
|
if t.HasVariants() {
|
||||||
|
if t.HasExtensions() {
|
||||||
|
build := language.Builder{}
|
||||||
|
build.SetTag(language.Tag{LangID: b, ScriptID: s, RegionID: r})
|
||||||
|
build.AddVariant(t.Variants())
|
||||||
|
exact = false
|
||||||
|
t = build.Make()
|
||||||
|
}
|
||||||
|
hasExtra = true
|
||||||
|
} else if _, ok := t.Extension('u'); ok {
|
||||||
|
// TODO: va may mean something else. Consider not considering it.
|
||||||
|
// Strip all but the 'va' entry.
|
||||||
|
old := t
|
||||||
|
variant := t.TypeForKey("va")
|
||||||
|
t = language.Tag{LangID: b, ScriptID: s, RegionID: r}
|
||||||
|
if variant != "" {
|
||||||
|
t, _ = t.SetTypeForKey("va", variant)
|
||||||
|
hasExtra = true
|
||||||
|
}
|
||||||
|
exact = old == t
|
||||||
|
} else {
|
||||||
|
exact = false
|
||||||
|
}
|
||||||
|
if hasExtra {
|
||||||
|
// We have some variants.
|
||||||
|
for i, s := range specialTags {
|
||||||
|
if s == t {
|
||||||
|
return ID(i + len(coreTags)), exact
|
||||||
|
}
|
||||||
|
}
|
||||||
|
exact = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if x, ok := getCoreIndex(t); ok {
|
||||||
|
return x, exact
|
||||||
|
}
|
||||||
|
exact = false
|
||||||
|
if r != 0 && s == 0 {
|
||||||
|
// Deal with cases where an extra script is inserted for the region.
|
||||||
|
t, _ := t.Maximize()
|
||||||
|
if x, ok := getCoreIndex(t); ok {
|
||||||
|
return x, exact
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for t = t.Parent(); t != root; t = t.Parent() {
|
||||||
|
// No variants specified: just compare core components.
|
||||||
|
// The key has the form lllssrrr, where l, s, and r are nibbles for
|
||||||
|
// respectively the langID, scriptID, and regionID.
|
||||||
|
if x, ok := getCoreIndex(t); ok {
|
||||||
|
return x, exact
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, exact
|
||||||
|
}
|
||||||
|
|
||||||
|
var root = language.Tag{}
|
120
vendor/golang.org/x/text/internal/language/compact/parents.go
generated
vendored
Normal file
120
vendor/golang.org/x/text/internal/language/compact/parents.go
generated
vendored
Normal file
@ -0,0 +1,120 @@
|
|||||||
|
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||||
|
|
||||||
|
package compact
|
||||||
|
|
||||||
|
// parents maps a compact index of a tag to the compact index of the parent of
|
||||||
|
// this tag.
|
||||||
|
var parents = []ID{ // 775 elements
|
||||||
|
// Entry 0 - 3F
|
||||||
|
0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0004, 0x0000, 0x0006,
|
||||||
|
0x0000, 0x0008, 0x0000, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a,
|
||||||
|
0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a,
|
||||||
|
0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a,
|
||||||
|
0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x0000,
|
||||||
|
0x0000, 0x0028, 0x0000, 0x002a, 0x0000, 0x002c, 0x0000, 0x0000,
|
||||||
|
0x002f, 0x002e, 0x002e, 0x0000, 0x0033, 0x0000, 0x0035, 0x0000,
|
||||||
|
0x0037, 0x0000, 0x0039, 0x0000, 0x003b, 0x0000, 0x0000, 0x003e,
|
||||||
|
// Entry 40 - 7F
|
||||||
|
0x0000, 0x0040, 0x0040, 0x0000, 0x0043, 0x0043, 0x0000, 0x0046,
|
||||||
|
0x0000, 0x0048, 0x0000, 0x0000, 0x004b, 0x004a, 0x004a, 0x0000,
|
||||||
|
0x004f, 0x004f, 0x004f, 0x004f, 0x0000, 0x0054, 0x0054, 0x0000,
|
||||||
|
0x0057, 0x0000, 0x0059, 0x0000, 0x005b, 0x0000, 0x005d, 0x005d,
|
||||||
|
0x0000, 0x0060, 0x0000, 0x0062, 0x0000, 0x0064, 0x0000, 0x0066,
|
||||||
|
0x0066, 0x0000, 0x0069, 0x0000, 0x006b, 0x006b, 0x006b, 0x006b,
|
||||||
|
0x006b, 0x006b, 0x006b, 0x0000, 0x0073, 0x0000, 0x0075, 0x0000,
|
||||||
|
0x0077, 0x0000, 0x0000, 0x007a, 0x0000, 0x007c, 0x0000, 0x007e,
|
||||||
|
// Entry 80 - BF
|
||||||
|
0x0000, 0x0080, 0x0080, 0x0000, 0x0083, 0x0083, 0x0000, 0x0086,
|
||||||
|
0x0087, 0x0087, 0x0087, 0x0086, 0x0088, 0x0087, 0x0087, 0x0087,
|
||||||
|
0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0088,
|
||||||
|
0x0087, 0x0087, 0x0087, 0x0087, 0x0088, 0x0087, 0x0088, 0x0087,
|
||||||
|
0x0087, 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
|
||||||
|
0x0087, 0x0087, 0x0087, 0x0086, 0x0087, 0x0087, 0x0087, 0x0087,
|
||||||
|
0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
|
||||||
|
0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0086, 0x0087, 0x0086,
|
||||||
|
// Entry C0 - FF
|
||||||
|
0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
|
||||||
|
0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
|
||||||
|
0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, 0x0087,
|
||||||
|
0x0087, 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
|
||||||
|
0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0086, 0x0086, 0x0087,
|
||||||
|
0x0087, 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0000,
|
||||||
|
0x00ef, 0x0000, 0x00f1, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2,
|
||||||
|
0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f1, 0x00f2, 0x00f1, 0x00f1,
|
||||||
|
// Entry 100 - 13F
|
||||||
|
0x00f2, 0x00f2, 0x00f1, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f1,
|
||||||
|
0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x0000, 0x010e,
|
||||||
|
0x0000, 0x0110, 0x0000, 0x0112, 0x0000, 0x0114, 0x0114, 0x0000,
|
||||||
|
0x0117, 0x0117, 0x0117, 0x0117, 0x0000, 0x011c, 0x0000, 0x011e,
|
||||||
|
0x0000, 0x0120, 0x0120, 0x0000, 0x0123, 0x0123, 0x0123, 0x0123,
|
||||||
|
0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
|
||||||
|
0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
|
||||||
|
0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
|
||||||
|
// Entry 140 - 17F
|
||||||
|
0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
|
||||||
|
0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
|
||||||
|
0x0123, 0x0123, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156,
|
||||||
|
0x0000, 0x0158, 0x0000, 0x015a, 0x0000, 0x015c, 0x015c, 0x015c,
|
||||||
|
0x0000, 0x0160, 0x0000, 0x0000, 0x0163, 0x0000, 0x0165, 0x0000,
|
||||||
|
0x0167, 0x0167, 0x0167, 0x0000, 0x016b, 0x0000, 0x016d, 0x0000,
|
||||||
|
0x016f, 0x0000, 0x0171, 0x0171, 0x0000, 0x0174, 0x0000, 0x0176,
|
||||||
|
0x0000, 0x0178, 0x0000, 0x017a, 0x0000, 0x017c, 0x0000, 0x017e,
|
||||||
|
// Entry 180 - 1BF
|
||||||
|
0x0000, 0x0000, 0x0000, 0x0182, 0x0000, 0x0184, 0x0184, 0x0184,
|
||||||
|
0x0184, 0x0000, 0x0000, 0x0000, 0x018b, 0x0000, 0x0000, 0x018e,
|
||||||
|
0x0000, 0x0000, 0x0191, 0x0000, 0x0000, 0x0000, 0x0195, 0x0000,
|
||||||
|
0x0197, 0x0000, 0x0000, 0x019a, 0x0000, 0x0000, 0x019d, 0x0000,
|
||||||
|
0x019f, 0x0000, 0x01a1, 0x0000, 0x01a3, 0x0000, 0x01a5, 0x0000,
|
||||||
|
0x01a7, 0x0000, 0x01a9, 0x0000, 0x01ab, 0x0000, 0x01ad, 0x0000,
|
||||||
|
0x01af, 0x0000, 0x01b1, 0x01b1, 0x0000, 0x01b4, 0x0000, 0x01b6,
|
||||||
|
0x0000, 0x01b8, 0x0000, 0x01ba, 0x0000, 0x01bc, 0x0000, 0x0000,
|
||||||
|
// Entry 1C0 - 1FF
|
||||||
|
0x01bf, 0x0000, 0x01c1, 0x0000, 0x01c3, 0x0000, 0x01c5, 0x0000,
|
||||||
|
0x01c7, 0x0000, 0x01c9, 0x0000, 0x01cb, 0x01cb, 0x01cb, 0x01cb,
|
||||||
|
0x0000, 0x01d0, 0x0000, 0x01d2, 0x01d2, 0x0000, 0x01d5, 0x0000,
|
||||||
|
0x01d7, 0x0000, 0x01d9, 0x0000, 0x01db, 0x0000, 0x01dd, 0x0000,
|
||||||
|
0x01df, 0x01df, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6,
|
||||||
|
0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x0000, 0x01ee,
|
||||||
|
0x0000, 0x01f0, 0x0000, 0x0000, 0x01f3, 0x0000, 0x01f5, 0x01f5,
|
||||||
|
0x01f5, 0x0000, 0x01f9, 0x0000, 0x01fb, 0x0000, 0x01fd, 0x0000,
|
||||||
|
// Entry 200 - 23F
|
||||||
|
0x01ff, 0x0000, 0x0000, 0x0202, 0x0000, 0x0204, 0x0204, 0x0000,
|
||||||
|
0x0207, 0x0000, 0x0209, 0x0209, 0x0000, 0x020c, 0x020c, 0x0000,
|
||||||
|
0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x0000,
|
||||||
|
0x0217, 0x0000, 0x0219, 0x0000, 0x021b, 0x0000, 0x0000, 0x0000,
|
||||||
|
0x0000, 0x0000, 0x0221, 0x0000, 0x0000, 0x0224, 0x0000, 0x0226,
|
||||||
|
0x0226, 0x0000, 0x0229, 0x0000, 0x022b, 0x022b, 0x0000, 0x0000,
|
||||||
|
0x022f, 0x022e, 0x022e, 0x0000, 0x0000, 0x0234, 0x0000, 0x0236,
|
||||||
|
0x0000, 0x0238, 0x0000, 0x0244, 0x023a, 0x0244, 0x0244, 0x0244,
|
||||||
|
// Entry 240 - 27F
|
||||||
|
0x0244, 0x0244, 0x0244, 0x0244, 0x023a, 0x0244, 0x0244, 0x0000,
|
||||||
|
0x0247, 0x0247, 0x0247, 0x0000, 0x024b, 0x0000, 0x024d, 0x0000,
|
||||||
|
0x024f, 0x024f, 0x0000, 0x0252, 0x0000, 0x0254, 0x0254, 0x0254,
|
||||||
|
0x0254, 0x0254, 0x0254, 0x0000, 0x025b, 0x0000, 0x025d, 0x0000,
|
||||||
|
0x025f, 0x0000, 0x0261, 0x0000, 0x0263, 0x0000, 0x0265, 0x0000,
|
||||||
|
0x0000, 0x0268, 0x0268, 0x0268, 0x0000, 0x026c, 0x0000, 0x026e,
|
||||||
|
0x0000, 0x0270, 0x0000, 0x0000, 0x0000, 0x0274, 0x0273, 0x0273,
|
||||||
|
0x0000, 0x0278, 0x0000, 0x027a, 0x0000, 0x027c, 0x0000, 0x0000,
|
||||||
|
// Entry 280 - 2BF
|
||||||
|
0x0000, 0x0000, 0x0281, 0x0000, 0x0000, 0x0284, 0x0000, 0x0286,
|
||||||
|
0x0286, 0x0286, 0x0286, 0x0000, 0x028b, 0x028b, 0x028b, 0x0000,
|
||||||
|
0x028f, 0x028f, 0x028f, 0x028f, 0x028f, 0x0000, 0x0295, 0x0295,
|
||||||
|
0x0295, 0x0295, 0x0000, 0x0000, 0x0000, 0x0000, 0x029d, 0x029d,
|
||||||
|
0x029d, 0x0000, 0x02a1, 0x02a1, 0x02a1, 0x02a1, 0x0000, 0x0000,
|
||||||
|
0x02a7, 0x02a7, 0x02a7, 0x02a7, 0x0000, 0x02ac, 0x0000, 0x02ae,
|
||||||
|
0x02ae, 0x0000, 0x02b1, 0x0000, 0x02b3, 0x0000, 0x02b5, 0x02b5,
|
||||||
|
0x0000, 0x0000, 0x02b9, 0x0000, 0x0000, 0x0000, 0x02bd, 0x0000,
|
||||||
|
// Entry 2C0 - 2FF
|
||||||
|
0x02bf, 0x02bf, 0x0000, 0x0000, 0x02c3, 0x0000, 0x02c5, 0x0000,
|
||||||
|
0x02c7, 0x0000, 0x02c9, 0x0000, 0x02cb, 0x0000, 0x02cd, 0x02cd,
|
||||||
|
0x0000, 0x0000, 0x02d1, 0x0000, 0x02d3, 0x02d0, 0x02d0, 0x0000,
|
||||||
|
0x0000, 0x02d8, 0x02d7, 0x02d7, 0x0000, 0x0000, 0x02dd, 0x0000,
|
||||||
|
0x02df, 0x0000, 0x02e1, 0x0000, 0x0000, 0x02e4, 0x0000, 0x02e6,
|
||||||
|
0x0000, 0x0000, 0x02e9, 0x0000, 0x02eb, 0x0000, 0x02ed, 0x0000,
|
||||||
|
0x02ef, 0x02ef, 0x0000, 0x0000, 0x02f3, 0x02f2, 0x02f2, 0x0000,
|
||||||
|
0x02f7, 0x0000, 0x02f9, 0x02f9, 0x02f9, 0x02f9, 0x02f9, 0x0000,
|
||||||
|
// Entry 300 - 33F
|
||||||
|
0x02ff, 0x0300, 0x02ff, 0x0000, 0x0303, 0x0051, 0x00e6,
|
||||||
|
} // Size: 1574 bytes
|
||||||
|
|
||||||
|
// Total table size 1574 bytes (1KiB); checksum: 895AAF0B
|
1015
vendor/golang.org/x/text/internal/language/compact/tables.go
generated
vendored
Normal file
1015
vendor/golang.org/x/text/internal/language/compact/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
91
vendor/golang.org/x/text/internal/language/compact/tags.go
generated
vendored
Normal file
91
vendor/golang.org/x/text/internal/language/compact/tags.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package compact
|
||||||
|
|
||||||
|
var (
|
||||||
|
und = Tag{}
|
||||||
|
|
||||||
|
Und Tag = Tag{}
|
||||||
|
|
||||||
|
Afrikaans Tag = Tag{language: afIndex, locale: afIndex}
|
||||||
|
Amharic Tag = Tag{language: amIndex, locale: amIndex}
|
||||||
|
Arabic Tag = Tag{language: arIndex, locale: arIndex}
|
||||||
|
ModernStandardArabic Tag = Tag{language: ar001Index, locale: ar001Index}
|
||||||
|
Azerbaijani Tag = Tag{language: azIndex, locale: azIndex}
|
||||||
|
Bulgarian Tag = Tag{language: bgIndex, locale: bgIndex}
|
||||||
|
Bengali Tag = Tag{language: bnIndex, locale: bnIndex}
|
||||||
|
Catalan Tag = Tag{language: caIndex, locale: caIndex}
|
||||||
|
Czech Tag = Tag{language: csIndex, locale: csIndex}
|
||||||
|
Danish Tag = Tag{language: daIndex, locale: daIndex}
|
||||||
|
German Tag = Tag{language: deIndex, locale: deIndex}
|
||||||
|
Greek Tag = Tag{language: elIndex, locale: elIndex}
|
||||||
|
English Tag = Tag{language: enIndex, locale: enIndex}
|
||||||
|
AmericanEnglish Tag = Tag{language: enUSIndex, locale: enUSIndex}
|
||||||
|
BritishEnglish Tag = Tag{language: enGBIndex, locale: enGBIndex}
|
||||||
|
Spanish Tag = Tag{language: esIndex, locale: esIndex}
|
||||||
|
EuropeanSpanish Tag = Tag{language: esESIndex, locale: esESIndex}
|
||||||
|
LatinAmericanSpanish Tag = Tag{language: es419Index, locale: es419Index}
|
||||||
|
Estonian Tag = Tag{language: etIndex, locale: etIndex}
|
||||||
|
Persian Tag = Tag{language: faIndex, locale: faIndex}
|
||||||
|
Finnish Tag = Tag{language: fiIndex, locale: fiIndex}
|
||||||
|
Filipino Tag = Tag{language: filIndex, locale: filIndex}
|
||||||
|
French Tag = Tag{language: frIndex, locale: frIndex}
|
||||||
|
CanadianFrench Tag = Tag{language: frCAIndex, locale: frCAIndex}
|
||||||
|
Gujarati Tag = Tag{language: guIndex, locale: guIndex}
|
||||||
|
Hebrew Tag = Tag{language: heIndex, locale: heIndex}
|
||||||
|
Hindi Tag = Tag{language: hiIndex, locale: hiIndex}
|
||||||
|
Croatian Tag = Tag{language: hrIndex, locale: hrIndex}
|
||||||
|
Hungarian Tag = Tag{language: huIndex, locale: huIndex}
|
||||||
|
Armenian Tag = Tag{language: hyIndex, locale: hyIndex}
|
||||||
|
Indonesian Tag = Tag{language: idIndex, locale: idIndex}
|
||||||
|
Icelandic Tag = Tag{language: isIndex, locale: isIndex}
|
||||||
|
Italian Tag = Tag{language: itIndex, locale: itIndex}
|
||||||
|
Japanese Tag = Tag{language: jaIndex, locale: jaIndex}
|
||||||
|
Georgian Tag = Tag{language: kaIndex, locale: kaIndex}
|
||||||
|
Kazakh Tag = Tag{language: kkIndex, locale: kkIndex}
|
||||||
|
Khmer Tag = Tag{language: kmIndex, locale: kmIndex}
|
||||||
|
Kannada Tag = Tag{language: knIndex, locale: knIndex}
|
||||||
|
Korean Tag = Tag{language: koIndex, locale: koIndex}
|
||||||
|
Kirghiz Tag = Tag{language: kyIndex, locale: kyIndex}
|
||||||
|
Lao Tag = Tag{language: loIndex, locale: loIndex}
|
||||||
|
Lithuanian Tag = Tag{language: ltIndex, locale: ltIndex}
|
||||||
|
Latvian Tag = Tag{language: lvIndex, locale: lvIndex}
|
||||||
|
Macedonian Tag = Tag{language: mkIndex, locale: mkIndex}
|
||||||
|
Malayalam Tag = Tag{language: mlIndex, locale: mlIndex}
|
||||||
|
Mongolian Tag = Tag{language: mnIndex, locale: mnIndex}
|
||||||
|
Marathi Tag = Tag{language: mrIndex, locale: mrIndex}
|
||||||
|
Malay Tag = Tag{language: msIndex, locale: msIndex}
|
||||||
|
Burmese Tag = Tag{language: myIndex, locale: myIndex}
|
||||||
|
Nepali Tag = Tag{language: neIndex, locale: neIndex}
|
||||||
|
Dutch Tag = Tag{language: nlIndex, locale: nlIndex}
|
||||||
|
Norwegian Tag = Tag{language: noIndex, locale: noIndex}
|
||||||
|
Punjabi Tag = Tag{language: paIndex, locale: paIndex}
|
||||||
|
Polish Tag = Tag{language: plIndex, locale: plIndex}
|
||||||
|
Portuguese Tag = Tag{language: ptIndex, locale: ptIndex}
|
||||||
|
BrazilianPortuguese Tag = Tag{language: ptBRIndex, locale: ptBRIndex}
|
||||||
|
EuropeanPortuguese Tag = Tag{language: ptPTIndex, locale: ptPTIndex}
|
||||||
|
Romanian Tag = Tag{language: roIndex, locale: roIndex}
|
||||||
|
Russian Tag = Tag{language: ruIndex, locale: ruIndex}
|
||||||
|
Sinhala Tag = Tag{language: siIndex, locale: siIndex}
|
||||||
|
Slovak Tag = Tag{language: skIndex, locale: skIndex}
|
||||||
|
Slovenian Tag = Tag{language: slIndex, locale: slIndex}
|
||||||
|
Albanian Tag = Tag{language: sqIndex, locale: sqIndex}
|
||||||
|
Serbian Tag = Tag{language: srIndex, locale: srIndex}
|
||||||
|
SerbianLatin Tag = Tag{language: srLatnIndex, locale: srLatnIndex}
|
||||||
|
Swedish Tag = Tag{language: svIndex, locale: svIndex}
|
||||||
|
Swahili Tag = Tag{language: swIndex, locale: swIndex}
|
||||||
|
Tamil Tag = Tag{language: taIndex, locale: taIndex}
|
||||||
|
Telugu Tag = Tag{language: teIndex, locale: teIndex}
|
||||||
|
Thai Tag = Tag{language: thIndex, locale: thIndex}
|
||||||
|
Turkish Tag = Tag{language: trIndex, locale: trIndex}
|
||||||
|
Ukrainian Tag = Tag{language: ukIndex, locale: ukIndex}
|
||||||
|
Urdu Tag = Tag{language: urIndex, locale: urIndex}
|
||||||
|
Uzbek Tag = Tag{language: uzIndex, locale: uzIndex}
|
||||||
|
Vietnamese Tag = Tag{language: viIndex, locale: viIndex}
|
||||||
|
Chinese Tag = Tag{language: zhIndex, locale: zhIndex}
|
||||||
|
SimplifiedChinese Tag = Tag{language: zhHansIndex, locale: zhHansIndex}
|
||||||
|
TraditionalChinese Tag = Tag{language: zhHantIndex, locale: zhHantIndex}
|
||||||
|
Zulu Tag = Tag{language: zuIndex, locale: zuIndex}
|
||||||
|
)
|
167
vendor/golang.org/x/text/internal/language/compose.go
generated
vendored
Normal file
167
vendor/golang.org/x/text/internal/language/compose.go
generated
vendored
Normal file
@ -0,0 +1,167 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Builder allows constructing a Tag from individual components.
|
||||||
|
// Its main user is Compose in the top-level language package.
|
||||||
|
type Builder struct {
|
||||||
|
Tag Tag
|
||||||
|
|
||||||
|
private string // the x extension
|
||||||
|
variants []string
|
||||||
|
extensions []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make returns a new Tag from the current settings.
|
||||||
|
func (b *Builder) Make() Tag {
|
||||||
|
t := b.Tag
|
||||||
|
|
||||||
|
if len(b.extensions) > 0 || len(b.variants) > 0 {
|
||||||
|
sort.Sort(sortVariants(b.variants))
|
||||||
|
sort.Strings(b.extensions)
|
||||||
|
|
||||||
|
if b.private != "" {
|
||||||
|
b.extensions = append(b.extensions, b.private)
|
||||||
|
}
|
||||||
|
n := maxCoreSize + tokenLen(b.variants...) + tokenLen(b.extensions...)
|
||||||
|
buf := make([]byte, n)
|
||||||
|
p := t.genCoreBytes(buf)
|
||||||
|
t.pVariant = byte(p)
|
||||||
|
p += appendTokens(buf[p:], b.variants...)
|
||||||
|
t.pExt = uint16(p)
|
||||||
|
p += appendTokens(buf[p:], b.extensions...)
|
||||||
|
t.str = string(buf[:p])
|
||||||
|
// We may not always need to remake the string, but when or when not
|
||||||
|
// to do so is rather tricky.
|
||||||
|
scan := makeScanner(buf[:p])
|
||||||
|
t, _ = parse(&scan, "")
|
||||||
|
return t
|
||||||
|
|
||||||
|
} else if b.private != "" {
|
||||||
|
t.str = b.private
|
||||||
|
t.RemakeString()
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTag copies all the settings from a given Tag. Any previously set values
|
||||||
|
// are discarded.
|
||||||
|
func (b *Builder) SetTag(t Tag) {
|
||||||
|
b.Tag.LangID = t.LangID
|
||||||
|
b.Tag.RegionID = t.RegionID
|
||||||
|
b.Tag.ScriptID = t.ScriptID
|
||||||
|
// TODO: optimize
|
||||||
|
b.variants = b.variants[:0]
|
||||||
|
if variants := t.Variants(); variants != "" {
|
||||||
|
for _, vr := range strings.Split(variants[1:], "-") {
|
||||||
|
b.variants = append(b.variants, vr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.extensions, b.private = b.extensions[:0], ""
|
||||||
|
for _, e := range t.Extensions() {
|
||||||
|
b.AddExt(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddExt adds extension e to the tag. e must be a valid extension as returned
|
||||||
|
// by Tag.Extension. If the extension already exists, it will be discarded,
|
||||||
|
// except for a -u extension, where non-existing key-type pairs will added.
|
||||||
|
func (b *Builder) AddExt(e string) {
|
||||||
|
if e[0] == 'x' {
|
||||||
|
if b.private == "" {
|
||||||
|
b.private = e
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for i, s := range b.extensions {
|
||||||
|
if s[0] == e[0] {
|
||||||
|
if e[0] == 'u' {
|
||||||
|
b.extensions[i] += e[1:]
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.extensions = append(b.extensions, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetExt sets the extension e to the tag. e must be a valid extension as
|
||||||
|
// returned by Tag.Extension. If the extension already exists, it will be
|
||||||
|
// overwritten, except for a -u extension, where the individual key-type pairs
|
||||||
|
// will be set.
|
||||||
|
func (b *Builder) SetExt(e string) {
|
||||||
|
if e[0] == 'x' {
|
||||||
|
b.private = e
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for i, s := range b.extensions {
|
||||||
|
if s[0] == e[0] {
|
||||||
|
if e[0] == 'u' {
|
||||||
|
b.extensions[i] = e + s[1:]
|
||||||
|
} else {
|
||||||
|
b.extensions[i] = e
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.extensions = append(b.extensions, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddVariant adds any number of variants.
|
||||||
|
func (b *Builder) AddVariant(v ...string) {
|
||||||
|
for _, v := range v {
|
||||||
|
if v != "" {
|
||||||
|
b.variants = append(b.variants, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearVariants removes any variants previously added, including those
|
||||||
|
// copied from a Tag in SetTag.
|
||||||
|
func (b *Builder) ClearVariants() {
|
||||||
|
b.variants = b.variants[:0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearExtensions removes any extensions previously added, including those
|
||||||
|
// copied from a Tag in SetTag.
|
||||||
|
func (b *Builder) ClearExtensions() {
|
||||||
|
b.private = ""
|
||||||
|
b.extensions = b.extensions[:0]
|
||||||
|
}
|
||||||
|
|
||||||
|
func tokenLen(token ...string) (n int) {
|
||||||
|
for _, t := range token {
|
||||||
|
n += len(t) + 1
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendTokens(b []byte, token ...string) int {
|
||||||
|
p := 0
|
||||||
|
for _, t := range token {
|
||||||
|
b[p] = '-'
|
||||||
|
copy(b[p+1:], t)
|
||||||
|
p += 1 + len(t)
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
type sortVariants []string
|
||||||
|
|
||||||
|
func (s sortVariants) Len() int {
|
||||||
|
return len(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s sortVariants) Swap(i, j int) {
|
||||||
|
s[j], s[i] = s[i], s[j]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s sortVariants) Less(i, j int) bool {
|
||||||
|
return variantIndex[s[i]] < variantIndex[s[j]]
|
||||||
|
}
|
28
vendor/golang.org/x/text/internal/language/coverage.go
generated
vendored
Normal file
28
vendor/golang.org/x/text/internal/language/coverage.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
// BaseLanguages returns the list of all supported base languages. It generates
|
||||||
|
// the list by traversing the internal structures.
|
||||||
|
func BaseLanguages() []Language {
|
||||||
|
base := make([]Language, 0, NumLanguages)
|
||||||
|
for i := 0; i < langNoIndexOffset; i++ {
|
||||||
|
// We included "und" already for the value 0.
|
||||||
|
if i != nonCanonicalUnd {
|
||||||
|
base = append(base, Language(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i := langNoIndexOffset
|
||||||
|
for _, v := range langNoIndex {
|
||||||
|
for k := 0; k < 8; k++ {
|
||||||
|
if v&1 == 1 {
|
||||||
|
base = append(base, Language(i))
|
||||||
|
}
|
||||||
|
v >>= 1
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return base
|
||||||
|
}
|
@ -57,7 +57,7 @@ Each 3-letter code is followed by its 1-byte langID.`,
|
|||||||
`
|
`
|
||||||
altLangIndex is used to convert indexes in altLangISO3 to langIDs.`,
|
altLangIndex is used to convert indexes in altLangISO3 to langIDs.`,
|
||||||
`
|
`
|
||||||
langAliasMap maps langIDs to their suggested replacements.`,
|
AliasMap maps langIDs to their suggested replacements.`,
|
||||||
`
|
`
|
||||||
script is an alphabetically sorted list of ISO 15924 codes. The index
|
script is an alphabetically sorted list of ISO 15924 codes. The index
|
||||||
of the script in the string, divided by 4, is the internal scriptID.`,
|
of the script in the string, divided by 4, is the internal scriptID.`,
|
||||||
@ -114,13 +114,6 @@ likelyRegionList holds lists info associated with likelyRegion.`,
|
|||||||
likelyScript is a lookup table, indexed by scriptID, for the most likely
|
likelyScript is a lookup table, indexed by scriptID, for the most likely
|
||||||
languages and regions given a script.`,
|
languages and regions given a script.`,
|
||||||
`
|
`
|
||||||
matchLang holds pairs of langIDs of base languages that are typically
|
|
||||||
mutually intelligible. Each pair is associated with a confidence and
|
|
||||||
whether the intelligibility goes one or both ways.`,
|
|
||||||
`
|
|
||||||
matchScript holds pairs of scriptIDs where readers of one script
|
|
||||||
can typically also read the other. Each is associated with a confidence.`,
|
|
||||||
`
|
|
||||||
nRegionGroups is the number of region groups.`,
|
nRegionGroups is the number of region groups.`,
|
||||||
`
|
`
|
||||||
regionInclusion maps region identifiers to sets of regions in regionInclusionBits,
|
regionInclusion maps region identifiers to sets of regions in regionInclusionBits,
|
||||||
@ -481,17 +474,17 @@ func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{})
|
|||||||
b.p()
|
b.p()
|
||||||
}
|
}
|
||||||
|
|
||||||
type fromTo struct {
|
type FromTo struct {
|
||||||
from, to uint16
|
From, To uint16
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) {
|
func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) {
|
||||||
ss.sortFunc(func(a, b string) bool {
|
ss.sortFunc(func(a, b string) bool {
|
||||||
return index(a) < index(b)
|
return index(a) < index(b)
|
||||||
})
|
})
|
||||||
m := []fromTo{}
|
m := []FromTo{}
|
||||||
for _, s := range ss.s {
|
for _, s := range ss.s {
|
||||||
m = append(m, fromTo{index(s), index(ss.update[s])})
|
m = append(m, FromTo{index(s), index(ss.update[s])})
|
||||||
}
|
}
|
||||||
b.writeSlice(name, m)
|
b.writeSlice(name, m)
|
||||||
}
|
}
|
||||||
@ -665,9 +658,9 @@ func (b *builder) parseIndices() {
|
|||||||
b.langNoIndex.remove(s)
|
b.langNoIndex.remove(s)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
b.writeConst("numLanguages", len(b.lang.slice())+len(b.langNoIndex.slice()))
|
b.writeConst("NumLanguages", len(b.lang.slice())+len(b.langNoIndex.slice()))
|
||||||
b.writeConst("numScripts", len(b.script.slice()))
|
b.writeConst("NumScripts", len(b.script.slice()))
|
||||||
b.writeConst("numRegions", len(b.region.slice()))
|
b.writeConst("NumRegions", len(b.region.slice()))
|
||||||
|
|
||||||
// Add dummy codes at the start of each list to represent "unspecified".
|
// Add dummy codes at the start of each list to represent "unspecified".
|
||||||
b.lang.add("---")
|
b.lang.add("---")
|
||||||
@ -698,8 +691,8 @@ func (b *builder) computeRegionGroups() {
|
|||||||
b.groups[group] = index(len(b.groups))
|
b.groups[group] = index(len(b.groups))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(b.groups) > 32 {
|
if len(b.groups) > 64 {
|
||||||
log.Fatalf("only 32 groups supported, found %d", len(b.groups))
|
log.Fatalf("only 64 groups supported, found %d", len(b.groups))
|
||||||
}
|
}
|
||||||
b.writeConst("nRegionGroups", len(b.groups))
|
b.writeConst("nRegionGroups", len(b.groups))
|
||||||
}
|
}
|
||||||
@ -729,7 +722,7 @@ func (b *builder) writeLanguage() {
|
|||||||
// Get language codes that need to be mapped (overlong 3-letter codes,
|
// Get language codes that need to be mapped (overlong 3-letter codes,
|
||||||
// deprecated 2-letter codes, legacy and grandfathered tags.)
|
// deprecated 2-letter codes, legacy and grandfathered tags.)
|
||||||
langAliasMap := stringSet{}
|
langAliasMap := stringSet{}
|
||||||
aliasTypeMap := map[string]langAliasType{}
|
aliasTypeMap := map[string]AliasType{}
|
||||||
|
|
||||||
// altLangISO3 get the alternative ISO3 names that need to be mapped.
|
// altLangISO3 get the alternative ISO3 names that need to be mapped.
|
||||||
altLangISO3 := stringSet{}
|
altLangISO3 := stringSet{}
|
||||||
@ -751,7 +744,7 @@ func (b *builder) writeLanguage() {
|
|||||||
} else if len(a.Type) <= 3 {
|
} else if len(a.Type) <= 3 {
|
||||||
switch a.Reason {
|
switch a.Reason {
|
||||||
case "macrolanguage":
|
case "macrolanguage":
|
||||||
aliasTypeMap[a.Type] = langMacro
|
aliasTypeMap[a.Type] = Macro
|
||||||
case "deprecated":
|
case "deprecated":
|
||||||
// handled elsewhere
|
// handled elsewhere
|
||||||
continue
|
continue
|
||||||
@ -759,7 +752,7 @@ func (b *builder) writeLanguage() {
|
|||||||
if a.Type == "no" {
|
if a.Type == "no" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
aliasTypeMap[a.Type] = langLegacy
|
aliasTypeMap[a.Type] = Legacy
|
||||||
default:
|
default:
|
||||||
log.Fatalf("new %s alias: %s", a.Reason, a.Type)
|
log.Fatalf("new %s alias: %s", a.Reason, a.Type)
|
||||||
}
|
}
|
||||||
@ -771,14 +764,14 @@ func (b *builder) writeLanguage() {
|
|||||||
// This can be removed if CLDR adopts this change.
|
// This can be removed if CLDR adopts this change.
|
||||||
langAliasMap.add("nb")
|
langAliasMap.add("nb")
|
||||||
langAliasMap.updateLater("nb", "no")
|
langAliasMap.updateLater("nb", "no")
|
||||||
aliasTypeMap["nb"] = langMacro
|
aliasTypeMap["nb"] = Macro
|
||||||
|
|
||||||
for k, v := range b.registry {
|
for k, v := range b.registry {
|
||||||
// Also add deprecated values for 3-letter ISO codes, which CLDR omits.
|
// Also add deprecated values for 3-letter ISO codes, which CLDR omits.
|
||||||
if v.typ == "language" && v.deprecated != "" && v.preferred != "" {
|
if v.typ == "language" && v.deprecated != "" && v.preferred != "" {
|
||||||
langAliasMap.add(k)
|
langAliasMap.add(k)
|
||||||
langAliasMap.updateLater(k, v.preferred)
|
langAliasMap.updateLater(k, v.preferred)
|
||||||
aliasTypeMap[k] = langDeprecated
|
aliasTypeMap[k] = Deprecated
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Fix CLDR mappings.
|
// Fix CLDR mappings.
|
||||||
@ -806,10 +799,10 @@ func (b *builder) writeLanguage() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Complete canonialized language tags.
|
// Complete canonicalized language tags.
|
||||||
lang.freeze()
|
lang.freeze()
|
||||||
for i, v := range lang.s {
|
for i, v := range lang.s {
|
||||||
// We can avoid these manual entries by using the IANI registry directly.
|
// We can avoid these manual entries by using the IANA registry directly.
|
||||||
// Seems easier to update the list manually, as changes are rare.
|
// Seems easier to update the list manually, as changes are rare.
|
||||||
// The panic in this loop will trigger if we miss an entry.
|
// The panic in this loop will trigger if we miss an entry.
|
||||||
add := ""
|
add := ""
|
||||||
@ -844,12 +837,12 @@ func (b *builder) writeLanguage() {
|
|||||||
b.writeConst("altLangISO3", tag.Index(altLangISO3.join()))
|
b.writeConst("altLangISO3", tag.Index(altLangISO3.join()))
|
||||||
b.writeSlice("altLangIndex", altLangIndex)
|
b.writeSlice("altLangIndex", altLangIndex)
|
||||||
|
|
||||||
b.writeSortedMap("langAliasMap", &langAliasMap, b.langIndex)
|
b.writeSortedMap("AliasMap", &langAliasMap, b.langIndex)
|
||||||
types := make([]langAliasType, len(langAliasMap.s))
|
types := make([]AliasType, len(langAliasMap.s))
|
||||||
for i, s := range langAliasMap.s {
|
for i, s := range langAliasMap.s {
|
||||||
types[i] = aliasTypeMap[s]
|
types[i] = aliasTypeMap[s]
|
||||||
}
|
}
|
||||||
b.writeSlice("langAliasTypes", types)
|
b.writeSlice("AliasTypes", types)
|
||||||
}
|
}
|
||||||
|
|
||||||
var scriptConsts = []string{
|
var scriptConsts = []string{
|
||||||
@ -916,7 +909,7 @@ func (b *builder) writeRegion() {
|
|||||||
i := b.region.index(s)
|
i := b.region.index(s)
|
||||||
for _, d := range e.description {
|
for _, d := range e.description {
|
||||||
if strings.Contains(d, "Private use") {
|
if strings.Contains(d, "Private use") {
|
||||||
regionTypes[i] = iso3166UserAssgined
|
regionTypes[i] = iso3166UserAssigned
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
regionTypes[i] |= bcp47Region
|
regionTypes[i] |= bcp47Region
|
||||||
@ -1073,7 +1066,7 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
iso3166UserAssgined = 1 << iota
|
iso3166UserAssigned = 1 << iota
|
||||||
ccTLD
|
ccTLD
|
||||||
bcp47Region
|
bcp47Region
|
||||||
)
|
)
|
||||||
@ -1361,125 +1354,6 @@ func (b *builder) writeLikelyData() {
|
|||||||
b.writeSlice("likelyRegionGroup", likelyRegionGroup)
|
b.writeSlice("likelyRegionGroup", likelyRegionGroup)
|
||||||
}
|
}
|
||||||
|
|
||||||
type mutualIntelligibility struct {
|
|
||||||
want, have uint16
|
|
||||||
conf uint8
|
|
||||||
oneway bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type scriptIntelligibility struct {
|
|
||||||
lang uint16 // langID or 0 if *
|
|
||||||
want, have uint8
|
|
||||||
conf uint8
|
|
||||||
}
|
|
||||||
|
|
||||||
type sortByConf []mutualIntelligibility
|
|
||||||
|
|
||||||
func (l sortByConf) Less(a, b int) bool {
|
|
||||||
return l[a].conf > l[b].conf
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l sortByConf) Swap(a, b int) {
|
|
||||||
l[a], l[b] = l[b], l[a]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l sortByConf) Len() int {
|
|
||||||
return len(l)
|
|
||||||
}
|
|
||||||
|
|
||||||
// toConf converts a percentage value [0, 100] to a confidence class.
|
|
||||||
func toConf(pct uint8) uint8 {
|
|
||||||
switch {
|
|
||||||
case pct == 100:
|
|
||||||
return 3 // Exact
|
|
||||||
case pct >= 90:
|
|
||||||
return 2 // High
|
|
||||||
case pct > 50:
|
|
||||||
return 1 // Low
|
|
||||||
default:
|
|
||||||
return 0 // No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeMatchData writes tables with languages and scripts for which there is
|
|
||||||
// mutual intelligibility. The data is based on CLDR's languageMatching data.
|
|
||||||
// Note that we use a different algorithm than the one defined by CLDR and that
|
|
||||||
// we slightly modify the data. For example, we convert scores to confidence levels.
|
|
||||||
// We also drop all region-related data as we use a different algorithm to
|
|
||||||
// determine region equivalence.
|
|
||||||
func (b *builder) writeMatchData() {
|
|
||||||
b.writeType(mutualIntelligibility{})
|
|
||||||
b.writeType(scriptIntelligibility{})
|
|
||||||
lm := b.supp.LanguageMatching.LanguageMatches
|
|
||||||
cldr.MakeSlice(&lm).SelectAnyOf("type", "written")
|
|
||||||
|
|
||||||
matchLang := []mutualIntelligibility{}
|
|
||||||
matchScript := []scriptIntelligibility{}
|
|
||||||
// Convert the languageMatch entries in lists keyed by desired language.
|
|
||||||
for _, m := range lm[0].LanguageMatch {
|
|
||||||
// Different versions of CLDR use different separators.
|
|
||||||
desired := strings.Replace(m.Desired, "-", "_", -1)
|
|
||||||
supported := strings.Replace(m.Supported, "-", "_", -1)
|
|
||||||
d := strings.Split(desired, "_")
|
|
||||||
s := strings.Split(supported, "_")
|
|
||||||
if len(d) != len(s) || len(d) > 2 {
|
|
||||||
// Skip all entries with regions and work around CLDR bug.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
pct, _ := strconv.ParseInt(m.Percent, 10, 8)
|
|
||||||
if len(d) == 2 && d[0] == s[0] && len(d[1]) == 4 {
|
|
||||||
// language-script pair.
|
|
||||||
lang := uint16(0)
|
|
||||||
if d[0] != "*" {
|
|
||||||
lang = uint16(b.langIndex(d[0]))
|
|
||||||
}
|
|
||||||
matchScript = append(matchScript, scriptIntelligibility{
|
|
||||||
lang: lang,
|
|
||||||
want: uint8(b.script.index(d[1])),
|
|
||||||
have: uint8(b.script.index(s[1])),
|
|
||||||
conf: toConf(uint8(pct)),
|
|
||||||
})
|
|
||||||
if m.Oneway != "true" {
|
|
||||||
matchScript = append(matchScript, scriptIntelligibility{
|
|
||||||
lang: lang,
|
|
||||||
want: uint8(b.script.index(s[1])),
|
|
||||||
have: uint8(b.script.index(d[1])),
|
|
||||||
conf: toConf(uint8(pct)),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
} else if len(d) == 1 && d[0] != "*" {
|
|
||||||
if pct == 100 {
|
|
||||||
// nb == no is already handled by macro mapping. Check there
|
|
||||||
// really is only this case.
|
|
||||||
if d[0] != "no" || s[0] != "nb" {
|
|
||||||
log.Fatalf("unhandled equivalence %s == %s", s[0], d[0])
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
matchLang = append(matchLang, mutualIntelligibility{
|
|
||||||
want: uint16(b.langIndex(d[0])),
|
|
||||||
have: uint16(b.langIndex(s[0])),
|
|
||||||
conf: uint8(pct),
|
|
||||||
oneway: m.Oneway == "true",
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
// TODO: Handle other mappings.
|
|
||||||
a := []string{"*;*", "*_*;*_*", "es_MX;es_419"}
|
|
||||||
s := strings.Join([]string{desired, supported}, ";")
|
|
||||||
if i := sort.SearchStrings(a, s); i == len(a) || a[i] != s {
|
|
||||||
log.Printf("%q not handled", s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.Stable(sortByConf(matchLang))
|
|
||||||
// collapse percentage into confidence classes
|
|
||||||
for i, m := range matchLang {
|
|
||||||
matchLang[i].conf = toConf(m.conf)
|
|
||||||
}
|
|
||||||
b.writeSlice("matchLang", matchLang)
|
|
||||||
b.writeSlice("matchScript", matchScript)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *builder) writeRegionInclusionData() {
|
func (b *builder) writeRegionInclusionData() {
|
||||||
var (
|
var (
|
||||||
// mm holds for each group the set of groups with a distance of 1.
|
// mm holds for each group the set of groups with a distance of 1.
|
||||||
@ -1507,7 +1381,7 @@ func (b *builder) writeRegionInclusionData() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
regionContainment := make([]uint32, len(b.groups))
|
regionContainment := make([]uint64, len(b.groups))
|
||||||
for _, g := range b.groups {
|
for _, g := range b.groups {
|
||||||
l := containment[g]
|
l := containment[g]
|
||||||
|
|
||||||
@ -1521,15 +1395,14 @@ func (b *builder) writeRegionInclusionData() {
|
|||||||
for _, v := range l {
|
for _, v := range l {
|
||||||
regionContainment[g] |= 1 << v
|
regionContainment[g] |= 1 << v
|
||||||
}
|
}
|
||||||
// log.Printf("%d: %X", g, regionContainment[g])
|
|
||||||
}
|
}
|
||||||
b.writeSlice("regionContainment", regionContainment)
|
b.writeSlice("regionContainment", regionContainment)
|
||||||
|
|
||||||
regionInclusion := make([]uint8, len(b.region.s))
|
regionInclusion := make([]uint8, len(b.region.s))
|
||||||
bvs := make(map[uint32]index)
|
bvs := make(map[uint64]index)
|
||||||
// Make the first bitvector positions correspond with the groups.
|
// Make the first bitvector positions correspond with the groups.
|
||||||
for r, i := range b.groups {
|
for r, i := range b.groups {
|
||||||
bv := uint32(1 << i)
|
bv := uint64(1 << i)
|
||||||
for _, g := range mm[r] {
|
for _, g := range mm[r] {
|
||||||
bv |= 1 << g
|
bv |= 1 << g
|
||||||
}
|
}
|
||||||
@ -1538,7 +1411,7 @@ func (b *builder) writeRegionInclusionData() {
|
|||||||
}
|
}
|
||||||
for r := 1; r < len(b.region.s); r++ {
|
for r := 1; r < len(b.region.s); r++ {
|
||||||
if _, ok := b.groups[r]; !ok {
|
if _, ok := b.groups[r]; !ok {
|
||||||
bv := uint32(0)
|
bv := uint64(0)
|
||||||
for _, g := range mm[r] {
|
for _, g := range mm[r] {
|
||||||
bv |= 1 << g
|
bv |= 1 << g
|
||||||
}
|
}
|
||||||
@ -1553,9 +1426,9 @@ func (b *builder) writeRegionInclusionData() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
b.writeSlice("regionInclusion", regionInclusion)
|
b.writeSlice("regionInclusion", regionInclusion)
|
||||||
regionInclusionBits := make([]uint32, len(bvs))
|
regionInclusionBits := make([]uint64, len(bvs))
|
||||||
for k, v := range bvs {
|
for k, v := range bvs {
|
||||||
regionInclusionBits[v] = uint32(k)
|
regionInclusionBits[v] = uint64(k)
|
||||||
}
|
}
|
||||||
// Add bit vectors for increasingly large distances until a fixed point is reached.
|
// Add bit vectors for increasingly large distances until a fixed point is reached.
|
||||||
regionInclusionNext := []uint8{}
|
regionInclusionNext := []uint8{}
|
||||||
@ -1634,7 +1507,7 @@ func main() {
|
|||||||
gen.WriteCLDRVersion(w)
|
gen.WriteCLDRVersion(w)
|
||||||
|
|
||||||
b.parseIndices()
|
b.parseIndices()
|
||||||
b.writeType(fromTo{})
|
b.writeType(FromTo{})
|
||||||
b.writeLanguage()
|
b.writeLanguage()
|
||||||
b.writeScript()
|
b.writeScript()
|
||||||
b.writeRegion()
|
b.writeRegion()
|
||||||
@ -1642,7 +1515,6 @@ func main() {
|
|||||||
// TODO: b.writeLocale()
|
// TODO: b.writeLocale()
|
||||||
b.computeRegionGroups()
|
b.computeRegionGroups()
|
||||||
b.writeLikelyData()
|
b.writeLikelyData()
|
||||||
b.writeMatchData()
|
|
||||||
b.writeRegionInclusionData()
|
b.writeRegionInclusionData()
|
||||||
b.writeParents()
|
b.writeParents()
|
||||||
}
|
}
|
@ -8,13 +8,13 @@ package main
|
|||||||
|
|
||||||
// This file contains code common to the maketables.go and the package code.
|
// This file contains code common to the maketables.go and the package code.
|
||||||
|
|
||||||
// langAliasType is the type of an alias in langAliasMap.
|
// AliasType is the type of an alias in AliasMap.
|
||||||
type langAliasType int8
|
type AliasType int8
|
||||||
|
|
||||||
const (
|
const (
|
||||||
langDeprecated langAliasType = iota
|
Deprecated AliasType = iota
|
||||||
langMacro
|
Macro
|
||||||
langLegacy
|
Legacy
|
||||||
|
|
||||||
langAliasTypeUnknown langAliasType = -1
|
AliasTypeUnknown AliasType = -1
|
||||||
)
|
)
|
596
vendor/golang.org/x/text/internal/language/language.go
generated
vendored
Normal file
596
vendor/golang.org/x/text/internal/language/language.go
generated
vendored
Normal file
@ -0,0 +1,596 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
//go:generate go run gen.go gen_common.go -output tables.go
|
||||||
|
|
||||||
|
package language // import "golang.org/x/text/internal/language"
|
||||||
|
|
||||||
|
// TODO: Remove above NOTE after:
|
||||||
|
// - verifying that tables are dropped correctly (most notably matcher tables).
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// maxCoreSize is the maximum size of a BCP 47 tag without variants and
|
||||||
|
// extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes.
|
||||||
|
maxCoreSize = 12
|
||||||
|
|
||||||
|
// max99thPercentileSize is a somewhat arbitrary buffer size that presumably
|
||||||
|
// is large enough to hold at least 99% of the BCP 47 tags.
|
||||||
|
max99thPercentileSize = 32
|
||||||
|
|
||||||
|
// maxSimpleUExtensionSize is the maximum size of a -u extension with one
|
||||||
|
// key-type pair. Equals len("-u-") + key (2) + dash + max value (8).
|
||||||
|
maxSimpleUExtensionSize = 14
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tag represents a BCP 47 language tag. It is used to specify an instance of a
|
||||||
|
// specific language or locale. All language tag values are guaranteed to be
|
||||||
|
// well-formed. The zero value of Tag is Und.
|
||||||
|
type Tag struct {
|
||||||
|
// TODO: the following fields have the form TagTypeID. This name is chosen
|
||||||
|
// to allow refactoring the public package without conflicting with its
|
||||||
|
// Base, Script, and Region methods. Once the transition is fully completed
|
||||||
|
// the ID can be stripped from the name.
|
||||||
|
|
||||||
|
LangID Language
|
||||||
|
RegionID Region
|
||||||
|
// TODO: we will soon run out of positions for ScriptID. Idea: instead of
|
||||||
|
// storing lang, region, and ScriptID codes, store only the compact index and
|
||||||
|
// have a lookup table from this code to its expansion. This greatly speeds
|
||||||
|
// up table lookup, speed up common variant cases.
|
||||||
|
// This will also immediately free up 3 extra bytes. Also, the pVariant
|
||||||
|
// field can now be moved to the lookup table, as the compact index uniquely
|
||||||
|
// determines the offset of a possible variant.
|
||||||
|
ScriptID Script
|
||||||
|
pVariant byte // offset in str, includes preceding '-'
|
||||||
|
pExt uint16 // offset of first extension, includes preceding '-'
|
||||||
|
|
||||||
|
// str is the string representation of the Tag. It will only be used if the
|
||||||
|
// tag has variants or extensions.
|
||||||
|
str string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make is a convenience wrapper for Parse that omits the error.
|
||||||
|
// In case of an error, a sensible default is returned.
|
||||||
|
func Make(s string) Tag {
|
||||||
|
t, _ := Parse(s)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Raw returns the raw base language, script and region, without making an
|
||||||
|
// attempt to infer their values.
|
||||||
|
// TODO: consider removing
|
||||||
|
func (t Tag) Raw() (b Language, s Script, r Region) {
|
||||||
|
return t.LangID, t.ScriptID, t.RegionID
|
||||||
|
}
|
||||||
|
|
||||||
|
// equalTags compares language, script and region subtags only.
|
||||||
|
func (t Tag) equalTags(a Tag) bool {
|
||||||
|
return t.LangID == a.LangID && t.ScriptID == a.ScriptID && t.RegionID == a.RegionID
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsRoot returns true if t is equal to language "und".
|
||||||
|
func (t Tag) IsRoot() bool {
|
||||||
|
if int(t.pVariant) < len(t.str) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return t.equalTags(Und)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsPrivateUse reports whether the Tag consists solely of an IsPrivateUse use
|
||||||
|
// tag.
|
||||||
|
func (t Tag) IsPrivateUse() bool {
|
||||||
|
return t.str != "" && t.pVariant == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemakeString is used to update t.str in case lang, script or region changed.
|
||||||
|
// It is assumed that pExt and pVariant still point to the start of the
|
||||||
|
// respective parts.
|
||||||
|
func (t *Tag) RemakeString() {
|
||||||
|
if t.str == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
extra := t.str[t.pVariant:]
|
||||||
|
if t.pVariant > 0 {
|
||||||
|
extra = extra[1:]
|
||||||
|
}
|
||||||
|
if t.equalTags(Und) && strings.HasPrefix(extra, "x-") {
|
||||||
|
t.str = extra
|
||||||
|
t.pVariant = 0
|
||||||
|
t.pExt = 0
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases.
|
||||||
|
b := buf[:t.genCoreBytes(buf[:])]
|
||||||
|
if extra != "" {
|
||||||
|
diff := len(b) - int(t.pVariant)
|
||||||
|
b = append(b, '-')
|
||||||
|
b = append(b, extra...)
|
||||||
|
t.pVariant = uint8(int(t.pVariant) + diff)
|
||||||
|
t.pExt = uint16(int(t.pExt) + diff)
|
||||||
|
} else {
|
||||||
|
t.pVariant = uint8(len(b))
|
||||||
|
t.pExt = uint16(len(b))
|
||||||
|
}
|
||||||
|
t.str = string(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// genCoreBytes writes a string for the base languages, script and region tags
|
||||||
|
// to the given buffer and returns the number of bytes written. It will never
|
||||||
|
// write more than maxCoreSize bytes.
|
||||||
|
func (t *Tag) genCoreBytes(buf []byte) int {
|
||||||
|
n := t.LangID.StringToBuf(buf[:])
|
||||||
|
if t.ScriptID != 0 {
|
||||||
|
n += copy(buf[n:], "-")
|
||||||
|
n += copy(buf[n:], t.ScriptID.String())
|
||||||
|
}
|
||||||
|
if t.RegionID != 0 {
|
||||||
|
n += copy(buf[n:], "-")
|
||||||
|
n += copy(buf[n:], t.RegionID.String())
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the canonical string representation of the language tag.
|
||||||
|
func (t Tag) String() string {
|
||||||
|
if t.str != "" {
|
||||||
|
return t.str
|
||||||
|
}
|
||||||
|
if t.ScriptID == 0 && t.RegionID == 0 {
|
||||||
|
return t.LangID.String()
|
||||||
|
}
|
||||||
|
buf := [maxCoreSize]byte{}
|
||||||
|
return string(buf[:t.genCoreBytes(buf[:])])
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalText implements encoding.TextMarshaler.
|
||||||
|
func (t Tag) MarshalText() (text []byte, err error) {
|
||||||
|
if t.str != "" {
|
||||||
|
text = append(text, t.str...)
|
||||||
|
} else if t.ScriptID == 0 && t.RegionID == 0 {
|
||||||
|
text = append(text, t.LangID.String()...)
|
||||||
|
} else {
|
||||||
|
buf := [maxCoreSize]byte{}
|
||||||
|
text = buf[:t.genCoreBytes(buf[:])]
|
||||||
|
}
|
||||||
|
return text, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||||
|
func (t *Tag) UnmarshalText(text []byte) error {
|
||||||
|
tag, err := Parse(string(text))
|
||||||
|
*t = tag
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variants returns the part of the tag holding all variants or the empty string
|
||||||
|
// if there are no variants defined.
|
||||||
|
func (t Tag) Variants() string {
|
||||||
|
if t.pVariant == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return t.str[t.pVariant:t.pExt]
|
||||||
|
}
|
||||||
|
|
||||||
|
// VariantOrPrivateUseTags returns variants or private use tags.
|
||||||
|
func (t Tag) VariantOrPrivateUseTags() string {
|
||||||
|
if t.pExt > 0 {
|
||||||
|
return t.str[t.pVariant:t.pExt]
|
||||||
|
}
|
||||||
|
return t.str[t.pVariant:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasString reports whether this tag defines more than just the raw
|
||||||
|
// components.
|
||||||
|
func (t Tag) HasString() bool {
|
||||||
|
return t.str != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
|
||||||
|
// specific language are substituted with fields from the parent language.
|
||||||
|
// The parent for a language may change for newer versions of CLDR.
|
||||||
|
func (t Tag) Parent() Tag {
|
||||||
|
if t.str != "" {
|
||||||
|
// Strip the variants and extensions.
|
||||||
|
b, s, r := t.Raw()
|
||||||
|
t = Tag{LangID: b, ScriptID: s, RegionID: r}
|
||||||
|
if t.RegionID == 0 && t.ScriptID != 0 && t.LangID != 0 {
|
||||||
|
base, _ := addTags(Tag{LangID: t.LangID})
|
||||||
|
if base.ScriptID == t.ScriptID {
|
||||||
|
return Tag{LangID: t.LangID}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
if t.LangID != 0 {
|
||||||
|
if t.RegionID != 0 {
|
||||||
|
maxScript := t.ScriptID
|
||||||
|
if maxScript == 0 {
|
||||||
|
max, _ := addTags(t)
|
||||||
|
maxScript = max.ScriptID
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range parents {
|
||||||
|
if Language(parents[i].lang) == t.LangID && Script(parents[i].maxScript) == maxScript {
|
||||||
|
for _, r := range parents[i].fromRegion {
|
||||||
|
if Region(r) == t.RegionID {
|
||||||
|
return Tag{
|
||||||
|
LangID: t.LangID,
|
||||||
|
ScriptID: Script(parents[i].script),
|
||||||
|
RegionID: Region(parents[i].toRegion),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Strip the script if it is the default one.
|
||||||
|
base, _ := addTags(Tag{LangID: t.LangID})
|
||||||
|
if base.ScriptID != maxScript {
|
||||||
|
return Tag{LangID: t.LangID, ScriptID: maxScript}
|
||||||
|
}
|
||||||
|
return Tag{LangID: t.LangID}
|
||||||
|
} else if t.ScriptID != 0 {
|
||||||
|
// The parent for an base-script pair with a non-default script is
|
||||||
|
// "und" instead of the base language.
|
||||||
|
base, _ := addTags(Tag{LangID: t.LangID})
|
||||||
|
if base.ScriptID != t.ScriptID {
|
||||||
|
return Und
|
||||||
|
}
|
||||||
|
return Tag{LangID: t.LangID}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Und
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseExtension parses s as an extension and returns it on success.
|
||||||
|
func ParseExtension(s string) (ext string, err error) {
|
||||||
|
scan := makeScannerString(s)
|
||||||
|
var end int
|
||||||
|
if n := len(scan.token); n != 1 {
|
||||||
|
return "", ErrSyntax
|
||||||
|
}
|
||||||
|
scan.toLower(0, len(scan.b))
|
||||||
|
end = parseExtension(&scan)
|
||||||
|
if end != len(s) {
|
||||||
|
return "", ErrSyntax
|
||||||
|
}
|
||||||
|
return string(scan.b), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasVariants reports whether t has variants.
|
||||||
|
func (t Tag) HasVariants() bool {
|
||||||
|
return uint16(t.pVariant) < t.pExt
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasExtensions reports whether t has extensions.
|
||||||
|
func (t Tag) HasExtensions() bool {
|
||||||
|
return int(t.pExt) < len(t.str)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extension returns the extension of type x for tag t. It will return
|
||||||
|
// false for ok if t does not have the requested extension. The returned
|
||||||
|
// extension will be invalid in this case.
|
||||||
|
func (t Tag) Extension(x byte) (ext string, ok bool) {
|
||||||
|
for i := int(t.pExt); i < len(t.str)-1; {
|
||||||
|
var ext string
|
||||||
|
i, ext = getExtension(t.str, i)
|
||||||
|
if ext[0] == x {
|
||||||
|
return ext, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extensions returns all extensions of t.
|
||||||
|
func (t Tag) Extensions() []string {
|
||||||
|
e := []string{}
|
||||||
|
for i := int(t.pExt); i < len(t.str)-1; {
|
||||||
|
var ext string
|
||||||
|
i, ext = getExtension(t.str, i)
|
||||||
|
e = append(e, ext)
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeForKey returns the type associated with the given key, where key and type
|
||||||
|
// are of the allowed values defined for the Unicode locale extension ('u') in
|
||||||
|
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
|
||||||
|
// TypeForKey will traverse the inheritance chain to get the correct value.
|
||||||
|
func (t Tag) TypeForKey(key string) string {
|
||||||
|
if start, end, _ := t.findTypeForKey(key); end != start {
|
||||||
|
return t.str[start:end]
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
errPrivateUse = errors.New("cannot set a key on a private use tag")
|
||||||
|
errInvalidArguments = errors.New("invalid key or type")
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetTypeForKey returns a new Tag with the key set to type, where key and type
|
||||||
|
// are of the allowed values defined for the Unicode locale extension ('u') in
|
||||||
|
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
|
||||||
|
// An empty value removes an existing pair with the same key.
|
||||||
|
func (t Tag) SetTypeForKey(key, value string) (Tag, error) {
|
||||||
|
if t.IsPrivateUse() {
|
||||||
|
return t, errPrivateUse
|
||||||
|
}
|
||||||
|
if len(key) != 2 {
|
||||||
|
return t, errInvalidArguments
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the setting if value is "".
|
||||||
|
if value == "" {
|
||||||
|
start, end, _ := t.findTypeForKey(key)
|
||||||
|
if start != end {
|
||||||
|
// Remove key tag and leading '-'.
|
||||||
|
start -= 4
|
||||||
|
|
||||||
|
// Remove a possible empty extension.
|
||||||
|
if (end == len(t.str) || t.str[end+2] == '-') && t.str[start-2] == '-' {
|
||||||
|
start -= 2
|
||||||
|
}
|
||||||
|
if start == int(t.pVariant) && end == len(t.str) {
|
||||||
|
t.str = ""
|
||||||
|
t.pVariant, t.pExt = 0, 0
|
||||||
|
} else {
|
||||||
|
t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(value) < 3 || len(value) > 8 {
|
||||||
|
return t, errInvalidArguments
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
buf [maxCoreSize + maxSimpleUExtensionSize]byte
|
||||||
|
uStart int // start of the -u extension.
|
||||||
|
)
|
||||||
|
|
||||||
|
// Generate the tag string if needed.
|
||||||
|
if t.str == "" {
|
||||||
|
uStart = t.genCoreBytes(buf[:])
|
||||||
|
buf[uStart] = '-'
|
||||||
|
uStart++
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create new key-type pair and parse it to verify.
|
||||||
|
b := buf[uStart:]
|
||||||
|
copy(b, "u-")
|
||||||
|
copy(b[2:], key)
|
||||||
|
b[4] = '-'
|
||||||
|
b = b[:5+copy(b[5:], value)]
|
||||||
|
scan := makeScanner(b)
|
||||||
|
if parseExtensions(&scan); scan.err != nil {
|
||||||
|
return t, scan.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Assemble the replacement string.
|
||||||
|
if t.str == "" {
|
||||||
|
t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1)
|
||||||
|
t.str = string(buf[:uStart+len(b)])
|
||||||
|
} else {
|
||||||
|
s := t.str
|
||||||
|
start, end, hasExt := t.findTypeForKey(key)
|
||||||
|
if start == end {
|
||||||
|
if hasExt {
|
||||||
|
b = b[2:]
|
||||||
|
}
|
||||||
|
t.str = fmt.Sprintf("%s-%s%s", s[:start], b, s[end:])
|
||||||
|
} else {
|
||||||
|
t.str = fmt.Sprintf("%s%s%s", s[:start], value, s[end:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// findKeyAndType returns the start and end position for the type corresponding
|
||||||
|
// to key or the point at which to insert the key-value pair if the type
|
||||||
|
// wasn't found. The hasExt return value reports whether an -u extension was present.
|
||||||
|
// Note: the extensions are typically very small and are likely to contain
|
||||||
|
// only one key-type pair.
|
||||||
|
func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) {
|
||||||
|
p := int(t.pExt)
|
||||||
|
if len(key) != 2 || p == len(t.str) || p == 0 {
|
||||||
|
return p, p, false
|
||||||
|
}
|
||||||
|
s := t.str
|
||||||
|
|
||||||
|
// Find the correct extension.
|
||||||
|
for p++; s[p] != 'u'; p++ {
|
||||||
|
if s[p] > 'u' {
|
||||||
|
p--
|
||||||
|
return p, p, false
|
||||||
|
}
|
||||||
|
if p = nextExtension(s, p); p == len(s) {
|
||||||
|
return len(s), len(s), false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Proceed to the hyphen following the extension name.
|
||||||
|
p++
|
||||||
|
|
||||||
|
// curKey is the key currently being processed.
|
||||||
|
curKey := ""
|
||||||
|
|
||||||
|
// Iterate over keys until we get the end of a section.
|
||||||
|
for {
|
||||||
|
// p points to the hyphen preceding the current token.
|
||||||
|
if p3 := p + 3; s[p3] == '-' {
|
||||||
|
// Found a key.
|
||||||
|
// Check whether we just processed the key that was requested.
|
||||||
|
if curKey == key {
|
||||||
|
return start, p, true
|
||||||
|
}
|
||||||
|
// Set to the next key and continue scanning type tokens.
|
||||||
|
curKey = s[p+1 : p3]
|
||||||
|
if curKey > key {
|
||||||
|
return p, p, true
|
||||||
|
}
|
||||||
|
// Start of the type token sequence.
|
||||||
|
start = p + 4
|
||||||
|
// A type is at least 3 characters long.
|
||||||
|
p += 7 // 4 + 3
|
||||||
|
} else {
|
||||||
|
// Attribute or type, which is at least 3 characters long.
|
||||||
|
p += 4
|
||||||
|
}
|
||||||
|
// p points past the third character of a type or attribute.
|
||||||
|
max := p + 5 // maximum length of token plus hyphen.
|
||||||
|
if len(s) < max {
|
||||||
|
max = len(s)
|
||||||
|
}
|
||||||
|
for ; p < max && s[p] != '-'; p++ {
|
||||||
|
}
|
||||||
|
// Bail if we have exhausted all tokens or if the next token starts
|
||||||
|
// a new extension.
|
||||||
|
if p == len(s) || s[p+2] == '-' {
|
||||||
|
if curKey == key {
|
||||||
|
return start, p, true
|
||||||
|
}
|
||||||
|
return p, p, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseBase parses a 2- or 3-letter ISO 639 code.
|
||||||
|
// It returns a ValueError if s is a well-formed but unknown language identifier
|
||||||
|
// or another error if another error occurred.
|
||||||
|
func ParseBase(s string) (Language, error) {
|
||||||
|
if n := len(s); n < 2 || 3 < n {
|
||||||
|
return 0, ErrSyntax
|
||||||
|
}
|
||||||
|
var buf [3]byte
|
||||||
|
return getLangID(buf[:copy(buf[:], s)])
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseScript parses a 4-letter ISO 15924 code.
|
||||||
|
// It returns a ValueError if s is a well-formed but unknown script identifier
|
||||||
|
// or another error if another error occurred.
|
||||||
|
func ParseScript(s string) (Script, error) {
|
||||||
|
if len(s) != 4 {
|
||||||
|
return 0, ErrSyntax
|
||||||
|
}
|
||||||
|
var buf [4]byte
|
||||||
|
return getScriptID(script, buf[:copy(buf[:], s)])
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeM49 returns the Region for the given UN M.49 code.
|
||||||
|
// It returns an error if r is not a valid code.
|
||||||
|
func EncodeM49(r int) (Region, error) {
|
||||||
|
return getRegionM49(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code.
|
||||||
|
// It returns a ValueError if s is a well-formed but unknown region identifier
|
||||||
|
// or another error if another error occurred.
|
||||||
|
func ParseRegion(s string) (Region, error) {
|
||||||
|
if n := len(s); n < 2 || 3 < n {
|
||||||
|
return 0, ErrSyntax
|
||||||
|
}
|
||||||
|
var buf [3]byte
|
||||||
|
return getRegionID(buf[:copy(buf[:], s)])
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCountry returns whether this region is a country or autonomous area. This
|
||||||
|
// includes non-standard definitions from CLDR.
|
||||||
|
func (r Region) IsCountry() bool {
|
||||||
|
if r == 0 || r.IsGroup() || r.IsPrivateUse() && r != _XK {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsGroup returns whether this region defines a collection of regions. This
|
||||||
|
// includes non-standard definitions from CLDR.
|
||||||
|
func (r Region) IsGroup() bool {
|
||||||
|
if r == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return int(regionInclusion[r]) < len(regionContainment)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Contains returns whether Region c is contained by Region r. It returns true
|
||||||
|
// if c == r.
|
||||||
|
func (r Region) Contains(c Region) bool {
|
||||||
|
if r == c {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
g := regionInclusion[r]
|
||||||
|
if g >= nRegionGroups {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
m := regionContainment[g]
|
||||||
|
|
||||||
|
d := regionInclusion[c]
|
||||||
|
b := regionInclusionBits[d]
|
||||||
|
|
||||||
|
// A contained country may belong to multiple disjoint groups. Matching any
|
||||||
|
// of these indicates containment. If the contained region is a group, it
|
||||||
|
// must strictly be a subset.
|
||||||
|
if d >= nRegionGroups {
|
||||||
|
return b&m != 0
|
||||||
|
}
|
||||||
|
return b&^m == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
var errNoTLD = errors.New("language: region is not a valid ccTLD")
|
||||||
|
|
||||||
|
// TLD returns the country code top-level domain (ccTLD). UK is returned for GB.
|
||||||
|
// In all other cases it returns either the region itself or an error.
|
||||||
|
//
|
||||||
|
// This method may return an error for a region for which there exists a
|
||||||
|
// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The
|
||||||
|
// region will already be canonicalized it was obtained from a Tag that was
|
||||||
|
// obtained using any of the default methods.
|
||||||
|
func (r Region) TLD() (Region, error) {
|
||||||
|
// See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the
|
||||||
|
// difference between ISO 3166-1 and IANA ccTLD.
|
||||||
|
if r == _GB {
|
||||||
|
r = _UK
|
||||||
|
}
|
||||||
|
if (r.typ() & ccTLD) == 0 {
|
||||||
|
return 0, errNoTLD
|
||||||
|
}
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Canonicalize returns the region or a possible replacement if the region is
|
||||||
|
// deprecated. It will not return a replacement for deprecated regions that
|
||||||
|
// are split into multiple regions.
|
||||||
|
func (r Region) Canonicalize() Region {
|
||||||
|
if cr := normRegion(r); cr != 0 {
|
||||||
|
return cr
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variant represents a registered variant of a language as defined by BCP 47.
|
||||||
|
type Variant struct {
|
||||||
|
ID uint8
|
||||||
|
str string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseVariant parses and returns a Variant. An error is returned if s is not
|
||||||
|
// a valid variant.
|
||||||
|
func ParseVariant(s string) (Variant, error) {
|
||||||
|
s = strings.ToLower(s)
|
||||||
|
if id, ok := variantIndex[s]; ok {
|
||||||
|
return Variant{id, s}, nil
|
||||||
|
}
|
||||||
|
return Variant{}, NewValueError([]byte(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the string representation of the variant.
|
||||||
|
func (v Variant) String() string {
|
||||||
|
return v.str
|
||||||
|
}
|
@ -17,11 +17,11 @@ import (
|
|||||||
// if it could not be found.
|
// if it could not be found.
|
||||||
func findIndex(idx tag.Index, key []byte, form string) (index int, err error) {
|
func findIndex(idx tag.Index, key []byte, form string) (index int, err error) {
|
||||||
if !tag.FixCase(form, key) {
|
if !tag.FixCase(form, key) {
|
||||||
return 0, errSyntax
|
return 0, ErrSyntax
|
||||||
}
|
}
|
||||||
i := idx.Index(key)
|
i := idx.Index(key)
|
||||||
if i == -1 {
|
if i == -1 {
|
||||||
return 0, mkErrInvalid(key)
|
return 0, NewValueError(key)
|
||||||
}
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
@ -32,38 +32,45 @@ func searchUint(imap []uint16, key uint16) int {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
type langID uint16
|
type Language uint16
|
||||||
|
|
||||||
// getLangID returns the langID of s if s is a canonical subtag
|
// getLangID returns the langID of s if s is a canonical subtag
|
||||||
// or langUnknown if s is not a canonical subtag.
|
// or langUnknown if s is not a canonical subtag.
|
||||||
func getLangID(s []byte) (langID, error) {
|
func getLangID(s []byte) (Language, error) {
|
||||||
if len(s) == 2 {
|
if len(s) == 2 {
|
||||||
return getLangISO2(s)
|
return getLangISO2(s)
|
||||||
}
|
}
|
||||||
return getLangISO3(s)
|
return getLangISO3(s)
|
||||||
}
|
}
|
||||||
|
|
||||||
// mapLang returns the mapped langID of id according to mapping m.
|
// TODO language normalization as well as the AliasMaps could be moved to the
|
||||||
func normLang(id langID) (langID, langAliasType) {
|
// higher level package, but it is a bit tricky to separate the generation.
|
||||||
k := sort.Search(len(langAliasMap), func(i int) bool {
|
|
||||||
return langAliasMap[i].from >= uint16(id)
|
func (id Language) Canonicalize() (Language, AliasType) {
|
||||||
})
|
return normLang(id)
|
||||||
if k < len(langAliasMap) && langAliasMap[k].from == uint16(id) {
|
|
||||||
return langID(langAliasMap[k].to), langAliasTypes[k]
|
|
||||||
}
|
}
|
||||||
return id, langAliasTypeUnknown
|
|
||||||
|
// mapLang returns the mapped langID of id according to mapping m.
|
||||||
|
func normLang(id Language) (Language, AliasType) {
|
||||||
|
k := sort.Search(len(AliasMap), func(i int) bool {
|
||||||
|
return AliasMap[i].From >= uint16(id)
|
||||||
|
})
|
||||||
|
if k < len(AliasMap) && AliasMap[k].From == uint16(id) {
|
||||||
|
return Language(AliasMap[k].To), AliasTypes[k]
|
||||||
|
}
|
||||||
|
return id, AliasTypeUnknown
|
||||||
}
|
}
|
||||||
|
|
||||||
// getLangISO2 returns the langID for the given 2-letter ISO language code
|
// getLangISO2 returns the langID for the given 2-letter ISO language code
|
||||||
// or unknownLang if this does not exist.
|
// or unknownLang if this does not exist.
|
||||||
func getLangISO2(s []byte) (langID, error) {
|
func getLangISO2(s []byte) (Language, error) {
|
||||||
if !tag.FixCase("zz", s) {
|
if !tag.FixCase("zz", s) {
|
||||||
return 0, errSyntax
|
return 0, ErrSyntax
|
||||||
}
|
}
|
||||||
if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 {
|
if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 {
|
||||||
return langID(i), nil
|
return Language(i), nil
|
||||||
}
|
}
|
||||||
return 0, mkErrInvalid(s)
|
return 0, NewValueError(s)
|
||||||
}
|
}
|
||||||
|
|
||||||
const base = 'z' - 'a' + 1
|
const base = 'z' - 'a' + 1
|
||||||
@ -88,7 +95,7 @@ func intToStr(v uint, s []byte) {
|
|||||||
|
|
||||||
// getLangISO3 returns the langID for the given 3-letter ISO language code
|
// getLangISO3 returns the langID for the given 3-letter ISO language code
|
||||||
// or unknownLang if this does not exist.
|
// or unknownLang if this does not exist.
|
||||||
func getLangISO3(s []byte) (langID, error) {
|
func getLangISO3(s []byte) (Language, error) {
|
||||||
if tag.FixCase("und", s) {
|
if tag.FixCase("und", s) {
|
||||||
// first try to match canonical 3-letter entries
|
// first try to match canonical 3-letter entries
|
||||||
for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) {
|
for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) {
|
||||||
@ -96,7 +103,7 @@ func getLangISO3(s []byte) (langID, error) {
|
|||||||
// We treat "und" as special and always translate it to "unspecified".
|
// We treat "und" as special and always translate it to "unspecified".
|
||||||
// Note that ZZ and Zzzz are private use and are not treated as
|
// Note that ZZ and Zzzz are private use and are not treated as
|
||||||
// unspecified by default.
|
// unspecified by default.
|
||||||
id := langID(i)
|
id := Language(i)
|
||||||
if id == nonCanonicalUnd {
|
if id == nonCanonicalUnd {
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
@ -104,26 +111,26 @@ func getLangISO3(s []byte) (langID, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if i := altLangISO3.Index(s); i != -1 {
|
if i := altLangISO3.Index(s); i != -1 {
|
||||||
return langID(altLangIndex[altLangISO3.Elem(i)[3]]), nil
|
return Language(altLangIndex[altLangISO3.Elem(i)[3]]), nil
|
||||||
}
|
}
|
||||||
n := strToInt(s)
|
n := strToInt(s)
|
||||||
if langNoIndex[n/8]&(1<<(n%8)) != 0 {
|
if langNoIndex[n/8]&(1<<(n%8)) != 0 {
|
||||||
return langID(n) + langNoIndexOffset, nil
|
return Language(n) + langNoIndexOffset, nil
|
||||||
}
|
}
|
||||||
// Check for non-canonical uses of ISO3.
|
// Check for non-canonical uses of ISO3.
|
||||||
for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) {
|
for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) {
|
||||||
if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] {
|
if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] {
|
||||||
return langID(i), nil
|
return Language(i), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return 0, mkErrInvalid(s)
|
return 0, NewValueError(s)
|
||||||
}
|
}
|
||||||
return 0, errSyntax
|
return 0, ErrSyntax
|
||||||
}
|
}
|
||||||
|
|
||||||
// stringToBuf writes the string to b and returns the number of bytes
|
// StringToBuf writes the string to b and returns the number of bytes
|
||||||
// written. cap(b) must be >= 3.
|
// written. cap(b) must be >= 3.
|
||||||
func (id langID) stringToBuf(b []byte) int {
|
func (id Language) StringToBuf(b []byte) int {
|
||||||
if id >= langNoIndexOffset {
|
if id >= langNoIndexOffset {
|
||||||
intToStr(uint(id)-langNoIndexOffset, b[:3])
|
intToStr(uint(id)-langNoIndexOffset, b[:3])
|
||||||
return 3
|
return 3
|
||||||
@ -140,7 +147,7 @@ func (id langID) stringToBuf(b []byte) int {
|
|||||||
// String returns the BCP 47 representation of the langID.
|
// String returns the BCP 47 representation of the langID.
|
||||||
// Use b as variable name, instead of id, to ensure the variable
|
// Use b as variable name, instead of id, to ensure the variable
|
||||||
// used is consistent with that of Base in which this type is embedded.
|
// used is consistent with that of Base in which this type is embedded.
|
||||||
func (b langID) String() string {
|
func (b Language) String() string {
|
||||||
if b == 0 {
|
if b == 0 {
|
||||||
return "und"
|
return "und"
|
||||||
} else if b >= langNoIndexOffset {
|
} else if b >= langNoIndexOffset {
|
||||||
@ -157,7 +164,7 @@ func (b langID) String() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ISO3 returns the ISO 639-3 language code.
|
// ISO3 returns the ISO 639-3 language code.
|
||||||
func (b langID) ISO3() string {
|
func (b Language) ISO3() string {
|
||||||
if b == 0 || b >= langNoIndexOffset {
|
if b == 0 || b >= langNoIndexOffset {
|
||||||
return b.String()
|
return b.String()
|
||||||
}
|
}
|
||||||
@ -173,15 +180,24 @@ func (b langID) ISO3() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// IsPrivateUse reports whether this language code is reserved for private use.
|
// IsPrivateUse reports whether this language code is reserved for private use.
|
||||||
func (b langID) IsPrivateUse() bool {
|
func (b Language) IsPrivateUse() bool {
|
||||||
return langPrivateStart <= b && b <= langPrivateEnd
|
return langPrivateStart <= b && b <= langPrivateEnd
|
||||||
}
|
}
|
||||||
|
|
||||||
type regionID uint16
|
// SuppressScript returns the script marked as SuppressScript in the IANA
|
||||||
|
// language tag repository, or 0 if there is no such script.
|
||||||
|
func (b Language) SuppressScript() Script {
|
||||||
|
if b < langNoIndexOffset {
|
||||||
|
return Script(suppressScript[b])
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type Region uint16
|
||||||
|
|
||||||
// getRegionID returns the region id for s if s is a valid 2-letter region code
|
// getRegionID returns the region id for s if s is a valid 2-letter region code
|
||||||
// or unknownRegion.
|
// or unknownRegion.
|
||||||
func getRegionID(s []byte) (regionID, error) {
|
func getRegionID(s []byte) (Region, error) {
|
||||||
if len(s) == 3 {
|
if len(s) == 3 {
|
||||||
if isAlpha(s[0]) {
|
if isAlpha(s[0]) {
|
||||||
return getRegionISO3(s)
|
return getRegionISO3(s)
|
||||||
@ -195,34 +211,34 @@ func getRegionID(s []byte) (regionID, error) {
|
|||||||
|
|
||||||
// getRegionISO2 returns the regionID for the given 2-letter ISO country code
|
// getRegionISO2 returns the regionID for the given 2-letter ISO country code
|
||||||
// or unknownRegion if this does not exist.
|
// or unknownRegion if this does not exist.
|
||||||
func getRegionISO2(s []byte) (regionID, error) {
|
func getRegionISO2(s []byte) (Region, error) {
|
||||||
i, err := findIndex(regionISO, s, "ZZ")
|
i, err := findIndex(regionISO, s, "ZZ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
return regionID(i) + isoRegionOffset, nil
|
return Region(i) + isoRegionOffset, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getRegionISO3 returns the regionID for the given 3-letter ISO country code
|
// getRegionISO3 returns the regionID for the given 3-letter ISO country code
|
||||||
// or unknownRegion if this does not exist.
|
// or unknownRegion if this does not exist.
|
||||||
func getRegionISO3(s []byte) (regionID, error) {
|
func getRegionISO3(s []byte) (Region, error) {
|
||||||
if tag.FixCase("ZZZ", s) {
|
if tag.FixCase("ZZZ", s) {
|
||||||
for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) {
|
for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) {
|
||||||
if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] {
|
if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] {
|
||||||
return regionID(i) + isoRegionOffset, nil
|
return Region(i) + isoRegionOffset, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for i := 0; i < len(altRegionISO3); i += 3 {
|
for i := 0; i < len(altRegionISO3); i += 3 {
|
||||||
if tag.Compare(altRegionISO3[i:i+3], s) == 0 {
|
if tag.Compare(altRegionISO3[i:i+3], s) == 0 {
|
||||||
return regionID(altRegionIDs[i/3]), nil
|
return Region(altRegionIDs[i/3]), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return 0, mkErrInvalid(s)
|
return 0, NewValueError(s)
|
||||||
}
|
}
|
||||||
return 0, errSyntax
|
return 0, ErrSyntax
|
||||||
}
|
}
|
||||||
|
|
||||||
func getRegionM49(n int) (regionID, error) {
|
func getRegionM49(n int) (Region, error) {
|
||||||
if 0 < n && n <= 999 {
|
if 0 < n && n <= 999 {
|
||||||
const (
|
const (
|
||||||
searchBits = 7
|
searchBits = 7
|
||||||
@ -236,7 +252,7 @@ func getRegionM49(n int) (regionID, error) {
|
|||||||
return buf[i] >= val
|
return buf[i] >= val
|
||||||
})
|
})
|
||||||
if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val {
|
if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val {
|
||||||
return regionID(r & regionMask), nil
|
return Region(r & regionMask), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
var e ValueError
|
var e ValueError
|
||||||
@ -247,13 +263,13 @@ func getRegionM49(n int) (regionID, error) {
|
|||||||
// normRegion returns a region if r is deprecated or 0 otherwise.
|
// normRegion returns a region if r is deprecated or 0 otherwise.
|
||||||
// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ).
|
// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ).
|
||||||
// TODO: consider mapping split up regions to new most populous one (like CLDR).
|
// TODO: consider mapping split up regions to new most populous one (like CLDR).
|
||||||
func normRegion(r regionID) regionID {
|
func normRegion(r Region) Region {
|
||||||
m := regionOldMap
|
m := regionOldMap
|
||||||
k := sort.Search(len(m), func(i int) bool {
|
k := sort.Search(len(m), func(i int) bool {
|
||||||
return m[i].from >= uint16(r)
|
return m[i].From >= uint16(r)
|
||||||
})
|
})
|
||||||
if k < len(m) && m[k].from == uint16(r) {
|
if k < len(m) && m[k].From == uint16(r) {
|
||||||
return regionID(m[k].to)
|
return Region(m[k].To)
|
||||||
}
|
}
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
@ -264,13 +280,13 @@ const (
|
|||||||
bcp47Region
|
bcp47Region
|
||||||
)
|
)
|
||||||
|
|
||||||
func (r regionID) typ() byte {
|
func (r Region) typ() byte {
|
||||||
return regionTypes[r]
|
return regionTypes[r]
|
||||||
}
|
}
|
||||||
|
|
||||||
// String returns the BCP 47 representation for the region.
|
// String returns the BCP 47 representation for the region.
|
||||||
// It returns "ZZ" for an unspecified region.
|
// It returns "ZZ" for an unspecified region.
|
||||||
func (r regionID) String() string {
|
func (r Region) String() string {
|
||||||
if r < isoRegionOffset {
|
if r < isoRegionOffset {
|
||||||
if r == 0 {
|
if r == 0 {
|
||||||
return "ZZ"
|
return "ZZ"
|
||||||
@ -284,7 +300,7 @@ func (r regionID) String() string {
|
|||||||
// ISO3 returns the 3-letter ISO code of r.
|
// ISO3 returns the 3-letter ISO code of r.
|
||||||
// Note that not all regions have a 3-letter ISO code.
|
// Note that not all regions have a 3-letter ISO code.
|
||||||
// In such cases this method returns "ZZZ".
|
// In such cases this method returns "ZZZ".
|
||||||
func (r regionID) ISO3() string {
|
func (r Region) ISO3() string {
|
||||||
if r < isoRegionOffset {
|
if r < isoRegionOffset {
|
||||||
return "ZZZ"
|
return "ZZZ"
|
||||||
}
|
}
|
||||||
@ -301,29 +317,29 @@ func (r regionID) ISO3() string {
|
|||||||
|
|
||||||
// M49 returns the UN M.49 encoding of r, or 0 if this encoding
|
// M49 returns the UN M.49 encoding of r, or 0 if this encoding
|
||||||
// is not defined for r.
|
// is not defined for r.
|
||||||
func (r regionID) M49() int {
|
func (r Region) M49() int {
|
||||||
return int(m49[r])
|
return int(m49[r])
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This
|
// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This
|
||||||
// may include private-use tags that are assigned by CLDR and used in this
|
// may include private-use tags that are assigned by CLDR and used in this
|
||||||
// implementation. So IsPrivateUse and IsCountry can be simultaneously true.
|
// implementation. So IsPrivateUse and IsCountry can be simultaneously true.
|
||||||
func (r regionID) IsPrivateUse() bool {
|
func (r Region) IsPrivateUse() bool {
|
||||||
return r.typ()&iso3166UserAssigned != 0
|
return r.typ()&iso3166UserAssigned != 0
|
||||||
}
|
}
|
||||||
|
|
||||||
type scriptID uint8
|
type Script uint8
|
||||||
|
|
||||||
// getScriptID returns the script id for string s. It assumes that s
|
// getScriptID returns the script id for string s. It assumes that s
|
||||||
// is of the format [A-Z][a-z]{3}.
|
// is of the format [A-Z][a-z]{3}.
|
||||||
func getScriptID(idx tag.Index, s []byte) (scriptID, error) {
|
func getScriptID(idx tag.Index, s []byte) (Script, error) {
|
||||||
i, err := findIndex(idx, s, "Zzzz")
|
i, err := findIndex(idx, s, "Zzzz")
|
||||||
return scriptID(i), err
|
return Script(i), err
|
||||||
}
|
}
|
||||||
|
|
||||||
// String returns the script code in title case.
|
// String returns the script code in title case.
|
||||||
// It returns "Zzzz" for an unspecified script.
|
// It returns "Zzzz" for an unspecified script.
|
||||||
func (s scriptID) String() string {
|
func (s Script) String() string {
|
||||||
if s == 0 {
|
if s == 0 {
|
||||||
return "Zzzz"
|
return "Zzzz"
|
||||||
}
|
}
|
||||||
@ -331,7 +347,7 @@ func (s scriptID) String() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// IsPrivateUse reports whether this script code is reserved for private use.
|
// IsPrivateUse reports whether this script code is reserved for private use.
|
||||||
func (s scriptID) IsPrivateUse() bool {
|
func (s Script) IsPrivateUse() bool {
|
||||||
return _Qaaa <= s && s <= _Qabx
|
return _Qaaa <= s && s <= _Qabx
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -389,7 +405,7 @@ func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) {
|
|||||||
if v < 0 {
|
if v < 0 {
|
||||||
return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true
|
return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true
|
||||||
}
|
}
|
||||||
t.lang = langID(v)
|
t.LangID = Language(v)
|
||||||
return t, true
|
return t, true
|
||||||
}
|
}
|
||||||
return t, false
|
return t, false
|
226
vendor/golang.org/x/text/internal/language/match.go
generated
vendored
Normal file
226
vendor/golang.org/x/text/internal/language/match.go
generated
vendored
Normal file
@ -0,0 +1,226 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
type scriptRegionFlags uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
isList = 1 << iota
|
||||||
|
scriptInFrom
|
||||||
|
regionInFrom
|
||||||
|
)
|
||||||
|
|
||||||
|
func (t *Tag) setUndefinedLang(id Language) {
|
||||||
|
if t.LangID == 0 {
|
||||||
|
t.LangID = id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Tag) setUndefinedScript(id Script) {
|
||||||
|
if t.ScriptID == 0 {
|
||||||
|
t.ScriptID = id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Tag) setUndefinedRegion(id Region) {
|
||||||
|
if t.RegionID == 0 || t.RegionID.Contains(id) {
|
||||||
|
t.RegionID = id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrMissingLikelyTagsData indicates no information was available
|
||||||
|
// to compute likely values of missing tags.
|
||||||
|
var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
|
||||||
|
|
||||||
|
// addLikelySubtags sets subtags to their most likely value, given the locale.
|
||||||
|
// In most cases this means setting fields for unknown values, but in some
|
||||||
|
// cases it may alter a value. It returns an ErrMissingLikelyTagsData error
|
||||||
|
// if the given locale cannot be expanded.
|
||||||
|
func (t Tag) addLikelySubtags() (Tag, error) {
|
||||||
|
id, err := addTags(t)
|
||||||
|
if err != nil {
|
||||||
|
return t, err
|
||||||
|
} else if id.equalTags(t) {
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
id.RemakeString()
|
||||||
|
return id, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// specializeRegion attempts to specialize a group region.
|
||||||
|
func specializeRegion(t *Tag) bool {
|
||||||
|
if i := regionInclusion[t.RegionID]; i < nRegionGroups {
|
||||||
|
x := likelyRegionGroup[i]
|
||||||
|
if Language(x.lang) == t.LangID && Script(x.script) == t.ScriptID {
|
||||||
|
t.RegionID = Region(x.region)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Maximize returns a new tag with missing tags filled in.
|
||||||
|
func (t Tag) Maximize() (Tag, error) {
|
||||||
|
return addTags(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func addTags(t Tag) (Tag, error) {
|
||||||
|
// We leave private use identifiers alone.
|
||||||
|
if t.IsPrivateUse() {
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
if t.ScriptID != 0 && t.RegionID != 0 {
|
||||||
|
if t.LangID != 0 {
|
||||||
|
// already fully specified
|
||||||
|
specializeRegion(&t)
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
// Search matches for und-script-region. Note that for these cases
|
||||||
|
// region will never be a group so there is no need to check for this.
|
||||||
|
list := likelyRegion[t.RegionID : t.RegionID+1]
|
||||||
|
if x := list[0]; x.flags&isList != 0 {
|
||||||
|
list = likelyRegionList[x.lang : x.lang+uint16(x.script)]
|
||||||
|
}
|
||||||
|
for _, x := range list {
|
||||||
|
// Deviating from the spec. See match_test.go for details.
|
||||||
|
if Script(x.script) == t.ScriptID {
|
||||||
|
t.setUndefinedLang(Language(x.lang))
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if t.LangID != 0 {
|
||||||
|
// Search matches for lang-script and lang-region, where lang != und.
|
||||||
|
if t.LangID < langNoIndexOffset {
|
||||||
|
x := likelyLang[t.LangID]
|
||||||
|
if x.flags&isList != 0 {
|
||||||
|
list := likelyLangList[x.region : x.region+uint16(x.script)]
|
||||||
|
if t.ScriptID != 0 {
|
||||||
|
for _, x := range list {
|
||||||
|
if Script(x.script) == t.ScriptID && x.flags&scriptInFrom != 0 {
|
||||||
|
t.setUndefinedRegion(Region(x.region))
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if t.RegionID != 0 {
|
||||||
|
count := 0
|
||||||
|
goodScript := true
|
||||||
|
tt := t
|
||||||
|
for _, x := range list {
|
||||||
|
// We visit all entries for which the script was not
|
||||||
|
// defined, including the ones where the region was not
|
||||||
|
// defined. This allows for proper disambiguation within
|
||||||
|
// regions.
|
||||||
|
if x.flags&scriptInFrom == 0 && t.RegionID.Contains(Region(x.region)) {
|
||||||
|
tt.RegionID = Region(x.region)
|
||||||
|
tt.setUndefinedScript(Script(x.script))
|
||||||
|
goodScript = goodScript && tt.ScriptID == Script(x.script)
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if count == 1 {
|
||||||
|
return tt, nil
|
||||||
|
}
|
||||||
|
// Even if we fail to find a unique Region, we might have
|
||||||
|
// an unambiguous script.
|
||||||
|
if goodScript {
|
||||||
|
t.ScriptID = tt.ScriptID
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Search matches for und-script.
|
||||||
|
if t.ScriptID != 0 {
|
||||||
|
x := likelyScript[t.ScriptID]
|
||||||
|
if x.region != 0 {
|
||||||
|
t.setUndefinedRegion(Region(x.region))
|
||||||
|
t.setUndefinedLang(Language(x.lang))
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Search matches for und-region. If und-script-region exists, it would
|
||||||
|
// have been found earlier.
|
||||||
|
if t.RegionID != 0 {
|
||||||
|
if i := regionInclusion[t.RegionID]; i < nRegionGroups {
|
||||||
|
x := likelyRegionGroup[i]
|
||||||
|
if x.region != 0 {
|
||||||
|
t.setUndefinedLang(Language(x.lang))
|
||||||
|
t.setUndefinedScript(Script(x.script))
|
||||||
|
t.RegionID = Region(x.region)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
x := likelyRegion[t.RegionID]
|
||||||
|
if x.flags&isList != 0 {
|
||||||
|
x = likelyRegionList[x.lang]
|
||||||
|
}
|
||||||
|
if x.script != 0 && x.flags != scriptInFrom {
|
||||||
|
t.setUndefinedLang(Language(x.lang))
|
||||||
|
t.setUndefinedScript(Script(x.script))
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search matches for lang.
|
||||||
|
if t.LangID < langNoIndexOffset {
|
||||||
|
x := likelyLang[t.LangID]
|
||||||
|
if x.flags&isList != 0 {
|
||||||
|
x = likelyLangList[x.region]
|
||||||
|
}
|
||||||
|
if x.region != 0 {
|
||||||
|
t.setUndefinedScript(Script(x.script))
|
||||||
|
t.setUndefinedRegion(Region(x.region))
|
||||||
|
}
|
||||||
|
specializeRegion(&t)
|
||||||
|
if t.LangID == 0 {
|
||||||
|
t.LangID = _en // default language
|
||||||
|
}
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
return t, ErrMissingLikelyTagsData
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Tag) setTagsFrom(id Tag) {
|
||||||
|
t.LangID = id.LangID
|
||||||
|
t.ScriptID = id.ScriptID
|
||||||
|
t.RegionID = id.RegionID
|
||||||
|
}
|
||||||
|
|
||||||
|
// minimize removes the region or script subtags from t such that
|
||||||
|
// t.addLikelySubtags() == t.minimize().addLikelySubtags().
|
||||||
|
func (t Tag) minimize() (Tag, error) {
|
||||||
|
t, err := minimizeTags(t)
|
||||||
|
if err != nil {
|
||||||
|
return t, err
|
||||||
|
}
|
||||||
|
t.RemakeString()
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// minimizeTags mimics the behavior of the ICU 51 C implementation.
|
||||||
|
func minimizeTags(t Tag) (Tag, error) {
|
||||||
|
if t.equalTags(Und) {
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
max, err := addTags(t)
|
||||||
|
if err != nil {
|
||||||
|
return t, err
|
||||||
|
}
|
||||||
|
for _, id := range [...]Tag{
|
||||||
|
{LangID: t.LangID},
|
||||||
|
{LangID: t.LangID, RegionID: t.RegionID},
|
||||||
|
{LangID: t.LangID, ScriptID: t.ScriptID},
|
||||||
|
} {
|
||||||
|
if x, err := addTags(id); err == nil && max.equalTags(x) {
|
||||||
|
t.setTagsFrom(id)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return t, nil
|
||||||
|
}
|
594
vendor/golang.org/x/text/internal/language/parse.go
generated
vendored
Normal file
594
vendor/golang.org/x/text/internal/language/parse.go
generated
vendored
Normal file
@ -0,0 +1,594 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"golang.org/x/text/internal/tag"
|
||||||
|
)
|
||||||
|
|
||||||
|
// isAlpha returns true if the byte is not a digit.
|
||||||
|
// b must be an ASCII letter or digit.
|
||||||
|
func isAlpha(b byte) bool {
|
||||||
|
return b > '9'
|
||||||
|
}
|
||||||
|
|
||||||
|
// isAlphaNum returns true if the string contains only ASCII letters or digits.
|
||||||
|
func isAlphaNum(s []byte) bool {
|
||||||
|
for _, c := range s {
|
||||||
|
if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrSyntax is returned by any of the parsing functions when the
|
||||||
|
// input is not well-formed, according to BCP 47.
|
||||||
|
// TODO: return the position at which the syntax error occurred?
|
||||||
|
var ErrSyntax = errors.New("language: tag is not well-formed")
|
||||||
|
|
||||||
|
// ErrDuplicateKey is returned when a tag contains the same key twice with
|
||||||
|
// different values in the -u section.
var ErrDuplicateKey = errors.New("language: different values for same key in -u extension")

// ValueError is returned by any of the parsing functions when the
// input is well-formed but the respective subtag is not recognized
// as a valid value.
type ValueError struct {
	v [8]byte
}

// NewValueError creates a new ValueError.
func NewValueError(tag []byte) ValueError {
	var e ValueError
	copy(e.v[:], tag)
	return e
}

func (e ValueError) tag() []byte {
	n := bytes.IndexByte(e.v[:], 0)
	if n == -1 {
		n = 8
	}
	return e.v[:n]
}

// Error implements the error interface.
func (e ValueError) Error() string {
	return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag())
}

// Subtag returns the subtag for which the error occurred.
func (e ValueError) Subtag() string {
	return string(e.tag())
}

// scanner is used to scan BCP 47 tokens, which are separated by _ or -.
type scanner struct {
	b     []byte
	bytes [max99thPercentileSize]byte
	token []byte
	start int // start position of the current token
	end   int // end position of the current token
	next  int // next point for scan
	err   error
	done  bool
}

func makeScannerString(s string) scanner {
	scan := scanner{}
	if len(s) <= len(scan.bytes) {
		scan.b = scan.bytes[:copy(scan.bytes[:], s)]
	} else {
		scan.b = []byte(s)
	}
	scan.init()
	return scan
}

// makeScanner returns a scanner using b as the input buffer.
// b is not copied and may be modified by the scanner routines.
func makeScanner(b []byte) scanner {
	scan := scanner{b: b}
	scan.init()
	return scan
}

func (s *scanner) init() {
	for i, c := range s.b {
		if c == '_' {
			s.b[i] = '-'
		}
	}
	s.scan()
}

// restToLower converts the string between start and end to lower case.
func (s *scanner) toLower(start, end int) {
	for i := start; i < end; i++ {
		c := s.b[i]
		if 'A' <= c && c <= 'Z' {
			s.b[i] += 'a' - 'A'
		}
	}
}

func (s *scanner) setError(e error) {
	if s.err == nil || (e == ErrSyntax && s.err != ErrSyntax) {
		s.err = e
	}
}

// resizeRange shrinks or grows the array at position oldStart such that
// a new string of size newSize can fit between oldStart and oldEnd.
// Sets the scan point to after the resized range.
func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) {
	s.start = oldStart
	if end := oldStart + newSize; end != oldEnd {
		diff := end - oldEnd
		if end < cap(s.b) {
			b := make([]byte, len(s.b)+diff)
			copy(b, s.b[:oldStart])
			copy(b[end:], s.b[oldEnd:])
			s.b = b
		} else {
			s.b = append(s.b[end:], s.b[oldEnd:]...)
		}
		s.next = end + (s.next - s.end)
		s.end = end
	}
}

// replace replaces the current token with repl.
func (s *scanner) replace(repl string) {
	s.resizeRange(s.start, s.end, len(repl))
	copy(s.b[s.start:], repl)
}

// gobble removes the current token from the input.
// Caller must call scan after calling gobble.
func (s *scanner) gobble(e error) {
	s.setError(e)
	if s.start == 0 {
		s.b = s.b[:+copy(s.b, s.b[s.next:])]
		s.end = 0
	} else {
		s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])]
		s.end = s.start - 1
	}
	s.next = s.start
}

// deleteRange removes the given range from s.b before the current token.
func (s *scanner) deleteRange(start, end int) {
	s.b = s.b[:start+copy(s.b[start:], s.b[end:])]
	diff := end - start
	s.next -= diff
	s.start -= diff
	s.end -= diff
}

// scan parses the next token of a BCP 47 string. Tokens that are larger
// than 8 characters or include non-alphanumeric characters result in an error
// and are gobbled and removed from the output.
// It returns the end position of the last token consumed.
func (s *scanner) scan() (end int) {
	end = s.end
	s.token = nil
	for s.start = s.next; s.next < len(s.b); {
		i := bytes.IndexByte(s.b[s.next:], '-')
		if i == -1 {
			s.end = len(s.b)
			s.next = len(s.b)
			i = s.end - s.start
		} else {
			s.end = s.next + i
			s.next = s.end + 1
		}
		token := s.b[s.start:s.end]
		if i < 1 || i > 8 || !isAlphaNum(token) {
			s.gobble(ErrSyntax)
			continue
		}
		s.token = token
		return end
	}
	if n := len(s.b); n > 0 && s.b[n-1] == '-' {
		s.setError(ErrSyntax)
		s.b = s.b[:len(s.b)-1]
	}
	s.done = true
	return end
}

// acceptMinSize parses multiple tokens of the given size or greater.
// It returns the end position of the last token consumed.
func (s *scanner) acceptMinSize(min int) (end int) {
	end = s.end
	s.scan()
	for ; len(s.token) >= min; s.scan() {
		end = s.end
	}
	return end
}

// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
// failed it returns an error and any part of the tag that could be parsed.
// If parsing succeeded but an unknown value was found, it returns
// ValueError. The Tag returned in this case is just stripped of the unknown
// value. All other values are preserved. It accepts tags in the BCP 47 format
// and extensions to this standard defined in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
func Parse(s string) (t Tag, err error) {
	// TODO: consider supporting old-style locale key-value pairs.
	if s == "" {
		return Und, ErrSyntax
	}
	if len(s) <= maxAltTaglen {
		b := [maxAltTaglen]byte{}
		for i, c := range s {
			// Generating invalid UTF-8 is okay as it won't match.
			if 'A' <= c && c <= 'Z' {
				c += 'a' - 'A'
			} else if c == '_' {
				c = '-'
			}
			b[i] = byte(c)
		}
		if t, ok := grandfathered(b); ok {
			return t, nil
		}
	}
	scan := makeScannerString(s)
	return parse(&scan, s)
}

func parse(scan *scanner, s string) (t Tag, err error) {
	t = Und
	var end int
	if n := len(scan.token); n <= 1 {
		scan.toLower(0, len(scan.b))
		if n == 0 || scan.token[0] != 'x' {
			return t, ErrSyntax
		}
		end = parseExtensions(scan)
	} else if n >= 4 {
		return Und, ErrSyntax
	} else { // the usual case
		t, end = parseTag(scan)
		if n := len(scan.token); n == 1 {
			t.pExt = uint16(end)
			end = parseExtensions(scan)
		} else if end < len(scan.b) {
			scan.setError(ErrSyntax)
			scan.b = scan.b[:end]
		}
	}
	if int(t.pVariant) < len(scan.b) {
		if end < len(s) {
			s = s[:end]
		}
		if len(s) > 0 && tag.Compare(s, scan.b) == 0 {
			t.str = s
		} else {
			t.str = string(scan.b)
		}
	} else {
		t.pVariant, t.pExt = 0, 0
	}
	return t, scan.err
}

// parseTag parses language, script, region and variants.
// It returns a Tag and the end position in the input that was parsed.
func parseTag(scan *scanner) (t Tag, end int) {
	var e error
	// TODO: set an error if an unknown lang, script or region is encountered.
	t.LangID, e = getLangID(scan.token)
	scan.setError(e)
	scan.replace(t.LangID.String())
	langStart := scan.start
	end = scan.scan()
	for len(scan.token) == 3 && isAlpha(scan.token[0]) {
		// From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent
		// to a tag of the form <extlang>.
		lang, e := getLangID(scan.token)
		if lang != 0 {
			t.LangID = lang
			copy(scan.b[langStart:], lang.String())
			scan.b[langStart+3] = '-'
			scan.start = langStart + 4
		}
		scan.gobble(e)
		end = scan.scan()
	}
	if len(scan.token) == 4 && isAlpha(scan.token[0]) {
		t.ScriptID, e = getScriptID(script, scan.token)
		if t.ScriptID == 0 {
			scan.gobble(e)
		}
		end = scan.scan()
	}
	if n := len(scan.token); n >= 2 && n <= 3 {
		t.RegionID, e = getRegionID(scan.token)
		if t.RegionID == 0 {
			scan.gobble(e)
		} else {
			scan.replace(t.RegionID.String())
		}
		end = scan.scan()
	}
	scan.toLower(scan.start, len(scan.b))
	t.pVariant = byte(end)
	end = parseVariants(scan, end, t)
	t.pExt = uint16(end)
	return t, end
}

var separator = []byte{'-'}

// parseVariants scans tokens as long as each token is a valid variant string.
// Duplicate variants are removed.
func parseVariants(scan *scanner, end int, t Tag) int {
	start := scan.start
	varIDBuf := [4]uint8{}
	variantBuf := [4][]byte{}
	varID := varIDBuf[:0]
	variant := variantBuf[:0]
	last := -1
	needSort := false
	for ; len(scan.token) >= 4; scan.scan() {
		// TODO: measure the impact of needing this conversion and redesign
		// the data structure if there is an issue.
		v, ok := variantIndex[string(scan.token)]
		if !ok {
			// unknown variant
			// TODO: allow user-defined variants?
			scan.gobble(NewValueError(scan.token))
			continue
		}
		varID = append(varID, v)
		variant = append(variant, scan.token)
		if !needSort {
			if last < int(v) {
				last = int(v)
			} else {
				needSort = true
				// There are no legal combinations of more than 7 variants
				// (and this is by no means a useful sequence).
				const maxVariants = 8
				if len(varID) > maxVariants {
					break
				}
			}
		}
		end = scan.end
	}
	if needSort {
		sort.Sort(variantsSort{varID, variant})
		k, l := 0, -1
		for i, v := range varID {
			w := int(v)
			if l == w {
				// Remove duplicates.
				continue
			}
			varID[k] = varID[i]
			variant[k] = variant[i]
			k++
			l = w
		}
		if str := bytes.Join(variant[:k], separator); len(str) == 0 {
			end = start - 1
		} else {
			scan.resizeRange(start, end, len(str))
			copy(scan.b[scan.start:], str)
			end = scan.end
		}
	}
	return end
}

type variantsSort struct {
	i []uint8
	v [][]byte
}

func (s variantsSort) Len() int {
	return len(s.i)
}

func (s variantsSort) Swap(i, j int) {
	s.i[i], s.i[j] = s.i[j], s.i[i]
	s.v[i], s.v[j] = s.v[j], s.v[i]
}

func (s variantsSort) Less(i, j int) bool {
	return s.i[i] < s.i[j]
}

type bytesSort struct {
	b [][]byte
	n int // first n bytes to compare
}

func (b bytesSort) Len() int {
	return len(b.b)
}

func (b bytesSort) Swap(i, j int) {
	b.b[i], b.b[j] = b.b[j], b.b[i]
}

func (b bytesSort) Less(i, j int) bool {
	for k := 0; k < b.n; k++ {
		if b.b[i][k] == b.b[j][k] {
			continue
		}
		return b.b[i][k] < b.b[j][k]
	}
	return false
}

// parseExtensions parses and normalizes the extensions in the buffer.
// It returns the last position of scan.b that is part of any extension.
// It also trims scan.b to remove excess parts accordingly.
func parseExtensions(scan *scanner) int {
	start := scan.start
	exts := [][]byte{}
	private := []byte{}
	end := scan.end
	for len(scan.token) == 1 {
		extStart := scan.start
		ext := scan.token[0]
		end = parseExtension(scan)
		extension := scan.b[extStart:end]
		if len(extension) < 3 || (ext != 'x' && len(extension) < 4) {
			scan.setError(ErrSyntax)
			end = extStart
			continue
		} else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) {
			scan.b = scan.b[:end]
			return end
		} else if ext == 'x' {
			private = extension
			break
		}
		exts = append(exts, extension)
	}
	sort.Sort(bytesSort{exts, 1})
	if len(private) > 0 {
		exts = append(exts, private)
	}
	scan.b = scan.b[:start]
	if len(exts) > 0 {
		scan.b = append(scan.b, bytes.Join(exts, separator)...)
	} else if start > 0 {
		// Strip trailing '-'.
		scan.b = scan.b[:start-1]
	}
	return end
}

// parseExtension parses a single extension and returns the position of
// the extension end.
func parseExtension(scan *scanner) int {
	start, end := scan.start, scan.end
	switch scan.token[0] {
	case 'u':
		attrStart := end
		scan.scan()
		for last := []byte{}; len(scan.token) > 2; scan.scan() {
			if bytes.Compare(scan.token, last) != -1 {
				// Attributes are unsorted. Start over from scratch.
				p := attrStart + 1
				scan.next = p
				attrs := [][]byte{}
				for scan.scan(); len(scan.token) > 2; scan.scan() {
					attrs = append(attrs, scan.token)
					end = scan.end
				}
				sort.Sort(bytesSort{attrs, 3})
				copy(scan.b[p:], bytes.Join(attrs, separator))
				break
			}
			last = scan.token
			end = scan.end
		}
		var last, key []byte
		for attrEnd := end; len(scan.token) == 2; last = key {
			key = scan.token
			keyEnd := scan.end
			end = scan.acceptMinSize(3)
			// TODO: check key value validity
			if keyEnd == end || bytes.Compare(key, last) != 1 {
				// We have an invalid key or the keys are not sorted.
				// Start scanning keys from scratch and reorder.
				p := attrEnd + 1
				scan.next = p
				keys := [][]byte{}
				for scan.scan(); len(scan.token) == 2; {
					keyStart, keyEnd := scan.start, scan.end
					end = scan.acceptMinSize(3)
					if keyEnd != end {
						keys = append(keys, scan.b[keyStart:end])
					} else {
						scan.setError(ErrSyntax)
						end = keyStart
					}
				}
				sort.Stable(bytesSort{keys, 2})
				if n := len(keys); n > 0 {
					k := 0
					for i := 1; i < n; i++ {
						if !bytes.Equal(keys[k][:2], keys[i][:2]) {
							k++
							keys[k] = keys[i]
						} else if !bytes.Equal(keys[k], keys[i]) {
							scan.setError(ErrDuplicateKey)
						}
					}
					keys = keys[:k+1]
				}
				reordered := bytes.Join(keys, separator)
				if e := p + len(reordered); e < end {
					scan.deleteRange(e, end)
					end = e
				}
				copy(scan.b[p:], reordered)
				break
			}
		}
	case 't':
		scan.scan()
		if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) {
			_, end = parseTag(scan)
			scan.toLower(start, end)
		}
		for len(scan.token) == 2 && !isAlpha(scan.token[1]) {
			end = scan.acceptMinSize(3)
		}
	case 'x':
		end = scan.acceptMinSize(1)
	default:
		end = scan.acceptMinSize(2)
	}
	return end
}

// getExtension returns the name, body and end position of the extension.
func getExtension(s string, p int) (end int, ext string) {
	if s[p] == '-' {
		p++
	}
	if s[p] == 'x' {
		return len(s), s[p:]
	}
	end = nextExtension(s, p)
	return end, s[p:end]
}

// nextExtension finds the next extension within the string, searching
// for the -<char>- pattern from position p.
// In the vast majority of cases, language tags will have at most
// one extension and extensions tend to be small.
func nextExtension(s string, p int) int {
	for n := len(s) - 3; p < n; {
		if s[p] == '-' {
			if s[p+2] == '-' {
				return p
			}
			p += 3
		} else {
			p++
		}
	}
	return len(s)
}
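As a usage illustration outside the vendored tree, here is a minimal sketch of how this parser's error behavior surfaces through the public golang.org/x/text/language wrapper; the concrete inputs are illustrative assumptions, and the type assertion assumes the public package's exported ValueError interface.

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Parse never panics: it returns whatever part of the tag it could
	// recover together with an error. Underscores are normalized to '-'.
	tag, err := language.Parse("en_US")
	fmt.Println(tag, err)

	// A well-formed but unrecognized subtag ("zzzzz" scans as a variant)
	// is expected to be reported as a ValueError and stripped from the Tag.
	if tag, err := language.Parse("en-Latn-zzzzz"); err != nil {
		if v, ok := err.(language.ValueError); ok {
			fmt.Println(tag, "unknown subtag:", v.Subtag())
		}
	}
}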
3431
vendor/golang.org/x/text/internal/language/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
48
vendor/golang.org/x/text/internal/language/tags.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package language

// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
// It simplifies safe initialization of Tag values.
func MustParse(s string) Tag {
	t, err := Parse(s)
	if err != nil {
		panic(err)
	}
	return t
}

// MustParseBase is like ParseBase, but panics if the given base cannot be parsed.
// It simplifies safe initialization of Base values.
func MustParseBase(s string) Language {
	b, err := ParseBase(s)
	if err != nil {
		panic(err)
	}
	return b
}

// MustParseScript is like ParseScript, but panics if the given script cannot be
// parsed. It simplifies safe initialization of Script values.
func MustParseScript(s string) Script {
	scr, err := ParseScript(s)
	if err != nil {
		panic(err)
	}
	return scr
}

// MustParseRegion is like ParseRegion, but panics if the given region cannot be
// parsed. It simplifies safe initialization of Region values.
func MustParseRegion(s string) Region {
	r, err := ParseRegion(s)
	if err != nil {
		panic(err)
	}
	return r
}

// Und is the root language.
var Und Tag
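The typical reason the MustParse family exists is package-level initialization from compile-time constant strings. A minimal sketch against the public golang.org/x/text/language wrapper (tag values chosen for illustration only):

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

// The tag strings are constants, so a parse failure is a programming error;
// panicking during initialization via MustParse is acceptable here.
var (
	enAU = language.MustParse("en-AU")
	deCH = language.MustParse("de-CH")
)

func main() {
	fmt.Println(enAU, deCH)
}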
16
vendor/golang.org/x/text/language/common.go
generated
vendored
@@ -1,16 +0,0 @@
// This file was generated by go generate; DO NOT EDIT

package language

// This file contains code common to the maketables.go and the package code.

// langAliasType is the type of an alias in langAliasMap.
type langAliasType int8

const (
	langDeprecated langAliasType = iota
	langMacro
	langLegacy

	langAliasTypeUnknown langAliasType = -1
)
32
vendor/golang.org/x/text/language/coverage.go
generated
vendored
@@ -7,6 +7,8 @@ package language
 import (
 	"fmt"
 	"sort"
+
+	"golang.org/x/text/internal/language"
 )
 
 // The Coverage interface is used to define the level of coverage of an
@@ -44,9 +46,9 @@ type allSubtags struct{}
 // consecutive range, it simply returns a slice of numbers in increasing order.
 // The "undefined" region is not returned.
 func (s allSubtags) Regions() []Region {
-	reg := make([]Region, numRegions)
+	reg := make([]Region, language.NumRegions)
 	for i := range reg {
-		reg[i] = Region{regionID(i + 1)}
+		reg[i] = Region{language.Region(i + 1)}
 	}
 	return reg
 }
@@ -55,9 +57,9 @@ func (s allSubtags) Regions() []Region {
 // consecutive range, it simply returns a slice of numbers in increasing order.
 // The "undefined" script is not returned.
 func (s allSubtags) Scripts() []Script {
-	scr := make([]Script, numScripts)
+	scr := make([]Script, language.NumScripts)
 	for i := range scr {
-		scr[i] = Script{scriptID(i + 1)}
+		scr[i] = Script{language.Script(i + 1)}
 	}
 	return scr
 }
@@ -65,22 +67,10 @@ func (s allSubtags) Scripts() []Script {
 // BaseLanguages returns the list of all supported base languages. It generates
 // the list by traversing the internal structures.
 func (s allSubtags) BaseLanguages() []Base {
-	base := make([]Base, 0, numLanguages)
-	for i := 0; i < langNoIndexOffset; i++ {
-		// We included "und" already for the value 0.
-		if i != nonCanonicalUnd {
-			base = append(base, Base{langID(i)})
-		}
-	}
-	i := langNoIndexOffset
-	for _, v := range langNoIndex {
-		for k := 0; k < 8; k++ {
-			if v&1 == 1 {
-				base = append(base, Base{langID(i)})
-			}
-			v >>= 1
-			i++
-		}
+	bs := language.BaseLanguages()
+	base := make([]Base, len(bs))
+	for i, b := range bs {
+		base[i] = Base{b}
 	}
 	return base
 }
@@ -134,7 +124,7 @@ func (s *coverage) BaseLanguages() []Base {
 	}
 	a := make([]Base, len(tags))
 	for i, t := range tags {
-		a[i] = Base{langID(t.lang)}
+		a[i] = Base{language.Language(t.lang())}
 	}
 	sort.Sort(bases(a))
 	k := 0
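For orientation, a small sketch of how the Coverage implemented above is usually reached from application code. It assumes the package's exported Supported value (a package-level Coverage, not shown in this hunk), so treat the identifier as an assumption:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Supported is assumed to be the package-level Coverage backed by
	// allSubtags; its methods enumerate every subtag known to the tables.
	fmt.Println("base languages:", len(language.Supported.BaseLanguages()))
	fmt.Println("scripts:", len(language.Supported.Scripts()))
	fmt.Println("regions:", len(language.Supported.Regions()))
}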
77
vendor/golang.org/x/text/language/display/display.go
generated
vendored
@@ -15,8 +15,10 @@
 package display // import "golang.org/x/text/language/display"
 
 import (
+	"fmt"
 	"strings"
 
+	"golang.org/x/text/internal/format"
 	"golang.org/x/text/language"
 )
 
@@ -32,6 +34,65 @@ All fairly low priority at the moment:
   - Consider compressing infrequently used languages and decompress on demand.
 */
 
+// A Formatter formats a tag in the current language. It is used in conjunction
+// with the message package.
+type Formatter struct {
+	lookup func(tag int, x interface{}) string
+	x      interface{}
+}
+
+// Format implements "golang.org/x/text/internal/format".Formatter.
+func (f Formatter) Format(state format.State, verb rune) {
+	// TODO: there are a lot of inefficiencies in this code. Fix it when we
+	// language.Tag has embedded compact tags.
+	t := state.Language()
+	_, index, _ := matcher.Match(t)
+	str := f.lookup(index, f.x)
+	if str == "" {
+		// TODO: use language-specific punctuation.
+		// TODO: use codePattern instead of language?
+		if unknown := f.lookup(index, language.Und); unknown != "" {
+			fmt.Fprintf(state, "%v (%v)", unknown, f.x)
+		} else {
+			fmt.Fprintf(state, "[language: %v]", f.x)
+		}
+	} else {
+		state.Write([]byte(str))
+	}
+}
+
+// Language returns a Formatter that renders the name for lang in the
+// current language. x may be a language.Base or a language.Tag.
+// It renders lang in the default language if no translation for the current
+// language is supported.
+func Language(lang interface{}) Formatter {
+	return Formatter{langFunc, lang}
+}
+
+// Region returns a Formatter that renders the name for region in the current
+// language. region may be a language.Region or a language.Tag.
+// It renders region in the default language if no translation for the current
+// language is supported.
+func Region(region interface{}) Formatter {
+	return Formatter{regionFunc, region}
+}
+
+// Script returns a Formatter that renders the name for script in the current
+// language. script may be a language.Script or a language.Tag.
+// It renders script in the default language if no translation for the current
+// language is supported.
+func Script(script interface{}) Formatter {
+	return Formatter{scriptFunc, script}
+}
+
+// Tag returns a Formatter that renders the name for tag in the current
+// language. tag may be a language.Tag.
+// It renders tag in the default language if no translation for the current
+// language is supported.
+func Tag(tag interface{}) Formatter {
+	return Formatter{tagFunc, tag}
+}
+
 // A Namer is used to get the name for a given value, such as a Tag, Language,
 // Script or Region.
 type Namer interface {
@@ -84,6 +145,10 @@ func Languages(t language.Tag) Namer {
 
 type languageNamer int
 
+func langFunc(i int, x interface{}) string {
+	return nameLanguage(languageNamer(i), x)
+}
+
 func (n languageNamer) name(i int) string {
 	return lookup(langHeaders[:], int(n), i)
 }
@@ -116,6 +181,10 @@ func Scripts(t language.Tag) Namer {
 
 type scriptNamer int
 
+func scriptFunc(i int, x interface{}) string {
+	return nameScript(scriptNamer(i), x)
+}
+
 func (n scriptNamer) name(i int) string {
 	return lookup(scriptHeaders[:], int(n), i)
 }
@@ -140,6 +209,10 @@ func Regions(t language.Tag) Namer {
 
 type regionNamer int
 
+func regionFunc(i int, x interface{}) string {
+	return nameRegion(regionNamer(i), x)
+}
+
 func (n regionNamer) name(i int) string {
 	return lookup(regionHeaders[:], int(n), i)
 }
@@ -162,6 +235,10 @@ func Tags(t language.Tag) Namer {
 
 type tagNamer int
 
+func tagFunc(i int, x interface{}) string {
+	return nameTag(languageNamer(i), scriptNamer(i), regionNamer(i), x)
+}
+
 // Name implements the Namer interface for tag names.
 func (n tagNamer) Name(x interface{}) string {
 	return nameTag(languageNamer(n), scriptNamer(n), regionNamer(n), x)
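For context, a minimal sketch of the Namer API that the helper functions above feed into, using only Tags and Name as they appear in the surrounding hunks; the printed display name is indicative, not guaranteed:

package main

import (
	"fmt"

	"golang.org/x/text/language"
	"golang.org/x/text/language/display"
)

func main() {
	// A Namer that renders tag names in English.
	namer := display.Tags(language.English)
	// Prints the English display name for the tag, e.g. "Swiss High German".
	fmt.Println(namer.Name(language.MustParse("de-CH")))
}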
8
vendor/golang.org/x/text/language/display/maketables.go
generated
vendored
@@ -205,7 +205,13 @@ func (b *builder) generate() {
 	b.setData("lang", func(g *group, loc language.Tag, ldn *cldr.LocaleDisplayNames) {
 		if ldn.Languages != nil {
 			for _, v := range ldn.Languages.Language {
-				tag := tagForm.MustParse(v.Type)
+				lang := v.Type
+				if lang == "root" {
+					// We prefer the data from "und"
+					// TODO: allow both the data for root and und somehow.
+					continue
+				}
+				tag := tagForm.MustParse(lang)
 				if tags.contains(tag) {
 					g.set(loc, tag.String(), v.Data())
 				}
46103
vendor/golang.org/x/text/language/display/tables.go
generated
vendored
File diff suppressed because it is too large
102
vendor/golang.org/x/text/language/doc.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package language implements BCP 47 language tags and related functionality.
//
// The most important function of package language is to match a list of
// user-preferred languages to a list of supported languages.
// It relieves the developer from dealing with the complexity of this process
// and provides the user with the best experience
// (see https://blog.golang.org/matchlang).
//
//
// Matching preferred against supported languages
//
// A Matcher for an application that supports English, Australian English,
// Danish, and standard Mandarin can be created as follows:
//
//	var matcher = language.NewMatcher([]language.Tag{
//		language.English,   // The first language is used as fallback.
//		language.MustParse("en-AU"),
//		language.Danish,
//		language.Chinese,
//	})
//
// This list of supported languages is typically implied by the languages for
// which translations of the user interface exist.
//
// User-preferred languages usually come as a comma-separated list of BCP 47
// language tags.
// MatchStrings finds the best matches for such strings:
//
//	handler(w http.ResponseWriter, r *http.Request) {
//		lang, _ := r.Cookie("lang")
//		accept := r.Header.Get("Accept-Language")
//		tag, _ := language.MatchStrings(matcher, lang.String(), accept)
//
//		// tag should now be used for the initialization of any
//		// locale-specific service.
//	}
//
// The Matcher's Match method can be used to match Tags directly.
//
// Matchers are aware of the intricacies of equivalence between languages, such
// as deprecated subtags, legacy tags, macro languages, mutual
// intelligibility between scripts and languages, and transparently passing
// BCP 47 user configuration.
// For instance, it will know that a reader of Bokmål Danish can read Norwegian
// and will know that Cantonese ("yue") is a good match for "zh-HK".
//
//
// Using match results
//
// To guarantee a consistent user experience, it is important to
// use the same language tag for the selection of any locale-specific services.
// For example, it is utterly confusing to substitute spelled-out numbers
// or dates in one language in text of another language.
// More subtly confusing is using the wrong sorting order or casing
// algorithm for a certain language.
//
// All the packages in x/text that provide locale-specific services
// (e.g. collate, cases) should be initialized with the tag that was
// obtained at the start of an interaction with the user.
//
// Note that the Tag returned by Match and MatchStrings may differ from any
// of the supported languages, as it may contain carried over settings from
// the user tags.
// This may be inconvenient when your application has some additional
// locale-specific data for your supported languages.
// Match and MatchStrings both return the index of the matched supported tag
// to simplify associating such data with the matched tag.
//
//
// Canonicalization
//
// If one uses the Matcher to compare languages one does not need to
// worry about canonicalization.
//
// The meaning of a Tag varies per application. The language package
// therefore delays canonicalization and preserves information as much
// as possible. The Matcher, however, will always take into account that
// two different tags may represent the same language.
//
// By default, only legacy and deprecated tags are converted into their
// canonical equivalent. All other information is preserved. This approach makes
// the confidence scores more accurate and allows matchers to distinguish
// between variants that are otherwise lost.
//
// As a consequence, two tags that should be treated as identical according to
// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The
// Matcher handles such distinctions, though, and is aware of the
// equivalence relations. The CanonType type can be used to alter the
// canonicalization form.
//
// References
//
// BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47
//
package language // import "golang.org/x/text/language"

// TODO: explanation on how to match languages for your own locale-specific
// service.
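The fragments in the package comment above assemble into one small runnable sketch; the preference strings passed to MatchStrings are illustrative assumptions:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

var matcher = language.NewMatcher([]language.Tag{
	language.English, // The first language is used as fallback.
	language.MustParse("en-AU"),
	language.Danish,
	language.Chinese,
})

func main() {
	// A "lang" cookie value and an Accept-Language header, as in the docs.
	tag, index := language.MatchStrings(matcher, "da", "en-AU;q=0.9, en;q=0.8")
	fmt.Println(tag, index) // matched tag and index of the supported language
}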
305
vendor/golang.org/x/text/language/gen.go
generated
vendored
Normal file
@@ -0,0 +1,305 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

// Language tag table generator.
// Data read from the web.

package main

import (
	"flag"
	"fmt"
	"io"
	"log"
	"sort"
	"strconv"
	"strings"

	"golang.org/x/text/internal/gen"
	"golang.org/x/text/internal/language"
	"golang.org/x/text/unicode/cldr"
)

var (
	test = flag.Bool("test",
		false,
		"test existing tables; can be used to compare web data with package data.")
	outputFile = flag.String("output",
		"tables.go",
		"output file for generated tables")
)

func main() {
	gen.Init()

	w := gen.NewCodeWriter()
	defer w.WriteGoFile("tables.go", "language")

	b := newBuilder(w)
	gen.WriteCLDRVersion(w)

	b.writeConstants()
	b.writeMatchData()
}

type builder struct {
	w    *gen.CodeWriter
	hw   io.Writer // MultiWriter for w and w.Hash
	data *cldr.CLDR
	supp *cldr.SupplementalData
}

func (b *builder) langIndex(s string) uint16 {
	return uint16(language.MustParseBase(s))
}

func (b *builder) regionIndex(s string) int {
	return int(language.MustParseRegion(s))
}

func (b *builder) scriptIndex(s string) int {
	return int(language.MustParseScript(s))
}

func newBuilder(w *gen.CodeWriter) *builder {
	r := gen.OpenCLDRCoreZip()
	defer r.Close()
	d := &cldr.Decoder{}
	data, err := d.DecodeZip(r)
	if err != nil {
		log.Fatal(err)
	}
	b := builder{
		w:    w,
		hw:   io.MultiWriter(w, w.Hash),
		data: data,
		supp: data.Supplemental(),
	}
	return &b
}

// writeConsts computes f(v) for all v in values and writes the results
// as constants named _v to a single constant block.
func (b *builder) writeConsts(f func(string) int, values ...string) {
	fmt.Fprintln(b.w, "const (")
	for _, v := range values {
		fmt.Fprintf(b.w, "\t_%s = %v\n", v, f(v))
	}
	fmt.Fprintln(b.w, ")")
}

// TODO: region inclusion data will probably not be used in future matchers.

var langConsts = []string{
	"de", "en", "fr", "it", "mo", "no", "nb", "pt", "sh", "mul", "und",
}

var scriptConsts = []string{
	"Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy",
	"Zzzz",
}

var regionConsts = []string{
	"001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US",
	"ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo.
}

func (b *builder) writeConstants() {
	b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...)
	b.writeConsts(b.regionIndex, regionConsts...)
	b.writeConsts(b.scriptIndex, scriptConsts...)
}

type mutualIntelligibility struct {
	want, have uint16
	distance   uint8
	oneway     bool
}

type scriptIntelligibility struct {
	wantLang, haveLang     uint16
	wantScript, haveScript uint8
	distance               uint8
	// Always oneway
}

type regionIntelligibility struct {
	lang     uint16 // compact language id
	script   uint8  // 0 means any
	group    uint8  // 0 means any; if bit 7 is set it means inverse
	distance uint8
	// Always twoway.
}

// writeMatchData writes tables with languages and scripts for which there is
// mutual intelligibility. The data is based on CLDR's languageMatching data.
// Note that we use a different algorithm than the one defined by CLDR and that
// we slightly modify the data. For example, we convert scores to confidence levels.
// We also drop all region-related data as we use a different algorithm to
// determine region equivalence.
func (b *builder) writeMatchData() {
	lm := b.supp.LanguageMatching.LanguageMatches
	cldr.MakeSlice(&lm).SelectAnyOf("type", "written_new")

	regionHierarchy := map[string][]string{}
	for _, g := range b.supp.TerritoryContainment.Group {
		regions := strings.Split(g.Contains, " ")
		regionHierarchy[g.Type] = append(regionHierarchy[g.Type], regions...)
	}
	regionToGroups := make([]uint8, language.NumRegions)

	idToIndex := map[string]uint8{}
	for i, mv := range lm[0].MatchVariable {
		if i > 6 {
			log.Fatalf("Too many groups: %d", i)
		}
		idToIndex[mv.Id] = uint8(i + 1)
		// TODO: also handle '-'
		for _, r := range strings.Split(mv.Value, "+") {
			todo := []string{r}
			for k := 0; k < len(todo); k++ {
				r := todo[k]
				regionToGroups[b.regionIndex(r)] |= 1 << uint8(i)
				todo = append(todo, regionHierarchy[r]...)
			}
		}
	}
	b.w.WriteVar("regionToGroups", regionToGroups)

	// maps language id to in- and out-of-group region.
	paradigmLocales := [][3]uint16{}
	locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ")
	for i := 0; i < len(locales); i += 2 {
		x := [3]uint16{}
		for j := 0; j < 2; j++ {
			pc := strings.SplitN(locales[i+j], "-", 2)
			x[0] = b.langIndex(pc[0])
			if len(pc) == 2 {
				x[1+j] = uint16(b.regionIndex(pc[1]))
			}
		}
		paradigmLocales = append(paradigmLocales, x)
	}
	b.w.WriteVar("paradigmLocales", paradigmLocales)

	b.w.WriteType(mutualIntelligibility{})
	b.w.WriteType(scriptIntelligibility{})
	b.w.WriteType(regionIntelligibility{})

	matchLang := []mutualIntelligibility{}
	matchScript := []scriptIntelligibility{}
	matchRegion := []regionIntelligibility{}
	// Convert the languageMatch entries in lists keyed by desired language.
	for _, m := range lm[0].LanguageMatch {
		// Different versions of CLDR use different separators.
		desired := strings.Replace(m.Desired, "-", "_", -1)
		supported := strings.Replace(m.Supported, "-", "_", -1)
		d := strings.Split(desired, "_")
		s := strings.Split(supported, "_")
		if len(d) != len(s) {
			log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
			continue
		}
		distance, _ := strconv.ParseInt(m.Distance, 10, 8)
		switch len(d) {
		case 2:
			if desired == supported && desired == "*_*" {
				continue
			}
			// language-script pair.
			matchScript = append(matchScript, scriptIntelligibility{
				wantLang:   uint16(b.langIndex(d[0])),
				haveLang:   uint16(b.langIndex(s[0])),
				wantScript: uint8(b.scriptIndex(d[1])),
				haveScript: uint8(b.scriptIndex(s[1])),
				distance:   uint8(distance),
			})
			if m.Oneway != "true" {
				matchScript = append(matchScript, scriptIntelligibility{
					wantLang:   uint16(b.langIndex(s[0])),
					haveLang:   uint16(b.langIndex(d[0])),
					wantScript: uint8(b.scriptIndex(s[1])),
					haveScript: uint8(b.scriptIndex(d[1])),
					distance:   uint8(distance),
				})
			}
		case 1:
			if desired == supported && desired == "*" {
				continue
			}
			if distance == 1 {
				// nb == no is already handled by macro mapping. Check there
				// really is only this case.
				if d[0] != "no" || s[0] != "nb" {
					log.Fatalf("unhandled equivalence %s == %s", s[0], d[0])
				}
				continue
			}
			// TODO: consider dropping oneway field and just doubling the entry.
			matchLang = append(matchLang, mutualIntelligibility{
				want:     uint16(b.langIndex(d[0])),
				have:     uint16(b.langIndex(s[0])),
				distance: uint8(distance),
				oneway:   m.Oneway == "true",
			})
		case 3:
			if desired == supported && desired == "*_*_*" {
				continue
			}
			if desired != supported {
				// This is now supported by CLDR, but only one case, which
				// should already be covered by paradigm locales. For instance,
				// test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in
				// testdata/CLDRLocaleMatcherTest.txt tests this.
				if supported != "en_*_GB" {
					log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
				}
				continue
			}
			ri := regionIntelligibility{
				lang:     b.langIndex(d[0]),
				distance: uint8(distance),
			}
			if d[1] != "*" {
				ri.script = uint8(b.scriptIndex(d[1]))
			}
			switch {
			case d[2] == "*":
				ri.group = 0x80 // not contained in anything
			case strings.HasPrefix(d[2], "$!"):
				ri.group = 0x80
				d[2] = "$" + d[2][len("$!"):]
				fallthrough
			case strings.HasPrefix(d[2], "$"):
				ri.group |= idToIndex[d[2]]
			}
			matchRegion = append(matchRegion, ri)
		default:
			log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
		}
	}
	sort.SliceStable(matchLang, func(i, j int) bool {
		return matchLang[i].distance < matchLang[j].distance
	})
	b.w.WriteComment(`
	matchLang holds pairs of langIDs of base languages that are typically
	mutually intelligible. Each pair is associated with a confidence and
	whether the intelligibility goes one or both ways.`)
	b.w.WriteVar("matchLang", matchLang)

	b.w.WriteComment(`
	matchScript holds pairs of scriptIDs where readers of one script
	can typically also read the other. Each is associated with a confidence.`)
	sort.SliceStable(matchScript, func(i, j int) bool {
		return matchScript[i].distance < matchScript[j].distance
	})
	b.w.WriteVar("matchScript", matchScript)

	sort.SliceStable(matchRegion, func(i, j int) bool {
		return matchRegion[i].distance < matchRegion[j].distance
	})
	b.w.WriteVar("matchRegion", matchRegion)
}
162
vendor/golang.org/x/text/language/gen_index.go
generated
vendored
@@ -1,162 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// This file generates derivative tables based on the language package itself.

import (
	"bytes"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"reflect"
	"sort"
	"strings"

	"golang.org/x/text/internal/gen"
	"golang.org/x/text/language"
	"golang.org/x/text/unicode/cldr"
)

var (
	test = flag.Bool("test", false,
		"test existing tables; can be used to compare web data with package data.")

	draft = flag.String("draft",
		"contributed",
		`Minimal draft requirements (approved, contributed, provisional, unconfirmed).`)
)

func main() {
	gen.Init()

	// Read the CLDR zip file.
	r := gen.OpenCLDRCoreZip()
	defer r.Close()

	d := &cldr.Decoder{}
	data, err := d.DecodeZip(r)
	if err != nil {
		log.Fatalf("DecodeZip: %v", err)
	}

	w := gen.NewCodeWriter()
	defer func() {
		buf := &bytes.Buffer{}

		if _, err = w.WriteGo(buf, "language"); err != nil {
			log.Fatalf("Error formatting file index.go: %v", err)
		}

		// Since we're generating a table for our own package we need to rewrite
		// doing the equivalent of go fmt -r 'language.b -> b'. Using
		// bytes.Replace will do.
		out := bytes.Replace(buf.Bytes(), []byte("language."), nil, -1)
		if err := ioutil.WriteFile("index.go", out, 0600); err != nil {
			log.Fatalf("Could not create file index.go: %v", err)
		}
	}()

	m := map[language.Tag]bool{}
	for _, lang := range data.Locales() {
		// We include all locales unconditionally to be consistent with en_US.
		// We want en_US, even though it has no data associated with it.

		// TODO: put any of the languages for which no data exists at the end
		// of the index. This allows all components based on ICU to use that
		// as the cutoff point.
		// if x := data.RawLDML(lang); false ||
		// 	x.LocaleDisplayNames != nil ||
		// 	x.Characters != nil ||
		// 	x.Delimiters != nil ||
		// 	x.Measurement != nil ||
		// 	x.Dates != nil ||
		// 	x.Numbers != nil ||
		// 	x.Units != nil ||
		// 	x.ListPatterns != nil ||
		// 	x.Collations != nil ||
		// 	x.Segmentations != nil ||
		// 	x.Rbnf != nil ||
		// 	x.Annotations != nil ||
		// 	x.Metadata != nil {

		// TODO: support POSIX natively, albeit non-standard.
		tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1))
		m[tag] = true
		// }
	}
	// Include locales for plural rules, which uses a different structure.
	for _, plurals := range data.Supplemental().Plurals {
		for _, rules := range plurals.PluralRules {
			for _, lang := range strings.Split(rules.Locales, " ") {
				m[language.Make(lang)] = true
			}
		}
	}

	var core, special []language.Tag

	for t := range m {
		if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" {
			log.Fatalf("Unexpected extension %v in %v", x, t)
		}
		if len(t.Variants()) == 0 && len(t.Extensions()) == 0 {
			core = append(core, t)
		} else {
			special = append(special, t)
		}
	}

	w.WriteComment(`
	NumCompactTags is the number of common tags. The maximum tag is
	NumCompactTags-1.`)
	w.WriteConst("NumCompactTags", len(core)+len(special))

	sort.Sort(byAlpha(special))
	w.WriteVar("specialTags", special)

	// TODO: order by frequency?
	sort.Sort(byAlpha(core))

	// Size computations are just an estimate.
	w.Size += int(reflect.TypeOf(map[uint32]uint16{}).Size())
	w.Size += len(core) * 6 // size of uint32 and uint16

	fmt.Fprintln(w)
	fmt.Fprintln(w, "var coreTags = map[uint32]uint16{")
	fmt.Fprintln(w, "0x0: 0, // und")
	i := len(special) + 1 // Und and special tags already written.
	for _, t := range core {
		if t == language.Und {
			continue
		}
		fmt.Fprint(w.Hash, t, i)
		b, s, r := t.Raw()
		fmt.Fprintf(w, "0x%s%s%s: %d, // %s\n",
			getIndex(b, 3), // 3 is enough as it is guaranteed to be a compact number
			getIndex(s, 2),
			getIndex(r, 3),
			i, t)
		i++
	}
	fmt.Fprintln(w, "}")
}

// getIndex prints the subtag type and extracts its index of size nibble.
// If the index is less than n nibbles, the result is prefixed with 0s.
func getIndex(x interface{}, n int) string {
	s := fmt.Sprintf("%#v", x) // s is of form Type{typeID: 0x00}
	s = s[strings.Index(s, "0x")+2 : len(s)-1]
	return strings.Repeat("0", n-len(s)) + s
}

type byAlpha []language.Tag

func (a byAlpha) Len() int           { return len(a) }
func (a byAlpha) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byAlpha) Less(i, j int) bool { return a[i].String() < a[j].String() }
767
vendor/golang.org/x/text/language/index.go
generated
vendored
@@ -1,767 +0,0 @@
// This file was generated by go generate; DO NOT EDIT

package language

// NumCompactTags is the number of common tags. The maximum tag is
// NumCompactTags-1.
const NumCompactTags = 752

var specialTags = []Tag{ // 2 elements
	0: {lang: 0xd5, region: 0x6d, script: 0x0, pVariant: 0x5, pExt: 0xe, str: "ca-ES-valencia"},
	1: {lang: 0x134, region: 0x134, script: 0x0, pVariant: 0x5, pExt: 0x5, str: "en-US-u-va-posix"},
} // Size: 72 bytes

var coreTags = map[uint32]uint16{
	0x0: 0, // und
	0x01500000: 3, // af
	0x015000d1: 4, // af-NA
	0x01500160: 5, // af-ZA
	0x01b00000: 6, // agq
	0x01b00051: 7, // agq-CM
	0x02000000: 8, // ak
	0x0200007f: 9, // ak-GH
	0x02600000: 10, // am
	0x0260006e: 11, // am-ET
	0x03900000: 12, // ar
	0x03900001: 13, // ar-001
	0x03900022: 14, // ar-AE
	0x03900038: 15, // ar-BH
	0x03900061: 16, // ar-DJ
	0x03900066: 17, // ar-DZ
	0x0390006a: 18, // ar-EG
	0x0390006b: 19, // ar-EH
	0x0390006c: 20, // ar-ER
	0x03900096: 21, // ar-IL
	0x0390009a: 22, // ar-IQ
	0x039000a0: 23, // ar-JO
	0x039000a7: 24, // ar-KM
	0x039000ab: 25, // ar-KW
	0x039000af: 26, // ar-LB
	0x039000b8: 27, // ar-LY
	0x039000b9: 28, // ar-MA
	0x039000c8: 29, // ar-MR
	0x039000e0: 30, // ar-OM
	0x039000ec: 31, // ar-PS
	0x039000f2: 32, // ar-QA
	0x03900107: 33, // ar-SA
	0x0390010a: 34, // ar-SD
	0x03900114: 35, // ar-SO
	0x03900116: 36, // ar-SS
	0x0390011b: 37, // ar-SY
	0x0390011f: 38, // ar-TD
	0x03900127: 39, // ar-TN
	0x0390015d: 40, // ar-YE
	0x03f00000: 41, // ars
	0x04200000: 42, // as
	0x04200098: 43, // as-IN
	0x04300000: 44, // asa
	0x0430012e: 45, // asa-TZ
	0x04700000: 46, // ast
	0x0470006d: 47, // ast-ES
	0x05700000: 48, // az
	0x0571e000: 49, // az-Cyrl
	0x0571e031: 50, // az-Cyrl-AZ
	0x05752000: 51, // az-Latn
	0x05752031: 52, // az-Latn-AZ
	0x05d00000: 53, // bas
	0x05d00051: 54, // bas-CM
	0x07000000: 55, // be
	0x07000046: 56, // be-BY
	0x07400000: 57, // bem
	0x07400161: 58, // bem-ZM
	0x07800000: 59, // bez
	0x0780012e: 60, // bez-TZ
	0x07d00000: 61, // bg
	0x07d00037: 62, // bg-BG
	0x08100000: 63, // bh
	0x09e00000: 64, // bm
	0x09e000c2: 65, // bm-ML
	0x0a300000: 66, // bn
	0x0a300034: 67, // bn-BD
	0x0a300098: 68, // bn-IN
	0x0a700000: 69, // bo
	0x0a700052: 70, // bo-CN
	0x0a700098: 71, // bo-IN
	0x0b000000: 72, // br
	0x0b000077: 73, // br-FR
	0x0b300000: 74, // brx
	0x0b300098: 75, // brx-IN
	0x0b500000: 76, // bs
	0x0b51e000: 77, // bs-Cyrl
	0x0b51e032: 78, // bs-Cyrl-BA
	0x0b552000: 79, // bs-Latn
	0x0b552032: 80, // bs-Latn-BA
	0x0d500000: 81, // ca
	0x0d500021: 82, // ca-AD
	0x0d50006d: 83, // ca-ES
	0x0d500077: 84, // ca-FR
	0x0d50009d: 85, // ca-IT
	0x0da00000: 86, // ce
	0x0da00105: 87, // ce-RU
	0x0dd00000: 88, // cgg
	0x0dd00130: 89, // cgg-UG
	0x0e300000: 90, // chr
	0x0e300134: 91, // chr-US
	0x0e700000: 92, // ckb
	0x0e70009a: 93, // ckb-IQ
	0x0e70009b: 94, // ckb-IR
	0x0f600000: 95, // cs
	0x0f60005d: 96, // cs-CZ
	0x0fa00000: 97, // cu
	0x0fa00105: 98, // cu-RU
	0x0fc00000: 99, // cy
	0x0fc0007a: 100, // cy-GB
	0x0fd00000: 101, // da
	0x0fd00062: 102, // da-DK
	0x0fd00081: 103, // da-GL
	0x10400000: 104, // dav
	0x104000a3: 105, // dav-KE
	0x10900000: 106, // de
	0x1090002d: 107, // de-AT
	0x10900035: 108, // de-BE
	0x1090004d: 109, // de-CH
	0x1090005f: 110, // de-DE
	0x1090009d: 111, // de-IT
	0x109000b1: 112, // de-LI
	0x109000b6: 113, // de-LU
	0x11300000: 114, // dje
	0x113000d3: 115, // dje-NE
	0x11b00000: 116, // dsb
	0x11b0005f: 117, // dsb-DE
	0x12000000: 118, // dua
	0x12000051: 119, // dua-CM
	0x12400000: 120, // dv
	0x12700000: 121, // dyo
	0x12700113: 122, // dyo-SN
	0x12900000: 123, // dz
	0x12900042: 124, // dz-BT
	0x12b00000: 125, // ebu
	0x12b000a3: 126, // ebu-KE
	0x12c00000: 127, // ee
	0x12c0007f: 128, // ee-GH
	0x12c00121: 129, // ee-TG
	0x13100000: 130, // el
	0x1310005c: 131, // el-CY
	0x13100086: 132, // el-GR
	0x13400000: 133, // en
	0x13400001: 134, // en-001
	0x1340001a: 135, // en-150
	0x13400024: 136, // en-AG
	0x13400025: 137, // en-AI
	0x1340002c: 138, // en-AS
	0x1340002d: 139, // en-AT
	0x1340002e: 140, // en-AU
	0x13400033: 141, // en-BB
	0x13400035: 142, // en-BE
	0x13400039: 143, // en-BI
	0x1340003c: 144, // en-BM
	0x13400041: 145, // en-BS
	0x13400045: 146, // en-BW
	0x13400047: 147, // en-BZ
	0x13400048: 148, // en-CA
	0x13400049: 149, // en-CC
	0x1340004d: 150, // en-CH
	0x1340004f: 151, // en-CK
	0x13400051: 152, // en-CM
	0x1340005b: 153, // en-CX
	0x1340005c: 154, // en-CY
	0x1340005f: 155, // en-DE
	0x13400060: 156, // en-DG
	0x13400062: 157, // en-DK
	0x13400063: 158, // en-DM
	0x1340006c: 159, // en-ER
	0x13400071: 160, // en-FI
	0x13400072: 161, // en-FJ
	0x13400073: 162, // en-FK
	0x13400074: 163, // en-FM
	0x1340007a: 164, // en-GB
	0x1340007b: 165, // en-GD
	0x1340007e: 166, // en-GG
	0x1340007f: 167, // en-GH
	0x13400080: 168, // en-GI
	0x13400082: 169, // en-GM
	0x13400089: 170, // en-GU
	0x1340008b: 171, // en-GY
	0x1340008c: 172, // en-HK
	0x13400095: 173, // en-IE
	0x13400096: 174, // en-IL
	0x13400097: 175, // en-IM
	0x13400098: 176, // en-IN
	0x13400099: 177, // en-IO
	0x1340009e: 178, // en-JE
	0x1340009f: 179, // en-JM
	0x134000a3: 180, // en-KE
	0x134000a6: 181, // en-KI
	0x134000a8: 182, // en-KN
	0x134000ac: 183, // en-KY
	0x134000b0: 184, // en-LC
	0x134000b3: 185, // en-LR
	0x134000b4: 186, // en-LS
	0x134000be: 187, // en-MG
	0x134000bf: 188, // en-MH
	0x134000c5: 189, // en-MO
	0x134000c6: 190, // en-MP
	0x134000c9: 191, // en-MS
	0x134000ca: 192, // en-MT
	0x134000cb: 193, // en-MU
	0x134000cd: 194, // en-MW
	0x134000cf: 195, // en-MY
	0x134000d1: 196, // en-NA
	0x134000d4: 197, // en-NF
	0x134000d5: 198, // en-NG
	0x134000d8: 199, // en-NL
	0x134000dc: 200, // en-NR
	0x134000de: 201, // en-NU
	0x134000df: 202, // en-NZ
	0x134000e5: 203, // en-PG
	0x134000e6: 204, // en-PH
	0x134000e7: 205, // en-PK
	0x134000ea: 206, // en-PN
|
|
||||||
0x134000eb: 207, // en-PR
|
|
||||||
0x134000ef: 208, // en-PW
|
|
||||||
0x13400106: 209, // en-RW
|
|
||||||
0x13400108: 210, // en-SB
|
|
||||||
0x13400109: 211, // en-SC
|
|
||||||
0x1340010a: 212, // en-SD
|
|
||||||
0x1340010b: 213, // en-SE
|
|
||||||
0x1340010c: 214, // en-SG
|
|
||||||
0x1340010d: 215, // en-SH
|
|
||||||
0x1340010e: 216, // en-SI
|
|
||||||
0x13400111: 217, // en-SL
|
|
||||||
0x13400116: 218, // en-SS
|
|
||||||
0x1340011a: 219, // en-SX
|
|
||||||
0x1340011c: 220, // en-SZ
|
|
||||||
0x1340011e: 221, // en-TC
|
|
||||||
0x13400124: 222, // en-TK
|
|
||||||
0x13400128: 223, // en-TO
|
|
||||||
0x1340012b: 224, // en-TT
|
|
||||||
0x1340012c: 225, // en-TV
|
|
||||||
0x1340012e: 226, // en-TZ
|
|
||||||
0x13400130: 227, // en-UG
|
|
||||||
0x13400132: 228, // en-UM
|
|
||||||
0x13400134: 229, // en-US
|
|
||||||
0x13400138: 230, // en-VC
|
|
||||||
0x1340013b: 231, // en-VG
|
|
||||||
0x1340013c: 232, // en-VI
|
|
||||||
0x1340013e: 233, // en-VU
|
|
||||||
0x13400141: 234, // en-WS
|
|
||||||
0x13400160: 235, // en-ZA
|
|
||||||
0x13400161: 236, // en-ZM
|
|
||||||
0x13400163: 237, // en-ZW
|
|
||||||
0x13700000: 238, // eo
|
|
||||||
0x13700001: 239, // eo-001
|
|
||||||
0x13900000: 240, // es
|
|
||||||
0x1390001e: 241, // es-419
|
|
||||||
0x1390002b: 242, // es-AR
|
|
||||||
0x1390003e: 243, // es-BO
|
|
||||||
0x13900040: 244, // es-BR
|
|
||||||
0x13900050: 245, // es-CL
|
|
||||||
0x13900053: 246, // es-CO
|
|
||||||
0x13900055: 247, // es-CR
|
|
||||||
0x13900058: 248, // es-CU
|
|
||||||
0x13900064: 249, // es-DO
|
|
||||||
0x13900067: 250, // es-EA
|
|
||||||
0x13900068: 251, // es-EC
|
|
||||||
0x1390006d: 252, // es-ES
|
|
||||||
0x13900085: 253, // es-GQ
|
|
||||||
0x13900088: 254, // es-GT
|
|
||||||
0x1390008e: 255, // es-HN
|
|
||||||
0x13900093: 256, // es-IC
|
|
||||||
0x139000ce: 257, // es-MX
|
|
||||||
0x139000d7: 258, // es-NI
|
|
||||||
0x139000e1: 259, // es-PA
|
|
||||||
0x139000e3: 260, // es-PE
|
|
||||||
0x139000e6: 261, // es-PH
|
|
||||||
0x139000eb: 262, // es-PR
|
|
||||||
0x139000f0: 263, // es-PY
|
|
||||||
0x13900119: 264, // es-SV
|
|
||||||
0x13900134: 265, // es-US
|
|
||||||
0x13900135: 266, // es-UY
|
|
||||||
0x1390013a: 267, // es-VE
|
|
||||||
0x13b00000: 268, // et
|
|
||||||
0x13b00069: 269, // et-EE
|
|
||||||
0x14000000: 270, // eu
|
|
||||||
0x1400006d: 271, // eu-ES
|
|
||||||
0x14100000: 272, // ewo
|
|
||||||
0x14100051: 273, // ewo-CM
|
|
||||||
0x14300000: 274, // fa
|
|
||||||
0x14300023: 275, // fa-AF
|
|
||||||
0x1430009b: 276, // fa-IR
|
|
||||||
0x14900000: 277, // ff
|
|
||||||
0x14900051: 278, // ff-CM
|
|
||||||
0x14900083: 279, // ff-GN
|
|
||||||
0x149000c8: 280, // ff-MR
|
|
||||||
0x14900113: 281, // ff-SN
|
|
||||||
0x14c00000: 282, // fi
|
|
||||||
0x14c00071: 283, // fi-FI
|
|
||||||
0x14e00000: 284, // fil
|
|
||||||
0x14e000e6: 285, // fil-PH
|
|
||||||
0x15300000: 286, // fo
|
|
||||||
0x15300062: 287, // fo-DK
|
|
||||||
0x15300075: 288, // fo-FO
|
|
||||||
0x15900000: 289, // fr
|
|
||||||
0x15900035: 290, // fr-BE
|
|
||||||
0x15900036: 291, // fr-BF
|
|
||||||
0x15900039: 292, // fr-BI
|
|
||||||
0x1590003a: 293, // fr-BJ
|
|
||||||
0x1590003b: 294, // fr-BL
|
|
||||||
0x15900048: 295, // fr-CA
|
|
||||||
0x1590004a: 296, // fr-CD
|
|
||||||
0x1590004b: 297, // fr-CF
|
|
||||||
0x1590004c: 298, // fr-CG
|
|
||||||
0x1590004d: 299, // fr-CH
|
|
||||||
0x1590004e: 300, // fr-CI
|
|
||||||
0x15900051: 301, // fr-CM
|
|
||||||
0x15900061: 302, // fr-DJ
|
|
||||||
0x15900066: 303, // fr-DZ
|
|
||||||
0x15900077: 304, // fr-FR
|
|
||||||
0x15900079: 305, // fr-GA
|
|
||||||
0x1590007d: 306, // fr-GF
|
|
||||||
0x15900083: 307, // fr-GN
|
|
||||||
0x15900084: 308, // fr-GP
|
|
||||||
0x15900085: 309, // fr-GQ
|
|
||||||
0x15900090: 310, // fr-HT
|
|
||||||
0x159000a7: 311, // fr-KM
|
|
||||||
0x159000b6: 312, // fr-LU
|
|
||||||
0x159000b9: 313, // fr-MA
|
|
||||||
0x159000ba: 314, // fr-MC
|
|
||||||
0x159000bd: 315, // fr-MF
|
|
||||||
0x159000be: 316, // fr-MG
|
|
||||||
0x159000c2: 317, // fr-ML
|
|
||||||
0x159000c7: 318, // fr-MQ
|
|
||||||
0x159000c8: 319, // fr-MR
|
|
||||||
0x159000cb: 320, // fr-MU
|
|
||||||
0x159000d2: 321, // fr-NC
|
|
||||||
0x159000d3: 322, // fr-NE
|
|
||||||
0x159000e4: 323, // fr-PF
|
|
||||||
0x159000e9: 324, // fr-PM
|
|
||||||
0x15900101: 325, // fr-RE
|
|
||||||
0x15900106: 326, // fr-RW
|
|
||||||
0x15900109: 327, // fr-SC
|
|
||||||
0x15900113: 328, // fr-SN
|
|
||||||
0x1590011b: 329, // fr-SY
|
|
||||||
0x1590011f: 330, // fr-TD
|
|
||||||
0x15900121: 331, // fr-TG
|
|
||||||
0x15900127: 332, // fr-TN
|
|
||||||
0x1590013e: 333, // fr-VU
|
|
||||||
0x1590013f: 334, // fr-WF
|
|
||||||
0x1590015e: 335, // fr-YT
|
|
||||||
0x16400000: 336, // fur
|
|
||||||
0x1640009d: 337, // fur-IT
|
|
||||||
0x16800000: 338, // fy
|
|
||||||
0x168000d8: 339, // fy-NL
|
|
||||||
0x16900000: 340, // ga
|
|
||||||
0x16900095: 341, // ga-IE
|
|
||||||
0x17800000: 342, // gd
|
|
||||||
0x1780007a: 343, // gd-GB
|
|
||||||
0x18a00000: 344, // gl
|
|
||||||
0x18a0006d: 345, // gl-ES
|
|
||||||
0x19c00000: 346, // gsw
|
|
||||||
0x19c0004d: 347, // gsw-CH
|
|
||||||
0x19c00077: 348, // gsw-FR
|
|
||||||
0x19c000b1: 349, // gsw-LI
|
|
||||||
0x19d00000: 350, // gu
|
|
||||||
0x19d00098: 351, // gu-IN
|
|
||||||
0x1a200000: 352, // guw
|
|
||||||
0x1a400000: 353, // guz
|
|
||||||
0x1a4000a3: 354, // guz-KE
|
|
||||||
0x1a500000: 355, // gv
|
|
||||||
0x1a500097: 356, // gv-IM
|
|
||||||
0x1ad00000: 357, // ha
|
|
||||||
0x1ad0007f: 358, // ha-GH
|
|
||||||
0x1ad000d3: 359, // ha-NE
|
|
||||||
0x1ad000d5: 360, // ha-NG
|
|
||||||
0x1b100000: 361, // haw
|
|
||||||
0x1b100134: 362, // haw-US
|
|
||||||
0x1b500000: 363, // he
|
|
||||||
0x1b500096: 364, // he-IL
|
|
||||||
0x1b700000: 365, // hi
|
|
||||||
0x1b700098: 366, // hi-IN
|
|
||||||
0x1ca00000: 367, // hr
|
|
||||||
0x1ca00032: 368, // hr-BA
|
|
||||||
0x1ca0008f: 369, // hr-HR
|
|
||||||
0x1cb00000: 370, // hsb
|
|
||||||
0x1cb0005f: 371, // hsb-DE
|
|
||||||
0x1ce00000: 372, // hu
|
|
||||||
0x1ce00091: 373, // hu-HU
|
|
||||||
0x1d000000: 374, // hy
|
|
||||||
0x1d000027: 375, // hy-AM
|
|
||||||
0x1da00000: 376, // id
|
|
||||||
0x1da00094: 377, // id-ID
|
|
||||||
0x1df00000: 378, // ig
|
|
||||||
0x1df000d5: 379, // ig-NG
|
|
||||||
0x1e200000: 380, // ii
|
|
||||||
0x1e200052: 381, // ii-CN
|
|
||||||
0x1f000000: 382, // is
|
|
||||||
0x1f00009c: 383, // is-IS
|
|
||||||
0x1f100000: 384, // it
|
|
||||||
0x1f10004d: 385, // it-CH
|
|
||||||
0x1f10009d: 386, // it-IT
|
|
||||||
0x1f100112: 387, // it-SM
|
|
||||||
0x1f200000: 388, // iu
|
|
||||||
0x1f800000: 389, // ja
|
|
||||||
0x1f8000a1: 390, // ja-JP
|
|
||||||
0x1fb00000: 391, // jbo
|
|
||||||
0x1ff00000: 392, // jgo
|
|
||||||
0x1ff00051: 393, // jgo-CM
|
|
||||||
0x20200000: 394, // jmc
|
|
||||||
0x2020012e: 395, // jmc-TZ
|
|
||||||
0x20600000: 396, // jv
|
|
||||||
0x20800000: 397, // ka
|
|
||||||
0x2080007c: 398, // ka-GE
|
|
||||||
0x20a00000: 399, // kab
|
|
||||||
0x20a00066: 400, // kab-DZ
|
|
||||||
0x20e00000: 401, // kaj
|
|
||||||
0x20f00000: 402, // kam
|
|
||||||
0x20f000a3: 403, // kam-KE
|
|
||||||
0x21700000: 404, // kcg
|
|
||||||
0x21b00000: 405, // kde
|
|
||||||
0x21b0012e: 406, // kde-TZ
|
|
||||||
0x21f00000: 407, // kea
|
|
||||||
0x21f00059: 408, // kea-CV
|
|
||||||
0x22c00000: 409, // khq
|
|
||||||
0x22c000c2: 410, // khq-ML
|
|
||||||
0x23100000: 411, // ki
|
|
||||||
0x231000a3: 412, // ki-KE
|
|
||||||
0x23a00000: 413, // kk
|
|
||||||
0x23a000ad: 414, // kk-KZ
|
|
||||||
0x23c00000: 415, // kkj
|
|
||||||
0x23c00051: 416, // kkj-CM
|
|
||||||
0x23d00000: 417, // kl
|
|
||||||
0x23d00081: 418, // kl-GL
|
|
||||||
0x23e00000: 419, // kln
|
|
||||||
0x23e000a3: 420, // kln-KE
|
|
||||||
0x24200000: 421, // km
|
|
||||||
0x242000a5: 422, // km-KH
|
|
||||||
0x24900000: 423, // kn
|
|
||||||
0x24900098: 424, // kn-IN
|
|
||||||
0x24b00000: 425, // ko
|
|
||||||
0x24b000a9: 426, // ko-KP
|
|
||||||
0x24b000aa: 427, // ko-KR
|
|
||||||
0x24d00000: 428, // kok
|
|
||||||
0x24d00098: 429, // kok-IN
|
|
||||||
0x26100000: 430, // ks
|
|
||||||
0x26100098: 431, // ks-IN
|
|
||||||
0x26200000: 432, // ksb
|
|
||||||
0x2620012e: 433, // ksb-TZ
|
|
||||||
0x26400000: 434, // ksf
|
|
||||||
0x26400051: 435, // ksf-CM
|
|
||||||
0x26500000: 436, // ksh
|
|
||||||
0x2650005f: 437, // ksh-DE
|
|
||||||
0x26b00000: 438, // ku
|
|
||||||
0x27800000: 439, // kw
|
|
||||||
0x2780007a: 440, // kw-GB
|
|
||||||
0x28100000: 441, // ky
|
|
||||||
0x281000a4: 442, // ky-KG
|
|
||||||
0x28800000: 443, // lag
|
|
||||||
0x2880012e: 444, // lag-TZ
|
|
||||||
0x28c00000: 445, // lb
|
|
||||||
0x28c000b6: 446, // lb-LU
|
|
||||||
0x29a00000: 447, // lg
|
|
||||||
0x29a00130: 448, // lg-UG
|
|
||||||
0x2a600000: 449, // lkt
|
|
||||||
0x2a600134: 450, // lkt-US
|
|
||||||
0x2ac00000: 451, // ln
|
|
||||||
0x2ac00029: 452, // ln-AO
|
|
||||||
0x2ac0004a: 453, // ln-CD
|
|
||||||
0x2ac0004b: 454, // ln-CF
|
|
||||||
0x2ac0004c: 455, // ln-CG
|
|
||||||
0x2af00000: 456, // lo
|
|
||||||
0x2af000ae: 457, // lo-LA
|
|
||||||
0x2b600000: 458, // lrc
|
|
||||||
0x2b60009a: 459, // lrc-IQ
|
|
||||||
0x2b60009b: 460, // lrc-IR
|
|
||||||
0x2b700000: 461, // lt
|
|
||||||
0x2b7000b5: 462, // lt-LT
|
|
||||||
0x2b900000: 463, // lu
|
|
||||||
0x2b90004a: 464, // lu-CD
|
|
||||||
0x2bb00000: 465, // luo
|
|
||||||
0x2bb000a3: 466, // luo-KE
|
|
||||||
0x2bc00000: 467, // luy
|
|
||||||
0x2bc000a3: 468, // luy-KE
|
|
||||||
0x2be00000: 469, // lv
|
|
||||||
0x2be000b7: 470, // lv-LV
|
|
||||||
0x2c800000: 471, // mas
|
|
||||||
0x2c8000a3: 472, // mas-KE
|
|
||||||
0x2c80012e: 473, // mas-TZ
|
|
||||||
0x2e000000: 474, // mer
|
|
||||||
0x2e0000a3: 475, // mer-KE
|
|
||||||
0x2e400000: 476, // mfe
|
|
||||||
0x2e4000cb: 477, // mfe-MU
|
|
||||||
0x2e800000: 478, // mg
|
|
||||||
0x2e8000be: 479, // mg-MG
|
|
||||||
0x2e900000: 480, // mgh
|
|
||||||
0x2e9000d0: 481, // mgh-MZ
|
|
||||||
0x2eb00000: 482, // mgo
|
|
||||||
0x2eb00051: 483, // mgo-CM
|
|
||||||
0x2f600000: 484, // mk
|
|
||||||
0x2f6000c1: 485, // mk-MK
|
|
||||||
0x2fb00000: 486, // ml
|
|
||||||
0x2fb00098: 487, // ml-IN
|
|
||||||
0x30200000: 488, // mn
|
|
||||||
0x302000c4: 489, // mn-MN
|
|
||||||
0x31200000: 490, // mr
|
|
||||||
0x31200098: 491, // mr-IN
|
|
||||||
0x31600000: 492, // ms
|
|
||||||
0x3160003d: 493, // ms-BN
|
|
||||||
0x316000cf: 494, // ms-MY
|
|
||||||
0x3160010c: 495, // ms-SG
|
|
||||||
0x31700000: 496, // mt
|
|
||||||
0x317000ca: 497, // mt-MT
|
|
||||||
0x31c00000: 498, // mua
|
|
||||||
0x31c00051: 499, // mua-CM
|
|
||||||
0x32800000: 500, // my
|
|
||||||
0x328000c3: 501, // my-MM
|
|
||||||
0x33100000: 502, // mzn
|
|
||||||
0x3310009b: 503, // mzn-IR
|
|
||||||
0x33800000: 504, // nah
|
|
||||||
0x33c00000: 505, // naq
|
|
||||||
0x33c000d1: 506, // naq-NA
|
|
||||||
0x33e00000: 507, // nb
|
|
||||||
0x33e000d9: 508, // nb-NO
|
|
||||||
0x33e0010f: 509, // nb-SJ
|
|
||||||
0x34500000: 510, // nd
|
|
||||||
0x34500163: 511, // nd-ZW
|
|
||||||
0x34700000: 512, // nds
|
|
||||||
0x3470005f: 513, // nds-DE
|
|
||||||
0x347000d8: 514, // nds-NL
|
|
||||||
0x34800000: 515, // ne
|
|
||||||
0x34800098: 516, // ne-IN
|
|
||||||
0x348000da: 517, // ne-NP
|
|
||||||
0x35e00000: 518, // nl
|
|
||||||
0x35e0002f: 519, // nl-AW
|
|
||||||
0x35e00035: 520, // nl-BE
|
|
||||||
0x35e0003f: 521, // nl-BQ
|
|
||||||
0x35e0005a: 522, // nl-CW
|
|
||||||
0x35e000d8: 523, // nl-NL
|
|
||||||
0x35e00115: 524, // nl-SR
|
|
||||||
0x35e0011a: 525, // nl-SX
|
|
||||||
0x35f00000: 526, // nmg
|
|
||||||
0x35f00051: 527, // nmg-CM
|
|
||||||
0x36100000: 528, // nn
|
|
||||||
0x361000d9: 529, // nn-NO
|
|
||||||
0x36300000: 530, // nnh
|
|
||||||
0x36300051: 531, // nnh-CM
|
|
||||||
0x36600000: 532, // no
|
|
||||||
0x36c00000: 533, // nqo
|
|
||||||
0x36d00000: 534, // nr
|
|
||||||
0x37100000: 535, // nso
|
|
||||||
0x37700000: 536, // nus
|
|
||||||
0x37700116: 537, // nus-SS
|
|
||||||
0x37e00000: 538, // ny
|
|
||||||
0x38000000: 539, // nyn
|
|
||||||
0x38000130: 540, // nyn-UG
|
|
||||||
0x38700000: 541, // om
|
|
||||||
0x3870006e: 542, // om-ET
|
|
||||||
0x387000a3: 543, // om-KE
|
|
||||||
0x38c00000: 544, // or
|
|
||||||
0x38c00098: 545, // or-IN
|
|
||||||
0x38f00000: 546, // os
|
|
||||||
0x38f0007c: 547, // os-GE
|
|
||||||
0x38f00105: 548, // os-RU
|
|
||||||
0x39400000: 549, // pa
|
|
||||||
0x39405000: 550, // pa-Arab
|
|
||||||
0x394050e7: 551, // pa-Arab-PK
|
|
||||||
0x3942f000: 552, // pa-Guru
|
|
||||||
0x3942f098: 553, // pa-Guru-IN
|
|
||||||
0x39800000: 554, // pap
|
|
||||||
0x3aa00000: 555, // pl
|
|
||||||
0x3aa000e8: 556, // pl-PL
|
|
||||||
0x3b400000: 557, // prg
|
|
||||||
0x3b400001: 558, // prg-001
|
|
||||||
0x3b500000: 559, // ps
|
|
||||||
0x3b500023: 560, // ps-AF
|
|
||||||
0x3b700000: 561, // pt
|
|
||||||
0x3b700029: 562, // pt-AO
|
|
||||||
0x3b700040: 563, // pt-BR
|
|
||||||
0x3b70004d: 564, // pt-CH
|
|
||||||
0x3b700059: 565, // pt-CV
|
|
||||||
0x3b700085: 566, // pt-GQ
|
|
||||||
0x3b70008a: 567, // pt-GW
|
|
||||||
0x3b7000b6: 568, // pt-LU
|
|
||||||
0x3b7000c5: 569, // pt-MO
|
|
||||||
0x3b7000d0: 570, // pt-MZ
|
|
||||||
0x3b7000ed: 571, // pt-PT
|
|
||||||
0x3b700117: 572, // pt-ST
|
|
||||||
0x3b700125: 573, // pt-TL
|
|
||||||
0x3bb00000: 574, // qu
|
|
||||||
0x3bb0003e: 575, // qu-BO
|
|
||||||
0x3bb00068: 576, // qu-EC
|
|
||||||
0x3bb000e3: 577, // qu-PE
|
|
||||||
0x3cb00000: 578, // rm
|
|
||||||
0x3cb0004d: 579, // rm-CH
|
|
||||||
0x3d000000: 580, // rn
|
|
||||||
0x3d000039: 581, // rn-BI
|
|
||||||
0x3d300000: 582, // ro
|
|
||||||
0x3d3000bb: 583, // ro-MD
|
|
||||||
0x3d300103: 584, // ro-RO
|
|
||||||
0x3d500000: 585, // rof
|
|
||||||
0x3d50012e: 586, // rof-TZ
|
|
||||||
0x3d900000: 587, // ru
|
|
||||||
0x3d900046: 588, // ru-BY
|
|
||||||
0x3d9000a4: 589, // ru-KG
|
|
||||||
0x3d9000ad: 590, // ru-KZ
|
|
||||||
0x3d9000bb: 591, // ru-MD
|
|
||||||
0x3d900105: 592, // ru-RU
|
|
||||||
0x3d90012f: 593, // ru-UA
|
|
||||||
0x3dc00000: 594, // rw
|
|
||||||
0x3dc00106: 595, // rw-RW
|
|
||||||
0x3dd00000: 596, // rwk
|
|
||||||
0x3dd0012e: 597, // rwk-TZ
|
|
||||||
0x3e200000: 598, // sah
|
|
||||||
0x3e200105: 599, // sah-RU
|
|
||||||
0x3e300000: 600, // saq
|
|
||||||
0x3e3000a3: 601, // saq-KE
|
|
||||||
0x3e900000: 602, // sbp
|
|
||||||
0x3e90012e: 603, // sbp-TZ
|
|
||||||
0x3f200000: 604, // sdh
|
|
||||||
0x3f300000: 605, // se
|
|
||||||
0x3f300071: 606, // se-FI
|
|
||||||
0x3f3000d9: 607, // se-NO
|
|
||||||
0x3f30010b: 608, // se-SE
|
|
||||||
0x3f500000: 609, // seh
|
|
||||||
0x3f5000d0: 610, // seh-MZ
|
|
||||||
0x3f700000: 611, // ses
|
|
||||||
0x3f7000c2: 612, // ses-ML
|
|
||||||
0x3f800000: 613, // sg
|
|
||||||
0x3f80004b: 614, // sg-CF
|
|
||||||
0x3fe00000: 615, // shi
|
|
||||||
0x3fe52000: 616, // shi-Latn
|
|
||||||
0x3fe520b9: 617, // shi-Latn-MA
|
|
||||||
0x3fed2000: 618, // shi-Tfng
|
|
||||||
0x3fed20b9: 619, // shi-Tfng-MA
|
|
||||||
0x40200000: 620, // si
|
|
||||||
0x402000b2: 621, // si-LK
|
|
||||||
0x40800000: 622, // sk
|
|
||||||
0x40800110: 623, // sk-SK
|
|
||||||
0x40c00000: 624, // sl
|
|
||||||
0x40c0010e: 625, // sl-SI
|
|
||||||
0x41200000: 626, // sma
|
|
||||||
0x41300000: 627, // smi
|
|
||||||
0x41400000: 628, // smj
|
|
||||||
0x41500000: 629, // smn
|
|
||||||
0x41500071: 630, // smn-FI
|
|
||||||
0x41800000: 631, // sms
|
|
||||||
0x41900000: 632, // sn
|
|
||||||
0x41900163: 633, // sn-ZW
|
|
||||||
0x41f00000: 634, // so
|
|
||||||
0x41f00061: 635, // so-DJ
|
|
||||||
0x41f0006e: 636, // so-ET
|
|
||||||
0x41f000a3: 637, // so-KE
|
|
||||||
0x41f00114: 638, // so-SO
|
|
||||||
0x42700000: 639, // sq
|
|
||||||
0x42700026: 640, // sq-AL
|
|
||||||
0x427000c1: 641, // sq-MK
|
|
||||||
0x4270014c: 642, // sq-XK
|
|
||||||
0x42800000: 643, // sr
|
|
||||||
0x4281e000: 644, // sr-Cyrl
|
|
||||||
0x4281e032: 645, // sr-Cyrl-BA
|
|
||||||
0x4281e0bc: 646, // sr-Cyrl-ME
|
|
||||||
0x4281e104: 647, // sr-Cyrl-RS
|
|
||||||
0x4281e14c: 648, // sr-Cyrl-XK
|
|
||||||
0x42852000: 649, // sr-Latn
|
|
||||||
0x42852032: 650, // sr-Latn-BA
|
|
||||||
0x428520bc: 651, // sr-Latn-ME
|
|
||||||
0x42852104: 652, // sr-Latn-RS
|
|
||||||
0x4285214c: 653, // sr-Latn-XK
|
|
||||||
0x42d00000: 654, // ss
|
|
||||||
0x43000000: 655, // ssy
|
|
||||||
0x43100000: 656, // st
|
|
||||||
0x43a00000: 657, // sv
|
|
||||||
0x43a00030: 658, // sv-AX
|
|
||||||
0x43a00071: 659, // sv-FI
|
|
||||||
0x43a0010b: 660, // sv-SE
|
|
||||||
0x43b00000: 661, // sw
|
|
||||||
0x43b0004a: 662, // sw-CD
|
|
||||||
0x43b000a3: 663, // sw-KE
|
|
||||||
0x43b0012e: 664, // sw-TZ
|
|
||||||
0x43b00130: 665, // sw-UG
|
|
||||||
0x44400000: 666, // syr
|
|
||||||
0x44600000: 667, // ta
|
|
||||||
0x44600098: 668, // ta-IN
|
|
||||||
0x446000b2: 669, // ta-LK
|
|
||||||
0x446000cf: 670, // ta-MY
|
|
||||||
0x4460010c: 671, // ta-SG
|
|
||||||
0x45700000: 672, // te
|
|
||||||
0x45700098: 673, // te-IN
|
|
||||||
0x45a00000: 674, // teo
|
|
||||||
0x45a000a3: 675, // teo-KE
|
|
||||||
0x45a00130: 676, // teo-UG
|
|
||||||
0x46100000: 677, // th
|
|
||||||
0x46100122: 678, // th-TH
|
|
||||||
0x46500000: 679, // ti
|
|
||||||
0x4650006c: 680, // ti-ER
|
|
||||||
0x4650006e: 681, // ti-ET
|
|
||||||
0x46700000: 682, // tig
|
|
||||||
0x46c00000: 683, // tk
|
|
||||||
0x46c00126: 684, // tk-TM
|
|
||||||
0x47600000: 685, // tn
|
|
||||||
0x47800000: 686, // to
|
|
||||||
0x47800128: 687, // to-TO
|
|
||||||
0x48000000: 688, // tr
|
|
||||||
0x4800005c: 689, // tr-CY
|
|
||||||
0x4800012a: 690, // tr-TR
|
|
||||||
0x48400000: 691, // ts
|
|
||||||
0x49a00000: 692, // twq
|
|
||||||
0x49a000d3: 693, // twq-NE
|
|
||||||
0x49f00000: 694, // tzm
|
|
||||||
0x49f000b9: 695, // tzm-MA
|
|
||||||
0x4a200000: 696, // ug
|
|
||||||
0x4a200052: 697, // ug-CN
|
|
||||||
0x4a400000: 698, // uk
|
|
||||||
0x4a40012f: 699, // uk-UA
|
|
||||||
0x4aa00000: 700, // ur
|
|
||||||
0x4aa00098: 701, // ur-IN
|
|
||||||
0x4aa000e7: 702, // ur-PK
|
|
||||||
0x4b200000: 703, // uz
|
|
||||||
0x4b205000: 704, // uz-Arab
|
|
||||||
0x4b205023: 705, // uz-Arab-AF
|
|
||||||
0x4b21e000: 706, // uz-Cyrl
|
|
||||||
0x4b21e136: 707, // uz-Cyrl-UZ
|
|
||||||
0x4b252000: 708, // uz-Latn
|
|
||||||
0x4b252136: 709, // uz-Latn-UZ
|
|
||||||
0x4b400000: 710, // vai
|
|
||||||
0x4b452000: 711, // vai-Latn
|
|
||||||
0x4b4520b3: 712, // vai-Latn-LR
|
|
||||||
0x4b4d9000: 713, // vai-Vaii
|
|
||||||
0x4b4d90b3: 714, // vai-Vaii-LR
|
|
||||||
0x4b600000: 715, // ve
|
|
||||||
0x4b900000: 716, // vi
|
|
||||||
0x4b90013d: 717, // vi-VN
|
|
||||||
0x4bf00000: 718, // vo
|
|
||||||
0x4bf00001: 719, // vo-001
|
|
||||||
0x4c200000: 720, // vun
|
|
||||||
0x4c20012e: 721, // vun-TZ
|
|
||||||
0x4c400000: 722, // wa
|
|
||||||
0x4c500000: 723, // wae
|
|
||||||
0x4c50004d: 724, // wae-CH
|
|
||||||
0x4db00000: 725, // wo
|
|
||||||
0x4e800000: 726, // xh
|
|
||||||
0x4f100000: 727, // xog
|
|
||||||
0x4f100130: 728, // xog-UG
|
|
||||||
0x4ff00000: 729, // yav
|
|
||||||
0x4ff00051: 730, // yav-CM
|
|
||||||
0x50800000: 731, // yi
|
|
||||||
0x50800001: 732, // yi-001
|
|
||||||
0x50e00000: 733, // yo
|
|
||||||
0x50e0003a: 734, // yo-BJ
|
|
||||||
0x50e000d5: 735, // yo-NG
|
|
||||||
0x51500000: 736, // yue
|
|
||||||
0x5150008c: 737, // yue-HK
|
|
||||||
0x51e00000: 738, // zgh
|
|
||||||
0x51e000b9: 739, // zgh-MA
|
|
||||||
0x51f00000: 740, // zh
|
|
||||||
0x51f34000: 741, // zh-Hans
|
|
||||||
0x51f34052: 742, // zh-Hans-CN
|
|
||||||
0x51f3408c: 743, // zh-Hans-HK
|
|
||||||
0x51f340c5: 744, // zh-Hans-MO
|
|
||||||
0x51f3410c: 745, // zh-Hans-SG
|
|
||||||
0x51f35000: 746, // zh-Hant
|
|
||||||
0x51f3508c: 747, // zh-Hant-HK
|
|
||||||
0x51f350c5: 748, // zh-Hant-MO
|
|
||||||
0x51f3512d: 749, // zh-Hant-TW
|
|
||||||
0x52400000: 750, // zu
|
|
||||||
0x52400160: 751, // zu-ZA
|
|
||||||
}
// Total table size 4580 bytes (4KiB); checksum: A7F72A2A
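The packed keys in the index above appear to encode the base language in the high bits, an 8-bit script index, and a 12-bit region index (for example 0x51f34052 is zh-Hans-CN, while 0x51f35000 is zh-Hant with no region). A minimal reading aid follows; the bit layout is an assumption inferred from the entries themselves, not taken from the vendored source.

// decompose splits a packed index key into its apparent language, script and
// region components. The shift widths are inferred from the table entries
// above (e.g. 0x51f34052 -> lang 0x51f, script 0x34, region 0x052) and are an
// assumption, not the authoritative encoding used by golang.org/x/text.
func decompose(key uint32) (lang, script, region uint32) {
	return key >> 20, (key >> 12) & 0xff, key & 0xfff
}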
773
vendor/golang.org/x/text/language/language.go
generated
vendored
File diff suppressed because it is too large

702
vendor/golang.org/x/text/language/match.go
generated
vendored
@@ -4,7 +4,45 @@
 package language
 
-import "errors"
+import (
+	"errors"
+	"strings"
+
+	"golang.org/x/text/internal/language"
+)
 
+// A MatchOption configures a Matcher.
+type MatchOption func(*matcher)
+
+// PreferSameScript will, in the absence of a match, result in the first
+// preferred tag with the same script as a supported tag to match this supported
+// tag. The default is currently true, but this may change in the future.
+func PreferSameScript(preferSame bool) MatchOption {
+	return func(m *matcher) { m.preferSameScript = preferSame }
+}
+
+// TODO(v1.0.0): consider making Matcher a concrete type, instead of interface.
+// There doesn't seem to be too much need for multiple types.
+// Making it a concrete type allows MatchStrings to be a method, which will
+// improve its discoverability.
+
+// MatchStrings parses and matches the given strings until one of them matches
+// the language in the Matcher. A string may be an Accept-Language header as
+// handled by ParseAcceptLanguage. The default language is returned if no
+// other language matched.
+func MatchStrings(m Matcher, lang ...string) (tag Tag, index int) {
+	for _, accept := range lang {
+		desired, _, err := ParseAcceptLanguage(accept)
+		if err != nil {
+			continue
+		}
+		if tag, index, conf := m.Match(desired...); conf != No {
+			return tag, index
+		}
+	}
+	tag, index, _ = m.Match()
+	return
+}
+
 // Matcher is the interface that wraps the Match method.
 //
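The hunk above introduces the new public matching helpers (MatchOption, PreferSameScript, MatchStrings). As a minimal usage sketch, assuming only the exported golang.org/x/text/language API shown in this diff (NewMatcher with options, MatchStrings accepting Accept-Language strings); the package name and sample tags are illustrative and not taken from matterbridge:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Supported tags; the first one doubles as the default.
	supported := []language.Tag{
		language.MustParse("en-US"),
		language.MustParse("nl"),
		language.MustParse("pt-BR"),
	}

	// NewMatcher now accepts options such as PreferSameScript (added in this diff).
	m := language.NewMatcher(supported, language.PreferSameScript(true))

	// MatchStrings accepts raw Accept-Language header values and falls back to
	// the default (first supported) tag when nothing matches.
	tag, index := language.MatchStrings(m, "nl-BE;q=0.8, pt;q=0.6", "en")
	fmt.Println(tag, index) // e.g. nl 1
}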
@ -36,242 +74,70 @@ func Comprehends(speaker, alternative Tag) Confidence {
|
|||||||
// matched tag in t, but is augmented with the Unicode extension ('u')of the
|
// matched tag in t, but is augmented with the Unicode extension ('u')of the
|
||||||
// corresponding preferred tag. This allows user locale options to be passed
|
// corresponding preferred tag. This allows user locale options to be passed
|
||||||
// transparently.
|
// transparently.
|
||||||
func NewMatcher(t []Tag) Matcher {
|
func NewMatcher(t []Tag, options ...MatchOption) Matcher {
|
||||||
return newMatcher(t)
|
return newMatcher(t, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
|
func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
|
||||||
|
var tt language.Tag
|
||||||
match, w, c := m.getBest(want...)
|
match, w, c := m.getBest(want...)
|
||||||
if match == nil {
|
if match != nil {
|
||||||
t = m.default_.tag
|
tt, index = match.tag, match.index
|
||||||
} else {
|
} else {
|
||||||
t, index = match.tag, match.index
|
// TODO: this should be an option
|
||||||
|
tt = m.default_.tag
|
||||||
|
if m.preferSameScript {
|
||||||
|
outer:
|
||||||
|
for _, w := range want {
|
||||||
|
script, _ := w.Script()
|
||||||
|
if script.scriptID == 0 {
|
||||||
|
// Don't do anything if there is no script, such as with
|
||||||
|
// private subtags.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for i, h := range m.supported {
|
||||||
|
if script.scriptID == h.maxScript {
|
||||||
|
tt, index = h.tag, i
|
||||||
|
break outer
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// TODO: select first language tag based on script.
|
||||||
|
}
|
||||||
|
if w.RegionID != tt.RegionID && w.RegionID != 0 {
|
||||||
|
if w.RegionID != 0 && tt.RegionID != 0 && tt.RegionID.Contains(w.RegionID) {
|
||||||
|
tt.RegionID = w.RegionID
|
||||||
|
tt.RemakeString()
|
||||||
|
} else if r := w.RegionID.String(); len(r) == 2 {
|
||||||
|
// TODO: also filter macro and deprecated.
|
||||||
|
tt, _ = tt.SetTypeForKey("rg", strings.ToLower(r)+"zzzz")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// Copy options from the user-provided tag into the result tag. This is hard
|
// Copy options from the user-provided tag into the result tag. This is hard
|
||||||
// to do after the fact, so we do it here.
|
// to do after the fact, so we do it here.
|
||||||
// TODO: consider also adding in variants that are compatible with the
|
// TODO: add in alternative variants to -u-va-.
|
||||||
// matched language.
|
// TODO: add preferred region to -u-rg-.
|
||||||
// TODO: Add back region if it is non-ambiguous? Or create another tag to
|
if e := w.Extensions(); len(e) > 0 {
|
||||||
// preserve the region?
|
b := language.Builder{}
|
||||||
if u, ok := w.Extension('u'); ok {
|
b.SetTag(tt)
|
||||||
t, _ = Raw.Compose(t, u)
|
for _, e := range e {
|
||||||
|
b.AddExt(e)
|
||||||
}
|
}
|
||||||
return t, index, c
|
tt = b.Make()
|
||||||
}
|
|
||||||
|
|
||||||
type scriptRegionFlags uint8
|
|
||||||
|
|
||||||
const (
|
|
||||||
isList = 1 << iota
|
|
||||||
scriptInFrom
|
|
||||||
regionInFrom
|
|
||||||
)
|
|
||||||
|
|
||||||
func (t *Tag) setUndefinedLang(id langID) {
|
|
||||||
if t.lang == 0 {
|
|
||||||
t.lang = id
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Tag) setUndefinedScript(id scriptID) {
|
|
||||||
if t.script == 0 {
|
|
||||||
t.script = id
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Tag) setUndefinedRegion(id regionID) {
|
|
||||||
if t.region == 0 || t.region.contains(id) {
|
|
||||||
t.region = id
|
|
||||||
}
|
}
|
||||||
|
return makeTag(tt), index, c
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrMissingLikelyTagsData indicates no information was available
|
// ErrMissingLikelyTagsData indicates no information was available
|
||||||
// to compute likely values of missing tags.
|
// to compute likely values of missing tags.
|
||||||
var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
|
var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
|
||||||
|
|
||||||
// addLikelySubtags sets subtags to their most likely value, given the locale.
|
// func (t *Tag) setTagsFrom(id Tag) {
|
||||||
// In most cases this means setting fields for unknown values, but in some
|
// t.LangID = id.LangID
|
||||||
// cases it may alter a value. It returns a ErrMissingLikelyTagsData error
|
// t.ScriptID = id.ScriptID
|
||||||
// if the given locale cannot be expanded.
|
// t.RegionID = id.RegionID
|
||||||
func (t Tag) addLikelySubtags() (Tag, error) {
|
// }
|
||||||
id, err := addTags(t)
|
|
||||||
if err != nil {
|
|
||||||
return t, err
|
|
||||||
} else if id.equalTags(t) {
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
id.remakeString()
|
|
||||||
return id, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// specializeRegion attempts to specialize a group region.
|
|
||||||
func specializeRegion(t *Tag) bool {
|
|
||||||
if i := regionInclusion[t.region]; i < nRegionGroups {
|
|
||||||
x := likelyRegionGroup[i]
|
|
||||||
if langID(x.lang) == t.lang && scriptID(x.script) == t.script {
|
|
||||||
t.region = regionID(x.region)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func addTags(t Tag) (Tag, error) {
|
|
||||||
// We leave private use identifiers alone.
|
|
||||||
if t.private() {
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
if t.script != 0 && t.region != 0 {
|
|
||||||
if t.lang != 0 {
|
|
||||||
// already fully specified
|
|
||||||
specializeRegion(&t)
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
// Search matches for und-script-region. Note that for these cases
|
|
||||||
// region will never be a group so there is no need to check for this.
|
|
||||||
list := likelyRegion[t.region : t.region+1]
|
|
||||||
if x := list[0]; x.flags&isList != 0 {
|
|
||||||
list = likelyRegionList[x.lang : x.lang+uint16(x.script)]
|
|
||||||
}
|
|
||||||
for _, x := range list {
|
|
||||||
// Deviating from the spec. See match_test.go for details.
|
|
||||||
if scriptID(x.script) == t.script {
|
|
||||||
t.setUndefinedLang(langID(x.lang))
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if t.lang != 0 {
|
|
||||||
// Search matches for lang-script and lang-region, where lang != und.
|
|
||||||
if t.lang < langNoIndexOffset {
|
|
||||||
x := likelyLang[t.lang]
|
|
||||||
if x.flags&isList != 0 {
|
|
||||||
list := likelyLangList[x.region : x.region+uint16(x.script)]
|
|
||||||
if t.script != 0 {
|
|
||||||
for _, x := range list {
|
|
||||||
if scriptID(x.script) == t.script && x.flags&scriptInFrom != 0 {
|
|
||||||
t.setUndefinedRegion(regionID(x.region))
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if t.region != 0 {
|
|
||||||
count := 0
|
|
||||||
goodScript := true
|
|
||||||
tt := t
|
|
||||||
for _, x := range list {
|
|
||||||
// We visit all entries for which the script was not
|
|
||||||
// defined, including the ones where the region was not
|
|
||||||
// defined. This allows for proper disambiguation within
|
|
||||||
// regions.
|
|
||||||
if x.flags&scriptInFrom == 0 && t.region.contains(regionID(x.region)) {
|
|
||||||
tt.region = regionID(x.region)
|
|
||||||
tt.setUndefinedScript(scriptID(x.script))
|
|
||||||
goodScript = goodScript && tt.script == scriptID(x.script)
|
|
||||||
count++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if count == 1 {
|
|
||||||
return tt, nil
|
|
||||||
}
|
|
||||||
// Even if we fail to find a unique Region, we might have
|
|
||||||
// an unambiguous script.
|
|
||||||
if goodScript {
|
|
||||||
t.script = tt.script
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Search matches for und-script.
|
|
||||||
if t.script != 0 {
|
|
||||||
x := likelyScript[t.script]
|
|
||||||
if x.region != 0 {
|
|
||||||
t.setUndefinedRegion(regionID(x.region))
|
|
||||||
t.setUndefinedLang(langID(x.lang))
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Search matches for und-region. If und-script-region exists, it would
|
|
||||||
// have been found earlier.
|
|
||||||
if t.region != 0 {
|
|
||||||
if i := regionInclusion[t.region]; i < nRegionGroups {
|
|
||||||
x := likelyRegionGroup[i]
|
|
||||||
if x.region != 0 {
|
|
||||||
t.setUndefinedLang(langID(x.lang))
|
|
||||||
t.setUndefinedScript(scriptID(x.script))
|
|
||||||
t.region = regionID(x.region)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
x := likelyRegion[t.region]
|
|
||||||
if x.flags&isList != 0 {
|
|
||||||
x = likelyRegionList[x.lang]
|
|
||||||
}
|
|
||||||
if x.script != 0 && x.flags != scriptInFrom {
|
|
||||||
t.setUndefinedLang(langID(x.lang))
|
|
||||||
t.setUndefinedScript(scriptID(x.script))
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Search matches for lang.
|
|
||||||
if t.lang < langNoIndexOffset {
|
|
||||||
x := likelyLang[t.lang]
|
|
||||||
if x.flags&isList != 0 {
|
|
||||||
x = likelyLangList[x.region]
|
|
||||||
}
|
|
||||||
if x.region != 0 {
|
|
||||||
t.setUndefinedScript(scriptID(x.script))
|
|
||||||
t.setUndefinedRegion(regionID(x.region))
|
|
||||||
}
|
|
||||||
specializeRegion(&t)
|
|
||||||
if t.lang == 0 {
|
|
||||||
t.lang = _en // default language
|
|
||||||
}
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
return t, ErrMissingLikelyTagsData
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Tag) setTagsFrom(id Tag) {
|
|
||||||
t.lang = id.lang
|
|
||||||
t.script = id.script
|
|
||||||
t.region = id.region
|
|
||||||
}
|
|
||||||
|
|
||||||
// minimize removes the region or script subtags from t such that
|
|
||||||
// t.addLikelySubtags() == t.minimize().addLikelySubtags().
|
|
||||||
func (t Tag) minimize() (Tag, error) {
|
|
||||||
t, err := minimizeTags(t)
|
|
||||||
if err != nil {
|
|
||||||
return t, err
|
|
||||||
}
|
|
||||||
t.remakeString()
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// minimizeTags mimics the behavior of the ICU 51 C implementation.
|
|
||||||
func minimizeTags(t Tag) (Tag, error) {
|
|
||||||
if t.equalTags(und) {
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
max, err := addTags(t)
|
|
||||||
if err != nil {
|
|
||||||
return t, err
|
|
||||||
}
|
|
||||||
for _, id := range [...]Tag{
|
|
||||||
{lang: t.lang},
|
|
||||||
{lang: t.lang, region: t.region},
|
|
||||||
{lang: t.lang, script: t.script},
|
|
||||||
} {
|
|
||||||
if x, err := addTags(id); err == nil && max.equalTags(x) {
|
|
||||||
t.setTagsFrom(id)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tag Matching
|
// Tag Matching
|
||||||
// CLDR defines an algorithm for finding the best match between two sets of language
|
// CLDR defines an algorithm for finding the best match between two sets of language
|
||||||
@ -300,8 +166,9 @@ func minimizeTags(t Tag) (Tag, error) {
|
|||||||
// 1) compute the match between the two tags.
|
// 1) compute the match between the two tags.
|
||||||
// 2) if the match is better than the previous best match, replace it
|
// 2) if the match is better than the previous best match, replace it
|
||||||
// with the new match. (see next section)
|
// with the new match. (see next section)
|
||||||
// b) if the current best match is above a certain threshold, return this
|
// b) if the current best match is Exact and pin is true the result will be
|
||||||
// match without proceeding to the next tag in "desired". [See Note 1]
|
// frozen to the language found thusfar, although better matches may
|
||||||
|
// still be found for the same language.
|
||||||
// 3) If the best match so far is below a certain threshold, return "default".
|
// 3) If the best match so far is below a certain threshold, return "default".
|
||||||
//
|
//
|
||||||
// Ranking:
|
// Ranking:
|
||||||
@ -350,9 +217,6 @@ func minimizeTags(t Tag) (Tag, error) {
|
|||||||
// found wins.
|
// found wins.
|
||||||
//
|
//
|
||||||
// Notes:
|
// Notes:
|
||||||
// [1] Note that even if we may not have a perfect match, if a match is above a
|
|
||||||
// certain threshold, it is considered a better match than any other match
|
|
||||||
// to a tag later in the list of preferred language tags.
|
|
||||||
// [2] In practice, as matching of Exact is done in a separate phase from
|
// [2] In practice, as matching of Exact is done in a separate phase from
|
||||||
// matching the other levels, we reuse the Exact level to mean MaxExact in
|
// matching the other levels, we reuse the Exact level to mean MaxExact in
|
||||||
// the second phase. As a consequence, we only need the levels defined by
|
// the second phase. As a consequence, we only need the levels defined by
|
||||||
@ -389,21 +253,23 @@ func minimizeTags(t Tag) (Tag, error) {
|
|||||||
// matcher keeps a set of supported language tags, indexed by language.
|
// matcher keeps a set of supported language tags, indexed by language.
|
||||||
type matcher struct {
|
type matcher struct {
|
||||||
default_ *haveTag
|
default_ *haveTag
|
||||||
index map[langID]*matchHeader
|
supported []*haveTag
|
||||||
|
index map[language.Language]*matchHeader
|
||||||
passSettings bool
|
passSettings bool
|
||||||
|
preferSameScript bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// matchHeader has the lists of tags for exact matches and matches based on
|
// matchHeader has the lists of tags for exact matches and matches based on
|
||||||
// maximized and canonicalized tags for a given language.
|
// maximized and canonicalized tags for a given language.
|
||||||
type matchHeader struct {
|
type matchHeader struct {
|
||||||
exact []*haveTag
|
haveTags []*haveTag
|
||||||
max []*haveTag
|
original bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// haveTag holds a supported Tag and its maximized script and region. The maximized
|
// haveTag holds a supported Tag and its maximized script and region. The maximized
|
||||||
// or canonicalized language is not stored as it is not needed during matching.
|
// or canonicalized language is not stored as it is not needed during matching.
|
||||||
type haveTag struct {
|
type haveTag struct {
|
||||||
tag Tag
|
tag language.Tag
|
||||||
|
|
||||||
// index of this tag in the original list of supported tags.
|
// index of this tag in the original list of supported tags.
|
||||||
index int
|
index int
|
||||||
@ -413,35 +279,37 @@ type haveTag struct {
|
|||||||
conf Confidence
|
conf Confidence
|
||||||
|
|
||||||
// Maximized region and script.
|
// Maximized region and script.
|
||||||
maxRegion regionID
|
maxRegion language.Region
|
||||||
maxScript scriptID
|
maxScript language.Script
|
||||||
|
|
||||||
// altScript may be checked as an alternative match to maxScript. If altScript
|
// altScript may be checked as an alternative match to maxScript. If altScript
|
||||||
// matches, the confidence level for this match is Low. Theoretically there
|
// matches, the confidence level for this match is Low. Theoretically there
|
||||||
// could be multiple alternative scripts. This does not occur in practice.
|
// could be multiple alternative scripts. This does not occur in practice.
|
||||||
altScript scriptID
|
altScript language.Script
|
||||||
|
|
||||||
// nextMax is the index of the next haveTag with the same maximized tags.
|
// nextMax is the index of the next haveTag with the same maximized tags.
|
||||||
nextMax uint16
|
nextMax uint16
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeHaveTag(tag Tag, index int) (haveTag, langID) {
|
func makeHaveTag(tag language.Tag, index int) (haveTag, language.Language) {
|
||||||
max := tag
|
max := tag
|
||||||
if tag.lang != 0 {
|
if tag.LangID != 0 || tag.RegionID != 0 || tag.ScriptID != 0 {
|
||||||
max, _ = max.canonicalize(All)
|
max, _ = canonicalize(All, max)
|
||||||
max, _ = addTags(max)
|
max, _ = max.Maximize()
|
||||||
max.remakeString()
|
max.RemakeString()
|
||||||
}
|
}
|
||||||
return haveTag{tag, index, Exact, max.region, max.script, altScript(max.lang, max.script), 0}, max.lang
|
return haveTag{tag, index, Exact, max.RegionID, max.ScriptID, altScript(max.LangID, max.ScriptID), 0}, max.LangID
|
||||||
}
|
}
|
||||||
|
|
||||||
// altScript returns an alternative script that may match the given script with
|
// altScript returns an alternative script that may match the given script with
|
||||||
// a low confidence. At the moment, the langMatch data allows for at most one
|
// a low confidence. At the moment, the langMatch data allows for at most one
|
||||||
// script to map to another and we rely on this to keep the code simple.
|
// script to map to another and we rely on this to keep the code simple.
|
||||||
func altScript(l langID, s scriptID) scriptID {
|
func altScript(l language.Language, s language.Script) language.Script {
|
||||||
for _, alt := range matchScript {
|
for _, alt := range matchScript {
|
||||||
if (alt.lang == 0 || langID(alt.lang) == l) && scriptID(alt.have) == s {
|
// TODO: also match cases where language is not the same.
|
||||||
return scriptID(alt.want)
|
if (language.Language(alt.wantLang) == l || language.Language(alt.haveLang) == l) &&
|
||||||
|
language.Script(alt.haveScript) == s {
|
||||||
|
return language.Script(alt.wantScript)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return 0
|
return 0
|
||||||
@ -450,34 +318,32 @@ func altScript(l langID, s scriptID) scriptID {
|
|||||||
// addIfNew adds a haveTag to the list of tags only if it is a unique tag.
|
// addIfNew adds a haveTag to the list of tags only if it is a unique tag.
|
||||||
// Tags that have the same maximized values are linked by index.
|
// Tags that have the same maximized values are linked by index.
|
||||||
func (h *matchHeader) addIfNew(n haveTag, exact bool) {
|
func (h *matchHeader) addIfNew(n haveTag, exact bool) {
|
||||||
|
h.original = h.original || exact
|
||||||
// Don't add new exact matches.
|
// Don't add new exact matches.
|
||||||
for _, v := range h.exact {
|
for _, v := range h.haveTags {
|
||||||
if v.tag.equalsRest(n.tag) {
|
if equalsRest(v.tag, n.tag) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if exact {
|
|
||||||
h.exact = append(h.exact, &n)
|
|
||||||
}
|
|
||||||
// Allow duplicate maximized tags, but create a linked list to allow quickly
|
// Allow duplicate maximized tags, but create a linked list to allow quickly
|
||||||
// comparing the equivalents and bail out.
|
// comparing the equivalents and bail out.
|
||||||
for i, v := range h.max {
|
for i, v := range h.haveTags {
|
||||||
if v.maxScript == n.maxScript &&
|
if v.maxScript == n.maxScript &&
|
||||||
v.maxRegion == n.maxRegion &&
|
v.maxRegion == n.maxRegion &&
|
||||||
v.tag.variantOrPrivateTagStr() == n.tag.variantOrPrivateTagStr() {
|
v.tag.VariantOrPrivateUseTags() == n.tag.VariantOrPrivateUseTags() {
|
||||||
for h.max[i].nextMax != 0 {
|
for h.haveTags[i].nextMax != 0 {
|
||||||
i = int(h.max[i].nextMax)
|
i = int(h.haveTags[i].nextMax)
|
||||||
}
|
}
|
||||||
h.max[i].nextMax = uint16(len(h.max))
|
h.haveTags[i].nextMax = uint16(len(h.haveTags))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
h.max = append(h.max, &n)
|
h.haveTags = append(h.haveTags, &n)
|
||||||
}
|
}
|
||||||
|
|
||||||
// header returns the matchHeader for the given language. It creates one if
|
// header returns the matchHeader for the given language. It creates one if
|
||||||
// it doesn't already exist.
|
// it doesn't already exist.
|
||||||
func (m *matcher) header(l langID) *matchHeader {
|
func (m *matcher) header(l language.Language) *matchHeader {
|
||||||
if h := m.index[l]; h != nil {
|
if h := m.index[l]; h != nil {
|
||||||
return h
|
return h
|
||||||
}
|
}
|
||||||
@ -486,12 +352,26 @@ func (m *matcher) header(l langID) *matchHeader {
|
|||||||
return h
|
return h
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func toConf(d uint8) Confidence {
|
||||||
|
if d <= 10 {
|
||||||
|
return High
|
||||||
|
}
|
||||||
|
if d < 30 {
|
||||||
|
return Low
|
||||||
|
}
|
||||||
|
return No
|
||||||
|
}
|
||||||
|
|
||||||
// newMatcher builds an index for the given supported tags and returns it as
|
// newMatcher builds an index for the given supported tags and returns it as
|
||||||
// a matcher. It also expands the index by considering various equivalence classes
|
// a matcher. It also expands the index by considering various equivalence classes
|
||||||
// for a given tag.
|
// for a given tag.
|
||||||
func newMatcher(supported []Tag) *matcher {
|
func newMatcher(supported []Tag, options []MatchOption) *matcher {
|
||||||
m := &matcher{
|
m := &matcher{
|
||||||
index: make(map[langID]*matchHeader),
|
index: make(map[language.Language]*matchHeader),
|
||||||
|
preferSameScript: true,
|
||||||
|
}
|
||||||
|
for _, o := range options {
|
||||||
|
o(m)
|
||||||
}
|
}
|
||||||
if len(supported) == 0 {
|
if len(supported) == 0 {
|
||||||
m.default_ = &haveTag{}
|
m.default_ = &haveTag{}
|
||||||
@ -500,36 +380,41 @@ func newMatcher(supported []Tag) *matcher {
|
|||||||
// Add supported languages to the index. Add exact matches first to give
|
// Add supported languages to the index. Add exact matches first to give
|
||||||
// them precedence.
|
// them precedence.
|
||||||
for i, tag := range supported {
|
for i, tag := range supported {
|
||||||
pair, _ := makeHaveTag(tag, i)
|
tt := tag.tag()
|
||||||
m.header(tag.lang).addIfNew(pair, true)
|
pair, _ := makeHaveTag(tt, i)
|
||||||
|
m.header(tt.LangID).addIfNew(pair, true)
|
||||||
|
m.supported = append(m.supported, &pair)
|
||||||
}
|
}
|
||||||
m.default_ = m.header(supported[0].lang).exact[0]
|
m.default_ = m.header(supported[0].lang()).haveTags[0]
|
||||||
|
// Keep these in two different loops to support the case that two equivalent
|
||||||
|
// languages are distinguished, such as iw and he.
|
||||||
for i, tag := range supported {
|
for i, tag := range supported {
|
||||||
pair, max := makeHaveTag(tag, i)
|
tt := tag.tag()
|
||||||
if max != tag.lang {
|
pair, max := makeHaveTag(tt, i)
|
||||||
m.header(max).addIfNew(pair, false)
|
if max != tt.LangID {
|
||||||
|
m.header(max).addIfNew(pair, true)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// update is used to add indexes in the map for equivalent languages.
|
// update is used to add indexes in the map for equivalent languages.
|
||||||
// If force is true, the update will also apply to derived entries. To
|
// update will only add entries to original indexes, thus not computing any
|
||||||
// avoid applying a "transitive closure", use false.
|
// transitive relations.
|
||||||
update := func(want, have uint16, conf Confidence, force bool) {
|
update := func(want, have uint16, conf Confidence) {
|
||||||
if hh := m.index[langID(have)]; hh != nil {
|
if hh := m.index[language.Language(have)]; hh != nil {
|
||||||
if !force && len(hh.exact) == 0 {
|
if !hh.original {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
hw := m.header(langID(want))
|
hw := m.header(language.Language(want))
|
||||||
for _, ht := range hh.max {
|
for _, ht := range hh.haveTags {
|
||||||
v := *ht
|
v := *ht
|
||||||
if conf < v.conf {
|
if conf < v.conf {
|
||||||
v.conf = conf
|
v.conf = conf
|
||||||
}
|
}
|
||||||
v.nextMax = 0 // this value needs to be recomputed
|
v.nextMax = 0 // this value needs to be recomputed
|
||||||
if v.altScript != 0 {
|
if v.altScript != 0 {
|
||||||
v.altScript = altScript(langID(want), v.maxScript)
|
v.altScript = altScript(language.Language(want), v.maxScript)
|
||||||
}
|
}
|
||||||
hw.addIfNew(v, conf == Exact && len(hh.exact) > 0)
|
hw.addIfNew(v, conf == Exact && hh.original)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -537,9 +422,9 @@ func newMatcher(supported []Tag) *matcher {
|
|||||||
// Add entries for languages with mutual intelligibility as defined by CLDR's
|
// Add entries for languages with mutual intelligibility as defined by CLDR's
|
||||||
// languageMatch data.
|
// languageMatch data.
|
||||||
for _, ml := range matchLang {
|
for _, ml := range matchLang {
|
||||||
update(ml.want, ml.have, Confidence(ml.conf), false)
|
update(ml.want, ml.have, toConf(ml.distance))
|
||||||
if !ml.oneway {
|
if !ml.oneway {
|
||||||
update(ml.have, ml.want, Confidence(ml.conf), false)
|
update(ml.have, ml.want, toConf(ml.distance))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -548,84 +433,89 @@ func newMatcher(supported []Tag) *matcher {
|
|||||||
// First we match deprecated equivalents. If they are perfect equivalents
|
// First we match deprecated equivalents. If they are perfect equivalents
|
||||||
// (their canonicalization simply substitutes a different language code, but
|
// (their canonicalization simply substitutes a different language code, but
|
||||||
// nothing else), the match confidence is Exact, otherwise it is High.
|
// nothing else), the match confidence is Exact, otherwise it is High.
|
||||||
for i, lm := range langAliasMap {
|
for i, lm := range language.AliasMap {
|
||||||
if lm.from == _sh {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// If deprecated codes match and there is no fiddling with the script or
|
// If deprecated codes match and there is no fiddling with the script or
|
||||||
// or region, we consider it an exact match.
|
// or region, we consider it an exact match.
|
||||||
conf := Exact
|
conf := Exact
|
||||||
if langAliasTypes[i] != langMacro {
|
if language.AliasTypes[i] != language.Macro {
|
||||||
if !isExactEquivalent(langID(lm.from)) {
|
if !isExactEquivalent(language.Language(lm.From)) {
|
||||||
conf = High
|
conf = High
|
||||||
}
|
}
|
||||||
update(lm.to, lm.from, conf, true)
|
update(lm.To, lm.From, conf)
|
||||||
}
|
}
|
||||||
update(lm.from, lm.to, conf, true)
|
update(lm.From, lm.To, conf)
|
||||||
}
|
}
|
||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
// getBest gets the best matching tag in m for any of the given tags, taking into
|
// getBest gets the best matching tag in m for any of the given tags, taking into
|
||||||
// account the order of preference of the given tags.
|
// account the order of preference of the given tags.
|
||||||
func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
|
func (m *matcher) getBest(want ...Tag) (got *haveTag, orig language.Tag, c Confidence) {
|
||||||
best := bestMatch{}
|
best := bestMatch{}
|
||||||
for _, w := range want {
|
for i, ww := range want {
|
||||||
var max Tag
|
w := ww.tag()
|
||||||
|
var max language.Tag
|
||||||
// Check for exact match first.
|
// Check for exact match first.
|
||||||
h := m.index[w.lang]
|
h := m.index[w.LangID]
|
||||||
if w.lang != 0 {
|
if w.LangID != 0 {
|
||||||
// Base language is defined.
|
|
||||||
if h == nil {
|
if h == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for i := range h.exact {
|
// Base language is defined.
|
||||||
have := h.exact[i]
|
max, _ = canonicalize(Legacy|Deprecated|Macro, w)
|
||||||
if have.tag.equalsRest(w) {
|
// A region that is added through canonicalization is stronger than
|
||||||
return have, w, Exact
|
// a maximized region: set it in the original (e.g. mo -> ro-MD).
|
||||||
|
if w.RegionID != max.RegionID {
|
||||||
|
w.RegionID = max.RegionID
|
||||||
}
|
}
|
||||||
}
|
// TODO: should we do the same for scripts?
|
||||||
max, _ = w.canonicalize(Legacy | Deprecated)
|
// See test case: en, sr, nl ; sh ; sr
|
||||||
max, _ = addTags(max)
|
max, _ = max.Maximize()
|
||||||
} else {
|
} else {
|
||||||
// Base language is not defined.
|
// Base language is not defined.
|
||||||
if h != nil {
|
if h != nil {
|
||||||
for i := range h.exact {
|
for i := range h.haveTags {
|
||||||
have := h.exact[i]
|
have := h.haveTags[i]
|
||||||
if have.tag.equalsRest(w) {
|
if equalsRest(have.tag, w) {
|
||||||
return have, w, Exact
|
return have, w, Exact
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if w.script == 0 && w.region == 0 {
|
if w.ScriptID == 0 && w.RegionID == 0 {
|
||||||
// We skip all tags matching und for approximate matching, including
|
// We skip all tags matching und for approximate matching, including
|
||||||
// private tags.
|
// private tags.
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
max, _ = addTags(w)
|
max, _ = w.Maximize()
|
||||||
if h = m.index[max.lang]; h == nil {
|
if h = m.index[max.LangID]; h == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
pin := true
|
||||||
|
for _, t := range want[i+1:] {
|
||||||
|
if w.LangID == t.lang() {
|
||||||
|
pin = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
// Check for match based on maximized tag.
|
// Check for match based on maximized tag.
|
||||||
for i := range h.max {
|
for i := range h.haveTags {
|
||||||
have := h.max[i]
|
have := h.haveTags[i]
|
||||||
best.update(have, w, max.script, max.region)
|
best.update(have, w, max.ScriptID, max.RegionID, pin)
|
||||||
if best.conf == Exact {
|
if best.conf == Exact {
|
||||||
for have.nextMax != 0 {
|
for have.nextMax != 0 {
|
||||||
have = h.max[have.nextMax]
|
have = h.haveTags[have.nextMax]
|
||||||
best.update(have, w, max.script, max.region)
|
best.update(have, w, max.ScriptID, max.RegionID, pin)
|
||||||
}
|
}
|
||||||
return best.have, best.want, High
|
return best.have, best.want, best.conf
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if best.conf <= No {
|
if best.conf <= No {
|
||||||
if len(want) != 0 {
|
if len(want) != 0 {
|
||||||
return nil, want[0], No
|
return nil, want[0].tag(), No
|
||||||
}
|
}
|
||||||
return nil, Tag{}, No
|
return nil, language.Tag{}, No
|
||||||
}
|
}
|
||||||
return best.have, best.want, best.conf
|
return best.have, best.want, best.conf
|
||||||
}
|
}
|
||||||
@ -633,41 +523,67 @@ func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
|
|||||||
  // bestMatch accumulates the best match so far.
  type bestMatch struct {
  have *haveTag
- want Tag
+ want language.Tag
  conf Confidence
+ pinnedRegion language.Region
+ pinLanguage bool
+ sameRegionGroup bool
  // Cached results from applying tie-breaking rules.
  origLang bool
  origReg bool
- regDist uint8
+ paradigmReg bool
+ regGroupDist uint8
  origScript bool
- parentDist uint8 // 255 if have is not an ancestor of want tag.
  }

  // update updates the existing best match if the new pair is considered to be a
- // better match.
+ // better match. To determine if the given pair is a better match, it first
- // To determine if the given pair is a better match, it first computes the rough
+ // computes the rough confidence level. If this surpasses the current match, it
- // confidence level. If this surpasses the current match, it will replace it and
+ // will replace it and update the tie-breaker rule cache. If there is a tie, it
- // update the tie-breaker rule cache. If there is a tie, it proceeds with applying
+ // proceeds with applying a series of tie-breaker rules. If there is no
- // a series of tie-breaker rules. If there is no conclusive winner after applying
+ // conclusive winner after applying the tie-breaker rules, it leaves the current
- // the tie-breaker rules, it leaves the current match as the preferred match.
+ // match as the preferred match.
- func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID) {
+ //
+ // If pin is true and have and tag are a strong match, it will henceforth only
+ // consider matches for this language. This corresponds to the nothing that most
+ // users have a strong preference for the first defined language. A user can
+ // still prefer a second language over a dialect of the preferred language by
+ // explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should
+ // be false.
+ func (m *bestMatch) update(have *haveTag, tag language.Tag, maxScript language.Script, maxRegion language.Region, pin bool) {
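The pin parameter documented above is observable through the public API. A hedged sketch of the two preference orders mentioned in the comment; the exact tags returned depend on the matcher tables in this revision, so no outputs are asserted:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	m := language.NewMatcher([]language.Tag{
		language.AmericanEnglish, // en-US
		language.Dutch,           // nl
		language.BritishEnglish,  // en-GB
	})

	// "en, nl": after the strong match on en, other en dialects keep priority over nl.
	first, _, _ := m.Match(language.MustParse("en"), language.MustParse("nl"))

	// "en, nl, en-GB": the dialect is listed explicitly, so nl outranks en-GB (pin is false).
	second, _, _ := m.Match(
		language.MustParse("en"),
		language.MustParse("nl"),
		language.MustParse("en-GB"),
	)

	fmt.Println(first, second)
}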
  // Bail if the maximum attainable confidence is below that of the current best match.
  c := have.conf
  if c < m.conf {
  return
  }
- if have.maxScript != maxScript {
+ // Don't change the language once we already have found an exact match.
+ if m.pinLanguage && tag.LangID != m.want.LangID {
+ return
+ }
+ // Pin the region group if we are comparing tags for the same language.
+ if tag.LangID == m.want.LangID && m.sameRegionGroup {
+ _, sameGroup := regionGroupDist(m.pinnedRegion, have.maxRegion, have.maxScript, m.want.LangID)
+ if !sameGroup {
+ return
+ }
+ }
+ if c == Exact && have.maxScript == maxScript {
+ // If there is another language and then another entry of this language,
+ // don't pin anything, otherwise pin the language.
+ m.pinLanguage = pin
+ }
+ if equalsRest(have.tag, tag) {
+ } else if have.maxScript != maxScript {
  // There is usually very little comprehension between different scripts.
- // In a few cases there may still be Low comprehension. This possibility is
+ // In a few cases there may still be Low comprehension. This possibility
- // pre-computed and stored in have.altScript.
+ // is pre-computed and stored in have.altScript.
  if Low < m.conf || have.altScript != maxScript {
  return
  }
  c = Low
  } else if have.maxRegion != maxRegion {
- // There is usually a small difference between languages across regions.
- // We use the region distance (below) to disambiguate between equal matches.
  if High < c {
+ // There is usually a small difference between languages across regions.
  c = High
  }
  }
@@ -686,7 +602,7 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion

  // Tie-breaker rules:
  // We prefer if the pre-maximized language was specified and identical.
- origLang := have.tag.lang == tag.lang && tag.lang != 0
+ origLang := have.tag.LangID == tag.LangID && tag.LangID != 0
  if !beaten && m.origLang != origLang {
  if m.origLang {
  return
@@ -695,7 +611,7 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
  }

  // We prefer if the pre-maximized region was specified and identical.
- origReg := have.tag.region == tag.region && tag.region != 0
+ origReg := have.tag.RegionID == tag.RegionID && tag.RegionID != 0
  if !beaten && m.origReg != origReg {
  if m.origReg {
  return
@@ -703,17 +619,24 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
  beaten = true
  }

- // Next we prefer smaller distances between regions, as defined by regionDist.
+ regGroupDist, sameGroup := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.LangID)
- regDist := regionDist(have.maxRegion, maxRegion, tag.lang)
+ if !beaten && m.regGroupDist != regGroupDist {
- if !beaten && m.regDist != regDist {
+ if regGroupDist > m.regGroupDist {
- if regDist > m.regDist {
+ return
+ }
+ beaten = true
+ }

+ paradigmReg := isParadigmLocale(tag.LangID, have.maxRegion)
+ if !beaten && m.paradigmReg != paradigmReg {
+ if !paradigmReg {
  return
  }
  beaten = true
  }

  // Next we prefer if the pre-maximized script was specified and identical.
- origScript := have.tag.script == tag.script && tag.script != 0
+ origScript := have.tag.ScriptID == tag.ScriptID && tag.ScriptID != 0
  if !beaten && m.origScript != origScript {
  if m.origScript {
  return
@@ -721,104 +644,64 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
  beaten = true
  }

- // Finally we prefer tags which have a closer parent relationship.
- parentDist := parentDistance(have.tag.region, tag)
- if !beaten && m.parentDist != parentDist {
- if parentDist > m.parentDist {
- return
- }
- beaten = true
- }

  // Update m to the newly found best match.
  if beaten {
  m.have = have
  m.want = tag
  m.conf = c
+ m.pinnedRegion = maxRegion
+ m.sameRegionGroup = sameGroup
  m.origLang = origLang
  m.origReg = origReg
+ m.paradigmReg = paradigmReg
  m.origScript = origScript
- m.regDist = regDist
+ m.regGroupDist = regGroupDist
- m.parentDist = parentDist
  }
  }

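Each tie-breaker above either rejects the candidate, marks it as the new winner via beaten, or defers to the next rule. A generic, hypothetical sketch of that cascade pattern in plain Go (not code from this package):

package main

import "fmt"

// better reports whether a candidate should replace the incumbent, applying
// tie-breaker rules in priority order. Each rule returns -1 (candidate loses),
// +1 (candidate wins), or 0 (tie; fall through to the next rule).
func better(rules []func() int) bool {
	for _, rule := range rules {
		switch rule() {
		case -1:
			return false
		case +1:
			return true
		}
	}
	return false // all rules tied: keep the incumbent
}

func main() {
	incumbent, candidate := 3, 5
	win := better([]func() int{
		func() int { return 0 }, // first rule: tie
		func() int { // second rule: prefer the larger value
			if candidate > incumbent {
				return +1
			}
			return -1
		},
	})
	fmt.Println(win)
}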
- // parentDistance returns the number of times Parent must be called before the
+ func isParadigmLocale(lang language.Language, r language.Region) bool {
- // regions match. It is assumed that it has already been checked that lang and
+ for _, e := range paradigmLocales {
- // script are identical. If haveRegion does not occur in the ancestor chain of
+ if language.Language(e[0]) == lang && (r == language.Region(e[1]) || r == language.Region(e[2])) {
- // tag, it returns 255.
+ return true
- func parentDistance(haveRegion regionID, tag Tag) uint8 {
- p := tag.Parent()
- d := uint8(1)
- for haveRegion != p.region {
- if p.region == 0 {
- return 255
  }
- p = p.Parent()
- d++
  }
- return d
+ return false
  }

- // regionDist wraps regionDistance with some exceptions to the algorithmic distance.
+ // regionGroupDist computes the distance between two regions based on their
- func regionDist(a, b regionID, lang langID) uint8 {
+ // CLDR grouping.
- if lang == _en {
+ func regionGroupDist(a, b language.Region, script language.Script, lang language.Language) (dist uint8, same bool) {
- // Two variants of non-US English are close to each other, regardless of distance.
+ const defaultDistance = 4
- if a != _US && b != _US {
- return 2
- }
- }
- return uint8(regionDistance(a, b))
- }

- // regionDistance computes the distance between two regions based on the
+ aGroup := uint(regionToGroups[a]) << 1
- // distance in the graph of region containments as defined in CLDR. It iterates
+ bGroup := uint(regionToGroups[b]) << 1
- // over increasingly inclusive sets of groups, represented as bit vectors, until
+ for _, ri := range matchRegion {
- // the source bit vector has bits in common with the destination vector.
+ if language.Language(ri.lang) == lang && (ri.script == 0 || language.Script(ri.script) == script) {
- func regionDistance(a, b regionID) int {
+ group := uint(1 << (ri.group &^ 0x80))
- if a == b {
+ if 0x80&ri.group == 0 {
- return 0
+ if aGroup&bGroup&group != 0 { // Both regions are in the group.
+ return ri.distance, ri.distance == defaultDistance
  }
- p, q := regionInclusion[a], regionInclusion[b]
+ } else {
- if p < nRegionGroups {
+ if (aGroup|bGroup)&group == 0 { // Both regions are not in the group.
- p, q = q, p
+ return ri.distance, ri.distance == defaultDistance
  }
- set := regionInclusionBits
- if q < nRegionGroups && set[p]&(1<<q) != 0 {
- return 1
  }
- d := 2
- for goal := set[q]; set[p]&goal == 0; p = regionInclusionNext[p] {
- d++
  }
- return d
  }
+ return defaultDistance, true
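The lookup above reduces to bit-vector membership: two regions count as close when they share a CLDR region group for the given language and script. A toy, self-contained sketch with invented groups and regions (regionToGroup, groupAmericas and groupEurope are made up for illustration and do not exist in the package):

package main

import "fmt"

// Hypothetical group bit masks; in the real package these come from generated tables.
const (
	groupAmericas = 1 << iota
	groupEurope
)

var regionToGroup = map[string]uint{
	"US": groupAmericas,
	"CA": groupAmericas,
	"GB": groupEurope,
	"FR": groupEurope,
}

// sameGroup reports whether both regions share at least one group bit.
func sameGroup(a, b string) bool {
	return regionToGroup[a]&regionToGroup[b] != 0
}

func main() {
	fmt.Println(sameGroup("US", "CA")) // true: both in the Americas group
	fmt.Println(sameGroup("US", "GB")) // false: different groups
}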
- func (t Tag) variants() string {
- if t.pVariant == 0 {
- return ""
- }
- return t.str[t.pVariant:t.pExt]
- }

- // variantOrPrivateTagStr returns variants or private use tags.
- func (t Tag) variantOrPrivateTagStr() string {
- if t.pExt > 0 {
- return t.str[t.pVariant:t.pExt]
- }
- return t.str[t.pVariant:]
  }

  // equalsRest compares everything except the language.
- func (a Tag) equalsRest(b Tag) bool {
+ func equalsRest(a, b language.Tag) bool {
  // TODO: don't include extensions in this comparison. To do this efficiently,
  // though, we should handle private tags separately.
- return a.script == b.script && a.region == b.region && a.variantOrPrivateTagStr() == b.variantOrPrivateTagStr()
+ return a.ScriptID == b.ScriptID && a.RegionID == b.RegionID && a.VariantOrPrivateUseTags() == b.VariantOrPrivateUseTags()
  }

  // isExactEquivalent returns true if canonicalizing the language will not alter
  // the script or region of a tag.
- func isExactEquivalent(l langID) bool {
+ func isExactEquivalent(l language.Language) bool {
  for _, o := range notEquivalent {
  if o == l {
  return false
@@ -827,15 +710,26 @@ func isExactEquivalent(l langID) bool {
  return true
  }

- var notEquivalent []langID
+ var notEquivalent []language.Language

  func init() {
  // Create a list of all languages for which canonicalization may alter the
  // script or region.
- for _, lm := range langAliasMap {
+ for _, lm := range language.AliasMap {
- tag := Tag{lang: langID(lm.from)}
+ tag := language.Tag{LangID: language.Language(lm.From)}
- if tag, _ = tag.canonicalize(All); tag.script != 0 || tag.region != 0 {
+ if tag, _ = canonicalize(All, tag); tag.ScriptID != 0 || tag.RegionID != 0 {
- notEquivalent = append(notEquivalent, langID(lm.from))
+ notEquivalent = append(notEquivalent, language.Language(lm.From))
+ }
+ }
+ // Maximize undefined regions of paradigm locales.
+ for i, v := range paradigmLocales {
+ t := language.Tag{LangID: language.Language(v[0])}
+ max, _ := t.Maximize()
+ if v[1] == 0 {
+ paradigmLocales[i][1] = uint16(max.RegionID)
+ }
+ if v[2] == 0 {
+ paradigmLocales[i][2] = uint16(max.RegionID)
  }
  }
  }
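The init hook above exists because canonicalizing some language aliases changes the script or region of a tag. The effect can be observed through the public CanonType parsers; a hedged sketch (the concrete result for a legacy tag such as "sh" depends on the CLDR tables in this revision and is deliberately not asserted):

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Errors are ignored for brevity in this sketch.
	raw, _ := language.Raw.Parse("sh")   // parse without canonicalization
	canon, _ := language.All.Parse("sh") // apply all canonicalizations

	fmt.Println(raw, canon)
}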
723 vendor/golang.org/x/text/language/parse.go generated vendored
@ -5,216 +5,21 @@
|
|||||||
package language
|
package language
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"golang.org/x/text/internal/tag"
|
"golang.org/x/text/internal/language"
|
||||||
)
|
)
|
||||||
|
|
||||||
// isAlpha returns true if the byte is not a digit.
|
|
||||||
// b must be an ASCII letter or digit.
|
|
||||||
func isAlpha(b byte) bool {
|
|
||||||
return b > '9'
|
|
||||||
}
|
|
||||||
|
|
||||||
// isAlphaNum returns true if the string contains only ASCII letters or digits.
|
|
||||||
func isAlphaNum(s []byte) bool {
|
|
||||||
for _, c := range s {
|
|
||||||
if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// errSyntax is returned by any of the parsing functions when the
|
|
||||||
// input is not well-formed, according to BCP 47.
|
|
||||||
// TODO: return the position at which the syntax error occurred?
|
|
||||||
var errSyntax = errors.New("language: tag is not well-formed")
|
|
||||||
|
|
||||||
// ValueError is returned by any of the parsing functions when the
|
// ValueError is returned by any of the parsing functions when the
|
||||||
// input is well-formed but the respective subtag is not recognized
|
// input is well-formed but the respective subtag is not recognized
|
||||||
// as a valid value.
|
// as a valid value.
|
||||||
type ValueError struct {
|
type ValueError interface {
|
||||||
v [8]byte
|
error
|
||||||
}
|
|
||||||
|
|
||||||
func mkErrInvalid(s []byte) error {
|
|
||||||
var e ValueError
|
|
||||||
copy(e.v[:], s)
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e ValueError) tag() []byte {
|
|
||||||
n := bytes.IndexByte(e.v[:], 0)
|
|
||||||
if n == -1 {
|
|
||||||
n = 8
|
|
||||||
}
|
|
||||||
return e.v[:n]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error implements the error interface.
|
|
||||||
func (e ValueError) Error() string {
|
|
||||||
return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subtag returns the subtag for which the error occurred.
|
// Subtag returns the subtag for which the error occurred.
|
||||||
func (e ValueError) Subtag() string {
|
Subtag() string
|
||||||
return string(e.tag())
|
|
||||||
}
|
|
||||||
|
|
||||||
// scanner is used to scan BCP 47 tokens, which are separated by _ or -.
|
|
||||||
type scanner struct {
|
|
||||||
b []byte
|
|
||||||
bytes [max99thPercentileSize]byte
|
|
||||||
token []byte
|
|
||||||
start int // start position of the current token
|
|
||||||
end int // end position of the current token
|
|
||||||
next int // next point for scan
|
|
||||||
err error
|
|
||||||
done bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeScannerString(s string) scanner {
|
|
||||||
scan := scanner{}
|
|
||||||
if len(s) <= len(scan.bytes) {
|
|
||||||
scan.b = scan.bytes[:copy(scan.bytes[:], s)]
|
|
||||||
} else {
|
|
||||||
scan.b = []byte(s)
|
|
||||||
}
|
|
||||||
scan.init()
|
|
||||||
return scan
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeScanner returns a scanner using b as the input buffer.
|
|
||||||
// b is not copied and may be modified by the scanner routines.
|
|
||||||
func makeScanner(b []byte) scanner {
|
|
||||||
scan := scanner{b: b}
|
|
||||||
scan.init()
|
|
||||||
return scan
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) init() {
|
|
||||||
for i, c := range s.b {
|
|
||||||
if c == '_' {
|
|
||||||
s.b[i] = '-'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s.scan()
|
|
||||||
}
|
|
||||||
|
|
||||||
// restToLower converts the string between start and end to lower case.
|
|
||||||
func (s *scanner) toLower(start, end int) {
|
|
||||||
for i := start; i < end; i++ {
|
|
||||||
c := s.b[i]
|
|
||||||
if 'A' <= c && c <= 'Z' {
|
|
||||||
s.b[i] += 'a' - 'A'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) setError(e error) {
|
|
||||||
if s.err == nil || (e == errSyntax && s.err != errSyntax) {
|
|
||||||
s.err = e
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// resizeRange shrinks or grows the array at position oldStart such that
|
|
||||||
// a new string of size newSize can fit between oldStart and oldEnd.
|
|
||||||
// Sets the scan point to after the resized range.
|
|
||||||
func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) {
|
|
||||||
s.start = oldStart
|
|
||||||
if end := oldStart + newSize; end != oldEnd {
|
|
||||||
diff := end - oldEnd
|
|
||||||
if end < cap(s.b) {
|
|
||||||
b := make([]byte, len(s.b)+diff)
|
|
||||||
copy(b, s.b[:oldStart])
|
|
||||||
copy(b[end:], s.b[oldEnd:])
|
|
||||||
s.b = b
|
|
||||||
} else {
|
|
||||||
s.b = append(s.b[end:], s.b[oldEnd:]...)
|
|
||||||
}
|
|
||||||
s.next = end + (s.next - s.end)
|
|
||||||
s.end = end
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// replace replaces the current token with repl.
|
|
||||||
func (s *scanner) replace(repl string) {
|
|
||||||
s.resizeRange(s.start, s.end, len(repl))
|
|
||||||
copy(s.b[s.start:], repl)
|
|
||||||
}
|
|
||||||
|
|
||||||
// gobble removes the current token from the input.
|
|
||||||
// Caller must call scan after calling gobble.
|
|
||||||
func (s *scanner) gobble(e error) {
|
|
||||||
s.setError(e)
|
|
||||||
if s.start == 0 {
|
|
||||||
s.b = s.b[:+copy(s.b, s.b[s.next:])]
|
|
||||||
s.end = 0
|
|
||||||
} else {
|
|
||||||
s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])]
|
|
||||||
s.end = s.start - 1
|
|
||||||
}
|
|
||||||
s.next = s.start
|
|
||||||
}
|
|
||||||
|
|
||||||
// deleteRange removes the given range from s.b before the current token.
|
|
||||||
func (s *scanner) deleteRange(start, end int) {
|
|
||||||
s.setError(errSyntax)
|
|
||||||
s.b = s.b[:start+copy(s.b[start:], s.b[end:])]
|
|
||||||
diff := end - start
|
|
||||||
s.next -= diff
|
|
||||||
s.start -= diff
|
|
||||||
s.end -= diff
|
|
||||||
}
|
|
||||||
|
|
||||||
// scan parses the next token of a BCP 47 string. Tokens that are larger
|
|
||||||
// than 8 characters or include non-alphanumeric characters result in an error
|
|
||||||
// and are gobbled and removed from the output.
|
|
||||||
// It returns the end position of the last token consumed.
|
|
||||||
func (s *scanner) scan() (end int) {
|
|
||||||
end = s.end
|
|
||||||
s.token = nil
|
|
||||||
for s.start = s.next; s.next < len(s.b); {
|
|
||||||
i := bytes.IndexByte(s.b[s.next:], '-')
|
|
||||||
if i == -1 {
|
|
||||||
s.end = len(s.b)
|
|
||||||
s.next = len(s.b)
|
|
||||||
i = s.end - s.start
|
|
||||||
} else {
|
|
||||||
s.end = s.next + i
|
|
||||||
s.next = s.end + 1
|
|
||||||
}
|
|
||||||
token := s.b[s.start:s.end]
|
|
||||||
if i < 1 || i > 8 || !isAlphaNum(token) {
|
|
||||||
s.gobble(errSyntax)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
s.token = token
|
|
||||||
return end
|
|
||||||
}
|
|
||||||
if n := len(s.b); n > 0 && s.b[n-1] == '-' {
|
|
||||||
s.setError(errSyntax)
|
|
||||||
s.b = s.b[:len(s.b)-1]
|
|
||||||
}
|
|
||||||
s.done = true
|
|
||||||
return end
|
|
||||||
}
|
|
||||||
|
|
||||||
// acceptMinSize parses multiple tokens of the given size or greater.
|
|
||||||
// It returns the end position of the last token consumed.
|
|
||||||
func (s *scanner) acceptMinSize(min int) (end int) {
|
|
||||||
end = s.end
|
|
||||||
s.scan()
|
|
||||||
for ; len(s.token) >= min; s.scan() {
|
|
||||||
end = s.end
|
|
||||||
}
|
|
||||||
return end
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
|
// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
|
||||||
@ -238,324 +43,15 @@ func Parse(s string) (t Tag, err error) {
|
|||||||
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
|
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
|
||||||
// The resulting tag is canonicalized using the the canonicalization type c.
|
// The resulting tag is canonicalized using the the canonicalization type c.
|
||||||
func (c CanonType) Parse(s string) (t Tag, err error) {
|
func (c CanonType) Parse(s string) (t Tag, err error) {
|
||||||
// TODO: consider supporting old-style locale key-value pairs.
|
tt, err := language.Parse(s)
|
||||||
if s == "" {
|
if err != nil {
|
||||||
return und, errSyntax
|
return makeTag(tt), err
|
||||||
}
|
}
|
||||||
if len(s) <= maxAltTaglen {
|
tt, changed := canonicalize(c, tt)
|
||||||
b := [maxAltTaglen]byte{}
|
|
||||||
for i, c := range s {
|
|
||||||
// Generating invalid UTF-8 is okay as it won't match.
|
|
||||||
if 'A' <= c && c <= 'Z' {
|
|
||||||
c += 'a' - 'A'
|
|
||||||
} else if c == '_' {
|
|
||||||
c = '-'
|
|
||||||
}
|
|
||||||
b[i] = byte(c)
|
|
||||||
}
|
|
||||||
if t, ok := grandfathered(b); ok {
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
scan := makeScannerString(s)
|
|
||||||
t, err = parse(&scan, s)
|
|
||||||
t, changed := t.canonicalize(c)
|
|
||||||
if changed {
|
if changed {
|
||||||
t.remakeString()
|
tt.RemakeString()
|
||||||
}
|
}
|
||||||
return t, err
|
return makeTag(tt), err
|
||||||
}
|
|
||||||
|
|
||||||
func parse(scan *scanner, s string) (t Tag, err error) {
|
|
||||||
t = und
|
|
||||||
var end int
|
|
||||||
if n := len(scan.token); n <= 1 {
|
|
||||||
scan.toLower(0, len(scan.b))
|
|
||||||
if n == 0 || scan.token[0] != 'x' {
|
|
||||||
return t, errSyntax
|
|
||||||
}
|
|
||||||
end = parseExtensions(scan)
|
|
||||||
} else if n >= 4 {
|
|
||||||
return und, errSyntax
|
|
||||||
} else { // the usual case
|
|
||||||
t, end = parseTag(scan)
|
|
||||||
if n := len(scan.token); n == 1 {
|
|
||||||
t.pExt = uint16(end)
|
|
||||||
end = parseExtensions(scan)
|
|
||||||
} else if end < len(scan.b) {
|
|
||||||
scan.setError(errSyntax)
|
|
||||||
scan.b = scan.b[:end]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if int(t.pVariant) < len(scan.b) {
|
|
||||||
if end < len(s) {
|
|
||||||
s = s[:end]
|
|
||||||
}
|
|
||||||
if len(s) > 0 && tag.Compare(s, scan.b) == 0 {
|
|
||||||
t.str = s
|
|
||||||
} else {
|
|
||||||
t.str = string(scan.b)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
t.pVariant, t.pExt = 0, 0
|
|
||||||
}
|
|
||||||
return t, scan.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseTag parses language, script, region and variants.
|
|
||||||
// It returns a Tag and the end position in the input that was parsed.
|
|
||||||
func parseTag(scan *scanner) (t Tag, end int) {
|
|
||||||
var e error
|
|
||||||
// TODO: set an error if an unknown lang, script or region is encountered.
|
|
||||||
t.lang, e = getLangID(scan.token)
|
|
||||||
scan.setError(e)
|
|
||||||
scan.replace(t.lang.String())
|
|
||||||
langStart := scan.start
|
|
||||||
end = scan.scan()
|
|
||||||
for len(scan.token) == 3 && isAlpha(scan.token[0]) {
|
|
||||||
// From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent
|
|
||||||
// to a tag of the form <extlang>.
|
|
||||||
lang, e := getLangID(scan.token)
|
|
||||||
if lang != 0 {
|
|
||||||
t.lang = lang
|
|
||||||
copy(scan.b[langStart:], lang.String())
|
|
||||||
scan.b[langStart+3] = '-'
|
|
||||||
scan.start = langStart + 4
|
|
||||||
}
|
|
||||||
scan.gobble(e)
|
|
||||||
end = scan.scan()
|
|
||||||
}
|
|
||||||
if len(scan.token) == 4 && isAlpha(scan.token[0]) {
|
|
||||||
t.script, e = getScriptID(script, scan.token)
|
|
||||||
if t.script == 0 {
|
|
||||||
scan.gobble(e)
|
|
||||||
}
|
|
||||||
end = scan.scan()
|
|
||||||
}
|
|
||||||
if n := len(scan.token); n >= 2 && n <= 3 {
|
|
||||||
t.region, e = getRegionID(scan.token)
|
|
||||||
if t.region == 0 {
|
|
||||||
scan.gobble(e)
|
|
||||||
} else {
|
|
||||||
scan.replace(t.region.String())
|
|
||||||
}
|
|
||||||
end = scan.scan()
|
|
||||||
}
|
|
||||||
scan.toLower(scan.start, len(scan.b))
|
|
||||||
t.pVariant = byte(end)
|
|
||||||
end = parseVariants(scan, end, t)
|
|
||||||
t.pExt = uint16(end)
|
|
||||||
return t, end
|
|
||||||
}
|
|
||||||
|
|
||||||
var separator = []byte{'-'}
|
|
||||||
|
|
||||||
// parseVariants scans tokens as long as each token is a valid variant string.
|
|
||||||
// Duplicate variants are removed.
|
|
||||||
func parseVariants(scan *scanner, end int, t Tag) int {
|
|
||||||
start := scan.start
|
|
||||||
varIDBuf := [4]uint8{}
|
|
||||||
variantBuf := [4][]byte{}
|
|
||||||
varID := varIDBuf[:0]
|
|
||||||
variant := variantBuf[:0]
|
|
||||||
last := -1
|
|
||||||
needSort := false
|
|
||||||
for ; len(scan.token) >= 4; scan.scan() {
|
|
||||||
// TODO: measure the impact of needing this conversion and redesign
|
|
||||||
// the data structure if there is an issue.
|
|
||||||
v, ok := variantIndex[string(scan.token)]
|
|
||||||
if !ok {
|
|
||||||
// unknown variant
|
|
||||||
// TODO: allow user-defined variants?
|
|
||||||
scan.gobble(mkErrInvalid(scan.token))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
varID = append(varID, v)
|
|
||||||
variant = append(variant, scan.token)
|
|
||||||
if !needSort {
|
|
||||||
if last < int(v) {
|
|
||||||
last = int(v)
|
|
||||||
} else {
|
|
||||||
needSort = true
|
|
||||||
// There is no legal combinations of more than 7 variants
|
|
||||||
// (and this is by no means a useful sequence).
|
|
||||||
const maxVariants = 8
|
|
||||||
if len(varID) > maxVariants {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
end = scan.end
|
|
||||||
}
|
|
||||||
if needSort {
|
|
||||||
sort.Sort(variantsSort{varID, variant})
|
|
||||||
k, l := 0, -1
|
|
||||||
for i, v := range varID {
|
|
||||||
w := int(v)
|
|
||||||
if l == w {
|
|
||||||
// Remove duplicates.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
varID[k] = varID[i]
|
|
||||||
variant[k] = variant[i]
|
|
||||||
k++
|
|
||||||
l = w
|
|
||||||
}
|
|
||||||
if str := bytes.Join(variant[:k], separator); len(str) == 0 {
|
|
||||||
end = start - 1
|
|
||||||
} else {
|
|
||||||
scan.resizeRange(start, end, len(str))
|
|
||||||
copy(scan.b[scan.start:], str)
|
|
||||||
end = scan.end
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return end
|
|
||||||
}
|
|
||||||
|
|
||||||
type variantsSort struct {
|
|
||||||
i []uint8
|
|
||||||
v [][]byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s variantsSort) Len() int {
|
|
||||||
return len(s.i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s variantsSort) Swap(i, j int) {
|
|
||||||
s.i[i], s.i[j] = s.i[j], s.i[i]
|
|
||||||
s.v[i], s.v[j] = s.v[j], s.v[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s variantsSort) Less(i, j int) bool {
|
|
||||||
return s.i[i] < s.i[j]
|
|
||||||
}
|
|
||||||
|
|
||||||
type bytesSort [][]byte
|
|
||||||
|
|
||||||
func (b bytesSort) Len() int {
|
|
||||||
return len(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b bytesSort) Swap(i, j int) {
|
|
||||||
b[i], b[j] = b[j], b[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b bytesSort) Less(i, j int) bool {
|
|
||||||
return bytes.Compare(b[i], b[j]) == -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseExtensions parses and normalizes the extensions in the buffer.
|
|
||||||
// It returns the last position of scan.b that is part of any extension.
|
|
||||||
// It also trims scan.b to remove excess parts accordingly.
|
|
||||||
func parseExtensions(scan *scanner) int {
|
|
||||||
start := scan.start
|
|
||||||
exts := [][]byte{}
|
|
||||||
private := []byte{}
|
|
||||||
end := scan.end
|
|
||||||
for len(scan.token) == 1 {
|
|
||||||
extStart := scan.start
|
|
||||||
ext := scan.token[0]
|
|
||||||
end = parseExtension(scan)
|
|
||||||
extension := scan.b[extStart:end]
|
|
||||||
if len(extension) < 3 || (ext != 'x' && len(extension) < 4) {
|
|
||||||
scan.setError(errSyntax)
|
|
||||||
end = extStart
|
|
||||||
continue
|
|
||||||
} else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) {
|
|
||||||
scan.b = scan.b[:end]
|
|
||||||
return end
|
|
||||||
} else if ext == 'x' {
|
|
||||||
private = extension
|
|
||||||
break
|
|
||||||
}
|
|
||||||
exts = append(exts, extension)
|
|
||||||
}
|
|
||||||
sort.Sort(bytesSort(exts))
|
|
||||||
if len(private) > 0 {
|
|
||||||
exts = append(exts, private)
|
|
||||||
}
|
|
||||||
scan.b = scan.b[:start]
|
|
||||||
if len(exts) > 0 {
|
|
||||||
scan.b = append(scan.b, bytes.Join(exts, separator)...)
|
|
||||||
} else if start > 0 {
|
|
||||||
// Strip trailing '-'.
|
|
||||||
scan.b = scan.b[:start-1]
|
|
||||||
}
|
|
||||||
return end
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseExtension parses a single extension and returns the position of
|
|
||||||
// the extension end.
|
|
||||||
func parseExtension(scan *scanner) int {
|
|
||||||
start, end := scan.start, scan.end
|
|
||||||
switch scan.token[0] {
|
|
||||||
case 'u':
|
|
||||||
attrStart := end
|
|
||||||
scan.scan()
|
|
||||||
for last := []byte{}; len(scan.token) > 2; scan.scan() {
|
|
||||||
if bytes.Compare(scan.token, last) != -1 {
|
|
||||||
// Attributes are unsorted. Start over from scratch.
|
|
||||||
p := attrStart + 1
|
|
||||||
scan.next = p
|
|
||||||
attrs := [][]byte{}
|
|
||||||
for scan.scan(); len(scan.token) > 2; scan.scan() {
|
|
||||||
attrs = append(attrs, scan.token)
|
|
||||||
end = scan.end
|
|
||||||
}
|
|
||||||
sort.Sort(bytesSort(attrs))
|
|
||||||
copy(scan.b[p:], bytes.Join(attrs, separator))
|
|
||||||
break
|
|
||||||
}
|
|
||||||
last = scan.token
|
|
||||||
end = scan.end
|
|
||||||
}
|
|
||||||
var last, key []byte
|
|
||||||
for attrEnd := end; len(scan.token) == 2; last = key {
|
|
||||||
key = scan.token
|
|
||||||
keyEnd := scan.end
|
|
||||||
end = scan.acceptMinSize(3)
|
|
||||||
// TODO: check key value validity
|
|
||||||
if keyEnd == end || bytes.Compare(key, last) != 1 {
|
|
||||||
// We have an invalid key or the keys are not sorted.
|
|
||||||
// Start scanning keys from scratch and reorder.
|
|
||||||
p := attrEnd + 1
|
|
||||||
scan.next = p
|
|
||||||
keys := [][]byte{}
|
|
||||||
for scan.scan(); len(scan.token) == 2; {
|
|
||||||
keyStart, keyEnd := scan.start, scan.end
|
|
||||||
end = scan.acceptMinSize(3)
|
|
||||||
if keyEnd != end {
|
|
||||||
keys = append(keys, scan.b[keyStart:end])
|
|
||||||
} else {
|
|
||||||
scan.setError(errSyntax)
|
|
||||||
end = keyStart
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.Sort(bytesSort(keys))
|
|
||||||
reordered := bytes.Join(keys, separator)
|
|
||||||
if e := p + len(reordered); e < end {
|
|
||||||
scan.deleteRange(e, end)
|
|
||||||
end = e
|
|
||||||
}
|
|
||||||
copy(scan.b[p:], bytes.Join(keys, separator))
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case 't':
|
|
||||||
scan.scan()
|
|
||||||
if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) {
|
|
||||||
_, end = parseTag(scan)
|
|
||||||
scan.toLower(start, end)
|
|
||||||
}
|
|
||||||
for len(scan.token) == 2 && !isAlpha(scan.token[1]) {
|
|
||||||
end = scan.acceptMinSize(3)
|
|
||||||
}
|
|
||||||
case 'x':
|
|
||||||
end = scan.acceptMinSize(1)
|
|
||||||
default:
|
|
||||||
end = scan.acceptMinSize(2)
|
|
||||||
}
|
|
||||||
return end
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Compose creates a Tag from individual parts, which may be of type Tag, Base,
|
// Compose creates a Tag from individual parts, which may be of type Tag, Base,
|
||||||
@ -563,10 +59,11 @@ func parseExtension(scan *scanner) int {
|
|||||||
// Base, Script or Region or slice of type Variant or Extension is passed more
|
// Base, Script or Region or slice of type Variant or Extension is passed more
|
||||||
// than once, the latter will overwrite the former. Variants and Extensions are
|
// than once, the latter will overwrite the former. Variants and Extensions are
|
||||||
// accumulated, but if two extensions of the same type are passed, the latter
|
// accumulated, but if two extensions of the same type are passed, the latter
|
||||||
// will replace the former. A Tag overwrites all former values and typically
|
// will replace the former. For -u extensions, though, the key-type pairs are
|
||||||
// only makes sense as the first argument. The resulting tag is returned after
|
// added, where later values overwrite older ones. A Tag overwrites all former
|
||||||
// canonicalizing using the Default CanonType. If one or more errors are
|
// values and typically only makes sense as the first argument. The resulting
|
||||||
// encountered, one of the errors is returned.
|
// tag is returned after canonicalizing using the Default CanonType. If one or
|
||||||
|
// more errors are encountered, one of the errors is returned.
|
||||||
func Compose(part ...interface{}) (t Tag, err error) {
|
func Compose(part ...interface{}) (t Tag, err error) {
|
||||||
return Default.Compose(part...)
|
return Default.Compose(part...)
|
||||||
}
|
}
|
||||||
@ -576,196 +73,68 @@ func Compose(part ...interface{}) (t Tag, err error) {
|
|||||||
// Base, Script or Region or slice of type Variant or Extension is passed more
|
// Base, Script or Region or slice of type Variant or Extension is passed more
|
||||||
// than once, the latter will overwrite the former. Variants and Extensions are
|
// than once, the latter will overwrite the former. Variants and Extensions are
|
||||||
// accumulated, but if two extensions of the same type are passed, the latter
|
// accumulated, but if two extensions of the same type are passed, the latter
|
||||||
// will replace the former. A Tag overwrites all former values and typically
|
// will replace the former. For -u extensions, though, the key-type pairs are
|
||||||
// only makes sense as the first argument. The resulting tag is returned after
|
// added, where later values overwrite older ones. A Tag overwrites all former
|
||||||
// canonicalizing using CanonType c. If one or more errors are encountered,
|
// values and typically only makes sense as the first argument. The resulting
|
||||||
// one of the errors is returned.
|
// tag is returned after canonicalizing using CanonType c. If one or more errors
|
||||||
|
// are encountered, one of the errors is returned.
|
||||||
func (c CanonType) Compose(part ...interface{}) (t Tag, err error) {
|
func (c CanonType) Compose(part ...interface{}) (t Tag, err error) {
|
||||||
var b builder
|
var b language.Builder
|
||||||
if err = b.update(part...); err != nil {
|
if err = update(&b, part...); err != nil {
|
||||||
return und, err
|
return und, err
|
||||||
}
|
}
|
||||||
t, _ = b.tag.canonicalize(c)
|
b.Tag, _ = canonicalize(c, b.Tag)
|
||||||
|
return makeTag(b.Make()), err
|
||||||
if len(b.ext) > 0 || len(b.variant) > 0 {
|
|
||||||
sort.Sort(sortVariant(b.variant))
|
|
||||||
sort.Strings(b.ext)
|
|
||||||
if b.private != "" {
|
|
||||||
b.ext = append(b.ext, b.private)
|
|
||||||
}
|
|
||||||
n := maxCoreSize + tokenLen(b.variant...) + tokenLen(b.ext...)
|
|
||||||
buf := make([]byte, n)
|
|
||||||
p := t.genCoreBytes(buf)
|
|
||||||
t.pVariant = byte(p)
|
|
||||||
p += appendTokens(buf[p:], b.variant...)
|
|
||||||
t.pExt = uint16(p)
|
|
||||||
p += appendTokens(buf[p:], b.ext...)
|
|
||||||
t.str = string(buf[:p])
|
|
||||||
} else if b.private != "" {
|
|
||||||
t.str = b.private
|
|
||||||
t.remakeString()
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
type builder struct {
|
|
||||||
tag Tag
|
|
||||||
|
|
||||||
private string // the x extension
|
|
||||||
ext []string
|
|
||||||
variant []string
|
|
||||||
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *builder) addExt(e string) {
|
|
||||||
if e == "" {
|
|
||||||
} else if e[0] == 'x' {
|
|
||||||
b.private = e
|
|
||||||
} else {
|
|
||||||
b.ext = append(b.ext, e)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var errInvalidArgument = errors.New("invalid Extension or Variant")
|
var errInvalidArgument = errors.New("invalid Extension or Variant")
|
||||||
|
|
||||||
func (b *builder) update(part ...interface{}) (err error) {
|
func update(b *language.Builder, part ...interface{}) (err error) {
|
||||||
replace := func(l *[]string, s string, eq func(a, b string) bool) bool {
|
|
||||||
if s == "" {
|
|
||||||
b.err = errInvalidArgument
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
for i, v := range *l {
|
|
||||||
if eq(v, s) {
|
|
||||||
(*l)[i] = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, x := range part {
|
for _, x := range part {
|
||||||
switch v := x.(type) {
|
switch v := x.(type) {
|
||||||
case Tag:
|
case Tag:
|
||||||
b.tag.lang = v.lang
|
b.SetTag(v.tag())
|
||||||
b.tag.region = v.region
|
|
||||||
b.tag.script = v.script
|
|
||||||
if v.str != "" {
|
|
||||||
b.variant = nil
|
|
||||||
for x, s := "", v.str[v.pVariant:v.pExt]; s != ""; {
|
|
||||||
x, s = nextToken(s)
|
|
||||||
b.variant = append(b.variant, x)
|
|
||||||
}
|
|
||||||
b.ext, b.private = nil, ""
|
|
||||||
for i, e := int(v.pExt), ""; i < len(v.str); {
|
|
||||||
i, e = getExtension(v.str, i)
|
|
||||||
b.addExt(e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case Base:
|
case Base:
|
||||||
b.tag.lang = v.langID
|
b.Tag.LangID = v.langID
|
||||||
case Script:
|
case Script:
|
||||||
b.tag.script = v.scriptID
|
b.Tag.ScriptID = v.scriptID
|
||||||
case Region:
|
case Region:
|
||||||
b.tag.region = v.regionID
|
b.Tag.RegionID = v.regionID
|
||||||
case Variant:
|
case Variant:
|
||||||
if !replace(&b.variant, v.variant, func(a, b string) bool { return a == b }) {
|
if v.variant == "" {
|
||||||
b.variant = append(b.variant, v.variant)
|
err = errInvalidArgument
|
||||||
|
break
|
||||||
}
|
}
|
||||||
|
b.AddVariant(v.variant)
|
||||||
case Extension:
|
case Extension:
|
||||||
if !replace(&b.ext, v.s, func(a, b string) bool { return a[0] == b[0] }) {
|
if v.s == "" {
|
||||||
b.addExt(v.s)
|
err = errInvalidArgument
|
||||||
|
break
|
||||||
}
|
}
|
||||||
|
b.SetExt(v.s)
|
||||||
case []Variant:
|
case []Variant:
|
||||||
b.variant = nil
|
b.ClearVariants()
|
||||||
for _, x := range v {
|
for _, v := range v {
|
||||||
b.update(x)
|
b.AddVariant(v.variant)
|
||||||
}
|
}
|
||||||
case []Extension:
|
case []Extension:
|
||||||
b.ext, b.private = nil, ""
|
b.ClearExtensions()
|
||||||
for _, e := range v {
|
for _, e := range v {
|
||||||
b.update(e)
|
b.SetExt(e.s)
|
||||||
}
|
}
|
||||||
// TODO: support parsing of raw strings based on morphology or just extensions?
|
// TODO: support parsing of raw strings based on morphology or just extensions?
|
||||||
case error:
|
case error:
|
||||||
|
if v != nil {
|
||||||
err = v
|
err = v
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func tokenLen(token ...string) (n int) {
|
|
||||||
for _, t := range token {
|
|
||||||
n += len(t) + 1
|
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func appendTokens(b []byte, token ...string) int {
|
|
||||||
p := 0
|
|
||||||
for _, t := range token {
|
|
||||||
b[p] = '-'
|
|
||||||
copy(b[p+1:], t)
|
|
||||||
p += 1 + len(t)
|
|
||||||
}
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
type sortVariant []string
|
|
||||||
|
|
||||||
func (s sortVariant) Len() int {
|
|
||||||
return len(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s sortVariant) Swap(i, j int) {
|
|
||||||
s[j], s[i] = s[i], s[j]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s sortVariant) Less(i, j int) bool {
|
|
||||||
return variantIndex[s[i]] < variantIndex[s[j]]
|
|
||||||
}
|
|
||||||
|
|
||||||
func findExt(list []string, x byte) int {
|
|
||||||
for i, e := range list {
|
|
||||||
if e[0] == x {
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// getExtension returns the name, body and end position of the extension.
|
|
||||||
func getExtension(s string, p int) (end int, ext string) {
|
|
||||||
if s[p] == '-' {
|
|
||||||
p++
|
|
||||||
}
|
|
||||||
if s[p] == 'x' {
|
|
||||||
return len(s), s[p:]
|
|
||||||
}
|
|
||||||
end = nextExtension(s, p)
|
|
||||||
return end, s[p:end]
|
|
||||||
}
|
|
||||||
|
|
||||||
// nextExtension finds the next extension within the string, searching
|
|
||||||
// for the -<char>- pattern from position p.
|
|
||||||
// In the fast majority of cases, language tags will have at most
|
|
||||||
// one extension and extensions tend to be small.
|
|
||||||
func nextExtension(s string, p int) int {
|
|
||||||
for n := len(s) - 3; p < n; {
|
|
||||||
if s[p] == '-' {
|
|
||||||
if s[p+2] == '-' {
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
p += 3
|
|
||||||
} else {
|
|
||||||
p++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return len(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
  var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight")

- // ParseAcceptLanguage parses the contents of a Accept-Language header as
+ // ParseAcceptLanguage parses the contents of an Accept-Language header as
  // defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and
  // a list of corresponding quality weights. It is more permissive than RFC 2616
  // and may return non-nil slices even if the input is not valid.
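A short usage sketch for the function documented above; it relies only on the exported ParseAcceptLanguage signature visible in this file, and the quality weights come back as a slice parallel to the tags:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	tags, weights, err := language.ParseAcceptLanguage("nl;q=0.8, en-GB, en;q=0.9")
	if err != nil {
		fmt.Println("parse error:", err)
	}
	for i, t := range tags {
		fmt.Printf("%v q=%.1f\n", t, weights[i])
	}
}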
@@ -788,7 +157,7 @@ func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) {
  if !ok {
  return nil, nil, err
  }
- t = Tag{lang: id}
+ t = makeTag(language.Tag{LangID: id})
  }

  // Scan the optional weight.
@@ -832,7 +201,7 @@ func split(s string, c byte) (head, tail string) {

  // Add hack mapping to deal with a small number of cases that that occur
  // in Accept-Language (with reasonable frequency).
- var acceptFallback = map[string]langID{
+ var acceptFallback = map[string]language.Language{
  "english": _en,
  "deutsch": _de,
  "italian": _it,
3777 vendor/golang.org/x/text/language/tables.go generated vendored
File diff suppressed because it is too large
160 vendor/golang.org/x/text/language/tags.go generated vendored
@@ -4,6 +4,8 @@

  package language

+ import "golang.org/x/text/internal/language/compact"

  // TODO: Various sets of commonly use tags and regions.

  // MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
@@ -61,83 +63,83 @@ var (

  Und Tag = Tag{}

- Afrikaans Tag = Tag{lang: _af} // af
+ Afrikaans Tag = Tag(compact.Afrikaans)
- Amharic Tag = Tag{lang: _am} // am
+ Amharic Tag = Tag(compact.Amharic)
- Arabic Tag = Tag{lang: _ar} // ar
+ Arabic Tag = Tag(compact.Arabic)
- ModernStandardArabic Tag = Tag{lang: _ar, region: _001} // ar-001
+ ModernStandardArabic Tag = Tag(compact.ModernStandardArabic)
- Azerbaijani Tag = Tag{lang: _az} // az
+ Azerbaijani Tag = Tag(compact.Azerbaijani)
- Bulgarian Tag = Tag{lang: _bg} // bg
+ Bulgarian Tag = Tag(compact.Bulgarian)
- Bengali Tag = Tag{lang: _bn} // bn
+ Bengali Tag = Tag(compact.Bengali)
- Catalan Tag = Tag{lang: _ca} // ca
+ Catalan Tag = Tag(compact.Catalan)
- Czech Tag = Tag{lang: _cs} // cs
+ Czech Tag = Tag(compact.Czech)
- Danish Tag = Tag{lang: _da} // da
+ Danish Tag = Tag(compact.Danish)
- German Tag = Tag{lang: _de} // de
+ German Tag = Tag(compact.German)
- Greek Tag = Tag{lang: _el} // el
+ Greek Tag = Tag(compact.Greek)
- English Tag = Tag{lang: _en} // en
+ English Tag = Tag(compact.English)
- AmericanEnglish Tag = Tag{lang: _en, region: _US} // en-US
+ AmericanEnglish Tag = Tag(compact.AmericanEnglish)
- BritishEnglish Tag = Tag{lang: _en, region: _GB} // en-GB
+ BritishEnglish Tag = Tag(compact.BritishEnglish)
- Spanish Tag = Tag{lang: _es} // es
+ Spanish Tag = Tag(compact.Spanish)
- EuropeanSpanish Tag = Tag{lang: _es, region: _ES} // es-ES
+ EuropeanSpanish Tag = Tag(compact.EuropeanSpanish)
- LatinAmericanSpanish Tag = Tag{lang: _es, region: _419} // es-419
+ LatinAmericanSpanish Tag = Tag(compact.LatinAmericanSpanish)
- Estonian Tag = Tag{lang: _et} // et
+ Estonian Tag = Tag(compact.Estonian)
- Persian Tag = Tag{lang: _fa} // fa
+ Persian Tag = Tag(compact.Persian)
- Finnish Tag = Tag{lang: _fi} // fi
+ Finnish Tag = Tag(compact.Finnish)
- Filipino Tag = Tag{lang: _fil} // fil
+ Filipino Tag = Tag(compact.Filipino)
- French Tag = Tag{lang: _fr} // fr
+ French Tag = Tag(compact.French)
- CanadianFrench Tag = Tag{lang: _fr, region: _CA} // fr-CA
+ CanadianFrench Tag = Tag(compact.CanadianFrench)
- Gujarati Tag = Tag{lang: _gu} // gu
+ Gujarati Tag = Tag(compact.Gujarati)
- Hebrew Tag = Tag{lang: _he} // he
+ Hebrew Tag = Tag(compact.Hebrew)
- Hindi Tag = Tag{lang: _hi} // hi
+ Hindi Tag = Tag(compact.Hindi)
- Croatian Tag = Tag{lang: _hr} // hr
+ Croatian Tag = Tag(compact.Croatian)
- Hungarian Tag = Tag{lang: _hu} // hu
+ Hungarian Tag = Tag(compact.Hungarian)
- Armenian Tag = Tag{lang: _hy} // hy
+ Armenian Tag = Tag(compact.Armenian)
- Indonesian Tag = Tag{lang: _id} // id
+ Indonesian Tag = Tag(compact.Indonesian)
- Icelandic Tag = Tag{lang: _is} // is
+ Icelandic Tag = Tag(compact.Icelandic)
- Italian Tag = Tag{lang: _it} // it
+ Italian Tag = Tag(compact.Italian)
- Japanese Tag = Tag{lang: _ja} // ja
+ Japanese Tag = Tag(compact.Japanese)
- Georgian Tag = Tag{lang: _ka} // ka
+ Georgian Tag = Tag(compact.Georgian)
- Kazakh Tag = Tag{lang: _kk} // kk
+ Kazakh Tag = Tag(compact.Kazakh)
- Khmer Tag = Tag{lang: _km} // km
+ Khmer Tag = Tag(compact.Khmer)
- Kannada Tag = Tag{lang: _kn} // kn
+ Kannada Tag = Tag(compact.Kannada)
- Korean Tag = Tag{lang: _ko} // ko
+ Korean Tag = Tag(compact.Korean)
- Kirghiz Tag = Tag{lang: _ky} // ky
+ Kirghiz Tag = Tag(compact.Kirghiz)
- Lao Tag = Tag{lang: _lo} // lo
+ Lao Tag = Tag(compact.Lao)
- Lithuanian Tag = Tag{lang: _lt} // lt
+ Lithuanian Tag = Tag(compact.Lithuanian)
- Latvian Tag = Tag{lang: _lv} // lv
+ Latvian Tag = Tag(compact.Latvian)
- Macedonian Tag = Tag{lang: _mk} // mk
+ Macedonian Tag = Tag(compact.Macedonian)
- Malayalam Tag = Tag{lang: _ml} // ml
+ Malayalam Tag = Tag(compact.Malayalam)
- Mongolian Tag = Tag{lang: _mn} // mn
+ Mongolian Tag = Tag(compact.Mongolian)
- Marathi Tag = Tag{lang: _mr} // mr
+ Marathi Tag = Tag(compact.Marathi)
- Malay Tag = Tag{lang: _ms} // ms
+ Malay Tag = Tag(compact.Malay)
- Burmese Tag = Tag{lang: _my} // my
+ Burmese Tag = Tag(compact.Burmese)
- Nepali Tag = Tag{lang: _ne} // ne
+ Nepali Tag = Tag(compact.Nepali)
- Dutch Tag = Tag{lang: _nl} // nl
+ Dutch Tag = Tag(compact.Dutch)
- Norwegian Tag = Tag{lang: _no} // no
+ Norwegian Tag = Tag(compact.Norwegian)
- Punjabi Tag = Tag{lang: _pa} // pa
+ Punjabi Tag = Tag(compact.Punjabi)
- Polish Tag = Tag{lang: _pl} // pl
+ Polish Tag = Tag(compact.Polish)
- Portuguese Tag = Tag{lang: _pt} // pt
+ Portuguese Tag = Tag(compact.Portuguese)
- BrazilianPortuguese Tag = Tag{lang: _pt, region: _BR} // pt-BR
+ BrazilianPortuguese Tag = Tag(compact.BrazilianPortuguese)
- EuropeanPortuguese Tag = Tag{lang: _pt, region: _PT} // pt-PT
+ EuropeanPortuguese Tag = Tag(compact.EuropeanPortuguese)
- Romanian Tag = Tag{lang: _ro} // ro
+ Romanian Tag = Tag(compact.Romanian)
- Russian Tag = Tag{lang: _ru} // ru
+ Russian Tag = Tag(compact.Russian)
- Sinhala Tag = Tag{lang: _si} // si
+ Sinhala Tag = Tag(compact.Sinhala)
- Slovak Tag = Tag{lang: _sk} // sk
+ Slovak Tag = Tag(compact.Slovak)
- Slovenian Tag = Tag{lang: _sl} // sl
+ Slovenian Tag = Tag(compact.Slovenian)
- Albanian Tag = Tag{lang: _sq} // sq
+ Albanian Tag = Tag(compact.Albanian)
- Serbian Tag = Tag{lang: _sr} // sr
+ Serbian Tag = Tag(compact.Serbian)
- SerbianLatin Tag = Tag{lang: _sr, script: _Latn} // sr-Latn
+ SerbianLatin Tag = Tag(compact.SerbianLatin)
- Swedish Tag = Tag{lang: _sv} // sv
+ Swedish Tag = Tag(compact.Swedish)
- Swahili Tag = Tag{lang: _sw} // sw
+ Swahili Tag = Tag(compact.Swahili)
- Tamil Tag = Tag{lang: _ta} // ta
+ Tamil Tag = Tag(compact.Tamil)
- Telugu Tag = Tag{lang: _te} // te
+ Telugu Tag = Tag(compact.Telugu)
- Thai Tag = Tag{lang: _th} // th
+ Thai Tag = Tag(compact.Thai)
- Turkish Tag = Tag{lang: _tr} // tr
+ Turkish Tag = Tag(compact.Turkish)
- Ukrainian Tag = Tag{lang: _uk} // uk
+ Ukrainian Tag = Tag(compact.Ukrainian)
- Urdu Tag = Tag{lang: _ur} // ur
+ Urdu Tag = Tag(compact.Urdu)
- Uzbek Tag = Tag{lang: _uz} // uz
+ Uzbek Tag = Tag(compact.Uzbek)
- Vietnamese Tag = Tag{lang: _vi} // vi
+ Vietnamese Tag = Tag(compact.Vietnamese)
- Chinese Tag = Tag{lang: _zh} // zh
+ Chinese Tag = Tag(compact.Chinese)
- SimplifiedChinese Tag = Tag{lang: _zh, script: _Hans} // zh-Hans
+ SimplifiedChinese Tag = Tag(compact.SimplifiedChinese)
- TraditionalChinese Tag = Tag{lang: _zh, script: _Hant} // zh-Hant
+ TraditionalChinese Tag = Tag(compact.TraditionalChinese)
- Zulu Tag = Tag{lang: _zu} // zu
+ Zulu Tag = Tag(compact.Zulu)
  )
72 vendor/manifest vendored
@@ -145,6 +145,15 @@
  "path": "/spew",
  "notests": true
  },
+ {
+ "importpath": "github.com/dfordsoft/golib/ic",
+ "repository": "https://github.com/dfordsoft/golib",
+ "vcs": "git",
+ "revision": "2ea3495aee1d7f72bb7f388432f2fad974270e12",
+ "branch": "master",
+ "path": "/ic",
+ "notests": true
+ },
  {
  "importpath": "github.com/dgrijalva/jwt-go",
  "repository": "https://github.com/dgrijalva/jwt-go",
@@ -1014,6 +1023,24 @@
  "path": "/windows",
  "notests": true
  },
+ {
+ "importpath": "golang.org/x/text/encoding",
+ "repository": "https://go.googlesource.com/text",
+ "vcs": "git",
+ "revision": "5c1cf69b5978e5a34c5f9ba09a83e56acc4b7877",
+ "branch": "master",
+ "path": "/encoding",
+ "notests": true
+ },
+ {
+ "importpath": "golang.org/x/text/internal/format",
+ "repository": "https://go.googlesource.com/text",
+ "vcs": "git",
+ "revision": "5c1cf69b5978e5a34c5f9ba09a83e56acc4b7877",
+ "branch": "master",
+ "path": "internal/format",
+ "notests": true
+ },
  {
  "importpath": "golang.org/x/text/internal/gen",
  "repository": "https://go.googlesource.com/text",
@@ -1023,6 +1050,24 @@
  "path": "internal/gen",
  "notests": true
  },
+ {
+ "importpath": "golang.org/x/text/internal/language",
+ "repository": "https://go.googlesource.com/text",
+ "vcs": "git",
+ "revision": "5c1cf69b5978e5a34c5f9ba09a83e56acc4b7877",
+ "branch": "master",
+ "path": "internal/language",
+ "notests": true
+ },
+ {
+ "importpath": "golang.org/x/text/internal/tag",
+ "repository": "https://go.googlesource.com/text",
+ "vcs": "git",
+ "revision": "5c1cf69b5978e5a34c5f9ba09a83e56acc4b7877",
+ "branch": "master",
+ "path": "internal/tag",
+ "notests": true
+ },
  {
  "importpath": "golang.org/x/text/internal/triegen",
  "repository": "https://go.googlesource.com/text",
@@ -1041,6 +1086,33 @@
  "path": "internal/ucd",
  "notests": true
  },
+ {
+ "importpath": "golang.org/x/text/internal/utf8internal",
+ "repository": "https://go.googlesource.com/text",
+ "vcs": "git",
+ "revision": "5c1cf69b5978e5a34c5f9ba09a83e56acc4b7877",
+ "branch": "master",
+ "path": "internal/utf8internal",
+ "notests": true
+ },
+ {
+ "importpath": "golang.org/x/text/language",
+ "repository": "https://go.googlesource.com/text",
+ "vcs": "git",
+ "revision": "5c1cf69b5978e5a34c5f9ba09a83e56acc4b7877",
+ "branch": "master",
+ "path": "language",
+ "notests": true
+ },
+ {
+ "importpath": "golang.org/x/text/runes",
+ "repository": "https://go.googlesource.com/text",
+ "vcs": "git",
+ "revision": "5c1cf69b5978e5a34c5f9ba09a83e56acc4b7877",
+ "branch": "master",
+ "path": "runes",
+ "notests": true
+ },
  {
  "importpath": "golang.org/x/text/transform",
  "repository": "https://go.googlesource.com/text",