added vendors for project
This commit is contained in:
parent
3e9262e843
commit
1e60a4fd24
12
go.mod
Normal file
12
go.mod
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
module fuelprices
|
||||||
|
|
||||||
|
go 1.12
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/antchfx/xmlquery v1.0.0
|
||||||
|
github.com/antchfx/xpath v0.0.0-20190319080838-ce1d48779e67 // indirect
|
||||||
|
github.com/influxdata/influxdb1-client v0.0.0-20190402204710-8ff2fc3824fc
|
||||||
|
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect
|
||||||
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65 // indirect
|
||||||
|
gopkg.in/ini.v1 v1.42.0
|
||||||
|
)
|
23
go.sum
Normal file
23
go.sum
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
github.com/antchfx/xmlquery v1.0.0 h1:YuEPqexGG2opZKNc9JU3Zw6zFXwC47wNcy6/F8oKsrM=
|
||||||
|
github.com/antchfx/xmlquery v1.0.0/go.mod h1:/+CnyD/DzHRnv2eRxrVbieRU/FIF6N0C+7oTtyUtCKk=
|
||||||
|
github.com/antchfx/xpath v0.0.0-20190319080838-ce1d48779e67 h1:uj4UuiIs53RhHSySIupR1TEIouckjSfnljF3QbN1yh0=
|
||||||
|
github.com/antchfx/xpath v0.0.0-20190319080838-ce1d48779e67/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
|
github.com/influxdata/influxdb1-client v0.0.0-20190402204710-8ff2fc3824fc h1:KpMgaYJRieDkHZJWY3LMafvtqS/U8xX6+lUN+OKpl/Y=
|
||||||
|
github.com/influxdata/influxdb1-client v0.0.0-20190402204710-8ff2fc3824fc/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||||
|
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||||
|
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||||
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||||
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||||
|
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
|
||||||
|
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ=
|
||||||
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk=
|
||||||
|
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
32
vendor/github.com/antchfx/xmlquery/.gitignore
generated
vendored
Normal file
32
vendor/github.com/antchfx/xmlquery/.gitignore
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
# vscode
|
||||||
|
.vscode
|
||||||
|
debug
|
||||||
|
*.test
|
||||||
|
|
||||||
|
./build
|
||||||
|
|
||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||||
|
*.o
|
||||||
|
*.a
|
||||||
|
*.so
|
||||||
|
|
||||||
|
|
||||||
|
# Folders
|
||||||
|
_obj
|
||||||
|
_test
|
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes
|
||||||
|
*.[568vq]
|
||||||
|
[568vq].out
|
||||||
|
|
||||||
|
*.cgo1.go
|
||||||
|
*.cgo2.c
|
||||||
|
_cgo_defun.c
|
||||||
|
_cgo_gotypes.go
|
||||||
|
_cgo_export.*
|
||||||
|
|
||||||
|
_testmain.go
|
||||||
|
|
||||||
|
*.exe
|
||||||
|
*.test
|
||||||
|
*.prof
|
14
vendor/github.com/antchfx/xmlquery/.travis.yml
generated
vendored
Normal file
14
vendor/github.com/antchfx/xmlquery/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.6
|
||||||
|
- 1.7
|
||||||
|
- 1.8
|
||||||
|
|
||||||
|
install:
|
||||||
|
- go get golang.org/x/net/html/charset
|
||||||
|
- go get github.com/antchfx/xpath
|
||||||
|
- go get github.com/mattn/goveralls
|
||||||
|
|
||||||
|
script:
|
||||||
|
- $HOME/gopath/bin/goveralls -service=travis-ci
|
17
vendor/github.com/antchfx/xmlquery/LICENSE
generated
vendored
Normal file
17
vendor/github.com/antchfx/xmlquery/LICENSE
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
186
vendor/github.com/antchfx/xmlquery/README.md
generated
vendored
Normal file
186
vendor/github.com/antchfx/xmlquery/README.md
generated
vendored
Normal file
@ -0,0 +1,186 @@
|
|||||||
|
xmlquery
|
||||||
|
====
|
||||||
|
[![Build Status](https://travis-ci.org/antchfx/xmlquery.svg?branch=master)](https://travis-ci.org/antchfx/xmlquery)
|
||||||
|
[![Coverage Status](https://coveralls.io/repos/github/antchfx/xmlquery/badge.svg?branch=master)](https://coveralls.io/github/antchfx/xmlquery?branch=master)
|
||||||
|
[![GoDoc](https://godoc.org/github.com/antchfx/xmlquery?status.svg)](https://godoc.org/github.com/antchfx/xmlquery)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xmlquery)](https://goreportcard.com/report/github.com/antchfx/xmlquery)
|
||||||
|
|
||||||
|
Overview
|
||||||
|
===
|
||||||
|
|
||||||
|
xmlquery is an XPath query package for XML document, lets you extract data or evaluate from XML documents by an XPath expression.
|
||||||
|
|
||||||
|
Change Logs
|
||||||
|
===
|
||||||
|
|
||||||
|
**2018-12-23**
|
||||||
|
* added XML output will including comment node. [#9](https://github.com/antchfx/xmlquery/issues/9)
|
||||||
|
|
||||||
|
**2018-12-03**
|
||||||
|
* added support attribute name with namespace prefix and XML output. [#6](https://github.com/antchfx/xmlquery/issues/6)
|
||||||
|
|
||||||
|
Installation
|
||||||
|
====
|
||||||
|
|
||||||
|
> $ go get github.com/antchfx/xmlquery
|
||||||
|
|
||||||
|
Getting Started
|
||||||
|
===
|
||||||
|
|
||||||
|
#### Parse a XML from URL.
|
||||||
|
|
||||||
|
```go
|
||||||
|
doc, err := xmlquery.LoadURL("http://www.example.com/sitemap.xml")
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parse a XML from string.
|
||||||
|
|
||||||
|
```go
|
||||||
|
s := `<?xml version="1.0" encoding="utf-8"?><rss version="2.0"></rss>`
|
||||||
|
doc, err := xmlquery.Parse(strings.NewReader(s))
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parse a XML from io.Reader.
|
||||||
|
|
||||||
|
```go
|
||||||
|
f, err := os.Open("../books.xml")
|
||||||
|
doc, err := xmlquery.Parse(f)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Find authors of all books in the bookstore.
|
||||||
|
|
||||||
|
```go
|
||||||
|
list := xmlquery.Find(doc, "//book//author")
|
||||||
|
// or
|
||||||
|
list := xmlquery.Find(doc, "//author")
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Find the second book.
|
||||||
|
|
||||||
|
```go
|
||||||
|
book := xmlquery.FindOne(doc, "//book[2]")
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Find all book elements and only get `id` attribute self. (New Feature)
|
||||||
|
|
||||||
|
```go
|
||||||
|
list := xmlquery.Find(doc,"//book/@id")
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Find all books with id is bk104.
|
||||||
|
|
||||||
|
```go
|
||||||
|
list := xmlquery.Find(doc, "//book[@id='bk104']")
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Find all books that price less than 5.
|
||||||
|
|
||||||
|
```go
|
||||||
|
list := xmlquery.Find(doc, "//book[price<5]")
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Evaluate the total price of all books.
|
||||||
|
|
||||||
|
```go
|
||||||
|
expr, err := xpath.Compile("sum(//book/price)")
|
||||||
|
price := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
|
||||||
|
fmt.Printf("total price: %f\n", price)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Evaluate the number of all books element.
|
||||||
|
|
||||||
|
```go
|
||||||
|
expr, err := xpath.Compile("count(//book)")
|
||||||
|
price := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Create XML document.
|
||||||
|
|
||||||
|
```go
|
||||||
|
doc := &xmlquery.Node{
|
||||||
|
Type: xmlquery.DeclarationNode,
|
||||||
|
Data: "xml",
|
||||||
|
Attr: []xml.Attr{
|
||||||
|
xml.Attr{Name: xml.Name{Local: "version"}, Value: "1.0"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
root := &xmlquery.Node{
|
||||||
|
Data: "rss",
|
||||||
|
Type: xmlquery.ElementNode,
|
||||||
|
}
|
||||||
|
doc.FirstChild = root
|
||||||
|
channel := &xmlquery.Node{
|
||||||
|
Data: "channel",
|
||||||
|
Type: xmlquery.ElementNode,
|
||||||
|
}
|
||||||
|
root.FirstChild = channel
|
||||||
|
title := &xmlquery.Node{
|
||||||
|
Data: "title",
|
||||||
|
Type: xmlquery.ElementNode,
|
||||||
|
}
|
||||||
|
title_text := &xmlquery.Node{
|
||||||
|
Data: "W3Schools Home Page",
|
||||||
|
Type: xmlquery.TextNode,
|
||||||
|
}
|
||||||
|
title.FirstChild = title_text
|
||||||
|
channel.FirstChild = title
|
||||||
|
fmt.Println(doc.OutputXML(true))
|
||||||
|
// <?xml version="1.0"?><rss><channel><title>W3Schools Home Page</title></channel></rss>
|
||||||
|
```
|
||||||
|
|
||||||
|
Quick Tutorial
|
||||||
|
===
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
"github.com/antchfx/xmlquery"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main(){
|
||||||
|
s := `<?xml version="1.0" encoding="UTF-8" ?>
|
||||||
|
<rss version="2.0">
|
||||||
|
<channel>
|
||||||
|
<title>W3Schools Home Page</title>
|
||||||
|
<link>https://www.w3schools.com</link>
|
||||||
|
<description>Free web building tutorials</description>
|
||||||
|
<item>
|
||||||
|
<title>RSS Tutorial</title>
|
||||||
|
<link>https://www.w3schools.com/xml/xml_rss.asp</link>
|
||||||
|
<description>New RSS tutorial on W3Schools</description>
|
||||||
|
</item>
|
||||||
|
<item>
|
||||||
|
<title>XML Tutorial</title>
|
||||||
|
<link>https://www.w3schools.com/xml</link>
|
||||||
|
<description>New XML tutorial on W3Schools</description>
|
||||||
|
</item>
|
||||||
|
</channel>
|
||||||
|
</rss>`
|
||||||
|
|
||||||
|
doc, err := xmlquery.Parse(strings.NewReader(s))
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
channel := xmlquery.FindOne(doc, "//channel")
|
||||||
|
if n := channel.SelectElement("title"); n != nil {
|
||||||
|
fmt.Printf("title: %s\n", n.InnerText())
|
||||||
|
}
|
||||||
|
if n := channel.SelectElement("link"); n != nil {
|
||||||
|
fmt.Printf("link: %s\n", n.InnerText())
|
||||||
|
}
|
||||||
|
for i, n := range xmlquery.Find(doc, "//item/title") {
|
||||||
|
fmt.Printf("#%d %s\n", i, n.InnerText())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
List of supported XPath query packages
|
||||||
|
===
|
||||||
|
|Name |Description |
|
||||||
|
|--------------------------|----------------|
|
||||||
|
|[htmlquery](https://github.com/antchfx/htmlquery) | XPath query package for the HTML document|
|
||||||
|
|[xmlquery](https://github.com/antchfx/xmlquery) | XPath query package for the XML document|
|
||||||
|
|[jsonquery](https://github.com/antchfx/jsonquery) | XPath query package for the JSON document|
|
||||||
|
|
||||||
|
Questions
|
||||||
|
===
|
||||||
|
Please let me know if you have any questions
|
121
vendor/github.com/antchfx/xmlquery/books.xml
generated
vendored
Normal file
121
vendor/github.com/antchfx/xmlquery/books.xml
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
|||||||
|
<?xml version="1.0"?>
|
||||||
|
<?xml-stylesheet type="text/xsl" ?>
|
||||||
|
<bookstore specialty="novel">
|
||||||
|
<book id="bk101">
|
||||||
|
<author>Gambardella, Matthew</author>
|
||||||
|
<title>XML Developer's Guide</title>
|
||||||
|
<genre>Computer</genre>
|
||||||
|
<price>44.95</price>
|
||||||
|
<publish_date>2000-10-01</publish_date>
|
||||||
|
<description>An in-depth look at creating applications
|
||||||
|
with XML.</description>
|
||||||
|
</book>
|
||||||
|
<book id="bk102">
|
||||||
|
<author>Ralls, Kim</author>
|
||||||
|
<title>Midnight Rain</title>
|
||||||
|
<genre>Fantasy</genre>
|
||||||
|
<price>5.95</price>
|
||||||
|
<publish_date>2000-12-16</publish_date>
|
||||||
|
<description>A former architect battles corporate zombies,
|
||||||
|
an evil sorceress, and her own childhood to become queen
|
||||||
|
of the world.</description>
|
||||||
|
</book>
|
||||||
|
<book id="bk103">
|
||||||
|
<author>Corets, Eva</author>
|
||||||
|
<title>Maeve Ascendant</title>
|
||||||
|
<genre>Fantasy</genre>
|
||||||
|
<price>5.95</price>
|
||||||
|
<publish_date>2000-11-17</publish_date>
|
||||||
|
<description>After the collapse of a nanotechnology
|
||||||
|
society in England, the young survivors lay the
|
||||||
|
foundation for a new society.</description>
|
||||||
|
</book>
|
||||||
|
<book id="bk104">
|
||||||
|
<author>Corets, Eva</author>
|
||||||
|
<title>Oberon's Legacy</title>
|
||||||
|
<genre>Fantasy</genre>
|
||||||
|
<price>5.95</price>
|
||||||
|
<publish_date>2001-03-10</publish_date>
|
||||||
|
<description>In post-apocalypse England, the mysterious
|
||||||
|
agent known only as Oberon helps to create a new life
|
||||||
|
for the inhabitants of London. Sequel to Maeve
|
||||||
|
Ascendant.</description>
|
||||||
|
</book>
|
||||||
|
<book id="bk105">
|
||||||
|
<author>Corets, Eva</author>
|
||||||
|
<title>The Sundered Grail</title>
|
||||||
|
<genre>Fantasy</genre>
|
||||||
|
<price>5.95</price>
|
||||||
|
<publish_date>2001-09-10</publish_date>
|
||||||
|
<description>The two daughters of Maeve, half-sisters,
|
||||||
|
battle one another for control of England. Sequel to
|
||||||
|
Oberon's Legacy.</description>
|
||||||
|
</book>
|
||||||
|
<book id="bk106">
|
||||||
|
<author>Randall, Cynthia</author>
|
||||||
|
<title>Lover Birds</title>
|
||||||
|
<genre>Romance</genre>
|
||||||
|
<price>4.95</price>
|
||||||
|
<publish_date>2000-09-02</publish_date>
|
||||||
|
<description>When Carla meets Paul at an ornithology
|
||||||
|
conference, tempers fly as feathers get ruffled.</description>
|
||||||
|
</book>
|
||||||
|
<book id="bk107">
|
||||||
|
<author>Thurman, Paula</author>
|
||||||
|
<title>Splish Splash</title>
|
||||||
|
<genre>Romance</genre>
|
||||||
|
<price>4.95</price>
|
||||||
|
<publish_date>2000-11-02</publish_date>
|
||||||
|
<description>A deep sea diver finds true love twenty
|
||||||
|
thousand leagues beneath the sea.</description>
|
||||||
|
</book>
|
||||||
|
<book id="bk108">
|
||||||
|
<author>Knorr, Stefan</author>
|
||||||
|
<title>Creepy Crawlies</title>
|
||||||
|
<genre>Horror</genre>
|
||||||
|
<price>4.95</price>
|
||||||
|
<publish_date>2000-12-06</publish_date>
|
||||||
|
<description>An anthology of horror stories about roaches,
|
||||||
|
centipedes, scorpions and other insects.</description>
|
||||||
|
</book>
|
||||||
|
<book id="bk109">
|
||||||
|
<author>Kress, Peter</author>
|
||||||
|
<title>Paradox Lost</title>
|
||||||
|
<genre>Science Fiction</genre>
|
||||||
|
<price>6.95</price>
|
||||||
|
<publish_date>2000-11-02</publish_date>
|
||||||
|
<description>After an inadvertant trip through a Heisenberg
|
||||||
|
Uncertainty Device, James Salway discovers the problems
|
||||||
|
of being quantum.</description>
|
||||||
|
</book>
|
||||||
|
<book id="bk110">
|
||||||
|
<author>O'Brien, Tim</author>
|
||||||
|
<title>Microsoft .NET: The Programming Bible</title>
|
||||||
|
<genre>Computer</genre>
|
||||||
|
<price>36.95</price>
|
||||||
|
<publish_date>2000-12-09</publish_date>
|
||||||
|
<description>Microsoft's .NET initiative is explored in
|
||||||
|
detail in this deep programmer's reference.</description>
|
||||||
|
</book>
|
||||||
|
<book id="bk111">
|
||||||
|
<author>O'Brien, Tim</author>
|
||||||
|
<title>MSXML3: A Comprehensive Guide</title>
|
||||||
|
<genre>Computer</genre>
|
||||||
|
<price>36.95</price>
|
||||||
|
<publish_date>2000-12-01</publish_date>
|
||||||
|
<description>The Microsoft MSXML3 parser is covered in
|
||||||
|
detail, with attention to XML DOM interfaces, XSLT processing,
|
||||||
|
SAX and more.</description>
|
||||||
|
</book>
|
||||||
|
<book id="bk112">
|
||||||
|
<author>Galos, Mike</author>
|
||||||
|
<title>Visual Studio 7: A Comprehensive Guide</title>
|
||||||
|
<genre>Computer</genre>
|
||||||
|
<price>49.95</price>
|
||||||
|
<publish_date>2001-04-16</publish_date>
|
||||||
|
<description>Microsoft Visual Studio 7 is explored in depth,
|
||||||
|
looking at how Visual Basic, Visual C++, C#, and ASP+ are
|
||||||
|
integrated into a comprehensive development
|
||||||
|
environment.</description>
|
||||||
|
</book>
|
||||||
|
</bookstore>
|
302
vendor/github.com/antchfx/xmlquery/node.go
generated
vendored
Normal file
302
vendor/github.com/antchfx/xmlquery/node.go
generated
vendored
Normal file
@ -0,0 +1,302 @@
|
|||||||
|
package xmlquery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/xml"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/net/html/charset"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A NodeType is the type of a Node.
|
||||||
|
type NodeType uint
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DocumentNode is a document object that, as the root of the document tree,
|
||||||
|
// provides access to the entire XML document.
|
||||||
|
DocumentNode NodeType = iota
|
||||||
|
// DeclarationNode is the document type declaration, indicated by the following
|
||||||
|
// tag (for example, <!DOCTYPE...> ).
|
||||||
|
DeclarationNode
|
||||||
|
// ElementNode is an element (for example, <item> ).
|
||||||
|
ElementNode
|
||||||
|
// TextNode is the text content of a node.
|
||||||
|
TextNode
|
||||||
|
// CommentNode a comment (for example, <!-- my comment --> ).
|
||||||
|
CommentNode
|
||||||
|
// AttributeNode is an attribute of element.
|
||||||
|
AttributeNode
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Node consists of a NodeType and some Data (tag name for
|
||||||
|
// element nodes, content for text) and are part of a tree of Nodes.
|
||||||
|
type Node struct {
|
||||||
|
Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
|
||||||
|
|
||||||
|
Type NodeType
|
||||||
|
Data string
|
||||||
|
Prefix string
|
||||||
|
NamespaceURI string
|
||||||
|
Attr []xml.Attr
|
||||||
|
|
||||||
|
level int // node level in the tree
|
||||||
|
}
|
||||||
|
|
||||||
|
// InnerText returns the text between the start and end tags of the object.
|
||||||
|
func (n *Node) InnerText() string {
|
||||||
|
var output func(*bytes.Buffer, *Node)
|
||||||
|
output = func(buf *bytes.Buffer, n *Node) {
|
||||||
|
switch n.Type {
|
||||||
|
case TextNode:
|
||||||
|
buf.WriteString(n.Data)
|
||||||
|
return
|
||||||
|
case CommentNode:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for child := n.FirstChild; child != nil; child = child.NextSibling {
|
||||||
|
output(buf, child)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
output(&buf, n)
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func outputXML(buf *bytes.Buffer, n *Node) {
|
||||||
|
if n.Type == TextNode {
|
||||||
|
xml.EscapeText(buf, []byte(strings.TrimSpace(n.Data)))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if n.Type == CommentNode {
|
||||||
|
buf.WriteString("<!--")
|
||||||
|
buf.WriteString(n.Data)
|
||||||
|
buf.WriteString("-->")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if n.Type == DeclarationNode {
|
||||||
|
buf.WriteString("<?" + n.Data)
|
||||||
|
} else {
|
||||||
|
if n.Prefix == "" {
|
||||||
|
buf.WriteString("<" + n.Data)
|
||||||
|
} else {
|
||||||
|
buf.WriteString("<" + n.Prefix + ":" + n.Data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, attr := range n.Attr {
|
||||||
|
if attr.Name.Space != "" {
|
||||||
|
buf.WriteString(fmt.Sprintf(` %s:%s="%s"`, attr.Name.Space, attr.Name.Local, attr.Value))
|
||||||
|
} else {
|
||||||
|
buf.WriteString(fmt.Sprintf(` %s="%s"`, attr.Name.Local, attr.Value))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if n.Type == DeclarationNode {
|
||||||
|
buf.WriteString("?>")
|
||||||
|
} else {
|
||||||
|
buf.WriteString(">")
|
||||||
|
}
|
||||||
|
for child := n.FirstChild; child != nil; child = child.NextSibling {
|
||||||
|
outputXML(buf, child)
|
||||||
|
}
|
||||||
|
if n.Type != DeclarationNode {
|
||||||
|
if n.Prefix == "" {
|
||||||
|
buf.WriteString(fmt.Sprintf("</%s>", n.Data))
|
||||||
|
} else {
|
||||||
|
buf.WriteString(fmt.Sprintf("</%s:%s>", n.Prefix, n.Data))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OutputXML returns the text that including tags name.
|
||||||
|
func (n *Node) OutputXML(self bool) string {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if self {
|
||||||
|
outputXML(&buf, n)
|
||||||
|
} else {
|
||||||
|
for n := n.FirstChild; n != nil; n = n.NextSibling {
|
||||||
|
outputXML(&buf, n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func addAttr(n *Node, key, val string) {
|
||||||
|
var attr xml.Attr
|
||||||
|
if i := strings.Index(key, ":"); i > 0 {
|
||||||
|
attr = xml.Attr{
|
||||||
|
Name: xml.Name{Space: key[:i], Local: key[i+1:]},
|
||||||
|
Value: val,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
attr = xml.Attr{
|
||||||
|
Name: xml.Name{Local: key},
|
||||||
|
Value: val,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
n.Attr = append(n.Attr, attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func addChild(parent, n *Node) {
|
||||||
|
n.Parent = parent
|
||||||
|
if parent.FirstChild == nil {
|
||||||
|
parent.FirstChild = n
|
||||||
|
} else {
|
||||||
|
parent.LastChild.NextSibling = n
|
||||||
|
n.PrevSibling = parent.LastChild
|
||||||
|
}
|
||||||
|
|
||||||
|
parent.LastChild = n
|
||||||
|
}
|
||||||
|
|
||||||
|
func addSibling(sibling, n *Node) {
|
||||||
|
for t := sibling.NextSibling; t != nil; t = t.NextSibling {
|
||||||
|
sibling = t
|
||||||
|
}
|
||||||
|
n.Parent = sibling.Parent
|
||||||
|
sibling.NextSibling = n
|
||||||
|
n.PrevSibling = sibling
|
||||||
|
if sibling.Parent != nil {
|
||||||
|
sibling.Parent.LastChild = n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadURL loads the XML document from the specified URL.
|
||||||
|
func LoadURL(url string) (*Node, error) {
|
||||||
|
resp, err := http.Get(url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
return parse(resp.Body)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parse(r io.Reader) (*Node, error) {
|
||||||
|
var (
|
||||||
|
decoder = xml.NewDecoder(r)
|
||||||
|
doc = &Node{Type: DocumentNode}
|
||||||
|
space2prefix = make(map[string]string)
|
||||||
|
level = 0
|
||||||
|
)
|
||||||
|
// http://www.w3.org/XML/1998/namespace is bound by definition to the prefix xml.
|
||||||
|
space2prefix["http://www.w3.org/XML/1998/namespace"] = "xml"
|
||||||
|
decoder.CharsetReader = charset.NewReaderLabel
|
||||||
|
prev := doc
|
||||||
|
for {
|
||||||
|
tok, err := decoder.Token()
|
||||||
|
switch {
|
||||||
|
case err == io.EOF:
|
||||||
|
goto quit
|
||||||
|
case err != nil:
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch tok := tok.(type) {
|
||||||
|
case xml.StartElement:
|
||||||
|
if level == 0 {
|
||||||
|
// mising XML declaration
|
||||||
|
node := &Node{Type: DeclarationNode, Data: "xml", level: 1}
|
||||||
|
addChild(prev, node)
|
||||||
|
level = 1
|
||||||
|
prev = node
|
||||||
|
}
|
||||||
|
// https://www.w3.org/TR/xml-names/#scoping-defaulting
|
||||||
|
for _, att := range tok.Attr {
|
||||||
|
if att.Name.Local == "xmlns" {
|
||||||
|
space2prefix[att.Value] = ""
|
||||||
|
} else if att.Name.Space == "xmlns" {
|
||||||
|
space2prefix[att.Value] = att.Name.Local
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if tok.Name.Space != "" {
|
||||||
|
if _, found := space2prefix[tok.Name.Space]; !found {
|
||||||
|
return nil, errors.New("xmlquery: invalid XML document, namespace is missing")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(tok.Attr); i++ {
|
||||||
|
att := &tok.Attr[i]
|
||||||
|
if prefix, ok := space2prefix[att.Name.Space]; ok {
|
||||||
|
att.Name.Space = prefix
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
node := &Node{
|
||||||
|
Type: ElementNode,
|
||||||
|
Data: tok.Name.Local,
|
||||||
|
Prefix: space2prefix[tok.Name.Space],
|
||||||
|
NamespaceURI: tok.Name.Space,
|
||||||
|
Attr: tok.Attr,
|
||||||
|
level: level,
|
||||||
|
}
|
||||||
|
//fmt.Println(fmt.Sprintf("start > %s : %d", node.Data, level))
|
||||||
|
if level == prev.level {
|
||||||
|
addSibling(prev, node)
|
||||||
|
} else if level > prev.level {
|
||||||
|
addChild(prev, node)
|
||||||
|
} else if level < prev.level {
|
||||||
|
for i := prev.level - level; i > 1; i-- {
|
||||||
|
prev = prev.Parent
|
||||||
|
}
|
||||||
|
addSibling(prev.Parent, node)
|
||||||
|
}
|
||||||
|
prev = node
|
||||||
|
level++
|
||||||
|
case xml.EndElement:
|
||||||
|
level--
|
||||||
|
case xml.CharData:
|
||||||
|
node := &Node{Type: TextNode, Data: string(tok), level: level}
|
||||||
|
if level == prev.level {
|
||||||
|
addSibling(prev, node)
|
||||||
|
} else if level > prev.level {
|
||||||
|
addChild(prev, node)
|
||||||
|
}
|
||||||
|
case xml.Comment:
|
||||||
|
node := &Node{Type: CommentNode, Data: string(tok), level: level}
|
||||||
|
if level == prev.level {
|
||||||
|
addSibling(prev, node)
|
||||||
|
} else if level > prev.level {
|
||||||
|
addChild(prev, node)
|
||||||
|
} else if level < prev.level {
|
||||||
|
for i := prev.level - level; i > 1; i-- {
|
||||||
|
prev = prev.Parent
|
||||||
|
}
|
||||||
|
addSibling(prev.Parent, node)
|
||||||
|
}
|
||||||
|
case xml.ProcInst: // Processing Instruction
|
||||||
|
if prev.Type != DeclarationNode {
|
||||||
|
level++
|
||||||
|
}
|
||||||
|
node := &Node{Type: DeclarationNode, Data: tok.Target, level: level}
|
||||||
|
pairs := strings.Split(string(tok.Inst), " ")
|
||||||
|
for _, pair := range pairs {
|
||||||
|
pair = strings.TrimSpace(pair)
|
||||||
|
if i := strings.Index(pair, "="); i > 0 {
|
||||||
|
addAttr(node, pair[:i], strings.Trim(pair[i+1:], `"`))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if level == prev.level {
|
||||||
|
addSibling(prev, node)
|
||||||
|
} else if level > prev.level {
|
||||||
|
addChild(prev, node)
|
||||||
|
}
|
||||||
|
prev = node
|
||||||
|
case xml.Directive:
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
quit:
|
||||||
|
return doc, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse returns the parse tree for the XML from the given Reader.
|
||||||
|
func Parse(r io.Reader) (*Node, error) {
|
||||||
|
return parse(r)
|
||||||
|
}
|
264
vendor/github.com/antchfx/xmlquery/query.go
generated
vendored
Normal file
264
vendor/github.com/antchfx/xmlquery/query.go
generated
vendored
Normal file
@ -0,0 +1,264 @@
|
|||||||
|
/*
|
||||||
|
Package xmlquery provides extract data from XML documents using XPath expression.
|
||||||
|
*/
|
||||||
|
package xmlquery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/antchfx/xpath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SelectElements finds child elements with the specified name.
|
||||||
|
func (n *Node) SelectElements(name string) []*Node {
|
||||||
|
return Find(n, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SelectElement finds child elements with the specified name.
|
||||||
|
func (n *Node) SelectElement(name string) *Node {
|
||||||
|
return FindOne(n, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SelectAttr returns the attribute value with the specified name.
|
||||||
|
func (n *Node) SelectAttr(name string) string {
|
||||||
|
if n.Type == AttributeNode {
|
||||||
|
if n.Data == name {
|
||||||
|
return n.InnerText()
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
var local, space string
|
||||||
|
local = name
|
||||||
|
if i := strings.Index(name, ":"); i > 0 {
|
||||||
|
space = name[:i]
|
||||||
|
local = name[i+1:]
|
||||||
|
}
|
||||||
|
for _, attr := range n.Attr {
|
||||||
|
if attr.Name.Local == local && attr.Name.Space == space {
|
||||||
|
return attr.Value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ xpath.NodeNavigator = &NodeNavigator{}
|
||||||
|
|
||||||
|
// CreateXPathNavigator creates a new xpath.NodeNavigator for the specified html.Node.
|
||||||
|
func CreateXPathNavigator(top *Node) *NodeNavigator {
|
||||||
|
return &NodeNavigator{curr: top, root: top, attr: -1}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getCurrentNode(it *xpath.NodeIterator) *Node {
|
||||||
|
n := it.Current().(*NodeNavigator)
|
||||||
|
if n.NodeType() == xpath.AttributeNode {
|
||||||
|
childNode := &Node{
|
||||||
|
Type: TextNode,
|
||||||
|
Data: n.Value(),
|
||||||
|
}
|
||||||
|
return &Node{
|
||||||
|
Type: AttributeNode,
|
||||||
|
Data: n.LocalName(),
|
||||||
|
FirstChild: childNode,
|
||||||
|
LastChild: childNode,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n.curr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find searches the Node that matches by the specified XPath expr.
|
||||||
|
func Find(top *Node, expr string) []*Node {
|
||||||
|
exp, err := xpath.Compile(expr)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
t := exp.Select(CreateXPathNavigator(top))
|
||||||
|
var elems []*Node
|
||||||
|
for t.MoveNext() {
|
||||||
|
elems = append(elems, getCurrentNode(t))
|
||||||
|
}
|
||||||
|
return elems
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindOne searches the Node that matches by the specified XPath expr,
|
||||||
|
// and returns first element of matched.
|
||||||
|
func FindOne(top *Node, expr string) *Node {
|
||||||
|
exp, err := xpath.Compile(expr)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
t := exp.Select(CreateXPathNavigator(top))
|
||||||
|
var elem *Node
|
||||||
|
if t.MoveNext() {
|
||||||
|
elem = getCurrentNode(t)
|
||||||
|
}
|
||||||
|
return elem
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindEach searches the html.Node and calls functions cb.
|
||||||
|
// Important: this method has deprecated, recommend use for .. = range Find(){}.
|
||||||
|
func FindEach(top *Node, expr string, cb func(int, *Node)) {
|
||||||
|
for i, n := range Find(top, expr) {
|
||||||
|
cb(i, n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindEachWithBreak functions the same as FindEach but allows you
|
||||||
|
// to break the loop by returning false from your callback function, cb.
|
||||||
|
// Important: this method has deprecated, recommend use for .. = range Find(){}.
|
||||||
|
func FindEachWithBreak(top *Node, expr string, cb func(int, *Node) bool) {
|
||||||
|
for i, n := range Find(top, expr) {
|
||||||
|
if !cb(i, n) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeNavigator is an xpath.NodeNavigator implementation that walks a
// *Node document tree, optionally pointing at one of an element's
// attributes.
type NodeNavigator struct {
	root, curr *Node // document root and current position
	attr       int   // index into curr.Attr, or -1 when not on an attribute
}
|
||||||
|
|
||||||
|
// Current returns the node the navigator is currently positioned on.
func (x *NodeNavigator) Current() *Node {
	return x.curr
}
|
||||||
|
|
||||||
|
// NodeType maps the current node's type onto the corresponding
// xpath.NodeType. An element reports xpath.AttributeNode while the
// navigator is positioned on one of its attributes (x.attr != -1).
// It panics on a node type it does not recognize.
func (x *NodeNavigator) NodeType() xpath.NodeType {
	switch x.curr.Type {
	case CommentNode:
		return xpath.CommentNode
	case TextNode:
		return xpath.TextNode
	case DeclarationNode, DocumentNode:
		return xpath.RootNode
	case ElementNode:
		if x.attr != -1 {
			return xpath.AttributeNode
		}
		return xpath.ElementNode
	}
	panic(fmt.Sprintf("unknown XML node type: %v", x.curr.Type))
}
|
||||||
|
|
||||||
|
func (x *NodeNavigator) LocalName() string {
|
||||||
|
if x.attr != -1 {
|
||||||
|
return x.curr.Attr[x.attr].Name.Local
|
||||||
|
}
|
||||||
|
return x.curr.Data
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prefix returns the namespace prefix of the current node, or of the
// current attribute when the navigator is positioned on one.
func (x *NodeNavigator) Prefix() string {
	if x.NodeType() == xpath.AttributeNode {
		// NodeType only reports AttributeNode when x.attr != -1, so the
		// "" fallback below is effectively unreachable; kept defensively.
		if x.attr != -1 {
			return x.curr.Attr[x.attr].Name.Space
		}
		return ""
	}
	return x.curr.Prefix
}
|
||||||
|
|
||||||
|
// Value returns the string-value of the current position: comment or
// text data, the attribute value when positioned on an attribute, or the
// concatenated inner text of an element. Other node types yield "".
func (x *NodeNavigator) Value() string {
	switch x.curr.Type {
	case CommentNode:
		return x.curr.Data
	case ElementNode:
		if x.attr != -1 {
			return x.curr.Attr[x.attr].Value
		}
		return x.curr.InnerText()
	case TextNode:
		return x.curr.Data
	}
	return ""
}
|
||||||
|
|
||||||
|
func (x *NodeNavigator) Copy() xpath.NodeNavigator {
|
||||||
|
n := *x
|
||||||
|
return &n
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveToRoot repositions the navigator on the document root.
// Note: x.attr is not reset here; leaving an attribute is normally done
// via MoveToParent.
func (x *NodeNavigator) MoveToRoot() {
	x.curr = x.root
}
|
||||||
|
|
||||||
|
func (x *NodeNavigator) MoveToParent() bool {
|
||||||
|
if x.attr != -1 {
|
||||||
|
x.attr = -1
|
||||||
|
return true
|
||||||
|
} else if node := x.curr.Parent; node != nil {
|
||||||
|
x.curr = node
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *NodeNavigator) MoveToNextAttribute() bool {
|
||||||
|
if x.attr >= len(x.curr.Attr)-1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
x.attr++
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *NodeNavigator) MoveToChild() bool {
|
||||||
|
if x.attr != -1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if node := x.curr.FirstChild; node != nil {
|
||||||
|
x.curr = node
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *NodeNavigator) MoveToFirst() bool {
|
||||||
|
if x.attr != -1 || x.curr.PrevSibling == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
node := x.curr.PrevSibling
|
||||||
|
if node == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
x.curr = node
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the string-value of the current position (see Value).
func (x *NodeNavigator) String() string {
	return x.Value()
}
|
||||||
|
|
||||||
|
func (x *NodeNavigator) MoveToNext() bool {
|
||||||
|
if x.attr != -1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if node := x.curr.NextSibling; node != nil {
|
||||||
|
x.curr = node
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *NodeNavigator) MoveToPrevious() bool {
|
||||||
|
if x.attr != -1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if node := x.curr.PrevSibling; node != nil {
|
||||||
|
x.curr = node
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *NodeNavigator) MoveTo(other xpath.NodeNavigator) bool {
|
||||||
|
node, ok := other.(*NodeNavigator)
|
||||||
|
if !ok || node.root != x.root {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
x.curr = node.curr
|
||||||
|
x.attr = node.attr
|
||||||
|
return true
|
||||||
|
}
|
32
vendor/github.com/antchfx/xpath/.gitignore
generated
vendored
Normal file
32
vendor/github.com/antchfx/xpath/.gitignore
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
# vscode
|
||||||
|
.vscode
|
||||||
|
debug
|
||||||
|
*.test
|
||||||
|
|
||||||
|
./build
|
||||||
|
|
||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||||
|
*.o
|
||||||
|
*.a
|
||||||
|
*.so
|
||||||
|
|
||||||
|
|
||||||
|
# Folders
|
||||||
|
_obj
|
||||||
|
_test
|
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes
|
||||||
|
*.[568vq]
|
||||||
|
[568vq].out
|
||||||
|
|
||||||
|
*.cgo1.go
|
||||||
|
*.cgo2.c
|
||||||
|
_cgo_defun.c
|
||||||
|
_cgo_gotypes.go
|
||||||
|
_cgo_export.*
|
||||||
|
|
||||||
|
_testmain.go
|
||||||
|
|
||||||
|
*.exe
|
||||||
|
*.test
|
||||||
|
*.prof
|
12
vendor/github.com/antchfx/xpath/.travis.yml
generated
vendored
Normal file
12
vendor/github.com/antchfx/xpath/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.6
|
||||||
|
- 1.9
|
||||||
|
- '1.10'
|
||||||
|
|
||||||
|
install:
|
||||||
|
- go get github.com/mattn/goveralls
|
||||||
|
|
||||||
|
script:
|
||||||
|
- $HOME/gopath/bin/goveralls -service=travis-ci
|
17
vendor/github.com/antchfx/xpath/LICENSE
generated
vendored
Normal file
17
vendor/github.com/antchfx/xpath/LICENSE
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
167
vendor/github.com/antchfx/xpath/README.md
generated
vendored
Normal file
167
vendor/github.com/antchfx/xpath/README.md
generated
vendored
Normal file
@ -0,0 +1,167 @@
|
|||||||
|
XPath
|
||||||
|
====
|
||||||
|
[![GoDoc](https://godoc.org/github.com/antchfx/xpath?status.svg)](https://godoc.org/github.com/antchfx/xpath)
|
||||||
|
[![Coverage Status](https://coveralls.io/repos/github/antchfx/xpath/badge.svg?branch=master)](https://coveralls.io/github/antchfx/xpath?branch=master)
|
||||||
|
[![Build Status](https://travis-ci.org/antchfx/xpath.svg?branch=master)](https://travis-ci.org/antchfx/xpath)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xpath)](https://goreportcard.com/report/github.com/antchfx/xpath)
|
||||||
|
|
||||||
|
XPath is a Go package that provides node selection from XML, HTML or other documents using XPath expressions.
|
||||||
|
|
||||||
|
Implementation
|
||||||
|
===
|
||||||
|
|
||||||
|
- [htmlquery](https://github.com/antchfx/htmlquery) - an XPath query package for HTML document
|
||||||
|
|
||||||
|
- [xmlquery](https://github.com/antchfx/xmlquery) - an XPath query package for XML document.
|
||||||
|
|
||||||
|
- [jsonquery](https://github.com/antchfx/jsonquery) - an XPath query package for JSON document
|
||||||
|
|
||||||
|
Supported Features
|
||||||
|
===
|
||||||
|
|
||||||
|
#### The basic XPath patterns.
|
||||||
|
|
||||||
|
> The basic XPath patterns cover 90% of the cases that most stylesheets will need.
|
||||||
|
|
||||||
|
- `node` : Selects all child elements with nodeName of node.
|
||||||
|
|
||||||
|
- `*` : Selects all child elements.
|
||||||
|
|
||||||
|
- `@attr` : Selects the attribute attr.
|
||||||
|
|
||||||
|
- `@*` : Selects all attributes.
|
||||||
|
|
||||||
|
- `node()` : Matches an org.w3c.dom.Node.
|
||||||
|
|
||||||
|
- `text()` : Matches a org.w3c.dom.Text node.
|
||||||
|
|
||||||
|
- `comment()` : Matches a comment.
|
||||||
|
|
||||||
|
- `.` : Selects the current node.
|
||||||
|
|
||||||
|
- `..` : Selects the parent of current node.
|
||||||
|
|
||||||
|
- `/` : Selects the document node.
|
||||||
|
|
||||||
|
- `a[expr]` : Select only those nodes matching a which also satisfy the expression expr.
|
||||||
|
|
||||||
|
- `a[n]` : Selects the nth matching node matching a When a filter's expression is a number, XPath selects based on position.
|
||||||
|
|
||||||
|
- `a/b` : For each node matching a, add the nodes matching b to the result.
|
||||||
|
|
||||||
|
- `a//b` : For each node matching a, add the descendant nodes matching b to the result.
|
||||||
|
|
||||||
|
- `//b` : Returns elements in the entire document matching b.
|
||||||
|
|
||||||
|
- `a|b` : All nodes matching a or b, union operation(not boolean or).
|
||||||
|
|
||||||
|
- `(a, b, c)` : Evaluates each of its operands and concatenates the resulting sequences, in order, into a single result sequence
|
||||||
|
|
||||||
|
|
||||||
|
#### Node Axes
|
||||||
|
|
||||||
|
- `child::*` : The child axis selects children of the current node.
|
||||||
|
|
||||||
|
- `descendant::*` : The descendant axis selects descendants of the current node. It is equivalent to '//'.
|
||||||
|
|
||||||
|
- `descendant-or-self::*` : Selects descendants including the current node.
|
||||||
|
|
||||||
|
- `attribute::*` : Selects attributes of the current element. It is equivalent to @*
|
||||||
|
|
||||||
|
- `following-sibling::*` : Selects nodes after the current node.
|
||||||
|
|
||||||
|
- `preceding-sibling::*` : Selects nodes before the current node.
|
||||||
|
|
||||||
|
- `following::*` : Selects the first matching node following in document order, excluding descendants.
|
||||||
|
|
||||||
|
- `preceding::*` : Selects the first matching node preceding in document order, excluding ancestors.
|
||||||
|
|
||||||
|
- `parent::*` : Selects the parent if it matches. The '..' pattern from the core is equivalent to 'parent::node()'.
|
||||||
|
|
||||||
|
- `ancestor::*` : Selects matching ancestors.
|
||||||
|
|
||||||
|
- `ancestor-or-self::*` : Selects ancestors including the current node.
|
||||||
|
|
||||||
|
- `self::*` : Selects the current node. '.' is equivalent to 'self::node()'.
|
||||||
|
|
||||||
|
#### Expressions
|
||||||
|
|
||||||
|
Expressions evaluate to one of three types: number, boolean, or string.
|
||||||
|
|
||||||
|
- `path` : Selects nodes based on the path.
|
||||||
|
|
||||||
|
- `a = b` : Standard comparisons.
|
||||||
|
|
||||||
|
* a = b True if a equals b.
|
||||||
|
* a != b True if a is not equal to b.
|
||||||
|
* a < b True if a is less than b.
|
||||||
|
* a <= b True if a is less than or equal to b.
|
||||||
|
* a > b True if a is greater than b.
|
||||||
|
* a >= b True if a is greater than or equal to b.
|
||||||
|
|
||||||
|
- `a + b` : Arithmetic expressions.
|
||||||
|
|
||||||
|
* `- a` Unary minus
|
||||||
|
* a + b Add
|
||||||
|
* a - b Subtract
|
||||||
|
* a * b Multiply
|
||||||
|
* a div b Divide
|
||||||
|
* a mod b Floating point mod, like Java.
|
||||||
|
|
||||||
|
- `a or b` : Boolean `or` operation.
|
||||||
|
|
||||||
|
- `a and b` : Boolean `and` operation.
|
||||||
|
|
||||||
|
- `(expr)` : Parenthesized expressions.
|
||||||
|
|
||||||
|
- `fun(arg1, ..., argn)` : Function calls:
|
||||||
|
|
||||||
|
| Function | Supported |
|
||||||
|
| --- | --- |
|
||||||
|
`boolean()`| ✓ |
|
||||||
|
`ceiling()`| ✓ |
|
||||||
|
`choose()`| ✗ |
|
||||||
|
`concat()`| ✓ |
|
||||||
|
`contains()`| ✓ |
|
||||||
|
`count()`| ✓ |
|
||||||
|
`current()`| ✗ |
|
||||||
|
`document()`| ✗ |
|
||||||
|
`element-available()`| ✗ |
|
||||||
|
`ends-with()`| ✓ |
|
||||||
|
`false()`| ✓ |
|
||||||
|
`floor()`| ✓ |
|
||||||
|
`format-number()`| ✗ |
|
||||||
|
`function-available()`| ✗ |
|
||||||
|
`generate-id()`| ✗ |
|
||||||
|
`id()`| ✗ |
|
||||||
|
`key()`| ✗ |
|
||||||
|
`lang()`| ✗ |
|
||||||
|
`last()`| ✓ |
|
||||||
|
`local-name()`| ✓ |
|
||||||
|
`name()`| ✓ |
|
||||||
|
`namespace-uri()`| ✓ |
|
||||||
|
`normalize-space()`| ✓ |
|
||||||
|
`not()`| ✓ |
|
||||||
|
`number()`| ✓ |
|
||||||
|
`position()`| ✓ |
|
||||||
|
`round()`| ✓ |
|
||||||
|
`starts-with()`| ✓ |
|
||||||
|
`string()`| ✓ |
|
||||||
|
`string-length()`| ✓ |
|
||||||
|
`substring()`| ✓ |
|
||||||
|
`substring-after()`| ✓ |
|
||||||
|
`substring-before()`| ✓ |
|
||||||
|
`sum()`| ✓ |
|
||||||
|
`system-property()`| ✗ |
|
||||||
|
`translate()`| ✓ |
|
||||||
|
`true()`| ✓ |
|
||||||
|
`unparsed-entity-url()` | ✗ |
|
||||||
|
|
||||||
|
Changelogs
|
||||||
|
===
|
||||||
|
|
||||||
|
2019-01-29
|
||||||
|
- improvement `normalize-space` function. [#32](https://github.com/antchfx/xpath/issues/32)
|
||||||
|
|
||||||
|
2018-12-07
|
||||||
|
- supports XPath 2.0 Sequence expressions. [#30](https://github.com/antchfx/xpath/pull/30) by [@minherz](https://github.com/minherz).
|
483
vendor/github.com/antchfx/xpath/build.go
generated
vendored
Normal file
483
vendor/github.com/antchfx/xpath/build.go
generated
vendored
Normal file
@ -0,0 +1,483 @@
|
|||||||
|
package xpath
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// flag holds builder state bits.
type flag int

const (
	noneFlag   flag = iota // no special state
	filterFlag             // set while building inside a filter predicate
)
|
||||||
|
|
||||||
|
// builder provides building an XPath expressions.
type builder struct {
	depth      int   // recursion depth guard used by processNode
	flag       flag  // state bits (e.g. filterFlag while inside a predicate)
	firstInput query // most recently built axis query; implicit input for functions
}
|
||||||
|
|
||||||
|
// axisPredicate builds the node test for an axis step: it checks the node
// type implied by the axis/node-test and, when a name test is present,
// the local name and prefix as well.
func axisPredicate(root *axisNode) func(NodeNavigator) bool {
	// Determine the node type this axis selects; element by default.
	typ := ElementNode
	switch root.AxeType {
	case "attribute":
		typ = AttributeNode
	case "self", "parent":
		typ = allNode
	default:
		switch root.Prop {
		case "comment":
			typ = CommentNode
		case "text":
			typ = TextNode
		// case "processing-instruction":
		//	typ = ProcessingInstructionNode
		case "node":
			typ = allNode
		}
	}
	// A name test applies only when the step names something explicitly.
	nametest := root.LocalName != "" || root.Prefix != ""
	predicate := func(n NodeNavigator) bool {
		// NOTE(review): the "typ == TextNode" clause makes a text() step
		// type-match any node here; the name test below still constrains
		// the result. Confirm against upstream before changing.
		if typ == n.NodeType() || typ == allNode || typ == TextNode {
			if nametest {
				if root.LocalName == n.LocalName() && root.Prefix == n.Prefix() {
					return true
				}
			} else {
				return true
			}
		}
		return false
	}

	return predicate
}
|
||||||
|
|
||||||
|
// processAxisNode processes a query for the XPath axis node, wiring the
// step's input query to the concrete query type for its axis.
func (b *builder) processAxisNode(root *axisNode) (query, error) {
	var (
		err       error
		qyInput   query
		qyOutput  query
		predicate = axisPredicate(root)
	)

	if root.Input == nil {
		// No explicit input: evaluate relative to the context node.
		qyInput = &contextQuery{}
	} else {
		// Fast path: a child step whose input is descendant-or-self
		// (the "//" shorthand) collapses into one descendantQuery over
		// the grand input.
		if root.AxeType == "child" && (root.Input.Type() == nodeAxis) {
			if input := root.Input.(*axisNode); input.AxeType == "descendant-or-self" {
				var qyGrandInput query
				if input.Input != nil {
					qyGrandInput, _ = b.processNode(input.Input)
				} else {
					qyGrandInput = &contextQuery{}
				}
				qyOutput = &descendantQuery{Input: qyGrandInput, Predicate: predicate, Self: true}
				return qyOutput, nil
			}
		}
		qyInput, err = b.processNode(root.Input)
		if err != nil {
			return nil, err
		}
	}

	switch root.AxeType {
	case "ancestor":
		qyOutput = &ancestorQuery{Input: qyInput, Predicate: predicate}
	case "ancestor-or-self":
		qyOutput = &ancestorQuery{Input: qyInput, Predicate: predicate, Self: true}
	case "attribute":
		qyOutput = &attributeQuery{Input: qyInput, Predicate: predicate}
	case "child":
		// The child axis additionally constrains matches by the step's
		// node-test property (text()/node()/comment()).
		filter := func(n NodeNavigator) bool {
			v := predicate(n)
			switch root.Prop {
			case "text":
				v = v && n.NodeType() == TextNode
			case "node":
				v = v && (n.NodeType() == ElementNode || n.NodeType() == TextNode)
			case "comment":
				v = v && n.NodeType() == CommentNode
			}
			return v
		}
		qyOutput = &childQuery{Input: qyInput, Predicate: filter}
	case "descendant":
		qyOutput = &descendantQuery{Input: qyInput, Predicate: predicate}
	case "descendant-or-self":
		qyOutput = &descendantQuery{Input: qyInput, Predicate: predicate, Self: true}
	case "following":
		qyOutput = &followingQuery{Input: qyInput, Predicate: predicate}
	case "following-sibling":
		qyOutput = &followingQuery{Input: qyInput, Predicate: predicate, Sibling: true}
	case "parent":
		qyOutput = &parentQuery{Input: qyInput, Predicate: predicate}
	case "preceding":
		qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate}
	case "preceding-sibling":
		qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate, Sibling: true}
	case "self":
		qyOutput = &selfQuery{Input: qyInput, Predicate: predicate}
	case "namespace":
		// The namespace axis is not implemented; qyOutput remains nil.
	default:
		err = fmt.Errorf("unknown axe type: %s", root.AxeType)
		return nil, err
	}
	return qyOutput, nil
}
|
||||||
|
|
||||||
|
// processFilterNode builds query for the XPath filter predicate,
// combining the filtered input query with its condition query.
func (b *builder) processFilterNode(root *filterNode) (query, error) {
	b.flag |= filterFlag

	qyInput, err := b.processNode(root.Input)
	if err != nil {
		return nil, err
	}
	qyCond, err := b.processNode(root.Condition)
	if err != nil {
		return nil, err
	}
	qyOutput := &filterQuery{Input: qyInput, Predicate: qyCond}
	return qyOutput, nil
}
|
||||||
|
|
||||||
|
// processFunctionNode processes query for the XPath function node,
// dispatching on the function name, validating its arity, compiling each
// argument and wrapping the matching evaluator in a functionQuery.
// Functions with an optional node-set argument (name(), string(), ...)
// default their input to b.firstInput, the most recent axis query.
func (b *builder) processFunctionNode(root *functionNode) (query, error) {
	var qyOutput query
	switch root.FuncName {
	case "starts-with":
		arg1, err := b.processNode(root.Args[0])
		if err != nil {
			return nil, err
		}
		arg2, err := b.processNode(root.Args[1])
		if err != nil {
			return nil, err
		}
		qyOutput = &functionQuery{Input: b.firstInput, Func: startwithFunc(arg1, arg2)}
	case "ends-with":
		arg1, err := b.processNode(root.Args[0])
		if err != nil {
			return nil, err
		}
		arg2, err := b.processNode(root.Args[1])
		if err != nil {
			return nil, err
		}
		qyOutput = &functionQuery{Input: b.firstInput, Func: endwithFunc(arg1, arg2)}
	case "contains":
		arg1, err := b.processNode(root.Args[0])
		if err != nil {
			return nil, err
		}
		arg2, err := b.processNode(root.Args[1])
		if err != nil {
			return nil, err
		}

		qyOutput = &functionQuery{Input: b.firstInput, Func: containsFunc(arg1, arg2)}
	case "substring":
		// substring( string , start [, length] )
		if len(root.Args) < 2 {
			return nil, errors.New("xpath: substring function must have at least two parameter")
		}
		var (
			arg1, arg2, arg3 query
			err              error
		)
		if arg1, err = b.processNode(root.Args[0]); err != nil {
			return nil, err
		}
		if arg2, err = b.processNode(root.Args[1]); err != nil {
			return nil, err
		}
		if len(root.Args) == 3 {
			if arg3, err = b.processNode(root.Args[2]); err != nil {
				return nil, err
			}
		}
		qyOutput = &functionQuery{Input: b.firstInput, Func: substringFunc(arg1, arg2, arg3)}
	case "substring-before", "substring-after":
		// substring-xxxx( haystack, needle )
		if len(root.Args) != 2 {
			return nil, errors.New("xpath: substring-before function must have two parameters")
		}
		var (
			arg1, arg2 query
			err        error
		)
		if arg1, err = b.processNode(root.Args[0]); err != nil {
			return nil, err
		}
		if arg2, err = b.processNode(root.Args[1]); err != nil {
			return nil, err
		}
		qyOutput = &functionQuery{
			Input: b.firstInput,
			Func:  substringIndFunc(arg1, arg2, root.FuncName == "substring-after"),
		}
	case "string-length":
		// string-length( [string] )
		if len(root.Args) < 1 {
			return nil, errors.New("xpath: string-length function must have at least one parameter")
		}
		arg1, err := b.processNode(root.Args[0])
		if err != nil {
			return nil, err
		}
		qyOutput = &functionQuery{Input: b.firstInput, Func: stringLengthFunc(arg1)}
	case "normalize-space":
		if len(root.Args) == 0 {
			return nil, errors.New("xpath: normalize-space function must have at least one parameter")
		}
		argQuery, err := b.processNode(root.Args[0])
		if err != nil {
			return nil, err
		}
		qyOutput = &functionQuery{Input: argQuery, Func: normalizespaceFunc}
	case "translate":
		// translate( string , string, string )
		if len(root.Args) != 3 {
			return nil, errors.New("xpath: translate function must have three parameters")
		}
		var (
			arg1, arg2, arg3 query
			err              error
		)
		if arg1, err = b.processNode(root.Args[0]); err != nil {
			return nil, err
		}
		if arg2, err = b.processNode(root.Args[1]); err != nil {
			return nil, err
		}
		if arg3, err = b.processNode(root.Args[2]); err != nil {
			return nil, err
		}
		qyOutput = &functionQuery{Input: b.firstInput, Func: translateFunc(arg1, arg2, arg3)}
	case "not":
		if len(root.Args) == 0 {
			return nil, errors.New("xpath: not function must have at least one parameter")
		}
		argQuery, err := b.processNode(root.Args[0])
		if err != nil {
			return nil, err
		}
		qyOutput = &functionQuery{Input: argQuery, Func: notFunc}
	case "name", "local-name", "namespace-uri":
		// Optional single node-set argument; defaults to b.firstInput.
		inp := b.firstInput
		if len(root.Args) > 1 {
			return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName)
		}
		if len(root.Args) == 1 {
			argQuery, err := b.processNode(root.Args[0])
			if err != nil {
				return nil, err
			}
			inp = argQuery
		}
		f := &functionQuery{Input: inp}
		switch root.FuncName {
		case "name":
			f.Func = nameFunc
		case "local-name":
			f.Func = localNameFunc
		case "namespace-uri":
			f.Func = namespaceFunc
		}
		qyOutput = f
	case "true", "false":
		// Constant boolean; captured once at build time.
		val := root.FuncName == "true"
		qyOutput = &functionQuery{
			Input: b.firstInput,
			Func: func(_ query, _ iterator) interface{} {
				return val
			},
		}
	case "last":
		qyOutput = &functionQuery{Input: b.firstInput, Func: lastFunc}
	case "position":
		qyOutput = &functionQuery{Input: b.firstInput, Func: positionFunc}
	case "boolean", "number", "string":
		// Optional single argument; defaults to b.firstInput.
		inp := b.firstInput
		if len(root.Args) > 1 {
			return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName)
		}
		if len(root.Args) == 1 {
			argQuery, err := b.processNode(root.Args[0])
			if err != nil {
				return nil, err
			}
			inp = argQuery
		}
		f := &functionQuery{Input: inp}
		switch root.FuncName {
		case "boolean":
			f.Func = booleanFunc
		case "string":
			f.Func = stringFunc
		case "number":
			f.Func = numberFunc
		}
		qyOutput = f
	case "count":
		//if b.firstInput == nil {
		//	return nil, errors.New("xpath: expression must evaluate to node-set")
		//}
		if len(root.Args) == 0 {
			return nil, fmt.Errorf("xpath: count(node-sets) function must with have parameters node-sets")
		}
		argQuery, err := b.processNode(root.Args[0])
		if err != nil {
			return nil, err
		}
		qyOutput = &functionQuery{Input: argQuery, Func: countFunc}
	case "sum":
		if len(root.Args) == 0 {
			return nil, fmt.Errorf("xpath: sum(node-sets) function must with have parameters node-sets")
		}
		argQuery, err := b.processNode(root.Args[0])
		if err != nil {
			return nil, err
		}
		qyOutput = &functionQuery{Input: argQuery, Func: sumFunc}
	case "ceiling", "floor", "round":
		if len(root.Args) == 0 {
			return nil, fmt.Errorf("xpath: ceiling(node-sets) function must with have parameters node-sets")
		}
		argQuery, err := b.processNode(root.Args[0])
		if err != nil {
			return nil, err
		}
		f := &functionQuery{Input: argQuery}
		switch root.FuncName {
		case "ceiling":
			f.Func = ceilingFunc
		case "floor":
			f.Func = floorFunc
		case "round":
			f.Func = roundFunc
		}
		qyOutput = f
	case "concat":
		if len(root.Args) < 2 {
			return nil, fmt.Errorf("xpath: concat() must have at least two arguments")
		}
		var args []query
		for _, v := range root.Args {
			q, err := b.processNode(v)
			if err != nil {
				return nil, err
			}
			args = append(args, q)
		}
		qyOutput = &functionQuery{Input: b.firstInput, Func: concatFunc(args...)}
	default:
		return nil, fmt.Errorf("not yet support this function %s()", root.FuncName)
	}
	return qyOutput, nil
}
|
||||||
|
|
||||||
|
func (b *builder) processOperatorNode(root *operatorNode) (query, error) {
|
||||||
|
left, err := b.processNode(root.Left)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
right, err := b.processNode(root.Right)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var qyOutput query
|
||||||
|
switch root.Op {
|
||||||
|
case "+", "-", "div", "mod": // Numeric operator
|
||||||
|
var exprFunc func(interface{}, interface{}) interface{}
|
||||||
|
switch root.Op {
|
||||||
|
case "+":
|
||||||
|
exprFunc = plusFunc
|
||||||
|
case "-":
|
||||||
|
exprFunc = minusFunc
|
||||||
|
case "div":
|
||||||
|
exprFunc = divFunc
|
||||||
|
case "mod":
|
||||||
|
exprFunc = modFunc
|
||||||
|
}
|
||||||
|
qyOutput = &numericQuery{Left: left, Right: right, Do: exprFunc}
|
||||||
|
case "=", ">", ">=", "<", "<=", "!=":
|
||||||
|
var exprFunc func(iterator, interface{}, interface{}) interface{}
|
||||||
|
switch root.Op {
|
||||||
|
case "=":
|
||||||
|
exprFunc = eqFunc
|
||||||
|
case ">":
|
||||||
|
exprFunc = gtFunc
|
||||||
|
case ">=":
|
||||||
|
exprFunc = geFunc
|
||||||
|
case "<":
|
||||||
|
exprFunc = ltFunc
|
||||||
|
case "<=":
|
||||||
|
exprFunc = leFunc
|
||||||
|
case "!=":
|
||||||
|
exprFunc = neFunc
|
||||||
|
}
|
||||||
|
qyOutput = &logicalQuery{Left: left, Right: right, Do: exprFunc}
|
||||||
|
case "or", "and":
|
||||||
|
isOr := false
|
||||||
|
if root.Op == "or" {
|
||||||
|
isOr = true
|
||||||
|
}
|
||||||
|
qyOutput = &booleanQuery{Left: left, Right: right, IsOr: isOr}
|
||||||
|
case "|":
|
||||||
|
qyOutput = &unionQuery{Left: left, Right: right}
|
||||||
|
}
|
||||||
|
return qyOutput, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// processNode dispatches on the parse-tree node type and builds the
// corresponding query. A depth counter guards against pathologically
// deep expressions.
func (b *builder) processNode(root node) (q query, err error) {
	// Deliberate assignment inside the if: increments the depth before
	// checking the 1024-level recursion limit.
	if b.depth = b.depth + 1; b.depth > 1024 {
		err = errors.New("the xpath expressions is too complex")
		return
	}

	switch root.Type() {
	case nodeConstantOperand:
		n := root.(*operandNode)
		q = &constantQuery{Val: n.Val}
	case nodeRoot:
		q = &contextQuery{Root: true}
	case nodeAxis:
		q, err = b.processAxisNode(root.(*axisNode))
		// Remember the latest axis query; it becomes the implicit
		// input for functions such as position() and last().
		b.firstInput = q
	case nodeFilter:
		q, err = b.processFilterNode(root.(*filterNode))
	case nodeFunction:
		q, err = b.processFunctionNode(root.(*functionNode))
	case nodeOperator:
		q, err = b.processOperatorNode(root.(*operatorNode))
	}
	return
}
|
||||||
|
|
||||||
|
// build builds a specified XPath expressions expr.
// The parser signals malformed input by panicking; the deferred recover
// converts those panics into ordinary errors.
func build(expr string) (q query, err error) {
	defer func() {
		if e := recover(); e != nil {
			switch x := e.(type) {
			case string:
				err = errors.New(x)
			case error:
				err = x
			default:
				err = errors.New("unknown panic")
			}
		}
	}()
	root := parse(expr)
	b := &builder{}
	return b.processNode(root)
}
|
484
vendor/github.com/antchfx/xpath/func.go
generated
vendored
Normal file
484
vendor/github.com/antchfx/xpath/func.go
generated
vendored
Normal file
@ -0,0 +1,484 @@
|
|||||||
|
package xpath
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The XPath function list.
|
||||||
|
|
||||||
|
func predicate(q query) func(NodeNavigator) bool {
|
||||||
|
type Predicater interface {
|
||||||
|
Test(NodeNavigator) bool
|
||||||
|
}
|
||||||
|
if p, ok := q.(Predicater); ok {
|
||||||
|
return p.Test
|
||||||
|
}
|
||||||
|
return func(NodeNavigator) bool { return true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// positionFunc is a XPath Node Set functions position().
// It returns the 1-based position of the iterator's current node by
// counting how many preceding siblings also match q's node test.
func positionFunc(q query, t iterator) interface{} {
	var (
		count = 1 // the current node itself occupies one position
		node  = t.Current()
	)
	test := predicate(q)
	for node.MoveToPrevious() {
		if test(node) {
			count++
		}
	}
	// XPath numbers are always float64.
	return float64(count)
}
|
||||||
|
|
||||||
|
// lastFunc is a XPath Node Set functions last().
// It rewinds to the first sibling and walks the whole sibling list,
// counting nodes that match q's test — i.e. the position of the last
// matching node.
func lastFunc(q query, t iterator) interface{} {
	var (
		count = 0
		node  = t.Current()
	)
	node.MoveToFirst()
	test := predicate(q)
	for {
		if test(node) {
			count++
		}
		if !node.MoveToNext() {
			break
		}
	}
	return float64(count)
}
|
||||||
|
|
||||||
|
// countFunc is a XPath Node Set functions count(node-set).
// The argument must evaluate to a node set; any other result type
// falls through the switch and counts as 0.
func countFunc(q query, t iterator) interface{} {
	var count = 0
	test := predicate(q)
	switch typ := q.Evaluate(t).(type) {
	case query:
		// Drain the selection, counting nodes that pass the test.
		for node := typ.Select(t); node != nil; node = typ.Select(t) {
			if test(node) {
				count++
			}
		}
	}
	return float64(count)
}
|
||||||
|
|
||||||
|
// sumFunc is a XPath Node Set functions sum(node-set).
// A node set sums the numeric value of each selected node (nodes whose
// value does not parse as a number are silently skipped); a number is
// returned as-is; a string must parse as a number or this panics.
func sumFunc(q query, t iterator) interface{} {
	var sum float64
	switch typ := q.Evaluate(t).(type) {
	case query:
		for node := typ.Select(t); node != nil; node = typ.Select(t) {
			if v, err := strconv.ParseFloat(node.Value(), 64); err == nil {
				sum += v
			}
		}
	case float64:
		sum = typ
	case string:
		v, err := strconv.ParseFloat(typ, 64)
		if err != nil {
			panic(errors.New("sum() function argument type must be a node-set or number"))
		}
		sum = v
	}
	// Booleans and other types fall through and yield 0.
	return sum
}
|
||||||
|
|
||||||
|
func asNumber(t iterator, o interface{}) float64 {
|
||||||
|
switch typ := o.(type) {
|
||||||
|
case query:
|
||||||
|
node := typ.Select(t)
|
||||||
|
if node == nil {
|
||||||
|
return float64(0)
|
||||||
|
}
|
||||||
|
if v, err := strconv.ParseFloat(node.Value(), 64); err == nil {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
case float64:
|
||||||
|
return typ
|
||||||
|
case string:
|
||||||
|
v, err := strconv.ParseFloat(typ, 64)
|
||||||
|
if err != nil {
|
||||||
|
panic(errors.New("ceiling() function argument type must be a node-set or number"))
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ceilingFunc is a XPath Node Set functions ceiling(node-set).
// Coerces its argument to a number and rounds it up.
func ceilingFunc(q query, t iterator) interface{} {
	val := asNumber(t, q.Evaluate(t))
	return math.Ceil(val)
}

// floorFunc is a XPath Node Set functions floor(node-set).
// Coerces its argument to a number and rounds it down.
func floorFunc(q query, t iterator) interface{} {
	val := asNumber(t, q.Evaluate(t))
	return math.Floor(val)
}
|
||||||
|
|
||||||
|
// roundFunc is a XPath Node Set functions round(node-set).
|
||||||
|
func roundFunc(q query, t iterator) interface{} {
|
||||||
|
val := asNumber(t, q.Evaluate(t))
|
||||||
|
//return math.Round(val)
|
||||||
|
return round(val)
|
||||||
|
}
|
||||||
|
|
||||||
|
// nameFunc is a XPath functions name([node-set]).
// Returns the qualified name ("prefix:local", or just the local name
// when the node has no prefix) of the first selected node, or "" for
// an empty selection.
func nameFunc(q query, t iterator) interface{} {
	v := q.Select(t)
	if v == nil {
		return ""
	}
	ns := v.Prefix()
	if ns == "" {
		return v.LocalName()
	}
	return ns + ":" + v.LocalName()
}

// localNameFunc is a XPath functions local-name([node-set]).
// Returns the local part of the first selected node's name, or "".
func localNameFunc(q query, t iterator) interface{} {
	v := q.Select(t)
	if v == nil {
		return ""
	}
	return v.LocalName()
}

// namespaceFunc is a XPath functions namespace-uri([node-set]).
// NOTE(review): this returns the node's prefix, not a resolved
// namespace URI — confirm against the NodeNavigator implementation.
func namespaceFunc(q query, t iterator) interface{} {
	v := q.Select(t)
	if v == nil {
		return ""
	}
	return v.Prefix()
}
|
||||||
|
|
||||||
|
// asBool coerces an XPath evaluation result v to a boolean:
// nil -> false; a node iterator/query -> whether it yields any node;
// a number -> non-zero; a string -> non-empty. Unknown types panic.
func asBool(t iterator, v interface{}) bool {
	switch v := v.(type) {
	case nil:
		return false
	case *NodeIterator:
		// Note: advances the iterator to probe for a node.
		return v.MoveNext()
	case bool:
		return bool(v)
	case float64:
		return v != 0
	case string:
		return v != ""
	case query:
		return v.Select(t) != nil
	default:
		panic(fmt.Errorf("unexpected type: %T", v))
	}
}
|
||||||
|
|
||||||
|
// asString coerces an XPath evaluation result v to a string:
// nil -> ""; bool -> "true"/"false"; number -> shortest decimal form;
// node set -> the first selected node's string-value ("" when empty).
// Unknown types panic.
func asString(t iterator, v interface{}) string {
	switch v := v.(type) {
	case nil:
		return ""
	case bool:
		if v {
			return "true"
		}
		return "false"
	case float64:
		return strconv.FormatFloat(v, 'g', -1, 64)
	case string:
		return v
	case query:
		node := v.Select(t)
		if node == nil {
			return ""
		}
		return node.Value()
	default:
		panic(fmt.Errorf("unexpected type: %T", v))
	}
}
|
||||||
|
|
||||||
|
// booleanFunc is a XPath functions boolean([node-set]).
// Evaluates its argument and applies the XPath boolean conversion.
func booleanFunc(q query, t iterator) interface{} {
	v := q.Evaluate(t)
	return asBool(t, v)
}

// numberFunc is a XPath functions number([node-set]).
// Evaluates its argument and applies the XPath number conversion.
func numberFunc(q query, t iterator) interface{} {
	v := q.Evaluate(t)
	return asNumber(t, v)
}

// stringFunc is a XPath functions string([node-set]).
// Evaluates its argument and applies the XPath string conversion.
func stringFunc(q query, t iterator) interface{} {
	v := q.Evaluate(t)
	return asString(t, v)
}
|
||||||
|
|
||||||
|
// startwithFunc is a XPath functions starts-with(string, string).
// The first argument may be a string or a node set (first node's
// value; an empty selection yields false). The second argument must
// evaluate to a string or this panics.
func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
	return func(q query, t iterator) interface{} {
		var (
			m, n string
			ok   bool
		)
		switch typ := arg1.Evaluate(t).(type) {
		case string:
			m = typ
		case query:
			node := typ.Select(t)
			if node == nil {
				return false
			}
			m = node.Value()
		default:
			panic(errors.New("starts-with() function argument type must be string"))
		}
		n, ok = arg2.Evaluate(t).(string)
		if !ok {
			panic(errors.New("starts-with() function argument type must be string"))
		}
		return strings.HasPrefix(m, n)
	}
}

// endwithFunc is a XPath functions ends-with(string, string).
// Same argument handling as starts-with, testing a suffix instead.
func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
	return func(q query, t iterator) interface{} {
		var (
			m, n string
			ok   bool
		)
		switch typ := arg1.Evaluate(t).(type) {
		case string:
			m = typ
		case query:
			node := typ.Select(t)
			if node == nil {
				return false
			}
			m = node.Value()
		default:
			panic(errors.New("ends-with() function argument type must be string"))
		}
		n, ok = arg2.Evaluate(t).(string)
		if !ok {
			panic(errors.New("ends-with() function argument type must be string"))
		}
		return strings.HasSuffix(m, n)
	}
}

// containsFunc is a XPath functions contains(string or @attr, string).
// Same argument handling as starts-with, testing substring containment.
func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
	return func(q query, t iterator) interface{} {
		var (
			m, n string
			ok   bool
		)

		switch typ := arg1.Evaluate(t).(type) {
		case string:
			m = typ
		case query:
			node := typ.Select(t)
			if node == nil {
				return false
			}
			m = node.Value()
		default:
			panic(errors.New("contains() function argument type must be string"))
		}

		n, ok = arg2.Evaluate(t).(string)
		if !ok {
			panic(errors.New("contains() function argument type must be string"))
		}

		return strings.Contains(m, n)
	}
}
|
||||||
|
|
||||||
|
var (
	// regnewline matches the individual whitespace characters that
	// normalize-space() folds into plain spaces.
	regnewline = regexp.MustCompile(`[\r\n\t]`)
	// regseqspace matches runs of two or more whitespace characters.
	regseqspace = regexp.MustCompile(`\s{2,}`)
)

// normalizespaceFunc is XPath functions normalize-space(string?)
// It trims leading/trailing whitespace and collapses internal
// whitespace runs to a single space. A node-set argument uses the
// first node's value; other non-string results yield "".
func normalizespaceFunc(q query, t iterator) interface{} {
	var m string
	switch typ := q.Evaluate(t).(type) {
	case string:
		m = typ
	case query:
		node := typ.Select(t)
		if node == nil {
			return ""
		}
		m = node.Value()
	}
	m = strings.TrimSpace(m)
	m = regnewline.ReplaceAllString(m, " ")
	m = regseqspace.ReplaceAllString(m, " ")
	return m
}
|
||||||
|
|
||||||
|
// substringFunc is XPath functions substring function returns a part of a given string.
// arg1 is the source (string or node set), arg2 the 1-based start
// position, arg3 the optional length. Panics on non-numeric positions,
// start < 1, or an out-of-range start/length combination.
// NOTE(review): indexes by byte, not by character — multi-byte UTF-8
// input will be sliced mid-rune; XPath defines positions in characters.
func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
	return func(q query, t iterator) interface{} {
		var m string
		switch typ := arg1.Evaluate(t).(type) {
		case string:
			m = typ
		case query:
			node := typ.Select(t)
			if node == nil {
				return ""
			}
			m = node.Value()
		}

		var start, length float64
		var ok bool

		if start, ok = arg2.Evaluate(t).(float64); !ok {
			panic(errors.New("substring() function first argument type must be int"))
		} else if start < 1 {
			panic(errors.New("substring() function first argument type must be >= 1"))
		}
		// Convert the 1-based XPath position to a 0-based index.
		start--
		if arg3 != nil {
			if length, ok = arg3.Evaluate(t).(float64); !ok {
				panic(errors.New("substring() function second argument type must be int"))
			}
		}
		if (len(m) - int(start)) < int(length) {
			panic(errors.New("substring() function start and length argument out of range"))
		}
		// length == 0 means "no length given": take the rest of the string.
		if length > 0 {
			return m[int(start):int(length+start)]
		}
		return m[int(start):]
	}
}

// substringIndFunc is XPath functions substring-before/substring-after function returns a part of a given string.
// after selects which side of the first occurrence of arg2 within arg1
// is returned; "" is returned when either operand is empty or the
// separator does not occur.
func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interface{} {
	return func(q query, t iterator) interface{} {
		var str string
		switch v := arg1.Evaluate(t).(type) {
		case string:
			str = v
		case query:
			node := v.Select(t)
			if node == nil {
				return ""
			}
			str = node.Value()
		}
		var word string
		switch v := arg2.Evaluate(t).(type) {
		case string:
			word = v
		case query:
			node := v.Select(t)
			if node == nil {
				return ""
			}
			word = node.Value()
		}
		if word == "" {
			return ""
		}

		i := strings.Index(str, word)
		if i < 0 {
			return ""
		}
		if after {
			return str[i+len(word):]
		}
		return str[:i]
	}
}

// stringLengthFunc is XPATH string-length( [string] ) function that returns a number
// equal to the number of characters in a given string.
// NOTE(review): len() counts bytes, not characters — differs from the
// XPath definition for non-ASCII input; confirm before relying on it.
func stringLengthFunc(arg1 query) func(query, iterator) interface{} {
	return func(q query, t iterator) interface{} {
		switch v := arg1.Evaluate(t).(type) {
		case string:
			return float64(len(v))
		case query:
			node := v.Select(t)
			if node == nil {
				break
			}
			return float64(len(node.Value()))
		}
		return float64(0)
	}
}
|
||||||
|
|
||||||
|
// translateFunc is XPath functions translate() function returns a replaced string.
// Each character of src maps to the character of dst at the same
// position; src characters beyond len(dst) are deleted (mapped to "").
// NOTE(review): i is src's byte offset (from range) but dst[i] reads a
// single byte — correct for ASCII arguments, suspect for multi-byte
// UTF-8; confirm intended behavior.
func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
	return func(q query, t iterator) interface{} {
		str := asString(t, arg1.Evaluate(t))
		src := asString(t, arg2.Evaluate(t))
		dst := asString(t, arg3.Evaluate(t))

		// Build old/new pairs for strings.NewReplacer.
		var replace []string
		for i, s := range src {
			d := ""
			if i < len(dst) {
				d = string(dst[i])
			}
			replace = append(replace, string(s), d)
		}
		return strings.NewReplacer(replace...).Replace(str)
	}
}

// notFunc is XPATH functions not(expression) function operation.
// A bool is negated; a node set is true when empty; anything else
// yields false.
func notFunc(q query, t iterator) interface{} {
	switch v := q.Evaluate(t).(type) {
	case bool:
		return !v
	case query:
		node := v.Select(t)
		return node == nil
	default:
		return false
	}
}

// concatFunc is the concat function concatenates two or more
// strings and returns the resulting string.
// concat( string1 , string2 [, stringn]* )
// String arguments are used directly; node-set arguments contribute
// their first node's value (empty selections contribute nothing).
func concatFunc(args ...query) func(query, iterator) interface{} {
	return func(q query, t iterator) interface{} {
		var a []string
		for _, v := range args {
			switch v := v.Evaluate(t).(type) {
			case string:
				a = append(a, v)
			case query:
				node := v.Select(t)
				if node != nil {
					a = append(a, node.Value())
				}
			}
		}
		return strings.Join(a, "")
	}
}
|
9
vendor/github.com/antchfx/xpath/func_go110.go
generated
vendored
Normal file
9
vendor/github.com/antchfx/xpath/func_go110.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
// +build go1.10
|
||||||
|
|
||||||
|
package xpath
|
||||||
|
|
||||||
|
import "math"
|
||||||
|
|
||||||
|
// round converts f to the nearest integer, rounding half away from
// zero. Delegates to math.Round (available since Go 1.10).
func round(f float64) int {
	r := math.Round(f)
	return int(r)
}
|
15
vendor/github.com/antchfx/xpath/func_pre_go110.go
generated
vendored
Normal file
15
vendor/github.com/antchfx/xpath/func_pre_go110.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
// +build !go1.10
|
||||||
|
|
||||||
|
package xpath
|
||||||
|
|
||||||
|
import "math"
|
||||||
|
|
||||||
|
// math.Round() is supported by Go 1.10+,
|
||||||
|
// This method just compatible for version <1.10.
|
||||||
|
// https://github.com/golang/go/issues/20100
|
||||||
|
// round converts f to the nearest integer, rounding half away from
// zero. math.Round only exists in Go 1.10+, so this is a local
// stand-in for earlier toolchains.
// https://github.com/golang/go/issues/20100
func round(f float64) int {
	// Values strictly inside (-0.5, 0.5) truncate to zero.
	if -0.5 < f && f < 0.5 {
		return 0
	}
	// Shift by half toward f's sign, then truncate.
	half := math.Copysign(0.5, f)
	return int(f + half)
}
|
295
vendor/github.com/antchfx/xpath/operator.go
generated
vendored
Normal file
295
vendor/github.com/antchfx/xpath/operator.go
generated
vendored
Normal file
@ -0,0 +1,295 @@
|
|||||||
|
package xpath
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The XPath number operator function list.
|
||||||
|
|
||||||
|
// valueType is a return value type.
type valueType int

const (
	booleanType valueType = iota
	numberType
	stringType
	nodeSetType
)

// getValueType maps a runtime value onto its XPath value type:
// float64 -> number, string -> string, bool -> boolean, and any
// implementation of query -> node-set. Anything else panics.
func getValueType(i interface{}) valueType {
	v := reflect.ValueOf(i)
	switch v.Kind() {
	case reflect.Float64:
		return numberType
	case reflect.String:
		return stringType
	case reflect.Bool:
		return booleanType
	default:
		if _, ok := i.(query); ok {
			return nodeSetType
		}
	}
	panic(fmt.Errorf("xpath unknown value type: %v", v.Kind()))
}

// logical compares two operands under an operator string, with the
// iterator providing node-set context.
type logical func(iterator, string, interface{}, interface{}) bool

// logicalFuncs is indexed by [leftOperandType][rightOperandType];
// nil entries are type pairs with no defined comparison.
var logicalFuncs = [][]logical{
	{cmpBooleanBoolean, nil, nil, nil},
	{nil, cmpNumericNumeric, cmpNumericString, cmpNumericNodeSet},
	{nil, cmpStringNumeric, cmpStringString, cmpStringNodeSet},
	{nil, cmpNodeSetNumeric, cmpNodeSetString, cmpNodeSetNodeSet},
}
|
||||||
|
|
||||||
|
// cmpNumberNumberF applies the comparison operator op to the numbers
// a and b (a op b). Unrecognized operators compare as false.
func cmpNumberNumberF(op string, a, b float64) bool {
	switch op {
	case "=":
		return a == b
	case "!=":
		return a != b
	case "<":
		return a < b
	case "<=":
		return a <= b
	case ">":
		return a > b
	case ">=":
		return a >= b
	default:
		return false
	}
}
|
||||||
|
|
||||||
|
// cmpStringStringF applies the comparison operator op to the strings
// a and b (a op b, lexicographic ordering). Unrecognized operators
// compare as false.
func cmpStringStringF(op string, a, b string) bool {
	switch op {
	case "=":
		return a == b
	case "!=":
		return a != b
	case "<":
		return a < b
	case "<=":
		return a <= b
	case ">":
		return a > b
	case ">=":
		return a >= b
	default:
		return false
	}
}
|
||||||
|
|
||||||
|
func cmpBooleanBooleanF(op string, a, b bool) bool {
|
||||||
|
switch op {
|
||||||
|
case "or":
|
||||||
|
return a || b
|
||||||
|
case "and":
|
||||||
|
return a && b
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// cmpNumericNumeric compares two float64 operands (left op right).
func cmpNumericNumeric(t iterator, op string, m, n interface{}) bool {
	a := m.(float64)
	b := n.(float64)
	return cmpNumberNumberF(op, a, b)
}

// cmpNumericString compares a number (left) with a string (right) by
// first parsing the string as a number; panics when it does not parse.
func cmpNumericString(t iterator, op string, m, n interface{}) bool {
	a := m.(float64)
	b := n.(string)
	num, err := strconv.ParseFloat(b, 64)
	if err != nil {
		panic(err)
	}
	return cmpNumberNumberF(op, a, num)
}

// cmpNumericNodeSet compares a number (left) against every node of a
// node set (right); true when any node's numeric value satisfies the
// comparison. Panics on a node value that does not parse as a number.
func cmpNumericNodeSet(t iterator, op string, m, n interface{}) bool {
	a := m.(float64)
	b := n.(query)

	for {
		node := b.Select(t)
		if node == nil {
			break
		}
		num, err := strconv.ParseFloat(node.Value(), 64)
		if err != nil {
			panic(err)
		}
		if cmpNumberNumberF(op, a, num) {
			return true
		}
	}
	return false
}

// cmpNodeSetNumeric compares each node of a node set (left) against a
// number (right); true when any node satisfies the comparison.
func cmpNodeSetNumeric(t iterator, op string, m, n interface{}) bool {
	a := m.(query)
	b := n.(float64)
	for {
		node := a.Select(t)
		if node == nil {
			break
		}
		num, err := strconv.ParseFloat(node.Value(), 64)
		if err != nil {
			panic(err)
		}
		if cmpNumberNumberF(op, num, b) {
			return true
		}
	}
	return false
}

// cmpNodeSetString compares each node of a node set (left) against a
// string (right); true when any node satisfies the comparison.
// NOTE(review): operands reach cmpStringStringF as (b, node value) —
// i.e. right-op-left — which inverts "<"/">" ordering for node-set vs
// string; confirm intended for symmetric use ("="/"!=") only.
func cmpNodeSetString(t iterator, op string, m, n interface{}) bool {
	a := m.(query)
	b := n.(string)
	for {
		node := a.Select(t)
		if node == nil {
			break
		}
		if cmpStringStringF(op, b, node.Value()) {
			return true
		}
	}
	return false
}

// cmpNodeSetNodeSet is the node-set vs node-set comparison slot.
// Not implemented: always reports false.
func cmpNodeSetNodeSet(t iterator, op string, m, n interface{}) bool {
	return false
}
|
||||||
|
|
||||||
|
func cmpStringNumeric(t iterator, op string, m, n interface{}) bool {
|
||||||
|
a := m.(string)
|
||||||
|
b := n.(float64)
|
||||||
|
num, err := strconv.ParseFloat(a, 64)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return cmpNumberNumberF(op, b, num)
|
||||||
|
}
|
||||||
|
|
||||||
|
// cmpStringString compares two string operands (left op right).
func cmpStringString(t iterator, op string, m, n interface{}) bool {
	a := m.(string)
	b := n.(string)
	return cmpStringStringF(op, a, b)
}

// cmpStringNodeSet compares a string (left) against every node of a
// node set (right); true when any node's value satisfies the
// comparison.
func cmpStringNodeSet(t iterator, op string, m, n interface{}) bool {
	a := m.(string)
	b := n.(query)
	for {
		node := b.Select(t)
		if node == nil {
			break
		}
		if cmpStringStringF(op, a, node.Value()) {
			return true
		}
	}
	return false
}

// cmpBooleanBoolean combines two boolean operands ("or"/"and").
func cmpBooleanBoolean(t iterator, op string, m, n interface{}) bool {
	a := m.(bool)
	b := n.(bool)
	return cmpBooleanBooleanF(op, a, b)
}
|
||||||
|
|
||||||
|
// eqFunc is an `=` operator.
// Each operator below dispatches through logicalFuncs on the XPath
// value types of both operands; a nil table entry panics at the call.
func eqFunc(t iterator, m, n interface{}) interface{} {
	t1 := getValueType(m)
	t2 := getValueType(n)
	return logicalFuncs[t1][t2](t, "=", m, n)
}

// gtFunc is an `>` operator.
func gtFunc(t iterator, m, n interface{}) interface{} {
	t1 := getValueType(m)
	t2 := getValueType(n)
	return logicalFuncs[t1][t2](t, ">", m, n)
}

// geFunc is an `>=` operator.
func geFunc(t iterator, m, n interface{}) interface{} {
	t1 := getValueType(m)
	t2 := getValueType(n)
	return logicalFuncs[t1][t2](t, ">=", m, n)
}

// ltFunc is an `<` operator.
func ltFunc(t iterator, m, n interface{}) interface{} {
	t1 := getValueType(m)
	t2 := getValueType(n)
	return logicalFuncs[t1][t2](t, "<", m, n)
}

// leFunc is an `<=` operator.
func leFunc(t iterator, m, n interface{}) interface{} {
	t1 := getValueType(m)
	t2 := getValueType(n)
	return logicalFuncs[t1][t2](t, "<=", m, n)
}

// neFunc is an `!=` operator.
func neFunc(t iterator, m, n interface{}) interface{} {
	t1 := getValueType(m)
	t2 := getValueType(n)
	return logicalFuncs[t1][t2](t, "!=", m, n)
}

// orFunc is an `or` operator.
// Only defined for boolean operands (cmpBooleanBoolean slot).
var orFunc = func(t iterator, m, n interface{}) interface{} {
	t1 := getValueType(m)
	t2 := getValueType(n)
	return logicalFuncs[t1][t2](t, "or", m, n)
}
|
||||||
|
|
||||||
|
// numericExpr converts both operands to float64 via reflection and
// applies cb to them; used by the arithmetic operator closures below.
func numericExpr(m, n interface{}, cb func(float64, float64) float64) float64 {
	f64 := reflect.TypeOf(float64(0))
	lhs := reflect.ValueOf(m).Convert(f64).Float()
	rhs := reflect.ValueOf(n).Convert(f64).Float()
	return cb(lhs, rhs)
}
|
||||||
|
|
||||||
|
// plusFunc is an `+` operator.
var plusFunc = func(m, n interface{}) interface{} {
	return numericExpr(m, n, func(a, b float64) float64 {
		return a + b
	})
}

// minusFunc is an `-` operator.
var minusFunc = func(m, n interface{}) interface{} {
	return numericExpr(m, n, func(a, b float64) float64 {
		return a - b
	})
}

// mulFunc is an `*` operator.
var mulFunc = func(m, n interface{}) interface{} {
	return numericExpr(m, n, func(a, b float64) float64 {
		return a * b
	})
}

// divFunc is an `DIV` operator.
// Float division: division by zero yields ±Inf/NaN, not a panic.
var divFunc = func(m, n interface{}) interface{} {
	return numericExpr(m, n, func(a, b float64) float64 {
		return a / b
	})
}

// modFunc is an 'MOD' operator.
// Truncates both operands to int first; a zero divisor panics
// (integer division by zero), unlike divFunc.
var modFunc = func(m, n interface{}) interface{} {
	return numericExpr(m, n, func(a, b float64) float64 {
		return float64(int(a) % int(b))
	})
}
|
1186
vendor/github.com/antchfx/xpath/parse.go
generated
vendored
Normal file
1186
vendor/github.com/antchfx/xpath/parse.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
824
vendor/github.com/antchfx/xpath/query.go
generated
vendored
Normal file
824
vendor/github.com/antchfx/xpath/query.go
generated
vendored
Normal file
@ -0,0 +1,824 @@
|
|||||||
|
package xpath
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"hash/fnv"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// iterator provides access to the current node while a query executes.
type iterator interface {
	Current() NodeNavigator
}

// An XPath query interface.
type query interface {
	// Select traversing iterator returns a query matched node NodeNavigator.
	Select(iterator) NodeNavigator

	// Evaluate evaluates query and returns values of the current query.
	Evaluate(iterator) interface{}

	// Clone returns a copy of this query with iteration state reset.
	Clone() query
}
|
||||||
|
|
||||||
|
// contextQuery is returns current node on the iterator object query.
type contextQuery struct {
	count int  // 0 until the single context node has been emitted
	Root  bool // Moving to root-level node in the current context iterator.
}

// Select yields the iterator's current node exactly once (optionally
// moved to the document root), then nil on every further call.
func (c *contextQuery) Select(t iterator) (n NodeNavigator) {
	if c.count == 0 {
		c.count++
		n = t.Current().Copy()
		if c.Root {
			n.MoveToRoot()
		}
	}
	return n
}

// Evaluate resets the one-shot state and returns the query itself
// (a node-set result).
func (c *contextQuery) Evaluate(iterator) interface{} {
	c.count = 0
	return c
}

// Clone returns a fresh, unconsumed copy.
func (c *contextQuery) Clone() query {
	return &contextQuery{count: 0, Root: c.Root}
}
|
||||||
|
|
||||||
|
// ancestorQuery is an XPath ancestor node query.(ancestor::*|ancestor-self::*)
type ancestorQuery struct {
	iterator func() NodeNavigator // in-progress walk for the current input node

	Self      bool  // include the node itself (ancestor-or-self)
	Input     query // query producing the context nodes
	Predicate func(NodeNavigator) bool
}

// Select returns the next matching ancestor. It lazily builds a
// per-input-node closure that first (optionally) tests the node
// itself, then walks up through its parents; when the closure is
// exhausted, the next input node is fetched.
func (a *ancestorQuery) Select(t iterator) NodeNavigator {
	for {
		if a.iterator == nil {
			node := a.Input.Select(t)
			if node == nil {
				return nil
			}
			first := true
			a.iterator = func() NodeNavigator {
				if first && a.Self {
					first = false
					if a.Predicate(node) {
						return node
					}
				}
				for node.MoveToParent() {
					if !a.Predicate(node) {
						continue
					}
					return node
				}
				return nil
			}
		}

		if node := a.iterator(); node != nil {
			return node
		}
		// Current walk exhausted; advance to the next input node.
		a.iterator = nil
	}
}

// Evaluate resets iteration state and returns the query (node-set).
func (a *ancestorQuery) Evaluate(t iterator) interface{} {
	a.Input.Evaluate(t)
	a.iterator = nil
	return a
}

// Test reports whether n passes this query's node test.
func (a *ancestorQuery) Test(n NodeNavigator) bool {
	return a.Predicate(n)
}

// Clone returns a copy with iteration state dropped.
func (a *ancestorQuery) Clone() query {
	return &ancestorQuery{Self: a.Self, Input: a.Input.Clone(), Predicate: a.Predicate}
}
|
||||||
|
|
||||||
|
// attributeQuery is an XPath attribute node query.(@*)
type attributeQuery struct {
	iterator func() NodeNavigator // in-progress attribute walk

	Input     query // query producing the element nodes
	Predicate func(NodeNavigator) bool
}

// Select returns the next matching attribute, walking the attributes
// of each input node in turn via a lazily-built closure.
func (a *attributeQuery) Select(t iterator) NodeNavigator {
	for {
		if a.iterator == nil {
			node := a.Input.Select(t)
			if node == nil {
				return nil
			}
			node = node.Copy()
			a.iterator = func() NodeNavigator {
				for {
					onAttr := node.MoveToNextAttribute()
					if !onAttr {
						return nil
					}
					if a.Predicate(node) {
						return node
					}
				}
			}
		}

		if node := a.iterator(); node != nil {
			return node
		}
		// Attributes exhausted; advance to the next input node.
		a.iterator = nil
	}
}

// Evaluate resets iteration state and returns the query (node-set).
func (a *attributeQuery) Evaluate(t iterator) interface{} {
	a.Input.Evaluate(t)
	a.iterator = nil
	return a
}

// Test reports whether n passes this query's node test.
func (a *attributeQuery) Test(n NodeNavigator) bool {
	return a.Predicate(n)
}

// Clone returns a copy with iteration state dropped.
func (a *attributeQuery) Clone() query {
	return &attributeQuery{Input: a.Input.Clone(), Predicate: a.Predicate}
}
|
||||||
|
|
||||||
|
// childQuery is an XPath child node query.(child::*)
type childQuery struct {
	posit    int                  // 1-based position of the last node returned
	iterator func() NodeNavigator // in-progress child walk

	Input     query
	Predicate func(NodeNavigator) bool
}

// Select returns the next matching child. For each input node it
// descends to the first child once, then advances through siblings.
func (c *childQuery) Select(t iterator) NodeNavigator {
	for {
		if c.iterator == nil {
			c.posit = 0
			node := c.Input.Select(t)
			if node == nil {
				return nil
			}
			node = node.Copy()
			first := true
			c.iterator = func() NodeNavigator {
				for {
					// First call moves into the children; later calls
					// step across siblings.
					if (first && !node.MoveToChild()) || (!first && !node.MoveToNext()) {
						return nil
					}
					first = false
					if c.Predicate(node) {
						return node
					}
				}
			}
		}

		if node := c.iterator(); node != nil {
			c.posit++
			return node
		}
		c.iterator = nil
	}
}

// Evaluate resets iteration state and returns the query (node-set).
func (c *childQuery) Evaluate(t iterator) interface{} {
	c.Input.Evaluate(t)
	c.iterator = nil
	return c
}

// Test reports whether n passes this query's node test.
func (c *childQuery) Test(n NodeNavigator) bool {
	return c.Predicate(n)
}

// Clone returns a copy with iteration state (and position) reset.
func (c *childQuery) Clone() query {
	return &childQuery{Input: c.Input.Clone(), Predicate: c.Predicate}
}

// position returns a position of current NodeNavigator.
func (c *childQuery) position() int {
	return c.posit
}
|
||||||
|
|
||||||
|
// descendantQuery is an XPath descendant node query.(descendant::* | descendant-or-self::*)
type descendantQuery struct {
	iterator func() NodeNavigator // in-progress depth-first walk
	posit    int                  // 1-based position of the last node returned

	Self      bool // include the input node itself (descendant-or-self)
	Input     query
	Predicate func(NodeNavigator) bool
}

// Select returns the next matching descendant, performing an explicit
// depth-first traversal: descend into children when possible,
// otherwise move to the next sibling, backing up through parents
// (tracked via level) until the subtree root is reached again.
func (d *descendantQuery) Select(t iterator) NodeNavigator {
	for {
		if d.iterator == nil {
			d.posit = 0
			node := d.Input.Select(t)
			if node == nil {
				return nil
			}
			node = node.Copy()
			level := 0 // depth below the subtree root
			first := true
			d.iterator = func() NodeNavigator {
				if first && d.Self {
					first = false
					if d.Predicate(node) {
						return node
					}
				}

				for {
					if node.MoveToChild() {
						level++
					} else {
						for {
							if level == 0 {
								// Back at the subtree root: done.
								return nil
							}
							if node.MoveToNext() {
								break
							}
							node.MoveToParent()
							level--
						}
					}
					if d.Predicate(node) {
						return node
					}
				}
			}
		}

		if node := d.iterator(); node != nil {
			d.posit++
			return node
		}
		d.iterator = nil
	}
}

// Evaluate resets iteration state and returns the query (node-set).
func (d *descendantQuery) Evaluate(t iterator) interface{} {
	d.Input.Evaluate(t)
	d.iterator = nil
	return d
}

// Test reports whether n passes this query's node test.
func (d *descendantQuery) Test(n NodeNavigator) bool {
	return d.Predicate(n)
}

// position returns a position of current NodeNavigator.
func (d *descendantQuery) position() int {
	return d.posit
}

// Clone returns a copy with iteration state dropped.
func (d *descendantQuery) Clone() query {
	return &descendantQuery{Self: d.Self, Input: d.Input.Clone(), Predicate: d.Predicate}
}
|
||||||
|
|
||||||
|
// followingQuery is an XPath following node query.(following::*|following-sibling::*)
type followingQuery struct {
	// iterator walks nodes after the current input node; nil means the
	// next input node must be fetched from Input.
	iterator func() NodeNavigator

	Input     query
	Sibling   bool // The matching sibling node of current node.
	Predicate func(NodeNavigator) bool
}

// Select returns the next matching node on the following(-sibling) axis.
func (f *followingQuery) Select(t iterator) NodeNavigator {
	for {
		if f.iterator == nil {
			node := f.Input.Select(t)
			if node == nil {
				return nil
			}
			node = node.Copy()
			if f.Sibling {
				// following-sibling::*: step forward through siblings only.
				f.iterator = func() NodeNavigator {
					for {
						if !node.MoveToNext() {
							return nil
						}
						if f.Predicate(node) {
							return node
						}
					}
				}
			} else {
				// following::*: for each next sibling of the node (or of
				// an ancestor, once siblings run out), yield that
				// sibling's whole subtree via a descendant-or-self query.
				var q query // descendant query
				f.iterator = func() NodeNavigator {
					for {
						if q == nil {
							for !node.MoveToNext() {
								if !node.MoveToParent() {
									return nil
								}
							}
							q = &descendantQuery{
								Self:      true,
								Input:     &contextQuery{},
								Predicate: f.Predicate,
							}
							// Point the shared cursor at the sibling so
							// the contextQuery starts there.
							t.Current().MoveTo(node)
						}
						if node := q.Select(t); node != nil {
							return node
						}
						q = nil
					}
				}
			}
		}

		if node := f.iterator(); node != nil {
			return node
		}
		f.iterator = nil
	}
}

// Evaluate lets Input evaluate and returns the query itself as a
// node-set result.
func (f *followingQuery) Evaluate(t iterator) interface{} {
	f.Input.Evaluate(t)
	return f
}

// Test reports whether n satisfies this step's node test.
func (f *followingQuery) Test(n NodeNavigator) bool {
	return f.Predicate(n)
}

// Clone returns a copy of the query with fresh iteration state.
func (f *followingQuery) Clone() query {
	return &followingQuery{Input: f.Input.Clone(), Sibling: f.Sibling, Predicate: f.Predicate}
}
|
||||||
|
|
||||||
|
// precedingQuery is an XPath preceding node query.(preceding::*)
|
||||||
|
type precedingQuery struct {
|
||||||
|
iterator func() NodeNavigator
|
||||||
|
Input query
|
||||||
|
Sibling bool // The matching sibling node of current node.
|
||||||
|
Predicate func(NodeNavigator) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *precedingQuery) Select(t iterator) NodeNavigator {
|
||||||
|
for {
|
||||||
|
if p.iterator == nil {
|
||||||
|
node := p.Input.Select(t)
|
||||||
|
if node == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
node = node.Copy()
|
||||||
|
if p.Sibling {
|
||||||
|
p.iterator = func() NodeNavigator {
|
||||||
|
for {
|
||||||
|
for !node.MoveToPrevious() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if p.Predicate(node) {
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
var q query
|
||||||
|
p.iterator = func() NodeNavigator {
|
||||||
|
for {
|
||||||
|
if q == nil {
|
||||||
|
for !node.MoveToPrevious() {
|
||||||
|
if !node.MoveToParent() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
q = &descendantQuery{
|
||||||
|
Self: true,
|
||||||
|
Input: &contextQuery{},
|
||||||
|
Predicate: p.Predicate,
|
||||||
|
}
|
||||||
|
t.Current().MoveTo(node)
|
||||||
|
}
|
||||||
|
if node := q.Select(t); node != nil {
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
q = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if node := p.iterator(); node != nil {
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
p.iterator = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *precedingQuery) Evaluate(t iterator) interface{} {
|
||||||
|
p.Input.Evaluate(t)
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *precedingQuery) Test(n NodeNavigator) bool {
|
||||||
|
return p.Predicate(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *precedingQuery) Clone() query {
|
||||||
|
return &precedingQuery{Input: p.Input.Clone(), Sibling: p.Sibling, Predicate: p.Predicate}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parentQuery is an XPath parent node query.(parent::*)
type parentQuery struct {
	Input     query                    // produces the context nodes
	Predicate func(NodeNavigator) bool // node test applied to the parent
}

// Select returns the parent of the next Input node whose parent both
// exists and satisfies Predicate; non-matching nodes are skipped.
func (p *parentQuery) Select(t iterator) NodeNavigator {
	for {
		node := p.Input.Select(t)
		if node == nil {
			return nil
		}
		node = node.Copy()
		if node.MoveToParent() && p.Predicate(node) {
			return node
		}
	}
}

// Evaluate lets Input evaluate and returns the query itself as a
// node-set result.
func (p *parentQuery) Evaluate(t iterator) interface{} {
	p.Input.Evaluate(t)
	return p
}

// Clone returns a deep copy of the query.
func (p *parentQuery) Clone() query {
	return &parentQuery{Input: p.Input.Clone(), Predicate: p.Predicate}
}

// Test reports whether n satisfies this step's node test.
func (p *parentQuery) Test(n NodeNavigator) bool {
	return p.Predicate(n)
}
|
||||||
|
|
||||||
|
// selfQuery is an Self node query.(self::*)
type selfQuery struct {
	Input     query                    // produces the context nodes
	Predicate func(NodeNavigator) bool // node test applied to each node itself
}

// Select returns the next Input node that itself satisfies Predicate.
// Note: unlike the other axis queries, the node is not Copy()'d here.
func (s *selfQuery) Select(t iterator) NodeNavigator {
	for {
		node := s.Input.Select(t)
		if node == nil {
			return nil
		}

		if s.Predicate(node) {
			return node
		}
	}
}

// Evaluate lets Input evaluate and returns the query itself as a
// node-set result.
func (s *selfQuery) Evaluate(t iterator) interface{} {
	s.Input.Evaluate(t)
	return s
}

// Test reports whether n satisfies this step's node test.
func (s *selfQuery) Test(n NodeNavigator) bool {
	return s.Predicate(n)
}

// Clone returns a deep copy of the query.
func (s *selfQuery) Clone() query {
	return &selfQuery{Input: s.Input.Clone(), Predicate: s.Predicate}
}
|
||||||
|
|
||||||
|
// filterQuery is an XPath query for predicate filter.
|
||||||
|
type filterQuery struct {
|
||||||
|
Input query
|
||||||
|
Predicate query
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *filterQuery) do(t iterator) bool {
|
||||||
|
val := reflect.ValueOf(f.Predicate.Evaluate(t))
|
||||||
|
switch val.Kind() {
|
||||||
|
case reflect.Bool:
|
||||||
|
return val.Bool()
|
||||||
|
case reflect.String:
|
||||||
|
return len(val.String()) > 0
|
||||||
|
case reflect.Float64:
|
||||||
|
pt := float64(getNodePosition(f.Input))
|
||||||
|
return int(val.Float()) == int(pt)
|
||||||
|
default:
|
||||||
|
if q, ok := f.Predicate.(query); ok {
|
||||||
|
return q.Select(t) != nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *filterQuery) Select(t iterator) NodeNavigator {
|
||||||
|
for {
|
||||||
|
node := f.Input.Select(t)
|
||||||
|
if node == nil {
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
node = node.Copy()
|
||||||
|
//fmt.Println(node.LocalName())
|
||||||
|
|
||||||
|
t.Current().MoveTo(node)
|
||||||
|
if f.do(t) {
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *filterQuery) Evaluate(t iterator) interface{} {
|
||||||
|
f.Input.Evaluate(t)
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *filterQuery) Clone() query {
|
||||||
|
return &filterQuery{Input: f.Input.Clone(), Predicate: f.Predicate.Clone()}
|
||||||
|
}
|
||||||
|
|
||||||
|
// functionQuery is an XPath function that call a function to returns
// value of current NodeNavigator node.
type functionQuery struct {
	Input query                              // Node Set
	Func  func(query, iterator) interface{}  // The xpath function.
}

// Select always returns nil: a function call produces a value, not a
// node set.
func (f *functionQuery) Select(t iterator) NodeNavigator {
	return nil
}

// Evaluate call a specified function that will returns the
// following value type: number,string,boolean.
func (f *functionQuery) Evaluate(t iterator) interface{} {
	return f.Func(f.Input, t)
}

// Clone returns a deep copy of the query.
func (f *functionQuery) Clone() query {
	return &functionQuery{Input: f.Input.Clone(), Func: f.Func}
}
|
||||||
|
|
||||||
|
// constantQuery is an XPath constant operand.
type constantQuery struct {
	Val interface{} // the literal value (number, string, ...)
}

// Select always returns nil: a constant is a value, not a node set.
func (c *constantQuery) Select(t iterator) NodeNavigator {
	return nil
}

// Evaluate returns the constant's value.
func (c *constantQuery) Evaluate(t iterator) interface{} {
	return c.Val
}

// Clone returns the query itself; constants are immutable so sharing
// is safe.
func (c *constantQuery) Clone() query {
	return c
}
|
||||||
|
|
||||||
|
// logicalQuery is an XPath logical expression.
|
||||||
|
type logicalQuery struct {
|
||||||
|
Left, Right query
|
||||||
|
|
||||||
|
Do func(iterator, interface{}, interface{}) interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logicalQuery) Select(t iterator) NodeNavigator {
|
||||||
|
// When a XPath expr is logical expression.
|
||||||
|
node := t.Current().Copy()
|
||||||
|
val := l.Evaluate(t)
|
||||||
|
switch val.(type) {
|
||||||
|
case bool:
|
||||||
|
if val.(bool) == true {
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logicalQuery) Evaluate(t iterator) interface{} {
|
||||||
|
m := l.Left.Evaluate(t)
|
||||||
|
n := l.Right.Evaluate(t)
|
||||||
|
return l.Do(t, m, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logicalQuery) Clone() query {
|
||||||
|
return &logicalQuery{Left: l.Left.Clone(), Right: l.Right.Clone(), Do: l.Do}
|
||||||
|
}
|
||||||
|
|
||||||
|
// numericQuery is an XPath numeric operator expression.
type numericQuery struct {
	Left, Right query // operand expressions

	// Do combines the two evaluated operands (e.g. +, -, *, div, mod).
	Do func(interface{}, interface{}) interface{}
}

// Select always returns nil: an arithmetic expression produces a
// value, not a node set.
func (n *numericQuery) Select(t iterator) NodeNavigator {
	return nil
}

// Evaluate applies Do to the evaluated operands.
func (n *numericQuery) Evaluate(t iterator) interface{} {
	m := n.Left.Evaluate(t)
	k := n.Right.Evaluate(t)
	return n.Do(m, k)
}

// Clone returns a deep copy of the expression.
func (n *numericQuery) Clone() query {
	return &numericQuery{Left: n.Left.Clone(), Right: n.Right.Clone(), Do: n.Do}
}
|
||||||
|
|
||||||
|
type booleanQuery struct {
|
||||||
|
IsOr bool
|
||||||
|
Left, Right query
|
||||||
|
iterator func() NodeNavigator
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *booleanQuery) Select(t iterator) NodeNavigator {
|
||||||
|
if b.iterator == nil {
|
||||||
|
var list []NodeNavigator
|
||||||
|
i := 0
|
||||||
|
root := t.Current().Copy()
|
||||||
|
if b.IsOr {
|
||||||
|
for {
|
||||||
|
node := b.Left.Select(t)
|
||||||
|
if node == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
node = node.Copy()
|
||||||
|
list = append(list, node)
|
||||||
|
}
|
||||||
|
t.Current().MoveTo(root)
|
||||||
|
for {
|
||||||
|
node := b.Right.Select(t)
|
||||||
|
if node == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
node = node.Copy()
|
||||||
|
list = append(list, node)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
var m []NodeNavigator
|
||||||
|
var n []NodeNavigator
|
||||||
|
for {
|
||||||
|
node := b.Left.Select(t)
|
||||||
|
if node == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
node = node.Copy()
|
||||||
|
list = append(m, node)
|
||||||
|
}
|
||||||
|
t.Current().MoveTo(root)
|
||||||
|
for {
|
||||||
|
node := b.Right.Select(t)
|
||||||
|
if node == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
node = node.Copy()
|
||||||
|
list = append(n, node)
|
||||||
|
}
|
||||||
|
for _, k := range m {
|
||||||
|
for _, j := range n {
|
||||||
|
if k == j {
|
||||||
|
list = append(list, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b.iterator = func() NodeNavigator {
|
||||||
|
if i >= len(list) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
node := list[i]
|
||||||
|
i++
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return b.iterator()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *booleanQuery) Evaluate(t iterator) interface{} {
|
||||||
|
m := b.Left.Evaluate(t)
|
||||||
|
left := asBool(t, m)
|
||||||
|
if b.IsOr && left {
|
||||||
|
return true
|
||||||
|
} else if !b.IsOr && !left {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
m = b.Right.Evaluate(t)
|
||||||
|
return asBool(t, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *booleanQuery) Clone() query {
|
||||||
|
return &booleanQuery{IsOr: b.IsOr, Left: b.Left.Clone(), Right: b.Right.Clone()}
|
||||||
|
}
|
||||||
|
|
||||||
|
// unionQuery implements the XPath `|` operator: the deduplicated union
// of two node-set operands.
type unionQuery struct {
	Left, Right query
	iterator    func() NodeNavigator // yields the materialized union; nil until first Select
}

// Select materializes the union on first call, deduplicating nodes by
// structural hash (see getHashCode), then yields it one node at a time.
// NOTE(review): results are gathered through a map, so the iteration
// order is Go's randomized map order, not document order — confirm
// callers do not rely on ordering here.
func (u *unionQuery) Select(t iterator) NodeNavigator {
	if u.iterator == nil {
		var m = make(map[uint64]NodeNavigator)
		root := t.Current().Copy()
		for {
			node := u.Left.Select(t)
			if node == nil {
				break
			}
			// getHashCode mutates its navigator, so hash a throwaway copy.
			code := getHashCode(node.Copy())
			if _, ok := m[code]; !ok {
				m[code] = node.Copy()
			}
		}
		// Restore the cursor so Right starts from the same context.
		t.Current().MoveTo(root)
		for {
			node := u.Right.Select(t)
			if node == nil {
				break
			}
			code := getHashCode(node.Copy())
			if _, ok := m[code]; !ok {
				m[code] = node.Copy()
			}
		}
		list := make([]NodeNavigator, len(m))
		var i int
		for _, v := range m {
			list[i] = v
			i++
		}
		i = 0
		u.iterator = func() NodeNavigator {
			if i >= len(list) {
				return nil
			}
			node := list[i]
			i++
			return node
		}
	}
	return u.iterator()
}

// Evaluate resets iteration state, lets both operands evaluate, and
// returns the query itself as a node-set result.
func (u *unionQuery) Evaluate(t iterator) interface{} {
	u.iterator = nil
	u.Left.Evaluate(t)
	u.Right.Evaluate(t)
	return u
}

// Clone returns a deep copy of the query with fresh iteration state.
func (u *unionQuery) Clone() query {
	return &unionQuery{Left: u.Left.Clone(), Right: u.Right.Clone()}
}
|
||||||
|
|
||||||
|
// getHashCode computes an FNV-1a hash identifying n's position in the
// document: the node's name/value plus the chain of 1-based reverse
// sibling indexes up to the root. It mutates n while walking, so
// callers must pass a Copy().
func getHashCode(n NodeNavigator) uint64 {
	var sb bytes.Buffer
	switch n.NodeType() {
	case AttributeNode, TextNode, CommentNode:
		sb.WriteString(fmt.Sprintf("%s=%s", n.LocalName(), n.Value()))
		// Qualify with the parent element's name so equal name/value
		// pairs under different parents hash differently.
		if n.MoveToParent() {
			sb.WriteString(n.LocalName())
		}
	case ElementNode:
		sb.WriteString(n.Prefix() + n.LocalName())
		// Count preceding siblings to get the node's index, then repeat
		// for each ancestor to encode the full path.
		d := 1
		for n.MoveToPrevious() {
			d++
		}
		sb.WriteString(fmt.Sprintf("-%d", d))

		for n.MoveToParent() {
			d = 1
			for n.MoveToPrevious() {
				d++
			}
			sb.WriteString(fmt.Sprintf("-%d", d))
		}
	}
	h := fnv.New64a()
	h.Write([]byte(sb.String()))
	return h.Sum64()
}
|
||||||
|
|
||||||
|
// getNodePosition returns the 1-based position (XPath position()) of
// q's current node when the query tracks one — i.e. implements an
// unexported position() method — and 1 otherwise.
func getNodePosition(q query) int {
	type Position interface {
		position() int
	}
	if count, ok := q.(Position); ok {
		return count.position()
	}
	return 1
}
|
157
vendor/github.com/antchfx/xpath/xpath.go
generated
vendored
Normal file
157
vendor/github.com/antchfx/xpath/xpath.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
|
|||||||
|
package xpath
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NodeType represents a type of XPath node.
type NodeType int

const (
	// RootNode is a root node of the XML document or node tree.
	RootNode NodeType = iota

	// ElementNode is an element, such as <element>.
	ElementNode

	// AttributeNode is an attribute, such as id='123'.
	AttributeNode

	// TextNode is the text content of a node.
	TextNode

	// CommentNode is a comment node, such as <!-- my comment -->
	CommentNode

	// allNode is any types of node, used by xpath package only to predicate match.
	// It is unexported and not part of the public NodeType set.
	allNode
)
|
||||||
|
|
||||||
|
// NodeNavigator provides cursor model for navigating XML data.
// Callers in this package rely on a Move* method that returns false
// leaving the cursor position unchanged.
type NodeNavigator interface {
	// NodeType returns the XPathNodeType of the current node.
	NodeType() NodeType

	// LocalName gets the Name of the current node.
	LocalName() string

	// Prefix returns namespace prefix associated with the current node.
	Prefix() string

	// Value gets the value of current node.
	Value() string

	// Copy does a deep copy of the NodeNavigator and all its components.
	Copy() NodeNavigator

	// MoveToRoot moves the NodeNavigator to the root node of the current node.
	MoveToRoot()

	// MoveToParent moves the NodeNavigator to the parent node of the current node.
	MoveToParent() bool

	// MoveToNextAttribute moves the NodeNavigator to the next attribute on current node.
	MoveToNextAttribute() bool

	// MoveToChild moves the NodeNavigator to the first child node of the current node.
	MoveToChild() bool

	// MoveToFirst moves the NodeNavigator to the first sibling node of the current node.
	MoveToFirst() bool

	// MoveToNext moves the NodeNavigator to the next sibling node of the current node.
	MoveToNext() bool

	// MoveToPrevious moves the NodeNavigator to the previous sibling node of the current node.
	MoveToPrevious() bool

	// MoveTo moves the NodeNavigator to the same position as the specified NodeNavigator.
	MoveTo(NodeNavigator) bool
}
|
||||||
|
|
||||||
|
// NodeIterator holds all matched Node object.
type NodeIterator struct {
	node  NodeNavigator // cursor positioned on the most recent match
	query query         // compiled query producing the matches
}

// Current returns current node which matched.
func (t *NodeIterator) Current() NodeNavigator {
	return t.node
}

// MoveNext moves Navigator to the next match node.
// It returns false when the query is exhausted.
func (t *NodeIterator) MoveNext() bool {
	n := t.query.Select(t)
	if n != nil {
		// Reposition the cursor on the match; fall back to storing a
		// copy when the navigator cannot move there directly.
		if !t.node.MoveTo(n) {
			t.node = n.Copy()
		}
		return true
	}
	return false
}
|
||||||
|
|
||||||
|
// Select selects a node set using the specified XPath expression.
// It panics if expr cannot be compiled.
// This method is deprecated, recommend using Expr.Select() method instead.
func Select(root NodeNavigator, expr string) *NodeIterator {
	exp, err := Compile(expr)
	if err != nil {
		panic(err)
	}
	return exp.Select(root)
}
|
||||||
|
|
||||||
|
// Expr is an XPath expression for query.
type Expr struct {
	s string // the original expression text
	q query  // the compiled query tree
}

// iteratorFunc adapts a plain function to the iterator interface so a
// single root node can be presented as the current context.
type iteratorFunc func() NodeNavigator

// Current returns the navigator produced by the underlying function.
func (f iteratorFunc) Current() NodeNavigator {
	return f()
}

// Evaluate returns the result of the expression.
// The result type of the expression is one of the follow: bool,float64,string,NodeIterator).
func (expr *Expr) Evaluate(root NodeNavigator) interface{} {
	val := expr.q.Evaluate(iteratorFunc(func() NodeNavigator { return root }))
	switch val.(type) {
	case query:
		// A node-set result is wrapped in an iterator over a fresh
		// clone so repeated Evaluate calls don't share state.
		return &NodeIterator{query: expr.q.Clone(), node: root}
	}
	return val
}

// Select selects a node set using the specified XPath expression.
func (expr *Expr) Select(root NodeNavigator) *NodeIterator {
	return &NodeIterator{query: expr.q.Clone(), node: root}
}

// String returns XPath expression string.
func (expr *Expr) String() string {
	return expr.s
}
|
||||||
|
|
||||||
|
// Compile compiles an XPath expression string.
// It returns an error for an empty or malformed expression.
func Compile(expr string) (*Expr, error) {
	// NOTE(review): the message says "nil" but the check is for an empty
	// string; "expression is empty" would be more accurate — confirm no
	// caller matches on this exact text before changing it.
	if expr == "" {
		return nil, errors.New("expr expression is nil")
	}
	qy, err := build(expr)
	if err != nil {
		return nil, err
	}
	return &Expr{s: expr, q: qy}, nil
}
|
||||||
|
|
||||||
|
// MustCompile compiles an XPath expression string and ignored error.
// NOTE(review): contrary to the usual Go Must* convention this returns
// nil instead of panicking on a bad expression, so callers must
// nil-check the result; changing it to panic would break them.
func MustCompile(expr string) *Expr {
	exp, err := Compile(expr)
	if err != nil {
		return nil
	}
	return exp
}
|
3
vendor/golang.org/x/net/AUTHORS
generated
vendored
Normal file
3
vendor/golang.org/x/net/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# This source code refers to The Go Authors for copyright purposes.
|
||||||
|
# The master list of authors is in the main Go distribution,
|
||||||
|
# visible at http://tip.golang.org/AUTHORS.
|
3
vendor/golang.org/x/net/CONTRIBUTORS
generated
vendored
Normal file
3
vendor/golang.org/x/net/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# This source code was written by the Go contributors.
|
||||||
|
# The master list of contributors is in the main Go distribution,
|
||||||
|
# visible at http://tip.golang.org/CONTRIBUTORS.
|
27
vendor/golang.org/x/net/LICENSE
generated
vendored
Normal file
27
vendor/golang.org/x/net/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
22
vendor/golang.org/x/net/PATENTS
generated
vendored
Normal file
22
vendor/golang.org/x/net/PATENTS
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
Additional IP Rights Grant (Patents)
|
||||||
|
|
||||||
|
"This implementation" means the copyrightable works distributed by
|
||||||
|
Google as part of the Go project.
|
||||||
|
|
||||||
|
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||||
|
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||||
|
patent license to make, have made, use, offer to sell, sell, import,
|
||||||
|
transfer and otherwise run, modify and propagate the contents of this
|
||||||
|
implementation of Go, where such license applies only to those patent
|
||||||
|
claims, both currently owned or controlled by Google and acquired in
|
||||||
|
the future, licensable by Google that are necessarily infringed by this
|
||||||
|
implementation of Go. This grant does not include claims that would be
|
||||||
|
infringed only as a consequence of further modification of this
|
||||||
|
implementation. If you or your agent or exclusive licensee institute or
|
||||||
|
order or agree to the institution of patent litigation against any
|
||||||
|
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||||
|
that this implementation of Go or any code incorporated within this
|
||||||
|
implementation of Go constitutes direct or contributory patent
|
||||||
|
infringement, or inducement of patent infringement, then any patent
|
||||||
|
rights granted to you under this License for this implementation of Go
|
||||||
|
shall terminate as of the date such litigation is filed.
|
78
vendor/golang.org/x/net/html/atom/atom.go
generated
vendored
Normal file
78
vendor/golang.org/x/net/html/atom/atom.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package atom provides integer codes (also known as atoms) for a fixed set of
|
||||||
|
// frequently occurring HTML strings: tag names and attribute keys such as "p"
|
||||||
|
// and "id".
|
||||||
|
//
|
||||||
|
// Sharing an atom's name between all elements with the same tag can result in
|
||||||
|
// fewer string allocations when tokenizing and parsing HTML. Integer
|
||||||
|
// comparisons are also generally faster than string comparisons.
|
||||||
|
//
|
||||||
|
// The value of an atom's particular code is not guaranteed to stay the same
|
||||||
|
// between versions of this package. Neither is any ordering guaranteed:
|
||||||
|
// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
|
||||||
|
// be dense. The only guarantees are that e.g. looking up "div" will yield
|
||||||
|
// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
|
||||||
|
package atom // import "golang.org/x/net/html/atom"
|
||||||
|
|
||||||
|
// Atom is an integer code for a string. The zero value maps to "".
// The high bits encode an offset into atomText and the low 8 bits the
// name's length.
type Atom uint32

// String returns the atom's name.
func (a Atom) String() string {
	start := uint32(a >> 8)
	n := uint32(a & 0xff)
	if start+n > uint32(len(atomText)) {
		// Out-of-range codes (including 0) map to the empty string.
		return ""
	}
	return atomText[start : start+n]
}

// string is like String but without the bounds check; it is only used
// with atoms taken from the lookup table.
func (a Atom) string() string {
	return atomText[a>>8 : a>>8+a&0xff]
}
|
||||||
|
|
||||||
|
// fnv computes the FNV hash with an arbitrary starting value h.
// It folds each byte of s into h using the FNV-1a step
// (xor, then multiply by the 32-bit FNV prime 16777619).
func fnv(h uint32, s []byte) uint32 {
	for _, c := range s {
		h = (h ^ uint32(c)) * 16777619
	}
	return h
}
|
||||||
|
|
||||||
|
// match reports whether the first len(t) bytes of s equal t.
// Callers must guarantee len(s) >= len(t); only len(t) bytes of s are
// examined.
func match(s string, t []byte) bool {
	for i := 0; i < len(t); i++ {
		if s[i] != t[i] {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// Lookup returns the atom whose name is s. It returns zero if there is no
// such atom. The lookup is case sensitive.
func Lookup(s []byte) Atom {
	if len(s) == 0 || len(s) > maxAtomLen {
		return 0
	}
	h := fnv(hash0, s)
	// The hash table stores each atom at one of two candidate slots
	// (low and high halves of the hash); probe both, verifying length
	// and bytes before accepting.
	if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
		return a
	}
	if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
		return a
	}
	return 0
}

// String returns a string whose contents are equal to s. In that sense, it is
// equivalent to string(s) but may be more efficient.
func String(s []byte) string {
	if a := Lookup(s); a != 0 {
		// Known atom: return the shared interned name, avoiding an
		// allocation.
		return a.String()
	}
	return string(s)
}
|
712
vendor/golang.org/x/net/html/atom/gen.go
generated
vendored
Normal file
712
vendor/golang.org/x/net/html/atom/gen.go
generated
vendored
Normal file
@ -0,0 +1,712 @@
|
|||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
//go:generate go run gen.go
|
||||||
|
//go:generate go run gen.go -test
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"go/format"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// identifier converts s to a Go exported identifier.
// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
func identifier(s string) string {
	out := make([]byte, 0, len(s))
	upper := true // capitalize the next letter; '-' starts a new word
	for _, r := range s {
		if r == '-' {
			upper = true
			continue
		}
		if upper && 'a' <= r && r <= 'z' {
			r -= 'a' - 'A'
		}
		upper = false
		out = append(out, byte(r))
	}
	return string(out)
}
|
||||||
|
|
||||||
|
var test = flag.Bool("test", false, "generate table_test.go")
|
||||||
|
|
||||||
|
func genFile(name string, buf *bytes.Buffer) {
|
||||||
|
b, err := format.Source(buf.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if err := ioutil.WriteFile(name, b, 0644); err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
var all []string
|
||||||
|
all = append(all, elements...)
|
||||||
|
all = append(all, attributes...)
|
||||||
|
all = append(all, eventHandlers...)
|
||||||
|
all = append(all, extra...)
|
||||||
|
sort.Strings(all)
|
||||||
|
|
||||||
|
// uniq - lists have dups
|
||||||
|
w := 0
|
||||||
|
for _, s := range all {
|
||||||
|
if w == 0 || all[w-1] != s {
|
||||||
|
all[w] = s
|
||||||
|
w++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
all = all[:w]
|
||||||
|
|
||||||
|
if *test {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
|
||||||
|
fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n")
|
||||||
|
fmt.Fprintln(&buf, "package atom\n")
|
||||||
|
fmt.Fprintln(&buf, "var testAtomList = []string{")
|
||||||
|
for _, s := range all {
|
||||||
|
fmt.Fprintf(&buf, "\t%q,\n", s)
|
||||||
|
}
|
||||||
|
fmt.Fprintln(&buf, "}")
|
||||||
|
|
||||||
|
genFile("table_test.go", &buf)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find hash that minimizes table size.
|
||||||
|
var best *table
|
||||||
|
for i := 0; i < 1000000; i++ {
|
||||||
|
if best != nil && 1<<(best.k-1) < len(all) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
h := rand.Uint32()
|
||||||
|
for k := uint(0); k <= 16; k++ {
|
||||||
|
if best != nil && k >= best.k {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
var t table
|
||||||
|
if t.init(h, k, all) {
|
||||||
|
best = &t
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if best == nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "failed to construct string table\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lay out strings, using overlaps when possible.
|
||||||
|
layout := append([]string{}, all...)
|
||||||
|
|
||||||
|
// Remove strings that are substrings of other strings
|
||||||
|
for changed := true; changed; {
|
||||||
|
changed = false
|
||||||
|
for i, s := range layout {
|
||||||
|
if s == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for j, t := range layout {
|
||||||
|
if i != j && t != "" && strings.Contains(s, t) {
|
||||||
|
changed = true
|
||||||
|
layout[j] = ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Join strings where one suffix matches another prefix.
|
||||||
|
for {
|
||||||
|
// Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
|
||||||
|
// maximizing overlap length k.
|
||||||
|
besti := -1
|
||||||
|
bestj := -1
|
||||||
|
bestk := 0
|
||||||
|
for i, s := range layout {
|
||||||
|
if s == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for j, t := range layout {
|
||||||
|
if i == j {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
|
||||||
|
if s[len(s)-k:] == t[:k] {
|
||||||
|
besti = i
|
||||||
|
bestj = j
|
||||||
|
bestk = k
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if bestk > 0 {
|
||||||
|
layout[besti] += layout[bestj][bestk:]
|
||||||
|
layout[bestj] = ""
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
text := strings.Join(layout, "")
|
||||||
|
|
||||||
|
atom := map[string]uint32{}
|
||||||
|
for _, s := range all {
|
||||||
|
off := strings.Index(text, s)
|
||||||
|
if off < 0 {
|
||||||
|
panic("lost string " + s)
|
||||||
|
}
|
||||||
|
atom[s] = uint32(off<<8 | len(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
// Generate the Go code.
|
||||||
|
fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
|
||||||
|
fmt.Fprintln(&buf, "//go:generate go run gen.go\n")
|
||||||
|
fmt.Fprintln(&buf, "package atom\n\nconst (")
|
||||||
|
|
||||||
|
// compute max len
|
||||||
|
maxLen := 0
|
||||||
|
for _, s := range all {
|
||||||
|
if maxLen < len(s) {
|
||||||
|
maxLen = len(s)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s])
|
||||||
|
}
|
||||||
|
fmt.Fprintln(&buf, ")\n")
|
||||||
|
|
||||||
|
fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0)
|
||||||
|
fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen)
|
||||||
|
|
||||||
|
fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k)
|
||||||
|
for i, s := range best.tab {
|
||||||
|
if s == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&buf, "}\n")
|
||||||
|
datasize := (1 << best.k) * 4
|
||||||
|
|
||||||
|
fmt.Fprintln(&buf, "const atomText =")
|
||||||
|
textsize := len(text)
|
||||||
|
for len(text) > 60 {
|
||||||
|
fmt.Fprintf(&buf, "\t%q +\n", text[:60])
|
||||||
|
text = text[60:]
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&buf, "\t%q\n\n", text)
|
||||||
|
|
||||||
|
genFile("table.go", &buf)
|
||||||
|
|
||||||
|
fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
|
||||||
|
}
|
||||||
|
|
||||||
|
type byLen []string
|
||||||
|
|
||||||
|
func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
|
||||||
|
func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||||
|
func (x byLen) Len() int { return len(x) }
|
||||||
|
|
||||||
|
// fnv computes the FNV hash with an arbitrary starting value h.
|
||||||
|
func fnv(h uint32, s string) uint32 {
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
h ^= uint32(s[i])
|
||||||
|
h *= 16777619
|
||||||
|
}
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
// A table represents an attempt at constructing the lookup table.
|
||||||
|
// The lookup table uses cuckoo hashing, meaning that each string
|
||||||
|
// can be found in one of two positions.
|
||||||
|
type table struct {
|
||||||
|
h0 uint32
|
||||||
|
k uint
|
||||||
|
mask uint32
|
||||||
|
tab []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// hash returns the two hashes for s.
|
||||||
|
func (t *table) hash(s string) (h1, h2 uint32) {
|
||||||
|
h := fnv(t.h0, s)
|
||||||
|
h1 = h & t.mask
|
||||||
|
h2 = (h >> 16) & t.mask
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// init initializes the table with the given parameters.
|
||||||
|
// h0 is the initial hash value,
|
||||||
|
// k is the number of bits of hash value to use, and
|
||||||
|
// x is the list of strings to store in the table.
|
||||||
|
// init returns false if the table cannot be constructed.
|
||||||
|
func (t *table) init(h0 uint32, k uint, x []string) bool {
|
||||||
|
t.h0 = h0
|
||||||
|
t.k = k
|
||||||
|
t.tab = make([]string, 1<<k)
|
||||||
|
t.mask = 1<<k - 1
|
||||||
|
for _, s := range x {
|
||||||
|
if !t.insert(s) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// insert inserts s in the table.
|
||||||
|
func (t *table) insert(s string) bool {
|
||||||
|
h1, h2 := t.hash(s)
|
||||||
|
if t.tab[h1] == "" {
|
||||||
|
t.tab[h1] = s
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if t.tab[h2] == "" {
|
||||||
|
t.tab[h2] = s
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if t.push(h1, 0) {
|
||||||
|
t.tab[h1] = s
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if t.push(h2, 0) {
|
||||||
|
t.tab[h2] = s
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// push attempts to push aside the entry in slot i.
|
||||||
|
func (t *table) push(i uint32, depth int) bool {
|
||||||
|
if depth > len(t.tab) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
s := t.tab[i]
|
||||||
|
h1, h2 := t.hash(s)
|
||||||
|
j := h1 + h2 - i
|
||||||
|
if t.tab[j] != "" && !t.push(j, depth+1) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
t.tab[j] = s
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// The lists of element names and attribute keys were taken from
|
||||||
|
// https://html.spec.whatwg.org/multipage/indices.html#index
|
||||||
|
// as of the "HTML Living Standard - Last Updated 16 April 2018" version.
|
||||||
|
|
||||||
|
// "command", "keygen" and "menuitem" have been removed from the spec,
|
||||||
|
// but are kept here for backwards compatibility.
|
||||||
|
var elements = []string{
|
||||||
|
"a",
|
||||||
|
"abbr",
|
||||||
|
"address",
|
||||||
|
"area",
|
||||||
|
"article",
|
||||||
|
"aside",
|
||||||
|
"audio",
|
||||||
|
"b",
|
||||||
|
"base",
|
||||||
|
"bdi",
|
||||||
|
"bdo",
|
||||||
|
"blockquote",
|
||||||
|
"body",
|
||||||
|
"br",
|
||||||
|
"button",
|
||||||
|
"canvas",
|
||||||
|
"caption",
|
||||||
|
"cite",
|
||||||
|
"code",
|
||||||
|
"col",
|
||||||
|
"colgroup",
|
||||||
|
"command",
|
||||||
|
"data",
|
||||||
|
"datalist",
|
||||||
|
"dd",
|
||||||
|
"del",
|
||||||
|
"details",
|
||||||
|
"dfn",
|
||||||
|
"dialog",
|
||||||
|
"div",
|
||||||
|
"dl",
|
||||||
|
"dt",
|
||||||
|
"em",
|
||||||
|
"embed",
|
||||||
|
"fieldset",
|
||||||
|
"figcaption",
|
||||||
|
"figure",
|
||||||
|
"footer",
|
||||||
|
"form",
|
||||||
|
"h1",
|
||||||
|
"h2",
|
||||||
|
"h3",
|
||||||
|
"h4",
|
||||||
|
"h5",
|
||||||
|
"h6",
|
||||||
|
"head",
|
||||||
|
"header",
|
||||||
|
"hgroup",
|
||||||
|
"hr",
|
||||||
|
"html",
|
||||||
|
"i",
|
||||||
|
"iframe",
|
||||||
|
"img",
|
||||||
|
"input",
|
||||||
|
"ins",
|
||||||
|
"kbd",
|
||||||
|
"keygen",
|
||||||
|
"label",
|
||||||
|
"legend",
|
||||||
|
"li",
|
||||||
|
"link",
|
||||||
|
"main",
|
||||||
|
"map",
|
||||||
|
"mark",
|
||||||
|
"menu",
|
||||||
|
"menuitem",
|
||||||
|
"meta",
|
||||||
|
"meter",
|
||||||
|
"nav",
|
||||||
|
"noscript",
|
||||||
|
"object",
|
||||||
|
"ol",
|
||||||
|
"optgroup",
|
||||||
|
"option",
|
||||||
|
"output",
|
||||||
|
"p",
|
||||||
|
"param",
|
||||||
|
"picture",
|
||||||
|
"pre",
|
||||||
|
"progress",
|
||||||
|
"q",
|
||||||
|
"rp",
|
||||||
|
"rt",
|
||||||
|
"ruby",
|
||||||
|
"s",
|
||||||
|
"samp",
|
||||||
|
"script",
|
||||||
|
"section",
|
||||||
|
"select",
|
||||||
|
"slot",
|
||||||
|
"small",
|
||||||
|
"source",
|
||||||
|
"span",
|
||||||
|
"strong",
|
||||||
|
"style",
|
||||||
|
"sub",
|
||||||
|
"summary",
|
||||||
|
"sup",
|
||||||
|
"table",
|
||||||
|
"tbody",
|
||||||
|
"td",
|
||||||
|
"template",
|
||||||
|
"textarea",
|
||||||
|
"tfoot",
|
||||||
|
"th",
|
||||||
|
"thead",
|
||||||
|
"time",
|
||||||
|
"title",
|
||||||
|
"tr",
|
||||||
|
"track",
|
||||||
|
"u",
|
||||||
|
"ul",
|
||||||
|
"var",
|
||||||
|
"video",
|
||||||
|
"wbr",
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
|
||||||
|
//
|
||||||
|
// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup",
|
||||||
|
// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec,
|
||||||
|
// but are kept here for backwards compatibility.
|
||||||
|
var attributes = []string{
|
||||||
|
"abbr",
|
||||||
|
"accept",
|
||||||
|
"accept-charset",
|
||||||
|
"accesskey",
|
||||||
|
"action",
|
||||||
|
"allowfullscreen",
|
||||||
|
"allowpaymentrequest",
|
||||||
|
"allowusermedia",
|
||||||
|
"alt",
|
||||||
|
"as",
|
||||||
|
"async",
|
||||||
|
"autocomplete",
|
||||||
|
"autofocus",
|
||||||
|
"autoplay",
|
||||||
|
"challenge",
|
||||||
|
"charset",
|
||||||
|
"checked",
|
||||||
|
"cite",
|
||||||
|
"class",
|
||||||
|
"color",
|
||||||
|
"cols",
|
||||||
|
"colspan",
|
||||||
|
"command",
|
||||||
|
"content",
|
||||||
|
"contenteditable",
|
||||||
|
"contextmenu",
|
||||||
|
"controls",
|
||||||
|
"coords",
|
||||||
|
"crossorigin",
|
||||||
|
"data",
|
||||||
|
"datetime",
|
||||||
|
"default",
|
||||||
|
"defer",
|
||||||
|
"dir",
|
||||||
|
"dirname",
|
||||||
|
"disabled",
|
||||||
|
"download",
|
||||||
|
"draggable",
|
||||||
|
"dropzone",
|
||||||
|
"enctype",
|
||||||
|
"for",
|
||||||
|
"form",
|
||||||
|
"formaction",
|
||||||
|
"formenctype",
|
||||||
|
"formmethod",
|
||||||
|
"formnovalidate",
|
||||||
|
"formtarget",
|
||||||
|
"headers",
|
||||||
|
"height",
|
||||||
|
"hidden",
|
||||||
|
"high",
|
||||||
|
"href",
|
||||||
|
"hreflang",
|
||||||
|
"http-equiv",
|
||||||
|
"icon",
|
||||||
|
"id",
|
||||||
|
"inputmode",
|
||||||
|
"integrity",
|
||||||
|
"is",
|
||||||
|
"ismap",
|
||||||
|
"itemid",
|
||||||
|
"itemprop",
|
||||||
|
"itemref",
|
||||||
|
"itemscope",
|
||||||
|
"itemtype",
|
||||||
|
"keytype",
|
||||||
|
"kind",
|
||||||
|
"label",
|
||||||
|
"lang",
|
||||||
|
"list",
|
||||||
|
"loop",
|
||||||
|
"low",
|
||||||
|
"manifest",
|
||||||
|
"max",
|
||||||
|
"maxlength",
|
||||||
|
"media",
|
||||||
|
"mediagroup",
|
||||||
|
"method",
|
||||||
|
"min",
|
||||||
|
"minlength",
|
||||||
|
"multiple",
|
||||||
|
"muted",
|
||||||
|
"name",
|
||||||
|
"nomodule",
|
||||||
|
"nonce",
|
||||||
|
"novalidate",
|
||||||
|
"open",
|
||||||
|
"optimum",
|
||||||
|
"pattern",
|
||||||
|
"ping",
|
||||||
|
"placeholder",
|
||||||
|
"playsinline",
|
||||||
|
"poster",
|
||||||
|
"preload",
|
||||||
|
"radiogroup",
|
||||||
|
"readonly",
|
||||||
|
"referrerpolicy",
|
||||||
|
"rel",
|
||||||
|
"required",
|
||||||
|
"reversed",
|
||||||
|
"rows",
|
||||||
|
"rowspan",
|
||||||
|
"sandbox",
|
||||||
|
"spellcheck",
|
||||||
|
"scope",
|
||||||
|
"scoped",
|
||||||
|
"seamless",
|
||||||
|
"selected",
|
||||||
|
"shape",
|
||||||
|
"size",
|
||||||
|
"sizes",
|
||||||
|
"sortable",
|
||||||
|
"sorted",
|
||||||
|
"slot",
|
||||||
|
"span",
|
||||||
|
"spellcheck",
|
||||||
|
"src",
|
||||||
|
"srcdoc",
|
||||||
|
"srclang",
|
||||||
|
"srcset",
|
||||||
|
"start",
|
||||||
|
"step",
|
||||||
|
"style",
|
||||||
|
"tabindex",
|
||||||
|
"target",
|
||||||
|
"title",
|
||||||
|
"translate",
|
||||||
|
"type",
|
||||||
|
"typemustmatch",
|
||||||
|
"updateviacache",
|
||||||
|
"usemap",
|
||||||
|
"value",
|
||||||
|
"width",
|
||||||
|
"workertype",
|
||||||
|
"wrap",
|
||||||
|
}
|
||||||
|
|
||||||
|
// "onautocomplete", "onautocompleteerror", "onmousewheel",
|
||||||
|
// "onshow" and "onsort" have been removed from the spec,
|
||||||
|
// but are kept here for backwards compatibility.
|
||||||
|
var eventHandlers = []string{
|
||||||
|
"onabort",
|
||||||
|
"onautocomplete",
|
||||||
|
"onautocompleteerror",
|
||||||
|
"onauxclick",
|
||||||
|
"onafterprint",
|
||||||
|
"onbeforeprint",
|
||||||
|
"onbeforeunload",
|
||||||
|
"onblur",
|
||||||
|
"oncancel",
|
||||||
|
"oncanplay",
|
||||||
|
"oncanplaythrough",
|
||||||
|
"onchange",
|
||||||
|
"onclick",
|
||||||
|
"onclose",
|
||||||
|
"oncontextmenu",
|
||||||
|
"oncopy",
|
||||||
|
"oncuechange",
|
||||||
|
"oncut",
|
||||||
|
"ondblclick",
|
||||||
|
"ondrag",
|
||||||
|
"ondragend",
|
||||||
|
"ondragenter",
|
||||||
|
"ondragexit",
|
||||||
|
"ondragleave",
|
||||||
|
"ondragover",
|
||||||
|
"ondragstart",
|
||||||
|
"ondrop",
|
||||||
|
"ondurationchange",
|
||||||
|
"onemptied",
|
||||||
|
"onended",
|
||||||
|
"onerror",
|
||||||
|
"onfocus",
|
||||||
|
"onhashchange",
|
||||||
|
"oninput",
|
||||||
|
"oninvalid",
|
||||||
|
"onkeydown",
|
||||||
|
"onkeypress",
|
||||||
|
"onkeyup",
|
||||||
|
"onlanguagechange",
|
||||||
|
"onload",
|
||||||
|
"onloadeddata",
|
||||||
|
"onloadedmetadata",
|
||||||
|
"onloadend",
|
||||||
|
"onloadstart",
|
||||||
|
"onmessage",
|
||||||
|
"onmessageerror",
|
||||||
|
"onmousedown",
|
||||||
|
"onmouseenter",
|
||||||
|
"onmouseleave",
|
||||||
|
"onmousemove",
|
||||||
|
"onmouseout",
|
||||||
|
"onmouseover",
|
||||||
|
"onmouseup",
|
||||||
|
"onmousewheel",
|
||||||
|
"onwheel",
|
||||||
|
"onoffline",
|
||||||
|
"ononline",
|
||||||
|
"onpagehide",
|
||||||
|
"onpageshow",
|
||||||
|
"onpaste",
|
||||||
|
"onpause",
|
||||||
|
"onplay",
|
||||||
|
"onplaying",
|
||||||
|
"onpopstate",
|
||||||
|
"onprogress",
|
||||||
|
"onratechange",
|
||||||
|
"onreset",
|
||||||
|
"onresize",
|
||||||
|
"onrejectionhandled",
|
||||||
|
"onscroll",
|
||||||
|
"onsecuritypolicyviolation",
|
||||||
|
"onseeked",
|
||||||
|
"onseeking",
|
||||||
|
"onselect",
|
||||||
|
"onshow",
|
||||||
|
"onsort",
|
||||||
|
"onstalled",
|
||||||
|
"onstorage",
|
||||||
|
"onsubmit",
|
||||||
|
"onsuspend",
|
||||||
|
"ontimeupdate",
|
||||||
|
"ontoggle",
|
||||||
|
"onunhandledrejection",
|
||||||
|
"onunload",
|
||||||
|
"onvolumechange",
|
||||||
|
"onwaiting",
|
||||||
|
}
|
||||||
|
|
||||||
|
// extra are ad-hoc values not covered by any of the lists above.
|
||||||
|
var extra = []string{
|
||||||
|
"acronym",
|
||||||
|
"align",
|
||||||
|
"annotation",
|
||||||
|
"annotation-xml",
|
||||||
|
"applet",
|
||||||
|
"basefont",
|
||||||
|
"bgsound",
|
||||||
|
"big",
|
||||||
|
"blink",
|
||||||
|
"center",
|
||||||
|
"color",
|
||||||
|
"desc",
|
||||||
|
"face",
|
||||||
|
"font",
|
||||||
|
"foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
|
||||||
|
"foreignobject",
|
||||||
|
"frame",
|
||||||
|
"frameset",
|
||||||
|
"image",
|
||||||
|
"isindex",
|
||||||
|
"listing",
|
||||||
|
"malignmark",
|
||||||
|
"marquee",
|
||||||
|
"math",
|
||||||
|
"mglyph",
|
||||||
|
"mi",
|
||||||
|
"mn",
|
||||||
|
"mo",
|
||||||
|
"ms",
|
||||||
|
"mtext",
|
||||||
|
"nobr",
|
||||||
|
"noembed",
|
||||||
|
"noframes",
|
||||||
|
"plaintext",
|
||||||
|
"prompt",
|
||||||
|
"public",
|
||||||
|
"rb",
|
||||||
|
"rtc",
|
||||||
|
"spacer",
|
||||||
|
"strike",
|
||||||
|
"svg",
|
||||||
|
"system",
|
||||||
|
"tt",
|
||||||
|
"xmp",
|
||||||
|
}
|
783
vendor/golang.org/x/net/html/atom/table.go
generated
vendored
Normal file
783
vendor/golang.org/x/net/html/atom/table.go
generated
vendored
Normal file
@ -0,0 +1,783 @@
|
|||||||
|
// Code generated by go generate gen.go; DO NOT EDIT.
|
||||||
|
|
||||||
|
//go:generate go run gen.go
|
||||||
|
|
||||||
|
package atom
|
||||||
|
|
||||||
|
const (
|
||||||
|
A Atom = 0x1
|
||||||
|
Abbr Atom = 0x4
|
||||||
|
Accept Atom = 0x1a06
|
||||||
|
AcceptCharset Atom = 0x1a0e
|
||||||
|
Accesskey Atom = 0x2c09
|
||||||
|
Acronym Atom = 0xaa07
|
||||||
|
Action Atom = 0x27206
|
||||||
|
Address Atom = 0x6f307
|
||||||
|
Align Atom = 0xb105
|
||||||
|
Allowfullscreen Atom = 0x2080f
|
||||||
|
Allowpaymentrequest Atom = 0xc113
|
||||||
|
Allowusermedia Atom = 0xdd0e
|
||||||
|
Alt Atom = 0xf303
|
||||||
|
Annotation Atom = 0x1c90a
|
||||||
|
AnnotationXml Atom = 0x1c90e
|
||||||
|
Applet Atom = 0x31906
|
||||||
|
Area Atom = 0x35604
|
||||||
|
Article Atom = 0x3fc07
|
||||||
|
As Atom = 0x3c02
|
||||||
|
Aside Atom = 0x10705
|
||||||
|
Async Atom = 0xff05
|
||||||
|
Audio Atom = 0x11505
|
||||||
|
Autocomplete Atom = 0x2780c
|
||||||
|
Autofocus Atom = 0x12109
|
||||||
|
Autoplay Atom = 0x13c08
|
||||||
|
B Atom = 0x101
|
||||||
|
Base Atom = 0x3b04
|
||||||
|
Basefont Atom = 0x3b08
|
||||||
|
Bdi Atom = 0xba03
|
||||||
|
Bdo Atom = 0x14b03
|
||||||
|
Bgsound Atom = 0x15e07
|
||||||
|
Big Atom = 0x17003
|
||||||
|
Blink Atom = 0x17305
|
||||||
|
Blockquote Atom = 0x1870a
|
||||||
|
Body Atom = 0x2804
|
||||||
|
Br Atom = 0x202
|
||||||
|
Button Atom = 0x19106
|
||||||
|
Canvas Atom = 0x10306
|
||||||
|
Caption Atom = 0x23107
|
||||||
|
Center Atom = 0x22006
|
||||||
|
Challenge Atom = 0x29b09
|
||||||
|
Charset Atom = 0x2107
|
||||||
|
Checked Atom = 0x47907
|
||||||
|
Cite Atom = 0x19c04
|
||||||
|
Class Atom = 0x56405
|
||||||
|
Code Atom = 0x5c504
|
||||||
|
Col Atom = 0x1ab03
|
||||||
|
Colgroup Atom = 0x1ab08
|
||||||
|
Color Atom = 0x1bf05
|
||||||
|
Cols Atom = 0x1c404
|
||||||
|
Colspan Atom = 0x1c407
|
||||||
|
Command Atom = 0x1d707
|
||||||
|
Content Atom = 0x58b07
|
||||||
|
Contenteditable Atom = 0x58b0f
|
||||||
|
Contextmenu Atom = 0x3800b
|
||||||
|
Controls Atom = 0x1de08
|
||||||
|
Coords Atom = 0x1ea06
|
||||||
|
Crossorigin Atom = 0x1fb0b
|
||||||
|
Data Atom = 0x4a504
|
||||||
|
Datalist Atom = 0x4a508
|
||||||
|
Datetime Atom = 0x2b808
|
||||||
|
Dd Atom = 0x2d702
|
||||||
|
Default Atom = 0x10a07
|
||||||
|
Defer Atom = 0x5c705
|
||||||
|
Del Atom = 0x45203
|
||||||
|
Desc Atom = 0x56104
|
||||||
|
Details Atom = 0x7207
|
||||||
|
Dfn Atom = 0x8703
|
||||||
|
Dialog Atom = 0xbb06
|
||||||
|
Dir Atom = 0x9303
|
||||||
|
Dirname Atom = 0x9307
|
||||||
|
Disabled Atom = 0x16408
|
||||||
|
Div Atom = 0x16b03
|
||||||
|
Dl Atom = 0x5e602
|
||||||
|
Download Atom = 0x46308
|
||||||
|
Draggable Atom = 0x17a09
|
||||||
|
Dropzone Atom = 0x40508
|
||||||
|
Dt Atom = 0x64b02
|
||||||
|
Em Atom = 0x6e02
|
||||||
|
Embed Atom = 0x6e05
|
||||||
|
Enctype Atom = 0x28d07
|
||||||
|
Face Atom = 0x21e04
|
||||||
|
Fieldset Atom = 0x22608
|
||||||
|
Figcaption Atom = 0x22e0a
|
||||||
|
Figure Atom = 0x24806
|
||||||
|
Font Atom = 0x3f04
|
||||||
|
Footer Atom = 0xf606
|
||||||
|
For Atom = 0x25403
|
||||||
|
ForeignObject Atom = 0x2540d
|
||||||
|
Foreignobject Atom = 0x2610d
|
||||||
|
Form Atom = 0x26e04
|
||||||
|
Formaction Atom = 0x26e0a
|
||||||
|
Formenctype Atom = 0x2890b
|
||||||
|
Formmethod Atom = 0x2a40a
|
||||||
|
Formnovalidate Atom = 0x2ae0e
|
||||||
|
Formtarget Atom = 0x2c00a
|
||||||
|
Frame Atom = 0x8b05
|
||||||
|
Frameset Atom = 0x8b08
|
||||||
|
H1 Atom = 0x15c02
|
||||||
|
H2 Atom = 0x2de02
|
||||||
|
H3 Atom = 0x30d02
|
||||||
|
H4 Atom = 0x34502
|
||||||
|
H5 Atom = 0x34f02
|
||||||
|
H6 Atom = 0x64d02
|
||||||
|
Head Atom = 0x33104
|
||||||
|
Header Atom = 0x33106
|
||||||
|
Headers Atom = 0x33107
|
||||||
|
Height Atom = 0x5206
|
||||||
|
Hgroup Atom = 0x2ca06
|
||||||
|
Hidden Atom = 0x2d506
|
||||||
|
High Atom = 0x2db04
|
||||||
|
Hr Atom = 0x15702
|
||||||
|
Href Atom = 0x2e004
|
||||||
|
Hreflang Atom = 0x2e008
|
||||||
|
Html Atom = 0x5604
|
||||||
|
HttpEquiv Atom = 0x2e80a
|
||||||
|
I Atom = 0x601
|
||||||
|
Icon Atom = 0x58a04
|
||||||
|
Id Atom = 0x10902
|
||||||
|
Iframe Atom = 0x2fc06
|
||||||
|
Image Atom = 0x30205
|
||||||
|
Img Atom = 0x30703
|
||||||
|
Input Atom = 0x44b05
|
||||||
|
Inputmode Atom = 0x44b09
|
||||||
|
Ins Atom = 0x20403
|
||||||
|
Integrity Atom = 0x23f09
|
||||||
|
Is Atom = 0x16502
|
||||||
|
Isindex Atom = 0x30f07
|
||||||
|
Ismap Atom = 0x31605
|
||||||
|
Itemid Atom = 0x38b06
|
||||||
|
Itemprop Atom = 0x19d08
|
||||||
|
Itemref Atom = 0x3cd07
|
||||||
|
Itemscope Atom = 0x67109
|
||||||
|
Itemtype Atom = 0x31f08
|
||||||
|
Kbd Atom = 0xb903
|
||||||
|
Keygen Atom = 0x3206
|
||||||
|
Keytype Atom = 0xd607
|
||||||
|
Kind Atom = 0x17704
|
||||||
|
Label Atom = 0x5905
|
||||||
|
Lang Atom = 0x2e404
|
||||||
|
Legend Atom = 0x18106
|
||||||
|
Li Atom = 0xb202
|
||||||
|
Link Atom = 0x17404
|
||||||
|
List Atom = 0x4a904
|
||||||
|
Listing Atom = 0x4a907
|
||||||
|
Loop Atom = 0x5d04
|
||||||
|
Low Atom = 0xc303
|
||||||
|
Main Atom = 0x1004
|
||||||
|
Malignmark Atom = 0xb00a
|
||||||
|
Manifest Atom = 0x6d708
|
||||||
|
Map Atom = 0x31803
|
||||||
|
Mark Atom = 0xb604
|
||||||
|
Marquee Atom = 0x32707
|
||||||
|
Math Atom = 0x32e04
|
||||||
|
Max Atom = 0x33d03
|
||||||
|
Maxlength Atom = 0x33d09
|
||||||
|
Media Atom = 0xe605
|
||||||
|
Mediagroup Atom = 0xe60a
|
||||||
|
Menu Atom = 0x38704
|
||||||
|
Menuitem Atom = 0x38708
|
||||||
|
Meta Atom = 0x4b804
|
||||||
|
Meter Atom = 0x9805
|
||||||
|
Method Atom = 0x2a806
|
||||||
|
Mglyph Atom = 0x30806
|
||||||
|
Mi Atom = 0x34702
|
||||||
|
Min Atom = 0x34703
|
||||||
|
Minlength Atom = 0x34709
|
||||||
|
Mn Atom = 0x2b102
|
||||||
|
Mo Atom = 0xa402
|
||||||
|
Ms Atom = 0x67402
|
||||||
|
Mtext Atom = 0x35105
|
||||||
|
Multiple Atom = 0x35f08
|
||||||
|
Muted Atom = 0x36705
|
||||||
|
Name Atom = 0x9604
|
||||||
|
Nav Atom = 0x1303
|
||||||
|
Nobr Atom = 0x3704
|
||||||
|
Noembed Atom = 0x6c07
|
||||||
|
Noframes Atom = 0x8908
|
||||||
|
Nomodule Atom = 0xa208
|
||||||
|
Nonce Atom = 0x1a605
|
||||||
|
Noscript Atom = 0x21608
|
||||||
|
Novalidate Atom = 0x2b20a
|
||||||
|
Object Atom = 0x26806
|
||||||
|
Ol Atom = 0x13702
|
||||||
|
Onabort Atom = 0x19507
|
||||||
|
Onafterprint Atom = 0x2360c
|
||||||
|
Onautocomplete Atom = 0x2760e
|
||||||
|
Onautocompleteerror Atom = 0x27613
|
||||||
|
Onauxclick Atom = 0x61f0a
|
||||||
|
Onbeforeprint Atom = 0x69e0d
|
||||||
|
Onbeforeunload Atom = 0x6e70e
|
||||||
|
Onblur Atom = 0x56d06
|
||||||
|
Oncancel Atom = 0x11908
|
||||||
|
Oncanplay Atom = 0x14d09
|
||||||
|
Oncanplaythrough Atom = 0x14d10
|
||||||
|
Onchange Atom = 0x41b08
|
||||||
|
Onclick Atom = 0x2f507
|
||||||
|
Onclose Atom = 0x36c07
|
||||||
|
Oncontextmenu Atom = 0x37e0d
|
||||||
|
Oncopy Atom = 0x39106
|
||||||
|
Oncuechange Atom = 0x3970b
|
||||||
|
Oncut Atom = 0x3a205
|
||||||
|
Ondblclick Atom = 0x3a70a
|
||||||
|
Ondrag Atom = 0x3b106
|
||||||
|
Ondragend Atom = 0x3b109
|
||||||
|
Ondragenter Atom = 0x3ba0b
|
||||||
|
Ondragexit Atom = 0x3c50a
|
||||||
|
Ondragleave Atom = 0x3df0b
|
||||||
|
Ondragover Atom = 0x3ea0a
|
||||||
|
Ondragstart Atom = 0x3f40b
|
||||||
|
Ondrop Atom = 0x40306
|
||||||
|
Ondurationchange Atom = 0x41310
|
||||||
|
Onemptied Atom = 0x40a09
|
||||||
|
Onended Atom = 0x42307
|
||||||
|
Onerror Atom = 0x42a07
|
||||||
|
Onfocus Atom = 0x43107
|
||||||
|
Onhashchange Atom = 0x43d0c
|
||||||
|
Oninput Atom = 0x44907
|
||||||
|
Oninvalid Atom = 0x45509
|
||||||
|
Onkeydown Atom = 0x45e09
|
||||||
|
Onkeypress Atom = 0x46b0a
|
||||||
|
Onkeyup Atom = 0x48007
|
||||||
|
Onlanguagechange Atom = 0x48d10
|
||||||
|
Onload Atom = 0x49d06
|
||||||
|
Onloadeddata Atom = 0x49d0c
|
||||||
|
Onloadedmetadata Atom = 0x4b010
|
||||||
|
Onloadend Atom = 0x4c609
|
||||||
|
Onloadstart Atom = 0x4cf0b
|
||||||
|
Onmessage Atom = 0x4da09
|
||||||
|
Onmessageerror Atom = 0x4da0e
|
||||||
|
Onmousedown Atom = 0x4e80b
|
||||||
|
Onmouseenter Atom = 0x4f30c
|
||||||
|
Onmouseleave Atom = 0x4ff0c
|
||||||
|
Onmousemove Atom = 0x50b0b
|
||||||
|
Onmouseout Atom = 0x5160a
|
||||||
|
Onmouseover Atom = 0x5230b
|
||||||
|
Onmouseup Atom = 0x52e09
|
||||||
|
Onmousewheel Atom = 0x53c0c
|
||||||
|
Onoffline Atom = 0x54809
|
||||||
|
Ononline Atom = 0x55108
|
||||||
|
Onpagehide Atom = 0x5590a
|
||||||
|
Onpageshow Atom = 0x5730a
|
||||||
|
Onpaste Atom = 0x57f07
|
||||||
|
Onpause Atom = 0x59a07
|
||||||
|
Onplay Atom = 0x5a406
|
||||||
|
Onplaying Atom = 0x5a409
|
||||||
|
Onpopstate Atom = 0x5ad0a
|
||||||
|
Onprogress Atom = 0x5b70a
|
||||||
|
Onratechange Atom = 0x5cc0c
|
||||||
|
Onrejectionhandled Atom = 0x5d812
|
||||||
|
Onreset Atom = 0x5ea07
|
||||||
|
Onresize Atom = 0x5f108
|
||||||
|
Onscroll Atom = 0x60008
|
||||||
|
Onsecuritypolicyviolation Atom = 0x60819
|
||||||
|
Onseeked Atom = 0x62908
|
||||||
|
Onseeking Atom = 0x63109
|
||||||
|
Onselect Atom = 0x63a08
|
||||||
|
Onshow Atom = 0x64406
|
||||||
|
Onsort Atom = 0x64f06
|
||||||
|
Onstalled Atom = 0x65909
|
||||||
|
Onstorage Atom = 0x66209
|
||||||
|
Onsubmit Atom = 0x66b08
|
||||||
|
Onsuspend Atom = 0x67b09
|
||||||
|
Ontimeupdate Atom = 0x400c
|
||||||
|
Ontoggle Atom = 0x68408
|
||||||
|
Onunhandledrejection Atom = 0x68c14
|
||||||
|
Onunload Atom = 0x6ab08
|
||||||
|
Onvolumechange Atom = 0x6b30e
|
||||||
|
Onwaiting Atom = 0x6c109
|
||||||
|
Onwheel Atom = 0x6ca07
|
||||||
|
Open Atom = 0x1a304
|
||||||
|
Optgroup Atom = 0x5f08
|
||||||
|
Optimum Atom = 0x6d107
|
||||||
|
Option Atom = 0x6e306
|
||||||
|
Output Atom = 0x51d06
|
||||||
|
P Atom = 0xc01
|
||||||
|
Param Atom = 0xc05
|
||||||
|
Pattern Atom = 0x6607
|
||||||
|
Picture Atom = 0x7b07
|
||||||
|
Ping Atom = 0xef04
|
||||||
|
Placeholder Atom = 0x1310b
|
||||||
|
Plaintext Atom = 0x1b209
|
||||||
|
Playsinline Atom = 0x1400b
|
||||||
|
Poster Atom = 0x2cf06
|
||||||
|
Pre Atom = 0x47003
|
||||||
|
Preload Atom = 0x48607
|
||||||
|
Progress Atom = 0x5b908
|
||||||
|
Prompt Atom = 0x53606
|
||||||
|
Public Atom = 0x58606
|
||||||
|
Q Atom = 0xcf01
|
||||||
|
Radiogroup Atom = 0x30a
|
||||||
|
Rb Atom = 0x3a02
|
||||||
|
Readonly Atom = 0x35708
|
||||||
|
Referrerpolicy Atom = 0x3d10e
|
||||||
|
Rel Atom = 0x48703
|
||||||
|
Required Atom = 0x24c08
|
||||||
|
Reversed Atom = 0x8008
|
||||||
|
Rows Atom = 0x9c04
|
||||||
|
Rowspan Atom = 0x9c07
|
||||||
|
Rp Atom = 0x23c02
|
||||||
|
Rt Atom = 0x19a02
|
||||||
|
Rtc Atom = 0x19a03
|
||||||
|
Ruby Atom = 0xfb04
|
||||||
|
S Atom = 0x2501
|
||||||
|
Samp Atom = 0x7804
|
||||||
|
Sandbox Atom = 0x12907
|
||||||
|
Scope Atom = 0x67505
|
||||||
|
Scoped Atom = 0x67506
|
||||||
|
Script Atom = 0x21806
|
||||||
|
Seamless Atom = 0x37108
|
||||||
|
Section Atom = 0x56807
|
||||||
|
Select Atom = 0x63c06
|
||||||
|
Selected Atom = 0x63c08
|
||||||
|
Shape Atom = 0x1e505
|
||||||
|
Size Atom = 0x5f504
|
||||||
|
Sizes Atom = 0x5f505
|
||||||
|
Slot Atom = 0x1ef04
|
||||||
|
Small Atom = 0x20605
|
||||||
|
Sortable Atom = 0x65108
|
||||||
|
Sorted Atom = 0x33706
|
||||||
|
Source Atom = 0x37806
|
||||||
|
Spacer Atom = 0x43706
|
||||||
|
Span Atom = 0x9f04
|
||||||
|
Spellcheck Atom = 0x4740a
|
||||||
|
Src Atom = 0x5c003
|
||||||
|
Srcdoc Atom = 0x5c006
|
||||||
|
Srclang Atom = 0x5f907
|
||||||
|
Srcset Atom = 0x6f906
|
||||||
|
Start Atom = 0x3fa05
|
||||||
|
Step Atom = 0x58304
|
||||||
|
Strike Atom = 0xd206
|
||||||
|
Strong Atom = 0x6dd06
|
||||||
|
Style Atom = 0x6ff05
|
||||||
|
Sub Atom = 0x66d03
|
||||||
|
Summary Atom = 0x70407
|
||||||
|
Sup Atom = 0x70b03
|
||||||
|
Svg Atom = 0x70e03
|
||||||
|
System Atom = 0x71106
|
||||||
|
Tabindex Atom = 0x4be08
|
||||||
|
Table Atom = 0x59505
|
||||||
|
Target Atom = 0x2c406
|
||||||
|
Tbody Atom = 0x2705
|
||||||
|
Td Atom = 0x9202
|
||||||
|
Template Atom = 0x71408
|
||||||
|
Textarea Atom = 0x35208
|
||||||
|
Tfoot Atom = 0xf505
|
||||||
|
Th Atom = 0x15602
|
||||||
|
Thead Atom = 0x33005
|
||||||
|
Time Atom = 0x4204
|
||||||
|
Title Atom = 0x11005
|
||||||
|
Tr Atom = 0xcc02
|
||||||
|
Track Atom = 0x1ba05
|
||||||
|
Translate Atom = 0x1f209
|
||||||
|
Tt Atom = 0x6802
|
||||||
|
Type Atom = 0xd904
|
||||||
|
Typemustmatch Atom = 0x2900d
|
||||||
|
U Atom = 0xb01
|
||||||
|
Ul Atom = 0xa702
|
||||||
|
Updateviacache Atom = 0x460e
|
||||||
|
Usemap Atom = 0x59e06
|
||||||
|
Value Atom = 0x1505
|
||||||
|
Var Atom = 0x16d03
|
||||||
|
Video Atom = 0x2f105
|
||||||
|
Wbr Atom = 0x57c03
|
||||||
|
Width Atom = 0x64905
|
||||||
|
Workertype Atom = 0x71c0a
|
||||||
|
Wrap Atom = 0x72604
|
||||||
|
Xmp Atom = 0x12f03
|
||||||
|
)
|
||||||
|
|
||||||
|
const hash0 = 0x81cdf10e
|
||||||
|
|
||||||
|
const maxAtomLen = 25
|
||||||
|
|
||||||
|
var table = [1 << 9]Atom{
|
||||||
|
0x1: 0xe60a, // mediagroup
|
||||||
|
0x2: 0x2e404, // lang
|
||||||
|
0x4: 0x2c09, // accesskey
|
||||||
|
0x5: 0x8b08, // frameset
|
||||||
|
0x7: 0x63a08, // onselect
|
||||||
|
0x8: 0x71106, // system
|
||||||
|
0xa: 0x64905, // width
|
||||||
|
0xc: 0x2890b, // formenctype
|
||||||
|
0xd: 0x13702, // ol
|
||||||
|
0xe: 0x3970b, // oncuechange
|
||||||
|
0x10: 0x14b03, // bdo
|
||||||
|
0x11: 0x11505, // audio
|
||||||
|
0x12: 0x17a09, // draggable
|
||||||
|
0x14: 0x2f105, // video
|
||||||
|
0x15: 0x2b102, // mn
|
||||||
|
0x16: 0x38704, // menu
|
||||||
|
0x17: 0x2cf06, // poster
|
||||||
|
0x19: 0xf606, // footer
|
||||||
|
0x1a: 0x2a806, // method
|
||||||
|
0x1b: 0x2b808, // datetime
|
||||||
|
0x1c: 0x19507, // onabort
|
||||||
|
0x1d: 0x460e, // updateviacache
|
||||||
|
0x1e: 0xff05, // async
|
||||||
|
0x1f: 0x49d06, // onload
|
||||||
|
0x21: 0x11908, // oncancel
|
||||||
|
0x22: 0x62908, // onseeked
|
||||||
|
0x23: 0x30205, // image
|
||||||
|
0x24: 0x5d812, // onrejectionhandled
|
||||||
|
0x26: 0x17404, // link
|
||||||
|
0x27: 0x51d06, // output
|
||||||
|
0x28: 0x33104, // head
|
||||||
|
0x29: 0x4ff0c, // onmouseleave
|
||||||
|
0x2a: 0x57f07, // onpaste
|
||||||
|
0x2b: 0x5a409, // onplaying
|
||||||
|
0x2c: 0x1c407, // colspan
|
||||||
|
0x2f: 0x1bf05, // color
|
||||||
|
0x30: 0x5f504, // size
|
||||||
|
0x31: 0x2e80a, // http-equiv
|
||||||
|
0x33: 0x601, // i
|
||||||
|
0x34: 0x5590a, // onpagehide
|
||||||
|
0x35: 0x68c14, // onunhandledrejection
|
||||||
|
0x37: 0x42a07, // onerror
|
||||||
|
0x3a: 0x3b08, // basefont
|
||||||
|
0x3f: 0x1303, // nav
|
||||||
|
0x40: 0x17704, // kind
|
||||||
|
0x41: 0x35708, // readonly
|
||||||
|
0x42: 0x30806, // mglyph
|
||||||
|
0x44: 0xb202, // li
|
||||||
|
0x46: 0x2d506, // hidden
|
||||||
|
0x47: 0x70e03, // svg
|
||||||
|
0x48: 0x58304, // step
|
||||||
|
0x49: 0x23f09, // integrity
|
||||||
|
0x4a: 0x58606, // public
|
||||||
|
0x4c: 0x1ab03, // col
|
||||||
|
0x4d: 0x1870a, // blockquote
|
||||||
|
0x4e: 0x34f02, // h5
|
||||||
|
0x50: 0x5b908, // progress
|
||||||
|
0x51: 0x5f505, // sizes
|
||||||
|
0x52: 0x34502, // h4
|
||||||
|
0x56: 0x33005, // thead
|
||||||
|
0x57: 0xd607, // keytype
|
||||||
|
0x58: 0x5b70a, // onprogress
|
||||||
|
0x59: 0x44b09, // inputmode
|
||||||
|
0x5a: 0x3b109, // ondragend
|
||||||
|
0x5d: 0x3a205, // oncut
|
||||||
|
0x5e: 0x43706, // spacer
|
||||||
|
0x5f: 0x1ab08, // colgroup
|
||||||
|
0x62: 0x16502, // is
|
||||||
|
0x65: 0x3c02, // as
|
||||||
|
0x66: 0x54809, // onoffline
|
||||||
|
0x67: 0x33706, // sorted
|
||||||
|
0x69: 0x48d10, // onlanguagechange
|
||||||
|
0x6c: 0x43d0c, // onhashchange
|
||||||
|
0x6d: 0x9604, // name
|
||||||
|
0x6e: 0xf505, // tfoot
|
||||||
|
0x6f: 0x56104, // desc
|
||||||
|
0x70: 0x33d03, // max
|
||||||
|
0x72: 0x1ea06, // coords
|
||||||
|
0x73: 0x30d02, // h3
|
||||||
|
0x74: 0x6e70e, // onbeforeunload
|
||||||
|
0x75: 0x9c04, // rows
|
||||||
|
0x76: 0x63c06, // select
|
||||||
|
0x77: 0x9805, // meter
|
||||||
|
0x78: 0x38b06, // itemid
|
||||||
|
0x79: 0x53c0c, // onmousewheel
|
||||||
|
0x7a: 0x5c006, // srcdoc
|
||||||
|
0x7d: 0x1ba05, // track
|
||||||
|
0x7f: 0x31f08, // itemtype
|
||||||
|
0x82: 0xa402, // mo
|
||||||
|
0x83: 0x41b08, // onchange
|
||||||
|
0x84: 0x33107, // headers
|
||||||
|
0x85: 0x5cc0c, // onratechange
|
||||||
|
0x86: 0x60819, // onsecuritypolicyviolation
|
||||||
|
0x88: 0x4a508, // datalist
|
||||||
|
0x89: 0x4e80b, // onmousedown
|
||||||
|
0x8a: 0x1ef04, // slot
|
||||||
|
0x8b: 0x4b010, // onloadedmetadata
|
||||||
|
0x8c: 0x1a06, // accept
|
||||||
|
0x8d: 0x26806, // object
|
||||||
|
0x91: 0x6b30e, // onvolumechange
|
||||||
|
0x92: 0x2107, // charset
|
||||||
|
0x93: 0x27613, // onautocompleteerror
|
||||||
|
0x94: 0xc113, // allowpaymentrequest
|
||||||
|
0x95: 0x2804, // body
|
||||||
|
0x96: 0x10a07, // default
|
||||||
|
0x97: 0x63c08, // selected
|
||||||
|
0x98: 0x21e04, // face
|
||||||
|
0x99: 0x1e505, // shape
|
||||||
|
0x9b: 0x68408, // ontoggle
|
||||||
|
0x9e: 0x64b02, // dt
|
||||||
|
0x9f: 0xb604, // mark
|
||||||
|
0xa1: 0xb01, // u
|
||||||
|
0xa4: 0x6ab08, // onunload
|
||||||
|
0xa5: 0x5d04, // loop
|
||||||
|
0xa6: 0x16408, // disabled
|
||||||
|
0xaa: 0x42307, // onended
|
||||||
|
0xab: 0xb00a, // malignmark
|
||||||
|
0xad: 0x67b09, // onsuspend
|
||||||
|
0xae: 0x35105, // mtext
|
||||||
|
0xaf: 0x64f06, // onsort
|
||||||
|
0xb0: 0x19d08, // itemprop
|
||||||
|
0xb3: 0x67109, // itemscope
|
||||||
|
0xb4: 0x17305, // blink
|
||||||
|
0xb6: 0x3b106, // ondrag
|
||||||
|
0xb7: 0xa702, // ul
|
||||||
|
0xb8: 0x26e04, // form
|
||||||
|
0xb9: 0x12907, // sandbox
|
||||||
|
0xba: 0x8b05, // frame
|
||||||
|
0xbb: 0x1505, // value
|
||||||
|
0xbc: 0x66209, // onstorage
|
||||||
|
0xbf: 0xaa07, // acronym
|
||||||
|
0xc0: 0x19a02, // rt
|
||||||
|
0xc2: 0x202, // br
|
||||||
|
0xc3: 0x22608, // fieldset
|
||||||
|
0xc4: 0x2900d, // typemustmatch
|
||||||
|
0xc5: 0xa208, // nomodule
|
||||||
|
0xc6: 0x6c07, // noembed
|
||||||
|
0xc7: 0x69e0d, // onbeforeprint
|
||||||
|
0xc8: 0x19106, // button
|
||||||
|
0xc9: 0x2f507, // onclick
|
||||||
|
0xca: 0x70407, // summary
|
||||||
|
0xcd: 0xfb04, // ruby
|
||||||
|
0xce: 0x56405, // class
|
||||||
|
0xcf: 0x3f40b, // ondragstart
|
||||||
|
0xd0: 0x23107, // caption
|
||||||
|
0xd4: 0xdd0e, // allowusermedia
|
||||||
|
0xd5: 0x4cf0b, // onloadstart
|
||||||
|
0xd9: 0x16b03, // div
|
||||||
|
0xda: 0x4a904, // list
|
||||||
|
0xdb: 0x32e04, // math
|
||||||
|
0xdc: 0x44b05, // input
|
||||||
|
0xdf: 0x3ea0a, // ondragover
|
||||||
|
0xe0: 0x2de02, // h2
|
||||||
|
0xe2: 0x1b209, // plaintext
|
||||||
|
0xe4: 0x4f30c, // onmouseenter
|
||||||
|
0xe7: 0x47907, // checked
|
||||||
|
0xe8: 0x47003, // pre
|
||||||
|
0xea: 0x35f08, // multiple
|
||||||
|
0xeb: 0xba03, // bdi
|
||||||
|
0xec: 0x33d09, // maxlength
|
||||||
|
0xed: 0xcf01, // q
|
||||||
|
0xee: 0x61f0a, // onauxclick
|
||||||
|
0xf0: 0x57c03, // wbr
|
||||||
|
0xf2: 0x3b04, // base
|
||||||
|
0xf3: 0x6e306, // option
|
||||||
|
0xf5: 0x41310, // ondurationchange
|
||||||
|
0xf7: 0x8908, // noframes
|
||||||
|
0xf9: 0x40508, // dropzone
|
||||||
|
0xfb: 0x67505, // scope
|
||||||
|
0xfc: 0x8008, // reversed
|
||||||
|
0xfd: 0x3ba0b, // ondragenter
|
||||||
|
0xfe: 0x3fa05, // start
|
||||||
|
0xff: 0x12f03, // xmp
|
||||||
|
0x100: 0x5f907, // srclang
|
||||||
|
0x101: 0x30703, // img
|
||||||
|
0x104: 0x101, // b
|
||||||
|
0x105: 0x25403, // for
|
||||||
|
0x106: 0x10705, // aside
|
||||||
|
0x107: 0x44907, // oninput
|
||||||
|
0x108: 0x35604, // area
|
||||||
|
0x109: 0x2a40a, // formmethod
|
||||||
|
0x10a: 0x72604, // wrap
|
||||||
|
0x10c: 0x23c02, // rp
|
||||||
|
0x10d: 0x46b0a, // onkeypress
|
||||||
|
0x10e: 0x6802, // tt
|
||||||
|
0x110: 0x34702, // mi
|
||||||
|
0x111: 0x36705, // muted
|
||||||
|
0x112: 0xf303, // alt
|
||||||
|
0x113: 0x5c504, // code
|
||||||
|
0x114: 0x6e02, // em
|
||||||
|
0x115: 0x3c50a, // ondragexit
|
||||||
|
0x117: 0x9f04, // span
|
||||||
|
0x119: 0x6d708, // manifest
|
||||||
|
0x11a: 0x38708, // menuitem
|
||||||
|
0x11b: 0x58b07, // content
|
||||||
|
0x11d: 0x6c109, // onwaiting
|
||||||
|
0x11f: 0x4c609, // onloadend
|
||||||
|
0x121: 0x37e0d, // oncontextmenu
|
||||||
|
0x123: 0x56d06, // onblur
|
||||||
|
0x124: 0x3fc07, // article
|
||||||
|
0x125: 0x9303, // dir
|
||||||
|
0x126: 0xef04, // ping
|
||||||
|
0x127: 0x24c08, // required
|
||||||
|
0x128: 0x45509, // oninvalid
|
||||||
|
0x129: 0xb105, // align
|
||||||
|
0x12b: 0x58a04, // icon
|
||||||
|
0x12c: 0x64d02, // h6
|
||||||
|
0x12d: 0x1c404, // cols
|
||||||
|
0x12e: 0x22e0a, // figcaption
|
||||||
|
0x12f: 0x45e09, // onkeydown
|
||||||
|
0x130: 0x66b08, // onsubmit
|
||||||
|
0x131: 0x14d09, // oncanplay
|
||||||
|
0x132: 0x70b03, // sup
|
||||||
|
0x133: 0xc01, // p
|
||||||
|
0x135: 0x40a09, // onemptied
|
||||||
|
0x136: 0x39106, // oncopy
|
||||||
|
0x137: 0x19c04, // cite
|
||||||
|
0x138: 0x3a70a, // ondblclick
|
||||||
|
0x13a: 0x50b0b, // onmousemove
|
||||||
|
0x13c: 0x66d03, // sub
|
||||||
|
0x13d: 0x48703, // rel
|
||||||
|
0x13e: 0x5f08, // optgroup
|
||||||
|
0x142: 0x9c07, // rowspan
|
||||||
|
0x143: 0x37806, // source
|
||||||
|
0x144: 0x21608, // noscript
|
||||||
|
0x145: 0x1a304, // open
|
||||||
|
0x146: 0x20403, // ins
|
||||||
|
0x147: 0x2540d, // foreignObject
|
||||||
|
0x148: 0x5ad0a, // onpopstate
|
||||||
|
0x14a: 0x28d07, // enctype
|
||||||
|
0x14b: 0x2760e, // onautocomplete
|
||||||
|
0x14c: 0x35208, // textarea
|
||||||
|
0x14e: 0x2780c, // autocomplete
|
||||||
|
0x14f: 0x15702, // hr
|
||||||
|
0x150: 0x1de08, // controls
|
||||||
|
0x151: 0x10902, // id
|
||||||
|
0x153: 0x2360c, // onafterprint
|
||||||
|
0x155: 0x2610d, // foreignobject
|
||||||
|
0x156: 0x32707, // marquee
|
||||||
|
0x157: 0x59a07, // onpause
|
||||||
|
0x158: 0x5e602, // dl
|
||||||
|
0x159: 0x5206, // height
|
||||||
|
0x15a: 0x34703, // min
|
||||||
|
0x15b: 0x9307, // dirname
|
||||||
|
0x15c: 0x1f209, // translate
|
||||||
|
0x15d: 0x5604, // html
|
||||||
|
0x15e: 0x34709, // minlength
|
||||||
|
0x15f: 0x48607, // preload
|
||||||
|
0x160: 0x71408, // template
|
||||||
|
0x161: 0x3df0b, // ondragleave
|
||||||
|
0x162: 0x3a02, // rb
|
||||||
|
0x164: 0x5c003, // src
|
||||||
|
0x165: 0x6dd06, // strong
|
||||||
|
0x167: 0x7804, // samp
|
||||||
|
0x168: 0x6f307, // address
|
||||||
|
0x169: 0x55108, // ononline
|
||||||
|
0x16b: 0x1310b, // placeholder
|
||||||
|
0x16c: 0x2c406, // target
|
||||||
|
0x16d: 0x20605, // small
|
||||||
|
0x16e: 0x6ca07, // onwheel
|
||||||
|
0x16f: 0x1c90a, // annotation
|
||||||
|
0x170: 0x4740a, // spellcheck
|
||||||
|
0x171: 0x7207, // details
|
||||||
|
0x172: 0x10306, // canvas
|
||||||
|
0x173: 0x12109, // autofocus
|
||||||
|
0x174: 0xc05, // param
|
||||||
|
0x176: 0x46308, // download
|
||||||
|
0x177: 0x45203, // del
|
||||||
|
0x178: 0x36c07, // onclose
|
||||||
|
0x179: 0xb903, // kbd
|
||||||
|
0x17a: 0x31906, // applet
|
||||||
|
0x17b: 0x2e004, // href
|
||||||
|
0x17c: 0x5f108, // onresize
|
||||||
|
0x17e: 0x49d0c, // onloadeddata
|
||||||
|
0x180: 0xcc02, // tr
|
||||||
|
0x181: 0x2c00a, // formtarget
|
||||||
|
0x182: 0x11005, // title
|
||||||
|
0x183: 0x6ff05, // style
|
||||||
|
0x184: 0xd206, // strike
|
||||||
|
0x185: 0x59e06, // usemap
|
||||||
|
0x186: 0x2fc06, // iframe
|
||||||
|
0x187: 0x1004, // main
|
||||||
|
0x189: 0x7b07, // picture
|
||||||
|
0x18c: 0x31605, // ismap
|
||||||
|
0x18e: 0x4a504, // data
|
||||||
|
0x18f: 0x5905, // label
|
||||||
|
0x191: 0x3d10e, // referrerpolicy
|
||||||
|
0x192: 0x15602, // th
|
||||||
|
0x194: 0x53606, // prompt
|
||||||
|
0x195: 0x56807, // section
|
||||||
|
0x197: 0x6d107, // optimum
|
||||||
|
0x198: 0x2db04, // high
|
||||||
|
0x199: 0x15c02, // h1
|
||||||
|
0x19a: 0x65909, // onstalled
|
||||||
|
0x19b: 0x16d03, // var
|
||||||
|
0x19c: 0x4204, // time
|
||||||
|
0x19e: 0x67402, // ms
|
||||||
|
0x19f: 0x33106, // header
|
||||||
|
0x1a0: 0x4da09, // onmessage
|
||||||
|
0x1a1: 0x1a605, // nonce
|
||||||
|
0x1a2: 0x26e0a, // formaction
|
||||||
|
0x1a3: 0x22006, // center
|
||||||
|
0x1a4: 0x3704, // nobr
|
||||||
|
0x1a5: 0x59505, // table
|
||||||
|
0x1a6: 0x4a907, // listing
|
||||||
|
0x1a7: 0x18106, // legend
|
||||||
|
0x1a9: 0x29b09, // challenge
|
||||||
|
0x1aa: 0x24806, // figure
|
||||||
|
0x1ab: 0xe605, // media
|
||||||
|
0x1ae: 0xd904, // type
|
||||||
|
0x1af: 0x3f04, // font
|
||||||
|
0x1b0: 0x4da0e, // onmessageerror
|
||||||
|
0x1b1: 0x37108, // seamless
|
||||||
|
0x1b2: 0x8703, // dfn
|
||||||
|
0x1b3: 0x5c705, // defer
|
||||||
|
0x1b4: 0xc303, // low
|
||||||
|
0x1b5: 0x19a03, // rtc
|
||||||
|
0x1b6: 0x5230b, // onmouseover
|
||||||
|
0x1b7: 0x2b20a, // novalidate
|
||||||
|
0x1b8: 0x71c0a, // workertype
|
||||||
|
0x1ba: 0x3cd07, // itemref
|
||||||
|
0x1bd: 0x1, // a
|
||||||
|
0x1be: 0x31803, // map
|
||||||
|
0x1bf: 0x400c, // ontimeupdate
|
||||||
|
0x1c0: 0x15e07, // bgsound
|
||||||
|
0x1c1: 0x3206, // keygen
|
||||||
|
0x1c2: 0x2705, // tbody
|
||||||
|
0x1c5: 0x64406, // onshow
|
||||||
|
0x1c7: 0x2501, // s
|
||||||
|
0x1c8: 0x6607, // pattern
|
||||||
|
0x1cc: 0x14d10, // oncanplaythrough
|
||||||
|
0x1ce: 0x2d702, // dd
|
||||||
|
0x1cf: 0x6f906, // srcset
|
||||||
|
0x1d0: 0x17003, // big
|
||||||
|
0x1d2: 0x65108, // sortable
|
||||||
|
0x1d3: 0x48007, // onkeyup
|
||||||
|
0x1d5: 0x5a406, // onplay
|
||||||
|
0x1d7: 0x4b804, // meta
|
||||||
|
0x1d8: 0x40306, // ondrop
|
||||||
|
0x1da: 0x60008, // onscroll
|
||||||
|
0x1db: 0x1fb0b, // crossorigin
|
||||||
|
0x1dc: 0x5730a, // onpageshow
|
||||||
|
0x1dd: 0x4, // abbr
|
||||||
|
0x1de: 0x9202, // td
|
||||||
|
0x1df: 0x58b0f, // contenteditable
|
||||||
|
0x1e0: 0x27206, // action
|
||||||
|
0x1e1: 0x1400b, // playsinline
|
||||||
|
0x1e2: 0x43107, // onfocus
|
||||||
|
0x1e3: 0x2e008, // hreflang
|
||||||
|
0x1e5: 0x5160a, // onmouseout
|
||||||
|
0x1e6: 0x5ea07, // onreset
|
||||||
|
0x1e7: 0x13c08, // autoplay
|
||||||
|
0x1e8: 0x63109, // onseeking
|
||||||
|
0x1ea: 0x67506, // scoped
|
||||||
|
0x1ec: 0x30a, // radiogroup
|
||||||
|
0x1ee: 0x3800b, // contextmenu
|
||||||
|
0x1ef: 0x52e09, // onmouseup
|
||||||
|
0x1f1: 0x2ca06, // hgroup
|
||||||
|
0x1f2: 0x2080f, // allowfullscreen
|
||||||
|
0x1f3: 0x4be08, // tabindex
|
||||||
|
0x1f6: 0x30f07, // isindex
|
||||||
|
0x1f7: 0x1a0e, // accept-charset
|
||||||
|
0x1f8: 0x2ae0e, // formnovalidate
|
||||||
|
0x1fb: 0x1c90e, // annotation-xml
|
||||||
|
0x1fc: 0x6e05, // embed
|
||||||
|
0x1fd: 0x21806, // script
|
||||||
|
0x1fe: 0xbb06, // dialog
|
||||||
|
0x1ff: 0x1d707, // command
|
||||||
|
}
|
||||||
|
|
||||||
|
const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" +
|
||||||
|
"asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" +
|
||||||
|
"sampictureversedfnoframesetdirnameterowspanomoduleacronymali" +
|
||||||
|
"gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" +
|
||||||
|
"ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" +
|
||||||
|
"dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" +
|
||||||
|
"bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" +
|
||||||
|
"penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" +
|
||||||
|
"ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" +
|
||||||
|
"ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" +
|
||||||
|
"ignObjectforeignobjectformactionautocompleteerrorformenctype" +
|
||||||
|
"mustmatchallengeformmethodformnovalidatetimeformtargethgroup" +
|
||||||
|
"osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" +
|
||||||
|
"ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" +
|
||||||
|
"inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" +
|
||||||
|
"extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" +
|
||||||
|
"enterondragexitemreferrerpolicyondragleaveondragoverondragst" +
|
||||||
|
"articleondropzonemptiedondurationchangeonendedonerroronfocus" +
|
||||||
|
"paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" +
|
||||||
|
"spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" +
|
||||||
|
"onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" +
|
||||||
|
"usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" +
|
||||||
|
"seoveronmouseupromptonmousewheelonofflineononlineonpagehides" +
|
||||||
|
"classectionbluronpageshowbronpastepublicontenteditableonpaus" +
|
||||||
|
"emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" +
|
||||||
|
"jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" +
|
||||||
|
"violationauxclickonseekedonseekingonselectedonshowidth6onsor" +
|
||||||
|
"tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" +
|
||||||
|
"handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" +
|
||||||
|
"wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" +
|
||||||
|
"arysupsvgsystemplateworkertypewrap"
|
257
vendor/golang.org/x/net/html/charset/charset.go
generated
vendored
Normal file
257
vendor/golang.org/x/net/html/charset/charset.go
generated
vendored
Normal file
@ -0,0 +1,257 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package charset provides common text encodings for HTML documents.
|
||||||
|
//
|
||||||
|
// The mapping from encoding labels to encodings is defined at
|
||||||
|
// https://encoding.spec.whatwg.org/.
|
||||||
|
package charset // import "golang.org/x/net/html/charset"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"mime"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/net/html"
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/charmap"
|
||||||
|
"golang.org/x/text/encoding/htmlindex"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Lookup returns the encoding with the specified label, and its canonical
|
||||||
|
// name. It returns nil and the empty string if label is not one of the
|
||||||
|
// standard encodings for HTML. Matching is case-insensitive and ignores
|
||||||
|
// leading and trailing whitespace. Encoders will use HTML escape sequences for
|
||||||
|
// runes that are not supported by the character set.
|
||||||
|
func Lookup(label string) (e encoding.Encoding, name string) {
|
||||||
|
e, err := htmlindex.Get(label)
|
||||||
|
if err != nil {
|
||||||
|
return nil, ""
|
||||||
|
}
|
||||||
|
name, _ = htmlindex.Name(e)
|
||||||
|
return &htmlEncoding{e}, name
|
||||||
|
}
|
||||||
|
|
||||||
|
type htmlEncoding struct{ encoding.Encoding }
|
||||||
|
|
||||||
|
func (h *htmlEncoding) NewEncoder() *encoding.Encoder {
|
||||||
|
// HTML requires a non-terminating legacy encoder. We use HTML escapes to
|
||||||
|
// substitute unsupported code points.
|
||||||
|
return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder())
|
||||||
|
}
|
||||||
|
|
||||||
|
// DetermineEncoding determines the encoding of an HTML document by examining
|
||||||
|
// up to the first 1024 bytes of content and the declared Content-Type.
|
||||||
|
//
|
||||||
|
// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding
|
||||||
|
func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) {
|
||||||
|
if len(content) > 1024 {
|
||||||
|
content = content[:1024]
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, b := range boms {
|
||||||
|
if bytes.HasPrefix(content, b.bom) {
|
||||||
|
e, name = Lookup(b.enc)
|
||||||
|
return e, name, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, params, err := mime.ParseMediaType(contentType); err == nil {
|
||||||
|
if cs, ok := params["charset"]; ok {
|
||||||
|
if e, name = Lookup(cs); e != nil {
|
||||||
|
return e, name, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(content) > 0 {
|
||||||
|
e, name = prescan(content)
|
||||||
|
if e != nil {
|
||||||
|
return e, name, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to detect UTF-8.
|
||||||
|
// First eliminate any partial rune at the end.
|
||||||
|
for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
|
||||||
|
b := content[i]
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if utf8.RuneStart(b) {
|
||||||
|
content = content[:i]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
hasHighBit := false
|
||||||
|
for _, c := range content {
|
||||||
|
if c >= 0x80 {
|
||||||
|
hasHighBit = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if hasHighBit && utf8.Valid(content) {
|
||||||
|
return encoding.Nop, "utf-8", false
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: change default depending on user's locale?
|
||||||
|
return charmap.Windows1252, "windows-1252", false
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReader returns an io.Reader that converts the content of r to UTF-8.
|
||||||
|
// It calls DetermineEncoding to find out what r's encoding is.
|
||||||
|
func NewReader(r io.Reader, contentType string) (io.Reader, error) {
|
||||||
|
preview := make([]byte, 1024)
|
||||||
|
n, err := io.ReadFull(r, preview)
|
||||||
|
switch {
|
||||||
|
case err == io.ErrUnexpectedEOF:
|
||||||
|
preview = preview[:n]
|
||||||
|
r = bytes.NewReader(preview)
|
||||||
|
case err != nil:
|
||||||
|
return nil, err
|
||||||
|
default:
|
||||||
|
r = io.MultiReader(bytes.NewReader(preview), r)
|
||||||
|
}
|
||||||
|
|
||||||
|
if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop {
|
||||||
|
r = transform.NewReader(r, e.NewDecoder())
|
||||||
|
}
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReaderLabel returns a reader that converts from the specified charset to
|
||||||
|
// UTF-8. It uses Lookup to find the encoding that corresponds to label, and
|
||||||
|
// returns an error if Lookup returns nil. It is suitable for use as
|
||||||
|
// encoding/xml.Decoder's CharsetReader function.
|
||||||
|
func NewReaderLabel(label string, input io.Reader) (io.Reader, error) {
|
||||||
|
e, _ := Lookup(label)
|
||||||
|
if e == nil {
|
||||||
|
return nil, fmt.Errorf("unsupported charset: %q", label)
|
||||||
|
}
|
||||||
|
return transform.NewReader(input, e.NewDecoder()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func prescan(content []byte) (e encoding.Encoding, name string) {
|
||||||
|
z := html.NewTokenizer(bytes.NewReader(content))
|
||||||
|
for {
|
||||||
|
switch z.Next() {
|
||||||
|
case html.ErrorToken:
|
||||||
|
return nil, ""
|
||||||
|
|
||||||
|
case html.StartTagToken, html.SelfClosingTagToken:
|
||||||
|
tagName, hasAttr := z.TagName()
|
||||||
|
if !bytes.Equal(tagName, []byte("meta")) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
attrList := make(map[string]bool)
|
||||||
|
gotPragma := false
|
||||||
|
|
||||||
|
const (
|
||||||
|
dontKnow = iota
|
||||||
|
doNeedPragma
|
||||||
|
doNotNeedPragma
|
||||||
|
)
|
||||||
|
needPragma := dontKnow
|
||||||
|
|
||||||
|
name = ""
|
||||||
|
e = nil
|
||||||
|
for hasAttr {
|
||||||
|
var key, val []byte
|
||||||
|
key, val, hasAttr = z.TagAttr()
|
||||||
|
ks := string(key)
|
||||||
|
if attrList[ks] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
attrList[ks] = true
|
||||||
|
for i, c := range val {
|
||||||
|
if 'A' <= c && c <= 'Z' {
|
||||||
|
val[i] = c + 0x20
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch ks {
|
||||||
|
case "http-equiv":
|
||||||
|
if bytes.Equal(val, []byte("content-type")) {
|
||||||
|
gotPragma = true
|
||||||
|
}
|
||||||
|
|
||||||
|
case "content":
|
||||||
|
if e == nil {
|
||||||
|
name = fromMetaElement(string(val))
|
||||||
|
if name != "" {
|
||||||
|
e, name = Lookup(name)
|
||||||
|
if e != nil {
|
||||||
|
needPragma = doNeedPragma
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case "charset":
|
||||||
|
e, name = Lookup(string(val))
|
||||||
|
needPragma = doNotNeedPragma
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.HasPrefix(name, "utf-16") {
|
||||||
|
name = "utf-8"
|
||||||
|
e = encoding.Nop
|
||||||
|
}
|
||||||
|
|
||||||
|
if e != nil {
|
||||||
|
return e, name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func fromMetaElement(s string) string {
|
||||||
|
for s != "" {
|
||||||
|
csLoc := strings.Index(s, "charset")
|
||||||
|
if csLoc == -1 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
s = s[csLoc+len("charset"):]
|
||||||
|
s = strings.TrimLeft(s, " \t\n\f\r")
|
||||||
|
if !strings.HasPrefix(s, "=") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
s = s[1:]
|
||||||
|
s = strings.TrimLeft(s, " \t\n\f\r")
|
||||||
|
if s == "" {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
if q := s[0]; q == '"' || q == '\'' {
|
||||||
|
s = s[1:]
|
||||||
|
closeQuote := strings.IndexRune(s, rune(q))
|
||||||
|
if closeQuote == -1 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return s[:closeQuote]
|
||||||
|
}
|
||||||
|
|
||||||
|
end := strings.IndexAny(s, "; \t\n\f\r")
|
||||||
|
if end == -1 {
|
||||||
|
end = len(s)
|
||||||
|
}
|
||||||
|
return s[:end]
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
var boms = []struct {
|
||||||
|
bom []byte
|
||||||
|
enc string
|
||||||
|
}{
|
||||||
|
{[]byte{0xfe, 0xff}, "utf-16be"},
|
||||||
|
{[]byte{0xff, 0xfe}, "utf-16le"},
|
||||||
|
{[]byte{0xef, 0xbb, 0xbf}, "utf-8"},
|
||||||
|
}
|
112
vendor/golang.org/x/net/html/const.go
generated
vendored
Normal file
112
vendor/golang.org/x/net/html/const.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package html
|
||||||
|
|
||||||
|
// Section 12.2.4.2 of the HTML5 specification says "The following elements
|
||||||
|
// have varying levels of special parsing rules".
|
||||||
|
// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
|
||||||
|
var isSpecialElementMap = map[string]bool{
|
||||||
|
"address": true,
|
||||||
|
"applet": true,
|
||||||
|
"area": true,
|
||||||
|
"article": true,
|
||||||
|
"aside": true,
|
||||||
|
"base": true,
|
||||||
|
"basefont": true,
|
||||||
|
"bgsound": true,
|
||||||
|
"blockquote": true,
|
||||||
|
"body": true,
|
||||||
|
"br": true,
|
||||||
|
"button": true,
|
||||||
|
"caption": true,
|
||||||
|
"center": true,
|
||||||
|
"col": true,
|
||||||
|
"colgroup": true,
|
||||||
|
"dd": true,
|
||||||
|
"details": true,
|
||||||
|
"dir": true,
|
||||||
|
"div": true,
|
||||||
|
"dl": true,
|
||||||
|
"dt": true,
|
||||||
|
"embed": true,
|
||||||
|
"fieldset": true,
|
||||||
|
"figcaption": true,
|
||||||
|
"figure": true,
|
||||||
|
"footer": true,
|
||||||
|
"form": true,
|
||||||
|
"frame": true,
|
||||||
|
"frameset": true,
|
||||||
|
"h1": true,
|
||||||
|
"h2": true,
|
||||||
|
"h3": true,
|
||||||
|
"h4": true,
|
||||||
|
"h5": true,
|
||||||
|
"h6": true,
|
||||||
|
"head": true,
|
||||||
|
"header": true,
|
||||||
|
"hgroup": true,
|
||||||
|
"hr": true,
|
||||||
|
"html": true,
|
||||||
|
"iframe": true,
|
||||||
|
"img": true,
|
||||||
|
"input": true,
|
||||||
|
"isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility.
|
||||||
|
"keygen": true,
|
||||||
|
"li": true,
|
||||||
|
"link": true,
|
||||||
|
"listing": true,
|
||||||
|
"main": true,
|
||||||
|
"marquee": true,
|
||||||
|
"menu": true,
|
||||||
|
"meta": true,
|
||||||
|
"nav": true,
|
||||||
|
"noembed": true,
|
||||||
|
"noframes": true,
|
||||||
|
"noscript": true,
|
||||||
|
"object": true,
|
||||||
|
"ol": true,
|
||||||
|
"p": true,
|
||||||
|
"param": true,
|
||||||
|
"plaintext": true,
|
||||||
|
"pre": true,
|
||||||
|
"script": true,
|
||||||
|
"section": true,
|
||||||
|
"select": true,
|
||||||
|
"source": true,
|
||||||
|
"style": true,
|
||||||
|
"summary": true,
|
||||||
|
"table": true,
|
||||||
|
"tbody": true,
|
||||||
|
"td": true,
|
||||||
|
"template": true,
|
||||||
|
"textarea": true,
|
||||||
|
"tfoot": true,
|
||||||
|
"th": true,
|
||||||
|
"thead": true,
|
||||||
|
"title": true,
|
||||||
|
"tr": true,
|
||||||
|
"track": true,
|
||||||
|
"ul": true,
|
||||||
|
"wbr": true,
|
||||||
|
"xmp": true,
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSpecialElement(element *Node) bool {
|
||||||
|
switch element.Namespace {
|
||||||
|
case "", "html":
|
||||||
|
return isSpecialElementMap[element.Data]
|
||||||
|
case "math":
|
||||||
|
switch element.Data {
|
||||||
|
case "mi", "mo", "mn", "ms", "mtext", "annotation-xml":
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
case "svg":
|
||||||
|
switch element.Data {
|
||||||
|
case "foreignObject", "desc", "title":
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
106
vendor/golang.org/x/net/html/doc.go
generated
vendored
Normal file
106
vendor/golang.org/x/net/html/doc.go
generated
vendored
Normal file
@ -0,0 +1,106 @@
|
|||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package html implements an HTML5-compliant tokenizer and parser.
|
||||||
|
|
||||||
|
Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
|
||||||
|
caller's responsibility to ensure that r provides UTF-8 encoded HTML.
|
||||||
|
|
||||||
|
z := html.NewTokenizer(r)
|
||||||
|
|
||||||
|
Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
|
||||||
|
which parses the next token and returns its type, or an error:
|
||||||
|
|
||||||
|
for {
|
||||||
|
tt := z.Next()
|
||||||
|
if tt == html.ErrorToken {
|
||||||
|
// ...
|
||||||
|
return ...
|
||||||
|
}
|
||||||
|
// Process the current token.
|
||||||
|
}
|
||||||
|
|
||||||
|
There are two APIs for retrieving the current token. The high-level API is to
|
||||||
|
call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
|
||||||
|
allow optionally calling Raw after Next but before Token, Text, TagName, or
|
||||||
|
TagAttr. In EBNF notation, the valid call sequence per token is:
|
||||||
|
|
||||||
|
Next {Raw} [ Token | Text | TagName {TagAttr} ]
|
||||||
|
|
||||||
|
Token returns an independent data structure that completely describes a token.
|
||||||
|
Entities (such as "<") are unescaped, tag names and attribute keys are
|
||||||
|
lower-cased, and attributes are collected into a []Attribute. For example:
|
||||||
|
|
||||||
|
for {
|
||||||
|
if z.Next() == html.ErrorToken {
|
||||||
|
// Returning io.EOF indicates success.
|
||||||
|
return z.Err()
|
||||||
|
}
|
||||||
|
emitToken(z.Token())
|
||||||
|
}
|
||||||
|
|
||||||
|
The low-level API performs fewer allocations and copies, but the contents of
|
||||||
|
the []byte values returned by Text, TagName and TagAttr may change on the next
|
||||||
|
call to Next. For example, to extract an HTML page's anchor text:
|
||||||
|
|
||||||
|
depth := 0
|
||||||
|
for {
|
||||||
|
tt := z.Next()
|
||||||
|
switch tt {
|
||||||
|
case html.ErrorToken:
|
||||||
|
return z.Err()
|
||||||
|
case html.TextToken:
|
||||||
|
if depth > 0 {
|
||||||
|
// emitBytes should copy the []byte it receives,
|
||||||
|
// if it doesn't process it immediately.
|
||||||
|
emitBytes(z.Text())
|
||||||
|
}
|
||||||
|
case html.StartTagToken, html.EndTagToken:
|
||||||
|
tn, _ := z.TagName()
|
||||||
|
if len(tn) == 1 && tn[0] == 'a' {
|
||||||
|
if tt == html.StartTagToken {
|
||||||
|
depth++
|
||||||
|
} else {
|
||||||
|
depth--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Parsing is done by calling Parse with an io.Reader, which returns the root of
|
||||||
|
the parse tree (the document element) as a *Node. It is the caller's
|
||||||
|
responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
|
||||||
|
example, to process each anchor node in depth-first order:
|
||||||
|
|
||||||
|
doc, err := html.Parse(r)
|
||||||
|
if err != nil {
|
||||||
|
// ...
|
||||||
|
}
|
||||||
|
var f func(*html.Node)
|
||||||
|
f = func(n *html.Node) {
|
||||||
|
if n.Type == html.ElementNode && n.Data == "a" {
|
||||||
|
// Do something with n...
|
||||||
|
}
|
||||||
|
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||||
|
f(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f(doc)
|
||||||
|
|
||||||
|
The relevant specifications include:
|
||||||
|
https://html.spec.whatwg.org/multipage/syntax.html and
|
||||||
|
https://html.spec.whatwg.org/multipage/syntax.html#tokenization
|
||||||
|
*/
|
||||||
|
package html // import "golang.org/x/net/html"
|
||||||
|
|
||||||
|
// The tokenization algorithm implemented by this package is not a line-by-line
|
||||||
|
// transliteration of the relatively verbose state-machine in the WHATWG
|
||||||
|
// specification. A more direct approach is used instead, where the program
|
||||||
|
// counter implies the state, such as whether it is tokenizing a tag or a text
|
||||||
|
// node. Specification compliance is verified by checking expected and actual
|
||||||
|
// outputs over a test suite rather than aiming for algorithmic fidelity.
|
||||||
|
|
||||||
|
// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
|
||||||
|
// TODO(nigeltao): How does parsing interact with a JavaScript engine?
|
156
vendor/golang.org/x/net/html/doctype.go
generated
vendored
Normal file
156
vendor/golang.org/x/net/html/doctype.go
generated
vendored
Normal file
@ -0,0 +1,156 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package html
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// parseDoctype parses the data from a DoctypeToken into a name,
|
||||||
|
// public identifier, and system identifier. It returns a Node whose Type
|
||||||
|
// is DoctypeNode, whose Data is the name, and which has attributes
|
||||||
|
// named "system" and "public" for the two identifiers if they were present.
|
||||||
|
// quirks is whether the document should be parsed in "quirks mode".
|
||||||
|
func parseDoctype(s string) (n *Node, quirks bool) {
|
||||||
|
n = &Node{Type: DoctypeNode}
|
||||||
|
|
||||||
|
// Find the name.
|
||||||
|
space := strings.IndexAny(s, whitespace)
|
||||||
|
if space == -1 {
|
||||||
|
space = len(s)
|
||||||
|
}
|
||||||
|
n.Data = s[:space]
|
||||||
|
// The comparison to "html" is case-sensitive.
|
||||||
|
if n.Data != "html" {
|
||||||
|
quirks = true
|
||||||
|
}
|
||||||
|
n.Data = strings.ToLower(n.Data)
|
||||||
|
s = strings.TrimLeft(s[space:], whitespace)
|
||||||
|
|
||||||
|
if len(s) < 6 {
|
||||||
|
// It can't start with "PUBLIC" or "SYSTEM".
|
||||||
|
// Ignore the rest of the string.
|
||||||
|
return n, quirks || s != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
key := strings.ToLower(s[:6])
|
||||||
|
s = s[6:]
|
||||||
|
for key == "public" || key == "system" {
|
||||||
|
s = strings.TrimLeft(s, whitespace)
|
||||||
|
if s == "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
quote := s[0]
|
||||||
|
if quote != '"' && quote != '\'' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s = s[1:]
|
||||||
|
q := strings.IndexRune(s, rune(quote))
|
||||||
|
var id string
|
||||||
|
if q == -1 {
|
||||||
|
id = s
|
||||||
|
s = ""
|
||||||
|
} else {
|
||||||
|
id = s[:q]
|
||||||
|
s = s[q+1:]
|
||||||
|
}
|
||||||
|
n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
|
||||||
|
if key == "public" {
|
||||||
|
key = "system"
|
||||||
|
} else {
|
||||||
|
key = ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if key != "" || s != "" {
|
||||||
|
quirks = true
|
||||||
|
} else if len(n.Attr) > 0 {
|
||||||
|
if n.Attr[0].Key == "public" {
|
||||||
|
public := strings.ToLower(n.Attr[0].Val)
|
||||||
|
switch public {
|
||||||
|
case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
|
||||||
|
quirks = true
|
||||||
|
default:
|
||||||
|
for _, q := range quirkyIDs {
|
||||||
|
if strings.HasPrefix(public, q) {
|
||||||
|
quirks = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// The following two public IDs only cause quirks mode if there is no system ID.
|
||||||
|
if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
|
||||||
|
strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
|
||||||
|
quirks = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
|
||||||
|
strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
|
||||||
|
quirks = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return n, quirks
|
||||||
|
}
|
||||||
|
|
||||||
|
// quirkyIDs is a list of public doctype identifiers that cause a document
|
||||||
|
// to be interpreted in quirks mode. The identifiers should be in lower case.
|
||||||
|
var quirkyIDs = []string{
|
||||||
|
"+//silmaril//dtd html pro v0r11 19970101//",
|
||||||
|
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
|
||||||
|
"-//as//dtd html 3.0 aswedit + extensions//",
|
||||||
|
"-//ietf//dtd html 2.0 level 1//",
|
||||||
|
"-//ietf//dtd html 2.0 level 2//",
|
||||||
|
"-//ietf//dtd html 2.0 strict level 1//",
|
||||||
|
"-//ietf//dtd html 2.0 strict level 2//",
|
||||||
|
"-//ietf//dtd html 2.0 strict//",
|
||||||
|
"-//ietf//dtd html 2.0//",
|
||||||
|
"-//ietf//dtd html 2.1e//",
|
||||||
|
"-//ietf//dtd html 3.0//",
|
||||||
|
"-//ietf//dtd html 3.2 final//",
|
||||||
|
"-//ietf//dtd html 3.2//",
|
||||||
|
"-//ietf//dtd html 3//",
|
||||||
|
"-//ietf//dtd html level 0//",
|
||||||
|
"-//ietf//dtd html level 1//",
|
||||||
|
"-//ietf//dtd html level 2//",
|
||||||
|
"-//ietf//dtd html level 3//",
|
||||||
|
"-//ietf//dtd html strict level 0//",
|
||||||
|
"-//ietf//dtd html strict level 1//",
|
||||||
|
"-//ietf//dtd html strict level 2//",
|
||||||
|
"-//ietf//dtd html strict level 3//",
|
||||||
|
"-//ietf//dtd html strict//",
|
||||||
|
"-//ietf//dtd html//",
|
||||||
|
"-//metrius//dtd metrius presentational//",
|
||||||
|
"-//microsoft//dtd internet explorer 2.0 html strict//",
|
||||||
|
"-//microsoft//dtd internet explorer 2.0 html//",
|
||||||
|
"-//microsoft//dtd internet explorer 2.0 tables//",
|
||||||
|
"-//microsoft//dtd internet explorer 3.0 html strict//",
|
||||||
|
"-//microsoft//dtd internet explorer 3.0 html//",
|
||||||
|
"-//microsoft//dtd internet explorer 3.0 tables//",
|
||||||
|
"-//netscape comm. corp.//dtd html//",
|
||||||
|
"-//netscape comm. corp.//dtd strict html//",
|
||||||
|
"-//o'reilly and associates//dtd html 2.0//",
|
||||||
|
"-//o'reilly and associates//dtd html extended 1.0//",
|
||||||
|
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
|
||||||
|
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
|
||||||
|
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
|
||||||
|
"-//spyglass//dtd html 2.0 extended//",
|
||||||
|
"-//sq//dtd html 2.0 hotmetal + extensions//",
|
||||||
|
"-//sun microsystems corp.//dtd hotjava html//",
|
||||||
|
"-//sun microsystems corp.//dtd hotjava strict html//",
|
||||||
|
"-//w3c//dtd html 3 1995-03-24//",
|
||||||
|
"-//w3c//dtd html 3.2 draft//",
|
||||||
|
"-//w3c//dtd html 3.2 final//",
|
||||||
|
"-//w3c//dtd html 3.2//",
|
||||||
|
"-//w3c//dtd html 3.2s draft//",
|
||||||
|
"-//w3c//dtd html 4.0 frameset//",
|
||||||
|
"-//w3c//dtd html 4.0 transitional//",
|
||||||
|
"-//w3c//dtd html experimental 19960712//",
|
||||||
|
"-//w3c//dtd html experimental 970421//",
|
||||||
|
"-//w3c//dtd w3 html//",
|
||||||
|
"-//w3o//dtd w3 html 3.0//",
|
||||||
|
"-//webtechs//dtd mozilla html 2.0//",
|
||||||
|
"-//webtechs//dtd mozilla html//",
|
||||||
|
}
|
2253
vendor/golang.org/x/net/html/entity.go
generated
vendored
Normal file
2253
vendor/golang.org/x/net/html/entity.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
258
vendor/golang.org/x/net/html/escape.go
generated
vendored
Normal file
258
vendor/golang.org/x/net/html/escape.go
generated
vendored
Normal file
@ -0,0 +1,258 @@
|
|||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package html
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// These replacements permit compatibility with old numeric entities that
|
||||||
|
// assumed Windows-1252 encoding.
|
||||||
|
// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
|
||||||
|
var replacementTable = [...]rune{
|
||||||
|
'\u20AC', // First entry is what 0x80 should be replaced with.
|
||||||
|
'\u0081',
|
||||||
|
'\u201A',
|
||||||
|
'\u0192',
|
||||||
|
'\u201E',
|
||||||
|
'\u2026',
|
||||||
|
'\u2020',
|
||||||
|
'\u2021',
|
||||||
|
'\u02C6',
|
||||||
|
'\u2030',
|
||||||
|
'\u0160',
|
||||||
|
'\u2039',
|
||||||
|
'\u0152',
|
||||||
|
'\u008D',
|
||||||
|
'\u017D',
|
||||||
|
'\u008F',
|
||||||
|
'\u0090',
|
||||||
|
'\u2018',
|
||||||
|
'\u2019',
|
||||||
|
'\u201C',
|
||||||
|
'\u201D',
|
||||||
|
'\u2022',
|
||||||
|
'\u2013',
|
||||||
|
'\u2014',
|
||||||
|
'\u02DC',
|
||||||
|
'\u2122',
|
||||||
|
'\u0161',
|
||||||
|
'\u203A',
|
||||||
|
'\u0153',
|
||||||
|
'\u009D',
|
||||||
|
'\u017E',
|
||||||
|
'\u0178', // Last entry is 0x9F.
|
||||||
|
// 0x00->'\uFFFD' is handled programmatically.
|
||||||
|
// 0x0D->'\u000D' is a no-op.
|
||||||
|
}
|
||||||
|
|
||||||
|
// unescapeEntity reads an entity like "<" from b[src:] and writes the
|
||||||
|
// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
|
||||||
|
// Precondition: b[src] == '&' && dst <= src.
|
||||||
|
// attribute should be true if parsing an attribute value.
|
||||||
|
func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
|
||||||
|
// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
|
||||||
|
|
||||||
|
// i starts at 1 because we already know that s[0] == '&'.
|
||||||
|
i, s := 1, b[src:]
|
||||||
|
|
||||||
|
if len(s) <= 1 {
|
||||||
|
b[dst] = b[src]
|
||||||
|
return dst + 1, src + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if s[i] == '#' {
|
||||||
|
if len(s) <= 3 { // We need to have at least "&#.".
|
||||||
|
b[dst] = b[src]
|
||||||
|
return dst + 1, src + 1
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
c := s[i]
|
||||||
|
hex := false
|
||||||
|
if c == 'x' || c == 'X' {
|
||||||
|
hex = true
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
|
||||||
|
x := '\x00'
|
||||||
|
for i < len(s) {
|
||||||
|
c = s[i]
|
||||||
|
i++
|
||||||
|
if hex {
|
||||||
|
if '0' <= c && c <= '9' {
|
||||||
|
x = 16*x + rune(c) - '0'
|
||||||
|
continue
|
||||||
|
} else if 'a' <= c && c <= 'f' {
|
||||||
|
x = 16*x + rune(c) - 'a' + 10
|
||||||
|
continue
|
||||||
|
} else if 'A' <= c && c <= 'F' {
|
||||||
|
x = 16*x + rune(c) - 'A' + 10
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if '0' <= c && c <= '9' {
|
||||||
|
x = 10*x + rune(c) - '0'
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if c != ';' {
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if i <= 3 { // No characters matched.
|
||||||
|
b[dst] = b[src]
|
||||||
|
return dst + 1, src + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if 0x80 <= x && x <= 0x9F {
|
||||||
|
// Replace characters from Windows-1252 with UTF-8 equivalents.
|
||||||
|
x = replacementTable[x-0x80]
|
||||||
|
} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
|
||||||
|
// Replace invalid characters with the replacement character.
|
||||||
|
x = '\uFFFD'
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst + utf8.EncodeRune(b[dst:], x), src + i
|
||||||
|
}
|
||||||
|
|
||||||
|
// Consume the maximum number of characters possible, with the
|
||||||
|
// consumed characters matching one of the named references.
|
||||||
|
|
||||||
|
for i < len(s) {
|
||||||
|
c := s[i]
|
||||||
|
i++
|
||||||
|
// Lower-cased characters are more common in entities, so we check for them first.
|
||||||
|
if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if c != ';' {
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
entityName := string(s[1:i])
|
||||||
|
if entityName == "" {
|
||||||
|
// No-op.
|
||||||
|
} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
|
||||||
|
// No-op.
|
||||||
|
} else if x := entity[entityName]; x != 0 {
|
||||||
|
return dst + utf8.EncodeRune(b[dst:], x), src + i
|
||||||
|
} else if x := entity2[entityName]; x[0] != 0 {
|
||||||
|
dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
|
||||||
|
return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
|
||||||
|
} else if !attribute {
|
||||||
|
maxLen := len(entityName) - 1
|
||||||
|
if maxLen > longestEntityWithoutSemicolon {
|
||||||
|
maxLen = longestEntityWithoutSemicolon
|
||||||
|
}
|
||||||
|
for j := maxLen; j > 1; j-- {
|
||||||
|
if x := entity[entityName[:j]]; x != 0 {
|
||||||
|
return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dst1, src1 = dst+i, src+i
|
||||||
|
copy(b[dst:dst1], b[src:src1])
|
||||||
|
return dst1, src1
|
||||||
|
}
|
||||||
|
|
||||||
|
// unescape unescapes b's entities in-place, so that "a<b" becomes "a<b".
|
||||||
|
// attribute should be true if parsing an attribute value.
|
||||||
|
func unescape(b []byte, attribute bool) []byte {
|
||||||
|
for i, c := range b {
|
||||||
|
if c == '&' {
|
||||||
|
dst, src := unescapeEntity(b, i, i, attribute)
|
||||||
|
for src < len(b) {
|
||||||
|
c := b[src]
|
||||||
|
if c == '&' {
|
||||||
|
dst, src = unescapeEntity(b, dst, src, attribute)
|
||||||
|
} else {
|
||||||
|
b[dst] = c
|
||||||
|
dst, src = dst+1, src+1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return b[0:dst]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
|
||||||
|
func lower(b []byte) []byte {
|
||||||
|
for i, c := range b {
|
||||||
|
if 'A' <= c && c <= 'Z' {
|
||||||
|
b[i] = c + 'a' - 'A'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
const escapedChars = "&'<>\"\r"
|
||||||
|
|
||||||
|
func escape(w writer, s string) error {
|
||||||
|
i := strings.IndexAny(s, escapedChars)
|
||||||
|
for i != -1 {
|
||||||
|
if _, err := w.WriteString(s[:i]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var esc string
|
||||||
|
switch s[i] {
|
||||||
|
case '&':
|
||||||
|
esc = "&"
|
||||||
|
case '\'':
|
||||||
|
// "'" is shorter than "'" and apos was not in HTML until HTML5.
|
||||||
|
esc = "'"
|
||||||
|
case '<':
|
||||||
|
esc = "<"
|
||||||
|
case '>':
|
||||||
|
esc = ">"
|
||||||
|
case '"':
|
||||||
|
// """ is shorter than """.
|
||||||
|
esc = """
|
||||||
|
case '\r':
|
||||||
|
esc = " "
|
||||||
|
default:
|
||||||
|
panic("unrecognized escape character")
|
||||||
|
}
|
||||||
|
s = s[i+1:]
|
||||||
|
if _, err := w.WriteString(esc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
i = strings.IndexAny(s, escapedChars)
|
||||||
|
}
|
||||||
|
_, err := w.WriteString(s)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// EscapeString escapes special characters like "<" to become "<". It
|
||||||
|
// escapes only five such characters: <, >, &, ' and ".
|
||||||
|
// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
|
||||||
|
// always true.
|
||||||
|
func EscapeString(s string) string {
|
||||||
|
if strings.IndexAny(s, escapedChars) == -1 {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
escape(&buf, s)
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnescapeString unescapes entities like "<" to become "<". It unescapes a
|
||||||
|
// larger range of entities than EscapeString escapes. For example, "á"
|
||||||
|
// unescapes to "á", as does "á" and "&xE1;".
|
||||||
|
// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
|
||||||
|
// always true.
|
||||||
|
func UnescapeString(s string) string {
|
||||||
|
for _, c := range s {
|
||||||
|
if c == '&' {
|
||||||
|
return string(unescape([]byte(s), false))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
226
vendor/golang.org/x/net/html/foreign.go
generated
vendored
Normal file
226
vendor/golang.org/x/net/html/foreign.go
generated
vendored
Normal file
@ -0,0 +1,226 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package html
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
|
||||||
|
for i := range aa {
|
||||||
|
if newName, ok := nameMap[aa[i].Key]; ok {
|
||||||
|
aa[i].Key = newName
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func adjustForeignAttributes(aa []Attribute) {
|
||||||
|
for i, a := range aa {
|
||||||
|
if a.Key == "" || a.Key[0] != 'x' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch a.Key {
|
||||||
|
case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
|
||||||
|
"xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
|
||||||
|
j := strings.Index(a.Key, ":")
|
||||||
|
aa[i].Namespace = a.Key[:j]
|
||||||
|
aa[i].Key = a.Key[j+1:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func htmlIntegrationPoint(n *Node) bool {
|
||||||
|
if n.Type != ElementNode {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
switch n.Namespace {
|
||||||
|
case "math":
|
||||||
|
if n.Data == "annotation-xml" {
|
||||||
|
for _, a := range n.Attr {
|
||||||
|
if a.Key == "encoding" {
|
||||||
|
val := strings.ToLower(a.Val)
|
||||||
|
if val == "text/html" || val == "application/xhtml+xml" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "svg":
|
||||||
|
switch n.Data {
|
||||||
|
case "desc", "foreignObject", "title":
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func mathMLTextIntegrationPoint(n *Node) bool {
|
||||||
|
if n.Namespace != "math" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
switch n.Data {
|
||||||
|
case "mi", "mo", "mn", "ms", "mtext":
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Section 12.2.6.5.
|
||||||
|
var breakout = map[string]bool{
|
||||||
|
"b": true,
|
||||||
|
"big": true,
|
||||||
|
"blockquote": true,
|
||||||
|
"body": true,
|
||||||
|
"br": true,
|
||||||
|
"center": true,
|
||||||
|
"code": true,
|
||||||
|
"dd": true,
|
||||||
|
"div": true,
|
||||||
|
"dl": true,
|
||||||
|
"dt": true,
|
||||||
|
"em": true,
|
||||||
|
"embed": true,
|
||||||
|
"h1": true,
|
||||||
|
"h2": true,
|
||||||
|
"h3": true,
|
||||||
|
"h4": true,
|
||||||
|
"h5": true,
|
||||||
|
"h6": true,
|
||||||
|
"head": true,
|
||||||
|
"hr": true,
|
||||||
|
"i": true,
|
||||||
|
"img": true,
|
||||||
|
"li": true,
|
||||||
|
"listing": true,
|
||||||
|
"menu": true,
|
||||||
|
"meta": true,
|
||||||
|
"nobr": true,
|
||||||
|
"ol": true,
|
||||||
|
"p": true,
|
||||||
|
"pre": true,
|
||||||
|
"ruby": true,
|
||||||
|
"s": true,
|
||||||
|
"small": true,
|
||||||
|
"span": true,
|
||||||
|
"strong": true,
|
||||||
|
"strike": true,
|
||||||
|
"sub": true,
|
||||||
|
"sup": true,
|
||||||
|
"table": true,
|
||||||
|
"tt": true,
|
||||||
|
"u": true,
|
||||||
|
"ul": true,
|
||||||
|
"var": true,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Section 12.2.6.5.
|
||||||
|
var svgTagNameAdjustments = map[string]string{
|
||||||
|
"altglyph": "altGlyph",
|
||||||
|
"altglyphdef": "altGlyphDef",
|
||||||
|
"altglyphitem": "altGlyphItem",
|
||||||
|
"animatecolor": "animateColor",
|
||||||
|
"animatemotion": "animateMotion",
|
||||||
|
"animatetransform": "animateTransform",
|
||||||
|
"clippath": "clipPath",
|
||||||
|
"feblend": "feBlend",
|
||||||
|
"fecolormatrix": "feColorMatrix",
|
||||||
|
"fecomponenttransfer": "feComponentTransfer",
|
||||||
|
"fecomposite": "feComposite",
|
||||||
|
"feconvolvematrix": "feConvolveMatrix",
|
||||||
|
"fediffuselighting": "feDiffuseLighting",
|
||||||
|
"fedisplacementmap": "feDisplacementMap",
|
||||||
|
"fedistantlight": "feDistantLight",
|
||||||
|
"feflood": "feFlood",
|
||||||
|
"fefunca": "feFuncA",
|
||||||
|
"fefuncb": "feFuncB",
|
||||||
|
"fefuncg": "feFuncG",
|
||||||
|
"fefuncr": "feFuncR",
|
||||||
|
"fegaussianblur": "feGaussianBlur",
|
||||||
|
"feimage": "feImage",
|
||||||
|
"femerge": "feMerge",
|
||||||
|
"femergenode": "feMergeNode",
|
||||||
|
"femorphology": "feMorphology",
|
||||||
|
"feoffset": "feOffset",
|
||||||
|
"fepointlight": "fePointLight",
|
||||||
|
"fespecularlighting": "feSpecularLighting",
|
||||||
|
"fespotlight": "feSpotLight",
|
||||||
|
"fetile": "feTile",
|
||||||
|
"feturbulence": "feTurbulence",
|
||||||
|
"foreignobject": "foreignObject",
|
||||||
|
"glyphref": "glyphRef",
|
||||||
|
"lineargradient": "linearGradient",
|
||||||
|
"radialgradient": "radialGradient",
|
||||||
|
"textpath": "textPath",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Section 12.2.6.1
|
||||||
|
var mathMLAttributeAdjustments = map[string]string{
|
||||||
|
"definitionurl": "definitionURL",
|
||||||
|
}
|
||||||
|
|
||||||
|
var svgAttributeAdjustments = map[string]string{
|
||||||
|
"attributename": "attributeName",
|
||||||
|
"attributetype": "attributeType",
|
||||||
|
"basefrequency": "baseFrequency",
|
||||||
|
"baseprofile": "baseProfile",
|
||||||
|
"calcmode": "calcMode",
|
||||||
|
"clippathunits": "clipPathUnits",
|
||||||
|
"contentscripttype": "contentScriptType",
|
||||||
|
"contentstyletype": "contentStyleType",
|
||||||
|
"diffuseconstant": "diffuseConstant",
|
||||||
|
"edgemode": "edgeMode",
|
||||||
|
"externalresourcesrequired": "externalResourcesRequired",
|
||||||
|
"filterres": "filterRes",
|
||||||
|
"filterunits": "filterUnits",
|
||||||
|
"glyphref": "glyphRef",
|
||||||
|
"gradienttransform": "gradientTransform",
|
||||||
|
"gradientunits": "gradientUnits",
|
||||||
|
"kernelmatrix": "kernelMatrix",
|
||||||
|
"kernelunitlength": "kernelUnitLength",
|
||||||
|
"keypoints": "keyPoints",
|
||||||
|
"keysplines": "keySplines",
|
||||||
|
"keytimes": "keyTimes",
|
||||||
|
"lengthadjust": "lengthAdjust",
|
||||||
|
"limitingconeangle": "limitingConeAngle",
|
||||||
|
"markerheight": "markerHeight",
|
||||||
|
"markerunits": "markerUnits",
|
||||||
|
"markerwidth": "markerWidth",
|
||||||
|
"maskcontentunits": "maskContentUnits",
|
||||||
|
"maskunits": "maskUnits",
|
||||||
|
"numoctaves": "numOctaves",
|
||||||
|
"pathlength": "pathLength",
|
||||||
|
"patterncontentunits": "patternContentUnits",
|
||||||
|
"patterntransform": "patternTransform",
|
||||||
|
"patternunits": "patternUnits",
|
||||||
|
"pointsatx": "pointsAtX",
|
||||||
|
"pointsaty": "pointsAtY",
|
||||||
|
"pointsatz": "pointsAtZ",
|
||||||
|
"preservealpha": "preserveAlpha",
|
||||||
|
"preserveaspectratio": "preserveAspectRatio",
|
||||||
|
"primitiveunits": "primitiveUnits",
|
||||||
|
"refx": "refX",
|
||||||
|
"refy": "refY",
|
||||||
|
"repeatcount": "repeatCount",
|
||||||
|
"repeatdur": "repeatDur",
|
||||||
|
"requiredextensions": "requiredExtensions",
|
||||||
|
"requiredfeatures": "requiredFeatures",
|
||||||
|
"specularconstant": "specularConstant",
|
||||||
|
"specularexponent": "specularExponent",
|
||||||
|
"spreadmethod": "spreadMethod",
|
||||||
|
"startoffset": "startOffset",
|
||||||
|
"stddeviation": "stdDeviation",
|
||||||
|
"stitchtiles": "stitchTiles",
|
||||||
|
"surfacescale": "surfaceScale",
|
||||||
|
"systemlanguage": "systemLanguage",
|
||||||
|
"tablevalues": "tableValues",
|
||||||
|
"targetx": "targetX",
|
||||||
|
"targety": "targetY",
|
||||||
|
"textlength": "textLength",
|
||||||
|
"viewbox": "viewBox",
|
||||||
|
"viewtarget": "viewTarget",
|
||||||
|
"xchannelselector": "xChannelSelector",
|
||||||
|
"ychannelselector": "yChannelSelector",
|
||||||
|
"zoomandpan": "zoomAndPan",
|
||||||
|
}
|
220
vendor/golang.org/x/net/html/node.go
generated
vendored
Normal file
220
vendor/golang.org/x/net/html/node.go
generated
vendored
Normal file
@ -0,0 +1,220 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package html
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/net/html/atom"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A NodeType is the type of a Node.
|
||||||
|
type NodeType uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
ErrorNode NodeType = iota
|
||||||
|
TextNode
|
||||||
|
DocumentNode
|
||||||
|
ElementNode
|
||||||
|
CommentNode
|
||||||
|
DoctypeNode
|
||||||
|
scopeMarkerNode
|
||||||
|
)
|
||||||
|
|
||||||
|
// Section 12.2.4.3 says "The markers are inserted when entering applet,
|
||||||
|
// object, marquee, template, td, th, and caption elements, and are used
|
||||||
|
// to prevent formatting from "leaking" into applet, object, marquee,
|
||||||
|
// template, td, th, and caption elements".
|
||||||
|
var scopeMarker = Node{Type: scopeMarkerNode}
|
||||||
|
|
||||||
|
// A Node consists of a NodeType and some Data (tag name for element nodes,
|
||||||
|
// content for text) and are part of a tree of Nodes. Element nodes may also
|
||||||
|
// have a Namespace and contain a slice of Attributes. Data is unescaped, so
|
||||||
|
// that it looks like "a<b" rather than "a<b". For element nodes, DataAtom
|
||||||
|
// is the atom for Data, or zero if Data is not a known tag name.
|
||||||
|
//
|
||||||
|
// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
|
||||||
|
// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
|
||||||
|
// "svg" is short for "http://www.w3.org/2000/svg".
|
||||||
|
type Node struct {
|
||||||
|
Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
|
||||||
|
|
||||||
|
Type NodeType
|
||||||
|
DataAtom atom.Atom
|
||||||
|
Data string
|
||||||
|
Namespace string
|
||||||
|
Attr []Attribute
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertBefore inserts newChild as a child of n, immediately before oldChild
|
||||||
|
// in the sequence of n's children. oldChild may be nil, in which case newChild
|
||||||
|
// is appended to the end of n's children.
|
||||||
|
//
|
||||||
|
// It will panic if newChild already has a parent or siblings.
|
||||||
|
func (n *Node) InsertBefore(newChild, oldChild *Node) {
|
||||||
|
if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
|
||||||
|
panic("html: InsertBefore called for an attached child Node")
|
||||||
|
}
|
||||||
|
var prev, next *Node
|
||||||
|
if oldChild != nil {
|
||||||
|
prev, next = oldChild.PrevSibling, oldChild
|
||||||
|
} else {
|
||||||
|
prev = n.LastChild
|
||||||
|
}
|
||||||
|
if prev != nil {
|
||||||
|
prev.NextSibling = newChild
|
||||||
|
} else {
|
||||||
|
n.FirstChild = newChild
|
||||||
|
}
|
||||||
|
if next != nil {
|
||||||
|
next.PrevSibling = newChild
|
||||||
|
} else {
|
||||||
|
n.LastChild = newChild
|
||||||
|
}
|
||||||
|
newChild.Parent = n
|
||||||
|
newChild.PrevSibling = prev
|
||||||
|
newChild.NextSibling = next
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendChild adds a node c as a child of n.
|
||||||
|
//
|
||||||
|
// It will panic if c already has a parent or siblings.
|
||||||
|
func (n *Node) AppendChild(c *Node) {
|
||||||
|
if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
|
||||||
|
panic("html: AppendChild called for an attached child Node")
|
||||||
|
}
|
||||||
|
last := n.LastChild
|
||||||
|
if last != nil {
|
||||||
|
last.NextSibling = c
|
||||||
|
} else {
|
||||||
|
n.FirstChild = c
|
||||||
|
}
|
||||||
|
n.LastChild = c
|
||||||
|
c.Parent = n
|
||||||
|
c.PrevSibling = last
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveChild removes a node c that is a child of n. Afterwards, c will have
|
||||||
|
// no parent and no siblings.
|
||||||
|
//
|
||||||
|
// It will panic if c's parent is not n.
|
||||||
|
func (n *Node) RemoveChild(c *Node) {
|
||||||
|
if c.Parent != n {
|
||||||
|
panic("html: RemoveChild called for a non-child Node")
|
||||||
|
}
|
||||||
|
if n.FirstChild == c {
|
||||||
|
n.FirstChild = c.NextSibling
|
||||||
|
}
|
||||||
|
if c.NextSibling != nil {
|
||||||
|
c.NextSibling.PrevSibling = c.PrevSibling
|
||||||
|
}
|
||||||
|
if n.LastChild == c {
|
||||||
|
n.LastChild = c.PrevSibling
|
||||||
|
}
|
||||||
|
if c.PrevSibling != nil {
|
||||||
|
c.PrevSibling.NextSibling = c.NextSibling
|
||||||
|
}
|
||||||
|
c.Parent = nil
|
||||||
|
c.PrevSibling = nil
|
||||||
|
c.NextSibling = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// reparentChildren reparents all of src's child nodes to dst.
|
||||||
|
func reparentChildren(dst, src *Node) {
|
||||||
|
for {
|
||||||
|
child := src.FirstChild
|
||||||
|
if child == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
src.RemoveChild(child)
|
||||||
|
dst.AppendChild(child)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// clone returns a new node with the same type, data and attributes.
|
||||||
|
// The clone has no parent, no siblings and no children.
|
||||||
|
func (n *Node) clone() *Node {
|
||||||
|
m := &Node{
|
||||||
|
Type: n.Type,
|
||||||
|
DataAtom: n.DataAtom,
|
||||||
|
Data: n.Data,
|
||||||
|
Attr: make([]Attribute, len(n.Attr)),
|
||||||
|
}
|
||||||
|
copy(m.Attr, n.Attr)
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// nodeStack is a stack of nodes.
|
||||||
|
type nodeStack []*Node
|
||||||
|
|
||||||
|
// pop pops the stack. It will panic if s is empty.
|
||||||
|
func (s *nodeStack) pop() *Node {
|
||||||
|
i := len(*s)
|
||||||
|
n := (*s)[i-1]
|
||||||
|
*s = (*s)[:i-1]
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// top returns the most recently pushed node, or nil if s is empty.
|
||||||
|
func (s *nodeStack) top() *Node {
|
||||||
|
if i := len(*s); i > 0 {
|
||||||
|
return (*s)[i-1]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// index returns the index of the top-most occurrence of n in the stack, or -1
|
||||||
|
// if n is not present.
|
||||||
|
func (s *nodeStack) index(n *Node) int {
|
||||||
|
for i := len(*s) - 1; i >= 0; i-- {
|
||||||
|
if (*s)[i] == n {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// contains returns whether a is within s.
|
||||||
|
func (s *nodeStack) contains(a atom.Atom) bool {
|
||||||
|
for _, n := range *s {
|
||||||
|
if n.DataAtom == a && n.Namespace == "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// insert inserts a node at the given index.
|
||||||
|
func (s *nodeStack) insert(i int, n *Node) {
|
||||||
|
(*s) = append(*s, nil)
|
||||||
|
copy((*s)[i+1:], (*s)[i:])
|
||||||
|
(*s)[i] = n
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove removes a node from the stack. It is a no-op if n is not present.
|
||||||
|
func (s *nodeStack) remove(n *Node) {
|
||||||
|
i := s.index(n)
|
||||||
|
if i == -1 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
copy((*s)[i:], (*s)[i+1:])
|
||||||
|
j := len(*s) - 1
|
||||||
|
(*s)[j] = nil
|
||||||
|
*s = (*s)[:j]
|
||||||
|
}
|
||||||
|
|
||||||
|
type insertionModeStack []insertionMode
|
||||||
|
|
||||||
|
func (s *insertionModeStack) pop() (im insertionMode) {
|
||||||
|
i := len(*s)
|
||||||
|
im = (*s)[i-1]
|
||||||
|
*s = (*s)[:i-1]
|
||||||
|
return im
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *insertionModeStack) top() insertionMode {
|
||||||
|
if i := len(*s); i > 0 {
|
||||||
|
return (*s)[i-1]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
2417
vendor/golang.org/x/net/html/parse.go
generated
vendored
Normal file
2417
vendor/golang.org/x/net/html/parse.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
271
vendor/golang.org/x/net/html/render.go
generated
vendored
Normal file
271
vendor/golang.org/x/net/html/render.go
generated
vendored
Normal file
@ -0,0 +1,271 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package html
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// writer is the output sink used by the renderer. It unions io.Writer with
// the byte- and string-oriented write methods the rendering code calls.
type writer interface {
	io.Writer
	io.ByteWriter
	WriteString(string) (int, error)
}
|
||||||
|
|
||||||
|
// Render renders the parse tree n to the given writer.
|
||||||
|
//
|
||||||
|
// Rendering is done on a 'best effort' basis: calling Parse on the output of
|
||||||
|
// Render will always result in something similar to the original tree, but it
|
||||||
|
// is not necessarily an exact clone unless the original tree was 'well-formed'.
|
||||||
|
// 'Well-formed' is not easily specified; the HTML5 specification is
|
||||||
|
// complicated.
|
||||||
|
//
|
||||||
|
// Calling Parse on arbitrary input typically results in a 'well-formed' parse
|
||||||
|
// tree. However, it is possible for Parse to yield a 'badly-formed' parse tree.
|
||||||
|
// For example, in a 'well-formed' parse tree, no <a> element is a child of
|
||||||
|
// another <a> element: parsing "<a><a>" results in two sibling elements.
|
||||||
|
// Similarly, in a 'well-formed' parse tree, no <a> element is a child of a
|
||||||
|
// <table> element: parsing "<p><table><a>" results in a <p> with two sibling
|
||||||
|
// children; the <a> is reparented to the <table>'s parent. However, calling
|
||||||
|
// Parse on "<a><table><a>" does not return an error, but the result has an <a>
|
||||||
|
// element with an <a> child, and is therefore not 'well-formed'.
|
||||||
|
//
|
||||||
|
// Programmatically constructed trees are typically also 'well-formed', but it
|
||||||
|
// is possible to construct a tree that looks innocuous but, when rendered and
|
||||||
|
// re-parsed, results in a different tree. A simple example is that a solitary
|
||||||
|
// text node would become a tree containing <html>, <head> and <body> elements.
|
||||||
|
// Another example is that the programmatic equivalent of "a<head>b</head>c"
|
||||||
|
// becomes "<html><head><head/><body>abc</body></html>".
|
||||||
|
func Render(w io.Writer, n *Node) error {
|
||||||
|
if x, ok := w.(writer); ok {
|
||||||
|
return render(x, n)
|
||||||
|
}
|
||||||
|
buf := bufio.NewWriter(w)
|
||||||
|
if err := render(buf, n); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return buf.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
// plaintextAbort is returned from render1 when a <plaintext> element
// has been rendered. No more end tags should be rendered after that.
// render translates this internal sentinel into a nil error before
// returning to callers.
var plaintextAbort = errors.New("html: internal error (plaintext abort)")
|
||||||
|
|
||||||
|
func render(w writer, n *Node) error {
|
||||||
|
err := render1(w, n)
|
||||||
|
if err == plaintextAbort {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// render1 writes the serialization of the subtree rooted at n to w. It
// returns plaintextAbort after rendering the contents of a <plaintext>
// element, signalling that no more end tags should be rendered.
func render1(w writer, n *Node) error {
	// Render non-element nodes; these are the easy cases.
	switch n.Type {
	case ErrorNode:
		return errors.New("html: cannot render an ErrorNode node")
	case TextNode:
		// Text content goes through escape; raw-text children of elements
		// such as <script> are special-cased further below.
		return escape(w, n.Data)
	case DocumentNode:
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			if err := render1(w, c); err != nil {
				return err
			}
		}
		return nil
	case ElementNode:
		// No-op. Elements fall through to the tag-rendering code below.
	case CommentNode:
		if _, err := w.WriteString("<!--"); err != nil {
			return err
		}
		if _, err := w.WriteString(n.Data); err != nil {
			return err
		}
		if _, err := w.WriteString("-->"); err != nil {
			return err
		}
		return nil
	case DoctypeNode:
		if _, err := w.WriteString("<!DOCTYPE "); err != nil {
			return err
		}
		if _, err := w.WriteString(n.Data); err != nil {
			return err
		}
		if n.Attr != nil {
			// The PUBLIC and SYSTEM identifiers are stored as attributes
			// with the keys "public" and "system".
			var p, s string
			for _, a := range n.Attr {
				switch a.Key {
				case "public":
					p = a.Val
				case "system":
					s = a.Val
				}
			}
			if p != "" {
				if _, err := w.WriteString(" PUBLIC "); err != nil {
					return err
				}
				if err := writeQuoted(w, p); err != nil {
					return err
				}
				if s != "" {
					if err := w.WriteByte(' '); err != nil {
						return err
					}
					if err := writeQuoted(w, s); err != nil {
						return err
					}
				}
			} else if s != "" {
				if _, err := w.WriteString(" SYSTEM "); err != nil {
					return err
				}
				if err := writeQuoted(w, s); err != nil {
					return err
				}
			}
		}
		return w.WriteByte('>')
	default:
		return errors.New("html: unknown node type")
	}

	// Render the <xxx> opening tag.
	if err := w.WriteByte('<'); err != nil {
		return err
	}
	if _, err := w.WriteString(n.Data); err != nil {
		return err
	}
	for _, a := range n.Attr {
		if err := w.WriteByte(' '); err != nil {
			return err
		}
		if a.Namespace != "" {
			if _, err := w.WriteString(a.Namespace); err != nil {
				return err
			}
			if err := w.WriteByte(':'); err != nil {
				return err
			}
		}
		if _, err := w.WriteString(a.Key); err != nil {
			return err
		}
		if _, err := w.WriteString(`="`); err != nil {
			return err
		}
		if err := escape(w, a.Val); err != nil {
			return err
		}
		if err := w.WriteByte('"'); err != nil {
			return err
		}
	}
	if voidElements[n.Data] {
		// Void elements are serialized self-closed and must be childless.
		if n.FirstChild != nil {
			return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
		}
		_, err := w.WriteString("/>")
		return err
	}
	if err := w.WriteByte('>'); err != nil {
		return err
	}

	// Add initial newline where there is danger of a newline being ignored.
	if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
		switch n.Data {
		case "pre", "listing", "textarea":
			if err := w.WriteByte('\n'); err != nil {
				return err
			}
		}
	}

	// Render any child nodes.
	switch n.Data {
	case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
		// Raw-text elements: text children are written verbatim, without
		// escaping.
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			if c.Type == TextNode {
				if _, err := w.WriteString(c.Data); err != nil {
					return err
				}
			} else {
				if err := render1(w, c); err != nil {
					return err
				}
			}
		}
		if n.Data == "plaintext" {
			// Don't render anything else. <plaintext> must be the
			// last element in the file, with no closing tag.
			return plaintextAbort
		}
	default:
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			if err := render1(w, c); err != nil {
				return err
			}
		}
	}

	// Render the </xxx> closing tag.
	if _, err := w.WriteString("</"); err != nil {
		return err
	}
	if _, err := w.WriteString(n.Data); err != nil {
		return err
	}
	return w.WriteByte('>')
}
|
||||||
|
|
||||||
|
// writeQuoted writes s to w surrounded by quotes. Normally it will use double
|
||||||
|
// quotes, but if s contains a double quote, it will use single quotes.
|
||||||
|
// It is used for writing the identifiers in a doctype declaration.
|
||||||
|
// In valid HTML, they can't contain both types of quotes.
|
||||||
|
func writeQuoted(w writer, s string) error {
|
||||||
|
var q byte = '"'
|
||||||
|
if strings.Contains(s, `"`) {
|
||||||
|
q = '\''
|
||||||
|
}
|
||||||
|
if err := w.WriteByte(q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := w.WriteString(s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := w.WriteByte(q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Section 12.1.2, "Elements", gives this list of void elements. Void elements
// are those that can't have any contents.
var voidElements = func() map[string]bool {
	m := make(map[string]bool, 16)
	for _, name := range []string{
		"area", "base", "br", "col", "command", "embed", "hr", "img",
		"input", "keygen", "link", "meta", "param", "source", "track", "wbr",
	} {
		m[name] = true
	}
	return m
}()
|
1219
vendor/golang.org/x/net/html/token.go
generated
vendored
Normal file
1219
vendor/golang.org/x/net/html/token.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
3
vendor/golang.org/x/text/AUTHORS
generated
vendored
Normal file
3
vendor/golang.org/x/text/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# This source code refers to The Go Authors for copyright purposes.
|
||||||
|
# The master list of authors is in the main Go distribution,
|
||||||
|
# visible at http://tip.golang.org/AUTHORS.
|
3
vendor/golang.org/x/text/CONTRIBUTORS
generated
vendored
Normal file
3
vendor/golang.org/x/text/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# This source code was written by the Go contributors.
|
||||||
|
# The master list of contributors is in the main Go distribution,
|
||||||
|
# visible at http://tip.golang.org/CONTRIBUTORS.
|
27
vendor/golang.org/x/text/LICENSE
generated
vendored
Normal file
27
vendor/golang.org/x/text/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
22
vendor/golang.org/x/text/PATENTS
generated
vendored
Normal file
22
vendor/golang.org/x/text/PATENTS
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
Additional IP Rights Grant (Patents)
|
||||||
|
|
||||||
|
"This implementation" means the copyrightable works distributed by
|
||||||
|
Google as part of the Go project.
|
||||||
|
|
||||||
|
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||||
|
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||||
|
patent license to make, have made, use, offer to sell, sell, import,
|
||||||
|
transfer and otherwise run, modify and propagate the contents of this
|
||||||
|
implementation of Go, where such license applies only to those patent
|
||||||
|
claims, both currently owned or controlled by Google and acquired in
|
||||||
|
the future, licensable by Google that are necessarily infringed by this
|
||||||
|
implementation of Go. This grant does not include claims that would be
|
||||||
|
infringed only as a consequence of further modification of this
|
||||||
|
implementation. If you or your agent or exclusive licensee institute or
|
||||||
|
order or agree to the institution of patent litigation against any
|
||||||
|
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||||
|
that this implementation of Go or any code incorporated within this
|
||||||
|
implementation of Go constitutes direct or contributory patent
|
||||||
|
infringement, or inducement of patent infringement, then any patent
|
||||||
|
rights granted to you under this License for this implementation of Go
|
||||||
|
shall terminate as of the date such litigation is filed.
|
249
vendor/golang.org/x/text/encoding/charmap/charmap.go
generated
vendored
Normal file
249
vendor/golang.org/x/text/encoding/charmap/charmap.go
generated
vendored
Normal file
@ -0,0 +1,249 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
//go:generate go run maketables.go
|
||||||
|
|
||||||
|
// Package charmap provides simple character encodings such as IBM Code Page 437
|
||||||
|
// and Windows 1252.
|
||||||
|
package charmap // import "golang.org/x/text/encoding/charmap"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/internal"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// These encodings vary only in the way clients should interpret them. Their
// coded character set is identical and a single implementation can be shared.
var (
	// ISO8859_6E is the ISO 8859-6E encoding.
	ISO8859_6E encoding.Encoding = &iso8859_6E

	// ISO8859_6I is the ISO 8859-6I encoding.
	ISO8859_6I encoding.Encoding = &iso8859_6I

	// ISO8859_8E is the ISO 8859-8E encoding.
	ISO8859_8E encoding.Encoding = &iso8859_8E

	// ISO8859_8I is the ISO 8859-8I encoding.
	ISO8859_8I encoding.Encoding = &iso8859_8I

	// Each unexported variant wraps the shared base implementation
	// (ISO8859_6 or ISO8859_8) under its own name and MIB identifier.
	iso8859_6E = internal.Encoding{
		Encoding: ISO8859_6,
		Name:     "ISO-8859-6E",
		MIB:      identifier.ISO88596E,
	}

	iso8859_6I = internal.Encoding{
		Encoding: ISO8859_6,
		Name:     "ISO-8859-6I",
		MIB:      identifier.ISO88596I,
	}

	iso8859_8E = internal.Encoding{
		Encoding: ISO8859_8,
		Name:     "ISO-8859-8E",
		MIB:      identifier.ISO88598E,
	}

	iso8859_8I = internal.Encoding{
		Encoding: ISO8859_8,
		Name:     "ISO-8859-8I",
		MIB:      identifier.ISO88598I,
	}
)
|
||||||
|
|
||||||
|
// All is a list of all defined encodings in this package.
// listAll is defined elsewhere in this package.
var All []encoding.Encoding = listAll
|
||||||
|
|
||||||
|
// TODO: implement these encodings, in order of importance.
|
||||||
|
// ASCII, ISO8859_1: Rather common. Close to Windows 1252.
|
||||||
|
// ISO8859_9: Close to Windows 1254.
|
||||||
|
|
||||||
|
// utf8Enc holds a rune's UTF-8 encoding in data[:len].
type utf8Enc struct {
	len  uint8   // number of valid bytes in data
	data [3]byte // the UTF-8 bytes; up to 3 bytes per entry
}
|
||||||
|
|
||||||
|
// Charmap is an 8-bit character set encoding.
//
// It holds a 256-entry table mapping each encoded byte to its UTF-8 form
// (decode) and a rune-sorted table searched by binary search for the
// reverse direction (encode).
type Charmap struct {
	// name is the encoding's name.
	name string
	// mib is the encoding type of this encoder.
	mib identifier.MIB
	// asciiSuperset states whether the encoding is a superset of ASCII.
	asciiSuperset bool
	// low is the lower bound of the encoded byte for a non-ASCII rune. If
	// Charmap.asciiSuperset is true then this will be 0x80, otherwise 0x00.
	low uint8
	// replacement is the encoded replacement character.
	replacement byte
	// decode is the map from encoded byte to UTF-8.
	decode [256]utf8Enc
	// encoding is the map from runes to encoded bytes. Each entry is a
	// uint32: the high 8 bits are the encoded byte and the low 24 bits are
	// the rune. The table entries are sorted by ascending rune.
	encode [256]uint32
}
|
||||||
|
|
||||||
|
// NewDecoder implements the encoding.Encoding interface.
|
||||||
|
func (m *Charmap) NewDecoder() *encoding.Decoder {
|
||||||
|
return &encoding.Decoder{Transformer: charmapDecoder{charmap: m}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEncoder implements the encoding.Encoding interface.
|
||||||
|
func (m *Charmap) NewEncoder() *encoding.Encoder {
|
||||||
|
return &encoding.Encoder{Transformer: charmapEncoder{charmap: m}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the Charmap's name.
|
||||||
|
func (m *Charmap) String() string {
|
||||||
|
return m.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID implements an internal interface.
|
||||||
|
func (m *Charmap) ID() (mib identifier.MIB, other string) {
|
||||||
|
return m.mib, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// charmapDecoder implements transform.Transformer by decoding to UTF-8.
type charmapDecoder struct {
	transform.NopResetter
	charmap *Charmap // the 8-bit character set being decoded from
}
|
||||||
|
|
||||||
|
// Transform decodes the bytes in src into UTF-8 in dst, implementing
// transform.Transformer. Each input byte is expanded via the charmap's
// decode table; ASCII bytes are copied through directly when the charmap
// is an ASCII superset. It returns transform.ErrShortDst when dst cannot
// hold the next decoded rune.
func (m charmapDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	for i, c := range src {
		// Fast path: in ASCII supersets, bytes below 0x80 map to themselves.
		if m.charmap.asciiSuperset && c < utf8.RuneSelf {
			if nDst >= len(dst) {
				err = transform.ErrShortDst
				break
			}
			dst[nDst] = c
			nDst++
			nSrc = i + 1
			continue
		}

		// General path: copy the pre-computed UTF-8 bytes for this byte.
		decode := &m.charmap.decode[c]
		n := int(decode.len)
		if nDst+n > len(dst) {
			err = transform.ErrShortDst
			break
		}
		// It's 15% faster to avoid calling copy for these tiny slices.
		for j := 0; j < n; j++ {
			dst[nDst] = decode.data[j]
			nDst++
		}
		nSrc = i + 1
	}
	return nDst, nSrc, err
}
|
||||||
|
|
||||||
|
// DecodeByte returns the Charmap's rune decoding of the byte b.
|
||||||
|
func (m *Charmap) DecodeByte(b byte) rune {
|
||||||
|
switch x := &m.decode[b]; x.len {
|
||||||
|
case 1:
|
||||||
|
return rune(x.data[0])
|
||||||
|
case 2:
|
||||||
|
return rune(x.data[0]&0x1f)<<6 | rune(x.data[1]&0x3f)
|
||||||
|
default:
|
||||||
|
return rune(x.data[0]&0x0f)<<12 | rune(x.data[1]&0x3f)<<6 | rune(x.data[2]&0x3f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// charmapEncoder implements transform.Transformer by encoding from UTF-8.
type charmapEncoder struct {
	transform.NopResetter
	charmap *Charmap // the 8-bit character set being encoded to
}
|
||||||
|
|
||||||
|
// Transform encodes the UTF-8 bytes in src into the charmap's single-byte
// encoding in dst, implementing transform.Transformer. Runes absent from
// the charmap's repertoire yield an internal.RepertoireError carrying the
// replacement byte; truncated UTF-8 at the end of src yields
// transform.ErrShortSrc unless atEOF.
func (m charmapEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	r, size := rune(0), 0
loop:
	for nSrc < len(src) {
		if nDst >= len(dst) {
			err = transform.ErrShortDst
			break
		}
		r = rune(src[nSrc])

		// Decode a 1-byte rune.
		if r < utf8.RuneSelf {
			if m.charmap.asciiSuperset {
				// ASCII passes through unchanged in ASCII supersets.
				nSrc++
				dst[nDst] = uint8(r)
				nDst++
				continue
			}
			size = 1

		} else {
			// Decode a multi-byte rune.
			r, size = utf8.DecodeRune(src[nSrc:])
			if size == 1 {
				// All valid runes of size 1 (those below utf8.RuneSelf) were
				// handled above. We have invalid UTF-8 or we haven't seen the
				// full character yet.
				if !atEOF && !utf8.FullRune(src[nSrc:]) {
					err = transform.ErrShortSrc
				} else {
					err = internal.RepertoireError(m.charmap.replacement)
				}
				break
			}
		}

		// Binary search in [low, high) for that rune in the m.charmap.encode table.
		// Each entry packs the encoded byte in the high 8 bits and the rune
		// in the low 24 bits, sorted by ascending rune.
		for low, high := int(m.charmap.low), 0x100; ; {
			if low >= high {
				err = internal.RepertoireError(m.charmap.replacement)
				break loop
			}
			mid := (low + high) / 2
			got := m.charmap.encode[mid]
			gotRune := rune(got & (1<<24 - 1))
			if gotRune < r {
				low = mid + 1
			} else if gotRune > r {
				high = mid
			} else {
				dst[nDst] = byte(got >> 24)
				nDst++
				break
			}
		}
		nSrc += size
	}
	return nDst, nSrc, err
}
|
||||||
|
|
||||||
|
// EncodeRune returns the Charmap's byte encoding of the rune r. ok is whether
|
||||||
|
// r is in the Charmap's repertoire. If not, b is set to the Charmap's
|
||||||
|
// replacement byte. This is often the ASCII substitute character '\x1a'.
|
||||||
|
func (m *Charmap) EncodeRune(r rune) (b byte, ok bool) {
|
||||||
|
if r < utf8.RuneSelf && m.asciiSuperset {
|
||||||
|
return byte(r), true
|
||||||
|
}
|
||||||
|
for low, high := int(m.low), 0x100; ; {
|
||||||
|
if low >= high {
|
||||||
|
return m.replacement, false
|
||||||
|
}
|
||||||
|
mid := (low + high) / 2
|
||||||
|
got := m.encode[mid]
|
||||||
|
gotRune := rune(got & (1<<24 - 1))
|
||||||
|
if gotRune < r {
|
||||||
|
low = mid + 1
|
||||||
|
} else if gotRune > r {
|
||||||
|
high = mid
|
||||||
|
} else {
|
||||||
|
return byte(got >> 24), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
556
vendor/golang.org/x/text/encoding/charmap/maketables.go
generated
vendored
Normal file
556
vendor/golang.org/x/text/encoding/charmap/maketables.go
generated
vendored
Normal file
@ -0,0 +1,556 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/internal/gen"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ascii lists the first 128 Unicode code points (U+0000 through U+007F) in
// order. It forms the low half of the generated mapping strings (see the
// ascii+string(mapping) concatenations below).
const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" +
	"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
	` !"#$%&'()*+,-./0123456789:;<=>?` +
	`@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` +
	"`abcdefghijklmnopqrstuvwxyz{|}~\u007f"
|
||||||
|
|
||||||
|
var encodings = []struct {
|
||||||
|
name string
|
||||||
|
mib string
|
||||||
|
comment string
|
||||||
|
varName string
|
||||||
|
replacement byte
|
||||||
|
mapping string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"IBM Code Page 037",
|
||||||
|
"IBM037",
|
||||||
|
"",
|
||||||
|
"CodePage037",
|
||||||
|
0x3f,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM037-2.1.2.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"IBM Code Page 437",
|
||||||
|
"PC8CodePage437",
|
||||||
|
"",
|
||||||
|
"CodePage437",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM437-2.1.2.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"IBM Code Page 850",
|
||||||
|
"PC850Multilingual",
|
||||||
|
"",
|
||||||
|
"CodePage850",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM850-2.1.2.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"IBM Code Page 852",
|
||||||
|
"PCp852",
|
||||||
|
"",
|
||||||
|
"CodePage852",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM852-2.1.2.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"IBM Code Page 855",
|
||||||
|
"IBM855",
|
||||||
|
"",
|
||||||
|
"CodePage855",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM855-2.1.2.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Windows Code Page 858", // PC latin1 with Euro
|
||||||
|
"IBM00858",
|
||||||
|
"",
|
||||||
|
"CodePage858",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/windows-858-2000.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"IBM Code Page 860",
|
||||||
|
"IBM860",
|
||||||
|
"",
|
||||||
|
"CodePage860",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM860-2.1.2.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"IBM Code Page 862",
|
||||||
|
"PC862LatinHebrew",
|
||||||
|
"",
|
||||||
|
"CodePage862",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM862-2.1.2.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"IBM Code Page 863",
|
||||||
|
"IBM863",
|
||||||
|
"",
|
||||||
|
"CodePage863",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM863-2.1.2.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"IBM Code Page 865",
|
||||||
|
"IBM865",
|
||||||
|
"",
|
||||||
|
"CodePage865",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM865-2.1.2.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"IBM Code Page 866",
|
||||||
|
"IBM866",
|
||||||
|
"",
|
||||||
|
"CodePage866",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-ibm866.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"IBM Code Page 1047",
|
||||||
|
"IBM1047",
|
||||||
|
"",
|
||||||
|
"CodePage1047",
|
||||||
|
0x3f,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM1047-2.1.2.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"IBM Code Page 1140",
|
||||||
|
"IBM01140",
|
||||||
|
"",
|
||||||
|
"CodePage1140",
|
||||||
|
0x3f,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/ibm-1140_P100-1997.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-1",
|
||||||
|
"ISOLatin1",
|
||||||
|
"",
|
||||||
|
"ISO8859_1",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_1-1998.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-2",
|
||||||
|
"ISOLatin2",
|
||||||
|
"",
|
||||||
|
"ISO8859_2",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-iso-8859-2.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-3",
|
||||||
|
"ISOLatin3",
|
||||||
|
"",
|
||||||
|
"ISO8859_3",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-iso-8859-3.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-4",
|
||||||
|
"ISOLatin4",
|
||||||
|
"",
|
||||||
|
"ISO8859_4",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-iso-8859-4.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-5",
|
||||||
|
"ISOLatinCyrillic",
|
||||||
|
"",
|
||||||
|
"ISO8859_5",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-iso-8859-5.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-6",
|
||||||
|
"ISOLatinArabic",
|
||||||
|
"",
|
||||||
|
"ISO8859_6,ISO8859_6E,ISO8859_6I",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-iso-8859-6.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-7",
|
||||||
|
"ISOLatinGreek",
|
||||||
|
"",
|
||||||
|
"ISO8859_7",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-iso-8859-7.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-8",
|
||||||
|
"ISOLatinHebrew",
|
||||||
|
"",
|
||||||
|
"ISO8859_8,ISO8859_8E,ISO8859_8I",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-iso-8859-8.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-9",
|
||||||
|
"ISOLatin5",
|
||||||
|
"",
|
||||||
|
"ISO8859_9",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_9-1999.ucm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-10",
|
||||||
|
"ISOLatin6",
|
||||||
|
"",
|
||||||
|
"ISO8859_10",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-iso-8859-10.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-13",
|
||||||
|
"ISO885913",
|
||||||
|
"",
|
||||||
|
"ISO8859_13",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-iso-8859-13.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-14",
|
||||||
|
"ISO885914",
|
||||||
|
"",
|
||||||
|
"ISO8859_14",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-iso-8859-14.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-15",
|
||||||
|
"ISO885915",
|
||||||
|
"",
|
||||||
|
"ISO8859_15",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-iso-8859-15.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ISO 8859-16",
|
||||||
|
"ISO885916",
|
||||||
|
"",
|
||||||
|
"ISO8859_16",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-iso-8859-16.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"KOI8-R",
|
||||||
|
"KOI8R",
|
||||||
|
"",
|
||||||
|
"KOI8R",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-koi8-r.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"KOI8-U",
|
||||||
|
"KOI8U",
|
||||||
|
"",
|
||||||
|
"KOI8U",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-koi8-u.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Macintosh",
|
||||||
|
"Macintosh",
|
||||||
|
"",
|
||||||
|
"Macintosh",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-macintosh.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Macintosh Cyrillic",
|
||||||
|
"MacintoshCyrillic",
|
||||||
|
"",
|
||||||
|
"MacintoshCyrillic",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Windows 874",
|
||||||
|
"Windows874",
|
||||||
|
"",
|
||||||
|
"Windows874",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-windows-874.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Windows 1250",
|
||||||
|
"Windows1250",
|
||||||
|
"",
|
||||||
|
"Windows1250",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-windows-1250.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Windows 1251",
|
||||||
|
"Windows1251",
|
||||||
|
"",
|
||||||
|
"Windows1251",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-windows-1251.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Windows 1252",
|
||||||
|
"Windows1252",
|
||||||
|
"",
|
||||||
|
"Windows1252",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-windows-1252.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Windows 1253",
|
||||||
|
"Windows1253",
|
||||||
|
"",
|
||||||
|
"Windows1253",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-windows-1253.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Windows 1254",
|
||||||
|
"Windows1254",
|
||||||
|
"",
|
||||||
|
"Windows1254",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-windows-1254.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Windows 1255",
|
||||||
|
"Windows1255",
|
||||||
|
"",
|
||||||
|
"Windows1255",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-windows-1255.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Windows 1256",
|
||||||
|
"Windows1256",
|
||||||
|
"",
|
||||||
|
"Windows1256",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-windows-1256.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Windows 1257",
|
||||||
|
"Windows1257",
|
||||||
|
"",
|
||||||
|
"Windows1257",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-windows-1257.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Windows 1258",
|
||||||
|
"Windows1258",
|
||||||
|
"",
|
||||||
|
"Windows1258",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
"http://encoding.spec.whatwg.org/index-windows-1258.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"X-User-Defined",
|
||||||
|
"XUserDefined",
|
||||||
|
"It is defined at http://encoding.spec.whatwg.org/#x-user-defined",
|
||||||
|
"XUserDefined",
|
||||||
|
encoding.ASCIISub,
|
||||||
|
ascii +
|
||||||
|
"\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" +
|
||||||
|
"\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" +
|
||||||
|
"\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" +
|
||||||
|
"\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" +
|
||||||
|
"\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" +
|
||||||
|
"\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" +
|
||||||
|
"\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" +
|
||||||
|
"\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" +
|
||||||
|
"\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" +
|
||||||
|
"\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" +
|
||||||
|
"\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" +
|
||||||
|
"\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" +
|
||||||
|
"\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" +
|
||||||
|
"\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" +
|
||||||
|
"\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" +
|
||||||
|
"\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func getWHATWG(url string) string {
|
||||||
|
res, err := http.Get(url)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("%q: Get: %v", url, err)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
|
||||||
|
mapping := make([]rune, 128)
|
||||||
|
for i := range mapping {
|
||||||
|
mapping[i] = '\ufffd'
|
||||||
|
}
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(res.Body)
|
||||||
|
for scanner.Scan() {
|
||||||
|
s := strings.TrimSpace(scanner.Text())
|
||||||
|
if s == "" || s[0] == '#' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
x, y := 0, 0
|
||||||
|
if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil {
|
||||||
|
log.Fatalf("could not parse %q", s)
|
||||||
|
}
|
||||||
|
if x < 0 || 128 <= x {
|
||||||
|
log.Fatalf("code %d is out of range", x)
|
||||||
|
}
|
||||||
|
if 0x80 <= y && y < 0xa0 {
|
||||||
|
// We diverge from the WHATWG spec by mapping control characters
|
||||||
|
// in the range [0x80, 0xa0) to U+FFFD.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
mapping[x] = rune(y)
|
||||||
|
}
|
||||||
|
return ascii + string(mapping)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getUCM(url string) string {
|
||||||
|
res, err := http.Get(url)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("%q: Get: %v", url, err)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
|
||||||
|
mapping := make([]rune, 256)
|
||||||
|
for i := range mapping {
|
||||||
|
mapping[i] = '\ufffd'
|
||||||
|
}
|
||||||
|
|
||||||
|
charsFound := 0
|
||||||
|
scanner := bufio.NewScanner(res.Body)
|
||||||
|
for scanner.Scan() {
|
||||||
|
s := strings.TrimSpace(scanner.Text())
|
||||||
|
if s == "" || s[0] == '#' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var c byte
|
||||||
|
var r rune
|
||||||
|
if _, err := fmt.Sscanf(s, `<U%x> \x%x |0`, &r, &c); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
mapping[c] = r
|
||||||
|
charsFound++
|
||||||
|
}
|
||||||
|
|
||||||
|
if charsFound < 200 {
|
||||||
|
log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound)
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(mapping)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
mibs := map[string]bool{}
|
||||||
|
all := []string{}
|
||||||
|
|
||||||
|
w := gen.NewCodeWriter()
|
||||||
|
defer w.WriteGoFile("tables.go", "charmap")
|
||||||
|
|
||||||
|
printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) }
|
||||||
|
|
||||||
|
printf("import (\n")
|
||||||
|
printf("\t\"golang.org/x/text/encoding\"\n")
|
||||||
|
printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n")
|
||||||
|
printf(")\n\n")
|
||||||
|
for _, e := range encodings {
|
||||||
|
varNames := strings.Split(e.varName, ",")
|
||||||
|
all = append(all, varNames...)
|
||||||
|
varName := varNames[0]
|
||||||
|
switch {
|
||||||
|
case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"):
|
||||||
|
e.mapping = getWHATWG(e.mapping)
|
||||||
|
case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"):
|
||||||
|
e.mapping = getUCM(e.mapping)
|
||||||
|
}
|
||||||
|
|
||||||
|
asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00
|
||||||
|
if asciiSuperset {
|
||||||
|
low = 0x80
|
||||||
|
}
|
||||||
|
lvn := 1
|
||||||
|
if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") {
|
||||||
|
lvn = 3
|
||||||
|
}
|
||||||
|
lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:]
|
||||||
|
printf("// %s is the %s encoding.\n", varName, e.name)
|
||||||
|
if e.comment != "" {
|
||||||
|
printf("//\n// %s\n", e.comment)
|
||||||
|
}
|
||||||
|
printf("var %s *Charmap = &%s\n\nvar %s = Charmap{\nname: %q,\n",
|
||||||
|
varName, lowerVarName, lowerVarName, e.name)
|
||||||
|
if mibs[e.mib] {
|
||||||
|
log.Fatalf("MIB type %q declared multiple times.", e.mib)
|
||||||
|
}
|
||||||
|
printf("mib: identifier.%s,\n", e.mib)
|
||||||
|
printf("asciiSuperset: %t,\n", asciiSuperset)
|
||||||
|
printf("low: 0x%02x,\n", low)
|
||||||
|
printf("replacement: 0x%02x,\n", e.replacement)
|
||||||
|
|
||||||
|
printf("decode: [256]utf8Enc{\n")
|
||||||
|
i, backMapping := 0, map[rune]byte{}
|
||||||
|
for _, c := range e.mapping {
|
||||||
|
if _, ok := backMapping[c]; !ok && c != utf8.RuneError {
|
||||||
|
backMapping[c] = byte(i)
|
||||||
|
}
|
||||||
|
var buf [8]byte
|
||||||
|
n := utf8.EncodeRune(buf[:], c)
|
||||||
|
if n > 3 {
|
||||||
|
panic(fmt.Sprintf("rune %q (%U) is too long", c, c))
|
||||||
|
}
|
||||||
|
printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2])
|
||||||
|
if i%2 == 1 {
|
||||||
|
printf("\n")
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
printf("},\n")
|
||||||
|
|
||||||
|
printf("encode: [256]uint32{\n")
|
||||||
|
encode := make([]uint32, 0, 256)
|
||||||
|
for c, i := range backMapping {
|
||||||
|
encode = append(encode, uint32(i)<<24|uint32(c))
|
||||||
|
}
|
||||||
|
sort.Sort(byRune(encode))
|
||||||
|
for len(encode) < cap(encode) {
|
||||||
|
encode = append(encode, encode[len(encode)-1])
|
||||||
|
}
|
||||||
|
for i, enc := range encode {
|
||||||
|
printf("0x%08x,", enc)
|
||||||
|
if i%8 == 7 {
|
||||||
|
printf("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
printf("},\n}\n")
|
||||||
|
|
||||||
|
// Add an estimate of the size of a single Charmap{} struct value, which
|
||||||
|
// includes two 256 elem arrays of 4 bytes and some extra fields, which
|
||||||
|
// align to 3 uint64s on 64-bit architectures.
|
||||||
|
w.Size += 2*4*256 + 3*8
|
||||||
|
}
|
||||||
|
// TODO: add proper line breaking.
|
||||||
|
printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
type byRune []uint32
|
||||||
|
|
||||||
|
func (b byRune) Len() int { return len(b) }
|
||||||
|
func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff }
|
||||||
|
func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
7410
vendor/golang.org/x/text/encoding/charmap/tables.go
generated
vendored
Normal file
7410
vendor/golang.org/x/text/encoding/charmap/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
335
vendor/golang.org/x/text/encoding/encoding.go
generated
vendored
Normal file
335
vendor/golang.org/x/text/encoding/encoding.go
generated
vendored
Normal file
@ -0,0 +1,335 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package encoding defines an interface for character encodings, such as Shift
|
||||||
|
// JIS and Windows 1252, that can convert to and from UTF-8.
|
||||||
|
//
|
||||||
|
// Encoding implementations are provided in other packages, such as
|
||||||
|
// golang.org/x/text/encoding/charmap and
|
||||||
|
// golang.org/x/text/encoding/japanese.
|
||||||
|
package encoding // import "golang.org/x/text/encoding"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO:
|
||||||
|
// - There seems to be some inconsistency in when decoders return errors
|
||||||
|
// and when not. Also documentation seems to suggest they shouldn't return
|
||||||
|
// errors at all (except for UTF-16).
|
||||||
|
// - Encoders seem to rely on or at least benefit from the input being in NFC
|
||||||
|
// normal form. Perhaps add an example how users could prepare their output.
|
||||||
|
|
||||||
|
// Encoding is a character set encoding that can be transformed to and from
|
||||||
|
// UTF-8.
|
||||||
|
type Encoding interface {
|
||||||
|
// NewDecoder returns a Decoder.
|
||||||
|
NewDecoder() *Decoder
|
||||||
|
|
||||||
|
// NewEncoder returns an Encoder.
|
||||||
|
NewEncoder() *Encoder
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Decoder converts bytes to UTF-8. It implements transform.Transformer.
|
||||||
|
//
|
||||||
|
// Transforming source bytes that are not of that encoding will not result in an
|
||||||
|
// error per se. Each byte that cannot be transcoded will be represented in the
|
||||||
|
// output by the UTF-8 encoding of '\uFFFD', the replacement rune.
|
||||||
|
type Decoder struct {
|
||||||
|
transform.Transformer
|
||||||
|
|
||||||
|
// This forces external creators of Decoders to use names in struct
|
||||||
|
// initializers, allowing for future extendibility without having to break
|
||||||
|
// code.
|
||||||
|
_ struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes converts the given encoded bytes to UTF-8. It returns the converted
|
||||||
|
// bytes or nil, err if any error occurred.
|
||||||
|
func (d *Decoder) Bytes(b []byte) ([]byte, error) {
|
||||||
|
b, _, err := transform.Bytes(d, b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// String converts the given encoded string to UTF-8. It returns the converted
|
||||||
|
// string or "", err if any error occurred.
|
||||||
|
func (d *Decoder) String(s string) (string, error) {
|
||||||
|
s, _, err := transform.String(d, s)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reader wraps another Reader to decode its bytes.
|
||||||
|
//
|
||||||
|
// The Decoder may not be used for any other operation as long as the returned
|
||||||
|
// Reader is in use.
|
||||||
|
func (d *Decoder) Reader(r io.Reader) io.Reader {
|
||||||
|
return transform.NewReader(r, d)
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Encoder converts bytes from UTF-8. It implements transform.Transformer.
|
||||||
|
//
|
||||||
|
// Each rune that cannot be transcoded will result in an error. In this case,
|
||||||
|
// the transform will consume all source byte up to, not including the offending
|
||||||
|
// rune. Transforming source bytes that are not valid UTF-8 will be replaced by
|
||||||
|
// `\uFFFD`. To return early with an error instead, use transform.Chain to
|
||||||
|
// preprocess the data with a UTF8Validator.
|
||||||
|
type Encoder struct {
|
||||||
|
transform.Transformer
|
||||||
|
|
||||||
|
// This forces external creators of Encoders to use names in struct
|
||||||
|
// initializers, allowing for future extendibility without having to break
|
||||||
|
// code.
|
||||||
|
_ struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if
|
||||||
|
// any error occurred.
|
||||||
|
func (e *Encoder) Bytes(b []byte) ([]byte, error) {
|
||||||
|
b, _, err := transform.Bytes(e, b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// String converts a string from UTF-8. It returns the converted string or
|
||||||
|
// "", err if any error occurred.
|
||||||
|
func (e *Encoder) String(s string) (string, error) {
|
||||||
|
s, _, err := transform.String(e, s)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writer wraps another Writer to encode its UTF-8 output.
|
||||||
|
//
|
||||||
|
// The Encoder may not be used for any other operation as long as the returned
|
||||||
|
// Writer is in use.
|
||||||
|
func (e *Encoder) Writer(w io.Writer) io.Writer {
|
||||||
|
return transform.NewWriter(w, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ASCIISub is the ASCII substitute character, as recommended by
|
||||||
|
// http://unicode.org/reports/tr36/#Text_Comparison
|
||||||
|
const ASCIISub = '\x1a'
|
||||||
|
|
||||||
|
// Nop is the nop encoding. Its transformed bytes are the same as the source
|
||||||
|
// bytes; it does not replace invalid UTF-8 sequences.
|
||||||
|
var Nop Encoding = nop{}
|
||||||
|
|
||||||
|
type nop struct{}
|
||||||
|
|
||||||
|
func (nop) NewDecoder() *Decoder {
|
||||||
|
return &Decoder{Transformer: transform.Nop}
|
||||||
|
}
|
||||||
|
func (nop) NewEncoder() *Encoder {
|
||||||
|
return &Encoder{Transformer: transform.Nop}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Replacement is the replacement encoding. Decoding from the replacement
|
||||||
|
// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to
|
||||||
|
// the replacement encoding yields the same as the source bytes except that
|
||||||
|
// invalid UTF-8 is converted to '\uFFFD'.
|
||||||
|
//
|
||||||
|
// It is defined at http://encoding.spec.whatwg.org/#replacement
|
||||||
|
var Replacement Encoding = replacement{}
|
||||||
|
|
||||||
|
type replacement struct{}
|
||||||
|
|
||||||
|
func (replacement) NewDecoder() *Decoder {
|
||||||
|
return &Decoder{Transformer: replacementDecoder{}}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (replacement) NewEncoder() *Encoder {
|
||||||
|
return &Encoder{Transformer: replacementEncoder{}}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (replacement) ID() (mib identifier.MIB, other string) {
|
||||||
|
return identifier.Replacement, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
type replacementDecoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
if len(dst) < 3 {
|
||||||
|
return 0, 0, transform.ErrShortDst
|
||||||
|
}
|
||||||
|
if atEOF {
|
||||||
|
const fffd = "\ufffd"
|
||||||
|
dst[0] = fffd[0]
|
||||||
|
dst[1] = fffd[1]
|
||||||
|
dst[2] = fffd[2]
|
||||||
|
nDst = 3
|
||||||
|
}
|
||||||
|
return nDst, len(src), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type replacementEncoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
r = rune(src[nSrc])
|
||||||
|
|
||||||
|
// Decode a 1-byte rune.
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// Decode a multi-byte rune.
|
||||||
|
r, size = utf8.DecodeRune(src[nSrc:])
|
||||||
|
if size == 1 {
|
||||||
|
// All valid runes of size 1 (those below utf8.RuneSelf) were
|
||||||
|
// handled above. We have invalid UTF-8 or we haven't seen the
|
||||||
|
// full character yet.
|
||||||
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
r = '\ufffd'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTMLEscapeUnsupported wraps encoders to replace source runes outside the
|
||||||
|
// repertoire of the destination encoding with HTML escape sequences.
|
||||||
|
//
|
||||||
|
// This wrapper exists to comply to URL and HTML forms requiring a
|
||||||
|
// non-terminating legacy encoder. The produced sequences may lead to data
|
||||||
|
// loss as they are indistinguishable from legitimate input. To avoid this
|
||||||
|
// issue, use UTF-8 encodings whenever possible.
|
||||||
|
func HTMLEscapeUnsupported(e *Encoder) *Encoder {
|
||||||
|
return &Encoder{Transformer: &errorHandler{e, errorToHTML}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReplaceUnsupported wraps encoders to replace source runes outside the
|
||||||
|
// repertoire of the destination encoding with an encoding-specific
|
||||||
|
// replacement.
|
||||||
|
//
|
||||||
|
// This wrapper is only provided for backwards compatibility and legacy
|
||||||
|
// handling. Its use is strongly discouraged. Use UTF-8 whenever possible.
|
||||||
|
func ReplaceUnsupported(e *Encoder) *Encoder {
|
||||||
|
return &Encoder{Transformer: &errorHandler{e, errorToReplacement}}
|
||||||
|
}
|
||||||
|
|
||||||
|
type errorHandler struct {
|
||||||
|
*Encoder
|
||||||
|
handler func(dst []byte, r rune, err repertoireError) (n int, ok bool)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: consider making this error public in some form.
|
||||||
|
type repertoireError interface {
|
||||||
|
Replacement() byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF)
|
||||||
|
for err != nil {
|
||||||
|
rerr, ok := err.(repertoireError)
|
||||||
|
if !ok {
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
r, sz := utf8.DecodeRune(src[nSrc:])
|
||||||
|
n, ok := h.handler(dst[nDst:], r, rerr)
|
||||||
|
if !ok {
|
||||||
|
return nDst, nSrc, transform.ErrShortDst
|
||||||
|
}
|
||||||
|
err = nil
|
||||||
|
nDst += n
|
||||||
|
if nSrc += sz; nSrc < len(src) {
|
||||||
|
var dn, sn int
|
||||||
|
dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF)
|
||||||
|
nDst += dn
|
||||||
|
nSrc += sn
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) {
|
||||||
|
buf := [8]byte{}
|
||||||
|
b := strconv.AppendUint(buf[:0], uint64(r), 10)
|
||||||
|
if n = len(b) + len("&#;"); n >= len(dst) {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
dst[0] = '&'
|
||||||
|
dst[1] = '#'
|
||||||
|
dst[copy(dst[2:], b)+2] = ';'
|
||||||
|
return n, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) {
|
||||||
|
if len(dst) == 0 {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
dst[0] = err.Replacement()
|
||||||
|
return 1, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8.
|
||||||
|
var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8")
|
||||||
|
|
||||||
|
// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first
|
||||||
|
// input byte that is not valid UTF-8.
|
||||||
|
var UTF8Validator transform.Transformer = utf8Validator{}
|
||||||
|
|
||||||
|
type utf8Validator struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
n := len(src)
|
||||||
|
if n > len(dst) {
|
||||||
|
n = len(dst)
|
||||||
|
}
|
||||||
|
for i := 0; i < n; {
|
||||||
|
if c := src[i]; c < utf8.RuneSelf {
|
||||||
|
dst[i] = c
|
||||||
|
i++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
_, size := utf8.DecodeRune(src[i:])
|
||||||
|
if size == 1 {
|
||||||
|
// All valid runes of size 1 (those below utf8.RuneSelf) were
|
||||||
|
// handled above. We have invalid UTF-8 or we haven't seen the
|
||||||
|
// full character yet.
|
||||||
|
err = ErrInvalidUTF8
|
||||||
|
if !atEOF && !utf8.FullRune(src[i:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
}
|
||||||
|
return i, i, err
|
||||||
|
}
|
||||||
|
if i+size > len(dst) {
|
||||||
|
return i, i, transform.ErrShortDst
|
||||||
|
}
|
||||||
|
for ; size > 0; size-- {
|
||||||
|
dst[i] = src[i]
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(src) > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
}
|
||||||
|
return n, n, err
|
||||||
|
}
|
173
vendor/golang.org/x/text/encoding/htmlindex/gen.go
generated
vendored
Normal file
173
vendor/golang.org/x/text/encoding/htmlindex/gen.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/text/internal/gen"
|
||||||
|
)
|
||||||
|
|
||||||
|
type group struct {
|
||||||
|
Encodings []struct {
|
||||||
|
Labels []string
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
gen.Init()
|
||||||
|
|
||||||
|
r := gen.Open("https://encoding.spec.whatwg.org", "whatwg", "encodings.json")
|
||||||
|
var groups []group
|
||||||
|
if err := json.NewDecoder(r).Decode(&groups); err != nil {
|
||||||
|
log.Fatalf("Error reading encodings.json: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w := &bytes.Buffer{}
|
||||||
|
fmt.Fprintln(w, "type htmlEncoding byte")
|
||||||
|
fmt.Fprintln(w, "const (")
|
||||||
|
for i, g := range groups {
|
||||||
|
for _, e := range g.Encodings {
|
||||||
|
key := strings.ToLower(e.Name)
|
||||||
|
name := consts[key]
|
||||||
|
if name == "" {
|
||||||
|
log.Fatalf("No const defined for %s.", key)
|
||||||
|
}
|
||||||
|
if i == 0 {
|
||||||
|
fmt.Fprintf(w, "%s htmlEncoding = iota\n", name)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(w, "%s\n", name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintln(w, "numEncodings")
|
||||||
|
fmt.Fprint(w, ")\n\n")
|
||||||
|
|
||||||
|
fmt.Fprintln(w, "var canonical = [numEncodings]string{")
|
||||||
|
for _, g := range groups {
|
||||||
|
for _, e := range g.Encodings {
|
||||||
|
fmt.Fprintf(w, "%q,\n", strings.ToLower(e.Name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprint(w, "}\n\n")
|
||||||
|
|
||||||
|
fmt.Fprintln(w, "var nameMap = map[string]htmlEncoding{")
|
||||||
|
for _, g := range groups {
|
||||||
|
for _, e := range g.Encodings {
|
||||||
|
for _, l := range e.Labels {
|
||||||
|
key := strings.ToLower(e.Name)
|
||||||
|
name := consts[key]
|
||||||
|
fmt.Fprintf(w, "%q: %s,\n", l, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprint(w, "}\n\n")
|
||||||
|
|
||||||
|
var tags []string
|
||||||
|
fmt.Fprintln(w, "var localeMap = []htmlEncoding{")
|
||||||
|
for _, loc := range locales {
|
||||||
|
tags = append(tags, loc.tag)
|
||||||
|
fmt.Fprintf(w, "%s, // %s \n", consts[loc.name], loc.tag)
|
||||||
|
}
|
||||||
|
fmt.Fprint(w, "}\n\n")
|
||||||
|
|
||||||
|
fmt.Fprintf(w, "const locales = %q\n", strings.Join(tags, " "))
|
||||||
|
|
||||||
|
gen.WriteGoFile("tables.go", "htmlindex", w.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
// consts maps canonical encoding name to internal constant.
|
||||||
|
var consts = map[string]string{
|
||||||
|
"utf-8": "utf8",
|
||||||
|
"ibm866": "ibm866",
|
||||||
|
"iso-8859-2": "iso8859_2",
|
||||||
|
"iso-8859-3": "iso8859_3",
|
||||||
|
"iso-8859-4": "iso8859_4",
|
||||||
|
"iso-8859-5": "iso8859_5",
|
||||||
|
"iso-8859-6": "iso8859_6",
|
||||||
|
"iso-8859-7": "iso8859_7",
|
||||||
|
"iso-8859-8": "iso8859_8",
|
||||||
|
"iso-8859-8-i": "iso8859_8I",
|
||||||
|
"iso-8859-10": "iso8859_10",
|
||||||
|
"iso-8859-13": "iso8859_13",
|
||||||
|
"iso-8859-14": "iso8859_14",
|
||||||
|
"iso-8859-15": "iso8859_15",
|
||||||
|
"iso-8859-16": "iso8859_16",
|
||||||
|
"koi8-r": "koi8r",
|
||||||
|
"koi8-u": "koi8u",
|
||||||
|
"macintosh": "macintosh",
|
||||||
|
"windows-874": "windows874",
|
||||||
|
"windows-1250": "windows1250",
|
||||||
|
"windows-1251": "windows1251",
|
||||||
|
"windows-1252": "windows1252",
|
||||||
|
"windows-1253": "windows1253",
|
||||||
|
"windows-1254": "windows1254",
|
||||||
|
"windows-1255": "windows1255",
|
||||||
|
"windows-1256": "windows1256",
|
||||||
|
"windows-1257": "windows1257",
|
||||||
|
"windows-1258": "windows1258",
|
||||||
|
"x-mac-cyrillic": "macintoshCyrillic",
|
||||||
|
"gbk": "gbk",
|
||||||
|
"gb18030": "gb18030",
|
||||||
|
// "hz-gb-2312": "hzgb2312", // Was removed from WhatWG
|
||||||
|
"big5": "big5",
|
||||||
|
"euc-jp": "eucjp",
|
||||||
|
"iso-2022-jp": "iso2022jp",
|
||||||
|
"shift_jis": "shiftJIS",
|
||||||
|
"euc-kr": "euckr",
|
||||||
|
"replacement": "replacement",
|
||||||
|
"utf-16be": "utf16be",
|
||||||
|
"utf-16le": "utf16le",
|
||||||
|
"x-user-defined": "xUserDefined",
|
||||||
|
}
|
||||||
|
|
||||||
|
// locales is taken from
|
||||||
|
// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm.
|
||||||
|
var locales = []struct{ tag, name string }{
|
||||||
|
// The default value. Explicitly state latin to benefit from the exact
|
||||||
|
// script option, while still making 1252 the default encoding for languages
|
||||||
|
// written in Latin script.
|
||||||
|
{"und_Latn", "windows-1252"},
|
||||||
|
{"ar", "windows-1256"},
|
||||||
|
{"ba", "windows-1251"},
|
||||||
|
{"be", "windows-1251"},
|
||||||
|
{"bg", "windows-1251"},
|
||||||
|
{"cs", "windows-1250"},
|
||||||
|
{"el", "iso-8859-7"},
|
||||||
|
{"et", "windows-1257"},
|
||||||
|
{"fa", "windows-1256"},
|
||||||
|
{"he", "windows-1255"},
|
||||||
|
{"hr", "windows-1250"},
|
||||||
|
{"hu", "iso-8859-2"},
|
||||||
|
{"ja", "shift_jis"},
|
||||||
|
{"kk", "windows-1251"},
|
||||||
|
{"ko", "euc-kr"},
|
||||||
|
{"ku", "windows-1254"},
|
||||||
|
{"ky", "windows-1251"},
|
||||||
|
{"lt", "windows-1257"},
|
||||||
|
{"lv", "windows-1257"},
|
||||||
|
{"mk", "windows-1251"},
|
||||||
|
{"pl", "iso-8859-2"},
|
||||||
|
{"ru", "windows-1251"},
|
||||||
|
{"sah", "windows-1251"},
|
||||||
|
{"sk", "windows-1250"},
|
||||||
|
{"sl", "iso-8859-2"},
|
||||||
|
{"sr", "windows-1251"},
|
||||||
|
{"tg", "windows-1251"},
|
||||||
|
{"th", "windows-874"},
|
||||||
|
{"tr", "windows-1254"},
|
||||||
|
{"tt", "windows-1251"},
|
||||||
|
{"uk", "windows-1251"},
|
||||||
|
{"vi", "windows-1258"},
|
||||||
|
{"zh-hans", "gb18030"},
|
||||||
|
{"zh-hant", "big5"},
|
||||||
|
}
|
86
vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go
generated
vendored
Normal file
86
vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
//go:generate go run gen.go
|
||||||
|
|
||||||
|
// Package htmlindex maps character set encoding names to Encodings as
|
||||||
|
// recommended by the W3C for use in HTML 5. See http://www.w3.org/TR/encoding.
|
||||||
|
package htmlindex
|
||||||
|
|
||||||
|
// TODO: perhaps have a "bare" version of the index (used by this package) that
|
||||||
|
// is not pre-loaded with all encodings. Global variables in encodings prevent
|
||||||
|
// the linker from being able to purge unneeded tables. This means that
|
||||||
|
// referencing all encodings, as this package does for the default index, links
|
||||||
|
// in all encodings unconditionally.
|
||||||
|
//
|
||||||
|
// This issue can be solved by either solving the linking issue (see
|
||||||
|
// https://github.com/golang/go/issues/6330) or refactoring the encoding tables
|
||||||
|
// (e.g. moving the tables to internal packages that do not use global
|
||||||
|
// variables).
|
||||||
|
|
||||||
|
// TODO: allow canonicalizing names
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/language"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errInvalidName = errors.New("htmlindex: invalid encoding name")
|
||||||
|
errUnknown = errors.New("htmlindex: unknown Encoding")
|
||||||
|
errUnsupported = errors.New("htmlindex: this encoding is not supported")
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
matcherOnce sync.Once
|
||||||
|
matcher language.Matcher
|
||||||
|
)
|
||||||
|
|
||||||
|
// LanguageDefault returns the canonical name of the default encoding for a
|
||||||
|
// given language.
|
||||||
|
func LanguageDefault(tag language.Tag) string {
|
||||||
|
matcherOnce.Do(func() {
|
||||||
|
tags := []language.Tag{}
|
||||||
|
for _, t := range strings.Split(locales, " ") {
|
||||||
|
tags = append(tags, language.MustParse(t))
|
||||||
|
}
|
||||||
|
matcher = language.NewMatcher(tags, language.PreferSameScript(true))
|
||||||
|
})
|
||||||
|
_, i, _ := matcher.Match(tag)
|
||||||
|
return canonical[localeMap[i]] // Default is Windows-1252.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns an Encoding for one of the names listed in
|
||||||
|
// http://www.w3.org/TR/encoding using the Default Index. Matching is case-
|
||||||
|
// insensitive.
|
||||||
|
func Get(name string) (encoding.Encoding, error) {
|
||||||
|
x, ok := nameMap[strings.ToLower(strings.TrimSpace(name))]
|
||||||
|
if !ok {
|
||||||
|
return nil, errInvalidName
|
||||||
|
}
|
||||||
|
return encodings[x], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name reports the canonical name of the given Encoding. It will return
|
||||||
|
// an error if e is not associated with a supported encoding scheme.
|
||||||
|
func Name(e encoding.Encoding) (string, error) {
|
||||||
|
id, ok := e.(identifier.Interface)
|
||||||
|
if !ok {
|
||||||
|
return "", errUnknown
|
||||||
|
}
|
||||||
|
mib, _ := id.ID()
|
||||||
|
if mib == 0 {
|
||||||
|
return "", errUnknown
|
||||||
|
}
|
||||||
|
v, ok := mibMap[mib]
|
||||||
|
if !ok {
|
||||||
|
return "", errUnsupported
|
||||||
|
}
|
||||||
|
return canonical[v], nil
|
||||||
|
}
|
105
vendor/golang.org/x/text/encoding/htmlindex/map.go
generated
vendored
Normal file
105
vendor/golang.org/x/text/encoding/htmlindex/map.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package htmlindex
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/charmap"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/encoding/japanese"
|
||||||
|
"golang.org/x/text/encoding/korean"
|
||||||
|
"golang.org/x/text/encoding/simplifiedchinese"
|
||||||
|
"golang.org/x/text/encoding/traditionalchinese"
|
||||||
|
"golang.org/x/text/encoding/unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mibMap maps a MIB identifier to an htmlEncoding index.
|
||||||
|
var mibMap = map[identifier.MIB]htmlEncoding{
|
||||||
|
identifier.UTF8: utf8,
|
||||||
|
identifier.UTF16BE: utf16be,
|
||||||
|
identifier.UTF16LE: utf16le,
|
||||||
|
identifier.IBM866: ibm866,
|
||||||
|
identifier.ISOLatin2: iso8859_2,
|
||||||
|
identifier.ISOLatin3: iso8859_3,
|
||||||
|
identifier.ISOLatin4: iso8859_4,
|
||||||
|
identifier.ISOLatinCyrillic: iso8859_5,
|
||||||
|
identifier.ISOLatinArabic: iso8859_6,
|
||||||
|
identifier.ISOLatinGreek: iso8859_7,
|
||||||
|
identifier.ISOLatinHebrew: iso8859_8,
|
||||||
|
identifier.ISO88598I: iso8859_8I,
|
||||||
|
identifier.ISOLatin6: iso8859_10,
|
||||||
|
identifier.ISO885913: iso8859_13,
|
||||||
|
identifier.ISO885914: iso8859_14,
|
||||||
|
identifier.ISO885915: iso8859_15,
|
||||||
|
identifier.ISO885916: iso8859_16,
|
||||||
|
identifier.KOI8R: koi8r,
|
||||||
|
identifier.KOI8U: koi8u,
|
||||||
|
identifier.Macintosh: macintosh,
|
||||||
|
identifier.MacintoshCyrillic: macintoshCyrillic,
|
||||||
|
identifier.Windows874: windows874,
|
||||||
|
identifier.Windows1250: windows1250,
|
||||||
|
identifier.Windows1251: windows1251,
|
||||||
|
identifier.Windows1252: windows1252,
|
||||||
|
identifier.Windows1253: windows1253,
|
||||||
|
identifier.Windows1254: windows1254,
|
||||||
|
identifier.Windows1255: windows1255,
|
||||||
|
identifier.Windows1256: windows1256,
|
||||||
|
identifier.Windows1257: windows1257,
|
||||||
|
identifier.Windows1258: windows1258,
|
||||||
|
identifier.XUserDefined: xUserDefined,
|
||||||
|
identifier.GBK: gbk,
|
||||||
|
identifier.GB18030: gb18030,
|
||||||
|
identifier.Big5: big5,
|
||||||
|
identifier.EUCPkdFmtJapanese: eucjp,
|
||||||
|
identifier.ISO2022JP: iso2022jp,
|
||||||
|
identifier.ShiftJIS: shiftJIS,
|
||||||
|
identifier.EUCKR: euckr,
|
||||||
|
identifier.Replacement: replacement,
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodings maps the internal htmlEncoding to an Encoding.
|
||||||
|
// TODO: consider using a reusable index in encoding/internal.
|
||||||
|
var encodings = [numEncodings]encoding.Encoding{
|
||||||
|
utf8: unicode.UTF8,
|
||||||
|
ibm866: charmap.CodePage866,
|
||||||
|
iso8859_2: charmap.ISO8859_2,
|
||||||
|
iso8859_3: charmap.ISO8859_3,
|
||||||
|
iso8859_4: charmap.ISO8859_4,
|
||||||
|
iso8859_5: charmap.ISO8859_5,
|
||||||
|
iso8859_6: charmap.ISO8859_6,
|
||||||
|
iso8859_7: charmap.ISO8859_7,
|
||||||
|
iso8859_8: charmap.ISO8859_8,
|
||||||
|
iso8859_8I: charmap.ISO8859_8I,
|
||||||
|
iso8859_10: charmap.ISO8859_10,
|
||||||
|
iso8859_13: charmap.ISO8859_13,
|
||||||
|
iso8859_14: charmap.ISO8859_14,
|
||||||
|
iso8859_15: charmap.ISO8859_15,
|
||||||
|
iso8859_16: charmap.ISO8859_16,
|
||||||
|
koi8r: charmap.KOI8R,
|
||||||
|
koi8u: charmap.KOI8U,
|
||||||
|
macintosh: charmap.Macintosh,
|
||||||
|
windows874: charmap.Windows874,
|
||||||
|
windows1250: charmap.Windows1250,
|
||||||
|
windows1251: charmap.Windows1251,
|
||||||
|
windows1252: charmap.Windows1252,
|
||||||
|
windows1253: charmap.Windows1253,
|
||||||
|
windows1254: charmap.Windows1254,
|
||||||
|
windows1255: charmap.Windows1255,
|
||||||
|
windows1256: charmap.Windows1256,
|
||||||
|
windows1257: charmap.Windows1257,
|
||||||
|
windows1258: charmap.Windows1258,
|
||||||
|
macintoshCyrillic: charmap.MacintoshCyrillic,
|
||||||
|
gbk: simplifiedchinese.GBK,
|
||||||
|
gb18030: simplifiedchinese.GB18030,
|
||||||
|
big5: traditionalchinese.Big5,
|
||||||
|
eucjp: japanese.EUCJP,
|
||||||
|
iso2022jp: japanese.ISO2022JP,
|
||||||
|
shiftJIS: japanese.ShiftJIS,
|
||||||
|
euckr: korean.EUCKR,
|
||||||
|
replacement: encoding.Replacement,
|
||||||
|
utf16be: unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM),
|
||||||
|
utf16le: unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM),
|
||||||
|
xUserDefined: charmap.XUserDefined,
|
||||||
|
}
|
352
vendor/golang.org/x/text/encoding/htmlindex/tables.go
generated
vendored
Normal file
352
vendor/golang.org/x/text/encoding/htmlindex/tables.go
generated
vendored
Normal file
@ -0,0 +1,352 @@
|
|||||||
|
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||||
|
|
||||||
|
package htmlindex
|
||||||
|
|
||||||
|
type htmlEncoding byte
|
||||||
|
|
||||||
|
const (
|
||||||
|
utf8 htmlEncoding = iota
|
||||||
|
ibm866
|
||||||
|
iso8859_2
|
||||||
|
iso8859_3
|
||||||
|
iso8859_4
|
||||||
|
iso8859_5
|
||||||
|
iso8859_6
|
||||||
|
iso8859_7
|
||||||
|
iso8859_8
|
||||||
|
iso8859_8I
|
||||||
|
iso8859_10
|
||||||
|
iso8859_13
|
||||||
|
iso8859_14
|
||||||
|
iso8859_15
|
||||||
|
iso8859_16
|
||||||
|
koi8r
|
||||||
|
koi8u
|
||||||
|
macintosh
|
||||||
|
windows874
|
||||||
|
windows1250
|
||||||
|
windows1251
|
||||||
|
windows1252
|
||||||
|
windows1253
|
||||||
|
windows1254
|
||||||
|
windows1255
|
||||||
|
windows1256
|
||||||
|
windows1257
|
||||||
|
windows1258
|
||||||
|
macintoshCyrillic
|
||||||
|
gbk
|
||||||
|
gb18030
|
||||||
|
big5
|
||||||
|
eucjp
|
||||||
|
iso2022jp
|
||||||
|
shiftJIS
|
||||||
|
euckr
|
||||||
|
replacement
|
||||||
|
utf16be
|
||||||
|
utf16le
|
||||||
|
xUserDefined
|
||||||
|
numEncodings
|
||||||
|
)
|
||||||
|
|
||||||
|
var canonical = [numEncodings]string{
|
||||||
|
"utf-8",
|
||||||
|
"ibm866",
|
||||||
|
"iso-8859-2",
|
||||||
|
"iso-8859-3",
|
||||||
|
"iso-8859-4",
|
||||||
|
"iso-8859-5",
|
||||||
|
"iso-8859-6",
|
||||||
|
"iso-8859-7",
|
||||||
|
"iso-8859-8",
|
||||||
|
"iso-8859-8-i",
|
||||||
|
"iso-8859-10",
|
||||||
|
"iso-8859-13",
|
||||||
|
"iso-8859-14",
|
||||||
|
"iso-8859-15",
|
||||||
|
"iso-8859-16",
|
||||||
|
"koi8-r",
|
||||||
|
"koi8-u",
|
||||||
|
"macintosh",
|
||||||
|
"windows-874",
|
||||||
|
"windows-1250",
|
||||||
|
"windows-1251",
|
||||||
|
"windows-1252",
|
||||||
|
"windows-1253",
|
||||||
|
"windows-1254",
|
||||||
|
"windows-1255",
|
||||||
|
"windows-1256",
|
||||||
|
"windows-1257",
|
||||||
|
"windows-1258",
|
||||||
|
"x-mac-cyrillic",
|
||||||
|
"gbk",
|
||||||
|
"gb18030",
|
||||||
|
"big5",
|
||||||
|
"euc-jp",
|
||||||
|
"iso-2022-jp",
|
||||||
|
"shift_jis",
|
||||||
|
"euc-kr",
|
||||||
|
"replacement",
|
||||||
|
"utf-16be",
|
||||||
|
"utf-16le",
|
||||||
|
"x-user-defined",
|
||||||
|
}
|
||||||
|
|
||||||
|
var nameMap = map[string]htmlEncoding{
|
||||||
|
"unicode-1-1-utf-8": utf8,
|
||||||
|
"utf-8": utf8,
|
||||||
|
"utf8": utf8,
|
||||||
|
"866": ibm866,
|
||||||
|
"cp866": ibm866,
|
||||||
|
"csibm866": ibm866,
|
||||||
|
"ibm866": ibm866,
|
||||||
|
"csisolatin2": iso8859_2,
|
||||||
|
"iso-8859-2": iso8859_2,
|
||||||
|
"iso-ir-101": iso8859_2,
|
||||||
|
"iso8859-2": iso8859_2,
|
||||||
|
"iso88592": iso8859_2,
|
||||||
|
"iso_8859-2": iso8859_2,
|
||||||
|
"iso_8859-2:1987": iso8859_2,
|
||||||
|
"l2": iso8859_2,
|
||||||
|
"latin2": iso8859_2,
|
||||||
|
"csisolatin3": iso8859_3,
|
||||||
|
"iso-8859-3": iso8859_3,
|
||||||
|
"iso-ir-109": iso8859_3,
|
||||||
|
"iso8859-3": iso8859_3,
|
||||||
|
"iso88593": iso8859_3,
|
||||||
|
"iso_8859-3": iso8859_3,
|
||||||
|
"iso_8859-3:1988": iso8859_3,
|
||||||
|
"l3": iso8859_3,
|
||||||
|
"latin3": iso8859_3,
|
||||||
|
"csisolatin4": iso8859_4,
|
||||||
|
"iso-8859-4": iso8859_4,
|
||||||
|
"iso-ir-110": iso8859_4,
|
||||||
|
"iso8859-4": iso8859_4,
|
||||||
|
"iso88594": iso8859_4,
|
||||||
|
"iso_8859-4": iso8859_4,
|
||||||
|
"iso_8859-4:1988": iso8859_4,
|
||||||
|
"l4": iso8859_4,
|
||||||
|
"latin4": iso8859_4,
|
||||||
|
"csisolatincyrillic": iso8859_5,
|
||||||
|
"cyrillic": iso8859_5,
|
||||||
|
"iso-8859-5": iso8859_5,
|
||||||
|
"iso-ir-144": iso8859_5,
|
||||||
|
"iso8859-5": iso8859_5,
|
||||||
|
"iso88595": iso8859_5,
|
||||||
|
"iso_8859-5": iso8859_5,
|
||||||
|
"iso_8859-5:1988": iso8859_5,
|
||||||
|
"arabic": iso8859_6,
|
||||||
|
"asmo-708": iso8859_6,
|
||||||
|
"csiso88596e": iso8859_6,
|
||||||
|
"csiso88596i": iso8859_6,
|
||||||
|
"csisolatinarabic": iso8859_6,
|
||||||
|
"ecma-114": iso8859_6,
|
||||||
|
"iso-8859-6": iso8859_6,
|
||||||
|
"iso-8859-6-e": iso8859_6,
|
||||||
|
"iso-8859-6-i": iso8859_6,
|
||||||
|
"iso-ir-127": iso8859_6,
|
||||||
|
"iso8859-6": iso8859_6,
|
||||||
|
"iso88596": iso8859_6,
|
||||||
|
"iso_8859-6": iso8859_6,
|
||||||
|
"iso_8859-6:1987": iso8859_6,
|
||||||
|
"csisolatingreek": iso8859_7,
|
||||||
|
"ecma-118": iso8859_7,
|
||||||
|
"elot_928": iso8859_7,
|
||||||
|
"greek": iso8859_7,
|
||||||
|
"greek8": iso8859_7,
|
||||||
|
"iso-8859-7": iso8859_7,
|
||||||
|
"iso-ir-126": iso8859_7,
|
||||||
|
"iso8859-7": iso8859_7,
|
||||||
|
"iso88597": iso8859_7,
|
||||||
|
"iso_8859-7": iso8859_7,
|
||||||
|
"iso_8859-7:1987": iso8859_7,
|
||||||
|
"sun_eu_greek": iso8859_7,
|
||||||
|
"csiso88598e": iso8859_8,
|
||||||
|
"csisolatinhebrew": iso8859_8,
|
||||||
|
"hebrew": iso8859_8,
|
||||||
|
"iso-8859-8": iso8859_8,
|
||||||
|
"iso-8859-8-e": iso8859_8,
|
||||||
|
"iso-ir-138": iso8859_8,
|
||||||
|
"iso8859-8": iso8859_8,
|
||||||
|
"iso88598": iso8859_8,
|
||||||
|
"iso_8859-8": iso8859_8,
|
||||||
|
"iso_8859-8:1988": iso8859_8,
|
||||||
|
"visual": iso8859_8,
|
||||||
|
"csiso88598i": iso8859_8I,
|
||||||
|
"iso-8859-8-i": iso8859_8I,
|
||||||
|
"logical": iso8859_8I,
|
||||||
|
"csisolatin6": iso8859_10,
|
||||||
|
"iso-8859-10": iso8859_10,
|
||||||
|
"iso-ir-157": iso8859_10,
|
||||||
|
"iso8859-10": iso8859_10,
|
||||||
|
"iso885910": iso8859_10,
|
||||||
|
"l6": iso8859_10,
|
||||||
|
"latin6": iso8859_10,
|
||||||
|
"iso-8859-13": iso8859_13,
|
||||||
|
"iso8859-13": iso8859_13,
|
||||||
|
"iso885913": iso8859_13,
|
||||||
|
"iso-8859-14": iso8859_14,
|
||||||
|
"iso8859-14": iso8859_14,
|
||||||
|
"iso885914": iso8859_14,
|
||||||
|
"csisolatin9": iso8859_15,
|
||||||
|
"iso-8859-15": iso8859_15,
|
||||||
|
"iso8859-15": iso8859_15,
|
||||||
|
"iso885915": iso8859_15,
|
||||||
|
"iso_8859-15": iso8859_15,
|
||||||
|
"l9": iso8859_15,
|
||||||
|
"iso-8859-16": iso8859_16,
|
||||||
|
"cskoi8r": koi8r,
|
||||||
|
"koi": koi8r,
|
||||||
|
"koi8": koi8r,
|
||||||
|
"koi8-r": koi8r,
|
||||||
|
"koi8_r": koi8r,
|
||||||
|
"koi8-ru": koi8u,
|
||||||
|
"koi8-u": koi8u,
|
||||||
|
"csmacintosh": macintosh,
|
||||||
|
"mac": macintosh,
|
||||||
|
"macintosh": macintosh,
|
||||||
|
"x-mac-roman": macintosh,
|
||||||
|
"dos-874": windows874,
|
||||||
|
"iso-8859-11": windows874,
|
||||||
|
"iso8859-11": windows874,
|
||||||
|
"iso885911": windows874,
|
||||||
|
"tis-620": windows874,
|
||||||
|
"windows-874": windows874,
|
||||||
|
"cp1250": windows1250,
|
||||||
|
"windows-1250": windows1250,
|
||||||
|
"x-cp1250": windows1250,
|
||||||
|
"cp1251": windows1251,
|
||||||
|
"windows-1251": windows1251,
|
||||||
|
"x-cp1251": windows1251,
|
||||||
|
"ansi_x3.4-1968": windows1252,
|
||||||
|
"ascii": windows1252,
|
||||||
|
"cp1252": windows1252,
|
||||||
|
"cp819": windows1252,
|
||||||
|
"csisolatin1": windows1252,
|
||||||
|
"ibm819": windows1252,
|
||||||
|
"iso-8859-1": windows1252,
|
||||||
|
"iso-ir-100": windows1252,
|
||||||
|
"iso8859-1": windows1252,
|
||||||
|
"iso88591": windows1252,
|
||||||
|
"iso_8859-1": windows1252,
|
||||||
|
"iso_8859-1:1987": windows1252,
|
||||||
|
"l1": windows1252,
|
||||||
|
"latin1": windows1252,
|
||||||
|
"us-ascii": windows1252,
|
||||||
|
"windows-1252": windows1252,
|
||||||
|
"x-cp1252": windows1252,
|
||||||
|
"cp1253": windows1253,
|
||||||
|
"windows-1253": windows1253,
|
||||||
|
"x-cp1253": windows1253,
|
||||||
|
"cp1254": windows1254,
|
||||||
|
"csisolatin5": windows1254,
|
||||||
|
"iso-8859-9": windows1254,
|
||||||
|
"iso-ir-148": windows1254,
|
||||||
|
"iso8859-9": windows1254,
|
||||||
|
"iso88599": windows1254,
|
||||||
|
"iso_8859-9": windows1254,
|
||||||
|
"iso_8859-9:1989": windows1254,
|
||||||
|
"l5": windows1254,
|
||||||
|
"latin5": windows1254,
|
||||||
|
"windows-1254": windows1254,
|
||||||
|
"x-cp1254": windows1254,
|
||||||
|
"cp1255": windows1255,
|
||||||
|
"windows-1255": windows1255,
|
||||||
|
"x-cp1255": windows1255,
|
||||||
|
"cp1256": windows1256,
|
||||||
|
"windows-1256": windows1256,
|
||||||
|
"x-cp1256": windows1256,
|
||||||
|
"cp1257": windows1257,
|
||||||
|
"windows-1257": windows1257,
|
||||||
|
"x-cp1257": windows1257,
|
||||||
|
"cp1258": windows1258,
|
||||||
|
"windows-1258": windows1258,
|
||||||
|
"x-cp1258": windows1258,
|
||||||
|
"x-mac-cyrillic": macintoshCyrillic,
|
||||||
|
"x-mac-ukrainian": macintoshCyrillic,
|
||||||
|
"chinese": gbk,
|
||||||
|
"csgb2312": gbk,
|
||||||
|
"csiso58gb231280": gbk,
|
||||||
|
"gb2312": gbk,
|
||||||
|
"gb_2312": gbk,
|
||||||
|
"gb_2312-80": gbk,
|
||||||
|
"gbk": gbk,
|
||||||
|
"iso-ir-58": gbk,
|
||||||
|
"x-gbk": gbk,
|
||||||
|
"gb18030": gb18030,
|
||||||
|
"big5": big5,
|
||||||
|
"big5-hkscs": big5,
|
||||||
|
"cn-big5": big5,
|
||||||
|
"csbig5": big5,
|
||||||
|
"x-x-big5": big5,
|
||||||
|
"cseucpkdfmtjapanese": eucjp,
|
||||||
|
"euc-jp": eucjp,
|
||||||
|
"x-euc-jp": eucjp,
|
||||||
|
"csiso2022jp": iso2022jp,
|
||||||
|
"iso-2022-jp": iso2022jp,
|
||||||
|
"csshiftjis": shiftJIS,
|
||||||
|
"ms932": shiftJIS,
|
||||||
|
"ms_kanji": shiftJIS,
|
||||||
|
"shift-jis": shiftJIS,
|
||||||
|
"shift_jis": shiftJIS,
|
||||||
|
"sjis": shiftJIS,
|
||||||
|
"windows-31j": shiftJIS,
|
||||||
|
"x-sjis": shiftJIS,
|
||||||
|
"cseuckr": euckr,
|
||||||
|
"csksc56011987": euckr,
|
||||||
|
"euc-kr": euckr,
|
||||||
|
"iso-ir-149": euckr,
|
||||||
|
"korean": euckr,
|
||||||
|
"ks_c_5601-1987": euckr,
|
||||||
|
"ks_c_5601-1989": euckr,
|
||||||
|
"ksc5601": euckr,
|
||||||
|
"ksc_5601": euckr,
|
||||||
|
"windows-949": euckr,
|
||||||
|
"csiso2022kr": replacement,
|
||||||
|
"hz-gb-2312": replacement,
|
||||||
|
"iso-2022-cn": replacement,
|
||||||
|
"iso-2022-cn-ext": replacement,
|
||||||
|
"iso-2022-kr": replacement,
|
||||||
|
"utf-16be": utf16be,
|
||||||
|
"utf-16": utf16le,
|
||||||
|
"utf-16le": utf16le,
|
||||||
|
"x-user-defined": xUserDefined,
|
||||||
|
}
|
||||||
|
|
||||||
|
var localeMap = []htmlEncoding{
|
||||||
|
windows1252, // und_Latn
|
||||||
|
windows1256, // ar
|
||||||
|
windows1251, // ba
|
||||||
|
windows1251, // be
|
||||||
|
windows1251, // bg
|
||||||
|
windows1250, // cs
|
||||||
|
iso8859_7, // el
|
||||||
|
windows1257, // et
|
||||||
|
windows1256, // fa
|
||||||
|
windows1255, // he
|
||||||
|
windows1250, // hr
|
||||||
|
iso8859_2, // hu
|
||||||
|
shiftJIS, // ja
|
||||||
|
windows1251, // kk
|
||||||
|
euckr, // ko
|
||||||
|
windows1254, // ku
|
||||||
|
windows1251, // ky
|
||||||
|
windows1257, // lt
|
||||||
|
windows1257, // lv
|
||||||
|
windows1251, // mk
|
||||||
|
iso8859_2, // pl
|
||||||
|
windows1251, // ru
|
||||||
|
windows1251, // sah
|
||||||
|
windows1250, // sk
|
||||||
|
iso8859_2, // sl
|
||||||
|
windows1251, // sr
|
||||||
|
windows1251, // tg
|
||||||
|
windows874, // th
|
||||||
|
windows1254, // tr
|
||||||
|
windows1251, // tt
|
||||||
|
windows1251, // uk
|
||||||
|
windows1258, // vi
|
||||||
|
gb18030, // zh-hans
|
||||||
|
big5, // zh-hant
|
||||||
|
}
|
||||||
|
|
||||||
|
const locales = "und_Latn ar ba be bg cs el et fa he hr hu ja kk ko ku ky lt lv mk pl ru sah sk sl sr tg th tr tt uk vi zh-hans zh-hant"
|
137
vendor/golang.org/x/text/encoding/internal/identifier/gen.go
generated
vendored
Normal file
137
vendor/golang.org/x/text/encoding/internal/identifier/gen.go
generated
vendored
Normal file
@ -0,0 +1,137 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/text/internal/gen"
|
||||||
|
)
|
||||||
|
|
||||||
|
type registry struct {
|
||||||
|
XMLName xml.Name `xml:"registry"`
|
||||||
|
Updated string `xml:"updated"`
|
||||||
|
Registry []struct {
|
||||||
|
ID string `xml:"id,attr"`
|
||||||
|
Record []struct {
|
||||||
|
Name string `xml:"name"`
|
||||||
|
Xref []struct {
|
||||||
|
Type string `xml:"type,attr"`
|
||||||
|
Data string `xml:"data,attr"`
|
||||||
|
} `xml:"xref"`
|
||||||
|
Desc struct {
|
||||||
|
Data string `xml:",innerxml"`
|
||||||
|
// Any []struct {
|
||||||
|
// Data string `xml:",chardata"`
|
||||||
|
// } `xml:",any"`
|
||||||
|
// Data string `xml:",chardata"`
|
||||||
|
} `xml:"description,"`
|
||||||
|
MIB string `xml:"value"`
|
||||||
|
Alias []string `xml:"alias"`
|
||||||
|
MIME string `xml:"preferred_alias"`
|
||||||
|
} `xml:"record"`
|
||||||
|
} `xml:"registry"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml")
|
||||||
|
reg := ®istry{}
|
||||||
|
if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF {
|
||||||
|
log.Fatalf("Error decoding charset registry: %v", err)
|
||||||
|
}
|
||||||
|
if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" {
|
||||||
|
log.Fatalf("Unexpected ID %s", reg.Registry[0].ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
w := &bytes.Buffer{}
|
||||||
|
fmt.Fprintf(w, "const (\n")
|
||||||
|
for _, rec := range reg.Registry[0].Record {
|
||||||
|
constName := ""
|
||||||
|
for _, a := range rec.Alias {
|
||||||
|
if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 {
|
||||||
|
// Some of the constant definitions have comments in them. Strip those.
|
||||||
|
constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if constName == "" {
|
||||||
|
switch rec.MIB {
|
||||||
|
case "2085":
|
||||||
|
constName = "HZGB2312" // Not listed as alias for some reason.
|
||||||
|
default:
|
||||||
|
log.Fatalf("No cs alias defined for %s.", rec.MIB)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if rec.MIME != "" {
|
||||||
|
rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME)
|
||||||
|
if len(rec.Desc.Data) > 0 {
|
||||||
|
fmt.Fprint(w, "// ")
|
||||||
|
d := xml.NewDecoder(strings.NewReader(rec.Desc.Data))
|
||||||
|
inElem := true
|
||||||
|
attr := ""
|
||||||
|
for {
|
||||||
|
t, err := d.Token()
|
||||||
|
if err != nil {
|
||||||
|
if err != io.EOF {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
switch x := t.(type) {
|
||||||
|
case xml.CharData:
|
||||||
|
attr = "" // Don't need attribute info.
|
||||||
|
a := bytes.Split([]byte(x), []byte("\n"))
|
||||||
|
for i, b := range a {
|
||||||
|
if b = bytes.TrimSpace(b); len(b) != 0 {
|
||||||
|
if !inElem && i > 0 {
|
||||||
|
fmt.Fprint(w, "\n// ")
|
||||||
|
}
|
||||||
|
inElem = false
|
||||||
|
fmt.Fprintf(w, "%s ", string(b))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case xml.StartElement:
|
||||||
|
if x.Name.Local == "xref" {
|
||||||
|
inElem = true
|
||||||
|
use := false
|
||||||
|
for _, a := range x.Attr {
|
||||||
|
if a.Name.Local == "type" {
|
||||||
|
use = use || a.Value != "person"
|
||||||
|
}
|
||||||
|
if a.Name.Local == "data" && use {
|
||||||
|
attr = a.Value + " "
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case xml.EndElement:
|
||||||
|
inElem = false
|
||||||
|
fmt.Fprint(w, attr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprint(w, "\n")
|
||||||
|
}
|
||||||
|
for _, x := range rec.Xref {
|
||||||
|
switch x.Type {
|
||||||
|
case "rfc":
|
||||||
|
fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data))
|
||||||
|
case "uri":
|
||||||
|
fmt.Fprintf(w, "// Reference: %s\n", x.Data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB)
|
||||||
|
fmt.Fprintln(w)
|
||||||
|
}
|
||||||
|
fmt.Fprintln(w, ")")
|
||||||
|
|
||||||
|
gen.WriteGoFile("mib.go", "identifier", w.Bytes())
|
||||||
|
}
|
81
vendor/golang.org/x/text/encoding/internal/identifier/identifier.go
generated
vendored
Normal file
81
vendor/golang.org/x/text/encoding/internal/identifier/identifier.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
//go:generate go run gen.go
|
||||||
|
|
||||||
|
// Package identifier defines the contract between implementations of Encoding
|
||||||
|
// and Index by defining identifiers that uniquely identify standardized coded
|
||||||
|
// character sets (CCS) and character encoding schemes (CES), which we will
|
||||||
|
// together refer to as encodings, for which Encoding implementations provide
|
||||||
|
// converters to and from UTF-8. This package is typically only of concern to
|
||||||
|
// implementers of Indexes and Encodings.
|
||||||
|
//
|
||||||
|
// One part of the identifier is the MIB code, which is defined by IANA and
|
||||||
|
// uniquely identifies a CCS or CES. Each code is associated with data that
|
||||||
|
// references authorities, official documentation as well as aliases and MIME
|
||||||
|
// names.
|
||||||
|
//
|
||||||
|
// Not all CESs are covered by the IANA registry. The "other" string that is
|
||||||
|
// returned by ID can be used to identify other character sets or versions of
|
||||||
|
// existing ones.
|
||||||
|
//
|
||||||
|
// It is recommended that each package that provides a set of Encodings provide
|
||||||
|
// the All and Common variables to reference all supported encodings and
|
||||||
|
// commonly used subset. This allows Index implementations to include all
|
||||||
|
// available encodings without explicitly referencing or knowing about them.
|
||||||
|
package identifier
|
||||||
|
|
||||||
|
// Note: this package is internal, but could be made public if there is a need
|
||||||
|
// for writing third-party Indexes and Encodings.
|
||||||
|
|
||||||
|
// References:
|
||||||
|
// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt
|
||||||
|
// - http://www.iana.org/assignments/character-sets/character-sets.xhtml
|
||||||
|
// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib
|
||||||
|
// - http://www.ietf.org/rfc/rfc2978.txt
|
||||||
|
// - http://www.unicode.org/reports/tr22/
|
||||||
|
// - http://www.w3.org/TR/encoding/
|
||||||
|
// - https://encoding.spec.whatwg.org/
|
||||||
|
// - https://encoding.spec.whatwg.org/encodings.json
|
||||||
|
// - https://tools.ietf.org/html/rfc6657#section-5
|
||||||
|
|
||||||
|
// Interface can be implemented by Encodings to define the CCS or CES for which
|
||||||
|
// it implements conversions.
|
||||||
|
type Interface interface {
|
||||||
|
// ID returns an encoding identifier. Exactly one of the mib and other
|
||||||
|
// values should be non-zero.
|
||||||
|
//
|
||||||
|
// In the usual case it is only necessary to indicate the MIB code. The
|
||||||
|
// other string can be used to specify encodings for which there is no MIB,
|
||||||
|
// such as "x-mac-dingbat".
|
||||||
|
//
|
||||||
|
// The other string may only contain the characters a-z, A-Z, 0-9, - and _.
|
||||||
|
ID() (mib MIB, other string)
|
||||||
|
|
||||||
|
// NOTE: the restrictions on the encoding are to allow extending the syntax
|
||||||
|
// with additional information such as versions, vendors and other variants.
|
||||||
|
}
|
||||||
|
|
||||||
|
// A MIB identifies an encoding. It is derived from the IANA MIB codes and adds
|
||||||
|
// some identifiers for some encodings that are not covered by the IANA
|
||||||
|
// standard.
|
||||||
|
//
|
||||||
|
// See http://www.iana.org/assignments/ianacharset-mib.
|
||||||
|
type MIB uint16
|
||||||
|
|
||||||
|
// These additional MIB types are not defined in IANA. They are added because
|
||||||
|
// they are common and defined within the text repo.
|
||||||
|
const (
|
||||||
|
// Unofficial marks the start of encodings not registered by IANA.
|
||||||
|
Unofficial MIB = 10000 + iota
|
||||||
|
|
||||||
|
// Replacement is the WhatWG replacement encoding.
|
||||||
|
Replacement
|
||||||
|
|
||||||
|
// XUserDefined is the code for x-user-defined.
|
||||||
|
XUserDefined
|
||||||
|
|
||||||
|
// MacintoshCyrillic is the code for x-mac-cyrillic.
|
||||||
|
MacintoshCyrillic
|
||||||
|
)
|
1621
vendor/golang.org/x/text/encoding/internal/identifier/mib.go
generated
vendored
Normal file
1621
vendor/golang.org/x/text/encoding/internal/identifier/mib.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
75
vendor/golang.org/x/text/encoding/internal/internal.go
generated
vendored
Normal file
75
vendor/golang.org/x/text/encoding/internal/internal.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package internal contains code that is shared among encoding implementations.
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Encoding is an implementation of the Encoding interface that adds the String
|
||||||
|
// and ID methods to an existing encoding.
|
||||||
|
type Encoding struct {
|
||||||
|
encoding.Encoding
|
||||||
|
Name string
|
||||||
|
MIB identifier.MIB
|
||||||
|
}
|
||||||
|
|
||||||
|
// _ verifies that Encoding implements identifier.Interface.
|
||||||
|
var _ identifier.Interface = (*Encoding)(nil)
|
||||||
|
|
||||||
|
func (e *Encoding) String() string {
|
||||||
|
return e.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Encoding) ID() (mib identifier.MIB, other string) {
|
||||||
|
return e.MIB, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// SimpleEncoding is an Encoding that combines two Transformers.
|
||||||
|
type SimpleEncoding struct {
|
||||||
|
Decoder transform.Transformer
|
||||||
|
Encoder transform.Transformer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *SimpleEncoding) NewDecoder() *encoding.Decoder {
|
||||||
|
return &encoding.Decoder{Transformer: e.Decoder}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *SimpleEncoding) NewEncoder() *encoding.Encoder {
|
||||||
|
return &encoding.Encoder{Transformer: e.Encoder}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FuncEncoding is an Encoding that combines two functions returning a new
|
||||||
|
// Transformer.
|
||||||
|
type FuncEncoding struct {
|
||||||
|
Decoder func() transform.Transformer
|
||||||
|
Encoder func() transform.Transformer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e FuncEncoding) NewDecoder() *encoding.Decoder {
|
||||||
|
return &encoding.Decoder{Transformer: e.Decoder()}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e FuncEncoding) NewEncoder() *encoding.Encoder {
|
||||||
|
return &encoding.Encoder{Transformer: e.Encoder()}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A RepertoireError indicates a rune is not in the repertoire of a destination
|
||||||
|
// encoding. It is associated with an encoding-specific suggested replacement
|
||||||
|
// byte.
|
||||||
|
type RepertoireError byte
|
||||||
|
|
||||||
|
// Error implements the error interrface.
|
||||||
|
func (r RepertoireError) Error() string {
|
||||||
|
return "encoding: rune not supported by encoding."
|
||||||
|
}
|
||||||
|
|
||||||
|
// Replacement returns the replacement string associated with this error.
|
||||||
|
func (r RepertoireError) Replacement() byte { return byte(r) }
|
||||||
|
|
||||||
|
var ErrASCIIReplacement = RepertoireError(encoding.ASCIISub)
|
12
vendor/golang.org/x/text/encoding/japanese/all.go
generated
vendored
Normal file
12
vendor/golang.org/x/text/encoding/japanese/all.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package japanese
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
)
|
||||||
|
|
||||||
|
// All is a list of all defined encodings in this package.
|
||||||
|
var All = []encoding.Encoding{EUCJP, ISO2022JP, ShiftJIS}
|
225
vendor/golang.org/x/text/encoding/japanese/eucjp.go
generated
vendored
Normal file
225
vendor/golang.org/x/text/encoding/japanese/eucjp.go
generated
vendored
Normal file
@ -0,0 +1,225 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package japanese
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/internal"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EUCJP is the EUC-JP encoding.
|
||||||
|
var EUCJP encoding.Encoding = &eucJP
|
||||||
|
|
||||||
|
var eucJP = internal.Encoding{
|
||||||
|
&internal.SimpleEncoding{eucJPDecoder{}, eucJPEncoder{}},
|
||||||
|
"EUC-JP",
|
||||||
|
identifier.EUCPkdFmtJapanese,
|
||||||
|
}
|
||||||
|
|
||||||
|
type eucJPDecoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
// See https://encoding.spec.whatwg.org/#euc-jp-decoder.
|
||||||
|
func (eucJPDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
loop:
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
switch c0 := src[nSrc]; {
|
||||||
|
case c0 < utf8.RuneSelf:
|
||||||
|
r, size = rune(c0), 1
|
||||||
|
|
||||||
|
case c0 == 0x8e:
|
||||||
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
c1 := src[nSrc+1]
|
||||||
|
switch {
|
||||||
|
case c1 < 0xa1:
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
case c1 > 0xdf:
|
||||||
|
r, size = utf8.RuneError, 2
|
||||||
|
if c1 == 0xff {
|
||||||
|
size = 1
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
r, size = rune(c1)+(0xff61-0xa1), 2
|
||||||
|
}
|
||||||
|
case c0 == 0x8f:
|
||||||
|
if nSrc+2 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
if p := nSrc + 1; p < len(src) && 0xa1 <= src[p] && src[p] < 0xfe {
|
||||||
|
size = 2
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
c1 := src[nSrc+1]
|
||||||
|
if c1 < 0xa1 || 0xfe < c1 {
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
c2 := src[nSrc+2]
|
||||||
|
if c2 < 0xa1 || 0xfe < c2 {
|
||||||
|
r, size = utf8.RuneError, 2
|
||||||
|
break
|
||||||
|
}
|
||||||
|
r, size = utf8.RuneError, 3
|
||||||
|
if i := int(c1-0xa1)*94 + int(c2-0xa1); i < len(jis0212Decode) {
|
||||||
|
r = rune(jis0212Decode[i])
|
||||||
|
if r == 0 {
|
||||||
|
r = utf8.RuneError
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case 0xa1 <= c0 && c0 <= 0xfe:
|
||||||
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
c1 := src[nSrc+1]
|
||||||
|
if c1 < 0xa1 || 0xfe < c1 {
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
r, size = utf8.RuneError, 2
|
||||||
|
if i := int(c0-0xa1)*94 + int(c1-0xa1); i < len(jis0208Decode) {
|
||||||
|
r = rune(jis0208Decode[i])
|
||||||
|
if r == 0 {
|
||||||
|
r = utf8.RuneError
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type eucJPEncoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
func (eucJPEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
r = rune(src[nSrc])
|
||||||
|
|
||||||
|
// Decode a 1-byte rune.
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// Decode a multi-byte rune.
|
||||||
|
r, size = utf8.DecodeRune(src[nSrc:])
|
||||||
|
if size == 1 {
|
||||||
|
// All valid runes of size 1 (those below utf8.RuneSelf) were
|
||||||
|
// handled above. We have invalid UTF-8 or we haven't seen the
|
||||||
|
// full character yet.
|
||||||
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// func init checks that the switch covers all tables.
|
||||||
|
switch {
|
||||||
|
case encode0Low <= r && r < encode0High:
|
||||||
|
if r = rune(encode0[r-encode0Low]); r != 0 {
|
||||||
|
goto write2or3
|
||||||
|
}
|
||||||
|
case encode1Low <= r && r < encode1High:
|
||||||
|
if r = rune(encode1[r-encode1Low]); r != 0 {
|
||||||
|
goto write2or3
|
||||||
|
}
|
||||||
|
case encode2Low <= r && r < encode2High:
|
||||||
|
if r = rune(encode2[r-encode2Low]); r != 0 {
|
||||||
|
goto write2or3
|
||||||
|
}
|
||||||
|
case encode3Low <= r && r < encode3High:
|
||||||
|
if r = rune(encode3[r-encode3Low]); r != 0 {
|
||||||
|
goto write2or3
|
||||||
|
}
|
||||||
|
case encode4Low <= r && r < encode4High:
|
||||||
|
if r = rune(encode4[r-encode4Low]); r != 0 {
|
||||||
|
goto write2or3
|
||||||
|
}
|
||||||
|
case encode5Low <= r && r < encode5High:
|
||||||
|
if 0xff61 <= r && r < 0xffa0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
if r = rune(encode5[r-encode5Low]); r != 0 {
|
||||||
|
goto write2or3
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = internal.ErrASCIIReplacement
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if nDst >= len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst] = uint8(r)
|
||||||
|
nDst++
|
||||||
|
continue
|
||||||
|
|
||||||
|
write2or3:
|
||||||
|
if r>>tableShift == jis0208 {
|
||||||
|
if nDst+2 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if nDst+3 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst] = 0x8f
|
||||||
|
nDst++
|
||||||
|
}
|
||||||
|
dst[nDst+0] = 0xa1 + uint8(r>>codeShift)&codeMask
|
||||||
|
dst[nDst+1] = 0xa1 + uint8(r)&codeMask
|
||||||
|
nDst += 2
|
||||||
|
continue
|
||||||
|
|
||||||
|
write2:
|
||||||
|
if nDst+2 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+0] = 0x8e
|
||||||
|
dst[nDst+1] = uint8(r - (0xff61 - 0xa1))
|
||||||
|
nDst += 2
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Check that the hard-coded encode switch covers all tables.
|
||||||
|
if numEncodeTables != 6 {
|
||||||
|
panic("bad numEncodeTables")
|
||||||
|
}
|
||||||
|
}
|
299
vendor/golang.org/x/text/encoding/japanese/iso2022jp.go
generated
vendored
Normal file
299
vendor/golang.org/x/text/encoding/japanese/iso2022jp.go
generated
vendored
Normal file
@ -0,0 +1,299 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package japanese
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/internal"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ISO2022JP is the ISO-2022-JP encoding.
|
||||||
|
var ISO2022JP encoding.Encoding = &iso2022JP
|
||||||
|
|
||||||
|
var iso2022JP = internal.Encoding{
|
||||||
|
internal.FuncEncoding{iso2022JPNewDecoder, iso2022JPNewEncoder},
|
||||||
|
"ISO-2022-JP",
|
||||||
|
identifier.ISO2022JP,
|
||||||
|
}
|
||||||
|
|
||||||
|
func iso2022JPNewDecoder() transform.Transformer {
|
||||||
|
return new(iso2022JPDecoder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func iso2022JPNewEncoder() transform.Transformer {
|
||||||
|
return new(iso2022JPEncoder)
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
asciiState = iota
|
||||||
|
katakanaState
|
||||||
|
jis0208State
|
||||||
|
jis0212State
|
||||||
|
)
|
||||||
|
|
||||||
|
const asciiEsc = 0x1b
|
||||||
|
|
||||||
|
type iso2022JPDecoder int
|
||||||
|
|
||||||
|
func (d *iso2022JPDecoder) Reset() {
|
||||||
|
*d = asciiState
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *iso2022JPDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
c0 := src[nSrc]
|
||||||
|
if c0 >= utf8.RuneSelf {
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
|
||||||
|
if c0 == asciiEsc {
|
||||||
|
if nSrc+2 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
return nDst, nSrc, transform.ErrShortSrc
|
||||||
|
}
|
||||||
|
// TODO: is it correct to only skip 1??
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
size = 3
|
||||||
|
c1 := src[nSrc+1]
|
||||||
|
c2 := src[nSrc+2]
|
||||||
|
switch {
|
||||||
|
case c1 == '$' && (c2 == '@' || c2 == 'B'): // 0x24 {0x40, 0x42}
|
||||||
|
*d = jis0208State
|
||||||
|
continue
|
||||||
|
case c1 == '$' && c2 == '(': // 0x24 0x28
|
||||||
|
if nSrc+3 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
return nDst, nSrc, transform.ErrShortSrc
|
||||||
|
}
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
size = 4
|
||||||
|
if src[nSrc+3] == 'D' {
|
||||||
|
*d = jis0212State
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case c1 == '(' && (c2 == 'B' || c2 == 'J'): // 0x28 {0x42, 0x4A}
|
||||||
|
*d = asciiState
|
||||||
|
continue
|
||||||
|
case c1 == '(' && c2 == 'I': // 0x28 0x49
|
||||||
|
*d = katakanaState
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
|
||||||
|
switch *d {
|
||||||
|
case asciiState:
|
||||||
|
r, size = rune(c0), 1
|
||||||
|
|
||||||
|
case katakanaState:
|
||||||
|
if c0 < 0x21 || 0x60 <= c0 {
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
r, size = rune(c0)+(0xff61-0x21), 1
|
||||||
|
|
||||||
|
default:
|
||||||
|
if c0 == 0x0a {
|
||||||
|
*d = asciiState
|
||||||
|
r, size = rune(c0), 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
return nDst, nSrc, transform.ErrShortSrc
|
||||||
|
}
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
size = 2
|
||||||
|
c1 := src[nSrc+1]
|
||||||
|
i := int(c0-0x21)*94 + int(c1-0x21)
|
||||||
|
if *d == jis0208State && i < len(jis0208Decode) {
|
||||||
|
r = rune(jis0208Decode[i])
|
||||||
|
} else if *d == jis0212State && i < len(jis0212Decode) {
|
||||||
|
r = rune(jis0212Decode[i])
|
||||||
|
} else {
|
||||||
|
r = '\ufffd'
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
if r == 0 {
|
||||||
|
r = '\ufffd'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
write:
|
||||||
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
|
return nDst, nSrc, transform.ErrShortDst
|
||||||
|
}
|
||||||
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type iso2022JPEncoder int
|
||||||
|
|
||||||
|
func (e *iso2022JPEncoder) Reset() {
|
||||||
|
*e = asciiState
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *iso2022JPEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
r = rune(src[nSrc])
|
||||||
|
|
||||||
|
// Decode a 1-byte rune.
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// Decode a multi-byte rune.
|
||||||
|
r, size = utf8.DecodeRune(src[nSrc:])
|
||||||
|
if size == 1 {
|
||||||
|
// All valid runes of size 1 (those below utf8.RuneSelf) were
|
||||||
|
// handled above. We have invalid UTF-8 or we haven't seen the
|
||||||
|
// full character yet.
|
||||||
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// func init checks that the switch covers all tables.
|
||||||
|
//
|
||||||
|
// http://encoding.spec.whatwg.org/#iso-2022-jp says that "the index jis0212
|
||||||
|
// is not used by the iso-2022-jp encoder due to lack of widespread support".
|
||||||
|
//
|
||||||
|
// TODO: do we have to special-case U+00A5 and U+203E, as per
|
||||||
|
// http://encoding.spec.whatwg.org/#iso-2022-jp
|
||||||
|
// Doing so would mean that "\u00a5" would not be preserved
|
||||||
|
// after an encode-decode round trip.
|
||||||
|
switch {
|
||||||
|
case encode0Low <= r && r < encode0High:
|
||||||
|
if r = rune(encode0[r-encode0Low]); r>>tableShift == jis0208 {
|
||||||
|
goto writeJIS
|
||||||
|
}
|
||||||
|
case encode1Low <= r && r < encode1High:
|
||||||
|
if r = rune(encode1[r-encode1Low]); r>>tableShift == jis0208 {
|
||||||
|
goto writeJIS
|
||||||
|
}
|
||||||
|
case encode2Low <= r && r < encode2High:
|
||||||
|
if r = rune(encode2[r-encode2Low]); r>>tableShift == jis0208 {
|
||||||
|
goto writeJIS
|
||||||
|
}
|
||||||
|
case encode3Low <= r && r < encode3High:
|
||||||
|
if r = rune(encode3[r-encode3Low]); r>>tableShift == jis0208 {
|
||||||
|
goto writeJIS
|
||||||
|
}
|
||||||
|
case encode4Low <= r && r < encode4High:
|
||||||
|
if r = rune(encode4[r-encode4Low]); r>>tableShift == jis0208 {
|
||||||
|
goto writeJIS
|
||||||
|
}
|
||||||
|
case encode5Low <= r && r < encode5High:
|
||||||
|
if 0xff61 <= r && r < 0xffa0 {
|
||||||
|
goto writeKatakana
|
||||||
|
}
|
||||||
|
if r = rune(encode5[r-encode5Low]); r>>tableShift == jis0208 {
|
||||||
|
goto writeJIS
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Switch back to ASCII state in case of error so that an ASCII
|
||||||
|
// replacement character can be written in the correct state.
|
||||||
|
if *e != asciiState {
|
||||||
|
if nDst+3 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
*e = asciiState
|
||||||
|
dst[nDst+0] = asciiEsc
|
||||||
|
dst[nDst+1] = '('
|
||||||
|
dst[nDst+2] = 'B'
|
||||||
|
nDst += 3
|
||||||
|
}
|
||||||
|
err = internal.ErrASCIIReplacement
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if *e != asciiState {
|
||||||
|
if nDst+4 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
*e = asciiState
|
||||||
|
dst[nDst+0] = asciiEsc
|
||||||
|
dst[nDst+1] = '('
|
||||||
|
dst[nDst+2] = 'B'
|
||||||
|
nDst += 3
|
||||||
|
} else if nDst >= len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst] = uint8(r)
|
||||||
|
nDst++
|
||||||
|
continue
|
||||||
|
|
||||||
|
writeJIS:
|
||||||
|
if *e != jis0208State {
|
||||||
|
if nDst+5 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
*e = jis0208State
|
||||||
|
dst[nDst+0] = asciiEsc
|
||||||
|
dst[nDst+1] = '$'
|
||||||
|
dst[nDst+2] = 'B'
|
||||||
|
nDst += 3
|
||||||
|
} else if nDst+2 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+0] = 0x21 + uint8(r>>codeShift)&codeMask
|
||||||
|
dst[nDst+1] = 0x21 + uint8(r)&codeMask
|
||||||
|
nDst += 2
|
||||||
|
continue
|
||||||
|
|
||||||
|
writeKatakana:
|
||||||
|
if *e != katakanaState {
|
||||||
|
if nDst+4 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
*e = katakanaState
|
||||||
|
dst[nDst+0] = asciiEsc
|
||||||
|
dst[nDst+1] = '('
|
||||||
|
dst[nDst+2] = 'I'
|
||||||
|
nDst += 3
|
||||||
|
} else if nDst >= len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst] = uint8(r - (0xff61 - 0x21))
|
||||||
|
nDst++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if atEOF && err == nil && *e != asciiState {
|
||||||
|
if nDst+3 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
} else {
|
||||||
|
*e = asciiState
|
||||||
|
dst[nDst+0] = asciiEsc
|
||||||
|
dst[nDst+1] = '('
|
||||||
|
dst[nDst+2] = 'B'
|
||||||
|
nDst += 3
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
161
vendor/golang.org/x/text/encoding/japanese/maketables.go
generated
vendored
Normal file
161
vendor/golang.org/x/text/encoding/japanese/maketables.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
// This program generates tables.go:
|
||||||
|
// go run maketables.go | gofmt > tables.go
|
||||||
|
|
||||||
|
// TODO: Emoji extensions?
|
||||||
|
// http://www.unicode.org/faq/emoji_dingbats.html
|
||||||
|
// http://www.unicode.org/Public/UNIDATA/EmojiSources.txt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type entry struct {
|
||||||
|
jisCode, table int
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
|
||||||
|
fmt.Printf("// Package japanese provides Japanese encodings such as EUC-JP and Shift JIS.\n")
|
||||||
|
fmt.Printf(`package japanese // import "golang.org/x/text/encoding/japanese"` + "\n\n")
|
||||||
|
|
||||||
|
reverse := [65536]entry{}
|
||||||
|
for i := range reverse {
|
||||||
|
reverse[i].table = -1
|
||||||
|
}
|
||||||
|
|
||||||
|
tables := []struct {
|
||||||
|
url string
|
||||||
|
name string
|
||||||
|
}{
|
||||||
|
{"http://encoding.spec.whatwg.org/index-jis0208.txt", "0208"},
|
||||||
|
{"http://encoding.spec.whatwg.org/index-jis0212.txt", "0212"},
|
||||||
|
}
|
||||||
|
for i, table := range tables {
|
||||||
|
res, err := http.Get(table.url)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("%q: Get: %v", table.url, err)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
|
||||||
|
mapping := [65536]uint16{}
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(res.Body)
|
||||||
|
for scanner.Scan() {
|
||||||
|
s := strings.TrimSpace(scanner.Text())
|
||||||
|
if s == "" || s[0] == '#' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
x, y := 0, uint16(0)
|
||||||
|
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
|
||||||
|
log.Fatalf("%q: could not parse %q", table.url, s)
|
||||||
|
}
|
||||||
|
if x < 0 || 120*94 <= x {
|
||||||
|
log.Fatalf("%q: JIS code %d is out of range", table.url, x)
|
||||||
|
}
|
||||||
|
mapping[x] = y
|
||||||
|
if reverse[y].table == -1 {
|
||||||
|
reverse[y] = entry{jisCode: x, table: i}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
log.Fatalf("%q: scanner error: %v", table.url, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("// jis%sDecode is the decoding table from JIS %s code to Unicode.\n// It is defined at %s\n",
|
||||||
|
table.name, table.name, table.url)
|
||||||
|
fmt.Printf("var jis%sDecode = [...]uint16{\n", table.name)
|
||||||
|
for i, m := range mapping {
|
||||||
|
if m != 0 {
|
||||||
|
fmt.Printf("\t%d: 0x%04X,\n", i, m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Printf("}\n\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Any run of at least separation continuous zero entries in the reverse map will
|
||||||
|
// be a separate encode table.
|
||||||
|
const separation = 1024
|
||||||
|
|
||||||
|
intervals := []interval(nil)
|
||||||
|
low, high := -1, -1
|
||||||
|
for i, v := range reverse {
|
||||||
|
if v.table == -1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if low < 0 {
|
||||||
|
low = i
|
||||||
|
} else if i-high >= separation {
|
||||||
|
if high >= 0 {
|
||||||
|
intervals = append(intervals, interval{low, high})
|
||||||
|
}
|
||||||
|
low = i
|
||||||
|
}
|
||||||
|
high = i + 1
|
||||||
|
}
|
||||||
|
if high >= 0 {
|
||||||
|
intervals = append(intervals, interval{low, high})
|
||||||
|
}
|
||||||
|
sort.Sort(byDecreasingLength(intervals))
|
||||||
|
|
||||||
|
fmt.Printf("const (\n")
|
||||||
|
fmt.Printf("\tjis0208 = 1\n")
|
||||||
|
fmt.Printf("\tjis0212 = 2\n")
|
||||||
|
fmt.Printf("\tcodeMask = 0x7f\n")
|
||||||
|
fmt.Printf("\tcodeShift = 7\n")
|
||||||
|
fmt.Printf("\ttableShift = 14\n")
|
||||||
|
fmt.Printf(")\n\n")
|
||||||
|
|
||||||
|
fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
|
||||||
|
fmt.Printf("// encodeX are the encoding tables from Unicode to JIS code,\n")
|
||||||
|
fmt.Printf("// sorted by decreasing length.\n")
|
||||||
|
for i, v := range intervals {
|
||||||
|
fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
|
||||||
|
}
|
||||||
|
fmt.Printf("//\n")
|
||||||
|
fmt.Printf("// The high two bits of the value record whether the JIS code comes from the\n")
|
||||||
|
fmt.Printf("// JIS0208 table (high bits == 1) or the JIS0212 table (high bits == 2).\n")
|
||||||
|
fmt.Printf("// The low 14 bits are two 7-bit unsigned integers j1 and j2 that form the\n")
|
||||||
|
fmt.Printf("// JIS code (94*j1 + j2) within that table.\n")
|
||||||
|
fmt.Printf("\n")
|
||||||
|
|
||||||
|
for i, v := range intervals {
|
||||||
|
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
|
||||||
|
fmt.Printf("var encode%d = [...]uint16{\n", i)
|
||||||
|
for j := v.low; j < v.high; j++ {
|
||||||
|
x := reverse[j]
|
||||||
|
if x.table == -1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Printf("\t%d - %d: jis%s<<14 | 0x%02X<<7 | 0x%02X,\n",
|
||||||
|
j, v.low, tables[x.table].name, x.jisCode/94, x.jisCode%94)
|
||||||
|
}
|
||||||
|
fmt.Printf("}\n\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// interval is a half-open interval [low, high).
|
||||||
|
type interval struct {
|
||||||
|
low, high int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i interval) len() int { return i.high - i.low }
|
||||||
|
|
||||||
|
// byDecreasingLength sorts intervals by decreasing length.
|
||||||
|
type byDecreasingLength []interval
|
||||||
|
|
||||||
|
func (b byDecreasingLength) Len() int { return len(b) }
|
||||||
|
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
|
||||||
|
func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
189
vendor/golang.org/x/text/encoding/japanese/shiftjis.go
generated
vendored
Normal file
189
vendor/golang.org/x/text/encoding/japanese/shiftjis.go
generated
vendored
Normal file
@ -0,0 +1,189 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package japanese
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/internal"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ShiftJIS is the Shift JIS encoding, also known as Code Page 932 and
|
||||||
|
// Windows-31J.
|
||||||
|
var ShiftJIS encoding.Encoding = &shiftJIS
|
||||||
|
|
||||||
|
var shiftJIS = internal.Encoding{
|
||||||
|
&internal.SimpleEncoding{shiftJISDecoder{}, shiftJISEncoder{}},
|
||||||
|
"Shift JIS",
|
||||||
|
identifier.ShiftJIS,
|
||||||
|
}
|
||||||
|
|
||||||
|
type shiftJISDecoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
func (shiftJISDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
loop:
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
switch c0 := src[nSrc]; {
|
||||||
|
case c0 < utf8.RuneSelf:
|
||||||
|
r, size = rune(c0), 1
|
||||||
|
|
||||||
|
case 0xa1 <= c0 && c0 < 0xe0:
|
||||||
|
r, size = rune(c0)+(0xff61-0xa1), 1
|
||||||
|
|
||||||
|
case (0x81 <= c0 && c0 < 0xa0) || (0xe0 <= c0 && c0 < 0xfd):
|
||||||
|
if c0 <= 0x9f {
|
||||||
|
c0 -= 0x70
|
||||||
|
} else {
|
||||||
|
c0 -= 0xb0
|
||||||
|
}
|
||||||
|
c0 = 2*c0 - 0x21
|
||||||
|
|
||||||
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
c1 := src[nSrc+1]
|
||||||
|
switch {
|
||||||
|
case c1 < 0x40:
|
||||||
|
r, size = '\ufffd', 1 // c1 is ASCII so output on next round
|
||||||
|
goto write
|
||||||
|
case c1 < 0x7f:
|
||||||
|
c0--
|
||||||
|
c1 -= 0x40
|
||||||
|
case c1 == 0x7f:
|
||||||
|
r, size = '\ufffd', 1 // c1 is ASCII so output on next round
|
||||||
|
goto write
|
||||||
|
case c1 < 0x9f:
|
||||||
|
c0--
|
||||||
|
c1 -= 0x41
|
||||||
|
case c1 < 0xfd:
|
||||||
|
c1 -= 0x9f
|
||||||
|
default:
|
||||||
|
r, size = '\ufffd', 2
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
r, size = '\ufffd', 2
|
||||||
|
if i := int(c0)*94 + int(c1); i < len(jis0208Decode) {
|
||||||
|
r = rune(jis0208Decode[i])
|
||||||
|
if r == 0 {
|
||||||
|
r = '\ufffd'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case c0 == 0x80:
|
||||||
|
r, size = 0x80, 1
|
||||||
|
|
||||||
|
default:
|
||||||
|
r, size = '\ufffd', 1
|
||||||
|
}
|
||||||
|
write:
|
||||||
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type shiftJISEncoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
func (shiftJISEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
loop:
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
r = rune(src[nSrc])
|
||||||
|
|
||||||
|
// Decode a 1-byte rune.
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// Decode a multi-byte rune.
|
||||||
|
r, size = utf8.DecodeRune(src[nSrc:])
|
||||||
|
if size == 1 {
|
||||||
|
// All valid runes of size 1 (those below utf8.RuneSelf) were
|
||||||
|
// handled above. We have invalid UTF-8 or we haven't seen the
|
||||||
|
// full character yet.
|
||||||
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// func init checks that the switch covers all tables.
|
||||||
|
switch {
|
||||||
|
case encode0Low <= r && r < encode0High:
|
||||||
|
if r = rune(encode0[r-encode0Low]); r>>tableShift == jis0208 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode1Low <= r && r < encode1High:
|
||||||
|
if r = rune(encode1[r-encode1Low]); r>>tableShift == jis0208 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode2Low <= r && r < encode2High:
|
||||||
|
if r = rune(encode2[r-encode2Low]); r>>tableShift == jis0208 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode3Low <= r && r < encode3High:
|
||||||
|
if r = rune(encode3[r-encode3Low]); r>>tableShift == jis0208 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode4Low <= r && r < encode4High:
|
||||||
|
if r = rune(encode4[r-encode4Low]); r>>tableShift == jis0208 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode5Low <= r && r < encode5High:
|
||||||
|
if 0xff61 <= r && r < 0xffa0 {
|
||||||
|
r -= 0xff61 - 0xa1
|
||||||
|
goto write1
|
||||||
|
}
|
||||||
|
if r = rune(encode5[r-encode5Low]); r>>tableShift == jis0208 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = internal.ErrASCIIReplacement
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
write1:
|
||||||
|
if nDst >= len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst] = uint8(r)
|
||||||
|
nDst++
|
||||||
|
continue
|
||||||
|
|
||||||
|
write2:
|
||||||
|
j1 := uint8(r>>codeShift) & codeMask
|
||||||
|
j2 := uint8(r) & codeMask
|
||||||
|
if nDst+2 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
if j1 <= 61 {
|
||||||
|
dst[nDst+0] = 129 + j1/2
|
||||||
|
} else {
|
||||||
|
dst[nDst+0] = 193 + j1/2
|
||||||
|
}
|
||||||
|
if j1&1 == 0 {
|
||||||
|
dst[nDst+1] = j2 + j2/63 + 64
|
||||||
|
} else {
|
||||||
|
dst[nDst+1] = j2 + 159
|
||||||
|
}
|
||||||
|
nDst += 2
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
26971
vendor/golang.org/x/text/encoding/japanese/tables.go
generated
vendored
Normal file
26971
vendor/golang.org/x/text/encoding/japanese/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
177
vendor/golang.org/x/text/encoding/korean/euckr.go
generated
vendored
Normal file
177
vendor/golang.org/x/text/encoding/korean/euckr.go
generated
vendored
Normal file
@ -0,0 +1,177 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package korean
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/internal"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// All is a list of all defined encodings in this package.
|
||||||
|
var All = []encoding.Encoding{EUCKR}
|
||||||
|
|
||||||
|
// EUCKR is the EUC-KR encoding, also known as Code Page 949.
|
||||||
|
var EUCKR encoding.Encoding = &eucKR
|
||||||
|
|
||||||
|
var eucKR = internal.Encoding{
|
||||||
|
&internal.SimpleEncoding{eucKRDecoder{}, eucKREncoder{}},
|
||||||
|
"EUC-KR",
|
||||||
|
identifier.EUCKR,
|
||||||
|
}
|
||||||
|
|
||||||
|
type eucKRDecoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
func (eucKRDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
loop:
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
switch c0 := src[nSrc]; {
|
||||||
|
case c0 < utf8.RuneSelf:
|
||||||
|
r, size = rune(c0), 1
|
||||||
|
|
||||||
|
case 0x81 <= c0 && c0 < 0xff:
|
||||||
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
c1 := src[nSrc+1]
|
||||||
|
size = 2
|
||||||
|
if c0 < 0xc7 {
|
||||||
|
r = 178 * rune(c0-0x81)
|
||||||
|
switch {
|
||||||
|
case 0x41 <= c1 && c1 < 0x5b:
|
||||||
|
r += rune(c1) - (0x41 - 0*26)
|
||||||
|
case 0x61 <= c1 && c1 < 0x7b:
|
||||||
|
r += rune(c1) - (0x61 - 1*26)
|
||||||
|
case 0x81 <= c1 && c1 < 0xff:
|
||||||
|
r += rune(c1) - (0x81 - 2*26)
|
||||||
|
default:
|
||||||
|
goto decError
|
||||||
|
}
|
||||||
|
} else if 0xa1 <= c1 && c1 < 0xff {
|
||||||
|
r = 178*(0xc7-0x81) + rune(c0-0xc7)*94 + rune(c1-0xa1)
|
||||||
|
} else {
|
||||||
|
goto decError
|
||||||
|
}
|
||||||
|
if int(r) < len(decode) {
|
||||||
|
r = rune(decode[r])
|
||||||
|
if r != 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
decError:
|
||||||
|
r = utf8.RuneError
|
||||||
|
if c1 < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type eucKREncoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
func (eucKREncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
r = rune(src[nSrc])
|
||||||
|
|
||||||
|
// Decode a 1-byte rune.
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
|
||||||
|
if nDst >= len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst] = uint8(r)
|
||||||
|
nDst++
|
||||||
|
continue
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// Decode a multi-byte rune.
|
||||||
|
r, size = utf8.DecodeRune(src[nSrc:])
|
||||||
|
if size == 1 {
|
||||||
|
// All valid runes of size 1 (those below utf8.RuneSelf) were
|
||||||
|
// handled above. We have invalid UTF-8 or we haven't seen the
|
||||||
|
// full character yet.
|
||||||
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// func init checks that the switch covers all tables.
|
||||||
|
switch {
|
||||||
|
case encode0Low <= r && r < encode0High:
|
||||||
|
if r = rune(encode0[r-encode0Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode1Low <= r && r < encode1High:
|
||||||
|
if r = rune(encode1[r-encode1Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode2Low <= r && r < encode2High:
|
||||||
|
if r = rune(encode2[r-encode2Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode3Low <= r && r < encode3High:
|
||||||
|
if r = rune(encode3[r-encode3Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode4Low <= r && r < encode4High:
|
||||||
|
if r = rune(encode4[r-encode4Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode5Low <= r && r < encode5High:
|
||||||
|
if r = rune(encode5[r-encode5Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode6Low <= r && r < encode6High:
|
||||||
|
if r = rune(encode6[r-encode6Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = internal.ErrASCIIReplacement
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
write2:
|
||||||
|
if nDst+2 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+0] = uint8(r >> 8)
|
||||||
|
dst[nDst+1] = uint8(r)
|
||||||
|
nDst += 2
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Check that the hard-coded encode switch covers all tables.
|
||||||
|
if numEncodeTables != 7 {
|
||||||
|
panic("bad numEncodeTables")
|
||||||
|
}
|
||||||
|
}
|
143
vendor/golang.org/x/text/encoding/korean/maketables.go
generated
vendored
Normal file
143
vendor/golang.org/x/text/encoding/korean/maketables.go
generated
vendored
Normal file
@ -0,0 +1,143 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
// This program generates tables.go:
|
||||||
|
// go run maketables.go | gofmt > tables.go
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
|
||||||
|
fmt.Printf("// Package korean provides Korean encodings such as EUC-KR.\n")
|
||||||
|
fmt.Printf(`package korean // import "golang.org/x/text/encoding/korean"` + "\n\n")
|
||||||
|
|
||||||
|
res, err := http.Get("http://encoding.spec.whatwg.org/index-euc-kr.txt")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Get: %v", err)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
|
||||||
|
mapping := [65536]uint16{}
|
||||||
|
reverse := [65536]uint16{}
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(res.Body)
|
||||||
|
for scanner.Scan() {
|
||||||
|
s := strings.TrimSpace(scanner.Text())
|
||||||
|
if s == "" || s[0] == '#' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
x, y := uint16(0), uint16(0)
|
||||||
|
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
|
||||||
|
log.Fatalf("could not parse %q", s)
|
||||||
|
}
|
||||||
|
if x < 0 || 178*(0xc7-0x81)+(0xfe-0xc7)*94+(0xff-0xa1) <= x {
|
||||||
|
log.Fatalf("EUC-KR code %d is out of range", x)
|
||||||
|
}
|
||||||
|
mapping[x] = y
|
||||||
|
if reverse[y] == 0 {
|
||||||
|
c0, c1 := uint16(0), uint16(0)
|
||||||
|
if x < 178*(0xc7-0x81) {
|
||||||
|
c0 = uint16(x/178) + 0x81
|
||||||
|
c1 = uint16(x % 178)
|
||||||
|
switch {
|
||||||
|
case c1 < 1*26:
|
||||||
|
c1 += 0x41
|
||||||
|
case c1 < 2*26:
|
||||||
|
c1 += 0x47
|
||||||
|
default:
|
||||||
|
c1 += 0x4d
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
x -= 178 * (0xc7 - 0x81)
|
||||||
|
c0 = uint16(x/94) + 0xc7
|
||||||
|
c1 = uint16(x%94) + 0xa1
|
||||||
|
}
|
||||||
|
reverse[y] = c0<<8 | c1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
log.Fatalf("scanner error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("// decode is the decoding table from EUC-KR code to Unicode.\n")
|
||||||
|
fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt\n")
|
||||||
|
fmt.Printf("var decode = [...]uint16{\n")
|
||||||
|
for i, v := range mapping {
|
||||||
|
if v != 0 {
|
||||||
|
fmt.Printf("\t%d: 0x%04X,\n", i, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Printf("}\n\n")
|
||||||
|
|
||||||
|
// Any run of at least separation continuous zero entries in the reverse map will
|
||||||
|
// be a separate encode table.
|
||||||
|
const separation = 1024
|
||||||
|
|
||||||
|
intervals := []interval(nil)
|
||||||
|
low, high := -1, -1
|
||||||
|
for i, v := range reverse {
|
||||||
|
if v == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if low < 0 {
|
||||||
|
low = i
|
||||||
|
} else if i-high >= separation {
|
||||||
|
if high >= 0 {
|
||||||
|
intervals = append(intervals, interval{low, high})
|
||||||
|
}
|
||||||
|
low = i
|
||||||
|
}
|
||||||
|
high = i + 1
|
||||||
|
}
|
||||||
|
if high >= 0 {
|
||||||
|
intervals = append(intervals, interval{low, high})
|
||||||
|
}
|
||||||
|
sort.Sort(byDecreasingLength(intervals))
|
||||||
|
|
||||||
|
fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
|
||||||
|
fmt.Printf("// encodeX are the encoding tables from Unicode to EUC-KR code,\n")
|
||||||
|
fmt.Printf("// sorted by decreasing length.\n")
|
||||||
|
for i, v := range intervals {
|
||||||
|
fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
|
||||||
|
}
|
||||||
|
fmt.Printf("\n")
|
||||||
|
|
||||||
|
for i, v := range intervals {
|
||||||
|
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
|
||||||
|
fmt.Printf("var encode%d = [...]uint16{\n", i)
|
||||||
|
for j := v.low; j < v.high; j++ {
|
||||||
|
x := reverse[j]
|
||||||
|
if x == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
|
||||||
|
}
|
||||||
|
fmt.Printf("}\n\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// interval is a half-open interval [low, high).
|
||||||
|
type interval struct {
|
||||||
|
low, high int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i interval) len() int { return i.high - i.low }
|
||||||
|
|
||||||
|
// byDecreasingLength sorts intervals by decreasing length.
|
||||||
|
type byDecreasingLength []interval
|
||||||
|
|
||||||
|
func (b byDecreasingLength) Len() int { return len(b) }
|
||||||
|
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
|
||||||
|
func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
34152
vendor/golang.org/x/text/encoding/korean/tables.go
generated
vendored
Normal file
34152
vendor/golang.org/x/text/encoding/korean/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
12
vendor/golang.org/x/text/encoding/simplifiedchinese/all.go
generated
vendored
Normal file
12
vendor/golang.org/x/text/encoding/simplifiedchinese/all.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package simplifiedchinese
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
)
|
||||||
|
|
||||||
|
// All is a list of all defined encodings in this package.
|
||||||
|
var All = []encoding.Encoding{GB18030, GBK, HZGB2312}
|
269
vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go
generated
vendored
Normal file
269
vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go
generated
vendored
Normal file
@ -0,0 +1,269 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package simplifiedchinese
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/internal"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// GB18030 is the GB18030 encoding.
|
||||||
|
GB18030 encoding.Encoding = &gbk18030
|
||||||
|
// GBK is the GBK encoding. It encodes an extension of the GB2312 character set
|
||||||
|
// and is also known as Code Page 936.
|
||||||
|
GBK encoding.Encoding = &gbk
|
||||||
|
)
|
||||||
|
|
||||||
|
var gbk = internal.Encoding{
|
||||||
|
&internal.SimpleEncoding{
|
||||||
|
gbkDecoder{gb18030: false},
|
||||||
|
gbkEncoder{gb18030: false},
|
||||||
|
},
|
||||||
|
"GBK",
|
||||||
|
identifier.GBK,
|
||||||
|
}
|
||||||
|
|
||||||
|
var gbk18030 = internal.Encoding{
|
||||||
|
&internal.SimpleEncoding{
|
||||||
|
gbkDecoder{gb18030: true},
|
||||||
|
gbkEncoder{gb18030: true},
|
||||||
|
},
|
||||||
|
"GB18030",
|
||||||
|
identifier.GB18030,
|
||||||
|
}
|
||||||
|
|
||||||
|
type gbkDecoder struct {
|
||||||
|
transform.NopResetter
|
||||||
|
gb18030 bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d gbkDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
loop:
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
switch c0 := src[nSrc]; {
|
||||||
|
case c0 < utf8.RuneSelf:
|
||||||
|
r, size = rune(c0), 1
|
||||||
|
|
||||||
|
// Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC
|
||||||
|
// as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk
|
||||||
|
// says to treat "gbk" as Code Page 936.
|
||||||
|
case c0 == 0x80:
|
||||||
|
r, size = '€', 1
|
||||||
|
|
||||||
|
case c0 < 0xff:
|
||||||
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
c1 := src[nSrc+1]
|
||||||
|
switch {
|
||||||
|
case 0x40 <= c1 && c1 < 0x7f:
|
||||||
|
c1 -= 0x40
|
||||||
|
case 0x80 <= c1 && c1 < 0xff:
|
||||||
|
c1 -= 0x41
|
||||||
|
case d.gb18030 && 0x30 <= c1 && c1 < 0x40:
|
||||||
|
if nSrc+3 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
// The second byte here is always ASCII, so we can set size
|
||||||
|
// to 1 in all cases.
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
c2 := src[nSrc+2]
|
||||||
|
if c2 < 0x81 || 0xff <= c2 {
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
c3 := src[nSrc+3]
|
||||||
|
if c3 < 0x30 || 0x3a <= c3 {
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
size = 4
|
||||||
|
r = ((rune(c0-0x81)*10+rune(c1-0x30))*126+rune(c2-0x81))*10 + rune(c3-0x30)
|
||||||
|
if r < 39420 {
|
||||||
|
i, j := 0, len(gb18030)
|
||||||
|
for i < j {
|
||||||
|
h := i + (j-i)/2
|
||||||
|
if r >= rune(gb18030[h][0]) {
|
||||||
|
i = h + 1
|
||||||
|
} else {
|
||||||
|
j = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
dec := &gb18030[i-1]
|
||||||
|
r += rune(dec[1]) - rune(dec[0])
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
r -= 189000
|
||||||
|
if 0 <= r && r < 0x100000 {
|
||||||
|
r += 0x10000
|
||||||
|
} else {
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
}
|
||||||
|
goto write
|
||||||
|
default:
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
r, size = '\ufffd', 2
|
||||||
|
if i := int(c0-0x81)*190 + int(c1); i < len(decode) {
|
||||||
|
r = rune(decode[i])
|
||||||
|
if r == 0 {
|
||||||
|
r = '\ufffd'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
}
|
||||||
|
|
||||||
|
write:
|
||||||
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type gbkEncoder struct {
|
||||||
|
transform.NopResetter
|
||||||
|
gb18030 bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e gbkEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, r2, size := rune(0), rune(0), 0
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
r = rune(src[nSrc])
|
||||||
|
|
||||||
|
// Decode a 1-byte rune.
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// Decode a multi-byte rune.
|
||||||
|
r, size = utf8.DecodeRune(src[nSrc:])
|
||||||
|
if size == 1 {
|
||||||
|
// All valid runes of size 1 (those below utf8.RuneSelf) were
|
||||||
|
// handled above. We have invalid UTF-8 or we haven't seen the
|
||||||
|
// full character yet.
|
||||||
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// func init checks that the switch covers all tables.
|
||||||
|
switch {
|
||||||
|
case encode0Low <= r && r < encode0High:
|
||||||
|
if r2 = rune(encode0[r-encode0Low]); r2 != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode1Low <= r && r < encode1High:
|
||||||
|
// Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC
|
||||||
|
// as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk
|
||||||
|
// says to treat "gbk" as Code Page 936.
|
||||||
|
if r == '€' {
|
||||||
|
r = 0x80
|
||||||
|
goto write1
|
||||||
|
}
|
||||||
|
if r2 = rune(encode1[r-encode1Low]); r2 != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode2Low <= r && r < encode2High:
|
||||||
|
if r2 = rune(encode2[r-encode2Low]); r2 != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode3Low <= r && r < encode3High:
|
||||||
|
if r2 = rune(encode3[r-encode3Low]); r2 != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode4Low <= r && r < encode4High:
|
||||||
|
if r2 = rune(encode4[r-encode4Low]); r2 != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if e.gb18030 {
|
||||||
|
if r < 0x10000 {
|
||||||
|
i, j := 0, len(gb18030)
|
||||||
|
for i < j {
|
||||||
|
h := i + (j-i)/2
|
||||||
|
if r >= rune(gb18030[h][1]) {
|
||||||
|
i = h + 1
|
||||||
|
} else {
|
||||||
|
j = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
dec := &gb18030[i-1]
|
||||||
|
r += rune(dec[0]) - rune(dec[1])
|
||||||
|
goto write4
|
||||||
|
} else if r < 0x110000 {
|
||||||
|
r += 189000 - 0x10000
|
||||||
|
goto write4
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = internal.ErrASCIIReplacement
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
write1:
|
||||||
|
if nDst >= len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst] = uint8(r)
|
||||||
|
nDst++
|
||||||
|
continue
|
||||||
|
|
||||||
|
write2:
|
||||||
|
if nDst+2 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+0] = uint8(r2 >> 8)
|
||||||
|
dst[nDst+1] = uint8(r2)
|
||||||
|
nDst += 2
|
||||||
|
continue
|
||||||
|
|
||||||
|
write4:
|
||||||
|
if nDst+4 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+3] = uint8(r%10 + 0x30)
|
||||||
|
r /= 10
|
||||||
|
dst[nDst+2] = uint8(r%126 + 0x81)
|
||||||
|
r /= 126
|
||||||
|
dst[nDst+1] = uint8(r%10 + 0x30)
|
||||||
|
r /= 10
|
||||||
|
dst[nDst+0] = uint8(r + 0x81)
|
||||||
|
nDst += 4
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Check that the hard-coded encode switch covers all tables.
|
||||||
|
if numEncodeTables != 5 {
|
||||||
|
panic("bad numEncodeTables")
|
||||||
|
}
|
||||||
|
}
|
245
vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go
generated
vendored
Normal file
245
vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go
generated
vendored
Normal file
@ -0,0 +1,245 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package simplifiedchinese
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/internal"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HZGB2312 is the HZ-GB2312 encoding.
|
||||||
|
var HZGB2312 encoding.Encoding = &hzGB2312
|
||||||
|
|
||||||
|
var hzGB2312 = internal.Encoding{
|
||||||
|
internal.FuncEncoding{hzGB2312NewDecoder, hzGB2312NewEncoder},
|
||||||
|
"HZ-GB2312",
|
||||||
|
identifier.HZGB2312,
|
||||||
|
}
|
||||||
|
|
||||||
|
func hzGB2312NewDecoder() transform.Transformer {
|
||||||
|
return new(hzGB2312Decoder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func hzGB2312NewEncoder() transform.Transformer {
|
||||||
|
return new(hzGB2312Encoder)
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
asciiState = iota
|
||||||
|
gbState
|
||||||
|
)
|
||||||
|
|
||||||
|
type hzGB2312Decoder int
|
||||||
|
|
||||||
|
func (d *hzGB2312Decoder) Reset() {
|
||||||
|
*d = asciiState
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *hzGB2312Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
loop:
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
c0 := src[nSrc]
|
||||||
|
if c0 >= utf8.RuneSelf {
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
|
||||||
|
if c0 == '~' {
|
||||||
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
r = utf8.RuneError
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
size = 2
|
||||||
|
switch src[nSrc+1] {
|
||||||
|
case '{':
|
||||||
|
*d = gbState
|
||||||
|
continue
|
||||||
|
case '}':
|
||||||
|
*d = asciiState
|
||||||
|
continue
|
||||||
|
case '~':
|
||||||
|
if nDst >= len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
dst[nDst] = '~'
|
||||||
|
nDst++
|
||||||
|
continue
|
||||||
|
case '\n':
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
r = utf8.RuneError
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if *d == asciiState {
|
||||||
|
r, size = rune(c0), 1
|
||||||
|
} else {
|
||||||
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
size = 2
|
||||||
|
c1 := src[nSrc+1]
|
||||||
|
if c0 < 0x21 || 0x7e <= c0 || c1 < 0x21 || 0x7f <= c1 {
|
||||||
|
// error
|
||||||
|
} else if i := int(c0-0x01)*190 + int(c1+0x3f); i < len(decode) {
|
||||||
|
r = rune(decode[i])
|
||||||
|
if r != 0 {
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c1 > utf8.RuneSelf {
|
||||||
|
// Be consistent and always treat non-ASCII as a single error.
|
||||||
|
size = 1
|
||||||
|
}
|
||||||
|
r = utf8.RuneError
|
||||||
|
}
|
||||||
|
|
||||||
|
write:
|
||||||
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type hzGB2312Encoder int
|
||||||
|
|
||||||
|
func (d *hzGB2312Encoder) Reset() {
|
||||||
|
*d = asciiState
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *hzGB2312Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
r = rune(src[nSrc])
|
||||||
|
|
||||||
|
// Decode a 1-byte rune.
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
if r == '~' {
|
||||||
|
if nDst+2 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+0] = '~'
|
||||||
|
dst[nDst+1] = '~'
|
||||||
|
nDst += 2
|
||||||
|
continue
|
||||||
|
} else if *e != asciiState {
|
||||||
|
if nDst+3 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
*e = asciiState
|
||||||
|
dst[nDst+0] = '~'
|
||||||
|
dst[nDst+1] = '}'
|
||||||
|
nDst += 2
|
||||||
|
} else if nDst >= len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst] = uint8(r)
|
||||||
|
nDst += 1
|
||||||
|
continue
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a multi-byte rune.
|
||||||
|
r, size = utf8.DecodeRune(src[nSrc:])
|
||||||
|
if size == 1 {
|
||||||
|
// All valid runes of size 1 (those below utf8.RuneSelf) were
|
||||||
|
// handled above. We have invalid UTF-8 or we haven't seen the
|
||||||
|
// full character yet.
|
||||||
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// func init checks that the switch covers all tables.
|
||||||
|
switch {
|
||||||
|
case encode0Low <= r && r < encode0High:
|
||||||
|
if r = rune(encode0[r-encode0Low]); r != 0 {
|
||||||
|
goto writeGB
|
||||||
|
}
|
||||||
|
case encode1Low <= r && r < encode1High:
|
||||||
|
if r = rune(encode1[r-encode1Low]); r != 0 {
|
||||||
|
goto writeGB
|
||||||
|
}
|
||||||
|
case encode2Low <= r && r < encode2High:
|
||||||
|
if r = rune(encode2[r-encode2Low]); r != 0 {
|
||||||
|
goto writeGB
|
||||||
|
}
|
||||||
|
case encode3Low <= r && r < encode3High:
|
||||||
|
if r = rune(encode3[r-encode3Low]); r != 0 {
|
||||||
|
goto writeGB
|
||||||
|
}
|
||||||
|
case encode4Low <= r && r < encode4High:
|
||||||
|
if r = rune(encode4[r-encode4Low]); r != 0 {
|
||||||
|
goto writeGB
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
terminateInASCIIState:
|
||||||
|
// Switch back to ASCII state in case of error so that an ASCII
|
||||||
|
// replacement character can be written in the correct state.
|
||||||
|
if *e != asciiState {
|
||||||
|
if nDst+2 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+0] = '~'
|
||||||
|
dst[nDst+1] = '}'
|
||||||
|
nDst += 2
|
||||||
|
}
|
||||||
|
err = internal.ErrASCIIReplacement
|
||||||
|
break
|
||||||
|
|
||||||
|
writeGB:
|
||||||
|
c0 := uint8(r>>8) - 0x80
|
||||||
|
c1 := uint8(r) - 0x80
|
||||||
|
if c0 < 0x21 || 0x7e <= c0 || c1 < 0x21 || 0x7f <= c1 {
|
||||||
|
goto terminateInASCIIState
|
||||||
|
}
|
||||||
|
if *e == asciiState {
|
||||||
|
if nDst+4 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
*e = gbState
|
||||||
|
dst[nDst+0] = '~'
|
||||||
|
dst[nDst+1] = '{'
|
||||||
|
nDst += 2
|
||||||
|
} else if nDst+2 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+0] = c0
|
||||||
|
dst[nDst+1] = c1
|
||||||
|
nDst += 2
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// TODO: should one always terminate in ASCII state to make it safe to
|
||||||
|
// concatenate two HZ-GB2312-encoded strings?
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
161
vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go
generated
vendored
Normal file
161
vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
// This program generates tables.go:
|
||||||
|
// go run maketables.go | gofmt > tables.go
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
|
||||||
|
fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n")
|
||||||
|
fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n")
|
||||||
|
|
||||||
|
printGB18030()
|
||||||
|
printGBK()
|
||||||
|
}
|
||||||
|
|
||||||
|
func printGB18030() {
|
||||||
|
res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Get: %v", err)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
|
||||||
|
fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n")
|
||||||
|
fmt.Printf("var gb18030 = [...][2]uint16{\n")
|
||||||
|
scanner := bufio.NewScanner(res.Body)
|
||||||
|
for scanner.Scan() {
|
||||||
|
s := strings.TrimSpace(scanner.Text())
|
||||||
|
if s == "" || s[0] == '#' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
x, y := uint32(0), uint32(0)
|
||||||
|
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
|
||||||
|
log.Fatalf("could not parse %q", s)
|
||||||
|
}
|
||||||
|
if x < 0x10000 && y < 0x10000 {
|
||||||
|
fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Printf("}\n\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func printGBK() {
|
||||||
|
res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Get: %v", err)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
|
||||||
|
mapping := [65536]uint16{}
|
||||||
|
reverse := [65536]uint16{}
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(res.Body)
|
||||||
|
for scanner.Scan() {
|
||||||
|
s := strings.TrimSpace(scanner.Text())
|
||||||
|
if s == "" || s[0] == '#' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
x, y := uint16(0), uint16(0)
|
||||||
|
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
|
||||||
|
log.Fatalf("could not parse %q", s)
|
||||||
|
}
|
||||||
|
if x < 0 || 126*190 <= x {
|
||||||
|
log.Fatalf("GBK code %d is out of range", x)
|
||||||
|
}
|
||||||
|
mapping[x] = y
|
||||||
|
if reverse[y] == 0 {
|
||||||
|
c0, c1 := x/190, x%190
|
||||||
|
if c1 >= 0x3f {
|
||||||
|
c1++
|
||||||
|
}
|
||||||
|
reverse[y] = (0x81+c0)<<8 | (0x40 + c1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
log.Fatalf("scanner error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n")
|
||||||
|
fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n")
|
||||||
|
fmt.Printf("var decode = [...]uint16{\n")
|
||||||
|
for i, v := range mapping {
|
||||||
|
if v != 0 {
|
||||||
|
fmt.Printf("\t%d: 0x%04X,\n", i, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Printf("}\n\n")
|
||||||
|
|
||||||
|
// Any run of at least separation continuous zero entries in the reverse map will
|
||||||
|
// be a separate encode table.
|
||||||
|
const separation = 1024
|
||||||
|
|
||||||
|
intervals := []interval(nil)
|
||||||
|
low, high := -1, -1
|
||||||
|
for i, v := range reverse {
|
||||||
|
if v == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if low < 0 {
|
||||||
|
low = i
|
||||||
|
} else if i-high >= separation {
|
||||||
|
if high >= 0 {
|
||||||
|
intervals = append(intervals, interval{low, high})
|
||||||
|
}
|
||||||
|
low = i
|
||||||
|
}
|
||||||
|
high = i + 1
|
||||||
|
}
|
||||||
|
if high >= 0 {
|
||||||
|
intervals = append(intervals, interval{low, high})
|
||||||
|
}
|
||||||
|
sort.Sort(byDecreasingLength(intervals))
|
||||||
|
|
||||||
|
fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
|
||||||
|
fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n")
|
||||||
|
fmt.Printf("// sorted by decreasing length.\n")
|
||||||
|
for i, v := range intervals {
|
||||||
|
fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
|
||||||
|
}
|
||||||
|
fmt.Printf("\n")
|
||||||
|
|
||||||
|
for i, v := range intervals {
|
||||||
|
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
|
||||||
|
fmt.Printf("var encode%d = [...]uint16{\n", i)
|
||||||
|
for j := v.low; j < v.high; j++ {
|
||||||
|
x := reverse[j]
|
||||||
|
if x == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
|
||||||
|
}
|
||||||
|
fmt.Printf("}\n\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// interval is a half-open interval [low, high).
|
||||||
|
type interval struct {
|
||||||
|
low, high int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i interval) len() int { return i.high - i.low }
|
||||||
|
|
||||||
|
// byDecreasingLength sorts intervals by decreasing length.
|
||||||
|
type byDecreasingLength []interval
|
||||||
|
|
||||||
|
func (b byDecreasingLength) Len() int { return len(b) }
|
||||||
|
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
|
||||||
|
func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
43999
vendor/golang.org/x/text/encoding/simplifiedchinese/tables.go
generated
vendored
Normal file
43999
vendor/golang.org/x/text/encoding/simplifiedchinese/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
199
vendor/golang.org/x/text/encoding/traditionalchinese/big5.go
generated
vendored
Normal file
199
vendor/golang.org/x/text/encoding/traditionalchinese/big5.go
generated
vendored
Normal file
@ -0,0 +1,199 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package traditionalchinese
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/internal"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// All is a list of all defined encodings in this package.
|
||||||
|
var All = []encoding.Encoding{Big5}
|
||||||
|
|
||||||
|
// Big5 is the Big5 encoding, also known as Code Page 950.
|
||||||
|
var Big5 encoding.Encoding = &big5
|
||||||
|
|
||||||
|
var big5 = internal.Encoding{
|
||||||
|
&internal.SimpleEncoding{big5Decoder{}, big5Encoder{}},
|
||||||
|
"Big5",
|
||||||
|
identifier.Big5,
|
||||||
|
}
|
||||||
|
|
||||||
|
type big5Decoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
func (big5Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size, s := rune(0), 0, ""
|
||||||
|
loop:
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
switch c0 := src[nSrc]; {
|
||||||
|
case c0 < utf8.RuneSelf:
|
||||||
|
r, size = rune(c0), 1
|
||||||
|
|
||||||
|
case 0x81 <= c0 && c0 < 0xff:
|
||||||
|
if nSrc+1 >= len(src) {
|
||||||
|
if !atEOF {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
c1 := src[nSrc+1]
|
||||||
|
switch {
|
||||||
|
case 0x40 <= c1 && c1 < 0x7f:
|
||||||
|
c1 -= 0x40
|
||||||
|
case 0xa1 <= c1 && c1 < 0xff:
|
||||||
|
c1 -= 0x62
|
||||||
|
case c1 < 0x40:
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
goto write
|
||||||
|
default:
|
||||||
|
r, size = utf8.RuneError, 2
|
||||||
|
goto write
|
||||||
|
}
|
||||||
|
r, size = '\ufffd', 2
|
||||||
|
if i := int(c0-0x81)*157 + int(c1); i < len(decode) {
|
||||||
|
if 1133 <= i && i < 1167 {
|
||||||
|
// The two-rune special cases for LATIN CAPITAL / SMALL E WITH CIRCUMFLEX
|
||||||
|
// AND MACRON / CARON are from http://encoding.spec.whatwg.org/#big5
|
||||||
|
switch i {
|
||||||
|
case 1133:
|
||||||
|
s = "\u00CA\u0304"
|
||||||
|
goto writeStr
|
||||||
|
case 1135:
|
||||||
|
s = "\u00CA\u030C"
|
||||||
|
goto writeStr
|
||||||
|
case 1164:
|
||||||
|
s = "\u00EA\u0304"
|
||||||
|
goto writeStr
|
||||||
|
case 1166:
|
||||||
|
s = "\u00EA\u030C"
|
||||||
|
goto writeStr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r = rune(decode[i])
|
||||||
|
if r == 0 {
|
||||||
|
r = '\ufffd'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
r, size = utf8.RuneError, 1
|
||||||
|
}
|
||||||
|
|
||||||
|
write:
|
||||||
|
if nDst+utf8.RuneLen(r) > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
|
continue loop
|
||||||
|
|
||||||
|
writeStr:
|
||||||
|
if nDst+len(s) > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
nDst += copy(dst[nDst:], s)
|
||||||
|
continue loop
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type big5Encoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
func (big5Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
r, size := rune(0), 0
|
||||||
|
for ; nSrc < len(src); nSrc += size {
|
||||||
|
r = rune(src[nSrc])
|
||||||
|
|
||||||
|
// Decode a 1-byte rune.
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
if nDst >= len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst] = uint8(r)
|
||||||
|
nDst++
|
||||||
|
continue
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// Decode a multi-byte rune.
|
||||||
|
r, size = utf8.DecodeRune(src[nSrc:])
|
||||||
|
if size == 1 {
|
||||||
|
// All valid runes of size 1 (those below utf8.RuneSelf) were
|
||||||
|
// handled above. We have invalid UTF-8 or we haven't seen the
|
||||||
|
// full character yet.
|
||||||
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if r >= utf8.RuneSelf {
|
||||||
|
// func init checks that the switch covers all tables.
|
||||||
|
switch {
|
||||||
|
case encode0Low <= r && r < encode0High:
|
||||||
|
if r = rune(encode0[r-encode0Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode1Low <= r && r < encode1High:
|
||||||
|
if r = rune(encode1[r-encode1Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode2Low <= r && r < encode2High:
|
||||||
|
if r = rune(encode2[r-encode2Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode3Low <= r && r < encode3High:
|
||||||
|
if r = rune(encode3[r-encode3Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode4Low <= r && r < encode4High:
|
||||||
|
if r = rune(encode4[r-encode4Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode5Low <= r && r < encode5High:
|
||||||
|
if r = rune(encode5[r-encode5Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode6Low <= r && r < encode6High:
|
||||||
|
if r = rune(encode6[r-encode6Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
case encode7Low <= r && r < encode7High:
|
||||||
|
if r = rune(encode7[r-encode7Low]); r != 0 {
|
||||||
|
goto write2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = internal.ErrASCIIReplacement
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
write2:
|
||||||
|
if nDst+2 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+0] = uint8(r >> 8)
|
||||||
|
dst[nDst+1] = uint8(r)
|
||||||
|
nDst += 2
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Check that the hard-coded encode switch covers all tables.
|
||||||
|
if numEncodeTables != 8 {
|
||||||
|
panic("bad numEncodeTables")
|
||||||
|
}
|
||||||
|
}
|
140
vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go
generated
vendored
Normal file
140
vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go
generated
vendored
Normal file
@ -0,0 +1,140 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
// This program generates tables.go:
|
||||||
|
// go run maketables.go | gofmt > tables.go
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
|
||||||
|
fmt.Printf("// Package traditionalchinese provides Traditional Chinese encodings such as Big5.\n")
|
||||||
|
fmt.Printf(`package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese"` + "\n\n")
|
||||||
|
|
||||||
|
res, err := http.Get("http://encoding.spec.whatwg.org/index-big5.txt")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Get: %v", err)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
|
||||||
|
mapping := [65536]uint32{}
|
||||||
|
reverse := [65536 * 4]uint16{}
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(res.Body)
|
||||||
|
for scanner.Scan() {
|
||||||
|
s := strings.TrimSpace(scanner.Text())
|
||||||
|
if s == "" || s[0] == '#' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
x, y := uint16(0), uint32(0)
|
||||||
|
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
|
||||||
|
log.Fatalf("could not parse %q", s)
|
||||||
|
}
|
||||||
|
if x < 0 || 126*157 <= x {
|
||||||
|
log.Fatalf("Big5 code %d is out of range", x)
|
||||||
|
}
|
||||||
|
mapping[x] = y
|
||||||
|
|
||||||
|
// The WHATWG spec http://encoding.spec.whatwg.org/#indexes says that
|
||||||
|
// "The index pointer for code point in index is the first pointer
|
||||||
|
// corresponding to code point in index", which would normally mean
|
||||||
|
// that the code below should be guarded by "if reverse[y] == 0", but
|
||||||
|
// last instead of first seems to match the behavior of
|
||||||
|
// "iconv -f UTF-8 -t BIG5". For example, U+8005 者 occurs twice in
|
||||||
|
// http://encoding.spec.whatwg.org/index-big5.txt, as index 2148
|
||||||
|
// (encoded as "\x8e\xcd") and index 6543 (encoded as "\xaa\xcc")
|
||||||
|
// and "echo 者 | iconv -f UTF-8 -t BIG5 | xxd" gives "\xaa\xcc".
|
||||||
|
c0, c1 := x/157, x%157
|
||||||
|
if c1 < 0x3f {
|
||||||
|
c1 += 0x40
|
||||||
|
} else {
|
||||||
|
c1 += 0x62
|
||||||
|
}
|
||||||
|
reverse[y] = (0x81+c0)<<8 | c1
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
log.Fatalf("scanner error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("// decode is the decoding table from Big5 code to Unicode.\n")
|
||||||
|
fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-big5.txt\n")
|
||||||
|
fmt.Printf("var decode = [...]uint32{\n")
|
||||||
|
for i, v := range mapping {
|
||||||
|
if v != 0 {
|
||||||
|
fmt.Printf("\t%d: 0x%08X,\n", i, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Printf("}\n\n")
|
||||||
|
|
||||||
|
// Any run of at least separation continuous zero entries in the reverse map will
|
||||||
|
// be a separate encode table.
|
||||||
|
const separation = 1024
|
||||||
|
|
||||||
|
intervals := []interval(nil)
|
||||||
|
low, high := -1, -1
|
||||||
|
for i, v := range reverse {
|
||||||
|
if v == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if low < 0 {
|
||||||
|
low = i
|
||||||
|
} else if i-high >= separation {
|
||||||
|
if high >= 0 {
|
||||||
|
intervals = append(intervals, interval{low, high})
|
||||||
|
}
|
||||||
|
low = i
|
||||||
|
}
|
||||||
|
high = i + 1
|
||||||
|
}
|
||||||
|
if high >= 0 {
|
||||||
|
intervals = append(intervals, interval{low, high})
|
||||||
|
}
|
||||||
|
sort.Sort(byDecreasingLength(intervals))
|
||||||
|
|
||||||
|
fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
|
||||||
|
fmt.Printf("// encodeX are the encoding tables from Unicode to Big5 code,\n")
|
||||||
|
fmt.Printf("// sorted by decreasing length.\n")
|
||||||
|
for i, v := range intervals {
|
||||||
|
fmt.Printf("// encode%d: %5d entries for runes in [%6d, %6d).\n", i, v.len(), v.low, v.high)
|
||||||
|
}
|
||||||
|
fmt.Printf("\n")
|
||||||
|
|
||||||
|
for i, v := range intervals {
|
||||||
|
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
|
||||||
|
fmt.Printf("var encode%d = [...]uint16{\n", i)
|
||||||
|
for j := v.low; j < v.high; j++ {
|
||||||
|
x := reverse[j]
|
||||||
|
if x == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
|
||||||
|
}
|
||||||
|
fmt.Printf("}\n\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// interval is a half-open interval [low, high).
|
||||||
|
type interval struct {
|
||||||
|
low, high int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i interval) len() int { return i.high - i.low }
|
||||||
|
|
||||||
|
// byDecreasingLength sorts intervals by decreasing length.
|
||||||
|
type byDecreasingLength []interval
|
||||||
|
|
||||||
|
func (b byDecreasingLength) Len() int { return len(b) }
|
||||||
|
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
|
||||||
|
func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
37142
vendor/golang.org/x/text/encoding/traditionalchinese/tables.go
generated
vendored
Normal file
37142
vendor/golang.org/x/text/encoding/traditionalchinese/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
82
vendor/golang.org/x/text/encoding/unicode/override.go
generated
vendored
Normal file
82
vendor/golang.org/x/text/encoding/unicode/override.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package unicode
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BOMOverride returns a new decoder transformer that is identical to fallback,
|
||||||
|
// except that the presence of a Byte Order Mark at the start of the input
|
||||||
|
// causes it to switch to the corresponding Unicode decoding. It will only
|
||||||
|
// consider BOMs for UTF-8, UTF-16BE, and UTF-16LE.
|
||||||
|
//
|
||||||
|
// This differs from using ExpectBOM by allowing a BOM to switch to UTF-8, not
|
||||||
|
// just UTF-16 variants, and allowing falling back to any encoding scheme.
|
||||||
|
//
|
||||||
|
// This technique is recommended by the W3C for use in HTML 5: "For
|
||||||
|
// compatibility with deployed content, the byte order mark (also known as BOM)
|
||||||
|
// is considered more authoritative than anything else."
|
||||||
|
// http://www.w3.org/TR/encoding/#specification-hooks
|
||||||
|
//
|
||||||
|
// Using BOMOverride is mostly intended for use cases where the first characters
|
||||||
|
// of a fallback encoding are known to not be a BOM, for example, for valid HTML
|
||||||
|
// and most encodings.
|
||||||
|
func BOMOverride(fallback transform.Transformer) transform.Transformer {
|
||||||
|
// TODO: possibly allow a variadic argument of unicode encodings to allow
|
||||||
|
// specifying details of which fallbacks are supported as well as
|
||||||
|
// specifying the details of the implementations. This would also allow for
|
||||||
|
// support for UTF-32, which should not be supported by default.
|
||||||
|
return &bomOverride{fallback: fallback}
|
||||||
|
}
|
||||||
|
|
||||||
|
type bomOverride struct {
|
||||||
|
fallback transform.Transformer
|
||||||
|
current transform.Transformer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *bomOverride) Reset() {
|
||||||
|
d.current = nil
|
||||||
|
d.fallback.Reset()
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// TODO: we could use decode functions here, instead of allocating a new
|
||||||
|
// decoder on every NewDecoder as IgnoreBOM decoders can be stateless.
|
||||||
|
utf16le = UTF16(LittleEndian, IgnoreBOM)
|
||||||
|
utf16be = UTF16(BigEndian, IgnoreBOM)
|
||||||
|
)
|
||||||
|
|
||||||
|
const utf8BOM = "\ufeff"
|
||||||
|
|
||||||
|
func (d *bomOverride) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
if d.current != nil {
|
||||||
|
return d.current.Transform(dst, src, atEOF)
|
||||||
|
}
|
||||||
|
if len(src) < 3 && !atEOF {
|
||||||
|
return 0, 0, transform.ErrShortSrc
|
||||||
|
}
|
||||||
|
d.current = d.fallback
|
||||||
|
bomSize := 0
|
||||||
|
if len(src) >= 2 {
|
||||||
|
if src[0] == 0xFF && src[1] == 0xFE {
|
||||||
|
d.current = utf16le.NewDecoder()
|
||||||
|
bomSize = 2
|
||||||
|
} else if src[0] == 0xFE && src[1] == 0xFF {
|
||||||
|
d.current = utf16be.NewDecoder()
|
||||||
|
bomSize = 2
|
||||||
|
} else if len(src) >= 3 &&
|
||||||
|
src[0] == utf8BOM[0] &&
|
||||||
|
src[1] == utf8BOM[1] &&
|
||||||
|
src[2] == utf8BOM[2] {
|
||||||
|
d.current = transform.Nop
|
||||||
|
bomSize = 3
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if bomSize < len(src) {
|
||||||
|
nDst, nSrc, err = d.current.Transform(dst, src[bomSize:], atEOF)
|
||||||
|
}
|
||||||
|
return nDst, nSrc + bomSize, err
|
||||||
|
}
|
434
vendor/golang.org/x/text/encoding/unicode/unicode.go
generated
vendored
Normal file
434
vendor/golang.org/x/text/encoding/unicode/unicode.go
generated
vendored
Normal file
@ -0,0 +1,434 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package unicode provides Unicode encodings such as UTF-16.
|
||||||
|
package unicode // import "golang.org/x/text/encoding/unicode"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"unicode/utf16"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/internal"
|
||||||
|
"golang.org/x/text/encoding/internal/identifier"
|
||||||
|
"golang.org/x/text/internal/utf8internal"
|
||||||
|
"golang.org/x/text/runes"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO: I think the Transformers really should return errors on unmatched
|
||||||
|
// surrogate pairs and odd numbers of bytes. This is not required by RFC 2781,
|
||||||
|
// which leaves it open, but is suggested by WhatWG. It will allow for all error
|
||||||
|
// modes as defined by WhatWG: fatal, HTML and Replacement. This would require
|
||||||
|
// the introduction of some kind of error type for conveying the erroneous code
|
||||||
|
// point.
|
||||||
|
|
||||||
|
// UTF8 is the UTF-8 encoding.
|
||||||
|
var UTF8 encoding.Encoding = utf8enc
|
||||||
|
|
||||||
|
var utf8enc = &internal.Encoding{
|
||||||
|
&internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()},
|
||||||
|
"UTF-8",
|
||||||
|
identifier.UTF8,
|
||||||
|
}
|
||||||
|
|
||||||
|
type utf8Decoder struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
var pSrc int // point from which to start copy in src
|
||||||
|
var accept utf8internal.AcceptRange
|
||||||
|
|
||||||
|
// The decoder can only make the input larger, not smaller.
|
||||||
|
n := len(src)
|
||||||
|
if len(dst) < n {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
n = len(dst)
|
||||||
|
atEOF = false
|
||||||
|
}
|
||||||
|
for nSrc < n {
|
||||||
|
c := src[nSrc]
|
||||||
|
if c < utf8.RuneSelf {
|
||||||
|
nSrc++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
first := utf8internal.First[c]
|
||||||
|
size := int(first & utf8internal.SizeMask)
|
||||||
|
if first == utf8internal.FirstInvalid {
|
||||||
|
goto handleInvalid // invalid starter byte
|
||||||
|
}
|
||||||
|
accept = utf8internal.AcceptRanges[first>>utf8internal.AcceptShift]
|
||||||
|
if nSrc+size > n {
|
||||||
|
if !atEOF {
|
||||||
|
// We may stop earlier than necessary here if the short sequence
|
||||||
|
// has invalid bytes. Not checking for this simplifies the code
|
||||||
|
// and may avoid duplicate computations in certain conditions.
|
||||||
|
if err == nil {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Determine the maximal subpart of an ill-formed subsequence.
|
||||||
|
switch {
|
||||||
|
case nSrc+1 >= n || src[nSrc+1] < accept.Lo || accept.Hi < src[nSrc+1]:
|
||||||
|
size = 1
|
||||||
|
case nSrc+2 >= n || src[nSrc+2] < utf8internal.LoCB || utf8internal.HiCB < src[nSrc+2]:
|
||||||
|
size = 2
|
||||||
|
default:
|
||||||
|
size = 3 // As we are short, the maximum is 3.
|
||||||
|
}
|
||||||
|
goto handleInvalid
|
||||||
|
}
|
||||||
|
if c = src[nSrc+1]; c < accept.Lo || accept.Hi < c {
|
||||||
|
size = 1
|
||||||
|
goto handleInvalid // invalid continuation byte
|
||||||
|
} else if size == 2 {
|
||||||
|
} else if c = src[nSrc+2]; c < utf8internal.LoCB || utf8internal.HiCB < c {
|
||||||
|
size = 2
|
||||||
|
goto handleInvalid // invalid continuation byte
|
||||||
|
} else if size == 3 {
|
||||||
|
} else if c = src[nSrc+3]; c < utf8internal.LoCB || utf8internal.HiCB < c {
|
||||||
|
size = 3
|
||||||
|
goto handleInvalid // invalid continuation byte
|
||||||
|
}
|
||||||
|
nSrc += size
|
||||||
|
continue
|
||||||
|
|
||||||
|
handleInvalid:
|
||||||
|
// Copy the scanned input so far.
|
||||||
|
nDst += copy(dst[nDst:], src[pSrc:nSrc])
|
||||||
|
|
||||||
|
// Append RuneError to the destination.
|
||||||
|
const runeError = "\ufffd"
|
||||||
|
if nDst+len(runeError) > len(dst) {
|
||||||
|
return nDst, nSrc, transform.ErrShortDst
|
||||||
|
}
|
||||||
|
nDst += copy(dst[nDst:], runeError)
|
||||||
|
|
||||||
|
// Skip the maximal subpart of an ill-formed subsequence according to
|
||||||
|
// the W3C standard way instead of the Go way. This Transform is
|
||||||
|
// probably the only place in the text repo where it is warranted.
|
||||||
|
nSrc += size
|
||||||
|
pSrc = nSrc
|
||||||
|
|
||||||
|
// Recompute the maximum source length.
|
||||||
|
if sz := len(dst) - nDst; sz < len(src)-nSrc {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
n = nSrc + sz
|
||||||
|
atEOF = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nDst + copy(dst[nDst:], src[pSrc:nSrc]), nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UTF16 returns a UTF-16 Encoding for the given default endianness and byte
|
||||||
|
// order mark (BOM) policy.
|
||||||
|
//
|
||||||
|
// When decoding from UTF-16 to UTF-8, if the BOMPolicy is IgnoreBOM then
|
||||||
|
// neither BOMs U+FEFF nor noncharacters U+FFFE in the input stream will affect
|
||||||
|
// the endianness used for decoding, and will instead be output as their
|
||||||
|
// standard UTF-8 encodings: "\xef\xbb\xbf" and "\xef\xbf\xbe". If the BOMPolicy
|
||||||
|
// is UseBOM or ExpectBOM a staring BOM is not written to the UTF-8 output.
|
||||||
|
// Instead, it overrides the default endianness e for the remainder of the
|
||||||
|
// transformation. Any subsequent BOMs U+FEFF or noncharacters U+FFFE will not
|
||||||
|
// affect the endianness used, and will instead be output as their standard
|
||||||
|
// UTF-8 encodings. For UseBOM, if there is no starting BOM, it will proceed
|
||||||
|
// with the default Endianness. For ExpectBOM, in that case, the transformation
|
||||||
|
// will return early with an ErrMissingBOM error.
|
||||||
|
//
|
||||||
|
// When encoding from UTF-8 to UTF-16, a BOM will be inserted at the start of
|
||||||
|
// the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM will not
|
||||||
|
// be inserted. The UTF-8 input does not need to contain a BOM.
|
||||||
|
//
|
||||||
|
// There is no concept of a 'native' endianness. If the UTF-16 data is produced
|
||||||
|
// and consumed in a greater context that implies a certain endianness, use
|
||||||
|
// IgnoreBOM. Otherwise, use ExpectBOM and always produce and consume a BOM.
|
||||||
|
//
|
||||||
|
// In the language of http://www.unicode.org/faq/utf_bom.html#bom10, IgnoreBOM
|
||||||
|
// corresponds to "Where the precise type of the data stream is known... the
|
||||||
|
// BOM should not be used" and ExpectBOM corresponds to "A particular
|
||||||
|
// protocol... may require use of the BOM".
|
||||||
|
func UTF16(e Endianness, b BOMPolicy) encoding.Encoding {
|
||||||
|
return utf16Encoding{config{e, b}, mibValue[e][b&bomMask]}
|
||||||
|
}
|
||||||
|
|
||||||
|
// mibValue maps Endianness and BOMPolicy settings to MIB constants. Note that
|
||||||
|
// some configurations map to the same MIB identifier. RFC 2781 has requirements
|
||||||
|
// and recommendations. Some of the "configurations" are merely recommendations,
|
||||||
|
// so multiple configurations could match.
|
||||||
|
var mibValue = map[Endianness][numBOMValues]identifier.MIB{
|
||||||
|
BigEndian: [numBOMValues]identifier.MIB{
|
||||||
|
IgnoreBOM: identifier.UTF16BE,
|
||||||
|
UseBOM: identifier.UTF16, // BigEnding default is preferred by RFC 2781.
|
||||||
|
// TODO: acceptBOM | strictBOM would map to UTF16BE as well.
|
||||||
|
},
|
||||||
|
LittleEndian: [numBOMValues]identifier.MIB{
|
||||||
|
IgnoreBOM: identifier.UTF16LE,
|
||||||
|
UseBOM: identifier.UTF16, // LittleEndian default is allowed and preferred on Windows.
|
||||||
|
// TODO: acceptBOM | strictBOM would map to UTF16LE as well.
|
||||||
|
},
|
||||||
|
// ExpectBOM is not widely used and has no valid MIB identifier.
|
||||||
|
}
|
||||||
|
|
||||||
|
// All lists a configuration for each IANA-defined UTF-16 variant.
|
||||||
|
var All = []encoding.Encoding{
|
||||||
|
UTF8,
|
||||||
|
UTF16(BigEndian, UseBOM),
|
||||||
|
UTF16(BigEndian, IgnoreBOM),
|
||||||
|
UTF16(LittleEndian, IgnoreBOM),
|
||||||
|
}
|
||||||
|
|
||||||
|
// BOMPolicy is a UTF-16 encoding's byte order mark policy.
|
||||||
|
type BOMPolicy uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
writeBOM BOMPolicy = 0x01
|
||||||
|
acceptBOM BOMPolicy = 0x02
|
||||||
|
requireBOM BOMPolicy = 0x04
|
||||||
|
bomMask BOMPolicy = 0x07
|
||||||
|
|
||||||
|
// HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a
|
||||||
|
// map of an array of length 8 of a type that is also used as a key or value
|
||||||
|
// in another map). See golang.org/issue/11354.
|
||||||
|
// TODO: consider changing this value back to 8 if the use of 1.4.* has
|
||||||
|
// been minimized.
|
||||||
|
numBOMValues = 8 + 1
|
||||||
|
|
||||||
|
// IgnoreBOM means to ignore any byte order marks.
|
||||||
|
IgnoreBOM BOMPolicy = 0
|
||||||
|
// Common and RFC 2781-compliant interpretation for UTF-16BE/LE.
|
||||||
|
|
||||||
|
// UseBOM means that the UTF-16 form may start with a byte order mark, which
|
||||||
|
// will be used to override the default encoding.
|
||||||
|
UseBOM BOMPolicy = writeBOM | acceptBOM
|
||||||
|
// Common and RFC 2781-compliant interpretation for UTF-16.
|
||||||
|
|
||||||
|
// ExpectBOM means that the UTF-16 form must start with a byte order mark,
|
||||||
|
// which will be used to override the default encoding.
|
||||||
|
ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM
|
||||||
|
// Used in Java as Unicode (not to be confused with Java's UTF-16) and
|
||||||
|
// ICU's UTF-16,version=1. Not compliant with RFC 2781.
|
||||||
|
|
||||||
|
// TODO (maybe): strictBOM: BOM must match Endianness. This would allow:
|
||||||
|
// - UTF-16(B|L)E,version=1: writeBOM | acceptBOM | requireBOM | strictBOM
|
||||||
|
// (UnicodeBig and UnicodeLittle in Java)
|
||||||
|
// - RFC 2781-compliant, but less common interpretation for UTF-16(B|L)E:
|
||||||
|
// acceptBOM | strictBOM (e.g. assigned to CheckBOM).
|
||||||
|
// This addition would be consistent with supporting ExpectBOM.
|
||||||
|
)
|
||||||
|
|
||||||
|
// Endianness is a UTF-16 encoding's default endianness.
|
||||||
|
type Endianness bool
|
||||||
|
|
||||||
|
const (
|
||||||
|
// BigEndian is UTF-16BE.
|
||||||
|
BigEndian Endianness = false
|
||||||
|
// LittleEndian is UTF-16LE.
|
||||||
|
LittleEndian Endianness = true
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrMissingBOM means that decoding UTF-16 input with ExpectBOM did not find a
|
||||||
|
// starting byte order mark.
|
||||||
|
var ErrMissingBOM = errors.New("encoding: missing byte order mark")
|
||||||
|
|
||||||
|
type utf16Encoding struct {
|
||||||
|
config
|
||||||
|
mib identifier.MIB
|
||||||
|
}
|
||||||
|
|
||||||
|
type config struct {
|
||||||
|
endianness Endianness
|
||||||
|
bomPolicy BOMPolicy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u utf16Encoding) NewDecoder() *encoding.Decoder {
|
||||||
|
return &encoding.Decoder{Transformer: &utf16Decoder{
|
||||||
|
initial: u.config,
|
||||||
|
current: u.config,
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u utf16Encoding) NewEncoder() *encoding.Encoder {
|
||||||
|
return &encoding.Encoder{Transformer: &utf16Encoder{
|
||||||
|
endianness: u.endianness,
|
||||||
|
initialBOMPolicy: u.bomPolicy,
|
||||||
|
currentBOMPolicy: u.bomPolicy,
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u utf16Encoding) ID() (mib identifier.MIB, other string) {
|
||||||
|
return u.mib, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u utf16Encoding) String() string {
|
||||||
|
e, b := "B", ""
|
||||||
|
if u.endianness == LittleEndian {
|
||||||
|
e = "L"
|
||||||
|
}
|
||||||
|
switch u.bomPolicy {
|
||||||
|
case ExpectBOM:
|
||||||
|
b = "Expect"
|
||||||
|
case UseBOM:
|
||||||
|
b = "Use"
|
||||||
|
case IgnoreBOM:
|
||||||
|
b = "Ignore"
|
||||||
|
}
|
||||||
|
return "UTF-16" + e + "E (" + b + " BOM)"
|
||||||
|
}
|
||||||
|
|
||||||
|
type utf16Decoder struct {
|
||||||
|
initial config
|
||||||
|
current config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *utf16Decoder) Reset() {
|
||||||
|
u.current = u.initial
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
if len(src) == 0 {
|
||||||
|
if atEOF && u.current.bomPolicy&requireBOM != 0 {
|
||||||
|
return 0, 0, ErrMissingBOM
|
||||||
|
}
|
||||||
|
return 0, 0, nil
|
||||||
|
}
|
||||||
|
if u.current.bomPolicy&acceptBOM != 0 {
|
||||||
|
if len(src) < 2 {
|
||||||
|
return 0, 0, transform.ErrShortSrc
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case src[0] == 0xfe && src[1] == 0xff:
|
||||||
|
u.current.endianness = BigEndian
|
||||||
|
nSrc = 2
|
||||||
|
case src[0] == 0xff && src[1] == 0xfe:
|
||||||
|
u.current.endianness = LittleEndian
|
||||||
|
nSrc = 2
|
||||||
|
default:
|
||||||
|
if u.current.bomPolicy&requireBOM != 0 {
|
||||||
|
return 0, 0, ErrMissingBOM
|
||||||
|
}
|
||||||
|
}
|
||||||
|
u.current.bomPolicy = IgnoreBOM
|
||||||
|
}
|
||||||
|
|
||||||
|
var r rune
|
||||||
|
var dSize, sSize int
|
||||||
|
for nSrc < len(src) {
|
||||||
|
if nSrc+1 < len(src) {
|
||||||
|
x := uint16(src[nSrc+0])<<8 | uint16(src[nSrc+1])
|
||||||
|
if u.current.endianness == LittleEndian {
|
||||||
|
x = x>>8 | x<<8
|
||||||
|
}
|
||||||
|
r, sSize = rune(x), 2
|
||||||
|
if utf16.IsSurrogate(r) {
|
||||||
|
if nSrc+3 < len(src) {
|
||||||
|
x = uint16(src[nSrc+2])<<8 | uint16(src[nSrc+3])
|
||||||
|
if u.current.endianness == LittleEndian {
|
||||||
|
x = x>>8 | x<<8
|
||||||
|
}
|
||||||
|
// Save for next iteration if it is not a high surrogate.
|
||||||
|
if isHighSurrogate(rune(x)) {
|
||||||
|
r, sSize = utf16.DecodeRune(r, rune(x)), 4
|
||||||
|
}
|
||||||
|
} else if !atEOF {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if dSize = utf8.RuneLen(r); dSize < 0 {
|
||||||
|
r, dSize = utf8.RuneError, 3
|
||||||
|
}
|
||||||
|
} else if atEOF {
|
||||||
|
// Single trailing byte.
|
||||||
|
r, dSize, sSize = utf8.RuneError, 3, 1
|
||||||
|
} else {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if nDst+dSize > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
nDst += utf8.EncodeRune(dst[nDst:], r)
|
||||||
|
nSrc += sSize
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func isHighSurrogate(r rune) bool {
|
||||||
|
return 0xDC00 <= r && r <= 0xDFFF
|
||||||
|
}
|
||||||
|
|
||||||
|
type utf16Encoder struct {
|
||||||
|
endianness Endianness
|
||||||
|
initialBOMPolicy BOMPolicy
|
||||||
|
currentBOMPolicy BOMPolicy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *utf16Encoder) Reset() {
|
||||||
|
u.currentBOMPolicy = u.initialBOMPolicy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *utf16Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
if u.currentBOMPolicy&writeBOM != 0 {
|
||||||
|
if len(dst) < 2 {
|
||||||
|
return 0, 0, transform.ErrShortDst
|
||||||
|
}
|
||||||
|
dst[0], dst[1] = 0xfe, 0xff
|
||||||
|
u.currentBOMPolicy = IgnoreBOM
|
||||||
|
nDst = 2
|
||||||
|
}
|
||||||
|
|
||||||
|
r, size := rune(0), 0
|
||||||
|
for nSrc < len(src) {
|
||||||
|
r = rune(src[nSrc])
|
||||||
|
|
||||||
|
// Decode a 1-byte rune.
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// Decode a multi-byte rune.
|
||||||
|
r, size = utf8.DecodeRune(src[nSrc:])
|
||||||
|
if size == 1 {
|
||||||
|
// All valid runes of size 1 (those below utf8.RuneSelf) were
|
||||||
|
// handled above. We have invalid UTF-8 or we haven't seen the
|
||||||
|
// full character yet.
|
||||||
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if r <= 0xffff {
|
||||||
|
if nDst+2 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+0] = uint8(r >> 8)
|
||||||
|
dst[nDst+1] = uint8(r)
|
||||||
|
nDst += 2
|
||||||
|
} else {
|
||||||
|
if nDst+4 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
r1, r2 := utf16.EncodeRune(r)
|
||||||
|
dst[nDst+0] = uint8(r1 >> 8)
|
||||||
|
dst[nDst+1] = uint8(r1)
|
||||||
|
dst[nDst+2] = uint8(r2 >> 8)
|
||||||
|
dst[nDst+3] = uint8(r2)
|
||||||
|
nDst += 4
|
||||||
|
}
|
||||||
|
nSrc += size
|
||||||
|
}
|
||||||
|
|
||||||
|
if u.endianness == LittleEndian {
|
||||||
|
for i := 0; i < nDst; i += 2 {
|
||||||
|
dst[i], dst[i+1] = dst[i+1], dst[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
100
vendor/golang.org/x/text/internal/tag/tag.go
generated
vendored
Normal file
100
vendor/golang.org/x/text/internal/tag/tag.go
generated
vendored
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package tag contains functionality handling tags and related data.
|
||||||
|
package tag // import "golang.org/x/text/internal/tag"
|
||||||
|
|
||||||
|
import "sort"
|
||||||
|
|
||||||
|
// An Index converts tags to a compact numeric value.
|
||||||
|
//
|
||||||
|
// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can
|
||||||
|
// be used to store additional information about the tag.
|
||||||
|
type Index string
|
||||||
|
|
||||||
|
// Elem returns the element data at the given index.
|
||||||
|
func (s Index) Elem(x int) string {
|
||||||
|
return string(s[x*4 : x*4+4])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index reports the index of the given key or -1 if it could not be found.
|
||||||
|
// Only the first len(key) bytes from the start of the 4-byte entries will be
|
||||||
|
// considered for the search and the first match in Index will be returned.
|
||||||
|
func (s Index) Index(key []byte) int {
|
||||||
|
n := len(key)
|
||||||
|
// search the index of the first entry with an equal or higher value than
|
||||||
|
// key in s.
|
||||||
|
index := sort.Search(len(s)/4, func(i int) bool {
|
||||||
|
return cmp(s[i*4:i*4+n], key) != -1
|
||||||
|
})
|
||||||
|
i := index * 4
|
||||||
|
if cmp(s[i:i+len(key)], key) != 0 {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return index
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next finds the next occurrence of key after index x, which must have been
|
||||||
|
// obtained from a call to Index using the same key. It returns x+1 or -1.
|
||||||
|
func (s Index) Next(key []byte, x int) int {
|
||||||
|
if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 {
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// cmp returns an integer comparing a and b lexicographically.
|
||||||
|
func cmp(a Index, b []byte) int {
|
||||||
|
n := len(a)
|
||||||
|
if len(b) < n {
|
||||||
|
n = len(b)
|
||||||
|
}
|
||||||
|
for i, c := range b[:n] {
|
||||||
|
switch {
|
||||||
|
case a[i] > c:
|
||||||
|
return 1
|
||||||
|
case a[i] < c:
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case len(a) < len(b):
|
||||||
|
return -1
|
||||||
|
case len(a) > len(b):
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare returns an integer comparing a and b lexicographically.
|
||||||
|
func Compare(a string, b []byte) int {
|
||||||
|
return cmp(Index(a), b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FixCase reformats b to the same pattern of cases as form.
|
||||||
|
// If returns false if string b is malformed.
|
||||||
|
func FixCase(form string, b []byte) bool {
|
||||||
|
if len(form) != len(b) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i, c := range b {
|
||||||
|
if form[i] <= 'Z' {
|
||||||
|
if c >= 'a' {
|
||||||
|
c -= 'z' - 'Z'
|
||||||
|
}
|
||||||
|
if c < 'A' || 'Z' < c {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if c <= 'Z' {
|
||||||
|
c += 'z' - 'Z'
|
||||||
|
}
|
||||||
|
if c < 'a' || 'z' < c {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b[i] = c
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
87
vendor/golang.org/x/text/internal/utf8internal/utf8internal.go
generated
vendored
Normal file
87
vendor/golang.org/x/text/internal/utf8internal/utf8internal.go
generated
vendored
Normal file
@ -0,0 +1,87 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package utf8internal contains low-level utf8-related constants, tables, etc.
|
||||||
|
// that are used internally by the text package.
|
||||||
|
package utf8internal
|
||||||
|
|
||||||
|
// The default lowest and highest continuation byte.
|
||||||
|
const (
|
||||||
|
LoCB = 0x80 // 1000 0000
|
||||||
|
HiCB = 0xBF // 1011 1111
|
||||||
|
)
|
||||||
|
|
||||||
|
// Constants related to getting information of first bytes of UTF-8 sequences.
|
||||||
|
const (
|
||||||
|
// ASCII identifies a UTF-8 byte as ASCII.
|
||||||
|
ASCII = as
|
||||||
|
|
||||||
|
// FirstInvalid indicates a byte is invalid as a first byte of a UTF-8
|
||||||
|
// sequence.
|
||||||
|
FirstInvalid = xx
|
||||||
|
|
||||||
|
// SizeMask is a mask for the size bits. Use use x&SizeMask to get the size.
|
||||||
|
SizeMask = 7
|
||||||
|
|
||||||
|
// AcceptShift is the right-shift count for the first byte info byte to get
|
||||||
|
// the index into the AcceptRanges table. See AcceptRanges.
|
||||||
|
AcceptShift = 4
|
||||||
|
|
||||||
|
// The names of these constants are chosen to give nice alignment in the
|
||||||
|
// table below. The first nibble is an index into acceptRanges or F for
|
||||||
|
// special one-byte cases. The second nibble is the Rune length or the
|
||||||
|
// Status for the special one-byte case.
|
||||||
|
xx = 0xF1 // invalid: size 1
|
||||||
|
as = 0xF0 // ASCII: size 1
|
||||||
|
s1 = 0x02 // accept 0, size 2
|
||||||
|
s2 = 0x13 // accept 1, size 3
|
||||||
|
s3 = 0x03 // accept 0, size 3
|
||||||
|
s4 = 0x23 // accept 2, size 3
|
||||||
|
s5 = 0x34 // accept 3, size 4
|
||||||
|
s6 = 0x04 // accept 0, size 4
|
||||||
|
s7 = 0x44 // accept 4, size 4
|
||||||
|
)
|
||||||
|
|
||||||
|
// First is information about the first byte in a UTF-8 sequence.
|
||||||
|
var First = [256]uint8{
|
||||||
|
// 1 2 3 4 5 6 7 8 9 A B C D E F
|
||||||
|
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
|
||||||
|
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
|
||||||
|
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
|
||||||
|
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
|
||||||
|
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
|
||||||
|
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
|
||||||
|
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
|
||||||
|
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
|
||||||
|
// 1 2 3 4 5 6 7 8 9 A B C D E F
|
||||||
|
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
|
||||||
|
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
|
||||||
|
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
|
||||||
|
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
|
||||||
|
xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
|
||||||
|
s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
|
||||||
|
s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
|
||||||
|
s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
|
||||||
|
}
|
||||||
|
|
||||||
|
// AcceptRange gives the range of valid values for the second byte in a UTF-8
|
||||||
|
// sequence for any value for First that is not ASCII or FirstInvalid.
|
||||||
|
type AcceptRange struct {
|
||||||
|
Lo uint8 // lowest value for second byte.
|
||||||
|
Hi uint8 // highest value for second byte.
|
||||||
|
}
|
||||||
|
|
||||||
|
// AcceptRanges is a slice of AcceptRange values. For a given byte sequence b
|
||||||
|
//
|
||||||
|
// AcceptRanges[First[b[0]]>>AcceptShift]
|
||||||
|
//
|
||||||
|
// will give the value of AcceptRange for the multi-byte UTF-8 sequence starting
|
||||||
|
// at b[0].
|
||||||
|
var AcceptRanges = [...]AcceptRange{
|
||||||
|
0: {LoCB, HiCB},
|
||||||
|
1: {0xA0, HiCB},
|
||||||
|
2: {LoCB, 0x9F},
|
||||||
|
3: {0x90, HiCB},
|
||||||
|
4: {LoCB, 0x8F},
|
||||||
|
}
|
16
vendor/golang.org/x/text/language/Makefile
generated
vendored
Normal file
16
vendor/golang.org/x/text/language/Makefile
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
# Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
# Use of this source code is governed by a BSD-style
|
||||||
|
# license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
CLEANFILES+=maketables
|
||||||
|
|
||||||
|
maketables: maketables.go
|
||||||
|
go build $^
|
||||||
|
|
||||||
|
tables: maketables
|
||||||
|
./maketables > tables.go
|
||||||
|
gofmt -w -s tables.go
|
||||||
|
|
||||||
|
# Build (but do not run) maketables during testing,
|
||||||
|
# just to make sure it still compiles.
|
||||||
|
testshort: maketables
|
16
vendor/golang.org/x/text/language/common.go
generated
vendored
Normal file
16
vendor/golang.org/x/text/language/common.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
// This file contains code common to the maketables.go and the package code.
|
||||||
|
|
||||||
|
// langAliasType is the type of an alias in langAliasMap.
|
||||||
|
type langAliasType int8
|
||||||
|
|
||||||
|
const (
|
||||||
|
langDeprecated langAliasType = iota
|
||||||
|
langMacro
|
||||||
|
langLegacy
|
||||||
|
|
||||||
|
langAliasTypeUnknown langAliasType = -1
|
||||||
|
)
|
197
vendor/golang.org/x/text/language/coverage.go
generated
vendored
Normal file
197
vendor/golang.org/x/text/language/coverage.go
generated
vendored
Normal file
@ -0,0 +1,197 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The Coverage interface is used to define the level of coverage of an
|
||||||
|
// internationalization service. Note that not all types are supported by all
|
||||||
|
// services. As lists may be generated on the fly, it is recommended that users
|
||||||
|
// of a Coverage cache the results.
|
||||||
|
type Coverage interface {
|
||||||
|
// Tags returns the list of supported tags.
|
||||||
|
Tags() []Tag
|
||||||
|
|
||||||
|
// BaseLanguages returns the list of supported base languages.
|
||||||
|
BaseLanguages() []Base
|
||||||
|
|
||||||
|
// Scripts returns the list of supported scripts.
|
||||||
|
Scripts() []Script
|
||||||
|
|
||||||
|
// Regions returns the list of supported regions.
|
||||||
|
Regions() []Region
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Supported defines a Coverage that lists all supported subtags. Tags
|
||||||
|
// always returns nil.
|
||||||
|
Supported Coverage = allSubtags{}
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO:
|
||||||
|
// - Support Variants, numbering systems.
|
||||||
|
// - CLDR coverage levels.
|
||||||
|
// - Set of common tags defined in this package.
|
||||||
|
|
||||||
|
type allSubtags struct{}
|
||||||
|
|
||||||
|
// Regions returns the list of supported regions. As all regions are in a
|
||||||
|
// consecutive range, it simply returns a slice of numbers in increasing order.
|
||||||
|
// The "undefined" region is not returned.
|
||||||
|
func (s allSubtags) Regions() []Region {
|
||||||
|
reg := make([]Region, numRegions)
|
||||||
|
for i := range reg {
|
||||||
|
reg[i] = Region{regionID(i + 1)}
|
||||||
|
}
|
||||||
|
return reg
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scripts returns the list of supported scripts. As all scripts are in a
|
||||||
|
// consecutive range, it simply returns a slice of numbers in increasing order.
|
||||||
|
// The "undefined" script is not returned.
|
||||||
|
func (s allSubtags) Scripts() []Script {
|
||||||
|
scr := make([]Script, numScripts)
|
||||||
|
for i := range scr {
|
||||||
|
scr[i] = Script{scriptID(i + 1)}
|
||||||
|
}
|
||||||
|
return scr
|
||||||
|
}
|
||||||
|
|
||||||
|
// BaseLanguages returns the list of all supported base languages. It generates
|
||||||
|
// the list by traversing the internal structures.
|
||||||
|
func (s allSubtags) BaseLanguages() []Base {
|
||||||
|
base := make([]Base, 0, numLanguages)
|
||||||
|
for i := 0; i < langNoIndexOffset; i++ {
|
||||||
|
// We included "und" already for the value 0.
|
||||||
|
if i != nonCanonicalUnd {
|
||||||
|
base = append(base, Base{langID(i)})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i := langNoIndexOffset
|
||||||
|
for _, v := range langNoIndex {
|
||||||
|
for k := 0; k < 8; k++ {
|
||||||
|
if v&1 == 1 {
|
||||||
|
base = append(base, Base{langID(i)})
|
||||||
|
}
|
||||||
|
v >>= 1
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return base
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tags always returns nil.
|
||||||
|
func (s allSubtags) Tags() []Tag {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// coverage is used used by NewCoverage which is used as a convenient way for
|
||||||
|
// creating Coverage implementations for partially defined data. Very often a
|
||||||
|
// package will only need to define a subset of slices. coverage provides a
|
||||||
|
// convenient way to do this. Moreover, packages using NewCoverage, instead of
|
||||||
|
// their own implementation, will not break if later new slice types are added.
|
||||||
|
type coverage struct {
|
||||||
|
tags func() []Tag
|
||||||
|
bases func() []Base
|
||||||
|
scripts func() []Script
|
||||||
|
regions func() []Region
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *coverage) Tags() []Tag {
|
||||||
|
if s.tags == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return s.tags()
|
||||||
|
}
|
||||||
|
|
||||||
|
// bases implements sort.Interface and is used to sort base languages.
|
||||||
|
type bases []Base
|
||||||
|
|
||||||
|
func (b bases) Len() int {
|
||||||
|
return len(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b bases) Swap(i, j int) {
|
||||||
|
b[i], b[j] = b[j], b[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b bases) Less(i, j int) bool {
|
||||||
|
return b[i].langID < b[j].langID
|
||||||
|
}
|
||||||
|
|
||||||
|
// BaseLanguages returns the result from calling s.bases if it is specified or
|
||||||
|
// otherwise derives the set of supported base languages from tags.
|
||||||
|
func (s *coverage) BaseLanguages() []Base {
|
||||||
|
if s.bases == nil {
|
||||||
|
tags := s.Tags()
|
||||||
|
if len(tags) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
a := make([]Base, len(tags))
|
||||||
|
for i, t := range tags {
|
||||||
|
a[i] = Base{langID(t.lang)}
|
||||||
|
}
|
||||||
|
sort.Sort(bases(a))
|
||||||
|
k := 0
|
||||||
|
for i := 1; i < len(a); i++ {
|
||||||
|
if a[k] != a[i] {
|
||||||
|
k++
|
||||||
|
a[k] = a[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return a[:k+1]
|
||||||
|
}
|
||||||
|
return s.bases()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *coverage) Scripts() []Script {
|
||||||
|
if s.scripts == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return s.scripts()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *coverage) Regions() []Region {
|
||||||
|
if s.regions == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return s.regions()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCoverage returns a Coverage for the given lists. It is typically used by
|
||||||
|
// packages providing internationalization services to define their level of
|
||||||
|
// coverage. A list may be of type []T or func() []T, where T is either Tag,
|
||||||
|
// Base, Script or Region. The returned Coverage derives the value for Bases
|
||||||
|
// from Tags if no func or slice for []Base is specified. For other unspecified
|
||||||
|
// types the returned Coverage will return nil for the respective methods.
|
||||||
|
func NewCoverage(list ...interface{}) Coverage {
|
||||||
|
s := &coverage{}
|
||||||
|
for _, x := range list {
|
||||||
|
switch v := x.(type) {
|
||||||
|
case func() []Base:
|
||||||
|
s.bases = v
|
||||||
|
case func() []Script:
|
||||||
|
s.scripts = v
|
||||||
|
case func() []Region:
|
||||||
|
s.regions = v
|
||||||
|
case func() []Tag:
|
||||||
|
s.tags = v
|
||||||
|
case []Base:
|
||||||
|
s.bases = func() []Base { return v }
|
||||||
|
case []Script:
|
||||||
|
s.scripts = func() []Script { return v }
|
||||||
|
case []Region:
|
||||||
|
s.regions = func() []Region { return v }
|
||||||
|
case []Tag:
|
||||||
|
s.tags = func() []Tag { return v }
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("language: unsupported set type %T", v))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
102
vendor/golang.org/x/text/language/doc.go
generated
vendored
Normal file
102
vendor/golang.org/x/text/language/doc.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
|
|||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package language implements BCP 47 language tags and related functionality.
|
||||||
|
//
|
||||||
|
// The most important function of package language is to match a list of
|
||||||
|
// user-preferred languages to a list of supported languages.
|
||||||
|
// It alleviates the developer of dealing with the complexity of this process
|
||||||
|
// and provides the user with the best experience
|
||||||
|
// (see https://blog.golang.org/matchlang).
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Matching preferred against supported languages
|
||||||
|
//
|
||||||
|
// A Matcher for an application that supports English, Australian English,
|
||||||
|
// Danish, and standard Mandarin can be created as follows:
|
||||||
|
//
|
||||||
|
// var matcher = language.NewMatcher([]language.Tag{
|
||||||
|
// language.English, // The first language is used as fallback.
|
||||||
|
// language.MustParse("en-AU"),
|
||||||
|
// language.Danish,
|
||||||
|
// language.Chinese,
|
||||||
|
// })
|
||||||
|
//
|
||||||
|
// This list of supported languages is typically implied by the languages for
|
||||||
|
// which there exists translations of the user interface.
|
||||||
|
//
|
||||||
|
// User-preferred languages usually come as a comma-separated list of BCP 47
|
||||||
|
// language tags.
|
||||||
|
// The MatchString finds best matches for such strings:
|
||||||
|
//
|
||||||
|
// handler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// lang, _ := r.Cookie("lang")
|
||||||
|
// accept := r.Header.Get("Accept-Language")
|
||||||
|
// tag, _ := language.MatchStrings(matcher, lang.String(), accept)
|
||||||
|
//
|
||||||
|
// // tag should now be used for the initialization of any
|
||||||
|
// // locale-specific service.
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The Matcher's Match method can be used to match Tags directly.
|
||||||
|
//
|
||||||
|
// Matchers are aware of the intricacies of equivalence between languages, such
|
||||||
|
// as deprecated subtags, legacy tags, macro languages, mutual
|
||||||
|
// intelligibility between scripts and languages, and transparently passing
|
||||||
|
// BCP 47 user configuration.
|
||||||
|
// For instance, it will know that a reader of Bokmål Danish can read Norwegian
|
||||||
|
// and will know that Cantonese ("yue") is a good match for "zh-HK".
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Using match results
|
||||||
|
//
|
||||||
|
// To guarantee a consistent user experience to the user it is important to
|
||||||
|
// use the same language tag for the selection of any locale-specific services.
|
||||||
|
// For example, it is utterly confusing to substitute spelled-out numbers
|
||||||
|
// or dates in one language in text of another language.
|
||||||
|
// More subtly confusing is using the wrong sorting order or casing
|
||||||
|
// algorithm for a certain language.
|
||||||
|
//
|
||||||
|
// All the packages in x/text that provide locale-specific services
|
||||||
|
// (e.g. collate, cases) should be initialized with the tag that was
|
||||||
|
// obtained at the start of an interaction with the user.
|
||||||
|
//
|
||||||
|
// Note that Tag that is returned by Match and MatchString may differ from any
|
||||||
|
// of the supported languages, as it may contain carried over settings from
|
||||||
|
// the user tags.
|
||||||
|
// This may be inconvenient when your application has some additional
|
||||||
|
// locale-specific data for your supported languages.
|
||||||
|
// Match and MatchString both return the index of the matched supported tag
|
||||||
|
// to simplify associating such data with the matched tag.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Canonicalization
|
||||||
|
//
|
||||||
|
// If one uses the Matcher to compare languages one does not need to
|
||||||
|
// worry about canonicalization.
|
||||||
|
//
|
||||||
|
// The meaning of a Tag varies per application. The language package
|
||||||
|
// therefore delays canonicalization and preserves information as much
|
||||||
|
// as possible. The Matcher, however, will always take into account that
|
||||||
|
// two different tags may represent the same language.
|
||||||
|
//
|
||||||
|
// By default, only legacy and deprecated tags are converted into their
|
||||||
|
// canonical equivalent. All other information is preserved. This approach makes
|
||||||
|
// the confidence scores more accurate and allows matchers to distinguish
|
||||||
|
// between variants that are otherwise lost.
|
||||||
|
//
|
||||||
|
// As a consequence, two tags that should be treated as identical according to
|
||||||
|
// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The
|
||||||
|
// Matcher handles such distinctions, though, and is aware of the
|
||||||
|
// equivalence relations. The CanonType type can be used to alter the
|
||||||
|
// canonicalization form.
|
||||||
|
//
|
||||||
|
// References
|
||||||
|
//
|
||||||
|
// BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47
|
||||||
|
//
|
||||||
|
package language // import "golang.org/x/text/language"
|
||||||
|
|
||||||
|
// TODO: explanation on how to match languages for your own locale-specific
|
||||||
|
// service.
|
1712
vendor/golang.org/x/text/language/gen.go
generated
vendored
Normal file
1712
vendor/golang.org/x/text/language/gen.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
20
vendor/golang.org/x/text/language/gen_common.go
generated
vendored
Normal file
20
vendor/golang.org/x/text/language/gen_common.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
// This file contains code common to the maketables.go and the package code.
|
||||||
|
|
||||||
|
// langAliasType is the type of an alias in langAliasMap.
|
||||||
|
type langAliasType int8
|
||||||
|
|
||||||
|
const (
|
||||||
|
langDeprecated langAliasType = iota
|
||||||
|
langMacro
|
||||||
|
langLegacy
|
||||||
|
|
||||||
|
langAliasTypeUnknown langAliasType = -1
|
||||||
|
)
|
162
vendor/golang.org/x/text/language/gen_index.go
generated
vendored
Normal file
162
vendor/golang.org/x/text/language/gen_index.go
generated
vendored
Normal file
@ -0,0 +1,162 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
// This file generates derivative tables based on the language package itself.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/text/internal/gen"
|
||||||
|
"golang.org/x/text/language"
|
||||||
|
"golang.org/x/text/unicode/cldr"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
test = flag.Bool("test", false,
|
||||||
|
"test existing tables; can be used to compare web data with package data.")
|
||||||
|
|
||||||
|
draft = flag.String("draft",
|
||||||
|
"contributed",
|
||||||
|
`Minimal draft requirements (approved, contributed, provisional, unconfirmed).`)
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
gen.Init()
|
||||||
|
|
||||||
|
// Read the CLDR zip file.
|
||||||
|
r := gen.OpenCLDRCoreZip()
|
||||||
|
defer r.Close()
|
||||||
|
|
||||||
|
d := &cldr.Decoder{}
|
||||||
|
data, err := d.DecodeZip(r)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("DecodeZip: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w := gen.NewCodeWriter()
|
||||||
|
defer func() {
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
|
||||||
|
if _, err = w.WriteGo(buf, "language", ""); err != nil {
|
||||||
|
log.Fatalf("Error formatting file index.go: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Since we're generating a table for our own package we need to rewrite
|
||||||
|
// doing the equivalent of go fmt -r 'language.b -> b'. Using
|
||||||
|
// bytes.Replace will do.
|
||||||
|
out := bytes.Replace(buf.Bytes(), []byte("language."), nil, -1)
|
||||||
|
if err := ioutil.WriteFile("index.go", out, 0600); err != nil {
|
||||||
|
log.Fatalf("Could not create file index.go: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
m := map[language.Tag]bool{}
|
||||||
|
for _, lang := range data.Locales() {
|
||||||
|
// We include all locales unconditionally to be consistent with en_US.
|
||||||
|
// We want en_US, even though it has no data associated with it.
|
||||||
|
|
||||||
|
// TODO: put any of the languages for which no data exists at the end
|
||||||
|
// of the index. This allows all components based on ICU to use that
|
||||||
|
// as the cutoff point.
|
||||||
|
// if x := data.RawLDML(lang); false ||
|
||||||
|
// x.LocaleDisplayNames != nil ||
|
||||||
|
// x.Characters != nil ||
|
||||||
|
// x.Delimiters != nil ||
|
||||||
|
// x.Measurement != nil ||
|
||||||
|
// x.Dates != nil ||
|
||||||
|
// x.Numbers != nil ||
|
||||||
|
// x.Units != nil ||
|
||||||
|
// x.ListPatterns != nil ||
|
||||||
|
// x.Collations != nil ||
|
||||||
|
// x.Segmentations != nil ||
|
||||||
|
// x.Rbnf != nil ||
|
||||||
|
// x.Annotations != nil ||
|
||||||
|
// x.Metadata != nil {
|
||||||
|
|
||||||
|
// TODO: support POSIX natively, albeit non-standard.
|
||||||
|
tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1))
|
||||||
|
m[tag] = true
|
||||||
|
// }
|
||||||
|
}
|
||||||
|
// Include locales for plural rules, which uses a different structure.
|
||||||
|
for _, plurals := range data.Supplemental().Plurals {
|
||||||
|
for _, rules := range plurals.PluralRules {
|
||||||
|
for _, lang := range strings.Split(rules.Locales, " ") {
|
||||||
|
m[language.Make(lang)] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var core, special []language.Tag
|
||||||
|
|
||||||
|
for t := range m {
|
||||||
|
if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" {
|
||||||
|
log.Fatalf("Unexpected extension %v in %v", x, t)
|
||||||
|
}
|
||||||
|
if len(t.Variants()) == 0 && len(t.Extensions()) == 0 {
|
||||||
|
core = append(core, t)
|
||||||
|
} else {
|
||||||
|
special = append(special, t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteComment(`
|
||||||
|
NumCompactTags is the number of common tags. The maximum tag is
|
||||||
|
NumCompactTags-1.`)
|
||||||
|
w.WriteConst("NumCompactTags", len(core)+len(special))
|
||||||
|
|
||||||
|
sort.Sort(byAlpha(special))
|
||||||
|
w.WriteVar("specialTags", special)
|
||||||
|
|
||||||
|
// TODO: order by frequency?
|
||||||
|
sort.Sort(byAlpha(core))
|
||||||
|
|
||||||
|
// Size computations are just an estimate.
|
||||||
|
w.Size += int(reflect.TypeOf(map[uint32]uint16{}).Size())
|
||||||
|
w.Size += len(core) * 6 // size of uint32 and uint16
|
||||||
|
|
||||||
|
fmt.Fprintln(w)
|
||||||
|
fmt.Fprintln(w, "var coreTags = map[uint32]uint16{")
|
||||||
|
fmt.Fprintln(w, "0x0: 0, // und")
|
||||||
|
i := len(special) + 1 // Und and special tags already written.
|
||||||
|
for _, t := range core {
|
||||||
|
if t == language.Und {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprint(w.Hash, t, i)
|
||||||
|
b, s, r := t.Raw()
|
||||||
|
fmt.Fprintf(w, "0x%s%s%s: %d, // %s\n",
|
||||||
|
getIndex(b, 3), // 3 is enough as it is guaranteed to be a compact number
|
||||||
|
getIndex(s, 2),
|
||||||
|
getIndex(r, 3),
|
||||||
|
i, t)
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
fmt.Fprintln(w, "}")
|
||||||
|
}
|
||||||
|
|
||||||
|
// getIndex prints the subtag type and extracts its index of size nibble.
|
||||||
|
// If the index is less than n nibbles, the result is prefixed with 0s.
|
||||||
|
func getIndex(x interface{}, n int) string {
|
||||||
|
s := fmt.Sprintf("%#v", x) // s is of form Type{typeID: 0x00}
|
||||||
|
s = s[strings.Index(s, "0x")+2 : len(s)-1]
|
||||||
|
return strings.Repeat("0", n-len(s)) + s
|
||||||
|
}
|
||||||
|
|
||||||
|
type byAlpha []language.Tag
|
||||||
|
|
||||||
|
func (a byAlpha) Len() int { return len(a) }
|
||||||
|
func (a byAlpha) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||||
|
func (a byAlpha) Less(i, j int) bool { return a[i].String() < a[j].String() }
|
38
vendor/golang.org/x/text/language/go1_1.go
generated
vendored
Normal file
38
vendor/golang.org/x/text/language/go1_1.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.2
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
import "sort"
|
||||||
|
|
||||||
|
func sortStable(s sort.Interface) {
|
||||||
|
ss := stableSort{
|
||||||
|
s: s,
|
||||||
|
pos: make([]int, s.Len()),
|
||||||
|
}
|
||||||
|
for i := range ss.pos {
|
||||||
|
ss.pos[i] = i
|
||||||
|
}
|
||||||
|
sort.Sort(&ss)
|
||||||
|
}
|
||||||
|
|
||||||
|
type stableSort struct {
|
||||||
|
s sort.Interface
|
||||||
|
pos []int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stableSort) Len() int {
|
||||||
|
return len(s.pos)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stableSort) Less(i, j int) bool {
|
||||||
|
return s.s.Less(i, j) || !s.s.Less(j, i) && s.pos[i] < s.pos[j]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stableSort) Swap(i, j int) {
|
||||||
|
s.s.Swap(i, j)
|
||||||
|
s.pos[i], s.pos[j] = s.pos[j], s.pos[i]
|
||||||
|
}
|
11
vendor/golang.org/x/text/language/go1_2.go
generated
vendored
Normal file
11
vendor/golang.org/x/text/language/go1_2.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.2
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
import "sort"
|
||||||
|
|
||||||
|
var sortStable = sort.Stable
|
783
vendor/golang.org/x/text/language/index.go
generated
vendored
Normal file
783
vendor/golang.org/x/text/language/index.go
generated
vendored
Normal file
@ -0,0 +1,783 @@
|
|||||||
|
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
// NumCompactTags is the number of common tags. The maximum tag is
|
||||||
|
// NumCompactTags-1.
|
||||||
|
const NumCompactTags = 768
|
||||||
|
|
||||||
|
var specialTags = []Tag{ // 2 elements
|
||||||
|
0: {lang: 0xd7, region: 0x6e, script: 0x0, pVariant: 0x5, pExt: 0xe, str: "ca-ES-valencia"},
|
||||||
|
1: {lang: 0x139, region: 0x135, script: 0x0, pVariant: 0x5, pExt: 0x5, str: "en-US-u-va-posix"},
|
||||||
|
} // Size: 72 bytes
|
||||||
|
|
||||||
|
var coreTags = map[uint32]uint16{
|
||||||
|
0x0: 0, // und
|
||||||
|
0x01600000: 3, // af
|
||||||
|
0x016000d2: 4, // af-NA
|
||||||
|
0x01600161: 5, // af-ZA
|
||||||
|
0x01c00000: 6, // agq
|
||||||
|
0x01c00052: 7, // agq-CM
|
||||||
|
0x02100000: 8, // ak
|
||||||
|
0x02100080: 9, // ak-GH
|
||||||
|
0x02700000: 10, // am
|
||||||
|
0x0270006f: 11, // am-ET
|
||||||
|
0x03a00000: 12, // ar
|
||||||
|
0x03a00001: 13, // ar-001
|
||||||
|
0x03a00023: 14, // ar-AE
|
||||||
|
0x03a00039: 15, // ar-BH
|
||||||
|
0x03a00062: 16, // ar-DJ
|
||||||
|
0x03a00067: 17, // ar-DZ
|
||||||
|
0x03a0006b: 18, // ar-EG
|
||||||
|
0x03a0006c: 19, // ar-EH
|
||||||
|
0x03a0006d: 20, // ar-ER
|
||||||
|
0x03a00097: 21, // ar-IL
|
||||||
|
0x03a0009b: 22, // ar-IQ
|
||||||
|
0x03a000a1: 23, // ar-JO
|
||||||
|
0x03a000a8: 24, // ar-KM
|
||||||
|
0x03a000ac: 25, // ar-KW
|
||||||
|
0x03a000b0: 26, // ar-LB
|
||||||
|
0x03a000b9: 27, // ar-LY
|
||||||
|
0x03a000ba: 28, // ar-MA
|
||||||
|
0x03a000c9: 29, // ar-MR
|
||||||
|
0x03a000e1: 30, // ar-OM
|
||||||
|
0x03a000ed: 31, // ar-PS
|
||||||
|
0x03a000f3: 32, // ar-QA
|
||||||
|
0x03a00108: 33, // ar-SA
|
||||||
|
0x03a0010b: 34, // ar-SD
|
||||||
|
0x03a00115: 35, // ar-SO
|
||||||
|
0x03a00117: 36, // ar-SS
|
||||||
|
0x03a0011c: 37, // ar-SY
|
||||||
|
0x03a00120: 38, // ar-TD
|
||||||
|
0x03a00128: 39, // ar-TN
|
||||||
|
0x03a0015e: 40, // ar-YE
|
||||||
|
0x04000000: 41, // ars
|
||||||
|
0x04300000: 42, // as
|
||||||
|
0x04300099: 43, // as-IN
|
||||||
|
0x04400000: 44, // asa
|
||||||
|
0x0440012f: 45, // asa-TZ
|
||||||
|
0x04800000: 46, // ast
|
||||||
|
0x0480006e: 47, // ast-ES
|
||||||
|
0x05800000: 48, // az
|
||||||
|
0x0581f000: 49, // az-Cyrl
|
||||||
|
0x0581f032: 50, // az-Cyrl-AZ
|
||||||
|
0x05857000: 51, // az-Latn
|
||||||
|
0x05857032: 52, // az-Latn-AZ
|
||||||
|
0x05e00000: 53, // bas
|
||||||
|
0x05e00052: 54, // bas-CM
|
||||||
|
0x07100000: 55, // be
|
||||||
|
0x07100047: 56, // be-BY
|
||||||
|
0x07500000: 57, // bem
|
||||||
|
0x07500162: 58, // bem-ZM
|
||||||
|
0x07900000: 59, // bez
|
||||||
|
0x0790012f: 60, // bez-TZ
|
||||||
|
0x07e00000: 61, // bg
|
||||||
|
0x07e00038: 62, // bg-BG
|
||||||
|
0x08200000: 63, // bh
|
||||||
|
0x0a000000: 64, // bm
|
||||||
|
0x0a0000c3: 65, // bm-ML
|
||||||
|
0x0a500000: 66, // bn
|
||||||
|
0x0a500035: 67, // bn-BD
|
||||||
|
0x0a500099: 68, // bn-IN
|
||||||
|
0x0a900000: 69, // bo
|
||||||
|
0x0a900053: 70, // bo-CN
|
||||||
|
0x0a900099: 71, // bo-IN
|
||||||
|
0x0b200000: 72, // br
|
||||||
|
0x0b200078: 73, // br-FR
|
||||||
|
0x0b500000: 74, // brx
|
||||||
|
0x0b500099: 75, // brx-IN
|
||||||
|
0x0b700000: 76, // bs
|
||||||
|
0x0b71f000: 77, // bs-Cyrl
|
||||||
|
0x0b71f033: 78, // bs-Cyrl-BA
|
||||||
|
0x0b757000: 79, // bs-Latn
|
||||||
|
0x0b757033: 80, // bs-Latn-BA
|
||||||
|
0x0d700000: 81, // ca
|
||||||
|
0x0d700022: 82, // ca-AD
|
||||||
|
0x0d70006e: 83, // ca-ES
|
||||||
|
0x0d700078: 84, // ca-FR
|
||||||
|
0x0d70009e: 85, // ca-IT
|
||||||
|
0x0db00000: 86, // ccp
|
||||||
|
0x0db00035: 87, // ccp-BD
|
||||||
|
0x0db00099: 88, // ccp-IN
|
||||||
|
0x0dc00000: 89, // ce
|
||||||
|
0x0dc00106: 90, // ce-RU
|
||||||
|
0x0df00000: 91, // cgg
|
||||||
|
0x0df00131: 92, // cgg-UG
|
||||||
|
0x0e500000: 93, // chr
|
||||||
|
0x0e500135: 94, // chr-US
|
||||||
|
0x0e900000: 95, // ckb
|
||||||
|
0x0e90009b: 96, // ckb-IQ
|
||||||
|
0x0e90009c: 97, // ckb-IR
|
||||||
|
0x0fa00000: 98, // cs
|
||||||
|
0x0fa0005e: 99, // cs-CZ
|
||||||
|
0x0fe00000: 100, // cu
|
||||||
|
0x0fe00106: 101, // cu-RU
|
||||||
|
0x10000000: 102, // cy
|
||||||
|
0x1000007b: 103, // cy-GB
|
||||||
|
0x10100000: 104, // da
|
||||||
|
0x10100063: 105, // da-DK
|
||||||
|
0x10100082: 106, // da-GL
|
||||||
|
0x10800000: 107, // dav
|
||||||
|
0x108000a4: 108, // dav-KE
|
||||||
|
0x10d00000: 109, // de
|
||||||
|
0x10d0002e: 110, // de-AT
|
||||||
|
0x10d00036: 111, // de-BE
|
||||||
|
0x10d0004e: 112, // de-CH
|
||||||
|
0x10d00060: 113, // de-DE
|
||||||
|
0x10d0009e: 114, // de-IT
|
||||||
|
0x10d000b2: 115, // de-LI
|
||||||
|
0x10d000b7: 116, // de-LU
|
||||||
|
0x11700000: 117, // dje
|
||||||
|
0x117000d4: 118, // dje-NE
|
||||||
|
0x11f00000: 119, // dsb
|
||||||
|
0x11f00060: 120, // dsb-DE
|
||||||
|
0x12400000: 121, // dua
|
||||||
|
0x12400052: 122, // dua-CM
|
||||||
|
0x12800000: 123, // dv
|
||||||
|
0x12b00000: 124, // dyo
|
||||||
|
0x12b00114: 125, // dyo-SN
|
||||||
|
0x12d00000: 126, // dz
|
||||||
|
0x12d00043: 127, // dz-BT
|
||||||
|
0x12f00000: 128, // ebu
|
||||||
|
0x12f000a4: 129, // ebu-KE
|
||||||
|
0x13000000: 130, // ee
|
||||||
|
0x13000080: 131, // ee-GH
|
||||||
|
0x13000122: 132, // ee-TG
|
||||||
|
0x13600000: 133, // el
|
||||||
|
0x1360005d: 134, // el-CY
|
||||||
|
0x13600087: 135, // el-GR
|
||||||
|
0x13900000: 136, // en
|
||||||
|
0x13900001: 137, // en-001
|
||||||
|
0x1390001a: 138, // en-150
|
||||||
|
0x13900025: 139, // en-AG
|
||||||
|
0x13900026: 140, // en-AI
|
||||||
|
0x1390002d: 141, // en-AS
|
||||||
|
0x1390002e: 142, // en-AT
|
||||||
|
0x1390002f: 143, // en-AU
|
||||||
|
0x13900034: 144, // en-BB
|
||||||
|
0x13900036: 145, // en-BE
|
||||||
|
0x1390003a: 146, // en-BI
|
||||||
|
0x1390003d: 147, // en-BM
|
||||||
|
0x13900042: 148, // en-BS
|
||||||
|
0x13900046: 149, // en-BW
|
||||||
|
0x13900048: 150, // en-BZ
|
||||||
|
0x13900049: 151, // en-CA
|
||||||
|
0x1390004a: 152, // en-CC
|
||||||
|
0x1390004e: 153, // en-CH
|
||||||
|
0x13900050: 154, // en-CK
|
||||||
|
0x13900052: 155, // en-CM
|
||||||
|
0x1390005c: 156, // en-CX
|
||||||
|
0x1390005d: 157, // en-CY
|
||||||
|
0x13900060: 158, // en-DE
|
||||||
|
0x13900061: 159, // en-DG
|
||||||
|
0x13900063: 160, // en-DK
|
||||||
|
0x13900064: 161, // en-DM
|
||||||
|
0x1390006d: 162, // en-ER
|
||||||
|
0x13900072: 163, // en-FI
|
||||||
|
0x13900073: 164, // en-FJ
|
||||||
|
0x13900074: 165, // en-FK
|
||||||
|
0x13900075: 166, // en-FM
|
||||||
|
0x1390007b: 167, // en-GB
|
||||||
|
0x1390007c: 168, // en-GD
|
||||||
|
0x1390007f: 169, // en-GG
|
||||||
|
0x13900080: 170, // en-GH
|
||||||
|
0x13900081: 171, // en-GI
|
||||||
|
0x13900083: 172, // en-GM
|
||||||
|
0x1390008a: 173, // en-GU
|
||||||
|
0x1390008c: 174, // en-GY
|
||||||
|
0x1390008d: 175, // en-HK
|
||||||
|
0x13900096: 176, // en-IE
|
||||||
|
0x13900097: 177, // en-IL
|
||||||
|
0x13900098: 178, // en-IM
|
||||||
|
0x13900099: 179, // en-IN
|
||||||
|
0x1390009a: 180, // en-IO
|
||||||
|
0x1390009f: 181, // en-JE
|
||||||
|
0x139000a0: 182, // en-JM
|
||||||
|
0x139000a4: 183, // en-KE
|
||||||
|
0x139000a7: 184, // en-KI
|
||||||
|
0x139000a9: 185, // en-KN
|
||||||
|
0x139000ad: 186, // en-KY
|
||||||
|
0x139000b1: 187, // en-LC
|
||||||
|
0x139000b4: 188, // en-LR
|
||||||
|
0x139000b5: 189, // en-LS
|
||||||
|
0x139000bf: 190, // en-MG
|
||||||
|
0x139000c0: 191, // en-MH
|
||||||
|
0x139000c6: 192, // en-MO
|
||||||
|
0x139000c7: 193, // en-MP
|
||||||
|
0x139000ca: 194, // en-MS
|
||||||
|
0x139000cb: 195, // en-MT
|
||||||
|
0x139000cc: 196, // en-MU
|
||||||
|
0x139000ce: 197, // en-MW
|
||||||
|
0x139000d0: 198, // en-MY
|
||||||
|
0x139000d2: 199, // en-NA
|
||||||
|
0x139000d5: 200, // en-NF
|
||||||
|
0x139000d6: 201, // en-NG
|
||||||
|
0x139000d9: 202, // en-NL
|
||||||
|
0x139000dd: 203, // en-NR
|
||||||
|
0x139000df: 204, // en-NU
|
||||||
|
0x139000e0: 205, // en-NZ
|
||||||
|
0x139000e6: 206, // en-PG
|
||||||
|
0x139000e7: 207, // en-PH
|
||||||
|
0x139000e8: 208, // en-PK
|
||||||
|
0x139000eb: 209, // en-PN
|
||||||
|
0x139000ec: 210, // en-PR
|
||||||
|
0x139000f0: 211, // en-PW
|
||||||
|
0x13900107: 212, // en-RW
|
||||||
|
0x13900109: 213, // en-SB
|
||||||
|
0x1390010a: 214, // en-SC
|
||||||
|
0x1390010b: 215, // en-SD
|
||||||
|
0x1390010c: 216, // en-SE
|
||||||
|
0x1390010d: 217, // en-SG
|
||||||
|
0x1390010e: 218, // en-SH
|
||||||
|
0x1390010f: 219, // en-SI
|
||||||
|
0x13900112: 220, // en-SL
|
||||||
|
0x13900117: 221, // en-SS
|
||||||
|
0x1390011b: 222, // en-SX
|
||||||
|
0x1390011d: 223, // en-SZ
|
||||||
|
0x1390011f: 224, // en-TC
|
||||||
|
0x13900125: 225, // en-TK
|
||||||
|
0x13900129: 226, // en-TO
|
||||||
|
0x1390012c: 227, // en-TT
|
||||||
|
0x1390012d: 228, // en-TV
|
||||||
|
0x1390012f: 229, // en-TZ
|
||||||
|
0x13900131: 230, // en-UG
|
||||||
|
0x13900133: 231, // en-UM
|
||||||
|
0x13900135: 232, // en-US
|
||||||
|
0x13900139: 233, // en-VC
|
||||||
|
0x1390013c: 234, // en-VG
|
||||||
|
0x1390013d: 235, // en-VI
|
||||||
|
0x1390013f: 236, // en-VU
|
||||||
|
0x13900142: 237, // en-WS
|
||||||
|
0x13900161: 238, // en-ZA
|
||||||
|
0x13900162: 239, // en-ZM
|
||||||
|
0x13900164: 240, // en-ZW
|
||||||
|
0x13c00000: 241, // eo
|
||||||
|
0x13c00001: 242, // eo-001
|
||||||
|
0x13e00000: 243, // es
|
||||||
|
0x13e0001f: 244, // es-419
|
||||||
|
0x13e0002c: 245, // es-AR
|
||||||
|
0x13e0003f: 246, // es-BO
|
||||||
|
0x13e00041: 247, // es-BR
|
||||||
|
0x13e00048: 248, // es-BZ
|
||||||
|
0x13e00051: 249, // es-CL
|
||||||
|
0x13e00054: 250, // es-CO
|
||||||
|
0x13e00056: 251, // es-CR
|
||||||
|
0x13e00059: 252, // es-CU
|
||||||
|
0x13e00065: 253, // es-DO
|
||||||
|
0x13e00068: 254, // es-EA
|
||||||
|
0x13e00069: 255, // es-EC
|
||||||
|
0x13e0006e: 256, // es-ES
|
||||||
|
0x13e00086: 257, // es-GQ
|
||||||
|
0x13e00089: 258, // es-GT
|
||||||
|
0x13e0008f: 259, // es-HN
|
||||||
|
0x13e00094: 260, // es-IC
|
||||||
|
0x13e000cf: 261, // es-MX
|
||||||
|
0x13e000d8: 262, // es-NI
|
||||||
|
0x13e000e2: 263, // es-PA
|
||||||
|
0x13e000e4: 264, // es-PE
|
||||||
|
0x13e000e7: 265, // es-PH
|
||||||
|
0x13e000ec: 266, // es-PR
|
||||||
|
0x13e000f1: 267, // es-PY
|
||||||
|
0x13e0011a: 268, // es-SV
|
||||||
|
0x13e00135: 269, // es-US
|
||||||
|
0x13e00136: 270, // es-UY
|
||||||
|
0x13e0013b: 271, // es-VE
|
||||||
|
0x14000000: 272, // et
|
||||||
|
0x1400006a: 273, // et-EE
|
||||||
|
0x14500000: 274, // eu
|
||||||
|
0x1450006e: 275, // eu-ES
|
||||||
|
0x14600000: 276, // ewo
|
||||||
|
0x14600052: 277, // ewo-CM
|
||||||
|
0x14800000: 278, // fa
|
||||||
|
0x14800024: 279, // fa-AF
|
||||||
|
0x1480009c: 280, // fa-IR
|
||||||
|
0x14e00000: 281, // ff
|
||||||
|
0x14e00052: 282, // ff-CM
|
||||||
|
0x14e00084: 283, // ff-GN
|
||||||
|
0x14e000c9: 284, // ff-MR
|
||||||
|
0x14e00114: 285, // ff-SN
|
||||||
|
0x15100000: 286, // fi
|
||||||
|
0x15100072: 287, // fi-FI
|
||||||
|
0x15300000: 288, // fil
|
||||||
|
0x153000e7: 289, // fil-PH
|
||||||
|
0x15800000: 290, // fo
|
||||||
|
0x15800063: 291, // fo-DK
|
||||||
|
0x15800076: 292, // fo-FO
|
||||||
|
0x15e00000: 293, // fr
|
||||||
|
0x15e00036: 294, // fr-BE
|
||||||
|
0x15e00037: 295, // fr-BF
|
||||||
|
0x15e0003a: 296, // fr-BI
|
||||||
|
0x15e0003b: 297, // fr-BJ
|
||||||
|
0x15e0003c: 298, // fr-BL
|
||||||
|
0x15e00049: 299, // fr-CA
|
||||||
|
0x15e0004b: 300, // fr-CD
|
||||||
|
0x15e0004c: 301, // fr-CF
|
||||||
|
0x15e0004d: 302, // fr-CG
|
||||||
|
0x15e0004e: 303, // fr-CH
|
||||||
|
0x15e0004f: 304, // fr-CI
|
||||||
|
0x15e00052: 305, // fr-CM
|
||||||
|
0x15e00062: 306, // fr-DJ
|
||||||
|
0x15e00067: 307, // fr-DZ
|
||||||
|
0x15e00078: 308, // fr-FR
|
||||||
|
0x15e0007a: 309, // fr-GA
|
||||||
|
0x15e0007e: 310, // fr-GF
|
||||||
|
0x15e00084: 311, // fr-GN
|
||||||
|
0x15e00085: 312, // fr-GP
|
||||||
|
0x15e00086: 313, // fr-GQ
|
||||||
|
0x15e00091: 314, // fr-HT
|
||||||
|
0x15e000a8: 315, // fr-KM
|
||||||
|
0x15e000b7: 316, // fr-LU
|
||||||
|
0x15e000ba: 317, // fr-MA
|
||||||
|
0x15e000bb: 318, // fr-MC
|
||||||
|
0x15e000be: 319, // fr-MF
|
||||||
|
0x15e000bf: 320, // fr-MG
|
||||||
|
0x15e000c3: 321, // fr-ML
|
||||||
|
0x15e000c8: 322, // fr-MQ
|
||||||
|
0x15e000c9: 323, // fr-MR
|
||||||
|
0x15e000cc: 324, // fr-MU
|
||||||
|
0x15e000d3: 325, // fr-NC
|
||||||
|
0x15e000d4: 326, // fr-NE
|
||||||
|
0x15e000e5: 327, // fr-PF
|
||||||
|
0x15e000ea: 328, // fr-PM
|
||||||
|
0x15e00102: 329, // fr-RE
|
||||||
|
0x15e00107: 330, // fr-RW
|
||||||
|
0x15e0010a: 331, // fr-SC
|
||||||
|
0x15e00114: 332, // fr-SN
|
||||||
|
0x15e0011c: 333, // fr-SY
|
||||||
|
0x15e00120: 334, // fr-TD
|
||||||
|
0x15e00122: 335, // fr-TG
|
||||||
|
0x15e00128: 336, // fr-TN
|
||||||
|
0x15e0013f: 337, // fr-VU
|
||||||
|
0x15e00140: 338, // fr-WF
|
||||||
|
0x15e0015f: 339, // fr-YT
|
||||||
|
0x16900000: 340, // fur
|
||||||
|
0x1690009e: 341, // fur-IT
|
||||||
|
0x16d00000: 342, // fy
|
||||||
|
0x16d000d9: 343, // fy-NL
|
||||||
|
0x16e00000: 344, // ga
|
||||||
|
0x16e00096: 345, // ga-IE
|
||||||
|
0x17e00000: 346, // gd
|
||||||
|
0x17e0007b: 347, // gd-GB
|
||||||
|
0x19000000: 348, // gl
|
||||||
|
0x1900006e: 349, // gl-ES
|
||||||
|
0x1a300000: 350, // gsw
|
||||||
|
0x1a30004e: 351, // gsw-CH
|
||||||
|
0x1a300078: 352, // gsw-FR
|
||||||
|
0x1a3000b2: 353, // gsw-LI
|
||||||
|
0x1a400000: 354, // gu
|
||||||
|
0x1a400099: 355, // gu-IN
|
||||||
|
0x1a900000: 356, // guw
|
||||||
|
0x1ab00000: 357, // guz
|
||||||
|
0x1ab000a4: 358, // guz-KE
|
||||||
|
0x1ac00000: 359, // gv
|
||||||
|
0x1ac00098: 360, // gv-IM
|
||||||
|
0x1b400000: 361, // ha
|
||||||
|
0x1b400080: 362, // ha-GH
|
||||||
|
0x1b4000d4: 363, // ha-NE
|
||||||
|
0x1b4000d6: 364, // ha-NG
|
||||||
|
0x1b800000: 365, // haw
|
||||||
|
0x1b800135: 366, // haw-US
|
||||||
|
0x1bc00000: 367, // he
|
||||||
|
0x1bc00097: 368, // he-IL
|
||||||
|
0x1be00000: 369, // hi
|
||||||
|
0x1be00099: 370, // hi-IN
|
||||||
|
0x1d100000: 371, // hr
|
||||||
|
0x1d100033: 372, // hr-BA
|
||||||
|
0x1d100090: 373, // hr-HR
|
||||||
|
0x1d200000: 374, // hsb
|
||||||
|
0x1d200060: 375, // hsb-DE
|
||||||
|
0x1d500000: 376, // hu
|
||||||
|
0x1d500092: 377, // hu-HU
|
||||||
|
0x1d700000: 378, // hy
|
||||||
|
0x1d700028: 379, // hy-AM
|
||||||
|
0x1e100000: 380, // id
|
||||||
|
0x1e100095: 381, // id-ID
|
||||||
|
0x1e700000: 382, // ig
|
||||||
|
0x1e7000d6: 383, // ig-NG
|
||||||
|
0x1ea00000: 384, // ii
|
||||||
|
0x1ea00053: 385, // ii-CN
|
||||||
|
0x1f500000: 386, // io
|
||||||
|
0x1f800000: 387, // is
|
||||||
|
0x1f80009d: 388, // is-IS
|
||||||
|
0x1f900000: 389, // it
|
||||||
|
0x1f90004e: 390, // it-CH
|
||||||
|
0x1f90009e: 391, // it-IT
|
||||||
|
0x1f900113: 392, // it-SM
|
||||||
|
0x1f900138: 393, // it-VA
|
||||||
|
0x1fa00000: 394, // iu
|
||||||
|
0x20000000: 395, // ja
|
||||||
|
0x200000a2: 396, // ja-JP
|
||||||
|
0x20300000: 397, // jbo
|
||||||
|
0x20700000: 398, // jgo
|
||||||
|
0x20700052: 399, // jgo-CM
|
||||||
|
0x20a00000: 400, // jmc
|
||||||
|
0x20a0012f: 401, // jmc-TZ
|
||||||
|
0x20e00000: 402, // jv
|
||||||
|
0x21000000: 403, // ka
|
||||||
|
0x2100007d: 404, // ka-GE
|
||||||
|
0x21200000: 405, // kab
|
||||||
|
0x21200067: 406, // kab-DZ
|
||||||
|
0x21600000: 407, // kaj
|
||||||
|
0x21700000: 408, // kam
|
||||||
|
0x217000a4: 409, // kam-KE
|
||||||
|
0x21f00000: 410, // kcg
|
||||||
|
0x22300000: 411, // kde
|
||||||
|
0x2230012f: 412, // kde-TZ
|
||||||
|
0x22700000: 413, // kea
|
||||||
|
0x2270005a: 414, // kea-CV
|
||||||
|
0x23400000: 415, // khq
|
||||||
|
0x234000c3: 416, // khq-ML
|
||||||
|
0x23900000: 417, // ki
|
||||||
|
0x239000a4: 418, // ki-KE
|
||||||
|
0x24200000: 419, // kk
|
||||||
|
0x242000ae: 420, // kk-KZ
|
||||||
|
0x24400000: 421, // kkj
|
||||||
|
0x24400052: 422, // kkj-CM
|
||||||
|
0x24500000: 423, // kl
|
||||||
|
0x24500082: 424, // kl-GL
|
||||||
|
0x24600000: 425, // kln
|
||||||
|
0x246000a4: 426, // kln-KE
|
||||||
|
0x24a00000: 427, // km
|
||||||
|
0x24a000a6: 428, // km-KH
|
||||||
|
0x25100000: 429, // kn
|
||||||
|
0x25100099: 430, // kn-IN
|
||||||
|
0x25400000: 431, // ko
|
||||||
|
0x254000aa: 432, // ko-KP
|
||||||
|
0x254000ab: 433, // ko-KR
|
||||||
|
0x25600000: 434, // kok
|
||||||
|
0x25600099: 435, // kok-IN
|
||||||
|
0x26a00000: 436, // ks
|
||||||
|
0x26a00099: 437, // ks-IN
|
||||||
|
0x26b00000: 438, // ksb
|
||||||
|
0x26b0012f: 439, // ksb-TZ
|
||||||
|
0x26d00000: 440, // ksf
|
||||||
|
0x26d00052: 441, // ksf-CM
|
||||||
|
0x26e00000: 442, // ksh
|
||||||
|
0x26e00060: 443, // ksh-DE
|
||||||
|
0x27400000: 444, // ku
|
||||||
|
0x28100000: 445, // kw
|
||||||
|
0x2810007b: 446, // kw-GB
|
||||||
|
0x28a00000: 447, // ky
|
||||||
|
0x28a000a5: 448, // ky-KG
|
||||||
|
0x29100000: 449, // lag
|
||||||
|
0x2910012f: 450, // lag-TZ
|
||||||
|
0x29500000: 451, // lb
|
||||||
|
0x295000b7: 452, // lb-LU
|
||||||
|
0x2a300000: 453, // lg
|
||||||
|
0x2a300131: 454, // lg-UG
|
||||||
|
0x2af00000: 455, // lkt
|
||||||
|
0x2af00135: 456, // lkt-US
|
||||||
|
0x2b500000: 457, // ln
|
||||||
|
0x2b50002a: 458, // ln-AO
|
||||||
|
0x2b50004b: 459, // ln-CD
|
||||||
|
0x2b50004c: 460, // ln-CF
|
||||||
|
0x2b50004d: 461, // ln-CG
|
||||||
|
0x2b800000: 462, // lo
|
||||||
|
0x2b8000af: 463, // lo-LA
|
||||||
|
0x2bf00000: 464, // lrc
|
||||||
|
0x2bf0009b: 465, // lrc-IQ
|
||||||
|
0x2bf0009c: 466, // lrc-IR
|
||||||
|
0x2c000000: 467, // lt
|
||||||
|
0x2c0000b6: 468, // lt-LT
|
||||||
|
0x2c200000: 469, // lu
|
||||||
|
0x2c20004b: 470, // lu-CD
|
||||||
|
0x2c400000: 471, // luo
|
||||||
|
0x2c4000a4: 472, // luo-KE
|
||||||
|
0x2c500000: 473, // luy
|
||||||
|
0x2c5000a4: 474, // luy-KE
|
||||||
|
0x2c700000: 475, // lv
|
||||||
|
0x2c7000b8: 476, // lv-LV
|
||||||
|
0x2d100000: 477, // mas
|
||||||
|
0x2d1000a4: 478, // mas-KE
|
||||||
|
0x2d10012f: 479, // mas-TZ
|
||||||
|
0x2e900000: 480, // mer
|
||||||
|
0x2e9000a4: 481, // mer-KE
|
||||||
|
0x2ed00000: 482, // mfe
|
||||||
|
0x2ed000cc: 483, // mfe-MU
|
||||||
|
0x2f100000: 484, // mg
|
||||||
|
0x2f1000bf: 485, // mg-MG
|
||||||
|
0x2f200000: 486, // mgh
|
||||||
|
0x2f2000d1: 487, // mgh-MZ
|
||||||
|
0x2f400000: 488, // mgo
|
||||||
|
0x2f400052: 489, // mgo-CM
|
||||||
|
0x2ff00000: 490, // mk
|
||||||
|
0x2ff000c2: 491, // mk-MK
|
||||||
|
0x30400000: 492, // ml
|
||||||
|
0x30400099: 493, // ml-IN
|
||||||
|
0x30b00000: 494, // mn
|
||||||
|
0x30b000c5: 495, // mn-MN
|
||||||
|
0x31b00000: 496, // mr
|
||||||
|
0x31b00099: 497, // mr-IN
|
||||||
|
0x31f00000: 498, // ms
|
||||||
|
0x31f0003e: 499, // ms-BN
|
||||||
|
0x31f000d0: 500, // ms-MY
|
||||||
|
0x31f0010d: 501, // ms-SG
|
||||||
|
0x32000000: 502, // mt
|
||||||
|
0x320000cb: 503, // mt-MT
|
||||||
|
0x32500000: 504, // mua
|
||||||
|
0x32500052: 505, // mua-CM
|
||||||
|
0x33100000: 506, // my
|
||||||
|
0x331000c4: 507, // my-MM
|
||||||
|
0x33a00000: 508, // mzn
|
||||||
|
0x33a0009c: 509, // mzn-IR
|
||||||
|
0x34100000: 510, // nah
|
||||||
|
0x34500000: 511, // naq
|
||||||
|
0x345000d2: 512, // naq-NA
|
||||||
|
0x34700000: 513, // nb
|
||||||
|
0x347000da: 514, // nb-NO
|
||||||
|
0x34700110: 515, // nb-SJ
|
||||||
|
0x34e00000: 516, // nd
|
||||||
|
0x34e00164: 517, // nd-ZW
|
||||||
|
0x35000000: 518, // nds
|
||||||
|
0x35000060: 519, // nds-DE
|
||||||
|
0x350000d9: 520, // nds-NL
|
||||||
|
0x35100000: 521, // ne
|
||||||
|
0x35100099: 522, // ne-IN
|
||||||
|
0x351000db: 523, // ne-NP
|
||||||
|
0x36700000: 524, // nl
|
||||||
|
0x36700030: 525, // nl-AW
|
||||||
|
0x36700036: 526, // nl-BE
|
||||||
|
0x36700040: 527, // nl-BQ
|
||||||
|
0x3670005b: 528, // nl-CW
|
||||||
|
0x367000d9: 529, // nl-NL
|
||||||
|
0x36700116: 530, // nl-SR
|
||||||
|
0x3670011b: 531, // nl-SX
|
||||||
|
0x36800000: 532, // nmg
|
||||||
|
0x36800052: 533, // nmg-CM
|
||||||
|
0x36a00000: 534, // nn
|
||||||
|
0x36a000da: 535, // nn-NO
|
||||||
|
0x36c00000: 536, // nnh
|
||||||
|
0x36c00052: 537, // nnh-CM
|
||||||
|
0x36f00000: 538, // no
|
||||||
|
0x37500000: 539, // nqo
|
||||||
|
0x37600000: 540, // nr
|
||||||
|
0x37a00000: 541, // nso
|
||||||
|
0x38000000: 542, // nus
|
||||||
|
0x38000117: 543, // nus-SS
|
||||||
|
0x38700000: 544, // ny
|
||||||
|
0x38900000: 545, // nyn
|
||||||
|
0x38900131: 546, // nyn-UG
|
||||||
|
0x39000000: 547, // om
|
||||||
|
0x3900006f: 548, // om-ET
|
||||||
|
0x390000a4: 549, // om-KE
|
||||||
|
0x39500000: 550, // or
|
||||||
|
0x39500099: 551, // or-IN
|
||||||
|
0x39800000: 552, // os
|
||||||
|
0x3980007d: 553, // os-GE
|
||||||
|
0x39800106: 554, // os-RU
|
||||||
|
0x39d00000: 555, // pa
|
||||||
|
0x39d05000: 556, // pa-Arab
|
||||||
|
0x39d050e8: 557, // pa-Arab-PK
|
||||||
|
0x39d33000: 558, // pa-Guru
|
||||||
|
0x39d33099: 559, // pa-Guru-IN
|
||||||
|
0x3a100000: 560, // pap
|
||||||
|
0x3b300000: 561, // pl
|
||||||
|
0x3b3000e9: 562, // pl-PL
|
||||||
|
0x3bd00000: 563, // prg
|
||||||
|
0x3bd00001: 564, // prg-001
|
||||||
|
0x3be00000: 565, // ps
|
||||||
|
0x3be00024: 566, // ps-AF
|
||||||
|
0x3c000000: 567, // pt
|
||||||
|
0x3c00002a: 568, // pt-AO
|
||||||
|
0x3c000041: 569, // pt-BR
|
||||||
|
0x3c00004e: 570, // pt-CH
|
||||||
|
0x3c00005a: 571, // pt-CV
|
||||||
|
0x3c000086: 572, // pt-GQ
|
||||||
|
0x3c00008b: 573, // pt-GW
|
||||||
|
0x3c0000b7: 574, // pt-LU
|
||||||
|
0x3c0000c6: 575, // pt-MO
|
||||||
|
0x3c0000d1: 576, // pt-MZ
|
||||||
|
0x3c0000ee: 577, // pt-PT
|
||||||
|
0x3c000118: 578, // pt-ST
|
||||||
|
0x3c000126: 579, // pt-TL
|
||||||
|
0x3c400000: 580, // qu
|
||||||
|
0x3c40003f: 581, // qu-BO
|
||||||
|
0x3c400069: 582, // qu-EC
|
||||||
|
0x3c4000e4: 583, // qu-PE
|
||||||
|
0x3d400000: 584, // rm
|
||||||
|
0x3d40004e: 585, // rm-CH
|
||||||
|
0x3d900000: 586, // rn
|
||||||
|
0x3d90003a: 587, // rn-BI
|
||||||
|
0x3dc00000: 588, // ro
|
||||||
|
0x3dc000bc: 589, // ro-MD
|
||||||
|
0x3dc00104: 590, // ro-RO
|
||||||
|
0x3de00000: 591, // rof
|
||||||
|
0x3de0012f: 592, // rof-TZ
|
||||||
|
0x3e200000: 593, // ru
|
||||||
|
0x3e200047: 594, // ru-BY
|
||||||
|
0x3e2000a5: 595, // ru-KG
|
||||||
|
0x3e2000ae: 596, // ru-KZ
|
||||||
|
0x3e2000bc: 597, // ru-MD
|
||||||
|
0x3e200106: 598, // ru-RU
|
||||||
|
0x3e200130: 599, // ru-UA
|
||||||
|
0x3e500000: 600, // rw
|
||||||
|
0x3e500107: 601, // rw-RW
|
||||||
|
0x3e600000: 602, // rwk
|
||||||
|
0x3e60012f: 603, // rwk-TZ
|
||||||
|
0x3eb00000: 604, // sah
|
||||||
|
0x3eb00106: 605, // sah-RU
|
||||||
|
0x3ec00000: 606, // saq
|
||||||
|
0x3ec000a4: 607, // saq-KE
|
||||||
|
0x3f300000: 608, // sbp
|
||||||
|
0x3f30012f: 609, // sbp-TZ
|
||||||
|
0x3fa00000: 610, // sd
|
||||||
|
0x3fa000e8: 611, // sd-PK
|
||||||
|
0x3fc00000: 612, // sdh
|
||||||
|
0x3fd00000: 613, // se
|
||||||
|
0x3fd00072: 614, // se-FI
|
||||||
|
0x3fd000da: 615, // se-NO
|
||||||
|
0x3fd0010c: 616, // se-SE
|
||||||
|
0x3ff00000: 617, // seh
|
||||||
|
0x3ff000d1: 618, // seh-MZ
|
||||||
|
0x40100000: 619, // ses
|
||||||
|
0x401000c3: 620, // ses-ML
|
||||||
|
0x40200000: 621, // sg
|
||||||
|
0x4020004c: 622, // sg-CF
|
||||||
|
0x40800000: 623, // shi
|
||||||
|
0x40857000: 624, // shi-Latn
|
||||||
|
0x408570ba: 625, // shi-Latn-MA
|
||||||
|
0x408dc000: 626, // shi-Tfng
|
||||||
|
0x408dc0ba: 627, // shi-Tfng-MA
|
||||||
|
0x40c00000: 628, // si
|
||||||
|
0x40c000b3: 629, // si-LK
|
||||||
|
0x41200000: 630, // sk
|
||||||
|
0x41200111: 631, // sk-SK
|
||||||
|
0x41600000: 632, // sl
|
||||||
|
0x4160010f: 633, // sl-SI
|
||||||
|
0x41c00000: 634, // sma
|
||||||
|
0x41d00000: 635, // smi
|
||||||
|
0x41e00000: 636, // smj
|
||||||
|
0x41f00000: 637, // smn
|
||||||
|
0x41f00072: 638, // smn-FI
|
||||||
|
0x42200000: 639, // sms
|
||||||
|
0x42300000: 640, // sn
|
||||||
|
0x42300164: 641, // sn-ZW
|
||||||
|
0x42900000: 642, // so
|
||||||
|
0x42900062: 643, // so-DJ
|
||||||
|
0x4290006f: 644, // so-ET
|
||||||
|
0x429000a4: 645, // so-KE
|
||||||
|
0x42900115: 646, // so-SO
|
||||||
|
0x43100000: 647, // sq
|
||||||
|
0x43100027: 648, // sq-AL
|
||||||
|
0x431000c2: 649, // sq-MK
|
||||||
|
0x4310014d: 650, // sq-XK
|
||||||
|
0x43200000: 651, // sr
|
||||||
|
0x4321f000: 652, // sr-Cyrl
|
||||||
|
0x4321f033: 653, // sr-Cyrl-BA
|
||||||
|
0x4321f0bd: 654, // sr-Cyrl-ME
|
||||||
|
0x4321f105: 655, // sr-Cyrl-RS
|
||||||
|
0x4321f14d: 656, // sr-Cyrl-XK
|
||||||
|
0x43257000: 657, // sr-Latn
|
||||||
|
0x43257033: 658, // sr-Latn-BA
|
||||||
|
0x432570bd: 659, // sr-Latn-ME
|
||||||
|
0x43257105: 660, // sr-Latn-RS
|
||||||
|
0x4325714d: 661, // sr-Latn-XK
|
||||||
|
0x43700000: 662, // ss
|
||||||
|
0x43a00000: 663, // ssy
|
||||||
|
0x43b00000: 664, // st
|
||||||
|
0x44400000: 665, // sv
|
||||||
|
0x44400031: 666, // sv-AX
|
||||||
|
0x44400072: 667, // sv-FI
|
||||||
|
0x4440010c: 668, // sv-SE
|
||||||
|
0x44500000: 669, // sw
|
||||||
|
0x4450004b: 670, // sw-CD
|
||||||
|
0x445000a4: 671, // sw-KE
|
||||||
|
0x4450012f: 672, // sw-TZ
|
||||||
|
0x44500131: 673, // sw-UG
|
||||||
|
0x44e00000: 674, // syr
|
||||||
|
0x45000000: 675, // ta
|
||||||
|
0x45000099: 676, // ta-IN
|
||||||
|
0x450000b3: 677, // ta-LK
|
||||||
|
0x450000d0: 678, // ta-MY
|
||||||
|
0x4500010d: 679, // ta-SG
|
||||||
|
0x46100000: 680, // te
|
||||||
|
0x46100099: 681, // te-IN
|
||||||
|
0x46400000: 682, // teo
|
||||||
|
0x464000a4: 683, // teo-KE
|
||||||
|
0x46400131: 684, // teo-UG
|
||||||
|
0x46700000: 685, // tg
|
||||||
|
0x46700124: 686, // tg-TJ
|
||||||
|
0x46b00000: 687, // th
|
||||||
|
0x46b00123: 688, // th-TH
|
||||||
|
0x46f00000: 689, // ti
|
||||||
|
0x46f0006d: 690, // ti-ER
|
||||||
|
0x46f0006f: 691, // ti-ET
|
||||||
|
0x47100000: 692, // tig
|
||||||
|
0x47600000: 693, // tk
|
||||||
|
0x47600127: 694, // tk-TM
|
||||||
|
0x48000000: 695, // tn
|
||||||
|
0x48200000: 696, // to
|
||||||
|
0x48200129: 697, // to-TO
|
||||||
|
0x48a00000: 698, // tr
|
||||||
|
0x48a0005d: 699, // tr-CY
|
||||||
|
0x48a0012b: 700, // tr-TR
|
||||||
|
0x48e00000: 701, // ts
|
||||||
|
0x49400000: 702, // tt
|
||||||
|
0x49400106: 703, // tt-RU
|
||||||
|
0x4a400000: 704, // twq
|
||||||
|
0x4a4000d4: 705, // twq-NE
|
||||||
|
0x4a900000: 706, // tzm
|
||||||
|
0x4a9000ba: 707, // tzm-MA
|
||||||
|
0x4ac00000: 708, // ug
|
||||||
|
0x4ac00053: 709, // ug-CN
|
||||||
|
0x4ae00000: 710, // uk
|
||||||
|
0x4ae00130: 711, // uk-UA
|
||||||
|
0x4b400000: 712, // ur
|
||||||
|
0x4b400099: 713, // ur-IN
|
||||||
|
0x4b4000e8: 714, // ur-PK
|
||||||
|
0x4bc00000: 715, // uz
|
||||||
|
0x4bc05000: 716, // uz-Arab
|
||||||
|
0x4bc05024: 717, // uz-Arab-AF
|
||||||
|
0x4bc1f000: 718, // uz-Cyrl
|
||||||
|
0x4bc1f137: 719, // uz-Cyrl-UZ
|
||||||
|
0x4bc57000: 720, // uz-Latn
|
||||||
|
0x4bc57137: 721, // uz-Latn-UZ
|
||||||
|
0x4be00000: 722, // vai
|
||||||
|
0x4be57000: 723, // vai-Latn
|
||||||
|
0x4be570b4: 724, // vai-Latn-LR
|
||||||
|
0x4bee3000: 725, // vai-Vaii
|
||||||
|
0x4bee30b4: 726, // vai-Vaii-LR
|
||||||
|
0x4c000000: 727, // ve
|
||||||
|
0x4c300000: 728, // vi
|
||||||
|
0x4c30013e: 729, // vi-VN
|
||||||
|
0x4c900000: 730, // vo
|
||||||
|
0x4c900001: 731, // vo-001
|
||||||
|
0x4cc00000: 732, // vun
|
||||||
|
0x4cc0012f: 733, // vun-TZ
|
||||||
|
0x4ce00000: 734, // wa
|
||||||
|
0x4cf00000: 735, // wae
|
||||||
|
0x4cf0004e: 736, // wae-CH
|
||||||
|
0x4e500000: 737, // wo
|
||||||
|
0x4e500114: 738, // wo-SN
|
||||||
|
0x4f200000: 739, // xh
|
||||||
|
0x4fb00000: 740, // xog
|
||||||
|
0x4fb00131: 741, // xog-UG
|
||||||
|
0x50900000: 742, // yav
|
||||||
|
0x50900052: 743, // yav-CM
|
||||||
|
0x51200000: 744, // yi
|
||||||
|
0x51200001: 745, // yi-001
|
||||||
|
0x51800000: 746, // yo
|
||||||
|
0x5180003b: 747, // yo-BJ
|
||||||
|
0x518000d6: 748, // yo-NG
|
||||||
|
0x51f00000: 749, // yue
|
||||||
|
0x51f38000: 750, // yue-Hans
|
||||||
|
0x51f38053: 751, // yue-Hans-CN
|
||||||
|
0x51f39000: 752, // yue-Hant
|
||||||
|
0x51f3908d: 753, // yue-Hant-HK
|
||||||
|
0x52800000: 754, // zgh
|
||||||
|
0x528000ba: 755, // zgh-MA
|
||||||
|
0x52900000: 756, // zh
|
||||||
|
0x52938000: 757, // zh-Hans
|
||||||
|
0x52938053: 758, // zh-Hans-CN
|
||||||
|
0x5293808d: 759, // zh-Hans-HK
|
||||||
|
0x529380c6: 760, // zh-Hans-MO
|
||||||
|
0x5293810d: 761, // zh-Hans-SG
|
||||||
|
0x52939000: 762, // zh-Hant
|
||||||
|
0x5293908d: 763, // zh-Hant-HK
|
||||||
|
0x529390c6: 764, // zh-Hant-MO
|
||||||
|
0x5293912e: 765, // zh-Hant-TW
|
||||||
|
0x52f00000: 766, // zu
|
||||||
|
0x52f00161: 767, // zu-ZA
|
||||||
|
}
|
||||||
|
|
||||||
|
// Total table size 4676 bytes (4KiB); checksum: 17BE3673
|
907
vendor/golang.org/x/text/language/language.go
generated
vendored
Normal file
907
vendor/golang.org/x/text/language/language.go
generated
vendored
Normal file
@ -0,0 +1,907 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
//go:generate go run gen.go gen_common.go -output tables.go
|
||||||
|
//go:generate go run gen_index.go
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
// TODO: Remove above NOTE after:
|
||||||
|
// - verifying that tables are dropped correctly (most notably matcher tables).
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// maxCoreSize is the maximum size of a BCP 47 tag without variants and
|
||||||
|
// extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes.
|
||||||
|
maxCoreSize = 12
|
||||||
|
|
||||||
|
// max99thPercentileSize is a somewhat arbitrary buffer size that presumably
|
||||||
|
// is large enough to hold at least 99% of the BCP 47 tags.
|
||||||
|
max99thPercentileSize = 32
|
||||||
|
|
||||||
|
// maxSimpleUExtensionSize is the maximum size of a -u extension with one
|
||||||
|
// key-type pair. Equals len("-u-") + key (2) + dash + max value (8).
|
||||||
|
maxSimpleUExtensionSize = 14
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tag represents a BCP 47 language tag. It is used to specify an instance of a
|
||||||
|
// specific language or locale. All language tag values are guaranteed to be
|
||||||
|
// well-formed.
|
||||||
|
type Tag struct {
|
||||||
|
lang langID
|
||||||
|
region regionID
|
||||||
|
// TODO: we will soon run out of positions for script. Idea: instead of
|
||||||
|
// storing lang, region, and script codes, store only the compact index and
|
||||||
|
// have a lookup table from this code to its expansion. This greatly speeds
|
||||||
|
// up table lookup, speed up common variant cases.
|
||||||
|
// This will also immediately free up 3 extra bytes. Also, the pVariant
|
||||||
|
// field can now be moved to the lookup table, as the compact index uniquely
|
||||||
|
// determines the offset of a possible variant.
|
||||||
|
script scriptID
|
||||||
|
pVariant byte // offset in str, includes preceding '-'
|
||||||
|
pExt uint16 // offset of first extension, includes preceding '-'
|
||||||
|
|
||||||
|
// str is the string representation of the Tag. It will only be used if the
|
||||||
|
// tag has variants or extensions.
|
||||||
|
str string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make is a convenience wrapper for Parse that omits the error.
|
||||||
|
// In case of an error, a sensible default is returned.
|
||||||
|
func Make(s string) Tag {
|
||||||
|
return Default.Make(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make is a convenience wrapper for c.Parse that omits the error.
|
||||||
|
// In case of an error, a sensible default is returned.
|
||||||
|
func (c CanonType) Make(s string) Tag {
|
||||||
|
t, _ := c.Parse(s)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Raw returns the raw base language, script and region, without making an
|
||||||
|
// attempt to infer their values.
|
||||||
|
func (t Tag) Raw() (b Base, s Script, r Region) {
|
||||||
|
return Base{t.lang}, Script{t.script}, Region{t.region}
|
||||||
|
}
|
||||||
|
|
||||||
|
// equalTags compares language, script and region subtags only.
|
||||||
|
func (t Tag) equalTags(a Tag) bool {
|
||||||
|
return t.lang == a.lang && t.script == a.script && t.region == a.region
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsRoot returns true if t is equal to language "und".
|
||||||
|
func (t Tag) IsRoot() bool {
|
||||||
|
if int(t.pVariant) < len(t.str) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return t.equalTags(und)
|
||||||
|
}
|
||||||
|
|
||||||
|
// private reports whether the Tag consists solely of a private use tag.
// For such tags the full string is stored in str and pVariant is 0, since
// there is no core lang-script-region prefix.
func (t Tag) private() bool {
	return t.str != "" && t.pVariant == 0
}
|
||||||
|
|
||||||
|
// CanonType can be used to enable or disable various types of canonicalization.
// It is a bit set; combine values with '|'.
type CanonType int
|
||||||
|
|
||||||
|
const (
	// Replace deprecated base languages with their preferred replacements.
	DeprecatedBase CanonType = 1 << iota
	// Replace deprecated scripts with their preferred replacements.
	DeprecatedScript
	// Replace deprecated regions with their preferred replacements.
	DeprecatedRegion
	// Remove redundant scripts.
	SuppressScript
	// Normalize legacy encodings. This includes legacy languages defined in
	// CLDR as well as bibliographic codes defined in ISO-639.
	Legacy
	// Map the dominant language of a macro language group to the macro language
	// subtag. For example cmn -> zh.
	Macro
	// The CLDR flag should be used if full compatibility with CLDR is required.
	// There are a few cases where language.Tag may differ from CLDR. To follow all
	// of CLDR's suggestions, use All|CLDR.
	CLDR

	// Raw can be used to Compose or Parse without Canonicalization.
	Raw CanonType = 0

	// Replace all deprecated tags with their preferred replacements.
	Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion

	// All canonicalizations recommended by BCP 47.
	BCP47 = Deprecated | SuppressScript

	// All canonicalizations.
	All = BCP47 | Legacy | Macro

	// Default is the canonicalization used by Parse, Make and Compose. To
	// preserve as much information as possible, canonicalizations that remove
	// potentially valuable information are not included. The Matcher is
	// designed to recognize similar tags that would be the same if
	// they were canonicalized using All.
	Default = Deprecated | Legacy

	// canonLang is the set of flags that affect the base-language subtag.
	canonLang = DeprecatedBase | Legacy | Macro

	// TODO: LikelyScript, LikelyRegion: suppress similar to ICU.
)
|
||||||
|
|
||||||
|
// canonicalize returns the canonicalized equivalent of the tag and
// whether there was any change. Note that it updates only the core
// subtags; the caller is responsible for calling remakeString if the
// result is reported as changed.
func (t Tag) canonicalize(c CanonType) (Tag, bool) {
	if c == Raw {
		return t, false
	}
	changed := false
	if c&SuppressScript != 0 {
		// Drop the script if it is the default (suppress-script) value for
		// the base language.
		if t.lang < langNoIndexOffset && uint8(t.script) == suppressScript[t.lang] {
			t.script = 0
			changed = true
		}
	}
	if c&canonLang != 0 {
		// Loop because a deprecated-language replacement may itself be
		// subject to further canonicalization (see the continue below).
		for {
			if l, aliasType := normLang(t.lang); l != t.lang {
				switch aliasType {
				case langLegacy:
					if c&Legacy != 0 {
						// "sh" implies Latin script when none is given.
						if t.lang == _sh && t.script == 0 {
							t.script = _Latn
						}
						t.lang = l
						changed = true
					}
				case langMacro:
					if c&Macro != 0 {
						// We deviate here from CLDR. The mapping "nb" -> "no"
						// qualifies as a typical Macro language mapping. However,
						// for legacy reasons, CLDR maps "no", the macro language
						// code for Norwegian, to the dominant variant "nb". This
						// change is currently under consideration for CLDR as well.
						// See http://unicode.org/cldr/trac/ticket/2698 and also
						// http://unicode.org/cldr/trac/ticket/1790 for some of the
						// practical implications. TODO: this check could be removed
						// if CLDR adopts this change.
						if c&CLDR == 0 || t.lang != _nb {
							changed = true
							t.lang = l
						}
					}
				case langDeprecated:
					if c&DeprecatedBase != 0 {
						// "mo" without a region implies Moldova.
						if t.lang == _mo && t.region == 0 {
							t.region = _MD
						}
						t.lang = l
						changed = true
						// Other canonicalization types may still apply.
						continue
					}
				}
			} else if c&Legacy != 0 && t.lang == _no && c&CLDR != 0 {
				// In CLDR mode, map the macro language "no" to the dominant
				// variant "nb" (the inverse of the deviation noted above).
				t.lang = _nb
				changed = true
			}
			break
		}
	}
	if c&DeprecatedScript != 0 {
		// Qaai was deprecated in favor of Zinh.
		if t.script == _Qaai {
			changed = true
			t.script = _Zinh
		}
	}
	if c&DeprecatedRegion != 0 {
		if r := normRegion(t.region); r != 0 {
			changed = true
			t.region = r
		}
	}
	return t, changed
}
|
||||||
|
|
||||||
|
// Canonicalize returns the canonicalized equivalent of the tag.
|
||||||
|
func (c CanonType) Canonicalize(t Tag) (Tag, error) {
|
||||||
|
t, changed := t.canonicalize(c)
|
||||||
|
if changed {
|
||||||
|
t.remakeString()
|
||||||
|
}
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Confidence indicates the level of certainty for a given return value.
// For example, Serbian may be written in Cyrillic or Latin script.
// The confidence level indicates whether a value was explicitly specified,
// whether it is typically the only possible value, or whether there is
// an ambiguity.
type Confidence int

const (
	No    Confidence = iota // full confidence that there was no match
	Low                     // most likely value picked out of a set of alternatives
	High                    // value is generally assumed to be the correct match
	Exact                   // exact match or explicitly specified value
)

// confName maps Confidence values to their display names; indexed by the
// Confidence value itself.
var confName = []string{"No", "Low", "High", "Exact"}

// String returns the name of the confidence level.
func (c Confidence) String() string {
	return confName[c]
}
|
||||||
|
|
||||||
|
// remakeString is used to update t.str in case lang, script or region changed.
// It is assumed that pExt and pVariant still point to the start of the
// respective parts.
func (t *Tag) remakeString() {
	if t.str == "" {
		return
	}
	// extra holds the variant and extension part, stripped of its leading '-'.
	extra := t.str[t.pVariant:]
	if t.pVariant > 0 {
		extra = extra[1:]
	}
	if t.equalTags(und) && strings.HasPrefix(extra, "x-") {
		// Purely private-use tag: the string is just the x- extension and
		// both offsets collapse to 0.
		t.str = extra
		t.pVariant = 0
		t.pExt = 0
		return
	}
	var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases.
	b := buf[:t.genCoreBytes(buf[:])]
	if extra != "" {
		// Re-append the old variant/extension part and shift both offsets
		// by the change in length of the core lang-script-region prefix.
		diff := len(b) - int(t.pVariant)
		b = append(b, '-')
		b = append(b, extra...)
		t.pVariant = uint8(int(t.pVariant) + diff)
		t.pExt = uint16(int(t.pExt) + diff)
	} else {
		// No variants or extensions: both offsets point to the end of str.
		t.pVariant = uint8(len(b))
		t.pExt = uint16(len(b))
	}
	t.str = string(b)
}
|
||||||
|
|
||||||
|
// genCoreBytes writes a string for the base languages, script and region tags
// to the given buffer and returns the number of bytes written. It will never
// write more than maxCoreSize bytes.
func (t *Tag) genCoreBytes(buf []byte) int {
	n := t.lang.stringToBuf(buf[:])
	// Script and region are appended only when explicitly set (non-zero),
	// each preceded by a '-' separator.
	if t.script != 0 {
		n += copy(buf[n:], "-")
		n += copy(buf[n:], t.script.String())
	}
	if t.region != 0 {
		n += copy(buf[n:], "-")
		n += copy(buf[n:], t.region.String())
	}
	return n
}
|
||||||
|
|
||||||
|
// String returns the canonical string representation of the language tag.
|
||||||
|
func (t Tag) String() string {
|
||||||
|
if t.str != "" {
|
||||||
|
return t.str
|
||||||
|
}
|
||||||
|
if t.script == 0 && t.region == 0 {
|
||||||
|
return t.lang.String()
|
||||||
|
}
|
||||||
|
buf := [maxCoreSize]byte{}
|
||||||
|
return string(buf[:t.genCoreBytes(buf[:])])
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalText implements encoding.TextMarshaler.
|
||||||
|
func (t Tag) MarshalText() (text []byte, err error) {
|
||||||
|
if t.str != "" {
|
||||||
|
text = append(text, t.str...)
|
||||||
|
} else if t.script == 0 && t.region == 0 {
|
||||||
|
text = append(text, t.lang.String()...)
|
||||||
|
} else {
|
||||||
|
buf := [maxCoreSize]byte{}
|
||||||
|
text = buf[:t.genCoreBytes(buf[:])]
|
||||||
|
}
|
||||||
|
return text, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||||
|
func (t *Tag) UnmarshalText(text []byte) error {
|
||||||
|
tag, err := Raw.Parse(string(text))
|
||||||
|
*t = tag
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Base returns the base language of the language tag. If the base language is
|
||||||
|
// unspecified, an attempt will be made to infer it from the context.
|
||||||
|
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
|
||||||
|
func (t Tag) Base() (Base, Confidence) {
|
||||||
|
if t.lang != 0 {
|
||||||
|
return Base{t.lang}, Exact
|
||||||
|
}
|
||||||
|
c := High
|
||||||
|
if t.script == 0 && !(Region{t.region}).IsCountry() {
|
||||||
|
c = Low
|
||||||
|
}
|
||||||
|
if tag, err := addTags(t); err == nil && tag.lang != 0 {
|
||||||
|
return Base{tag.lang}, c
|
||||||
|
}
|
||||||
|
return Base{0}, No
|
||||||
|
}
|
||||||
|
|
||||||
|
// Script infers the script for the language tag. If it was not explicitly given, it will infer
// a most likely candidate.
// If more than one script is commonly used for a language, the most likely one
// is returned with a low confidence indication. For example, it returns (Cyrl, Low)
// for Serbian.
// If a script cannot be inferred (Zzzz, No) is returned. We do not use Zyyy (undetermined)
// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks
// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts.
// See http://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for
// unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified.
// Note that an inferred script is never guaranteed to be the correct one. Latin is
// almost exclusively used for Afrikaans, but Arabic has been used for some texts
// in the past. Also, the script that is commonly used may change over time.
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
func (t Tag) Script() (Script, Confidence) {
	if t.script != 0 {
		return Script{t.script}, Exact
	}
	// Default result: unknown script with no confidence.
	sc, c := scriptID(_Zzzz), No
	if t.lang < langNoIndexOffset {
		// The language has an index into the suppress-script table.
		if scr := scriptID(suppressScript[t.lang]); scr != 0 {
			// Note: it is not always the case that a language with a suppress
			// script value is only written in one script (e.g. kk, ms, pa).
			if t.region == 0 {
				return Script{scriptID(scr)}, High
			}
			sc, c = scr, High
		}
	}
	if tag, err := addTags(t); err == nil {
		if tag.script != sc {
			sc, c = tag.script, Low
		}
	} else {
		// Likely-subtags lookup failed; retry after canonicalizing
		// deprecated and macro-language subtags.
		t, _ = (Deprecated | Macro).Canonicalize(t)
		if tag, err := addTags(t); err == nil && tag.script != sc {
			sc, c = tag.script, Low
		}
	}
	return Script{sc}, c
}
|
||||||
|
|
||||||
|
// Region returns the region for the language tag. If it was not explicitly given, it will
|
||||||
|
// infer a most likely candidate from the context.
|
||||||
|
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
|
||||||
|
func (t Tag) Region() (Region, Confidence) {
|
||||||
|
if t.region != 0 {
|
||||||
|
return Region{t.region}, Exact
|
||||||
|
}
|
||||||
|
if t, err := addTags(t); err == nil {
|
||||||
|
return Region{t.region}, Low // TODO: differentiate between high and low.
|
||||||
|
}
|
||||||
|
t, _ = (Deprecated | Macro).Canonicalize(t)
|
||||||
|
if tag, err := addTags(t); err == nil {
|
||||||
|
return Region{tag.region}, Low
|
||||||
|
}
|
||||||
|
return Region{_ZZ}, No // TODO: return world instead of undetermined?
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variant returns the variants specified explicitly for this language tag.
|
||||||
|
// or nil if no variant was specified.
|
||||||
|
func (t Tag) Variants() []Variant {
|
||||||
|
v := []Variant{}
|
||||||
|
if int(t.pVariant) < int(t.pExt) {
|
||||||
|
for x, str := "", t.str[t.pVariant:t.pExt]; str != ""; {
|
||||||
|
x, str = nextToken(str)
|
||||||
|
v = append(v, Variant{x})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
// specific language are substituted with fields from the parent language.
// The parent for a language may change for newer versions of CLDR.
func (t Tag) Parent() Tag {
	if t.str != "" {
		// Strip the variants and extensions.
		t, _ = Raw.Compose(t.Raw())
		if t.region == 0 && t.script != 0 && t.lang != 0 {
			base, _ := addTags(Tag{lang: t.lang})
			if base.script == t.script {
				// The script is the default for the language, so the parent
				// is the bare base language.
				return Tag{lang: t.lang}
			}
		}
		return t
	}
	if t.lang != 0 {
		if t.region != 0 {
			// Determine the maximized script to look up the parent table.
			maxScript := t.script
			if maxScript == 0 {
				max, _ := addTags(t)
				maxScript = max.script
			}

			// Check the explicit CLDR parent-locale exceptions table.
			for i := range parents {
				if langID(parents[i].lang) == t.lang && scriptID(parents[i].maxScript) == maxScript {
					for _, r := range parents[i].fromRegion {
						if regionID(r) == t.region {
							return Tag{
								lang:   t.lang,
								script: scriptID(parents[i].script),
								region: regionID(parents[i].toRegion),
							}
						}
					}
				}
			}

			// Strip the script if it is the default one.
			base, _ := addTags(Tag{lang: t.lang})
			if base.script != maxScript {
				return Tag{lang: t.lang, script: maxScript}
			}
			return Tag{lang: t.lang}
		} else if t.script != 0 {
			// The parent for an base-script pair with a non-default script is
			// "und" instead of the base language.
			base, _ := addTags(Tag{lang: t.lang})
			if base.script != t.script {
				return und
			}
			return Tag{lang: t.lang}
		}
	}
	return und
}
|
||||||
|
|
||||||
|
// nextToken returns the token following the leading '-' of s and the
// remainder of the string (which keeps its own leading '-', if any).
// s must start with a '-'.
func nextToken(s string) (t, tail string) {
	rest := s[1:]
	if i := strings.Index(rest, "-"); i >= 0 {
		// rest[:i] is the token; s[i+1:] starts at the next '-'.
		return rest[:i], s[i+1:]
	}
	// Last token: nothing follows.
	return rest, ""
}
|
||||||
|
|
||||||
|
// Extension is a single BCP 47 extension.
type Extension struct {
	// s is the raw extension string, starting with the one-byte type tag
	// (e.g. "u-co-phonebk").
	s string
}

// String returns the string representation of the extension, including the
// type tag.
func (e Extension) String() string {
	return e.s
}
|
||||||
|
|
||||||
|
// ParseExtension parses s as an extension and returns it on success.
|
||||||
|
func ParseExtension(s string) (e Extension, err error) {
|
||||||
|
scan := makeScannerString(s)
|
||||||
|
var end int
|
||||||
|
if n := len(scan.token); n != 1 {
|
||||||
|
return Extension{}, errSyntax
|
||||||
|
}
|
||||||
|
scan.toLower(0, len(scan.b))
|
||||||
|
end = parseExtension(&scan)
|
||||||
|
if end != len(s) {
|
||||||
|
return Extension{}, errSyntax
|
||||||
|
}
|
||||||
|
return Extension{string(scan.b)}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the one-byte extension type of e. It returns 0 for the zero
|
||||||
|
// exception.
|
||||||
|
func (e Extension) Type() byte {
|
||||||
|
if e.s == "" {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return e.s[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tokens returns the list of tokens of e.
// The first token is the one-byte extension type tag.
func (e Extension) Tokens() []string {
	return strings.Split(e.s, "-")
}
|
||||||
|
|
||||||
|
// Extension returns the extension of type x for tag t. It will return
|
||||||
|
// false for ok if t does not have the requested extension. The returned
|
||||||
|
// extension will be invalid in this case.
|
||||||
|
func (t Tag) Extension(x byte) (ext Extension, ok bool) {
|
||||||
|
for i := int(t.pExt); i < len(t.str)-1; {
|
||||||
|
var ext string
|
||||||
|
i, ext = getExtension(t.str, i)
|
||||||
|
if ext[0] == x {
|
||||||
|
return Extension{ext}, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Extension{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extensions returns all extensions of t.
|
||||||
|
func (t Tag) Extensions() []Extension {
|
||||||
|
e := []Extension{}
|
||||||
|
for i := int(t.pExt); i < len(t.str)-1; {
|
||||||
|
var ext string
|
||||||
|
i, ext = getExtension(t.str, i)
|
||||||
|
e = append(e, Extension{ext})
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeForKey returns the type associated with the given key, where key and type
|
||||||
|
// are of the allowed values defined for the Unicode locale extension ('u') in
|
||||||
|
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
|
||||||
|
// TypeForKey will traverse the inheritance chain to get the correct value.
|
||||||
|
func (t Tag) TypeForKey(key string) string {
|
||||||
|
if start, end, _ := t.findTypeForKey(key); end != start {
|
||||||
|
return t.str[start:end]
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// Errors returned by SetTypeForKey.
var (
	errPrivateUse       = errors.New("cannot set a key on a private use tag")
	errInvalidArguments = errors.New("invalid key or type")
)
|
||||||
|
|
||||||
|
// SetTypeForKey returns a new Tag with the key set to type, where key and type
// are of the allowed values defined for the Unicode locale extension ('u') in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// An empty value removes an existing pair with the same key.
func (t Tag) SetTypeForKey(key, value string) (Tag, error) {
	if t.private() {
		return t, errPrivateUse
	}
	// Keys are always exactly two characters.
	if len(key) != 2 {
		return t, errInvalidArguments
	}

	// Remove the setting if value is "".
	if value == "" {
		start, end, _ := t.findTypeForKey(key)
		if start != end {
			// Remove key tag and leading '-'.
			start -= 4

			// Remove a possible empty extension.
			if (end == len(t.str) || t.str[end+2] == '-') && t.str[start-2] == '-' {
				start -= 2
			}
			if start == int(t.pVariant) && end == len(t.str) {
				// Nothing remains after the core subtags: drop the cached
				// string entirely.
				t.str = ""
				t.pVariant, t.pExt = 0, 0
			} else {
				t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:])
			}
		}
		return t, nil
	}

	// Types are 3 to 8 characters long.
	if len(value) < 3 || len(value) > 8 {
		return t, errInvalidArguments
	}

	var (
		buf   [maxCoreSize + maxSimpleUExtensionSize]byte
		uStart int // start of the -u extension.
	)

	// Generate the tag string if needed.
	if t.str == "" {
		uStart = t.genCoreBytes(buf[:])
		buf[uStart] = '-'
		uStart++
	}

	// Create new key-type pair and parse it to verify.
	// Layout: "u-" + key + "-" + value.
	b := buf[uStart:]
	copy(b, "u-")
	copy(b[2:], key)
	b[4] = '-'
	b = b[:5+copy(b[5:], value)]
	scan := makeScanner(b)
	if parseExtensions(&scan); scan.err != nil {
		return t, scan.err
	}

	// Assemble the replacement string.
	if t.str == "" {
		// Tag had no variants/extensions: the generated core + "-u-..."
		// becomes the whole string.
		t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1)
		t.str = string(buf[:uStart+len(b)])
	} else {
		s := t.str
		start, end, hasExt := t.findTypeForKey(key)
		if start == end {
			// Key not present: insert the new pair (dropping the "u-"
			// prefix if a -u extension already exists).
			if hasExt {
				b = b[2:]
			}
			t.str = fmt.Sprintf("%s-%s%s", s[:start], b, s[end:])
		} else {
			// Key present: splice in the new value.
			t.str = fmt.Sprintf("%s%s%s", s[:start], value, s[end:])
		}
	}
	return t, nil
}
|
||||||
|
|
||||||
|
// findTypeForKey returns the start and end position for the type corresponding
// to key or the point at which to insert the key-value pair if the type
// wasn't found. The hasExt return value reports whether an -u extension was present.
// Note: the extensions are typically very small and are likely to contain
// only one key-type pair.
func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) {
	p := int(t.pExt)
	if len(key) != 2 || p == len(t.str) || p == 0 {
		return p, p, false
	}
	s := t.str

	// Find the correct extension. Extensions are sorted, so we can stop as
	// soon as we pass 'u'.
	for p++; s[p] != 'u'; p++ {
		if s[p] > 'u' {
			p--
			return p, p, false
		}
		if p = nextExtension(s, p); p == len(s) {
			return len(s), len(s), false
		}
	}
	// Proceed to the hyphen following the extension name.
	p++

	// curKey is the key currently being processed.
	curKey := ""

	// Iterate over keys until we get the end of a section.
	for {
		// p points to the hyphen preceding the current token.
		if p3 := p + 3; s[p3] == '-' {
			// Found a key.
			// Check whether we just processed the key that was requested.
			if curKey == key {
				return start, p, true
			}
			// Set to the next key and continue scanning type tokens.
			curKey = s[p+1 : p3]
			if curKey > key {
				// Keys are sorted; the requested key is absent and would be
				// inserted here.
				return p, p, true
			}
			// Start of the type token sequence.
			start = p + 4
			// A type is at least 3 characters long.
			p += 7 // 4 + 3
		} else {
			// Attribute or type, which is at least 3 characters long.
			p += 4
		}
		// p points past the third character of a type or attribute.
		max := p + 5 // maximum length of token plus hyphen.
		if len(s) < max {
			max = len(s)
		}
		for ; p < max && s[p] != '-'; p++ {
		}
		// Bail if we have exhausted all tokens or if the next token starts
		// a new extension.
		if p == len(s) || s[p+2] == '-' {
			if curKey == key {
				return start, p, true
			}
			return p, p, true
		}
	}
}
|
||||||
|
|
||||||
|
// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags
// for which data exists in the text repository. The index will change over time
// and should not be stored in persistent storage. Extensions, except for the
// 'va' type of the 'u' extension, are ignored. It will return 0, false if no
// compact tag exists, where 0 is the index for the root language (Und).
func CompactIndex(t Tag) (index int, ok bool) {
	// TODO: perhaps give more frequent tags a lower index.
	// TODO: we could make the indexes stable. This will excluded some
	// possibilities for optimization, so don't do this quite yet.
	b, s, r := t.Raw()
	if len(t.str) > 0 {
		if strings.HasPrefix(t.str, "x-") {
			// We have no entries for user-defined tags.
			return 0, false
		}
		if uint16(t.pVariant) != t.pExt {
			// The tag has variants.
			// There are no tags with variants and an u-va type.
			if t.TypeForKey("va") != "" {
				return 0, false
			}
			// Strip all extensions, keeping only the variants.
			t, _ = Raw.Compose(b, s, r, t.Variants())
		} else if _, ok := t.Extension('u'); ok {
			// Strip all but the 'va' entry.
			variant := t.TypeForKey("va")
			t, _ = Raw.Compose(b, s, r)
			t, _ = t.SetTypeForKey("va", variant)
		}
		if len(t.str) > 0 {
			// We have some variants: these are matched against an explicit
			// list of special-cased tags.
			for i, s := range specialTags {
				if s == t {
					return i + 1, true
				}
			}
			return 0, false
		}
	}
	// No variants specified: just compare core components.
	// The key has the form lllssrrr, where l, s, and r are nibbles for
	// respectively the langID, scriptID, and regionID.
	key := uint32(b.langID) << (8 + 12)
	key |= uint32(s.scriptID) << 12
	key |= uint32(r.regionID)
	x, ok := coreTags[key]
	return int(x), ok
}
|
||||||
|
|
||||||
|
// Base is an ISO 639 language code, used for encoding the base language
// of a language tag.
type Base struct {
	langID // embedded numeric language ID; provides String() etc.
}
|
||||||
|
|
||||||
|
// ParseBase parses a 2- or 3-letter ISO 639 code.
|
||||||
|
// It returns a ValueError if s is a well-formed but unknown language identifier
|
||||||
|
// or another error if another error occurred.
|
||||||
|
func ParseBase(s string) (Base, error) {
|
||||||
|
if n := len(s); n < 2 || 3 < n {
|
||||||
|
return Base{}, errSyntax
|
||||||
|
}
|
||||||
|
var buf [3]byte
|
||||||
|
l, err := getLangID(buf[:copy(buf[:], s)])
|
||||||
|
return Base{l}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Script is a 4-letter ISO 15924 code for representing scripts.
// It is idiomatically represented in title case.
type Script struct {
	scriptID // embedded numeric script ID; provides String() etc.
}
|
||||||
|
|
||||||
|
// ParseScript parses a 4-letter ISO 15924 code.
|
||||||
|
// It returns a ValueError if s is a well-formed but unknown script identifier
|
||||||
|
// or another error if another error occurred.
|
||||||
|
func ParseScript(s string) (Script, error) {
|
||||||
|
if len(s) != 4 {
|
||||||
|
return Script{}, errSyntax
|
||||||
|
}
|
||||||
|
var buf [4]byte
|
||||||
|
sc, err := getScriptID(script, buf[:copy(buf[:], s)])
|
||||||
|
return Script{sc}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions.
type Region struct {
	regionID // embedded numeric region ID; provides String() etc.
}
|
||||||
|
|
||||||
|
// EncodeM49 returns the Region for the given UN M.49 code.
// It returns an error if r is not a valid code.
func EncodeM49(r int) (Region, error) {
	rid, err := getRegionM49(r)
	return Region{rid}, err
}
|
||||||
|
|
||||||
|
// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code.
|
||||||
|
// It returns a ValueError if s is a well-formed but unknown region identifier
|
||||||
|
// or another error if another error occurred.
|
||||||
|
func ParseRegion(s string) (Region, error) {
|
||||||
|
if n := len(s); n < 2 || 3 < n {
|
||||||
|
return Region{}, errSyntax
|
||||||
|
}
|
||||||
|
var buf [3]byte
|
||||||
|
r, err := getRegionID(buf[:copy(buf[:], s)])
|
||||||
|
return Region{r}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCountry returns whether this region is a country or autonomous area. This
|
||||||
|
// includes non-standard definitions from CLDR.
|
||||||
|
func (r Region) IsCountry() bool {
|
||||||
|
if r.regionID == 0 || r.IsGroup() || r.IsPrivateUse() && r.regionID != _XK {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsGroup returns whether this region defines a collection of regions. This
|
||||||
|
// includes non-standard definitions from CLDR.
|
||||||
|
func (r Region) IsGroup() bool {
|
||||||
|
if r.regionID == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return int(regionInclusion[r.regionID]) < len(regionContainment)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Contains returns whether Region c is contained by Region r. It returns true
// if c == r.
func (r Region) Contains(c Region) bool {
	return r.regionID.contains(c.regionID)
}
|
||||||
|
|
||||||
|
// contains reports whether region c lies within region r, using the
// precomputed group-containment bit sets.
func (r regionID) contains(c regionID) bool {
	if r == c {
		return true
	}
	g := regionInclusion[r]
	if g >= nRegionGroups {
		// r is not a group region, so it contains only itself.
		return false
	}
	// m is the bit set of groups contained by r.
	m := regionContainment[g]

	// b is the bit set of groups the contained candidate belongs to.
	d := regionInclusion[c]
	b := regionInclusionBits[d]

	// A contained country may belong to multiple disjoint groups. Matching any
	// of these indicates containment. If the contained region is a group, it
	// must strictly be a subset.
	if d >= nRegionGroups {
		return b&m != 0
	}
	return b&^m == 0
}
|
||||||
|
|
||||||
|
// errNoTLD is returned by Region.TLD for regions without a ccTLD.
var errNoTLD = errors.New("language: region is not a valid ccTLD")
|
||||||
|
|
||||||
|
// TLD returns the country code top-level domain (ccTLD). UK is returned for GB.
|
||||||
|
// In all other cases it returns either the region itself or an error.
|
||||||
|
//
|
||||||
|
// This method may return an error for a region for which there exists a
|
||||||
|
// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The
|
||||||
|
// region will already be canonicalized it was obtained from a Tag that was
|
||||||
|
// obtained using any of the default methods.
|
||||||
|
func (r Region) TLD() (Region, error) {
|
||||||
|
// See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the
|
||||||
|
// difference between ISO 3166-1 and IANA ccTLD.
|
||||||
|
if r.regionID == _GB {
|
||||||
|
r = Region{_UK}
|
||||||
|
}
|
||||||
|
if (r.typ() & ccTLD) == 0 {
|
||||||
|
return Region{}, errNoTLD
|
||||||
|
}
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Canonicalize returns the region or a possible replacement if the region is
|
||||||
|
// deprecated. It will not return a replacement for deprecated regions that
|
||||||
|
// are split into multiple regions.
|
||||||
|
func (r Region) Canonicalize() Region {
|
||||||
|
if cr := normRegion(r.regionID); cr != 0 {
|
||||||
|
return Region{cr}
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variant represents a registered variant of a language as defined by BCP 47.
type Variant struct {
	// variant is the lowercase variant subtag, e.g. "rozaj".
	variant string
}
|
||||||
|
|
||||||
|
// ParseVariant parses and returns a Variant. An error is returned if s is not
|
||||||
|
// a valid variant.
|
||||||
|
func ParseVariant(s string) (Variant, error) {
|
||||||
|
s = strings.ToLower(s)
|
||||||
|
if _, ok := variantIndex[s]; ok {
|
||||||
|
return Variant{s}, nil
|
||||||
|
}
|
||||||
|
return Variant{}, mkErrInvalid([]byte(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the string representation of the variant.
func (v Variant) String() string {
	return v.variant
}
|
396
vendor/golang.org/x/text/language/lookup.go
generated
vendored
Normal file
396
vendor/golang.org/x/text/language/lookup.go
generated
vendored
Normal file
@ -0,0 +1,396 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"golang.org/x/text/internal/tag"
|
||||||
|
)
|
||||||
|
|
||||||
|
// findIndex tries to find the given tag in idx and returns a standardized error
// if it could not be found.
// form supplies the expected length and case pattern (e.g. "ZZ", "Zzzz") that
// tag.FixCase uses to normalize key in place before the lookup.
func findIndex(idx tag.Index, key []byte, form string) (index int, err error) {
	if !tag.FixCase(form, key) {
		// Malformed subtag (wrong length or character class): syntax error,
		// not merely an unknown value.
		return 0, errSyntax
	}
	i := idx.Index(key)
	if i == -1 {
		return 0, mkErrInvalid(key)
	}
	return i, nil
}
|
||||||
|
|
||||||
|
// searchUint returns the index of the first element of imap that is >= key,
// or len(imap) if no such element exists. imap must be sorted in ascending
// order.
func searchUint(imap []uint16, key uint16) int {
	atLeastKey := func(i int) bool { return imap[i] >= key }
	return sort.Search(len(imap), atLeastKey)
}
|
||||||
|
|
||||||
|
// langID is a compact numeric identifier for a base language; it indexes the
// generated lang table (or, offset by langNoIndexOffset, encodes a 3-letter
// code directly).
type langID uint16
|
||||||
|
|
||||||
|
// getLangID returns the langID of s if s is a canonical subtag
|
||||||
|
// or langUnknown if s is not a canonical subtag.
|
||||||
|
func getLangID(s []byte) (langID, error) {
|
||||||
|
if len(s) == 2 {
|
||||||
|
return getLangISO2(s)
|
||||||
|
}
|
||||||
|
return getLangISO3(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// normLang returns the mapped langID of id according to the language alias
// table, together with the type of the alias mapping. If id has no alias it is
// returned unchanged with langAliasTypeUnknown.
func normLang(id langID) (langID, langAliasType) {
	// langAliasMap is sorted by the from field; binary search for id.
	k := sort.Search(len(langAliasMap), func(i int) bool {
		return langAliasMap[i].from >= uint16(id)
	})
	if k < len(langAliasMap) && langAliasMap[k].from == uint16(id) {
		return langID(langAliasMap[k].to), langAliasTypes[k]
	}
	return id, langAliasTypeUnknown
}
|
||||||
|
|
||||||
|
// getLangISO2 returns the langID for the given 2-letter ISO language code
// or unknownLang if this does not exist.
func getLangISO2(s []byte) (langID, error) {
	// "zz" is the case/length template: two lowercase letters.
	if !tag.FixCase("zz", s) {
		return 0, errSyntax
	}
	// NOTE(review): byte 3 of a lang table element appears to be non-zero
	// exactly for entries that have a 2-letter code — confirm against the
	// generated tables.
	if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 {
		return langID(i), nil
	}
	return 0, mkErrInvalid(s)
}
|
||||||
|
|
||||||
|
// base is the radix (26) used to pack 3-letter lowercase codes into an integer.
const base = 'z' - 'a' + 1
|
||||||
|
|
||||||
|
func strToInt(s []byte) uint {
|
||||||
|
v := uint(0)
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
v *= base
|
||||||
|
v += uint(s[i] - 'a')
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// converts the given integer to the original ASCII string passed to strToInt.
|
||||||
|
// len(s) must match the number of characters obtained.
|
||||||
|
func intToStr(v uint, s []byte) {
|
||||||
|
for i := len(s) - 1; i >= 0; i-- {
|
||||||
|
s[i] = byte(v%base) + 'a'
|
||||||
|
v /= base
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getLangISO3 returns the langID for the given 3-letter ISO language code
// or unknownLang if this does not exist.
func getLangISO3(s []byte) (langID, error) {
	// "und" is the case/length template: three lowercase letters.
	if tag.FixCase("und", s) {
		// first try to match canonical 3-letter entries
		for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) {
			if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] {
				// We treat "und" as special and always translate it to "unspecified".
				// Note that ZZ and Zzzz are private use and are not treated as
				// unspecified by default.
				id := langID(i)
				if id == nonCanonicalUnd {
					return 0, nil
				}
				return id, nil
			}
		}
		// 3-letter codes that alias to a canonical (2-letter) entry.
		if i := altLangISO3.Index(s); i != -1 {
			return langID(altLangIndex[altLangISO3.Elem(i)[3]]), nil
		}
		// Codes with no index entry are encoded directly as a base-26 number
		// offset by langNoIndexOffset; langNoIndex is a bitmap of valid codes.
		n := strToInt(s)
		if langNoIndex[n/8]&(1<<(n%8)) != 0 {
			return langID(n) + langNoIndexOffset, nil
		}
		// Check for non-canonical uses of ISO3.
		for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) {
			if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] {
				return langID(i), nil
			}
		}
		return 0, mkErrInvalid(s)
	}
	return 0, errSyntax
}
|
||||||
|
|
||||||
|
// stringToBuf writes the string to b and returns the number of bytes
// written. cap(b) must be >= 3.
func (id langID) stringToBuf(b []byte) int {
	if id >= langNoIndexOffset {
		// Unindexed 3-letter code: decode the packed base-26 value.
		intToStr(uint(id)-langNoIndexOffset, b[:3])
		return 3
	} else if id == 0 {
		return copy(b, "und")
	}
	// Each lang table element is 4 bytes wide.
	l := lang[id<<2:]
	if l[3] == 0 {
		// No 2-letter form: use the 3-letter code.
		return copy(b, l[:3])
	}
	return copy(b, l[:2])
}
|
||||||
|
|
||||||
|
// String returns the BCP 47 representation of the langID.
// Use b as variable name, instead of id, to ensure the variable
// used is consistent with that of Base in which this type is embedded.
func (b langID) String() string {
	if b == 0 {
		return "und"
	} else if b >= langNoIndexOffset {
		// Unindexed 3-letter code packed as a base-26 number.
		b -= langNoIndexOffset
		buf := [3]byte{}
		intToStr(uint(b), buf[:])
		return string(buf[:])
	}
	l := lang.Elem(int(b))
	if l[3] == 0 {
		// No 2-letter form: return the 3-letter code.
		return l[:3]
	}
	return l[:2]
}
|
||||||
|
|
||||||
|
// ISO3 returns the ISO 639-3 language code.
func (b langID) ISO3() string {
	if b == 0 || b >= langNoIndexOffset {
		// "und" and unindexed codes already render as 3 letters.
		return b.String()
	}
	l := lang.Elem(int(b))
	if l[3] == 0 {
		return l[:3]
	} else if l[2] == 0 {
		// Byte 3 indexes the alternative ISO3 table for this entry.
		return altLangISO3.Elem(int(l[3]))[:3]
	}
	// This allocation will only happen for 3-letter ISO codes
	// that are non-canonical BCP 47 language identifiers.
	return l[0:1] + l[2:4]
}
|
||||||
|
|
||||||
|
// IsPrivateUse reports whether this language code is reserved for private use.
func (b langID) IsPrivateUse() bool {
	return langPrivateStart <= b && b <= langPrivateEnd
}
|
||||||
|
|
||||||
|
// regionID is a compact numeric identifier for a region; values below
// isoRegionOffset are UN M.49 codes, the rest index the regionISO table.
type regionID uint16
|
||||||
|
|
||||||
|
// getRegionID returns the region id for s if s is a valid 2-letter region code
// or unknownRegion.
func getRegionID(s []byte) (regionID, error) {
	if len(s) == 3 {
		if isAlpha(s[0]) {
			// 3-letter ISO 3166-1 alpha-3 code.
			return getRegionISO3(s)
		}
		// 3-digit UN M.49 numeric code. bitSize 10 suffices for values up to
		// 999 (< 1024).
		if i, err := strconv.ParseUint(string(s), 10, 10); err == nil {
			return getRegionM49(int(i))
		}
	}
	return getRegionISO2(s)
}
|
||||||
|
|
||||||
|
// getRegionISO2 returns the regionID for the given 2-letter ISO country code
// or unknownRegion if this does not exist.
func getRegionISO2(s []byte) (regionID, error) {
	// "ZZ" is the case/length template: two uppercase letters.
	i, err := findIndex(regionISO, s, "ZZ")
	if err != nil {
		return 0, err
	}
	// ISO region ids sit above the M.49 range.
	return regionID(i) + isoRegionOffset, nil
}
|
||||||
|
|
||||||
|
// getRegionISO3 returns the regionID for the given 3-letter ISO country code
// or unknownRegion if this does not exist.
func getRegionISO3(s []byte) (regionID, error) {
	// "ZZZ" is the case/length template: three uppercase letters.
	if tag.FixCase("ZZZ", s) {
		// Entries whose alpha-3 code shares the alpha-2 prefix: bytes 2 and 3
		// of a regionISO element hold the remaining letters.
		for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) {
			if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] {
				return regionID(i) + isoRegionOffset, nil
			}
		}
		// Fall back to the table of alpha-3 codes with unrelated alpha-2 codes.
		for i := 0; i < len(altRegionISO3); i += 3 {
			if tag.Compare(altRegionISO3[i:i+3], s) == 0 {
				return regionID(altRegionIDs[i/3]), nil
			}
		}
		return 0, mkErrInvalid(s)
	}
	return 0, errSyntax
}
|
||||||
|
|
||||||
|
// getRegionM49 returns the regionID for the given UN M.49 numeric code, or a
// ValueError if n is not a known M.49 code.
func getRegionM49(n int) (regionID, error) {
	if 0 < n && n <= 999 {
		const (
			searchBits = 7
			regionBits = 9
			regionMask = 1<<regionBits - 1
		)
		// fromM49 entries pack (m49 code << regionBits) | regionID and are
		// bucketed by the top bits of n via m49Index.
		idx := n >> searchBits
		buf := fromM49[m49Index[idx]:m49Index[idx+1]]
		val := uint16(n) << regionBits // we rely on bits shifting out
		i := sort.Search(len(buf), func(i int) bool {
			return buf[i] >= val
		})
		// NOTE(review): when i == len(buf) this indexes one past the bucket;
		// presumably the generated table layout guarantees this stays in
		// range of fromM49 — confirm against the table generator.
		if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val {
			return regionID(r & regionMask), nil
		}
	}
	var e ValueError
	// NOTE(review): writing into a buffer seeded with e.v[:] (len > 0) looks
	// like it appends after the existing bytes rather than filling e.v with
	// the digits of n — compare with upstream, which may use e.v[:0].
	fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n)
	return 0, e
}
|
||||||
|
|
||||||
|
// normRegion returns a region if r is deprecated or 0 otherwise.
// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ).
// TODO: consider mapping split up regions to new most populous one (like CLDR).
func normRegion(r regionID) regionID {
	// regionOldMap is sorted by the from field; binary search for r.
	m := regionOldMap
	k := sort.Search(len(m), func(i int) bool {
		return m[i].from >= uint16(r)
	})
	if k < len(m) && m[k].from == uint16(r) {
		return regionID(m[k].to)
	}
	return 0
}
|
||||||
|
|
||||||
|
// Bit flags describing properties of a region, as stored in regionTypes.
const (
	iso3166UserAssigned = 1 << iota // ISO 3166 user-assigned code
	ccTLD                           // has a country-code top-level domain
	bcp47Region                     // valid region per BCP 47
)
|
||||||
|
|
||||||
|
// typ returns the flag byte (see the constants above) describing region r.
func (r regionID) typ() byte {
	return regionTypes[r]
}
|
||||||
|
|
||||||
|
// String returns the BCP 47 representation for the region.
// It returns "ZZ" for an unspecified region.
func (r regionID) String() string {
	if r < isoRegionOffset {
		if r == 0 {
			return "ZZ"
		}
		// Regions below the ISO offset only have a numeric M.49 form.
		return fmt.Sprintf("%03d", r.M49())
	}
	r -= isoRegionOffset
	return regionISO.Elem(int(r))[:2]
}
|
||||||
|
|
||||||
|
// ISO3 returns the 3-letter ISO code of r.
// Note that not all regions have a 3-letter ISO code.
// In such cases this method returns "ZZZ".
func (r regionID) ISO3() string {
	if r < isoRegionOffset {
		return "ZZZ"
	}
	r -= isoRegionOffset
	reg := regionISO.Elem(int(r))
	switch reg[2] {
	case 0:
		// Alpha-3 code unrelated to the alpha-2 code: byte 3 is an offset
		// into altRegionISO3.
		return altRegionISO3[reg[3]:][:3]
	case ' ':
		// No alpha-3 code exists for this region.
		return "ZZZ"
	}
	// Alpha-3 code = first letter of the alpha-2 code + bytes 2-3.
	return reg[0:1] + reg[2:4]
}
|
||||||
|
|
||||||
|
// M49 returns the UN M.49 encoding of r, or 0 if this encoding
// is not defined for r.
func (r regionID) M49() int {
	return int(m49[r])
}
|
||||||
|
|
||||||
|
// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This
// may include private-use tags that are assigned by CLDR and used in this
// implementation. So IsPrivateUse and IsCountry can be simultaneously true.
func (r regionID) IsPrivateUse() bool {
	return r.typ()&iso3166UserAssigned != 0
}
|
||||||
|
|
||||||
|
// scriptID is a compact numeric identifier for a script; it indexes the
// generated script table.
type scriptID uint8
|
||||||
|
|
||||||
|
// getScriptID returns the script id for string s. It assumes that s
// is of the format [A-Z][a-z]{3}.
func getScriptID(idx tag.Index, s []byte) (scriptID, error) {
	// "Zzzz" is the case/length template: title-case four letters.
	i, err := findIndex(idx, s, "Zzzz")
	return scriptID(i), err
}
|
||||||
|
|
||||||
|
// String returns the script code in title case.
// It returns "Zzzz" for an unspecified script.
func (s scriptID) String() string {
	if s == 0 {
		return "Zzzz"
	}
	return script.Elem(int(s))
}
|
||||||
|
|
||||||
|
// IsPrivateUse reports whether this script code is reserved for private use.
// Qaaa..Qabx is the ISO 15924 private-use range.
func (s scriptID) IsPrivateUse() bool {
	return _Qaaa <= s && s <= _Qabx
}
|
||||||
|
|
||||||
|
const (
	// maxAltTaglen is the length of the longest legacy/grandfathered tag key.
	maxAltTaglen = len("en-US-POSIX")
	maxLen       = maxAltTaglen
)
|
||||||
|
|
||||||
|
var (
	// grandfatheredMap holds a mapping from legacy and grandfathered tags to
	// their base language or index to more elaborate tag.
	// Keys are lowercase tags padded to maxLen; positive values are langIDs,
	// negative values are 1-based indices into altTagIndex/altTags.
	grandfatheredMap = map[[maxLen]byte]int16{
		[maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban
		[maxLen]byte{'i', '-', 'a', 'm', 'i'}:                          _ami, // i-ami
		[maxLen]byte{'i', '-', 'b', 'n', 'n'}:                          _bnn, // i-bnn
		[maxLen]byte{'i', '-', 'h', 'a', 'k'}:                          _hak, // i-hak
		[maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}:      _tlh, // i-klingon
		[maxLen]byte{'i', '-', 'l', 'u', 'x'}:                          _lb,  // i-lux
		[maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}:           _nv,  // i-navajo
		[maxLen]byte{'i', '-', 'p', 'w', 'n'}:                          _pwn, // i-pwn
		[maxLen]byte{'i', '-', 't', 'a', 'o'}:                          _tao, // i-tao
		[maxLen]byte{'i', '-', 't', 'a', 'y'}:                          _tay, // i-tay
		[maxLen]byte{'i', '-', 't', 's', 'u'}:                          _tsu, // i-tsu
		[maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}:                     _nb,  // no-bok
		[maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}:                     _nn,  // no-nyn
		[maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}:      _sfb, // sgn-BE-FR
		[maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}:      _vgt, // sgn-BE-NL
		[maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}:      _sgg, // sgn-CH-DE
		[maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}:           _cmn, // zh-guoyu
		[maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}:           _hak, // zh-hakka
		[maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan
		[maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}:           _hsn, // zh-xiang

		// Grandfathered tags with no modern replacement will be converted as
		// follows:
		[maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish
		[maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}:           -2, // en-GB-oed
		[maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}:           -3, // i-default
		[maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}:      -4, // i-enochian
		[maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}:                     -5, // i-mingo
		[maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}:                          -6, // zh-min

		// CLDR-specific tag.
		[maxLen]byte{'r', 'o', 'o', 't'}: 0, // root
		[maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX"
	}

	// altTagIndex delimits the replacement tags inside altTags; entry -v of
	// grandfatheredMap maps to altTags[altTagIndex[v-1]:altTagIndex[v]].
	altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102}

	altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix"
)
|
||||||
|
|
||||||
|
// grandfathered returns the Tag for the legacy/grandfathered tag stored
// (lowercased and zero-padded) in s, and whether s was such a tag.
func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) {
	if v, ok := grandfatheredMap[s]; ok {
		if v < 0 {
			// Negative values select a full replacement tag from altTags.
			return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true
		}
		// Non-negative values are the base language id.
		t.lang = langID(v)
		return t, true
	}
	return t, false
}
|
933
vendor/golang.org/x/text/language/match.go
generated
vendored
Normal file
933
vendor/golang.org/x/text/language/match.go
generated
vendored
Normal file
@ -0,0 +1,933 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
// A MatchOption configures a Matcher.
type MatchOption func(*matcher)
|
||||||
|
|
||||||
|
// PreferSameScript will, in the absence of a match, result in the first
// preferred tag with the same script as a supported tag to match this supported
// tag. The default is currently true, but this may change in the future.
func PreferSameScript(preferSame bool) MatchOption {
	return func(m *matcher) { m.preferSameScript = preferSame }
}
|
||||||
|
|
||||||
|
// TODO(v1.0.0): consider making Matcher a concrete type, instead of interface.
// There doesn't seem to be too much need for multiple types.
// Making it a concrete type allows MatchStrings to be a method, which will
// improve its discoverability.

// MatchStrings parses and matches the given strings until one of them matches
// the language in the Matcher. A string may be an Accept-Language header as
// handled by ParseAcceptLanguage. The default language is returned if no
// other language matched.
func MatchStrings(m Matcher, lang ...string) (tag Tag, index int) {
	for _, accept := range lang {
		desired, _, err := ParseAcceptLanguage(accept)
		if err != nil {
			// Skip unparsable entries and try the next string.
			continue
		}
		if tag, index, conf := m.Match(desired...); conf != No {
			return tag, index
		}
	}
	// Nothing matched with any confidence: fall back to the matcher's default.
	tag, index, _ = m.Match()
	return
}
|
||||||
|
|
||||||
|
// Matcher is the interface that wraps the Match method.
//
// Match returns the best match for any of the given tags, along with
// a unique index associated with the returned tag and a confidence
// score.
type Matcher interface {
	Match(t ...Tag) (tag Tag, index int, c Confidence)
}
|
||||||
|
|
||||||
|
// Comprehends reports the confidence score for a speaker of a given language
// to being able to comprehend the written form of an alternative language.
func Comprehends(speaker, alternative Tag) Confidence {
	// Implemented by matching speaker against a one-element supported list.
	_, _, c := NewMatcher([]Tag{alternative}).Match(speaker)
	return c
}
|
||||||
|
|
||||||
|
// NewMatcher returns a Matcher that matches an ordered list of preferred tags
// against a list of supported tags based on written intelligibility, closeness
// of dialect, equivalence of subtags and various other rules. It is initialized
// with the list of supported tags. The first element is used as the default
// value in case no match is found.
//
// Its Match method matches the first of the given Tags to reach a certain
// confidence threshold. The tags passed to Match should therefore be specified
// in order of preference. Extensions are ignored for matching.
//
// The index returned by the Match method corresponds to the index of the
// matched tag in t, but is augmented with the Unicode extension ('u')of the
// corresponding preferred tag. This allows user locale options to be passed
// transparently.
func NewMatcher(t []Tag, options ...MatchOption) Matcher {
	return newMatcher(t, options)
}
|
||||||
|
|
||||||
|
// Match implements Matcher. It returns the best supported tag for any of the
// given desired tags, the index of that tag in the supported list, and the
// confidence of the match, falling back to the matcher's default tag.
func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
	match, w, c := m.getBest(want...)
	if match != nil {
		t, index = match.tag, match.index
	} else {
		// TODO: this should be an option
		t = m.default_.tag
		if m.preferSameScript {
		outer:
			for _, w := range want {
				script, _ := w.Script()
				if script.scriptID == 0 {
					// Don't do anything if there is no script, such as with
					// private subtags.
					continue
				}
				for i, h := range m.supported {
					if script.scriptID == h.maxScript {
						t, index = h.tag, i
						break outer
					}
				}
			}
		}
		// TODO: select first language tag based on script.
	}
	// Keep the user's more specific region when it lies within the matched
	// tag's region.
	if w.region != 0 && t.region != 0 && t.region.contains(w.region) {
		t, _ = Raw.Compose(t, Region{w.region})
	}
	// Copy options from the user-provided tag into the result tag. This is hard
	// to do after the fact, so we do it here.
	// TODO: add in alternative variants to -u-va-.
	// TODO: add preferred region to -u-rg-.
	if e := w.Extensions(); len(e) > 0 {
		t, _ = Raw.Compose(t, e)
	}
	return t, index, c
}
|
||||||
|
|
||||||
|
// scriptRegionFlags describes how script and region relate to a likely-subtag
// table entry (see the constants below).
type scriptRegionFlags uint8
|
||||||
|
|
||||||
|
// Flags used in the generated likely-subtag tables.
const (
	isList       = 1 << iota // entry points into a list table rather than being a single value
	scriptInFrom             // the script was part of the source (from) tag
	regionInFrom             // the region was part of the source (from) tag
)
|
||||||
|
|
||||||
|
func (t *Tag) setUndefinedLang(id langID) {
|
||||||
|
if t.lang == 0 {
|
||||||
|
t.lang = id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Tag) setUndefinedScript(id scriptID) {
|
||||||
|
if t.script == 0 {
|
||||||
|
t.script = id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Tag) setUndefinedRegion(id regionID) {
|
||||||
|
if t.region == 0 || t.region.contains(id) {
|
||||||
|
t.region = id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrMissingLikelyTagsData indicates no information was available
// to compute likely values of missing tags.
var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
|
||||||
|
|
||||||
|
// addLikelySubtags sets subtags to their most likely value, given the locale.
// In most cases this means setting fields for unknown values, but in some
// cases it may alter a value. It returns an ErrMissingLikelyTagsData error
// if the given locale cannot be expanded.
func (t Tag) addLikelySubtags() (Tag, error) {
	id, err := addTags(t)
	if err != nil {
		return t, err
	} else if id.equalTags(t) {
		// Nothing changed; avoid rebuilding the string representation.
		return t, nil
	}
	id.remakeString()
	return id, nil
}
|
||||||
|
|
||||||
|
// specializeRegion attempts to specialize a group region.
// It reports whether t.region was a region group; the region is replaced only
// when the group's likely language and script agree with t.
func specializeRegion(t *Tag) bool {
	if i := regionInclusion[t.region]; i < nRegionGroups {
		x := likelyRegionGroup[i]
		if langID(x.lang) == t.lang && scriptID(x.script) == t.script {
			t.region = regionID(x.region)
		}
		return true
	}
	return false
}
|
||||||
|
|
||||||
|
// addTags fills in the most likely values for the unspecified subtags of t
// using the generated likely-subtag tables, without rebuilding t's string
// representation. It returns ErrMissingLikelyTagsData if no data applies.
func addTags(t Tag) (Tag, error) {
	// We leave private use identifiers alone.
	if t.private() {
		return t, nil
	}
	if t.script != 0 && t.region != 0 {
		if t.lang != 0 {
			// already fully specified
			specializeRegion(&t)
			return t, nil
		}
		// Search matches for und-script-region. Note that for these cases
		// region will never be a group so there is no need to check for this.
		list := likelyRegion[t.region : t.region+1]
		if x := list[0]; x.flags&isList != 0 {
			list = likelyRegionList[x.lang : x.lang+uint16(x.script)]
		}
		for _, x := range list {
			// Deviating from the spec. See match_test.go for details.
			if scriptID(x.script) == t.script {
				t.setUndefinedLang(langID(x.lang))
				return t, nil
			}
		}
	}
	if t.lang != 0 {
		// Search matches for lang-script and lang-region, where lang != und.
		if t.lang < langNoIndexOffset {
			x := likelyLang[t.lang]
			if x.flags&isList != 0 {
				list := likelyLangList[x.region : x.region+uint16(x.script)]
				if t.script != 0 {
					for _, x := range list {
						if scriptID(x.script) == t.script && x.flags&scriptInFrom != 0 {
							t.setUndefinedRegion(regionID(x.region))
							return t, nil
						}
					}
				} else if t.region != 0 {
					count := 0
					goodScript := true
					tt := t
					for _, x := range list {
						// We visit all entries for which the script was not
						// defined, including the ones where the region was not
						// defined. This allows for proper disambiguation within
						// regions.
						if x.flags&scriptInFrom == 0 && t.region.contains(regionID(x.region)) {
							tt.region = regionID(x.region)
							tt.setUndefinedScript(scriptID(x.script))
							goodScript = goodScript && tt.script == scriptID(x.script)
							count++
						}
					}
					if count == 1 {
						return tt, nil
					}
					// Even if we fail to find a unique Region, we might have
					// an unambiguous script.
					if goodScript {
						t.script = tt.script
					}
				}
			}
		}
	} else {
		// Search matches for und-script.
		if t.script != 0 {
			x := likelyScript[t.script]
			if x.region != 0 {
				t.setUndefinedRegion(regionID(x.region))
				t.setUndefinedLang(langID(x.lang))
				return t, nil
			}
		}
		// Search matches for und-region. If und-script-region exists, it would
		// have been found earlier.
		if t.region != 0 {
			if i := regionInclusion[t.region]; i < nRegionGroups {
				x := likelyRegionGroup[i]
				if x.region != 0 {
					t.setUndefinedLang(langID(x.lang))
					t.setUndefinedScript(scriptID(x.script))
					t.region = regionID(x.region)
				}
			} else {
				x := likelyRegion[t.region]
				if x.flags&isList != 0 {
					x = likelyRegionList[x.lang]
				}
				if x.script != 0 && x.flags != scriptInFrom {
					t.setUndefinedLang(langID(x.lang))
					t.setUndefinedScript(scriptID(x.script))
					return t, nil
				}
			}
		}
	}

	// Search matches for lang.
	if t.lang < langNoIndexOffset {
		x := likelyLang[t.lang]
		if x.flags&isList != 0 {
			x = likelyLangList[x.region]
		}
		if x.region != 0 {
			t.setUndefinedScript(scriptID(x.script))
			t.setUndefinedRegion(regionID(x.region))
		}
		specializeRegion(&t)
		if t.lang == 0 {
			t.lang = _en // default language
		}
		return t, nil
	}
	return t, ErrMissingLikelyTagsData
}
|
||||||
|
|
||||||
|
func (t *Tag) setTagsFrom(id Tag) {
|
||||||
|
t.lang = id.lang
|
||||||
|
t.script = id.script
|
||||||
|
t.region = id.region
|
||||||
|
}
|
||||||
|
|
||||||
|
// minimize removes the region or script subtags from t such that
// t.addLikelySubtags() == t.minimize().addLikelySubtags().
func (t Tag) minimize() (Tag, error) {
	t, err := minimizeTags(t)
	if err != nil {
		return t, err
	}
	// Rebuild the string form to reflect the removed subtags.
	t.remakeString()
	return t, nil
}
|
||||||
|
|
||||||
|
// minimizeTags mimics the behavior of the ICU 51 C implementation.
// It keeps the smallest subset of t's subtags (tried in the order lang,
// lang-region, lang-script) that maximizes back to the same tag as t.
func minimizeTags(t Tag) (Tag, error) {
	if t.equalTags(und) {
		return t, nil
	}
	max, err := addTags(t)
	if err != nil {
		return t, err
	}
	for _, id := range [...]Tag{
		{lang: t.lang},
		{lang: t.lang, region: t.region},
		{lang: t.lang, script: t.script},
	} {
		if x, err := addTags(id); err == nil && max.equalTags(x) {
			t.setTagsFrom(id)
			break
		}
	}
	return t, nil
}
|
||||||
|
|
||||||
|
// Tag Matching
|
||||||
|
// CLDR defines an algorithm for finding the best match between two sets of language
|
||||||
|
// tags. The basic algorithm defines how to score a possible match and then find
|
||||||
|
// the match with the best score
|
||||||
|
// (see http://www.unicode.org/reports/tr35/#LanguageMatching).
|
||||||
|
// Using scoring has several disadvantages. The scoring obfuscates the importance of
|
||||||
|
// the various factors considered, making the algorithm harder to understand. Using
|
||||||
|
// scoring also requires the full score to be computed for each pair of tags.
|
||||||
|
//
|
||||||
|
// We will use a different algorithm which aims to have the following properties:
|
||||||
|
// - clarity on the precedence of the various selection factors, and
|
||||||
|
// - improved performance by allowing early termination of a comparison.
|
||||||
|
//
|
||||||
|
// Matching algorithm (overview)
|
||||||
|
// Input:
|
||||||
|
// - supported: a set of supported tags
|
||||||
|
// - default: the default tag to return in case there is no match
|
||||||
|
// - desired: list of desired tags, ordered by preference, starting with
|
||||||
|
// the most-preferred.
|
||||||
|
//
|
||||||
|
// Algorithm:
|
||||||
|
// 1) Set the best match to the lowest confidence level
|
||||||
|
// 2) For each tag in "desired":
|
||||||
|
// a) For each tag in "supported":
|
||||||
|
// 1) compute the match between the two tags.
|
||||||
|
// 2) if the match is better than the previous best match, replace it
|
||||||
|
// with the new match. (see next section)
|
||||||
|
// b) if the current best match is Exact and pin is true the result will be
|
||||||
|
// frozen to the language found thusfar, although better matches may
|
||||||
|
// still be found for the same language.
|
||||||
|
// 3) If the best match so far is below a certain threshold, return "default".
|
||||||
|
//
|
||||||
|
// Ranking:
|
||||||
|
// We use two phases to determine whether one pair of tags are a better match
|
||||||
|
// than another pair of tags. First, we determine a rough confidence level. If the
|
||||||
|
// levels are different, the one with the highest confidence wins.
|
||||||
|
// Second, if the rough confidence levels are identical, we use a set of tie-breaker
|
||||||
|
// rules.
|
||||||
|
//
|
||||||
|
// The confidence level of matching a pair of tags is determined by finding the
|
||||||
|
// lowest confidence level of any matches of the corresponding subtags (the
|
||||||
|
// result is deemed as good as its weakest link).
|
||||||
|
// We define the following levels:
|
||||||
|
// Exact - An exact match of a subtag, before adding likely subtags.
|
||||||
|
// MaxExact - An exact match of a subtag, after adding likely subtags.
|
||||||
|
// [See Note 2].
|
||||||
|
// High - High level of mutual intelligibility between different subtag
|
||||||
|
// variants.
|
||||||
|
// Low - Low level of mutual intelligibility between different subtag
|
||||||
|
// variants.
|
||||||
|
// No - No mutual intelligibility.
|
||||||
|
//
|
||||||
|
// The following levels can occur for each type of subtag:
|
||||||
|
// Base: Exact, MaxExact, High, Low, No
|
||||||
|
// Script: Exact, MaxExact [see Note 3], Low, No
|
||||||
|
// Region: Exact, MaxExact, High
|
||||||
|
// Variant: Exact, High
|
||||||
|
// Private: Exact, No
|
||||||
|
//
|
||||||
|
// Any result with a confidence level of Low or higher is deemed a possible match.
|
||||||
|
// Once a desired tag matches any of the supported tags with a level of MaxExact
|
||||||
|
// or higher, the next desired tag is not considered (see Step 2.b).
|
||||||
|
// Note that CLDR provides languageMatching data that defines close equivalence
|
||||||
|
// classes for base languages, scripts and regions.
|
||||||
|
//
|
||||||
|
// Tie-breaking
|
||||||
|
// If we get the same confidence level for two matches, we apply a sequence of
|
||||||
|
// tie-breaking rules. The first that succeeds defines the result. The rules are
|
||||||
|
// applied in the following order.
|
||||||
|
// 1) Original language was defined and was identical.
|
||||||
|
// 2) Original region was defined and was identical.
|
||||||
|
// 3) Distance between two maximized regions was the smallest.
|
||||||
|
// 4) Original script was defined and was identical.
|
||||||
|
// 5) Distance from want tag to have tag using the parent relation [see Note 5.]
|
||||||
|
// If there is still no winner after these rules are applied, the first match
|
||||||
|
// found wins.
|
||||||
|
//
|
||||||
|
// Notes:
|
||||||
|
// [2] In practice, as matching of Exact is done in a separate phase from
|
||||||
|
// matching the other levels, we reuse the Exact level to mean MaxExact in
|
||||||
|
// the second phase. As a consequence, we only need the levels defined by
|
||||||
|
// the Confidence type. The MaxExact confidence level is mapped to High in
|
||||||
|
// the public API.
|
||||||
|
// [3] We do not differentiate between maximized script values that were derived
|
||||||
|
// from suppressScript versus most likely tag data. We determined that in
|
||||||
|
// ranking the two, one ranks just after the other. Moreover, the two cannot
|
||||||
|
// occur concurrently. As a consequence, they are identical for practical
|
||||||
|
// purposes.
|
||||||
|
// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign
|
||||||
|
// the MaxExact level to allow iw vs he to still be a closer match than
|
||||||
|
// en-AU vs en-US, for example.
|
||||||
|
// [5] In CLDR a locale inherits fields that are unspecified for this locale
|
||||||
|
// from its parent. Therefore, if a locale is a parent of another locale,
|
||||||
|
// it is a strong measure for closeness, especially when no other tie
|
||||||
|
// breaker rule applies. One could also argue it is inconsistent, for
|
||||||
|
// example, when pt-AO matches pt (which CLDR equates with pt-BR), even
|
||||||
|
// though its parent is pt-PT according to the inheritance rules.
|
||||||
|
//
|
||||||
|
// Implementation Details:
|
||||||
|
// There are several performance considerations worth pointing out. Most notably,
|
||||||
|
// we preprocess as much as possible (within reason) at the time of creation of a
|
||||||
|
// matcher. This includes:
|
||||||
|
// - creating a per-language map, which includes data for the raw base language
|
||||||
|
// and its canonicalized variant (if applicable),
|
||||||
|
// - expanding entries for the equivalence classes defined in CLDR's
|
||||||
|
// languageMatch data.
|
||||||
|
// The per-language map ensures that typically only a very small number of tags
|
||||||
|
// need to be considered. The pre-expansion of canonicalized subtags and
|
||||||
|
// equivalence classes reduces the amount of map lookups that need to be done at
|
||||||
|
// runtime.
|
||||||
|
|
||||||
|
// matcher keeps a set of supported language tags, indexed by language.
type matcher struct {
	// default_ is the tag returned when no supported tag matches; it is the
	// first supported tag (or an empty haveTag when none were given).
	default_ *haveTag
	// supported holds the haveTags in the caller-supplied order.
	supported []*haveTag
	// index maps a base language to the tags that can match it.
	index map[langID]*matchHeader
	// passSettings and preferSameScript are toggled by MatchOptions.
	passSettings     bool
	preferSameScript bool
}
|
||||||
|
|
||||||
|
// matchHeader has the lists of tags for exact matches and matches based on
// maximized and canonicalized tags for a given language.
type matchHeader struct {
	haveTags []*haveTag
	// original is true if at least one entry was added as an exact
	// (non-derived) supported tag; see addIfNew.
	original bool
}
|
||||||
|
|
||||||
|
// haveTag holds a supported Tag and its maximized script and region. The maximized
// or canonicalized language is not stored as it is not needed during matching.
type haveTag struct {
	tag Tag

	// index of this tag in the original list of supported tags.
	index int

	// conf is the maximum confidence that can result from matching this haveTag.
	// When conf < Exact this means it was inserted after applying a CLDR equivalence rule.
	conf Confidence

	// Maximized region and script.
	maxRegion regionID
	maxScript scriptID

	// altScript may be checked as an alternative match to maxScript. If altScript
	// matches, the confidence level for this match is Low. Theoretically there
	// could be multiple alternative scripts. This does not occur in practice.
	altScript scriptID

	// nextMax is the index of the next haveTag with the same maximized tags.
	// A value of 0 terminates the list (index 0 is never a successor).
	nextMax uint16
}
|
||||||
|
|
||||||
|
// makeHaveTag builds the haveTag for a supported tag at the given index and
// also returns the maximized base language, which may differ from tag.lang
// after canonicalization (e.g. legacy codes).
func makeHaveTag(tag Tag, index int) (haveTag, langID) {
	max := tag
	// und (all subtags zero) is left as-is; anything else is canonicalized
	// and maximized with likely subtags.
	if tag.lang != 0 || tag.region != 0 || tag.script != 0 {
		max, _ = max.canonicalize(All)
		max, _ = addTags(max)
		max.remakeString()
	}
	return haveTag{tag, index, Exact, max.region, max.script, altScript(max.lang, max.script), 0}, max.lang
}
|
||||||
|
|
||||||
|
// altScript returns an alternative script that may match the given script with
// a low confidence. At the moment, the langMatch data allows for at most one
// script to map to another and we rely on this to keep the code simple.
// It returns 0 when no alternative exists.
func altScript(l langID, s scriptID) scriptID {
	for _, alt := range matchScript {
		// TODO: also match cases where language is not the same.
		if (langID(alt.wantLang) == l || langID(alt.haveLang) == l) &&
			scriptID(alt.haveScript) == s {
			return scriptID(alt.wantScript)
		}
	}
	return 0
}
|
||||||
|
|
||||||
|
// addIfNew adds a haveTag to the list of tags only if it is a unique tag.
// Tags that have the same maximized values are linked by index.
// exact marks the tag as an original (non-derived) supported tag.
func (h *matchHeader) addIfNew(n haveTag, exact bool) {
	h.original = h.original || exact
	// Don't add new exact matches.
	for _, v := range h.haveTags {
		if v.tag.equalsRest(n.tag) {
			return
		}
	}
	// Allow duplicate maximized tags, but create a linked list to allow quickly
	// comparing the equivalents and bail out.
	for i, v := range h.haveTags {
		if v.maxScript == n.maxScript &&
			v.maxRegion == n.maxRegion &&
			v.tag.variantOrPrivateTagStr() == n.tag.variantOrPrivateTagStr() {
			// Walk to the tail of the nextMax chain, then append the new
			// entry's (future) index.
			for h.haveTags[i].nextMax != 0 {
				i = int(h.haveTags[i].nextMax)
			}
			h.haveTags[i].nextMax = uint16(len(h.haveTags))
			break
		}
	}
	h.haveTags = append(h.haveTags, &n)
}
|
||||||
|
|
||||||
|
// header returns the matchHeader for the given language. It creates one if
// it doesn't already exist.
func (m *matcher) header(l langID) *matchHeader {
	if h := m.index[l]; h != nil {
		return h
	}
	h := &matchHeader{}
	m.index[l] = h
	return h
}
|
||||||
|
|
||||||
|
func toConf(d uint8) Confidence {
|
||||||
|
if d <= 10 {
|
||||||
|
return High
|
||||||
|
}
|
||||||
|
if d < 30 {
|
||||||
|
return Low
|
||||||
|
}
|
||||||
|
return No
|
||||||
|
}
|
||||||
|
|
||||||
|
// newMatcher builds an index for the given supported tags and returns it as
// a matcher. It also expands the index by considering various equivalence classes
// for a given tag.
func newMatcher(supported []Tag, options []MatchOption) *matcher {
	m := &matcher{
		index:            make(map[langID]*matchHeader),
		preferSameScript: true,
	}
	for _, o := range options {
		o(m)
	}
	if len(supported) == 0 {
		m.default_ = &haveTag{}
		return m
	}
	// Add supported languages to the index. Add exact matches first to give
	// them precedence.
	for i, tag := range supported {
		pair, _ := makeHaveTag(tag, i)
		m.header(tag.lang).addIfNew(pair, true)
		m.supported = append(m.supported, &pair)
	}
	// The default is the first supported tag.
	m.default_ = m.header(supported[0].lang).haveTags[0]
	// Keep these in two different loops to support the case that two equivalent
	// languages are distinguished, such as iw and he.
	for i, tag := range supported {
		pair, max := makeHaveTag(tag, i)
		if max != tag.lang {
			m.header(max).addIfNew(pair, true)
		}
	}

	// update is used to add indexes in the map for equivalent languages.
	// update will only add entries to original indexes, thus not computing any
	// transitive relations.
	update := func(want, have uint16, conf Confidence) {
		if hh := m.index[langID(have)]; hh != nil {
			if !hh.original {
				return
			}
			hw := m.header(langID(want))
			for _, ht := range hh.haveTags {
				// Copy the entry so the derived index can lower its
				// confidence without affecting the original.
				v := *ht
				if conf < v.conf {
					v.conf = conf
				}
				v.nextMax = 0 // this value needs to be recomputed
				if v.altScript != 0 {
					v.altScript = altScript(langID(want), v.maxScript)
				}
				hw.addIfNew(v, conf == Exact && hh.original)
			}
		}
	}

	// Add entries for languages with mutual intelligibility as defined by CLDR's
	// languageMatch data.
	for _, ml := range matchLang {
		update(ml.want, ml.have, toConf(ml.distance))
		if !ml.oneway {
			update(ml.have, ml.want, toConf(ml.distance))
		}
	}

	// Add entries for possible canonicalizations. This is an optimization to
	// ensure that only one map lookup needs to be done at runtime per desired tag.
	// First we match deprecated equivalents. If they are perfect equivalents
	// (their canonicalization simply substitutes a different language code, but
	// nothing else), the match confidence is Exact, otherwise it is High.
	for i, lm := range langAliasMap {
		// If deprecated codes match and there is no fiddling with the script
		// or region, we consider it an exact match.
		conf := Exact
		if langAliasTypes[i] != langMacro {
			if !isExactEquivalent(langID(lm.from)) {
				conf = High
			}
			update(lm.to, lm.from, conf)
		}
		update(lm.from, lm.to, conf)
	}
	return m
}
|
||||||
|
|
||||||
|
// getBest gets the best matching tag in m for any of the given tags, taking into
// account the order of preference of the given tags.
// It returns the matched haveTag (nil if nothing matched above No), the
// desired tag that produced the match, and the match confidence.
func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
	best := bestMatch{}
	for i, w := range want {
		var max Tag
		// Check for exact match first.
		h := m.index[w.lang]
		if w.lang != 0 {
			if h == nil {
				continue
			}
			// Base language is defined.
			max, _ = w.canonicalize(Legacy | Deprecated | Macro)
			// A region that is added through canonicalization is stronger than
			// a maximized region: set it in the original (e.g. mo -> ro-MD).
			if w.region != max.region {
				w.region = max.region
			}
			// TODO: should we do the same for scripts?
			// See test case: en, sr, nl ; sh ; sr
			max, _ = addTags(max)
		} else {
			// Base language is not defined.
			if h != nil {
				for i := range h.haveTags {
					have := h.haveTags[i]
					if have.tag.equalsRest(w) {
						return have, w, Exact
					}
				}
			}
			if w.script == 0 && w.region == 0 {
				// We skip all tags matching und for approximate matching, including
				// private tags.
				continue
			}
			max, _ = addTags(w)
			if h = m.index[max.lang]; h == nil {
				continue
			}
		}
		// pin is false when a later desired tag has the same base language,
		// in which case the language must not be locked in (see bestMatch.update).
		pin := true
		for _, t := range want[i+1:] {
			if w.lang == t.lang {
				pin = false
				break
			}
		}
		// Check for match based on maximized tag.
		for i := range h.haveTags {
			have := h.haveTags[i]
			best.update(have, w, max.script, max.region, pin)
			if best.conf == Exact {
				// Also consider the chain of tags with identical maximized
				// values before returning (tie-breakers may prefer one).
				for have.nextMax != 0 {
					have = h.haveTags[have.nextMax]
					best.update(have, w, max.script, max.region, pin)
				}
				return best.have, best.want, best.conf
			}
		}
	}
	if best.conf <= No {
		if len(want) != 0 {
			return nil, want[0], No
		}
		return nil, Tag{}, No
	}
	return best.have, best.want, best.conf
}
|
||||||
|
|
||||||
|
// bestMatch accumulates the best match so far.
type bestMatch struct {
	have *haveTag
	want Tag
	conf Confidence
	// pinnedRegion is the maximized region of the current best match; used
	// together with sameRegionGroup to restrict later candidates.
	pinnedRegion regionID
	// pinLanguage, once set, rejects candidates for a different language.
	pinLanguage     bool
	sameRegionGroup bool
	// Cached results from applying tie-breaking rules.
	origLang     bool
	origReg      bool
	paradigmReg  bool
	regGroupDist uint8
	origScript   bool
}
|
||||||
|
|
||||||
|
// update updates the existing best match if the new pair is considered to be a
// better match. To determine if the given pair is a better match, it first
// computes the rough confidence level. If this surpasses the current match, it
// will replace it and update the tie-breaker rule cache. If there is a tie, it
// proceeds with applying a series of tie-breaker rules. If there is no
// conclusive winner after applying the tie-breaker rules, it leaves the current
// match as the preferred match.
//
// If pin is true and have and tag are a strong match, it will henceforth only
// consider matches for this language. This corresponds to the notion that most
// users have a strong preference for the first defined language. A user can
// still prefer a second language over a dialect of the preferred language by
// explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should
// be false.
func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID, pin bool) {
	// Bail if the maximum attainable confidence is below that of the current best match.
	c := have.conf
	if c < m.conf {
		return
	}
	// Don't change the language once we already have found an exact match.
	if m.pinLanguage && tag.lang != m.want.lang {
		return
	}
	// Pin the region group if we are comparing tags for the same language.
	if tag.lang == m.want.lang && m.sameRegionGroup {
		_, sameGroup := regionGroupDist(m.pinnedRegion, have.maxRegion, have.maxScript, m.want.lang)
		if !sameGroup {
			return
		}
	}
	if c == Exact && have.maxScript == maxScript {
		// If there is another language and then another entry of this language,
		// don't pin anything, otherwise pin the language.
		m.pinLanguage = pin
	}
	if have.tag.equalsRest(tag) {
		// Everything but the base language matches exactly: keep c as-is.
	} else if have.maxScript != maxScript {
		// There is usually very little comprehension between different scripts.
		// In a few cases there may still be Low comprehension. This possibility
		// is pre-computed and stored in have.altScript.
		if Low < m.conf || have.altScript != maxScript {
			return
		}
		c = Low
	} else if have.maxRegion != maxRegion {
		if High < c {
			// There is usually a small difference between languages across regions.
			c = High
		}
	}

	// We store the results of the computations of the tie-breaker rules along
	// with the best match. There is no need to do the checks once we determine
	// we have a winner, but we do still need to do the tie-breaker computations.
	// We use "beaten" to keep track if we still need to do the checks.
	beaten := false // true if the new pair defeats the current one.
	if c != m.conf {
		if c < m.conf {
			return
		}
		beaten = true
	}

	// Tie-breaker rules:
	// We prefer if the pre-maximized language was specified and identical.
	origLang := have.tag.lang == tag.lang && tag.lang != 0
	if !beaten && m.origLang != origLang {
		if m.origLang {
			return
		}
		beaten = true
	}

	// We prefer if the pre-maximized region was specified and identical.
	origReg := have.tag.region == tag.region && tag.region != 0
	if !beaten && m.origReg != origReg {
		if m.origReg {
			return
		}
		beaten = true
	}

	// Smaller region-group distance wins.
	regGroupDist, sameGroup := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.lang)
	if !beaten && m.regGroupDist != regGroupDist {
		if regGroupDist > m.regGroupDist {
			return
		}
		beaten = true
	}

	// A paradigm region (see paradigmLocales) beats a non-paradigm one.
	paradigmReg := isParadigmLocale(tag.lang, have.maxRegion)
	if !beaten && m.paradigmReg != paradigmReg {
		if !paradigmReg {
			return
		}
		beaten = true
	}

	// Next we prefer if the pre-maximized script was specified and identical.
	origScript := have.tag.script == tag.script && tag.script != 0
	if !beaten && m.origScript != origScript {
		if m.origScript {
			return
		}
		beaten = true
	}

	// Update m to the newly found best match.
	if beaten {
		m.have = have
		m.want = tag
		m.conf = c
		m.pinnedRegion = maxRegion
		m.sameRegionGroup = sameGroup
		m.origLang = origLang
		m.origReg = origReg
		m.paradigmReg = paradigmReg
		m.origScript = origScript
		m.regGroupDist = regGroupDist
	}
}
|
||||||
|
|
||||||
|
// isParadigmLocale reports whether r is one of the (up to two) paradigm
// regions recorded for lang in paradigmLocales.
func isParadigmLocale(lang langID, r regionID) bool {
	for _, e := range paradigmLocales {
		if langID(e[0]) == lang && (r == regionID(e[1]) || r == regionID(e[2])) {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// regionGroupDist computes the distance between two regions based on their
// CLDR grouping. same reports whether the distance equals the default
// distance (i.e. no special rule brought the regions closer or farther).
func regionGroupDist(a, b regionID, script scriptID, lang langID) (dist uint8, same bool) {
	const defaultDistance = 4

	// Each region's group memberships are encoded as a bitmask; shift by one
	// to align with the 1-based group numbers in matchRegion.
	aGroup := uint(regionToGroups[a]) << 1
	bGroup := uint(regionToGroups[b]) << 1
	for _, ri := range matchRegion {
		if langID(ri.lang) == lang && (ri.script == 0 || scriptID(ri.script) == script) {
			group := uint(1 << (ri.group &^ 0x80))
			// The high bit of ri.group negates the rule.
			if 0x80&ri.group == 0 {
				if aGroup&bGroup&group != 0 { // Both regions are in the group.
					return ri.distance, ri.distance == defaultDistance
				}
			} else {
				if (aGroup|bGroup)&group == 0 { // Both regions are not in the group.
					return ri.distance, ri.distance == defaultDistance
				}
			}
		}
	}
	return defaultDistance, true
}
|
||||||
|
|
||||||
|
// variants returns the variant section of the tag's string representation,
// or "" if the tag has no variants.
func (t Tag) variants() string {
	if t.pVariant == 0 {
		return ""
	}
	return t.str[t.pVariant:t.pExt]
}
|
||||||
|
|
||||||
|
// variantOrPrivateTagStr returns variants or private use tags.
// When extensions are present (pExt > 0) the slice stops before them;
// otherwise it runs to the end of the string.
func (t Tag) variantOrPrivateTagStr() string {
	if t.pExt > 0 {
		return t.str[t.pVariant:t.pExt]
	}
	return t.str[t.pVariant:]
}
|
||||||
|
|
||||||
|
// equalsRest compares everything except the language.
|
||||||
|
func (a Tag) equalsRest(b Tag) bool {
|
||||||
|
// TODO: don't include extensions in this comparison. To do this efficiently,
|
||||||
|
// though, we should handle private tags separately.
|
||||||
|
return a.script == b.script && a.region == b.region && a.variantOrPrivateTagStr() == b.variantOrPrivateTagStr()
|
||||||
|
}
|
||||||
|
|
||||||
|
// isExactEquivalent returns true if canonicalizing the language will not alter
// the script or region of a tag. The notEquivalent list is precomputed in init.
func isExactEquivalent(l langID) bool {
	for _, o := range notEquivalent {
		if o == l {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// notEquivalent lists languages whose canonicalization alters the script or
// region; populated by init below and consulted by isExactEquivalent.
var notEquivalent []langID

func init() {
	// Create a list of all languages for which canonicalization may alter the
	// script or region.
	for _, lm := range langAliasMap {
		tag := Tag{lang: langID(lm.from)}
		if tag, _ = tag.canonicalize(All); tag.script != 0 || tag.region != 0 {
			notEquivalent = append(notEquivalent, langID(lm.from))
		}
	}
	// Maximize undefined regions of paradigm locales.
	for i, v := range paradigmLocales {
		max, _ := addTags(Tag{lang: langID(v[0])})
		if v[1] == 0 {
			paradigmLocales[i][1] = uint16(max.region)
		}
		if v[2] == 0 {
			paradigmLocales[i][2] = uint16(max.region)
		}
	}
}
|
859
vendor/golang.org/x/text/language/parse.go
generated
vendored
Normal file
859
vendor/golang.org/x/text/language/parse.go
generated
vendored
Normal file
@ -0,0 +1,859 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/text/internal/tag"
|
||||||
|
)
|
||||||
|
|
||||||
|
// isAlpha reports whether b is an ASCII letter rather than a digit.
// b must be an ASCII letter or digit.
func isAlpha(b byte) bool {
	// Digits sort below letters in ASCII, so anything not <= '9' is a letter.
	return !(b <= '9')
}
|
||||||
|
|
||||||
|
// isAlphaNum reports whether s consists solely of ASCII letters and digits.
// An empty slice is trivially alphanumeric.
func isAlphaNum(s []byte) bool {
	for _, c := range s {
		switch {
		case 'a' <= c && c <= 'z':
		case 'A' <= c && c <= 'Z':
		case '0' <= c && c <= '9':
		default:
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// errSyntax is returned by any of the parsing functions when the
// input is not well-formed, according to BCP 47.
// TODO: return the position at which the syntax error occurred?
var errSyntax = errors.New("language: tag is not well-formed")
|
||||||
|
|
||||||
|
// ValueError is returned by any of the parsing functions when the
// input is well-formed but the respective subtag is not recognized
// as a valid value.
type ValueError struct {
	// v holds the offending subtag, zero-padded to 8 bytes.
	v [8]byte
}
|
||||||
|
|
||||||
|
// mkErrInvalid wraps the given subtag in a ValueError. Subtags longer than
// 8 bytes are truncated by the copy.
func mkErrInvalid(s []byte) error {
	var e ValueError
	copy(e.v[:], s)
	return e
}
|
||||||
|
|
||||||
|
// tag returns the stored subtag, trimmed of the zero padding.
func (e ValueError) tag() []byte {
	n := bytes.IndexByte(e.v[:], 0)
	if n == -1 {
		n = 8
	}
	return e.v[:n]
}
|
||||||
|
|
||||||
|
// Error implements the error interface.
func (e ValueError) Error() string {
	return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag())
}
|
||||||
|
|
||||||
|
// Subtag returns the subtag for which the error occurred.
func (e ValueError) Subtag() string {
	return string(e.tag())
}
|
||||||
|
|
||||||
|
// scanner is used to scan BCP 47 tokens, which are separated by _ or -.
type scanner struct {
	// b is the working buffer; it aliases bytes when the input is small
	// enough, avoiding an allocation.
	b     []byte
	bytes [max99thPercentileSize]byte
	// token is the current token, or nil when the input is exhausted or
	// malformed at this position.
	token []byte
	start int // start position of the current token
	end   int // end position of the current token
	next  int // next point for scan
	// err records the first (or most severe) error encountered.
	err  error
	done bool
}
|
||||||
|
|
||||||
|
// makeScannerString returns a scanner for s, copying s into the scanner's
// fixed-size buffer when it fits to avoid a heap allocation.
func makeScannerString(s string) scanner {
	scan := scanner{}
	if len(s) <= len(scan.bytes) {
		scan.b = scan.bytes[:copy(scan.bytes[:], s)]
	} else {
		scan.b = []byte(s)
	}
	scan.init()
	return scan
}
|
||||||
|
|
||||||
|
// makeScanner returns a scanner using b as the input buffer.
// b is not copied and may be modified by the scanner routines.
func makeScanner(b []byte) scanner {
	scan := scanner{b: b}
	scan.init()
	return scan
}
|
||||||
|
|
||||||
|
// init normalizes '_' separators to '-' and positions the scanner on the
// first token.
func (s *scanner) init() {
	for i, c := range s.b {
		if c == '_' {
			s.b[i] = '-'
		}
	}
	s.scan()
}
|
||||||
|
|
||||||
|
// toLower converts the ASCII bytes between start and end to lower case in place.
func (s *scanner) toLower(start, end int) {
	for i := start; i < end; i++ {
		c := s.b[i]
		if 'A' <= c && c <= 'Z' {
			s.b[i] += 'a' - 'A'
		}
	}
}
|
||||||
|
|
||||||
|
// setError records e, keeping the first error seen except that a more
// specific error replaces a previously recorded errSyntax.
func (s *scanner) setError(e error) {
	if s.err == nil || (e == errSyntax && s.err != errSyntax) {
		s.err = e
	}
}
|
||||||
|
|
||||||
|
// resizeRange shrinks or grows the array at position oldStart such that
// a new string of size newSize can fit between oldStart and oldEnd.
// Sets the scan point to after the resized range.
func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) {
	s.start = oldStart
	if end := oldStart + newSize; end != oldEnd {
		diff := end - oldEnd
		if end < cap(s.b) {
			// Rebuild the buffer with a gap of newSize at oldStart.
			b := make([]byte, len(s.b)+diff)
			copy(b, s.b[:oldStart])
			copy(b[end:], s.b[oldEnd:])
			s.b = b
		} else {
			// NOTE(review): this branch discards the prefix s.b[:end] — the
			// analogous append in other versions keeps s.b[:end] and appends
			// the tail. Confirm against upstream golang.org/x/text; in
			// practice replacements shrink tokens so this branch is unlikely
			// to be reached.
			s.b = append(s.b[end:], s.b[oldEnd:]...)
		}
		s.next = end + (s.next - s.end)
		s.end = end
	}
}
|
||||||
|
|
||||||
|
// replace replaces the current token with repl, resizing the buffer as needed.
func (s *scanner) replace(repl string) {
	s.resizeRange(s.start, s.end, len(repl))
	copy(s.b[s.start:], repl)
}
|
||||||
|
|
||||||
|
// gobble removes the current token from the input.
// Caller must call scan after calling gobble.
func (s *scanner) gobble(e error) {
	s.setError(e)
	if s.start == 0 {
		// Token is at the front: drop it together with its trailing '-'.
		s.b = s.b[:+copy(s.b, s.b[s.next:])]
		s.end = 0
	} else {
		// Drop the token together with its leading '-'.
		s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])]
		s.end = s.start - 1
	}
	s.next = s.start
}
|
||||||
|
|
||||||
|
// deleteRange removes the given range from s.b before the current token,
// shifting the scanner positions accordingly. It always records errSyntax.
func (s *scanner) deleteRange(start, end int) {
	s.setError(errSyntax)
	s.b = s.b[:start+copy(s.b[start:], s.b[end:])]
	diff := end - start
	s.next -= diff
	s.start -= diff
	s.end -= diff
}
|
||||||
|
|
||||||
|
// scan parses the next token of a BCP 47 string. Tokens that are larger
// than 8 characters or include non-alphanumeric characters result in an error
// and are gobbled and removed from the output.
// It returns the end position of the last token consumed.
func (s *scanner) scan() (end int) {
	end = s.end
	s.token = nil
	for s.start = s.next; s.next < len(s.b); {
		i := bytes.IndexByte(s.b[s.next:], '-')
		if i == -1 {
			// Last token: consume to the end of the buffer.
			s.end = len(s.b)
			s.next = len(s.b)
			i = s.end - s.start
		} else {
			s.end = s.next + i
			s.next = s.end + 1
		}
		token := s.b[s.start:s.end]
		if i < 1 || i > 8 || !isAlphaNum(token) {
			// Invalid token: remove it and retry from the same position.
			s.gobble(errSyntax)
			continue
		}
		s.token = token
		return end
	}
	// Strip a trailing '-' left over from a removed final token.
	if n := len(s.b); n > 0 && s.b[n-1] == '-' {
		s.setError(errSyntax)
		s.b = s.b[:len(s.b)-1]
	}
	s.done = true
	return end
}
|
||||||
|
|
||||||
|
// acceptMinSize parses multiple tokens of the given size or greater.
// It returns the end position of the last token consumed.
func (s *scanner) acceptMinSize(min int) (end int) {
	end = s.end
	s.scan()
	for ; len(s.token) >= min; s.scan() {
		end = s.end
	}
	return end
}
|
||||||
|
|
||||||
|
// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
// failed it returns an error and any part of the tag that could be parsed.
// If parsing succeeded but an unknown value was found, it returns
// ValueError. The Tag returned in this case is just stripped of the unknown
// value. All other values are preserved. It accepts tags in the BCP 47 format
// and extensions to this standard defined in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// The resulting tag is canonicalized using the default canonicalization type.
func Parse(s string) (t Tag, err error) {
	return Default.Parse(s)
}
|
||||||
|
|
||||||
|
// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
// failed it returns an error and any part of the tag that could be parsed.
// If parsing succeeded but an unknown value was found, it returns
// ValueError. The Tag returned in this case is just stripped of the unknown
// value. All other values are preserved. It accepts tags in the BCP 47 format
// and extensions to this standard defined in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// The resulting tag is canonicalized using the canonicalization type c.
func (c CanonType) Parse(s string) (t Tag, err error) {
	// TODO: consider supporting old-style locale key-value pairs.
	if s == "" {
		return und, errSyntax
	}
	// Fast path: short inputs may be grandfathered tags, matched against a
	// lowercased, '-'-normalized copy.
	if len(s) <= maxAltTaglen {
		b := [maxAltTaglen]byte{}
		for i, c := range s {
			// Generating invalid UTF-8 is okay as it won't match.
			if 'A' <= c && c <= 'Z' {
				c += 'a' - 'A'
			} else if c == '_' {
				c = '-'
			}
			b[i] = byte(c)
		}
		if t, ok := grandfathered(b); ok {
			return t, nil
		}
	}
	scan := makeScannerString(s)
	t, err = parse(&scan, s)
	t, changed := t.canonicalize(c)
	if changed {
		t.remakeString()
	}
	return t, err
}
|
||||||
|
|
||||||
|
// parse parses s, whose scanner is positioned at the first token, into a
// Tag. On a syntax error it returns whatever part of the tag could be
// parsed together with scan.err.
func parse(scan *scanner, s string) (t Tag, err error) {
	t = und
	var end int
	if n := len(scan.token); n <= 1 {
		scan.toLower(0, len(scan.b))
		if n == 0 || scan.token[0] != 'x' {
			return t, errSyntax
		}
		// The tag consists solely of a private use section ("x-...").
		end = parseExtensions(scan)
	} else if n >= 4 {
		// First token is too long to be a language subtag here.
		return und, errSyntax
	} else { // the usual case
		t, end = parseTag(scan)
		if n := len(scan.token); n == 1 {
			// A single-character token marks the start of the extensions.
			t.pExt = uint16(end)
			end = parseExtensions(scan)
		} else if end < len(scan.b) {
			// Trailing input that is not a valid subtag.
			scan.setError(errSyntax)
			scan.b = scan.b[:end]
		}
	}
	if int(t.pVariant) < len(scan.b) {
		if end < len(s) {
			s = s[:end]
		}
		if len(s) > 0 && tag.Compare(s, scan.b) == 0 {
			// Reuse the input string when it matches the scanned result to
			// avoid an allocation.
			t.str = s
		} else {
			t.str = string(scan.b)
		}
	} else {
		// No variants or extensions remain: drop the offsets.
		t.pVariant, t.pExt = 0, 0
	}
	return t, scan.err
}
|
||||||
|
|
||||||
|
// parseTag parses language, script, region and variants.
// It returns a Tag and the end position in the input that was parsed.
func parseTag(scan *scanner) (t Tag, end int) {
	var e error
	// TODO: set an error if an unknown lang, script or region is encountered.
	t.lang, e = getLangID(scan.token)
	scan.setError(e)
	scan.replace(t.lang.String())
	langStart := scan.start
	end = scan.scan()
	for len(scan.token) == 3 && isAlpha(scan.token[0]) {
		// From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent
		// to a tag of the form <extlang>.
		lang, e := getLangID(scan.token)
		if lang != 0 {
			// Overwrite the primary language with the extlang in place.
			t.lang = lang
			copy(scan.b[langStart:], lang.String())
			scan.b[langStart+3] = '-'
			scan.start = langStart + 4
		}
		scan.gobble(e)
		end = scan.scan()
	}
	if len(scan.token) == 4 && isAlpha(scan.token[0]) {
		// A four-letter alphabetic token is a script subtag.
		t.script, e = getScriptID(script, scan.token)
		if t.script == 0 {
			scan.gobble(e)
		}
		end = scan.scan()
	}
	if n := len(scan.token); n >= 2 && n <= 3 {
		// A two- or three-character token is a region subtag.
		t.region, e = getRegionID(scan.token)
		if t.region == 0 {
			scan.gobble(e)
		} else {
			scan.replace(t.region.String())
		}
		end = scan.scan()
	}
	scan.toLower(scan.start, len(scan.b))
	t.pVariant = byte(end)
	end = parseVariants(scan, end, t)
	t.pExt = uint16(end)
	return t, end
}
|
||||||
|
|
||||||
|
// separator is the subtag separator used when joining byte slices.
var separator = []byte{'-'}
|
||||||
|
|
||||||
|
// parseVariants scans tokens as long as each token is a valid variant string.
// Duplicate variants are removed.
func parseVariants(scan *scanner, end int, t Tag) int {
	start := scan.start
	// Fixed-size backing arrays keep the common few-variant case off the heap.
	varIDBuf := [4]uint8{}
	variantBuf := [4][]byte{}
	varID := varIDBuf[:0]
	variant := variantBuf[:0]
	last := -1
	needSort := false
	for ; len(scan.token) >= 4; scan.scan() {
		// TODO: measure the impact of needing this conversion and redesign
		// the data structure if there is an issue.
		v, ok := variantIndex[string(scan.token)]
		if !ok {
			// unknown variant
			// TODO: allow user-defined variants?
			scan.gobble(mkErrInvalid(scan.token))
			continue
		}
		varID = append(varID, v)
		variant = append(variant, scan.token)
		if !needSort {
			if last < int(v) {
				last = int(v)
			} else {
				needSort = true
				// There are no legal combinations of more than 7 variants
				// (and this is by no means a useful sequence).
				const maxVariants = 8
				if len(varID) > maxVariants {
					break
				}
			}
		}
		end = scan.end
	}
	if needSort {
		sort.Sort(variantsSort{varID, variant})
		k, l := 0, -1
		for i, v := range varID {
			w := int(v)
			if l == w {
				// Remove duplicates.
				continue
			}
			varID[k] = varID[i]
			variant[k] = variant[i]
			k++
			l = w
		}
		if str := bytes.Join(variant[:k], separator); len(str) == 0 {
			// Nothing left after deduplication; also drop the preceding '-'.
			end = start - 1
		} else {
			scan.resizeRange(start, end, len(str))
			copy(scan.b[scan.start:], str)
			end = scan.end
		}
	}
	return end
}
|
||||||
|
|
||||||
|
type variantsSort struct {
|
||||||
|
i []uint8
|
||||||
|
v [][]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s variantsSort) Len() int {
|
||||||
|
return len(s.i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s variantsSort) Swap(i, j int) {
|
||||||
|
s.i[i], s.i[j] = s.i[j], s.i[i]
|
||||||
|
s.v[i], s.v[j] = s.v[j], s.v[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s variantsSort) Less(i, j int) bool {
|
||||||
|
return s.i[i] < s.i[j]
|
||||||
|
}
|
||||||
|
|
||||||
|
type bytesSort [][]byte
|
||||||
|
|
||||||
|
func (b bytesSort) Len() int {
|
||||||
|
return len(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b bytesSort) Swap(i, j int) {
|
||||||
|
b[i], b[j] = b[j], b[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b bytesSort) Less(i, j int) bool {
|
||||||
|
return bytes.Compare(b[i], b[j]) == -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseExtensions parses and normalizes the extensions in the buffer.
// It returns the last position of scan.b that is part of any extension.
// It also trims scan.b to remove excess parts accordingly.
func parseExtensions(scan *scanner) int {
	start := scan.start
	exts := [][]byte{}
	private := []byte{}
	end := scan.end
	for len(scan.token) == 1 {
		extStart := scan.start
		ext := scan.token[0]
		end = parseExtension(scan)
		extension := scan.b[extStart:end]
		if len(extension) < 3 || (ext != 'x' && len(extension) < 4) {
			// Too short to hold a singleton plus at least one subtag.
			scan.setError(errSyntax)
			end = extStart
			continue
		} else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) {
			// Single extension (or leading private use): no reordering needed.
			scan.b = scan.b[:end]
			return end
		} else if ext == 'x' {
			// Private use must remain last; stop collecting.
			private = extension
			break
		}
		exts = append(exts, extension)
	}
	// Sort the collected extensions; private use ('x') is appended last.
	sort.Sort(bytesSort(exts))
	if len(private) > 0 {
		exts = append(exts, private)
	}
	scan.b = scan.b[:start]
	if len(exts) > 0 {
		scan.b = append(scan.b, bytes.Join(exts, separator)...)
	} else if start > 0 {
		// Strip trailing '-'.
		scan.b = scan.b[:start-1]
	}
	return end
}
|
||||||
|
|
||||||
|
// parseExtension parses a single extension and returns the position of
// the extension end. The singleton at scan.token selects the syntax:
// 'u' (Unicode), 't' (transformed content), 'x' (private use), or any
// other singleton with generic subtag rules.
func parseExtension(scan *scanner) int {
	start, end := scan.start, scan.end
	switch scan.token[0] {
	case 'u':
		attrStart := end
		scan.scan()
		// First pass over the attributes (tokens longer than 2 bytes).
		for last := []byte{}; len(scan.token) > 2; scan.scan() {
			if bytes.Compare(scan.token, last) != -1 {
				// Attributes are unsorted. Start over from scratch.
				p := attrStart + 1
				scan.next = p
				attrs := [][]byte{}
				for scan.scan(); len(scan.token) > 2; scan.scan() {
					attrs = append(attrs, scan.token)
					end = scan.end
				}
				sort.Sort(bytesSort(attrs))
				copy(scan.b[p:], bytes.Join(attrs, separator))
				break
			}
			last = scan.token
			end = scan.end
		}
		// Then the key-value pairs: a 2-byte key followed by one or more
		// subtags of at least 3 bytes.
		var last, key []byte
		for attrEnd := end; len(scan.token) == 2; last = key {
			key = scan.token
			keyEnd := scan.end
			end = scan.acceptMinSize(3)
			// TODO: check key value validity
			if keyEnd == end || bytes.Compare(key, last) != 1 {
				// We have an invalid key or the keys are not sorted.
				// Start scanning keys from scratch and reorder.
				p := attrEnd + 1
				scan.next = p
				keys := [][]byte{}
				for scan.scan(); len(scan.token) == 2; {
					keyStart, keyEnd := scan.start, scan.end
					end = scan.acceptMinSize(3)
					if keyEnd != end {
						keys = append(keys, scan.b[keyStart:end])
					} else {
						// A key without any value subtag is a syntax error.
						scan.setError(errSyntax)
						end = keyStart
					}
				}
				sort.Sort(bytesSort(keys))
				reordered := bytes.Join(keys, separator)
				if e := p + len(reordered); e < end {
					// Reordering shrank the section; remove the gap.
					scan.deleteRange(e, end)
					end = e
				}
				copy(scan.b[p:], bytes.Join(keys, separator))
				break
			}
		}
	case 't':
		scan.scan()
		if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) {
			// The 't' extension may embed a full language tag.
			_, end = parseTag(scan)
			scan.toLower(start, end)
		}
		// Consume any <digit+alpha>-style field key/value pairs.
		for len(scan.token) == 2 && !isAlpha(scan.token[1]) {
			end = scan.acceptMinSize(3)
		}
	case 'x':
		// Private use: subtags of length 1 or more.
		end = scan.acceptMinSize(1)
	default:
		end = scan.acceptMinSize(2)
	}
	return end
}
|
||||||
|
|
||||||
|
// Compose creates a Tag from individual parts, which may be of type Tag, Base,
// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
// Base, Script or Region or slice of type Variant or Extension is passed more
// than once, the latter will overwrite the former. Variants and Extensions are
// accumulated, but if two extensions of the same type are passed, the latter
// will replace the former. A Tag overwrites all former values and typically
// only makes sense as the first argument. The resulting tag is returned after
// canonicalizing using the Default CanonType. If one or more errors are
// encountered, one of the errors is returned.
// It is shorthand for Default.Compose(part...).
func Compose(part ...interface{}) (t Tag, err error) {
	return Default.Compose(part...)
}
|
||||||
|
|
||||||
|
// Compose creates a Tag from individual parts, which may be of type Tag, Base,
// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
// Base, Script or Region or slice of type Variant or Extension is passed more
// than once, the latter will overwrite the former. Variants and Extensions are
// accumulated, but if two extensions of the same type are passed, the latter
// will replace the former. A Tag overwrites all former values and typically
// only makes sense as the first argument. The resulting tag is returned after
// canonicalizing using CanonType c. If one or more errors are encountered,
// one of the errors is returned.
func (c CanonType) Compose(part ...interface{}) (t Tag, err error) {
	var b builder
	if err = b.update(part...); err != nil {
		return und, err
	}
	t, _ = b.tag.canonicalize(c)

	if len(b.ext) > 0 || len(b.variant) > 0 {
		sort.Sort(sortVariant(b.variant))
		sort.Strings(b.ext)
		if b.private != "" {
			// Private use sorts after all other extensions.
			b.ext = append(b.ext, b.private)
		}
		// Rebuild the string form: core subtags, then variants, then extensions.
		n := maxCoreSize + tokenLen(b.variant...) + tokenLen(b.ext...)
		buf := make([]byte, n)
		p := t.genCoreBytes(buf)
		t.pVariant = byte(p)
		p += appendTokens(buf[p:], b.variant...)
		t.pExt = uint16(p)
		p += appendTokens(buf[p:], b.ext...)
		t.str = string(buf[:p])
	} else if b.private != "" {
		t.str = b.private
		t.remakeString()
	}
	return
}
|
||||||
|
|
||||||
|
// builder accumulates the parts passed to Compose before they are
// assembled into a Tag.
type builder struct {
	tag Tag // core language, script and region

	private string // the x extension
	ext     []string
	variant []string

	err error
}
|
||||||
|
|
||||||
|
func (b *builder) addExt(e string) {
|
||||||
|
if e == "" {
|
||||||
|
} else if e[0] == 'x' {
|
||||||
|
b.private = e
|
||||||
|
} else {
|
||||||
|
b.ext = append(b.ext, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// errInvalidArgument is reported when an empty Variant or Extension is
// passed to Compose.
var errInvalidArgument = errors.New("invalid Extension or Variant")

// update merges the given parts into the builder, later parts taking
// precedence as documented for Compose. The last error part encountered,
// if any, is returned.
func (b *builder) update(part ...interface{}) (err error) {
	// replace substitutes s for an existing element of *l matching eq and
	// reports whether a substitution (or an empty-string error) occurred.
	replace := func(l *[]string, s string, eq func(a, b string) bool) bool {
		if s == "" {
			b.err = errInvalidArgument
			return true
		}
		for i, v := range *l {
			if eq(v, s) {
				(*l)[i] = s
				return true
			}
		}
		return false
	}
	for _, x := range part {
		switch v := x.(type) {
		case Tag:
			// A Tag resets core fields and, if it has a string form,
			// replaces all variants and extensions with its own.
			b.tag.lang = v.lang
			b.tag.region = v.region
			b.tag.script = v.script
			if v.str != "" {
				b.variant = nil
				for x, s := "", v.str[v.pVariant:v.pExt]; s != ""; {
					x, s = nextToken(s)
					b.variant = append(b.variant, x)
				}
				b.ext, b.private = nil, ""
				for i, e := int(v.pExt), ""; i < len(v.str); {
					i, e = getExtension(v.str, i)
					b.addExt(e)
				}
			}
		case Base:
			b.tag.lang = v.langID
		case Script:
			b.tag.script = v.scriptID
		case Region:
			b.tag.region = v.regionID
		case Variant:
			if !replace(&b.variant, v.variant, func(a, b string) bool { return a == b }) {
				b.variant = append(b.variant, v.variant)
			}
		case Extension:
			// Extensions match on their singleton (first byte).
			if !replace(&b.ext, v.s, func(a, b string) bool { return a[0] == b[0] }) {
				b.addExt(v.s)
			}
		case []Variant:
			b.variant = nil
			for _, x := range v {
				b.update(x)
			}
		case []Extension:
			b.ext, b.private = nil, ""
			for _, e := range v {
				b.update(e)
			}
		// TODO: support parsing of raw strings based on morphology or just extensions?
		case error:
			err = v
		}
	}
	return
}
|
||||||
|
|
||||||
|
// tokenLen returns the number of bytes needed to encode the given tokens,
// counting one extra byte per token for its preceding '-' separator.
func tokenLen(token ...string) (n int) {
	for i := range token {
		n += 1 + len(token[i])
	}
	return n
}
|
||||||
|
|
||||||
|
// appendTokens writes each token into b, each preceded by a '-'
// separator, and returns the number of bytes written. The buffer must be
// large enough for all tokens (see tokenLen).
func appendTokens(b []byte, token ...string) int {
	n := 0
	for _, tok := range token {
		b[n] = '-'
		copy(b[n+1:], tok)
		n += 1 + len(tok)
	}
	return n
}
|
||||||
|
|
||||||
|
// sortVariant sorts variant subtags by their position in variantIndex.
type sortVariant []string

func (s sortVariant) Len() int {
	return len(s)
}

func (s sortVariant) Swap(i, j int) {
	s[j], s[i] = s[i], s[j]
}

func (s sortVariant) Less(i, j int) bool {
	return variantIndex[s[i]] < variantIndex[s[j]]
}
|
||||||
|
|
||||||
|
// findExt returns the index in list of the extension whose singleton
// (first byte) equals x, or -1 if there is none.
func findExt(list []string, x byte) int {
	for i := range list {
		if list[i][0] == x {
			return i
		}
	}
	return -1
}
|
||||||
|
|
||||||
|
// getExtension returns the end position of the extension starting at or
// just after p in s, together with the extension's text (singleton
// included). A leading '-' at p is skipped. The private use extension
// ('x') always extends to the end of the string.
func getExtension(s string, p int) (end int, ext string) {
	if s[p] == '-' {
		p++
	}
	if s[p] == 'x' {
		return len(s), s[p:]
	}
	end = nextExtension(s, p)
	return end, s[p:end]
}

// nextExtension finds the next extension within the string, searching
// for the -<singleton>- pattern from position p.
// In the vast majority of cases, language tags will have at most
// one extension and extensions tend to be small.
func nextExtension(s string, p int) int {
	for limit := len(s) - 3; p < limit; {
		if s[p] != '-' {
			p++
			continue
		}
		if s[p+2] == '-' {
			return p
		}
		p += 3
	}
	return len(s)
}
|
||||||
|
|
||||||
|
// errInvalidWeight is returned when a quality weight cannot be parsed as
// a float.
var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight")

// ParseAcceptLanguage parses the contents of an Accept-Language header as
// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and
// a list of corresponding quality weights. It is more permissive than RFC 2616
// and may return non-nil slices even if the input is not valid.
// The Tags will be sorted by highest weight first and then by first occurrence.
// Tags with a weight of zero will be dropped. An error will be returned if the
// input could not be parsed.
func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) {
	var entry string
	for s != "" {
		if entry, s = split(s, ','); entry == "" {
			continue
		}

		entry, weight := split(entry, ';')

		// Scan the language.
		t, err := Parse(entry)
		if err != nil {
			// Fall back for a few commonly seen non-BCP 47 spellings.
			id, ok := acceptFallback[entry]
			if !ok {
				return nil, nil, err
			}
			t = Tag{lang: id}
		}

		// Scan the optional weight.
		w := 1.0
		if weight != "" {
			weight = consume(weight, 'q')
			weight = consume(weight, '=')
			// consume returns the empty string when a token could not be
			// consumed, resulting in an error for ParseFloat.
			if w, err = strconv.ParseFloat(weight, 32); err != nil {
				return nil, nil, errInvalidWeight
			}
			// Drop tags with a quality weight of 0.
			if w <= 0 {
				continue
			}
		}

		tag = append(tag, t)
		q = append(q, float32(w))
	}
	sortStable(&tagSort{tag, q})
	return tag, q, nil
}
|
||||||
|
|
||||||
|
// consume strips a leading byte c from s and returns the remainder with
// surrounding white space trimmed. If s does not start with c, it returns
// the empty string.
func consume(s string, c byte) string {
	if len(s) == 0 || s[0] != c {
		return ""
	}
	return strings.TrimSpace(s[1:])
}
|
||||||
|
|
||||||
|
// split cuts s at the first occurrence of c, returning both halves with
// surrounding white space trimmed. If c does not occur, head is all of s
// (trimmed) and tail is empty.
func split(s string, c byte) (head, tail string) {
	i := strings.IndexByte(s, c)
	if i < 0 {
		return strings.TrimSpace(s), ""
	}
	return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:])
}
|
||||||
|
|
||||||
|
// acceptFallback adds a hack mapping to deal with a small number of cases
// that occur in Accept-Language (with reasonable frequency).
var acceptFallback = map[string]langID{
	"english": _en,
	"deutsch": _de,
	"italian": _it,
	"french":  _fr,
	"*":       _mul, // defined in the spec to match all languages.
}
|
||||||
|
|
||||||
|
// tagSort sorts tags by descending quality weight, keeping the tag and
// weight slices in lockstep. It is used with sortStable so that equal
// weights retain their original order.
type tagSort struct {
	tag []Tag
	q   []float32
}

func (s *tagSort) Len() int {
	return len(s.q)
}

func (s *tagSort) Less(i, j int) bool {
	return s.q[i] > s.q[j]
}

func (s *tagSort) Swap(i, j int) {
	s.tag[i], s.tag[j] = s.tag[j], s.tag[i]
	s.q[i], s.q[j] = s.q[j], s.q[i]
}
|
3686
vendor/golang.org/x/text/language/tables.go
generated
vendored
Normal file
3686
vendor/golang.org/x/text/language/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
143
vendor/golang.org/x/text/language/tags.go
generated
vendored
Normal file
143
vendor/golang.org/x/text/language/tags.go
generated
vendored
Normal file
@ -0,0 +1,143 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package language
|
||||||
|
|
||||||
|
// TODO: Various sets of commonly use tags and regions.
|
||||||
|
|
||||||
|
// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
// It simplifies safe initialization of Tag values.
// Use Parse to handle errors from untrusted input without panicking.
func MustParse(s string) Tag {
	t, err := Parse(s)
	if err != nil {
		panic(err)
	}
	return t
}
|
||||||
|
|
||||||
|
// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
// It simplifies safe initialization of Tag values.
// Use c.Parse to handle errors from untrusted input without panicking.
func (c CanonType) MustParse(s string) Tag {
	t, err := c.Parse(s)
	if err != nil {
		panic(err)
	}
	return t
}
|
||||||
|
|
||||||
|
// MustParseBase is like ParseBase, but panics if the given base cannot be parsed.
// It simplifies safe initialization of Base values.
// Use ParseBase to handle errors from untrusted input without panicking.
func MustParseBase(s string) Base {
	b, err := ParseBase(s)
	if err != nil {
		panic(err)
	}
	return b
}
|
||||||
|
|
||||||
|
// MustParseScript is like ParseScript, but panics if the given script cannot be
// parsed. It simplifies safe initialization of Script values.
// Use ParseScript to handle errors from untrusted input without panicking.
func MustParseScript(s string) Script {
	scr, err := ParseScript(s)
	if err != nil {
		panic(err)
	}
	return scr
}
|
||||||
|
|
||||||
|
// MustParseRegion is like ParseRegion, but panics if the given region cannot be
// parsed. It simplifies safe initialization of Region values.
// Use ParseRegion to handle errors from untrusted input without panicking.
func MustParseRegion(s string) Region {
	r, err := ParseRegion(s)
	if err != nil {
		panic(err)
	}
	return r
}
|
||||||
|
|
||||||
|
var (
	// und is the zero-value tag used internally for "undetermined".
	und = Tag{}

	// Und is the exported undetermined (empty) language tag.
	Und Tag = Tag{}

	// Predefined tags for commonly used languages, each annotated with its
	// BCP 47 string form.
	Afrikaans            Tag = Tag{lang: _af}                // af
	Amharic              Tag = Tag{lang: _am}                // am
	Arabic               Tag = Tag{lang: _ar}                // ar
	ModernStandardArabic Tag = Tag{lang: _ar, region: _001}  // ar-001
	Azerbaijani          Tag = Tag{lang: _az}                // az
	Bulgarian            Tag = Tag{lang: _bg}                // bg
	Bengali              Tag = Tag{lang: _bn}                // bn
	Catalan              Tag = Tag{lang: _ca}                // ca
	Czech                Tag = Tag{lang: _cs}                // cs
	Danish               Tag = Tag{lang: _da}                // da
	German               Tag = Tag{lang: _de}                // de
	Greek                Tag = Tag{lang: _el}                // el
	English              Tag = Tag{lang: _en}                // en
	AmericanEnglish      Tag = Tag{lang: _en, region: _US}   // en-US
	BritishEnglish       Tag = Tag{lang: _en, region: _GB}   // en-GB
	Spanish              Tag = Tag{lang: _es}                // es
	EuropeanSpanish      Tag = Tag{lang: _es, region: _ES}   // es-ES
	LatinAmericanSpanish Tag = Tag{lang: _es, region: _419}  // es-419
	Estonian             Tag = Tag{lang: _et}                // et
	Persian              Tag = Tag{lang: _fa}                // fa
	Finnish              Tag = Tag{lang: _fi}                // fi
	Filipino             Tag = Tag{lang: _fil}               // fil
	French               Tag = Tag{lang: _fr}                // fr
	CanadianFrench       Tag = Tag{lang: _fr, region: _CA}   // fr-CA
	Gujarati             Tag = Tag{lang: _gu}                // gu
	Hebrew               Tag = Tag{lang: _he}                // he
	Hindi                Tag = Tag{lang: _hi}                // hi
	Croatian             Tag = Tag{lang: _hr}                // hr
	Hungarian            Tag = Tag{lang: _hu}                // hu
	Armenian             Tag = Tag{lang: _hy}                // hy
	Indonesian           Tag = Tag{lang: _id}                // id
	Icelandic            Tag = Tag{lang: _is}                // is
	Italian              Tag = Tag{lang: _it}                // it
	Japanese             Tag = Tag{lang: _ja}                // ja
	Georgian             Tag = Tag{lang: _ka}                // ka
	Kazakh               Tag = Tag{lang: _kk}                // kk
	Khmer                Tag = Tag{lang: _km}                // km
	Kannada              Tag = Tag{lang: _kn}                // kn
	Korean               Tag = Tag{lang: _ko}                // ko
	Kirghiz              Tag = Tag{lang: _ky}                // ky
	Lao                  Tag = Tag{lang: _lo}                // lo
	Lithuanian           Tag = Tag{lang: _lt}                // lt
	Latvian              Tag = Tag{lang: _lv}                // lv
	Macedonian           Tag = Tag{lang: _mk}                // mk
	Malayalam            Tag = Tag{lang: _ml}                // ml
	Mongolian            Tag = Tag{lang: _mn}                // mn
	Marathi              Tag = Tag{lang: _mr}                // mr
	Malay                Tag = Tag{lang: _ms}                // ms
	Burmese              Tag = Tag{lang: _my}                // my
	Nepali               Tag = Tag{lang: _ne}                // ne
	Dutch                Tag = Tag{lang: _nl}                // nl
	Norwegian            Tag = Tag{lang: _no}                // no
	Punjabi              Tag = Tag{lang: _pa}                // pa
	Polish               Tag = Tag{lang: _pl}                // pl
	Portuguese           Tag = Tag{lang: _pt}                // pt
	BrazilianPortuguese  Tag = Tag{lang: _pt, region: _BR}   // pt-BR
	EuropeanPortuguese   Tag = Tag{lang: _pt, region: _PT}   // pt-PT
	Romanian             Tag = Tag{lang: _ro}                // ro
	Russian              Tag = Tag{lang: _ru}                // ru
	Sinhala              Tag = Tag{lang: _si}                // si
	Slovak               Tag = Tag{lang: _sk}                // sk
	Slovenian            Tag = Tag{lang: _sl}                // sl
	Albanian             Tag = Tag{lang: _sq}                // sq
	Serbian              Tag = Tag{lang: _sr}                // sr
	SerbianLatin         Tag = Tag{lang: _sr, script: _Latn} // sr-Latn
	Swedish              Tag = Tag{lang: _sv}                // sv
	Swahili              Tag = Tag{lang: _sw}                // sw
	Tamil                Tag = Tag{lang: _ta}                // ta
	Telugu               Tag = Tag{lang: _te}                // te
	Thai                 Tag = Tag{lang: _th}                // th
	Turkish              Tag = Tag{lang: _tr}                // tr
	Ukrainian            Tag = Tag{lang: _uk}                // uk
	Urdu                 Tag = Tag{lang: _ur}                // ur
	Uzbek                Tag = Tag{lang: _uz}                // uz
	Vietnamese           Tag = Tag{lang: _vi}                // vi
	Chinese              Tag = Tag{lang: _zh}                // zh
	SimplifiedChinese    Tag = Tag{lang: _zh, script: _Hans} // zh-Hans
	TraditionalChinese   Tag = Tag{lang: _zh, script: _Hant} // zh-Hant
	Zulu                 Tag = Tag{lang: _zu}                // zu
)
|
187
vendor/golang.org/x/text/runes/cond.go
generated
vendored
Normal file
187
vendor/golang.org/x/text/runes/cond.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package runes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is.
|
||||||
|
// This is done for various reasons:
|
||||||
|
// - To retain the semantics of the Nop transformer: if input is passed to a Nop
|
||||||
|
// one would expect it to be unchanged.
|
||||||
|
// - It would be very expensive to pass a converted RuneError to a transformer:
|
||||||
|
// a transformer might need more source bytes after RuneError, meaning that
|
||||||
|
// the only way to pass it safely is to create a new buffer and manage the
|
||||||
|
// intermingling of RuneErrors and normal input.
|
||||||
|
// - Many transformers leave ill-formed UTF-8 as is, so this is not
|
||||||
|
// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a
|
||||||
|
// logical consequence of the operation (as for Map) or if it otherwise would
|
||||||
|
// pose security concerns (as for Remove).
|
||||||
|
// - An alternative would be to return an error on ill-formed UTF-8, but this
|
||||||
|
// would be inconsistent with other operations.
|
||||||
|
|
||||||
|
// If returns a transformer that applies tIn to consecutive runes for which
// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset
// is called on tIn and tNotIn at the start of each run. A Nop transformer will
// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated
// to RuneError to determine which transformer to apply, but is passed as is to
// the respective transformer.
func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
	if tIn == nil && tNotIn == nil {
		return Transformer{transform.Nop}
	}
	if tIn == nil {
		tIn = transform.Nop
	}
	if tNotIn == nil {
		tNotIn = transform.Nop
	}
	// Wrap plain Transformers so that both branches implement
	// transform.SpanningTransformer.
	sIn, ok := tIn.(transform.SpanningTransformer)
	if !ok {
		sIn = dummySpan{tIn}
	}
	sNotIn, ok := tNotIn.(transform.SpanningTransformer)
	if !ok {
		sNotIn = dummySpan{tNotIn}
	}

	a := &cond{
		tIn:    sIn,
		tNotIn: sNotIn,
		f:      s.Contains,
	}
	a.Reset()
	return Transformer{a}
}
|
||||||
|
|
||||||
|
// dummySpan wraps a plain Transformer so that it satisfies
// transform.SpanningTransformer; its Span never reports a matching span.
type dummySpan struct{ transform.Transformer }

func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
	return 0, transform.ErrEndOfSpan
}
|
||||||
|
|
||||||
|
// cond alternates between two transformers depending on whether the
// current rune satisfies f.
type cond struct {
	tIn, tNotIn transform.SpanningTransformer
	f           func(rune) bool
	check       func(rune) bool               // current check to perform
	t           transform.SpanningTransformer // current transformer to use
}
|
||||||
|
|
||||||
|
// Reset implements transform.Transformer. It puts the receiver back in
// the "in set" state.
func (t *cond) Reset() {
	t.check = t.is
	t.t = t.tIn
	t.t.Reset() // notIn will be reset on first usage.
}

// is reports whether r is in the set; on the first rune outside the set
// it switches the receiver's state to the "not in" transformer.
func (t *cond) is(r rune) bool {
	if t.f(r) {
		return true
	}
	t.check = t.isNot
	t.t = t.tNotIn
	t.tNotIn.Reset()
	return false
}

// isNot reports whether r is outside the set; on the first rune inside
// the set it switches the receiver's state back to the "in" transformer.
func (t *cond) isNot(r rune) bool {
	if !t.f(r) {
		return true
	}
	t.check = t.is
	t.t = t.tIn
	t.tIn.Reset()
	return false
}
|
||||||
|
|
||||||
|
// This implementation of Span doesn't help all too much, but it needs to be
// there to satisfy this package's Transformer interface.
// TODO: there is certainly room for improvements, though. For example, if
// t.t == transform.Nop (which will be a common occurrence) it will save a
// bundle to special-case that loop.
func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
	p := 0
	for n < len(src) && err == nil {
		// Don't process too much at a time as the Spanner that will be
		// called on this block may terminate early.
		const maxChunk = 4096
		max := len(src)
		if v := n + maxChunk; v < max {
			max = v
		}
		atEnd := false
		size := 0
		current := t.t
		// Extend p over the run of runes handled by the current transformer.
		for ; p < max; p += size {
			r := rune(src[p])
			if r < utf8.RuneSelf {
				size = 1
			} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
				// Incomplete UTF-8 sequence at the end of the input.
				if !atEOF && !utf8.FullRune(src[p:]) {
					err = transform.ErrShortSrc
					break
				}
			}
			if !t.check(r) {
				// The next rune will be the start of a new run.
				atEnd = true
				break
			}
		}
		n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
		n += n2
		if err2 != nil {
			return n, err2
		}
		// At this point either err != nil or t.check will pass for the rune at p.
		p = n + size
	}
	return n, err
}
|
||||||
|
|
||||||
|
func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
p := 0
|
||||||
|
for nSrc < len(src) && err == nil {
|
||||||
|
// Don't process too much at a time, as the work might be wasted if the
|
||||||
|
// destination buffer isn't large enough to hold the result or a
|
||||||
|
// transform returns an error early.
|
||||||
|
const maxChunk = 4096
|
||||||
|
max := len(src)
|
||||||
|
if n := nSrc + maxChunk; n < len(src) {
|
||||||
|
max = n
|
||||||
|
}
|
||||||
|
atEnd := false
|
||||||
|
size := 0
|
||||||
|
current := t.t
|
||||||
|
for ; p < max; p += size {
|
||||||
|
r := rune(src[p])
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
|
||||||
|
if !atEOF && !utf8.FullRune(src[p:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !t.check(r) {
|
||||||
|
// The next rune will be the start of a new run.
|
||||||
|
atEnd = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src)))
|
||||||
|
nDst += nDst2
|
||||||
|
nSrc += nSrc2
|
||||||
|
if err2 != nil {
|
||||||
|
return nDst, nSrc, err2
|
||||||
|
}
|
||||||
|
// At this point either err != nil or t.check will pass for the rune at p.
|
||||||
|
p = nSrc + size
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
355
vendor/golang.org/x/text/runes/runes.go
generated
vendored
Normal file
355
vendor/golang.org/x/text/runes/runes.go
generated
vendored
Normal file
@ -0,0 +1,355 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package runes provide transforms for UTF-8 encoded text.
|
||||||
|
package runes // import "golang.org/x/text/runes"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Set is a collection of runes.
|
||||||
|
type Set interface {
|
||||||
|
// Contains returns true if r is contained in the set.
|
||||||
|
Contains(r rune) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type setFunc func(rune) bool
|
||||||
|
|
||||||
|
func (s setFunc) Contains(r rune) bool {
|
||||||
|
return s(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note: using funcs here instead of wrapping types result in cleaner
|
||||||
|
// documentation and a smaller API.
|
||||||
|
|
||||||
|
// In creates a Set with a Contains method that returns true for all runes in
|
||||||
|
// the given RangeTable.
|
||||||
|
func In(rt *unicode.RangeTable) Set {
|
||||||
|
return setFunc(func(r rune) bool { return unicode.Is(rt, r) })
|
||||||
|
}
|
||||||
|
|
||||||
|
// In creates a Set with a Contains method that returns true for all runes not
|
||||||
|
// in the given RangeTable.
|
||||||
|
func NotIn(rt *unicode.RangeTable) Set {
|
||||||
|
return setFunc(func(r rune) bool { return !unicode.Is(rt, r) })
|
||||||
|
}
|
||||||
|
|
||||||
|
// Predicate creates a Set with a Contains method that returns f(r).
|
||||||
|
func Predicate(f func(rune) bool) Set {
|
||||||
|
return setFunc(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transformer implements the transform.Transformer interface.
|
||||||
|
type Transformer struct {
|
||||||
|
t transform.SpanningTransformer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
return t.t.Transform(dst, src, atEOF)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
|
||||||
|
return t.t.Span(b, atEOF)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t Transformer) Reset() { t.t.Reset() }
|
||||||
|
|
||||||
|
// Bytes returns a new byte slice with the result of converting b using t. It
|
||||||
|
// calls Reset on t. It returns nil if any error was found. This can only happen
|
||||||
|
// if an error-producing Transformer is passed to If.
|
||||||
|
func (t Transformer) Bytes(b []byte) []byte {
|
||||||
|
b, _, err := transform.Bytes(t, b)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a string with the result of converting s using t. It calls
|
||||||
|
// Reset on t. It returns the empty string if any error was found. This can only
|
||||||
|
// happen if an error-producing Transformer is passed to If.
|
||||||
|
func (t Transformer) String(s string) string {
|
||||||
|
s, _, err := transform.String(t, s)
|
||||||
|
if err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO:
|
||||||
|
// - Copy: copying strings and bytes in whole-rune units.
|
||||||
|
// - Validation (maybe)
|
||||||
|
// - Well-formed-ness (maybe)
|
||||||
|
|
||||||
|
const runeErrorString = string(utf8.RuneError)
|
||||||
|
|
||||||
|
// Remove returns a Transformer that removes runes r for which s.Contains(r).
|
||||||
|
// Illegal input bytes are replaced by RuneError before being passed to f.
|
||||||
|
func Remove(s Set) Transformer {
|
||||||
|
if f, ok := s.(setFunc); ok {
|
||||||
|
// This little trick cuts the running time of BenchmarkRemove for sets
|
||||||
|
// created by Predicate roughly in half.
|
||||||
|
// TODO: special-case RangeTables as well.
|
||||||
|
return Transformer{remove(f)}
|
||||||
|
}
|
||||||
|
return Transformer{remove(s.Contains)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: remove transform.RemoveFunc.
|
||||||
|
|
||||||
|
type remove func(r rune) bool
|
||||||
|
|
||||||
|
func (remove) Reset() {}
|
||||||
|
|
||||||
|
// Span implements transform.Spanner.
|
||||||
|
func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
|
||||||
|
for r, size := rune(0), 0; n < len(src); {
|
||||||
|
if r = rune(src[n]); r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
|
||||||
|
// Invalid rune.
|
||||||
|
if !atEOF && !utf8.FullRune(src[n:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
} else {
|
||||||
|
err = transform.ErrEndOfSpan
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if t(r) {
|
||||||
|
err = transform.ErrEndOfSpan
|
||||||
|
break
|
||||||
|
}
|
||||||
|
n += size
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transform implements transform.Transformer.
|
||||||
|
func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
for r, size := rune(0), 0; nSrc < len(src); {
|
||||||
|
if r = rune(src[nSrc]); r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
|
||||||
|
// Invalid rune.
|
||||||
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// We replace illegal bytes with RuneError. Not doing so might
|
||||||
|
// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
|
||||||
|
// The resulting byte sequence may subsequently contain runes
|
||||||
|
// for which t(r) is true that were passed unnoticed.
|
||||||
|
if !t(utf8.RuneError) {
|
||||||
|
if nDst+3 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+0] = runeErrorString[0]
|
||||||
|
dst[nDst+1] = runeErrorString[1]
|
||||||
|
dst[nDst+2] = runeErrorString[2]
|
||||||
|
nDst += 3
|
||||||
|
}
|
||||||
|
nSrc++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if t(r) {
|
||||||
|
nSrc += size
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if nDst+size > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
for i := 0; i < size; i++ {
|
||||||
|
dst[nDst] = src[nSrc]
|
||||||
|
nDst++
|
||||||
|
nSrc++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map returns a Transformer that maps the runes in the input using the given
|
||||||
|
// mapping. Illegal bytes in the input are converted to utf8.RuneError before
|
||||||
|
// being passed to the mapping func.
|
||||||
|
func Map(mapping func(rune) rune) Transformer {
|
||||||
|
return Transformer{mapper(mapping)}
|
||||||
|
}
|
||||||
|
|
||||||
|
type mapper func(rune) rune
|
||||||
|
|
||||||
|
func (mapper) Reset() {}
|
||||||
|
|
||||||
|
// Span implements transform.Spanner.
|
||||||
|
func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
|
||||||
|
for r, size := rune(0), 0; n < len(src); n += size {
|
||||||
|
if r = rune(src[n]); r < utf8.RuneSelf {
|
||||||
|
size = 1
|
||||||
|
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
|
||||||
|
// Invalid rune.
|
||||||
|
if !atEOF && !utf8.FullRune(src[n:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
} else {
|
||||||
|
err = transform.ErrEndOfSpan
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if t(r) != r {
|
||||||
|
err = transform.ErrEndOfSpan
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transform implements transform.Transformer.
|
||||||
|
func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
var replacement rune
|
||||||
|
var b [utf8.UTFMax]byte
|
||||||
|
|
||||||
|
for r, size := rune(0), 0; nSrc < len(src); {
|
||||||
|
if r = rune(src[nSrc]); r < utf8.RuneSelf {
|
||||||
|
if replacement = t(r); replacement < utf8.RuneSelf {
|
||||||
|
if nDst == len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst] = byte(replacement)
|
||||||
|
nDst++
|
||||||
|
nSrc++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
size = 1
|
||||||
|
} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
|
||||||
|
// Invalid rune.
|
||||||
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if replacement = t(utf8.RuneError); replacement == utf8.RuneError {
|
||||||
|
if nDst+3 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+0] = runeErrorString[0]
|
||||||
|
dst[nDst+1] = runeErrorString[1]
|
||||||
|
dst[nDst+2] = runeErrorString[2]
|
||||||
|
nDst += 3
|
||||||
|
nSrc++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if replacement = t(r); replacement == r {
|
||||||
|
if nDst+size > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
for i := 0; i < size; i++ {
|
||||||
|
dst[nDst] = src[nSrc]
|
||||||
|
nDst++
|
||||||
|
nSrc++
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
n := utf8.EncodeRune(b[:], replacement)
|
||||||
|
|
||||||
|
if nDst+n > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
dst[nDst] = b[i]
|
||||||
|
nDst++
|
||||||
|
}
|
||||||
|
nSrc += size
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReplaceIllFormed returns a transformer that replaces all input bytes that are
|
||||||
|
// not part of a well-formed UTF-8 code sequence with utf8.RuneError.
|
||||||
|
func ReplaceIllFormed() Transformer {
|
||||||
|
return Transformer{&replaceIllFormed{}}
|
||||||
|
}
|
||||||
|
|
||||||
|
type replaceIllFormed struct{ transform.NopResetter }
|
||||||
|
|
||||||
|
func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
|
||||||
|
for n < len(src) {
|
||||||
|
// ASCII fast path.
|
||||||
|
if src[n] < utf8.RuneSelf {
|
||||||
|
n++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
r, size := utf8.DecodeRune(src[n:])
|
||||||
|
|
||||||
|
// Look for a valid non-ASCII rune.
|
||||||
|
if r != utf8.RuneError || size != 1 {
|
||||||
|
n += size
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Look for short source data.
|
||||||
|
if !atEOF && !utf8.FullRune(src[n:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// We have an invalid rune.
|
||||||
|
err = transform.ErrEndOfSpan
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
for nSrc < len(src) {
|
||||||
|
// ASCII fast path.
|
||||||
|
if r := src[nSrc]; r < utf8.RuneSelf {
|
||||||
|
if nDst == len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst] = r
|
||||||
|
nDst++
|
||||||
|
nSrc++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Look for a valid non-ASCII rune.
|
||||||
|
if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
|
||||||
|
if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
nDst += size
|
||||||
|
nSrc += size
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Look for short source data.
|
||||||
|
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// We have an invalid rune.
|
||||||
|
if nDst+3 > len(dst) {
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst[nDst+0] = runeErrorString[0]
|
||||||
|
dst[nDst+1] = runeErrorString[1]
|
||||||
|
dst[nDst+2] = runeErrorString[2]
|
||||||
|
nDst += 3
|
||||||
|
nSrc++
|
||||||
|
}
|
||||||
|
return nDst, nSrc, err
|
||||||
|
}
|
705
vendor/golang.org/x/text/transform/transform.go
generated
vendored
Normal file
705
vendor/golang.org/x/text/transform/transform.go
generated
vendored
Normal file
@ -0,0 +1,705 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package transform provides reader and writer wrappers that transform the
|
||||||
|
// bytes passing through as well as various transformations. Example
|
||||||
|
// transformations provided by other packages include normalization and
|
||||||
|
// conversion between character sets.
|
||||||
|
package transform // import "golang.org/x/text/transform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrShortDst means that the destination buffer was too short to
|
||||||
|
// receive all of the transformed bytes.
|
||||||
|
ErrShortDst = errors.New("transform: short destination buffer")
|
||||||
|
|
||||||
|
// ErrShortSrc means that the source buffer has insufficient data to
|
||||||
|
// complete the transformation.
|
||||||
|
ErrShortSrc = errors.New("transform: short source buffer")
|
||||||
|
|
||||||
|
// ErrEndOfSpan means that the input and output (the transformed input)
|
||||||
|
// are not identical.
|
||||||
|
ErrEndOfSpan = errors.New("transform: input and output are not identical")
|
||||||
|
|
||||||
|
// errInconsistentByteCount means that Transform returned success (nil
|
||||||
|
// error) but also returned nSrc inconsistent with the src argument.
|
||||||
|
errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
|
||||||
|
|
||||||
|
// errShortInternal means that an internal buffer is not large enough
|
||||||
|
// to make progress and the Transform operation must be aborted.
|
||||||
|
errShortInternal = errors.New("transform: short internal buffer")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Transformer transforms bytes.
|
||||||
|
type Transformer interface {
|
||||||
|
// Transform writes to dst the transformed bytes read from src, and
|
||||||
|
// returns the number of dst bytes written and src bytes read. The
|
||||||
|
// atEOF argument tells whether src represents the last bytes of the
|
||||||
|
// input.
|
||||||
|
//
|
||||||
|
// Callers should always process the nDst bytes produced and account
|
||||||
|
// for the nSrc bytes consumed before considering the error err.
|
||||||
|
//
|
||||||
|
// A nil error means that all of the transformed bytes (whether freshly
|
||||||
|
// transformed from src or left over from previous Transform calls)
|
||||||
|
// were written to dst. A nil error can be returned regardless of
|
||||||
|
// whether atEOF is true. If err is nil then nSrc must equal len(src);
|
||||||
|
// the converse is not necessarily true.
|
||||||
|
//
|
||||||
|
// ErrShortDst means that dst was too short to receive all of the
|
||||||
|
// transformed bytes. ErrShortSrc means that src had insufficient data
|
||||||
|
// to complete the transformation. If both conditions apply, then
|
||||||
|
// either error may be returned. Other than the error conditions listed
|
||||||
|
// here, implementations are free to report other errors that arise.
|
||||||
|
Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
|
||||||
|
|
||||||
|
// Reset resets the state and allows a Transformer to be reused.
|
||||||
|
Reset()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanningTransformer extends the Transformer interface with a Span method
|
||||||
|
// that determines how much of the input already conforms to the Transformer.
|
||||||
|
type SpanningTransformer interface {
|
||||||
|
Transformer
|
||||||
|
|
||||||
|
// Span returns a position in src such that transforming src[:n] results in
|
||||||
|
// identical output src[:n] for these bytes. It does not necessarily return
|
||||||
|
// the largest such n. The atEOF argument tells whether src represents the
|
||||||
|
// last bytes of the input.
|
||||||
|
//
|
||||||
|
// Callers should always account for the n bytes consumed before
|
||||||
|
// considering the error err.
|
||||||
|
//
|
||||||
|
// A nil error means that all input bytes are known to be identical to the
|
||||||
|
// output produced by the Transformer. A nil error can be be returned
|
||||||
|
// regardless of whether atEOF is true. If err is nil, then then n must
|
||||||
|
// equal len(src); the converse is not necessarily true.
|
||||||
|
//
|
||||||
|
// ErrEndOfSpan means that the Transformer output may differ from the
|
||||||
|
// input after n bytes. Note that n may be len(src), meaning that the output
|
||||||
|
// would contain additional bytes after otherwise identical output.
|
||||||
|
// ErrShortSrc means that src had insufficient data to determine whether the
|
||||||
|
// remaining bytes would change. Other than the error conditions listed
|
||||||
|
// here, implementations are free to report other errors that arise.
|
||||||
|
//
|
||||||
|
// Calling Span can modify the Transformer state as a side effect. In
|
||||||
|
// effect, it does the transformation just as calling Transform would, only
|
||||||
|
// without copying to a destination buffer and only up to a point it can
|
||||||
|
// determine the input and output bytes are the same. This is obviously more
|
||||||
|
// limited than calling Transform, but can be more efficient in terms of
|
||||||
|
// copying and allocating buffers. Calls to Span and Transform may be
|
||||||
|
// interleaved.
|
||||||
|
Span(src []byte, atEOF bool) (n int, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NopResetter can be embedded by implementations of Transformer to add a nop
|
||||||
|
// Reset method.
|
||||||
|
type NopResetter struct{}
|
||||||
|
|
||||||
|
// Reset implements the Reset method of the Transformer interface.
|
||||||
|
func (NopResetter) Reset() {}
|
||||||
|
|
||||||
|
// Reader wraps another io.Reader by transforming the bytes read.
|
||||||
|
type Reader struct {
|
||||||
|
r io.Reader
|
||||||
|
t Transformer
|
||||||
|
err error
|
||||||
|
|
||||||
|
// dst[dst0:dst1] contains bytes that have been transformed by t but
|
||||||
|
// not yet copied out via Read.
|
||||||
|
dst []byte
|
||||||
|
dst0, dst1 int
|
||||||
|
|
||||||
|
// src[src0:src1] contains bytes that have been read from r but not
|
||||||
|
// yet transformed through t.
|
||||||
|
src []byte
|
||||||
|
src0, src1 int
|
||||||
|
|
||||||
|
// transformComplete is whether the transformation is complete,
|
||||||
|
// regardless of whether or not it was successful.
|
||||||
|
transformComplete bool
|
||||||
|
}
|
||||||
|
|
||||||
|
const defaultBufSize = 4096
|
||||||
|
|
||||||
|
// NewReader returns a new Reader that wraps r by transforming the bytes read
|
||||||
|
// via t. It calls Reset on t.
|
||||||
|
func NewReader(r io.Reader, t Transformer) *Reader {
|
||||||
|
t.Reset()
|
||||||
|
return &Reader{
|
||||||
|
r: r,
|
||||||
|
t: t,
|
||||||
|
dst: make([]byte, defaultBufSize),
|
||||||
|
src: make([]byte, defaultBufSize),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read implements the io.Reader interface.
|
||||||
|
func (r *Reader) Read(p []byte) (int, error) {
|
||||||
|
n, err := 0, error(nil)
|
||||||
|
for {
|
||||||
|
// Copy out any transformed bytes and return the final error if we are done.
|
||||||
|
if r.dst0 != r.dst1 {
|
||||||
|
n = copy(p, r.dst[r.dst0:r.dst1])
|
||||||
|
r.dst0 += n
|
||||||
|
if r.dst0 == r.dst1 && r.transformComplete {
|
||||||
|
return n, r.err
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
} else if r.transformComplete {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to transform some source bytes, or to flush the transformer if we
|
||||||
|
// are out of source bytes. We do this even if r.r.Read returned an error.
|
||||||
|
// As the io.Reader documentation says, "process the n > 0 bytes returned
|
||||||
|
// before considering the error".
|
||||||
|
if r.src0 != r.src1 || r.err != nil {
|
||||||
|
r.dst0 = 0
|
||||||
|
r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
|
||||||
|
r.src0 += n
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
if r.src0 != r.src1 {
|
||||||
|
r.err = errInconsistentByteCount
|
||||||
|
}
|
||||||
|
// The Transform call was successful; we are complete if we
|
||||||
|
// cannot read more bytes into src.
|
||||||
|
r.transformComplete = r.err != nil
|
||||||
|
continue
|
||||||
|
case err == ErrShortDst && (r.dst1 != 0 || n != 0):
|
||||||
|
// Make room in dst by copying out, and try again.
|
||||||
|
continue
|
||||||
|
case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
|
||||||
|
// Read more bytes into src via the code below, and try again.
|
||||||
|
default:
|
||||||
|
r.transformComplete = true
|
||||||
|
// The reader error (r.err) takes precedence over the
|
||||||
|
// transformer error (err) unless r.err is nil or io.EOF.
|
||||||
|
if r.err == nil || r.err == io.EOF {
|
||||||
|
r.err = err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move any untransformed source bytes to the start of the buffer
|
||||||
|
// and read more bytes.
|
||||||
|
if r.src0 != 0 {
|
||||||
|
r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
|
||||||
|
}
|
||||||
|
n, r.err = r.r.Read(r.src[r.src1:])
|
||||||
|
r.src1 += n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: implement ReadByte (and ReadRune??).
|
||||||
|
|
||||||
|
// Writer wraps another io.Writer by transforming the bytes read.
|
||||||
|
// The user needs to call Close to flush unwritten bytes that may
|
||||||
|
// be buffered.
|
||||||
|
type Writer struct {
|
||||||
|
w io.Writer
|
||||||
|
t Transformer
|
||||||
|
dst []byte
|
||||||
|
|
||||||
|
// src[:n] contains bytes that have not yet passed through t.
|
||||||
|
src []byte
|
||||||
|
n int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWriter returns a new Writer that wraps w by transforming the bytes written
|
||||||
|
// via t. It calls Reset on t.
|
||||||
|
func NewWriter(w io.Writer, t Transformer) *Writer {
|
||||||
|
t.Reset()
|
||||||
|
return &Writer{
|
||||||
|
w: w,
|
||||||
|
t: t,
|
||||||
|
dst: make([]byte, defaultBufSize),
|
||||||
|
src: make([]byte, defaultBufSize),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write implements the io.Writer interface. If there are not enough
|
||||||
|
// bytes available to complete a Transform, the bytes will be buffered
|
||||||
|
// for the next write. Call Close to convert the remaining bytes.
|
||||||
|
func (w *Writer) Write(data []byte) (n int, err error) {
|
||||||
|
src := data
|
||||||
|
if w.n > 0 {
|
||||||
|
// Append bytes from data to the last remainder.
|
||||||
|
// TODO: limit the amount copied on first try.
|
||||||
|
n = copy(w.src[w.n:], data)
|
||||||
|
w.n += n
|
||||||
|
src = w.src[:w.n]
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
nDst, nSrc, err := w.t.Transform(w.dst, src, false)
|
||||||
|
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
|
||||||
|
return n, werr
|
||||||
|
}
|
||||||
|
src = src[nSrc:]
|
||||||
|
if w.n == 0 {
|
||||||
|
n += nSrc
|
||||||
|
} else if len(src) <= n {
|
||||||
|
// Enough bytes from w.src have been consumed. We make src point
|
||||||
|
// to data instead to reduce the copying.
|
||||||
|
w.n = 0
|
||||||
|
n -= len(src)
|
||||||
|
src = data[n:]
|
||||||
|
if n < len(data) && (err == nil || err == ErrShortSrc) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch err {
|
||||||
|
case ErrShortDst:
|
||||||
|
// This error is okay as long as we are making progress.
|
||||||
|
if nDst > 0 || nSrc > 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case ErrShortSrc:
|
||||||
|
if len(src) < len(w.src) {
|
||||||
|
m := copy(w.src, src)
|
||||||
|
// If w.n > 0, bytes from data were already copied to w.src and n
|
||||||
|
// was already set to the number of bytes consumed.
|
||||||
|
if w.n == 0 {
|
||||||
|
n += m
|
||||||
|
}
|
||||||
|
w.n = m
|
||||||
|
err = nil
|
||||||
|
} else if nDst > 0 || nSrc > 0 {
|
||||||
|
// Not enough buffer to store the remainder. Keep processing as
|
||||||
|
// long as there is progress. Without this case, transforms that
|
||||||
|
// require a lookahead larger than the buffer may result in an
|
||||||
|
// error. This is not something one may expect to be common in
|
||||||
|
// practice, but it may occur when buffers are set to small
|
||||||
|
// sizes during testing.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case nil:
|
||||||
|
if w.n > 0 {
|
||||||
|
err = errInconsistentByteCount
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close implements the io.Closer interface.
|
||||||
|
func (w *Writer) Close() error {
|
||||||
|
src := w.src[:w.n]
|
||||||
|
for {
|
||||||
|
nDst, nSrc, err := w.t.Transform(w.dst, src, true)
|
||||||
|
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
|
||||||
|
return werr
|
||||||
|
}
|
||||||
|
if err != ErrShortDst {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
src = src[nSrc:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type nop struct{ NopResetter }
|
||||||
|
|
||||||
|
func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
n := copy(dst, src)
|
||||||
|
if n < len(src) {
|
||||||
|
err = ErrShortDst
|
||||||
|
}
|
||||||
|
return n, n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (nop) Span(src []byte, atEOF bool) (n int, err error) {
|
||||||
|
return len(src), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type discard struct{ NopResetter }
|
||||||
|
|
||||||
|
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
return 0, len(src), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Discard is a Transformer for which all Transform calls succeed
|
||||||
|
// by consuming all bytes and writing nothing.
|
||||||
|
Discard Transformer = discard{}
|
||||||
|
|
||||||
|
// Nop is a SpanningTransformer that copies src to dst.
|
||||||
|
Nop SpanningTransformer = nop{}
|
||||||
|
)
|
||||||
|
|
||||||
|
// chain is a sequence of links. A chain with N Transformers has N+1 links and
|
||||||
|
// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst
|
||||||
|
// buffers given to chain.Transform and the middle N-1 buffers are intermediate
|
||||||
|
// buffers owned by the chain. The i'th link transforms bytes from the i'th
|
||||||
|
// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer
|
||||||
|
// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N).
|
||||||
|
type chain struct {
|
||||||
|
link []link
|
||||||
|
err error
|
||||||
|
// errStart is the index at which the error occurred plus 1. Processing
|
||||||
|
// errStart at this level at the next call to Transform. As long as
|
||||||
|
// errStart > 0, chain will not consume any more source bytes.
|
||||||
|
errStart int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *chain) fatalError(errIndex int, err error) {
|
||||||
|
if i := errIndex + 1; i > c.errStart {
|
||||||
|
c.errStart = i
|
||||||
|
c.err = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type link struct {
|
||||||
|
t Transformer
|
||||||
|
// b[p:n] holds the bytes to be transformed by t.
|
||||||
|
b []byte
|
||||||
|
p int
|
||||||
|
n int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *link) src() []byte {
|
||||||
|
return l.b[l.p:l.n]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *link) dst() []byte {
|
||||||
|
return l.b[l.n:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chain returns a Transformer that applies t in sequence.
|
||||||
|
func Chain(t ...Transformer) Transformer {
|
||||||
|
if len(t) == 0 {
|
||||||
|
return nop{}
|
||||||
|
}
|
||||||
|
c := &chain{link: make([]link, len(t)+1)}
|
||||||
|
for i, tt := range t {
|
||||||
|
c.link[i].t = tt
|
||||||
|
}
|
||||||
|
// Allocate intermediate buffers.
|
||||||
|
b := make([][defaultBufSize]byte, len(t)-1)
|
||||||
|
for i := range b {
|
||||||
|
c.link[i+1].b = b[i][:]
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset resets the state of Chain. It calls Reset on all the Transformers.
|
||||||
|
func (c *chain) Reset() {
|
||||||
|
for i, l := range c.link {
|
||||||
|
if l.t != nil {
|
||||||
|
l.t.Reset()
|
||||||
|
}
|
||||||
|
c.link[i].p, c.link[i].n = 0, 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: make chain use Span (is going to be fun to implement!)

// Transform applies the transformers of c in sequence.
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	// Set up src and dst in the chain. The first link reads from src, the
	// last link writes into dst; the links in between use internal buffers.
	srcL := &c.link[0]
	dstL := &c.link[len(c.link)-1]
	srcL.b, srcL.p, srcL.n = src, 0, len(src)
	dstL.b, dstL.n = dst, 0
	var lastFull, needProgress bool // for detecting progress

	// i is the index of the next Transformer to apply, for i in [low, high].
	// low is the lowest index for which c.link[low] may still produce bytes.
	// high is the highest index for which c.link[high] has a Transformer.
	// The error returned by Transform determines whether to increase or
	// decrease i. We try to completely fill a buffer before converting it.
	for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; {
		in, out := &c.link[i], &c.link[i+1]
		nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i)
		out.n += nDst
		in.p += nSrc
		if i > 0 && in.p == in.n {
			// Intermediate buffer fully consumed: rewind it for reuse.
			in.p, in.n = 0, 0
		}
		// lastFull from the previous iteration becomes needProgress: if the
		// previous step filled a destination, this step must consume bytes.
		needProgress, lastFull = lastFull, false
		switch err0 {
		case ErrShortDst:
			// Process the destination buffer next. Return if we are already
			// at the high index.
			if i == high {
				return dstL.n, srcL.p, ErrShortDst
			}
			if out.n != 0 {
				i++
				// If the Transformer at the next index is not able to process any
				// source bytes there is nothing that can be done to make progress
				// and the bytes will remain unprocessed. lastFull is used to
				// detect this and break out of the loop with a fatal error.
				lastFull = true
				continue
			}
			// The destination buffer was too small, but is completely empty.
			// Return a fatal error as this transformation can never complete.
			c.fatalError(i, errShortInternal)
		case ErrShortSrc:
			if i == 0 {
				// Save ErrShortSrc in err. All other errors take precedence.
				err = ErrShortSrc
				break
			}
			// Source bytes were depleted before filling up the destination buffer.
			// Verify we made some progress, move the remaining bytes to the errStart
			// and try to get more source bytes.
			if needProgress && nSrc == 0 || in.n-in.p == len(in.b) {
				// There were not enough source bytes to proceed while the source
				// buffer cannot hold any more bytes. Return a fatal error as this
				// transformation can never complete.
				c.fatalError(i, errShortInternal)
				break
			}
			// in.b is an internal buffer and we can make progress.
			in.p, in.n = 0, copy(in.b, in.src())
			fallthrough
		case nil:
			// if i == low, we have depleted the bytes at index i or any lower levels.
			// In that case we increase low and i. In all other cases we decrease i to
			// fetch more bytes before proceeding to the next index.
			if i > low {
				i--
				continue
			}
		default:
			c.fatalError(i, err0)
		}
		// Exhausted level low or fatal error: increase low and continue
		// to process the bytes accepted so far.
		i++
		low = i
	}

	// If c.errStart > 0, this means we found a fatal error. We will clear
	// all upstream buffers. At this point, no more progress can be made
	// downstream, as Transform would have bailed while handling ErrShortDst.
	if c.errStart > 0 {
		for i := 1; i < c.errStart; i++ {
			c.link[i].p, c.link[i].n = 0, 0
		}
		err, c.errStart, c.err = c.err, 0, nil
	}
	return dstL.n, srcL.p, err
}
|
||||||
|
|
||||||
|
// Deprecated: use runes.Remove instead.
|
||||||
|
func RemoveFunc(f func(r rune) bool) Transformer {
|
||||||
|
return removeF(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// removeF adapts a rune predicate to the Transformer interface: runes for
// which the function reports true are removed from the output.
type removeF func(r rune) bool

// Reset implements the Transformer interface. removeF carries no state, so
// this is a no-op.
func (removeF) Reset() {}
|
||||||
|
|
||||||
|
// Transform implements the Transformer interface. It copies src to dst,
// dropping every rune r for which t(r) is true, and replaces invalid UTF-8
// bytes with utf8.RuneError unless t removes that rune as well.
func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] {

		if r = rune(src[0]); r < utf8.RuneSelf {
			// Fast path for ASCII: a single byte is a whole rune.
			sz = 1
		} else {
			r, sz = utf8.DecodeRune(src)

			if sz == 1 {
				// Invalid rune.
				if !atEOF && !utf8.FullRune(src) {
					// The bad prefix may just be a truncated rune; ask the
					// caller for more input before deciding.
					err = ErrShortSrc
					break
				}
				// We replace illegal bytes with RuneError. Not doing so might
				// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
				// The resulting byte sequence may subsequently contain runes
				// for which t(r) is true that were passed unnoticed.
				if !t(r) {
					// 3 is the UTF-8 encoded length of "\uFFFD" (RuneError).
					if nDst+3 > len(dst) {
						err = ErrShortDst
						break
					}
					nDst += copy(dst[nDst:], "\uFFFD")
				}
				nSrc++
				continue
			}
		}

		if !t(r) {
			if nDst+sz > len(dst) {
				err = ErrShortDst
				break
			}
			nDst += copy(dst[nDst:], src[:sz])
		}
		nSrc += sz
	}
	return
}
|
||||||
|
|
||||||
|
// grow returns a new []byte that is longer than b, and copies the first n bytes
// of b to the start of the new slice.
func grow(b []byte, n int) []byte {
	// Growth policy: start at 64 bytes, double while the buffer is small,
	// then grow by 50% to limit over-allocation for large buffers.
	var size int
	switch m := len(b); {
	case m <= 32:
		size = 64
	case m <= 256:
		size = 2 * m
	default:
		size = m + m/2
	}
	out := make([]byte, size)
	copy(out, b[:n])
	return out
}
|
||||||
|
|
||||||
|
// initialBufSize is the initial chunk size of the stack-allocated buffers
// used by String; grow enlarges them as needed for larger inputs.
const initialBufSize = 128
|
||||||
|
|
||||||
|
// String returns a string with the result of converting s[:n] using t, where
// n <= len(s). If err == nil, n will be len(s). It calls Reset on t.
func String(t Transformer, s string) (result string, n int, err error) {
	t.Reset()
	if s == "" {
		// Fast path for the common case for empty input. Results in about a
		// 86% reduction of running time for BenchmarkStringLowerEmpty.
		// Only taken when the transformer succeeds on empty input; otherwise
		// fall through to the general path so the error is reported normally.
		if _, _, err := t.Transform(nil, nil, true); err == nil {
			return "", 0, nil
		}
	}

	// Allocate only once. Note that both dst and src escape when passed to
	// Transform.
	buf := [2 * initialBufSize]byte{}
	dst := buf[:initialBufSize:initialBufSize]
	src := buf[initialBufSize : 2*initialBufSize]

	// The input string s is transformed in multiple chunks (starting with a
	// chunk size of initialBufSize). nDst and nSrc are per-chunk (or
	// per-Transform-call) indexes, pDst and pSrc are overall indexes.
	nDst, nSrc := 0, 0
	pDst, pSrc := 0, 0

	// pPrefix is the length of a common prefix: the first pPrefix bytes of the
	// result will equal the first pPrefix bytes of s. It is not guaranteed to
	// be the largest such value, but if pPrefix, len(result) and len(s) are
	// all equal after the final transform (i.e. calling Transform with atEOF
	// being true returned nil error) then we don't need to allocate a new
	// result string.
	pPrefix := 0
	for {
		// Invariant: pDst == pPrefix && pSrc == pPrefix.

		n := copy(src, s[pSrc:])
		nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s))
		pDst += nDst
		pSrc += nSrc

		// TODO: let transformers implement an optional Spanner interface, akin
		// to norm's QuickSpan. This would even allow us to avoid any allocation.
		if !bytes.Equal(dst[:nDst], src[:nSrc]) {
			break
		}
		pPrefix = pSrc
		if err == ErrShortDst {
			// A buffer can only be short if a transformer modifies its input.
			break
		} else if err == ErrShortSrc {
			if nSrc == 0 {
				// No progress was made.
				break
			}
			// Equal so far and !atEOF, so continue checking.
		} else if err != nil || pPrefix == len(s) {
			return string(s[:pPrefix]), pPrefix, err
		}
	}
	// Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc.

	// We have transformed the first pSrc bytes of the input s to become pDst
	// transformed bytes. Those transformed bytes are discontiguous: the first
	// pPrefix of them equal s[:pPrefix] and the last nDst of them equal
	// dst[:nDst]. We copy them around, into a new dst buffer if necessary, so
	// that they become one contiguous slice: dst[:pDst].
	if pPrefix != 0 {
		newDst := dst
		if pDst > len(newDst) {
			newDst = make([]byte, len(s)+nDst-nSrc)
		}
		copy(newDst[pPrefix:pDst], dst[:nDst])
		copy(newDst[:pPrefix], s[:pPrefix])
		dst = newDst
	}

	// Prevent duplicate Transform calls with atEOF being true at the end of
	// the input. Also return if we have an unrecoverable error.
	if (err == nil && pSrc == len(s)) ||
		(err != nil && err != ErrShortDst && err != ErrShortSrc) {
		return string(dst[:pDst]), pSrc, err
	}

	// Transform the remaining input, growing dst and src buffers as necessary.
	for {
		n := copy(src, s[pSrc:])
		nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
		pDst += nDst
		pSrc += nSrc

		// If we got ErrShortDst or ErrShortSrc, do not grow as long as we can
		// make progress. This may avoid excessive allocations.
		if err == ErrShortDst {
			if nDst == 0 {
				dst = grow(dst, pDst)
			}
		} else if err == ErrShortSrc {
			if nSrc == 0 {
				src = grow(src, 0)
			}
		} else if err != nil || pSrc == len(s) {
			return string(dst[:pDst]), pSrc, err
		}
	}
}
|
||||||
|
|
||||||
|
// Bytes returns a new byte slice with the result of converting b[:n] using t,
// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t.
func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
	// Start with a destination the same size as the input; doAppend grows it
	// as needed if the transformation expands the data.
	return doAppend(t, 0, make([]byte, len(b)), b)
}
|
||||||
|
|
||||||
|
// Append appends the result of converting src[:n] using t to dst, where
|
||||||
|
// n <= len(src), If err == nil, n will be len(src). It calls Reset on t.
|
||||||
|
func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) {
|
||||||
|
if len(dst) == cap(dst) {
|
||||||
|
n := len(src) + len(dst) // It is okay for this to be 0.
|
||||||
|
b := make([]byte, n)
|
||||||
|
dst = b[:copy(b, dst)]
|
||||||
|
}
|
||||||
|
return doAppend(t, len(dst), dst[:cap(dst)], src)
|
||||||
|
}
|
||||||
|
|
||||||
|
func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) {
|
||||||
|
t.Reset()
|
||||||
|
pSrc := 0
|
||||||
|
for {
|
||||||
|
nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true)
|
||||||
|
pDst += nDst
|
||||||
|
pSrc += nSrc
|
||||||
|
if err != ErrShortDst {
|
||||||
|
return dst[:pDst], pSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Grow the destination buffer, but do not grow as long as we can make
|
||||||
|
// progress. This may avoid excessive allocations.
|
||||||
|
if nDst == 0 {
|
||||||
|
dst = grow(dst, pDst)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
6
vendor/gopkg.in/ini.v1/.gitignore
generated
vendored
Normal file
6
vendor/gopkg.in/ini.v1/.gitignore
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
testdata/conf_out.ini
|
||||||
|
ini.sublime-project
|
||||||
|
ini.sublime-workspace
|
||||||
|
testdata/conf_reflect.ini
|
||||||
|
.idea
|
||||||
|
/.vscode
|
17
vendor/gopkg.in/ini.v1/.travis.yml
generated
vendored
Normal file
17
vendor/gopkg.in/ini.v1/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
sudo: false
|
||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.6.x
|
||||||
|
- 1.7.x
|
||||||
|
- 1.8.x
|
||||||
|
- 1.9.x
|
||||||
|
- 1.10.x
|
||||||
|
- 1.11.x
|
||||||
|
|
||||||
|
script:
|
||||||
|
- go get golang.org/x/tools/cmd/cover
|
||||||
|
- go get github.com/smartystreets/goconvey
|
||||||
|
- mkdir -p $HOME/gopath/src/gopkg.in
|
||||||
|
- ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1
|
||||||
|
- cd $HOME/gopath/src/gopkg.in/ini.v1
|
||||||
|
- go test -v -cover -race
|
191
vendor/gopkg.in/ini.v1/LICENSE
generated
vendored
Normal file
191
vendor/gopkg.in/ini.v1/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction, and
|
||||||
|
distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||||
|
owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||||
|
that control, are controlled by, or are under common control with that entity.
|
||||||
|
For the purposes of this definition, "control" means (i) the power, direct or
|
||||||
|
indirect, to cause the direction or management of such entity, whether by
|
||||||
|
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||||
|
permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications, including
|
||||||
|
but not limited to software source code, documentation source, and configuration
|
||||||
|
files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical transformation or
|
||||||
|
translation of a Source form, including but not limited to compiled object code,
|
||||||
|
generated documentation, and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||||
|
available under the License, as indicated by a copyright notice that is included
|
||||||
|
in or attached to the work (an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||||
|
is based on (or derived from) the Work and for which the editorial revisions,
|
||||||
|
annotations, elaborations, or other modifications represent, as a whole, an
|
||||||
|
original work of authorship. For the purposes of this License, Derivative Works
|
||||||
|
shall not include works that remain separable from, or merely link (or bind by
|
||||||
|
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including the original version
|
||||||
|
of the Work and any modifications or additions to that Work or Derivative Works
|
||||||
|
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||||
|
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||||
|
on behalf of the copyright owner. For the purposes of this definition,
|
||||||
|
"submitted" means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems, and
|
||||||
|
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||||
|
the purpose of discussing and improving the Work, but excluding communication
|
||||||
|
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||||
|
owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||||
|
of whom a Contribution has been received by Licensor and subsequently
|
||||||
|
incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||||
|
Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable (except as stated in this section) patent license to make, have
|
||||||
|
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||||
|
such license applies only to those patent claims licensable by such Contributor
|
||||||
|
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||||
|
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||||
|
submitted. If You institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||||
|
Contribution incorporated within the Work constitutes direct or contributory
|
||||||
|
patent infringement, then any patent licenses granted to You under this License
|
||||||
|
for that Work shall terminate as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution.
|
||||||
|
|
||||||
|
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||||
|
in any medium, with or without modifications, and in Source or Object form,
|
||||||
|
provided that You meet the following conditions:
|
||||||
|
|
||||||
|
You must give any other recipients of the Work or Derivative Works a copy of
|
||||||
|
this License; and
|
||||||
|
You must cause any modified files to carry prominent notices stating that You
|
||||||
|
changed the files; and
|
||||||
|
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||||
|
all copyright, patent, trademark, and attribution notices from the Source form
|
||||||
|
of the Work, excluding those notices that do not pertain to any part of the
|
||||||
|
Derivative Works; and
|
||||||
|
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||||
|
Derivative Works that You distribute must include a readable copy of the
|
||||||
|
attribution notices contained within such NOTICE file, excluding those notices
|
||||||
|
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||||
|
following places: within a NOTICE text file distributed as part of the
|
||||||
|
Derivative Works; within the Source form or documentation, if provided along
|
||||||
|
with the Derivative Works; or, within a display generated by the Derivative
|
||||||
|
Works, if and wherever such third-party notices normally appear. The contents of
|
||||||
|
the NOTICE file are for informational purposes only and do not modify the
|
||||||
|
License. You may add Your own attribution notices within Derivative Works that
|
||||||
|
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||||
|
provided that such additional attribution notices cannot be construed as
|
||||||
|
modifying the License.
|
||||||
|
You may add Your own copyright statement to Your modifications and may provide
|
||||||
|
additional or different license terms and conditions for use, reproduction, or
|
||||||
|
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||||
|
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||||
|
with the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions.
|
||||||
|
|
||||||
|
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||||
|
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||||
|
conditions of this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||||
|
any separate license agreement you may have executed with Licensor regarding
|
||||||
|
such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks.
|
||||||
|
|
||||||
|
This License does not grant permission to use the trade names, trademarks,
|
||||||
|
service marks, or product names of the Licensor, except as required for
|
||||||
|
reasonable and customary use in describing the origin of the Work and
|
||||||
|
reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||||
|
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||||
|
including, without limitation, any warranties or conditions of TITLE,
|
||||||
|
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||||
|
solely responsible for determining the appropriateness of using or
|
||||||
|
redistributing the Work and assume any risks associated with Your exercise of
|
||||||
|
permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability.
|
||||||
|
|
||||||
|
In no event and under no legal theory, whether in tort (including negligence),
|
||||||
|
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||||
|
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special, incidental,
|
||||||
|
or consequential damages of any character arising as a result of this License or
|
||||||
|
out of the use or inability to use the Work (including but not limited to
|
||||||
|
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||||
|
any and all other commercial damages or losses), even if such Contributor has
|
||||||
|
been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability.
|
||||||
|
|
||||||
|
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||||
|
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||||
|
other liability obligations and/or rights consistent with this License. However,
|
||||||
|
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||||
|
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||||
|
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason of your
|
||||||
|
accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following boilerplate
|
||||||
|
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||||
|
identifying information. (Don't include the brackets!) The text should be
|
||||||
|
enclosed in the appropriate comment syntax for the file format. We also
|
||||||
|
recommend that a file or class name and description of purpose be included on
|
||||||
|
the same "printed page" as the copyright notice for easier identification within
|
||||||
|
third-party archives.
|
||||||
|
|
||||||
|
Copyright 2014 Unknwon
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
15
vendor/gopkg.in/ini.v1/Makefile
generated
vendored
Normal file
15
vendor/gopkg.in/ini.v1/Makefile
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
.PHONY: build test bench vet coverage
|
||||||
|
|
||||||
|
build: vet bench
|
||||||
|
|
||||||
|
test:
|
||||||
|
go test -v -cover -race
|
||||||
|
|
||||||
|
bench:
|
||||||
|
go test -v -cover -race -test.bench=. -test.benchmem
|
||||||
|
|
||||||
|
vet:
|
||||||
|
go vet
|
||||||
|
|
||||||
|
coverage:
|
||||||
|
go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
|
46
vendor/gopkg.in/ini.v1/README.md
generated
vendored
Normal file
46
vendor/gopkg.in/ini.v1/README.md
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg)](https://sourcegraph.com/github.com/go-ini/ini)
|
||||||
|
===
|
||||||
|
|
||||||
|
![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
|
||||||
|
|
||||||
|
Package ini provides INI file read and write functionality in Go.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Load from multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites.
|
||||||
|
- Read with recursion values.
|
||||||
|
- Read with parent-child sections.
|
||||||
|
- Read with auto-increment key names.
|
||||||
|
- Read with multiple-line values.
|
||||||
|
- Read with tons of helper methods.
|
||||||
|
- Read and convert values to Go types.
|
||||||
|
- Read and **WRITE** comments of sections and keys.
|
||||||
|
- Manipulate sections, keys and comments with ease.
|
||||||
|
- Keep sections and keys in order as you parse and save.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
The minimum requirement of Go is **1.6**.
|
||||||
|
|
||||||
|
To use a tagged revision:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
$ go get gopkg.in/ini.v1
|
||||||
|
```
|
||||||
|
|
||||||
|
To use with latest changes:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
$ go get github.com/go-ini/ini
|
||||||
|
```
|
||||||
|
|
||||||
|
Please add `-u` flag to update in the future.
|
||||||
|
|
||||||
|
## Getting Help
|
||||||
|
|
||||||
|
- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
|
||||||
|
- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user