Compare commits

...

5 Commits

SHA1 Message Checks Date
13a7dc542f updated .drone.yml
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-16 13:21:56 +02:00
0ea1221164 updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-16 00:47:37 +02:00
b43a8b47e2 adding better search
All checks were successful
continuous-integration/drone/push Build is passing
2024-09-08 18:05:33 +02:00
08c6d20559 updated .drone.yml
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2024-09-03 20:33:39 +02:00
d26b7a5ebc updated .drone.yml
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/tag Build is failing
2024-09-03 20:09:09 +02:00
70 changed files with 6861 additions and 4137 deletions


@ -1,14 +1,19 @@
---
kind: pipeline
type: docker
name: build-linux
name: build-linux-amd64
platform:
os: linux
arch: amd64
steps:
- name: build-linux-amd64
image: golang
- name: build
image: golang:1.23
commands:
- go build -o $PROJECTNAME $GOOPTIONS $SRCFILES
environment:
CGO_ENABLED: 1
GOOS: linux
GOARCH: amd64
GOOPTIONS: -mod=vendor
@ -18,58 +23,80 @@ steps:
event:
exclude:
- tag
- name: build-linux-arm64
image: golang
commands:
- go build -o $PROJECTNAME $GOOPTIONS $SRCFILES
environment:
GOOS: linux
GOARCH: arm64
GOOPTIONS: -mod=vendor
SRCFILES: cmd/qrz/*.go
PROJECTNAME: qrz
when:
event:
exclude:
- tag
---
kind: pipeline
type: docker
name: gitea-release-linux
steps:
- name: build-linux-amd64
image: golang
commands:
- go build -o $PROJECTNAME $GOOPTIONS $SRCFILES
- tar -czvf $PROJECTNAME-$DRONE_TAG-$GOOS-$GOARCH.tar.gz $PROJECTNAME
- echo $PROJECTNAME $DRONE_TAG > VERSION
environment:
GOOS: linux
GOARCH: amd64
GOOPTIONS: -mod=vendor
SRCFILES: cmd/qrz/*.go
PROJECTNAME: qrz
when:
event:
- tag
- name: build-linux-arm64
image: golang
commands:
- go build -o $PROJECTNAME $GOOPTIONS $SRCFILES
- tar -czvf $PROJECTNAME-$DRONE_TAG-$GOOS-$GOARCH.tar.gz $PROJECTNAME
- echo $PROJECTNAME $DRONE_TAG > VERSION
environment:
GOOS: linux
GOARCH: arm64
GOOPTIONS: -mod=vendor
SRCFILES: cmd/qrz/*.go
PROJECTNAME: qrz
when:
event:
- tag
- name: release
image: golang:1.23
commands:
- go build -o $PROJECTNAME $GOOPTIONS $SRCFILES
- tar -czvf $PROJECTNAME-$DRONE_TAG-$GOOS-$GOARCH.tar.gz $PROJECTNAME
- echo $PROJECTNAME $DRONE_TAG > VERSION
environment:
CGO_ENABLED: 1
GOOS: linux
GOARCH: amd64
GOOPTIONS: -mod=vendor
SRCFILES: cmd/qrz/*.go
PROJECTNAME: qrz
when:
event:
- tag
- name: publish
image: plugins/gitea-release
settings:
base_url: https://git.paulbsd.com
api_key:
from_secret: gitea_token
files: "*.tar.gz"
title: VERSION
when:
event:
- tag
---
kind: pipeline
type: docker
name: build-linux-arm64
platform:
os: linux
arch: arm64
steps:
- name: build
image: golang:1.23
commands:
- go build -o $PROJECTNAME $GOOPTIONS $SRCFILES
environment:
CGO_ENABLED: 1
GOOS: linux
GOARCH: arm64
GOOPTIONS: -mod=vendor
SRCFILES: cmd/qrz/*.go
PROJECTNAME: qrz
when:
event:
exclude:
- tag
- name: release
image: golang:1.23
commands:
- go build -o $PROJECTNAME $GOOPTIONS $SRCFILES
- tar -czvf $PROJECTNAME-$DRONE_TAG-$GOOS-$GOARCH.tar.gz $PROJECTNAME
- echo $PROJECTNAME $DRONE_TAG > VERSION
environment:
CGO_ENABLED: 1
GOOS: linux
GOARCH: arm64
GOOPTIONS: -mod=vendor
SRCFILES: cmd/qrz/*.go
PROJECTNAME: qrz
when:
event:
- tag
- name: publish
image: plugins/gitea-release
settings:
base_url: https://git.paulbsd.com

go.mod (14 changed lines)

@ -3,20 +3,20 @@ module git.paulbsd.com/paulbsd/qrz
go 1.23
require (
github.com/antchfx/htmlquery v1.3.2
github.com/antchfx/xpath v1.3.1 // indirect
github.com/antchfx/htmlquery v1.3.3
github.com/antchfx/xpath v1.3.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/labstack/echo/v4 v4.12.0
github.com/lib/pq v1.10.9
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-sqlite3 v1.14.22
github.com/mattn/go-sqlite3 v1.14.24
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.23.0 // indirect
github.com/robfig/cron v1.2.0
golang.org/x/crypto v0.26.0 // indirect
golang.org/x/net v0.28.0
golang.org/x/sys v0.24.0 // indirect
golang.org/x/text v0.17.0 // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/net v0.30.0
golang.org/x/sys v0.26.0 // indirect
golang.org/x/text v0.19.0 // indirect
gopkg.in/ini.v1 v1.67.0
xorm.io/builder v0.3.13 // indirect
xorm.io/xorm v1.3.9

go.sum (14 changed lines)

@ -25,6 +25,8 @@ github.com/antchfx/htmlquery v1.3.1 h1:wm0LxjLMsZhRHfQKKZscDf2COyH4vDYA3wyH+qZ+Y
github.com/antchfx/htmlquery v1.3.1/go.mod h1:PTj+f1V2zksPlwNt7uVvZPsxpKNa7mlVliCRxLX6Nx8=
github.com/antchfx/htmlquery v1.3.2 h1:85YdttVkR1rAY+Oiv/nKI4FCimID+NXhDn82kz3mEvs=
github.com/antchfx/htmlquery v1.3.2/go.mod h1:1mbkcEgEarAokJiWhTfr4hR06w/q2ZZjnYLrDt6CTUk=
github.com/antchfx/htmlquery v1.3.3 h1:x6tVzrRhVNfECDaVxnZi1mEGrQg3mjE/rxbH2Pe6dNE=
github.com/antchfx/htmlquery v1.3.3/go.mod h1:WeU3N7/rL6mb6dCwtE30dURBnBieKDC/fR8t6X+cKjU=
github.com/antchfx/xpath v1.2.3/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
github.com/antchfx/xpath v1.2.4 h1:dW1HB/JxKvGtJ9WyVGJ0sIoEcqftV3SqIstujI+B9XY=
github.com/antchfx/xpath v1.2.4/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
@ -34,6 +36,8 @@ github.com/antchfx/xpath v1.3.0 h1:nTMlzGAK3IJ0bPpME2urTuFL76o4A96iYvoKFHRXJgc=
github.com/antchfx/xpath v1.3.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
github.com/antchfx/xpath v1.3.1 h1:PNbFuUqHwWl0xRjvUPjJ95Agbmdj2uzzIwmQKgu4oCk=
github.com/antchfx/xpath v1.3.1/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
github.com/antchfx/xpath v1.3.2 h1:LNjzlsSjinu3bQpw9hWMY9ocB80oLOWuQqFvO6xt51U=
github.com/antchfx/xpath v1.3.2/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@ -319,6 +323,8 @@ github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@ -518,6 +524,8 @@ golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@ -574,6 +582,8 @@ golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -646,6 +656,8 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -675,6 +687,8 @@ golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=


@ -262,9 +262,9 @@ func ToSlice(qrz Qrz) (out []string) {
type Qrz struct {
ID int `db:"id" xorm:"pk autoincr"`
QRZ string `db:"qrz" xorm:"index notnull"`
DMRID string `db:"dmrid" xorm:"notnull"`
DMRID string `db:"dmrid" xorm:"index notnull"`
Name string `db:"name" xorm:"index notnull"`
Address string `db:"address" xorm:"notnull"`
Address string `db:"address" xorm:"index notnull"`
City string `db:"city" xorm:"index notnull"`
Zipcode string `db:"zipcode" xorm:"index varchar(5) notnull"`
Dept string `db:"dept" xorm:"index notnull"`
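The hunk above adds the xorm "index" attribute to the DMRID and Address columns of the Qrz struct. A minimal, self-contained sketch of how such tags take effect; the engine setup, DSN, and trimmed struct below are assumptions for illustration, not code from this repository:

package main

import (
	"log"

	_ "github.com/lib/pq"
	"xorm.io/xorm"
)

// Qrz mirrors the struct from the diff, trimmed to the columns relevant here.
type Qrz struct {
	ID      int    `xorm:"pk autoincr"`
	QRZ     string `xorm:"index notnull"`
	DMRID   string `xorm:"index notnull"` // index newly requested by this change
	Name    string `xorm:"index notnull"`
	Address string `xorm:"index notnull"` // index newly requested by this change
}

func main() {
	// Hypothetical Postgres DSN; any driver supported by xorm behaves the same way.
	engine, err := xorm.NewEngine("postgres", "postgres://user:pass@localhost/qrz?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// Sync2 compares the struct tags with the live table and creates the
	// missing secondary indexes without touching the stored rows.
	if err := engine.Sync2(new(Qrz)); err != nil {
		log.Fatal(err)
	}
}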


@ -226,9 +226,9 @@ func SetSearchLike(config config.Config, qrzdt *QrzDatatableInput) (searchstmt s
var searchstr string
switch config.DbType {
case "sqlite3":
searchstr = "%s LIKE '%%%s%%'"
searchstr = "%s LIKE '%s%%'"
case "mysql":
searchstr = "%s LIKE '%%%s%%'"
searchstr = "%s LIKE '%s%%'"
case "postgresql":
searchstr = "%s ~* '%s'"
default:
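The hunk above narrows the sqlite3 and mysql search patterns from a substring match to a prefix match, while the postgresql branch keeps its regex form; a pattern without a leading wildcard can also take advantage of ordinary column indexes. A standalone sketch of what the two format strings expand to, with a hypothetical column and search term:

package main

import "fmt"

func main() {
	column, term := "name", "paul" // hypothetical inputs for illustration

	before := fmt.Sprintf("%s LIKE '%%%s%%'", column, term) // name LIKE '%paul%'
	after := fmt.Sprintf("%s LIKE '%s%%'", column, term)    // name LIKE 'paul%'

	fmt.Println(before)
	fmt.Println(after)
}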


@ -45,28 +45,9 @@ type builder struct {
// axisPredicate creates a predicate to predicating for this axis node.
func axisPredicate(root *axisNode) func(NodeNavigator) bool {
// get current axix node type.
typ := ElementNode
switch root.AxeType {
case "attribute":
typ = AttributeNode
case "self", "parent":
typ = allNode
default:
switch root.Prop {
case "comment":
typ = CommentNode
case "text":
typ = TextNode
// case "processing-instruction":
// typ = ProcessingInstructionNode
case "node":
typ = allNode
}
}
nametest := root.LocalName != "" || root.Prefix != ""
predicate := func(n NodeNavigator) bool {
if typ == n.NodeType() || typ == allNode {
if root.typeTest == n.NodeType() || root.typeTest == allNode {
if nametest {
type namespaceURL interface {
NamespaceURL() string
@ -102,39 +83,35 @@ func (b *builder) processAxis(root *axisNode, flags flag, props *builderProp) (q
*props = builderProps.None
} else {
inputFlags := flagsEnum.None
if root.AxeType == "child" && (root.Input.Type() == nodeAxis) {
if input := root.Input.(*axisNode); input.AxeType == "descendant-or-self" {
if (flags & flagsEnum.Filter) == 0 {
if root.AxisType == "child" && (root.Input.Type() == nodeAxis) {
if input := root.Input.(*axisNode); input.AxisType == "descendant-or-self" {
var qyGrandInput query
if input.Input != nil {
qyGrandInput, _ = b.processNode(input.Input, flagsEnum.SmartDesc, props)
qyGrandInput, err = b.processNode(input.Input, flagsEnum.SmartDesc, props)
if err != nil {
return nil, err
}
} else {
qyGrandInput = &contextQuery{}
}
// fix #20: https://github.com/antchfx/htmlquery/issues/20
filter := func(n NodeNavigator) bool {
v := predicate(n)
switch root.Prop {
case "text":
v = v && n.NodeType() == TextNode
case "comment":
v = v && n.NodeType() == CommentNode
}
return v
}
qyOutput = &descendantQuery{name: root.LocalName, Input: qyGrandInput, Predicate: filter, Self: false}
qyOutput = &descendantQuery{name: root.LocalName, Input: qyGrandInput, Predicate: predicate, Self: false}
*props |= builderProps.NonFlat
return qyOutput, nil
}
} else if ((flags & flagsEnum.Filter) == 0) && (root.AxeType == "descendant" || root.AxeType == "descendant-or-self") {
}
if root.AxisType == "descendant" || root.AxisType == "descendant-or-self" {
inputFlags |= flagsEnum.SmartDesc
}
}
qyInput, err = b.processNode(root.Input, inputFlags, props)
if err != nil {
return nil, err
}
}
switch root.AxeType {
switch root.AxisType {
case "ancestor":
qyOutput = &ancestorQuery{name: root.LocalName, Input: qyInput, Predicate: predicate}
*props |= builderProps.NonFlat
@ -144,22 +121,10 @@ func (b *builder) processAxis(root *axisNode, flags flag, props *builderProp) (q
case "attribute":
qyOutput = &attributeQuery{name: root.LocalName, Input: qyInput, Predicate: predicate}
case "child":
filter := func(n NodeNavigator) bool {
v := predicate(n)
switch root.Prop {
case "text":
v = v && n.NodeType() == TextNode
case "node":
v = v && (n.NodeType() == ElementNode || n.NodeType() == TextNode)
case "comment":
v = v && n.NodeType() == CommentNode
}
return v
}
if (*props & builderProps.NonFlat) == 0 {
qyOutput = &childQuery{name: root.LocalName, Input: qyInput, Predicate: filter}
qyOutput = &childQuery{name: root.LocalName, Input: qyInput, Predicate: predicate}
} else {
qyOutput = &cachedChildQuery{name: root.LocalName, Input: qyInput, Predicate: filter}
qyOutput = &cachedChildQuery{name: root.LocalName, Input: qyInput, Predicate: predicate}
}
case "descendant":
if (flags & flagsEnum.SmartDesc) != flagsEnum.None {
@ -192,7 +157,7 @@ func (b *builder) processAxis(root *axisNode, flags flag, props *builderProp) (q
case "namespace":
// haha,what will you do someting??
default:
err = fmt.Errorf("unknown axe type: %s", root.AxeType)
err = fmt.Errorf("unknown axe type: %s", root.AxisType)
return nil, err
}
return qyOutput, nil
@ -235,7 +200,6 @@ func (b *builder) processFilter(root *filterNode, flags flag, props *builderProp
*props |= builderProps.PosFilter
}
merge := (qyInput.Properties() & queryProps.Merge) != 0
if (propsCond & builderProps.HasPosition) != builderProps.None {
if (propsCond & builderProps.HasLast) != 0 {
// https://github.com/antchfx/xpath/issues/76
@ -243,16 +207,15 @@ func (b *builder) processFilter(root *filterNode, flags flag, props *builderProp
if qyFunc, ok := cond.(*functionQuery); ok {
switch qyFunc.Input.(type) {
case *filterQuery:
cond = &lastQuery{Input: qyFunc.Input}
cond = &lastFuncQuery{Input: qyFunc.Input}
}
}
}
}
merge := (qyInput.Properties() & queryProps.Merge) != 0
if first && firstInput != nil {
if merge && ((*props & builderProps.PosFilter) != 0) {
qyInput = &filterQuery{Input: qyInput, Predicate: cond, NoPosition: false}
var (
rootQuery = &contextQuery{}
parent query
@ -315,10 +278,11 @@ func (b *builder) processFilter(root *filterNode, flags flag, props *builderProp
}
}
b.firstInput = nil
child := &filterQuery{Input: qyInput, Predicate: cond, NoPosition: false}
if parent != nil {
return &mergeQuery{Input: parent, Child: qyInput}, nil
return &mergeQuery{Input: parent, Child: child}, nil
}
return qyInput, nil
return child, nil
}
b.firstInput = nil
}
@ -343,7 +307,7 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: arg, Func: lowerCaseFunc}
qyOutput = &functionQuery{Func: lowerCaseFunc(arg)}
case "starts-with":
arg1, err := b.processNode(root.Args[0], flagsEnum.None, props)
if err != nil {
@ -446,14 +410,17 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
}
qyOutput = &functionQuery{Func: stringLengthFunc(arg1)}
case "normalize-space":
if len(root.Args) == 0 {
return nil, errors.New("xpath: normalize-space function must have at least one parameter")
var arg node
if len(root.Args) > 0 {
arg = root.Args[0]
} else {
arg = newAxisNode("self", allNode, "", "", "", nil)
}
argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props)
arg1, err := b.processNode(arg, flagsEnum.None, props)
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: argQuery, Func: normalizespaceFunc}
qyOutput = &functionQuery{Func: normalizespaceFunc(arg1)}
case "replace":
//replace( string , string, string )
if len(root.Args) != 3 {
@ -500,7 +467,7 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: argQuery, Func: notFunc}
qyOutput = &functionQuery{Func: notFunc(argQuery)}
case "name", "local-name", "namespace-uri":
if len(root.Args) > 1 {
return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName)
@ -531,17 +498,10 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
},
}
case "last":
//switch typ := b.firstInput.(type) {
//case *groupQuery, *filterQuery:
// https://github.com/antchfx/xpath/issues/76
// https://github.com/antchfx/xpath/issues/78
//qyOutput = &lastQuery{Input: typ}
//default:
qyOutput = &functionQuery{Func: lastFunc}
//}
qyOutput = &functionQuery{Input: b.firstInput, Func: lastFunc()}
*props |= builderProps.HasLast
case "position":
qyOutput = &functionQuery{Func: positionFunc}
qyOutput = &functionQuery{Input: b.firstInput, Func: positionFunc()}
*props |= builderProps.HasPosition
case "boolean", "number", "string":
var inp query
@ -555,16 +515,14 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
}
inp = argQuery
}
f := &functionQuery{Input: inp}
switch root.FuncName {
case "boolean":
f.Func = booleanFunc
qyOutput = &functionQuery{Func: booleanFunc(inp)}
case "string":
f.Func = stringFunc
qyOutput = &functionQuery{Func: stringFunc(inp)}
case "number":
f.Func = numberFunc
qyOutput = &functionQuery{Func: numberFunc(inp)}
}
qyOutput = f
case "count":
if len(root.Args) == 0 {
return nil, fmt.Errorf("xpath: count(node-sets) function must with have parameters node-sets")
@ -573,7 +531,7 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: argQuery, Func: countFunc}
qyOutput = &functionQuery{Func: countFunc(argQuery)}
case "sum":
if len(root.Args) == 0 {
return nil, fmt.Errorf("xpath: sum(node-sets) function must with have parameters node-sets")
@ -582,7 +540,7 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: argQuery, Func: sumFunc}
qyOutput = &functionQuery{Func: sumFunc(argQuery)}
case "ceiling", "floor", "round":
if len(root.Args) == 0 {
return nil, fmt.Errorf("xpath: ceiling(node-sets) function must with have parameters node-sets")
@ -591,16 +549,14 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if err != nil {
return nil, err
}
f := &functionQuery{Input: argQuery}
switch root.FuncName {
case "ceiling":
f.Func = ceilingFunc
qyOutput = &functionQuery{Func: ceilingFunc(argQuery)}
case "floor":
f.Func = floorFunc
qyOutput = &functionQuery{Func: floorFunc(argQuery)}
case "round":
f.Func = roundFunc
qyOutput = &functionQuery{Func: roundFunc(argQuery)}
}
qyOutput = f
case "concat":
if len(root.Args) < 2 {
return nil, fmt.Errorf("xpath: concat() must have at least two arguments")
@ -627,7 +583,7 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if len(root.Args) != 2 {
return nil, fmt.Errorf("xpath: string-join(node-sets, separator) function requires node-set and argument")
}
argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props)
input, err := b.processNode(root.Args[0], flagsEnum.None, props)
if err != nil {
return nil, err
}
@ -635,14 +591,10 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: argQuery, Func: stringJoinFunc(arg1)}
qyOutput = &functionQuery{Func: stringJoinFunc(input, arg1)}
default:
return nil, fmt.Errorf("not yet support this function %s()", root.FuncName)
}
if funcQuery, ok := qyOutput.(*functionQuery); ok && funcQuery.Input == nil {
funcQuery.Input = b.firstInput
}
return qyOutput, nil
}
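A recurring change in the hunks above is that XPath functions are no longer assembled from a bare Func plus a separate Input query; a factory such as countFunc(argQuery) or ceilingFunc(argQuery) now binds the argument into a closure that functionQuery simply calls. A stripped-down, self-contained sketch of that pattern; the types here are stand-ins, and the real signatures also take a query and an iterator:

package main

import (
	"fmt"
	"math"
)

// query is a stand-in for the xpath package's query interface.
type query interface {
	Evaluate() interface{}
}

// literalQuery is a toy query that yields a fixed number.
type literalQuery struct{ v float64 }

func (q literalQuery) Evaluate() interface{} { return q.v }

// functionQuery carries only the ready-made closure; the argument query is
// no longer threaded through a separate Input field at evaluation time.
type functionQuery struct {
	Func func() interface{}
}

func (f functionQuery) Evaluate() interface{} { return f.Func() }

// ceilingFunc is a factory: it captures its argument query once and returns
// the closure that functionQuery will later invoke.
func ceilingFunc(arg query) func() interface{} {
	return func() interface{} {
		return math.Ceil(arg.Evaluate().(float64))
	}
}

func main() {
	q := functionQuery{Func: ceilingFunc(literalQuery{v: 2.3})}
	fmt.Println(q.Evaluate()) // 3
}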


@ -37,7 +37,8 @@ func predicate(q query) func(NodeNavigator) bool {
}
// positionFunc is a XPath Node Set functions position().
func positionFunc(q query, t iterator) interface{} {
func positionFunc() func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
var (
count = 1
node = t.Current().Copy()
@ -49,16 +50,18 @@ func positionFunc(q query, t iterator) interface{} {
}
}
return float64(count)
}
}
// lastFunc is a XPath Node Set functions last().
func lastFunc(q query, t iterator) interface{} {
func lastFunc() func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
var (
count = 0
node = t.Current().Copy()
)
node.MoveToFirst()
test := predicate(q)
node.MoveToFirst()
for {
if test(node) {
count++
@ -68,12 +71,14 @@ func lastFunc(q query, t iterator) interface{} {
}
}
return float64(count)
}
}
// countFunc is a XPath Node Set functions count(node-set).
func countFunc(q query, t iterator) interface{} {
func countFunc(arg query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} {
var count = 0
q = functionArgs(q)
q := functionArgs(arg)
test := predicate(q)
switch typ := q.Evaluate(t).(type) {
case query:
@ -84,12 +89,14 @@ func countFunc(q query, t iterator) interface{} {
}
}
return float64(count)
}
}
// sumFunc is a XPath Node Set functions sum(node-set).
func sumFunc(q query, t iterator) interface{} {
func sumFunc(arg query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} {
var sum float64
switch typ := functionArgs(q).Evaluate(t).(type) {
switch typ := functionArgs(arg).Evaluate(t).(type) {
case query:
for node := typ.Select(t); node != nil; node = typ.Select(t) {
if v, err := strconv.ParseFloat(node.Value(), 64); err == nil {
@ -106,6 +113,7 @@ func sumFunc(q query, t iterator) interface{} {
sum = v
}
return sum
}
}
func asNumber(t iterator, o interface{}) float64 {
@ -130,30 +138,36 @@ func asNumber(t iterator, o interface{}) float64 {
}
// ceilingFunc is a XPath Node Set functions ceiling(node-set).
func ceilingFunc(q query, t iterator) interface{} {
val := asNumber(t, functionArgs(q).Evaluate(t))
func ceilingFunc(arg query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} {
val := asNumber(t, functionArgs(arg).Evaluate(t))
// if math.IsNaN(val) {
// panic(errors.New("ceiling() function argument type must be a valid number"))
// }
return math.Ceil(val)
}
}
// floorFunc is a XPath Node Set functions floor(node-set).
func floorFunc(q query, t iterator) interface{} {
val := asNumber(t, functionArgs(q).Evaluate(t))
func floorFunc(arg query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} {
val := asNumber(t, functionArgs(arg).Evaluate(t))
return math.Floor(val)
}
}
// roundFunc is a XPath Node Set functions round(node-set).
func roundFunc(q query, t iterator) interface{} {
val := asNumber(t, functionArgs(q).Evaluate(t))
func roundFunc(arg query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} {
val := asNumber(t, functionArgs(arg).Evaluate(t))
//return math.Round(val)
return round(val)
}
}
// nameFunc is a XPath functions name([node-set]).
func nameFunc(arg query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
var v NodeNavigator
if arg == nil {
v = t.Current()
@ -173,7 +187,7 @@ func nameFunc(arg query) func(query, iterator) interface{} {
// localNameFunc is a XPath functions local-name([node-set]).
func localNameFunc(arg query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
var v NodeNavigator
if arg == nil {
v = t.Current()
@ -189,7 +203,7 @@ func localNameFunc(arg query) func(query, iterator) interface{} {
// namespaceFunc is a XPath functions namespace-uri([node-set]).
func namespaceFunc(arg query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
var v NodeNavigator
if arg == nil {
v = t.Current()
@ -256,26 +270,32 @@ func asString(t iterator, v interface{}) string {
}
// booleanFunc is a XPath functions boolean([node-set]).
func booleanFunc(q query, t iterator) interface{} {
v := functionArgs(q).Evaluate(t)
func booleanFunc(arg1 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} {
v := functionArgs(arg1).Evaluate(t)
return asBool(t, v)
}
}
// numberFunc is a XPath functions number([node-set]).
func numberFunc(q query, t iterator) interface{} {
v := functionArgs(q).Evaluate(t)
func numberFunc(arg1 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} {
v := functionArgs(arg1).Evaluate(t)
return asNumber(t, v)
}
}
// stringFunc is a XPath functions string([node-set]).
func stringFunc(q query, t iterator) interface{} {
v := functionArgs(q).Evaluate(t)
func stringFunc(arg1 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} {
v := functionArgs(arg1).Evaluate(t)
return asString(t, v)
}
}
// startwithFunc is a XPath functions starts-with(string, string).
func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
var (
m, n string
ok bool
@ -302,7 +322,7 @@ func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
// endwithFunc is a XPath functions ends-with(string, string).
func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
var (
m, n string
ok bool
@ -329,7 +349,7 @@ func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
// containsFunc is a XPath functions contains(string or @attr, string).
func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
var (
m, n string
ok bool
@ -360,7 +380,7 @@ func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
// Note: does not support https://www.w3.org/TR/xpath-functions-31/#func-matches 3rd optional `flags` argument; if
// needed, directly put flags in the regexp pattern, such as `(?i)^pattern$` for `i` flag.
func matchesFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
var s string
switch typ := functionArgs(arg1).Evaluate(t).(type) {
case string:
@ -386,9 +406,10 @@ func matchesFunc(arg1, arg2 query) func(query, iterator) interface{} {
}
// normalizespaceFunc is XPath functions normalize-space(string?)
func normalizespaceFunc(q query, t iterator) interface{} {
func normalizespaceFunc(arg1 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} {
var m string
switch typ := functionArgs(q).Evaluate(t).(type) {
switch typ := functionArgs(arg1).Evaluate(t).(type) {
case string:
m = typ
case query:
@ -418,11 +439,12 @@ func normalizespaceFunc(q query, t iterator) interface{} {
builderPool.Put(b)
return result
}
}
// substringFunc is XPath functions substring function returns a part of a given string.
func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
var m string
switch typ := functionArgs(arg1).Evaluate(t).(type) {
case string:
@ -461,7 +483,7 @@ func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
// substringIndFunc is XPath functions substring-before/substring-after function returns a part of a given string.
func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
var str string
switch v := functionArgs(arg1).Evaluate(t).(type) {
case string:
@ -502,7 +524,7 @@ func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interf
// stringLengthFunc is XPATH string-length( [string] ) function that returns a number
// equal to the number of characters in a given string.
func stringLengthFunc(arg1 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
switch v := functionArgs(arg1).Evaluate(t).(type) {
case string:
return float64(len(v))
@ -519,7 +541,7 @@ func stringLengthFunc(arg1 query) func(query, iterator) interface{} {
// translateFunc is XPath functions translate() function returns a replaced string.
func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
str := asString(t, functionArgs(arg1).Evaluate(t))
src := asString(t, functionArgs(arg2).Evaluate(t))
dst := asString(t, functionArgs(arg3).Evaluate(t))
@ -538,7 +560,7 @@ func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
// replaceFunc is XPath functions replace() function returns a replaced string.
func replaceFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
str := asString(t, functionArgs(arg1).Evaluate(t))
src := asString(t, functionArgs(arg2).Evaluate(t))
dst := asString(t, functionArgs(arg3).Evaluate(t))
@ -548,8 +570,9 @@ func replaceFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
}
// notFunc is XPATH functions not(expression) function operation.
func notFunc(q query, t iterator) interface{} {
switch v := functionArgs(q).Evaluate(t).(type) {
func notFunc(arg1 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} {
switch v := functionArgs(arg1).Evaluate(t).(type) {
case bool:
return !v
case query:
@ -558,13 +581,14 @@ func notFunc(q query, t iterator) interface{} {
default:
return false
}
}
}
// concatFunc is the concat function concatenates two or more
// strings and returns the resulting string.
// concat( string1 , string2 [, stringn]* )
func concatFunc(args ...query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} {
b := builderPool.Get().(stringBuilder)
for _, v := range args {
v = functionArgs(v)
@ -616,8 +640,8 @@ func reverseFunc(q query, t iterator) func() NodeNavigator {
}
// string-join is a XPath Node Set functions string-join(node-set, separator).
func stringJoinFunc(arg1 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
func stringJoinFunc(q, arg1 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} {
var separator string
switch v := functionArgs(arg1).Evaluate(t).(type) {
case string:
@ -647,7 +671,9 @@ func stringJoinFunc(arg1 query) func(query, iterator) interface{} {
}
// lower-case is XPATH function that converts a string to lower case.
func lowerCaseFunc(q query, t iterator) interface{} {
v := functionArgs(q).Evaluate(t)
func lowerCaseFunc(arg1 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} {
v := functionArgs(arg1).Evaluate(t)
return strings.ToLower(asString(t, v))
}
}


@ -6,6 +6,7 @@ import (
"fmt"
"strconv"
"unicode"
"unicode/utf8"
)
// A XPath expression token type.
@ -85,12 +86,13 @@ func newOperandNode(v interface{}) node {
}
// newAxisNode returns new axis node AxisNode.
func newAxisNode(axeTyp, localName, prefix, prop string, n node, opts ...func(p *axisNode)) node {
func newAxisNode(axisType string, typeTest NodeType, localName, prefix, prop string, n node, opts ...func(p *axisNode)) node {
a := axisNode{
nodeType: nodeAxis,
typeTest: typeTest,
LocalName: localName,
Prefix: prefix,
AxeType: axeTyp,
AxisType: axisType,
Prop: prop,
Input: n,
}
@ -228,6 +230,7 @@ Loop:
}
// RelationalExpr ::= AdditiveExpr | RelationalExpr '<' AdditiveExpr | RelationalExpr '>' AdditiveExpr
//
// | RelationalExpr '<=' AdditiveExpr
// | RelationalExpr '>=' AdditiveExpr
func (p *parser) parseRelationalExpr(n node) node {
@ -274,6 +277,7 @@ Loop:
}
// MultiplicativeExpr ::= UnaryExpr | MultiplicativeExpr MultiplyOperator(*) UnaryExpr
//
// | MultiplicativeExpr 'div' UnaryExpr | MultiplicativeExpr 'mod' UnaryExpr
func (p *parser) parseMultiplicativeExpr(n node) node {
opnd := p.parseUnaryExpr(n)
@ -335,7 +339,7 @@ func (p *parser) parsePathExpr(n node) node {
opnd = p.parseRelativeLocationPath(opnd)
case itemSlashSlash:
p.next()
opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", "", "", "", opnd))
opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", allNode, "", "", "", opnd))
}
} else {
opnd = p.parseLocationPath(nil)
@ -372,7 +376,7 @@ func (p *parser) parseLocationPath(n node) (opnd node) {
case itemSlashSlash:
p.next()
opnd = newRootNode("//")
opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", "", "", "", opnd))
opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", allNode, "", "", "", opnd))
default:
opnd = p.parseRelativeLocationPath(n)
}
@ -388,7 +392,7 @@ Loop:
switch p.r.typ {
case itemSlashSlash:
p.next()
opnd = newAxisNode("descendant-or-self", "", "", "", opnd)
opnd = newAxisNode("descendant-or-self", allNode, "", "", "", opnd)
case itemSlash:
p.next()
default:
@ -400,30 +404,33 @@ Loop:
// Step ::= AxisSpecifier NodeTest Predicate* | AbbreviatedStep
func (p *parser) parseStep(n node) (opnd node) {
axeTyp := "child" // default axes value.
if p.r.typ == itemDot || p.r.typ == itemDotDot {
if p.r.typ == itemDot {
axeTyp = "self"
opnd = newAxisNode("self", allNode, "", "", "", n)
} else {
axeTyp = "parent"
opnd = newAxisNode("parent", allNode, "", "", "", n)
}
p.next()
opnd = newAxisNode(axeTyp, "", "", "", n)
if p.r.typ != itemLBracket {
return opnd
}
} else {
axisType := "child" // default axes value.
switch p.r.typ {
case itemAt:
axisType = "attribute"
p.next()
axeTyp = "attribute"
case itemAxe:
axeTyp = p.r.name
axisType = p.r.name
p.next()
case itemLParens:
return p.parseSequence(n)
}
opnd = p.parseNodeTest(n, axeTyp)
matchType := ElementNode
if axisType == "attribute" {
matchType = AttributeNode
}
opnd = p.parseNodeTest(n, axisType, matchType)
}
for p.r.typ == itemLBracket {
opnd = newFilterNode(opnd, p.parsePredicate(opnd))
@ -448,7 +455,7 @@ func (p *parser) parseSequence(n node) (opnd node) {
}
// NodeTest ::= NameTest | nodeType '(' ')' | 'processing-instruction' '(' Literal ')'
func (p *parser) parseNodeTest(n node, axeTyp string) (opnd node) {
func (p *parser) parseNodeTest(n node, axeTyp string, matchType NodeType) (opnd node) {
switch p.r.typ {
case itemName:
if p.r.canBeFunc && isNodeType(p.r) {
@ -466,7 +473,19 @@ func (p *parser) parseNodeTest(n node, axeTyp string) (opnd node) {
p.next()
}
p.skipItem(itemRParens)
opnd = newAxisNode(axeTyp, name, "", prop, n)
switch prop {
case "comment":
matchType = CommentNode
case "text":
matchType = TextNode
case "processing-instruction":
case "node":
matchType = allNode
default:
matchType = RootNode
}
opnd = newAxisNode(axeTyp, matchType, name, "", prop, n)
} else {
prefix := p.r.prefix
name := p.r.name
@ -474,7 +493,7 @@ func (p *parser) parseNodeTest(n node, axeTyp string) (opnd node) {
if p.r.name == "*" {
name = ""
}
opnd = newAxisNode(axeTyp, name, prefix, "", n, func(a *axisNode) {
opnd = newAxisNode(axeTyp, matchType, name, prefix, "", n, func(a *axisNode) {
if prefix != "" && p.namespaces != nil {
if ns, ok := p.namespaces[prefix]; ok {
a.hasNamespaceURI = true
@ -486,7 +505,7 @@ func (p *parser) parseNodeTest(n node, axeTyp string) (opnd node) {
})
}
case itemStar:
opnd = newAxisNode(axeTyp, "", "", "", n)
opnd = newAxisNode(axeTyp, matchType, "", "", "", n)
p.next()
default:
panic("expression must evaluate to a node-set")
@ -579,17 +598,18 @@ type axisNode struct {
nodeType
Input node
Prop string // node-test name.[comment|text|processing-instruction|node]
AxeType string // name of the axes.[attribute|ancestor|child|....]
AxisType string // name of the axis.[attribute|ancestor|child|....]
LocalName string // local part name of node.
Prefix string // prefix name of node.
namespaceURI string // namespace URI of node
hasNamespaceURI bool // if namespace URI is set (can be "")
typeTest NodeType
}
func (a *axisNode) String() string {
var b bytes.Buffer
if a.AxeType != "" {
b.Write([]byte(a.AxeType + "::"))
if a.AxisType != "" {
b.Write([]byte(a.AxisType + "::"))
}
if a.Prefix != "" {
b.Write([]byte(a.Prefix + ":"))
@ -672,6 +692,7 @@ type scanner struct {
pos int
curr rune
currSize int
typ itemType
strval string // text value at current pos
numval float64 // number value at current pos
@ -681,10 +702,18 @@ type scanner struct {
func (s *scanner) nextChar() bool {
if s.pos >= len(s.text) {
s.curr = rune(0)
s.currSize = 1
return false
}
s.curr = rune(s.text[s.pos])
s.pos++
r, size := rune(s.text[s.pos]), 1
if r >= 0x80 { // handle multi-byte runes
r, size = utf8.DecodeRuneInString(s.text[s.pos:])
}
s.curr = r
s.currSize = size
s.pos += size
return true
}
@ -843,12 +872,15 @@ func (s *scanner) scanString() string {
end = s.curr
)
s.nextChar()
i := s.pos - 1
i := s.pos - s.currSize
if s.currSize > 1 {
c++
}
for s.curr != end {
if !s.nextChar() {
panic(errors.New("xpath: scanString got unclosed string"))
}
c++
c += s.currSize
}
s.nextChar()
return s.text[i : i+c]
@ -856,14 +888,18 @@ func (s *scanner) scanString() string {
func (s *scanner) scanName() string {
var (
c int
i = s.pos - 1
c = s.currSize - 1
i = s.pos - s.currSize
)
// Detect current rune size
for isName(s.curr) {
c++
if !s.nextChar() {
c += s.currSize
break
}
c += s.currSize
}
return s.text[i : i+c]
}
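The scanner changes above make nextChar rune-aware: a byte at or above 0x80 is decoded with utf8.DecodeRuneInString and the position advances by the rune's full byte width, which scanString and scanName then use when counting and slicing. A standalone illustration of that decoding step, with a made-up sample string:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	text := "a€b" // the euro sign occupies three bytes
	for pos := 0; pos < len(text); {
		r, size := rune(text[pos]), 1
		if r >= 0x80 { // multi-byte rune: decode it fully
			r, size = utf8.DecodeRuneInString(text[pos:])
		}
		fmt.Printf("pos=%d rune=%q size=%d\n", pos, r, size)
		pos += size
	}
}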


@ -850,6 +850,9 @@ func (f *functionQuery) Evaluate(t iterator) interface{} {
}
func (f *functionQuery) Clone() query {
if f.Input == nil {
return &functionQuery{Func: f.Func}
}
return &functionQuery{Input: f.Input.Clone(), Func: f.Func}
}
@ -1187,18 +1190,18 @@ func (u *unionQuery) Properties() queryProp {
return queryProps.Merge
}
type lastQuery struct {
type lastFuncQuery struct {
buffer []NodeNavigator
counted bool
Input query
}
func (q *lastQuery) Select(t iterator) NodeNavigator {
func (q *lastFuncQuery) Select(t iterator) NodeNavigator {
return nil
}
func (q *lastQuery) Evaluate(t iterator) interface{} {
func (q *lastFuncQuery) Evaluate(t iterator) interface{} {
if !q.counted {
for {
node := q.Input.Select(t)
@ -1212,15 +1215,15 @@ func (q *lastQuery) Evaluate(t iterator) interface{} {
return float64(len(q.buffer))
}
func (q *lastQuery) Clone() query {
return &lastQuery{Input: q.Input.Clone()}
func (q *lastFuncQuery) Clone() query {
return &lastFuncQuery{Input: q.Input.Clone()}
}
func (q *lastQuery) ValueType() resultType {
func (q *lastFuncQuery) ValueType() resultType {
return xpathResultType.Number
}
func (q *lastQuery) Properties() queryProp {
func (q *lastFuncQuery) Properties() queryProp {
return queryProps.Merge
}

File diff suppressed because it is too large.


@ -147,9 +147,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
#define SQLITE_VERSION "3.45.1"
#define SQLITE_VERSION_NUMBER 3045001
#define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a"
#define SQLITE_VERSION "3.46.1"
#define SQLITE_VERSION_NUMBER 3046001
#define SQLITE_SOURCE_ID "2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33"
/*
** CAPI3REF: Run-Time Library Version Numbers
@ -421,6 +421,8 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**);
** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running.
** <li> The application must not modify the SQL statement text passed into
** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
** <li> The application must not dereference the arrays or string pointers
** passed as the 3rd and 4th callback parameters after it returns.
** </ul>
*/
SQLITE_API int sqlite3_exec(
@ -763,11 +765,11 @@ struct sqlite3_file {
** </ul>
** xLock() upgrades the database file lock. In other words, xLock() moves the
** database file lock in the direction NONE toward EXCLUSIVE. The argument to
** xLock() is always on of SHARED, RESERVED, PENDING, or EXCLUSIVE, never
** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never
** SQLITE_LOCK_NONE. If the database file lock is already at or above the
** requested lock, then the call to xLock() is a no-op.
** xUnlock() downgrades the database file lock to either SHARED or NONE.
* If the lock is already at or below the requested lock state, then the call
** If the lock is already at or below the requested lock state, then the call
** to xUnlock() is a no-op.
** The xCheckReservedLock() method checks whether any database connection,
** either in this process or in some other process, is holding a RESERVED,
@ -2142,6 +2144,22 @@ struct sqlite3_mem_methods {
** configuration setting is never used, then the default maximum is determined
** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that
** compile-time option is not set, then the default maximum is 1073741824.
**
** [[SQLITE_CONFIG_ROWID_IN_VIEW]]
** <dt>SQLITE_CONFIG_ROWID_IN_VIEW
** <dd>The SQLITE_CONFIG_ROWID_IN_VIEW option enables or disables the ability
** for VIEWs to have a ROWID. The capability can only be enabled if SQLite is
** compiled with -DSQLITE_ALLOW_ROWID_IN_VIEW, in which case the capability
** defaults to on. This configuration option queries the current setting or
** changes the setting to off or on. The argument is a pointer to an integer.
** If that integer initially holds a value of 1, then the ability for VIEWs to
** have ROWIDs is activated. If the integer initially holds zero, then the
** ability is deactivated. Any other initial value for the integer leaves the
** setting unchanged. After changes, if any, the integer is written with
** a 1 or 0, if the ability for VIEWs to have ROWIDs is on or off. If SQLite
** is compiled without -DSQLITE_ALLOW_ROWID_IN_VIEW (which is the usual and
** recommended case) then the integer is always filled with zero, regardless
** if its initial value.
** </dl>
*/
#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */
@ -2173,6 +2191,7 @@ struct sqlite3_mem_methods {
#define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */
#define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */
#define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */
#define SQLITE_CONFIG_ROWID_IN_VIEW 30 /* int* */
/*
** CAPI3REF: Database Connection Configuration Options
@ -3287,8 +3306,8 @@ SQLITE_API int sqlite3_set_authorizer(
#define SQLITE_RECURSIVE 33 /* NULL NULL */
/*
** CAPI3REF: Tracing And Profiling Functions
** METHOD: sqlite3
** CAPI3REF: Deprecated Tracing And Profiling Functions
** DEPRECATED
**
** These routines are deprecated. Use the [sqlite3_trace_v2()] interface
** instead of the routines described here.
@ -6869,6 +6888,12 @@ SQLITE_API int sqlite3_autovacuum_pages(
** The exceptions defined in this paragraph might change in a future
** release of SQLite.
**
** Whether the update hook is invoked before or after the
** corresponding change is currently unspecified and may differ
** depending on the type of change. Do not rely on the order of the
** hook call with regards to the final result of the operation which
** triggers the hook.
**
** The update hook implementation must not do anything that will modify
** the database connection that invoked the update hook. Any actions
** to modify the database connection must be deferred until after the
@ -8339,7 +8364,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
** The sqlite3_keyword_count() interface returns the number of distinct
** keywords understood by SQLite.
**
** The sqlite3_keyword_name(N,Z,L) interface finds the N-th keyword and
** The sqlite3_keyword_name(N,Z,L) interface finds the 0-based N-th keyword and
** makes *Z point to that keyword expressed as UTF8 and writes the number
** of bytes in the keyword into *L. The string that *Z points to is not
** zero-terminated. The sqlite3_keyword_name(N,Z,L) routine returns
@ -9918,24 +9943,45 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int);
** <li value="2"><p>
** ^(If the sqlite3_vtab_distinct() interface returns 2, that means
** that the query planner does not need the rows returned in any particular
** order, as long as rows with the same values in all "aOrderBy" columns
** are adjacent.)^ ^(Furthermore, only a single row for each particular
** combination of values in the columns identified by the "aOrderBy" field
** needs to be returned.)^ ^It is always ok for two or more rows with the same
** values in all "aOrderBy" columns to be returned, as long as all such rows
** are adjacent. ^The virtual table may, if it chooses, omit extra rows
** that have the same value for all columns identified by "aOrderBy".
** ^However omitting the extra rows is optional.
** order, as long as rows with the same values in all columns identified
** by "aOrderBy" are adjacent.)^ ^(Furthermore, when two or more rows
** contain the same values for all columns identified by "colUsed", all but
** one such row may optionally be omitted from the result.)^
** The virtual table is not required to omit rows that are duplicates
** over the "colUsed" columns, but if the virtual table can do that without
** too much extra effort, it could potentially help the query to run faster.
** This mode is used for a DISTINCT query.
** <li value="3"><p>
** ^(If the sqlite3_vtab_distinct() interface returns 3, that means
** that the query planner needs only distinct rows but it does need the
** rows to be sorted.)^ ^The virtual table implementation is free to omit
** rows that are identical in all aOrderBy columns, if it wants to, but
** it is not required to omit any rows. This mode is used for queries
** ^(If the sqlite3_vtab_distinct() interface returns 3, that means the
** virtual table must return rows in the order defined by "aOrderBy" as
** if the sqlite3_vtab_distinct() interface had returned 0. However if
** two or more rows in the result have the same values for all columns
** identified by "colUsed", then all but one such row may optionally be
** omitted.)^ Like when the return value is 2, the virtual table
** is not required to omit rows that are duplicates over the "colUsed"
** columns, but if the virtual table can do that without
** too much extra effort, it could potentially help the query to run faster.
** This mode is used for queries
** that have both DISTINCT and ORDER BY clauses.
** </ol>
**
** <p>The following table summarizes the conditions under which the
** virtual table is allowed to set the "orderByConsumed" flag based on
** the value returned by sqlite3_vtab_distinct(). This table is a
** restatement of the previous four paragraphs:
**
** <table border=1 cellspacing=0 cellpadding=10 width="90%">
** <tr>
** <td valign="top">sqlite3_vtab_distinct() return value
** <td valign="top">Rows are returned in aOrderBy order
** <td valign="top">Rows with the same value in all aOrderBy columns are adjacent
** <td valign="top">Duplicates over all colUsed columns may be omitted
** <tr><td>0<td>yes<td>yes<td>no
** <tr><td>1<td>no<td>yes<td>no
** <tr><td>2<td>no<td>yes<td>yes
** <tr><td>3<td>yes<td>yes<td>yes
** </table>
**
** ^For the purposes of comparing virtual table output values to see if the
** values are same value for sorting purposes, two NULL values are considered
** to be the same. In other words, the comparison operator is "IS"
@ -11980,6 +12026,30 @@ SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const c
*/
SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
/*
** CAPI3REF: Add A Single Change To A Changegroup
** METHOD: sqlite3_changegroup
**
** This function adds the single change currently indicated by the iterator
** passed as the second argument to the changegroup object. The rules for
** adding the change are just as described for [sqlite3changegroup_add()].
**
** If the change is successfully added to the changegroup, SQLITE_OK is
** returned. Otherwise, an SQLite error code is returned.
**
** The iterator must point to a valid entry when this function is called.
** If it does not, SQLITE_ERROR is returned and no change is added to the
** changegroup. Additionally, the iterator must not have been opened with
** the SQLITE_CHANGESETAPPLY_INVERT flag. In this case SQLITE_ERROR is also
** returned.
*/
SQLITE_API int sqlite3changegroup_add_change(
sqlite3_changegroup*,
sqlite3_changeset_iter*
);
/*
** CAPI3REF: Obtain A Composite Changeset From A Changegroup
** METHOD: sqlite3_changegroup
@ -12784,8 +12854,8 @@ struct Fts5PhraseIter {
** EXTENSION API FUNCTIONS
**
** xUserData(pFts):
** Return a copy of the context pointer the extension function was
** registered with.
** Return a copy of the pUserData pointer passed to the xCreateFunction()
** API when the extension function was registered.
**
** xColumnTotalSize(pFts, iCol, pnToken):
** If parameter iCol is less than zero, set output variable *pnToken


@ -1679,7 +1679,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
}
}
// Forgein Keys
// Foreign Keys
if foreignKeys > -1 {
if err := exec(fmt.Sprintf("PRAGMA foreign_keys = %d;", foreignKeys)); err != nil {
C.sqlite3_close_v2(db)


@ -18,5 +18,6 @@ package sqlite3
#cgo openbsd LDFLAGS: -lsqlite3
#cgo solaris LDFLAGS: -lsqlite3
#cgo windows LDFLAGS: -lsqlite3
#cgo zos LDFLAGS: -lsqlite3
*/
import "C"


@ -86,7 +86,7 @@ var (
// combination is incorrect or unknown.
//
// If the SQLITE_USER table is not present in the database file, then
// this interface is a harmless no-op returnning SQLITE_OK.
// this interface is a harmless no-op returning SQLITE_OK.
func (c *SQLiteConn) Authenticate(username, password string) error {
rv := c.authenticate(username, password)
switch rv {

vendor/golang.org/x/net/http2/config.go (new generated vendored file, 122 lines)

@ -0,0 +1,122 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"math"
"net/http"
"time"
)
// http2Config is a package-internal version of net/http.HTTP2Config.
//
// http.HTTP2Config was added in Go 1.24.
// When running with a version of net/http that includes HTTP2Config,
// we merge the configuration with the fields in Transport or Server
// to produce an http2Config.
//
// Zero valued fields in http2Config are interpreted as in the
// net/http.HTTPConfig documentation.
//
// Precedence order for reconciling configurations is:
//
// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero.
// - Otherwise use the http2.{Server.Transport} value.
// - If the resulting value is zero or out of range, use a default.
type http2Config struct {
MaxConcurrentStreams uint32
MaxDecoderHeaderTableSize uint32
MaxEncoderHeaderTableSize uint32
MaxReadFrameSize uint32
MaxUploadBufferPerConnection int32
MaxUploadBufferPerStream int32
SendPingTimeout time.Duration
PingTimeout time.Duration
WriteByteTimeout time.Duration
PermitProhibitedCipherSuites bool
CountError func(errType string)
}
// configFromServer merges configuration settings from
// net/http.Server.HTTP2Config and http2.Server.
func configFromServer(h1 *http.Server, h2 *Server) http2Config {
conf := http2Config{
MaxConcurrentStreams: h2.MaxConcurrentStreams,
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
MaxReadFrameSize: h2.MaxReadFrameSize,
MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection,
MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream,
SendPingTimeout: h2.ReadIdleTimeout,
PingTimeout: h2.PingTimeout,
WriteByteTimeout: h2.WriteByteTimeout,
PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
CountError: h2.CountError,
}
fillNetHTTPServerConfig(&conf, h1)
setConfigDefaults(&conf, true)
return conf
}
// configFromServer merges configuration settings from h2 and h2.t1.HTTP2
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
MaxReadFrameSize: h2.MaxReadFrameSize,
SendPingTimeout: h2.ReadIdleTimeout,
PingTimeout: h2.PingTimeout,
WriteByteTimeout: h2.WriteByteTimeout,
}
// Unlike most config fields, where out-of-range values revert to the default,
// Transport.MaxReadFrameSize clips.
if conf.MaxReadFrameSize < minMaxFrameSize {
conf.MaxReadFrameSize = minMaxFrameSize
} else if conf.MaxReadFrameSize > maxFrameSize {
conf.MaxReadFrameSize = maxFrameSize
}
if h2.t1 != nil {
fillNetHTTPTransportConfig(&conf, h2.t1)
}
setConfigDefaults(&conf, false)
return conf
}
func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
if *v < minval || *v > maxval {
*v = defval
}
}
func setConfigDefaults(conf *http2Config, server bool) {
setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams)
setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
if server {
setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20)
} else {
setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow)
}
if server {
setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20)
} else {
setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow)
}
setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize)
setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second)
}
// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header
// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
func adjustHTTP1MaxHeaderSize(n int64) int64 {
// http2's count is in a slightly different unit and includes 32 bytes per pair.
// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
const perFieldOverhead = 32 // per http2 spec
const typicalHeaders = 10 // conservative
return n + typicalHeaders*perFieldOverhead
}
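As a quick check of the padding arithmetic in adjustHTTP1MaxHeaderSize above, the standalone snippet below copies the two constants from the vendored file and shows a 1 MiB HTTP/1 header limit growing by 10 assumed header pairs times 32 bytes of per-field overhead:

package main

import "fmt"

func adjustHTTP1MaxHeaderSize(n int64) int64 {
	const perFieldOverhead = 32 // per the HTTP/2 spec
	const typicalHeaders = 10   // conservative assumption from the comment
	return n + typicalHeaders*perFieldOverhead
}

func main() {
	fmt.Println(adjustHTTP1MaxHeaderSize(1 << 20)) // 1048896
}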

vendor/golang.org/x/net/http2/config_go124.go (new generated vendored file, 61 lines)

@ -0,0 +1,61 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.24
package http2
import "net/http"
// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
fillNetHTTPConfig(conf, srv.HTTP2)
}
// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2.
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
fillNetHTTPConfig(conf, tr.HTTP2)
}
func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
if h2 == nil {
return
}
if h2.MaxConcurrentStreams != 0 {
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
}
if h2.MaxEncoderHeaderTableSize != 0 {
conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
}
if h2.MaxDecoderHeaderTableSize != 0 {
conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
}
if h2.MaxConcurrentStreams != 0 {
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
}
if h2.MaxReadFrameSize != 0 {
conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
}
if h2.MaxReceiveBufferPerConnection != 0 {
conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
}
if h2.MaxReceiveBufferPerStream != 0 {
conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
}
if h2.SendPingTimeout != 0 {
conf.SendPingTimeout = h2.SendPingTimeout
}
if h2.PingTimeout != 0 {
conf.PingTimeout = h2.PingTimeout
}
if h2.WriteByteTimeout != 0 {
conf.WriteByteTimeout = h2.WriteByteTimeout
}
if h2.PermitProhibitedCipherSuites {
conf.PermitProhibitedCipherSuites = true
}
if h2.CountError != nil {
conf.CountError = h2.CountError
}
}
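On Go 1.24 and later these values can be supplied through net/http's new Server.HTTP2 field and are copied into http2Config by fillNetHTTPConfig above. A hedged sketch of what that looks like from application code; the address, certificate paths, and timeout values are illustrative:

//go:build go1.24

package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr: ":8443", // illustrative
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams: 250,
			MaxReadFrameSize:     1 << 20,
			SendPingTimeout:      30 * time.Second,
			PingTimeout:          15 * time.Second,
			WriteByteTimeout:     10 * time.Second,
		},
	}
	// Certificate paths are placeholders.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}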

16
vendor/golang.org/x/net/http2/config_pre_go124.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.24
package http2
import "net/http"
// Pre-Go 1.24 fallback.
// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}

View File

@ -19,8 +19,9 @@ import (
"bufio"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"sort"
@ -238,12 +239,18 @@ func (cw closeWaiter) Wait() {
// idle memory usage with many connections.
type bufferedWriter struct {
_ incomparable
w io.Writer // immutable
group synctestGroupInterface // immutable
conn net.Conn // immutable
bw *bufio.Writer // non-nil when data is buffered
byteTimeout time.Duration // immutable, WriteByteTimeout
}
func newBufferedWriter(w io.Writer) *bufferedWriter {
return &bufferedWriter{w: w}
func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
return &bufferedWriter{
group: group,
conn: conn,
byteTimeout: timeout,
}
}
// bufWriterPoolBufferSize is the size of bufio.Writer's
@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int {
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
if w.bw == nil {
bw := bufWriterPool.Get().(*bufio.Writer)
bw.Reset(w.w)
bw.Reset((*bufferedWriterTimeoutWriter)(w))
w.bw = bw
}
return w.bw.Write(p)
@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error {
return err
}
type bufferedWriterTimeoutWriter bufferedWriter
func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
}
// writeWithByteTimeout writes to conn.
// If more than timeout passes without any bytes being written to the connection,
// the write fails.
func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
if timeout <= 0 {
return conn.Write(p)
}
for {
var now time.Time
if group == nil {
now = time.Now()
} else {
now = group.Now()
}
conn.SetWriteDeadline(now.Add(timeout))
nn, err := conn.Write(p[n:])
n += nn
if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
// Either we finished the write, made no progress, or hit the deadline.
// Whichever it is, we're done now.
conn.SetWriteDeadline(time.Time{})
return n, err
}
}
}
func mustUint31(v int32) uint32 {
if v < 0 || v > 2147483647 {
panic("out of range")

View File

@ -29,6 +29,7 @@ import (
"bufio"
"bytes"
"context"
"crypto/rand"
"crypto/tls"
"errors"
"fmt"
@ -56,6 +57,10 @@ const (
firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
handlerChunkWriteSize = 4 << 10
defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
// maxQueuedControlFrames is the maximum number of control frames like
// SETTINGS, PING and RST_STREAM that will be queued for writing before
// the connection is closed to prevent memory exhaustion attacks.
maxQueuedControlFrames = 10000
)
@ -127,6 +132,22 @@ type Server struct {
// If zero or negative, there is no timeout.
IdleTimeout time.Duration
// ReadIdleTimeout is the timeout after which a health check using a ping
// frame will be carried out if no frame is received on the connection.
// If zero, no health check is performed.
ReadIdleTimeout time.Duration
// PingTimeout is the timeout after which the connection will be closed
// if a response to a ping is not received.
// If zero, a default of 15 seconds is used.
PingTimeout time.Duration
// WriteByteTimeout is the timeout after which a connection will be
// closed if no data can be written to it. The timeout begins when data is
// available to write, and is extended whenever any bytes are written.
// If zero or negative, there is no timeout.
WriteByteTimeout time.Duration
// MaxUploadBufferPerConnection is the size of the initial flow
// control window for each connections. The HTTP/2 spec does not
// allow this to be smaller than 65535 or larger than 2^32-1.
@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer {
return timeTimer{time.AfterFunc(d, f)}
}
func (s *Server) initialConnRecvWindowSize() int32 {
if s.MaxUploadBufferPerConnection >= initialWindowSize {
return s.MaxUploadBufferPerConnection
}
return 1 << 20
}
func (s *Server) initialStreamRecvWindowSize() int32 {
if s.MaxUploadBufferPerStream > 0 {
return s.MaxUploadBufferPerStream
}
return 1 << 20
}
func (s *Server) maxReadFrameSize() uint32 {
if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
return v
}
return defaultMaxReadFrameSize
}
func (s *Server) maxConcurrentStreams() uint32 {
if v := s.MaxConcurrentStreams; v > 0 {
return v
}
return defaultMaxStreams
}
func (s *Server) maxDecoderHeaderTableSize() uint32 {
if v := s.MaxDecoderHeaderTableSize; v > 0 {
return v
}
return initialHeaderTableSize
}
func (s *Server) maxEncoderHeaderTableSize() uint32 {
if v := s.MaxEncoderHeaderTableSize; v > 0 {
return v
}
return initialHeaderTableSize
}
// maxQueuedControlFrames is the maximum number of control frames like
// SETTINGS, PING and RST_STREAM that will be queued for writing before
// the connection is closed to prevent memory exhaustion attacks.
func (s *Server) maxQueuedControlFrames() int {
// TODO: if anybody asks, add a Server field, and remember to define the
// behavior of negative values.
return maxQueuedControlFrames
}
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
baseCtx, cancel := serverConnBaseContext(c, opts)
defer cancel()
http1srv := opts.baseConfig()
conf := configFromServer(http1srv, s)
sc := &serverConn{
srv: s,
hs: opts.baseConfig(),
hs: http1srv,
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
bw: newBufferedWriter(c),
bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
advMaxStreams: s.maxConcurrentStreams(),
advMaxStreams: conf.MaxConcurrentStreams,
initialStreamSendWindowSize: initialWindowSize,
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxFrameSize: initialMaxFrameSize,
pingTimeout: conf.PingTimeout,
countErrorFunc: conf.CountError,
serveG: newGoroutineLock(),
pushEnabled: true,
sawClientPreface: opts.SawClientPreface,
@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
sc.flow.add(initialWindowSize)
sc.inflow.init(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
fr := NewFramer(sc.bw, c)
if s.CountError != nil {
fr.countError = s.CountError
if conf.CountError != nil {
fr.countError = conf.CountError
}
fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil)
fr.MaxHeaderListSize = sc.maxHeaderListSize()
fr.SetMaxReadFrameSize(s.maxReadFrameSize())
fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
sc.framer = fr
if tc, ok := c.(connectionStater); ok {
@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
// So for now, do nothing here again.
}
if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
// "Endpoints MAY choose to generate a connection error
// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
// the prohibited cipher suites are negotiated."
@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
opts.UpgradeRequest = nil
}
sc.serve()
sc.serve(conf)
}
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
@ -609,6 +584,7 @@ type serverConn struct {
tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string
writeSched WriteScheduler
countErrorFunc func(errType string)
// Everything following is owned by the serve loop; use serveG.check():
serveG goroutineLock // used to verify funcs are on serve()
@ -628,6 +604,7 @@ type serverConn struct {
streams map[uint32]*stream
unstartedHandlers []unstartedHandler
initialStreamSendWindowSize int32
initialStreamRecvWindowSize int32
maxFrameSize int32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
@ -638,9 +615,14 @@ type serverConn struct {
inGoAway bool // we've started to or sent GOAWAY
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write
pingSent bool
sentPingData [8]byte
goAwayCode ErrCode
shutdownTimer timer // nil until used
idleTimer timer // nil if unused
readIdleTimeout time.Duration
pingTimeout time.Duration
readIdleTimer timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 {
if n <= 0 {
n = http.DefaultMaxHeaderBytes
}
// http2's count is in a slightly different unit and includes 32 bytes per pair.
// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
const perFieldOverhead = 32 // per http2 spec
const typicalHeaders = 10 // conservative
return uint32(n + typicalHeaders*perFieldOverhead)
return uint32(adjustHTTP1MaxHeaderSize(int64(n)))
}
func (sc *serverConn) curOpenStreams() uint32 {
@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() {
}
}
func (sc *serverConn) serve() {
func (sc *serverConn) serve(conf http2Config) {
sc.serveG.check()
defer sc.notePanic()
defer sc.conn.Close()
@ -937,18 +915,18 @@ func (sc *serverConn) serve() {
sc.writeFrame(FrameWriteRequest{
write: writeSettings{
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
{SettingMaxFrameSize, conf.MaxReadFrameSize},
{SettingMaxConcurrentStreams, sc.advMaxStreams},
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
{SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
{SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
{SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
},
})
sc.unackedSettings++
// Each connection starts with initialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff))
}
@ -968,11 +946,18 @@ func (sc *serverConn) serve() {
defer sc.idleTimer.Stop()
}
if conf.SendPingTimeout > 0 {
sc.readIdleTimeout = conf.SendPingTimeout
sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
defer sc.readIdleTimer.Stop()
}
go sc.readFrames() // closed by defer sc.conn.Close above
settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
lastFrameTime := sc.srv.now()
loopNum := 0
for {
loopNum++
@ -986,6 +971,7 @@ func (sc *serverConn) serve() {
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
lastFrameTime = sc.srv.now()
// Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started.
if sc.writingFrameAsync {
@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() {
case idleTimerMsg:
sc.vlogf("connection is idle")
sc.goAway(ErrCodeNo)
case readIdleTimerMsg:
sc.handlePingTimer(lastFrameTime)
case shutdownTimerMsg:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() {
// If the peer is causing us to generate a lot of control frames,
// but not reading them from us, assume they are trying to make us
// run out of memory.
if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
if sc.queuedControlFrames > maxQueuedControlFrames {
sc.vlogf("http2: too many control frames in send queue, closing connection")
return
}
@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() {
}
}
func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
if sc.pingSent {
sc.vlogf("timeout waiting for PING response")
sc.conn.Close()
return
}
pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
now := sc.srv.now()
if pingAt.After(now) {
// We received frames since arming the ping timer.
// Reset it for the next possible timeout.
sc.readIdleTimer.Reset(pingAt.Sub(now))
return
}
sc.pingSent = true
// Ignore crypto/rand.Read errors: It generally can't fail, and worst case if it does
// is we send a PING frame containing 0s.
_, _ = rand.Read(sc.sentPingData[:])
sc.writeFrame(FrameWriteRequest{
write: &writePing{data: sc.sentPingData},
})
sc.readIdleTimer.Reset(sc.pingTimeout)
}
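Together with the new Server.ReadIdleTimeout and Server.PingTimeout fields, handlePingTimer gives the server the same liveness check the client already had: after ReadIdleTimeout without any frame it sends a PING, and it closes the connection if no ACK arrives within PingTimeout. A minimal sketch of enabling it on a server that uses this vendored package; the timeouts and certificate paths are illustrative:

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	h1 := &http.Server{Addr: ":8443"} // handler and address are illustrative
	h2 := &http2.Server{
		ReadIdleTimeout: 30 * time.Second, // send a PING after 30s without any frame
		PingTimeout:     15 * time.Second, // close the conn if the PING is not ACKed in 15s
	}
	if err := http2.ConfigureServer(h1, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(h1.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder cert paths
}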
type serverMessage int
// Message values sent to serveMsgCh.
var (
settingsTimerMsg = new(serverMessage)
idleTimerMsg = new(serverMessage)
readIdleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
handlerDoneMsg = new(serverMessage)
@ -1068,6 +1083,7 @@ var (
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
func (sc *serverConn) sendServeMsg(msg interface{}) {
@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
sc.writingFrame = false
sc.writingFrameAsync = false
if res.err != nil {
sc.conn.Close()
}
wr := res.wr
if writeEndsStream(wr.write) {
@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error {
func (sc *serverConn) processPing(f *PingFrame) error {
sc.serveG.check()
if f.IsAck() {
if sc.pingSent && sc.sentPingData == f.Data {
// This is a response to a PING we sent.
sc.pingSent = false
sc.readIdleTimer.Reset(sc.readIdleTimeout)
}
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
// containing this flag."
return nil
@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.init(sc.srv.initialStreamRecvWindowSize())
st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 {
st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error {
if sc == nil || sc.srv == nil {
return err
}
f := sc.srv.CountError
f := sc.countErrorFunc
if f == nil {
return err
}

View File

@ -25,7 +25,6 @@ import (
"net/http"
"net/http/httptrace"
"net/textproto"
"os"
"sort"
"strconv"
"strings"
@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co
}
func (t *Transport) maxHeaderListSize() uint32 {
if t.MaxHeaderListSize == 0 {
n := int64(t.MaxHeaderListSize)
if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 {
n = t.t1.MaxResponseHeaderBytes
if n > 0 {
n = adjustHTTP1MaxHeaderSize(n)
}
}
if n <= 0 {
return 10 << 20
}
if t.MaxHeaderListSize == 0xffffffff {
if n >= 0xffffffff {
return 0
}
return t.MaxHeaderListSize
}
func (t *Transport) maxFrameReadSize() uint32 {
if t.MaxReadFrameSize == 0 {
return 0 // use the default provided by the peer
}
if t.MaxReadFrameSize < minMaxFrameSize {
return minMaxFrameSize
}
if t.MaxReadFrameSize > maxFrameSize {
return maxFrameSize
}
return t.MaxReadFrameSize
return uint32(n)
}
func (t *Transport) disableCompression() bool {
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}
func (t *Transport) pingTimeout() time.Duration {
if t.PingTimeout == 0 {
return 15 * time.Second
}
return t.PingTimeout
}
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns an error if t1 has already been HTTP/2-enabled.
//
@ -375,6 +360,9 @@ type ClientConn struct {
peerMaxHeaderListSize uint64
peerMaxHeaderTableSize uint32
initialWindowSize uint32
initialStreamRecvWindowSize int32
readIdleTimeout time.Duration
pingTimeout time.Duration
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock.
@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() {
}
type stickyErrWriter struct {
group synctestGroupInterface
conn net.Conn
timeout time.Duration
err *error
@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil {
return 0, *sew.err
}
for {
if sew.timeout != 0 {
sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
}
nn, err := sew.conn.Write(p[n:])
n += nn
if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
// Keep extending the deadline so long as we're making progress.
continue
}
if sew.timeout != 0 {
sew.conn.SetWriteDeadline(time.Time{})
}
n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
*sew.err = err
return n, err
}
}
// noCachedConnError is the concrete type of ErrNoCachedConn, which
@ -758,25 +734,12 @@ func (t *Transport) expectContinueTimeout() time.Duration {
return t.t1.ExpectContinueTimeout
}
func (t *Transport) maxDecoderHeaderTableSize() uint32 {
if v := t.MaxDecoderHeaderTableSize; v > 0 {
return v
}
return initialHeaderTableSize
}
func (t *Transport) maxEncoderHeaderTableSize() uint32 {
if v := t.MaxEncoderHeaderTableSize; v > 0 {
return v
}
return initialHeaderTableSize
}
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
return t.newClientConn(c, t.disableKeepAlives())
}
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
conf := configFromTransport(t)
cc := &ClientConn{
t: t,
tconn: c,
@ -784,18 +747,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
nextStreamID: 1,
maxFrameSize: 16 << 10, // spec default
initialWindowSize: 65535, // spec default
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
wantSettingsAck: true,
readIdleTimeout: conf.SendPingTimeout,
pingTimeout: conf.PingTimeout,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
}
var group synctestGroupInterface
if t.transportTestHooks != nil {
t.markNewGoroutine()
t.transportTestHooks.newclientconn(cc)
c = cc.tconn
group = t.group
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@ -807,24 +775,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(stickyErrWriter{
group: group,
conn: c,
timeout: t.WriteByteTimeout,
timeout: conf.WriteByteTimeout,
err: &cc.werr,
})
cc.br = bufio.NewReader(c)
cc.fr = NewFramer(cc.bw, cc.br)
if t.maxFrameReadSize() != 0 {
cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize())
}
cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
if t.CountError != nil {
cc.fr.countError = t.CountError
}
maxHeaderTableSize := t.maxDecoderHeaderTableSize()
maxHeaderTableSize := conf.MaxDecoderHeaderTableSize
cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
cc.henc = hpack.NewEncoder(&cc.hbuf)
cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
cc.peerMaxHeaderTableSize = initialHeaderTableSize
if cs, ok := c.(connectionStater); ok {
@ -834,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
initialSettings := []Setting{
{ID: SettingEnablePush, Val: 0},
{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
}
if max := t.maxFrameReadSize(); max != 0 {
initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max})
{ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)},
}
initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize})
if max := t.maxHeaderListSize(); max != 0 {
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
}
@ -848,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
cc.bw.Write(clientPreface)
cc.fr.WriteSettings(initialSettings...)
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection))
cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize)
cc.bw.Flush()
if cc.werr != nil {
cc.Close()
@ -867,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
}
func (cc *ClientConn) healthCheck() {
pingTimeout := cc.t.pingTimeout()
pingTimeout := cc.pingTimeout
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
@ -2199,7 +2164,7 @@ type resAndError struct {
func (cc *ClientConn) addStreamLocked(cs *clientStream) {
cs.flow.add(int32(cc.initialWindowSize))
cs.flow.setConnFlow(&cc.flow)
cs.inflow.init(transportDefaultStreamFlow)
cs.inflow.init(cc.initialStreamRecvWindowSize)
cs.ID = cc.nextStreamID
cc.nextStreamID += 2
cc.streams[cs.ID] = cs
@ -2345,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) {
func (rl *clientConnReadLoop) run() error {
cc := rl.cc
gotSettings := false
readIdleTimeout := cc.t.ReadIdleTimeout
readIdleTimeout := cc.readIdleTimeout
var t timer
if readIdleTimeout != 0 {
t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)

View File

@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error {
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
type writePing struct {
data [8]byte
}
func (w writePing) writeFrame(ctx writeContext) error {
return ctx.Framer().WritePing(false, w.data)
}
func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max }
type writePingAck struct{ pf *PingFrame }
func (w writePingAck) writeFrame(ctx writeContext) error {

View File

@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these
into a common file for each OS.
The merge is performed in the following steps:
1. Construct the set of common code that is idential in all architecture-specific files.
1. Construct the set of common code that is identical in all architecture-specific files.
2. Write this common code to the merged file.
3. Remove the common code from all architecture-specific files.

View File

@ -552,6 +552,7 @@ ccflags="$@"
$2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
$2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ ||
$2 ~ /^(CONNECT|SAE)_/ ||
$2 ~ /^FIORDCHK$/ ||
$2 ~ /^SIOC/ ||
$2 ~ /^TIOC/ ||
@ -655,7 +656,7 @@ errors=$(
signals=$(
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort
)
@ -665,7 +666,7 @@ echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
sort >_error.grep
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort >_signal.grep
echo '// mkerrors.sh' "$@"

View File

@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int,
var status _C_int
var r Pid_t
err = ERESTART
// AIX wait4 may return with ERESTART errno, while the processus is still
// AIX wait4 may return with ERESTART errno, while the process is still
// active.
for err == ERESTART {
r, err = wait4(Pid_t(pid), &status, options, rusage)

View File

@ -566,6 +566,43 @@ func PthreadFchdir(fd int) (err error) {
return pthread_fchdir_np(fd)
}
// Connectx calls connectx(2) to initiate a connection on a socket.
//
// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument.
//
// - srcIf is the optional source interface index. 0 means unspecified.
// - srcAddr is the optional source address. nil means unspecified.
// - dstAddr is the destination address.
//
// On success, Connectx returns the number of bytes enqueued for transmission.
func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) {
endpoints := SaEndpoints{
Srcif: srcIf,
}
if srcAddr != nil {
addrp, addrlen, err := srcAddr.sockaddr()
if err != nil {
return 0, err
}
endpoints.Srcaddr = (*RawSockaddr)(addrp)
endpoints.Srcaddrlen = uint32(addrlen)
}
if dstAddr != nil {
addrp, addrlen, err := dstAddr.sockaddr()
if err != nil {
return 0, err
}
endpoints.Dstaddr = (*RawSockaddr)(addrp)
endpoints.Dstaddrlen = uint32(addrlen)
}
err = connectx(fd, &endpoints, associd, flags, iov, &n, connid)
return
}
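A hedged, darwin-only sketch of the new wrapper: open a TCP socket and connect it through connectx(2). The destination address is illustrative; the SAE_* and CONNECT_* constants mentioned here are the ones added to the darwin zerrors files in this update:

//go:build darwin

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Illustrative destination; flags could carry CONNECT_RESUME_ON_READ_WRITE
	// or CONNECT_DATA_IDEMPOTENT for TFO-style use, 0 means a plain connect.
	dst := &unix.SockaddrInet4{Port: 443, Addr: [4]byte{93, 184, 216, 34}}
	if _, err := unix.Connectx(fd, 0, nil, dst, unix.SAE_ASSOCID_ANY, 0, nil, nil); err != nil {
		log.Fatal(err)
	}
	log.Println("connected via connectx(2)")
}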
//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)

View File

@ -11,6 +11,7 @@ package unix
int ioctl(int, unsigned long int, uintptr_t);
*/
import "C"
import "unsafe"
func ioctl(fd int, req uint, arg uintptr) (err error) {
r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg))

View File

@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
return &value, err
}
// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas"
// algorithm.
//
// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
//
// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) {
var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
vallen := _Socklen(SizeofTCPCCInfo)
err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
out := (*TCPVegasInfo)(unsafe.Pointer(&value[0]))
return out, err
}
// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctcp"
// algorithm.
//
// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
//
// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) {
var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
vallen := _Socklen(SizeofTCPCCInfo)
err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0]))
return out, err
}
// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr"
// algorithm.
//
// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
//
// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) {
var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
vallen := _Socklen(SizeofTCPCCInfo)
err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
out := (*TCPBBRInfo)(unsafe.Pointer(&value[0]))
return out, err
}
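A hedged sketch of reading these structs on Linux: query the algorithm name first, then fetch the matching per-algorithm counters. It assumes fd is a connected TCP socket and that TCP_CC_INFO is the right socket option to pass; both are assumptions, not taken from the diff above:

//go:build linux

// Package ccinfo is an illustrative sketch, not part of this change.
package ccinfo

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// InspectCC prints congestion-control details for fd, which is assumed to be
// a connected TCP socket descriptor.
func InspectCC(fd int) error {
	algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
	if err != nil {
		return err
	}
	switch algo {
	case "bbr":
		info, err := unix.GetsockoptTCPCCBBRInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
		if err != nil {
			return err
		}
		fmt.Printf("bbr: min_rtt=%dus pacing_gain=%d\n", info.Min_rtt, info.Pacing_gain)
	case "dctcp":
		info, err := unix.GetsockoptTCPCCDCTCPInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
		if err != nil {
			return err
		}
		fmt.Printf("dctcp: alpha=%d\n", info.Alpha)
	case "vegas":
		info, err := unix.GetsockoptTCPCCVegasInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
		if err != nil {
			return err
		}
		fmt.Printf("vegas: rtt=%d minrtt=%d\n", info.Rtt, info.Minrtt)
	default:
		fmt.Printf("no dedicated getter for %q\n", algo)
	}
	return nil
}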
// GetsockoptString returns the string value of the socket option opt for the
// socket associated with fd at the given socket level.
func GetsockoptString(fd, level, opt int) (string, error) {
@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) {
//sysnb Getpid() (pid int)
//sysnb Getppid() (ppid int)
//sys Getpriority(which int, who int) (prio int, err error)
//sys Getrandom(buf []byte, flags int) (n int, err error)
func Getrandom(buf []byte, flags int) (n int, err error) {
vdsoRet, supported := vgetrandom(buf, uint32(flags))
if supported {
if vdsoRet < 0 {
return 0, errnoErr(syscall.Errno(-vdsoRet))
}
return vdsoRet, nil
}
var p *byte
if len(buf) > 0 {
p = &buf[0]
}
r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags))
if e != 0 {
return 0, errnoErr(e)
}
return int(r), nil
}
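Getrandom is now a hand-written wrapper rather than a generated syscall stub: on Go 1.24 with a kernel that exposes the getrandom vDSO it goes through runtime.vgetrandom, otherwise it falls back to the SYS_GETRANDOM syscall. Calling code does not change; a minimal usage sketch:

//go:build linux

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	buf := make([]byte, 32)
	// Fills buf from the kernel CSPRNG; the vDSO fast path is taken when the
	// runtime supports it, otherwise the plain getrandom(2) syscall is used.
	n, err := unix.Getrandom(buf, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d random bytes: %x\n", n, buf[:n])
}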
//sysnb Getrusage(who int, rusage *Rusage) (err error)
//sysnb Getsid(pid int) (sid int, err error)
//sysnb Gettid() (tid int)

View File

@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
const SYS_FSTATAT = SYS_NEWFSTATAT

View File

@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
const SYS_FSTATAT = SYS_NEWFSTATAT

View File

@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error
}
return riscvHWProbe(pairs, setSize, set, flags)
}
const SYS_FSTATAT = SYS_NEWFSTATAT

13
vendor/golang.org/x/sys/unix/vgetrandom_linux.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && go1.24
package unix
import _ "unsafe"
//go:linkname vgetrandom runtime.vgetrandom
//go:noescape
func vgetrandom(p []byte, flags uint32) (ret int, supported bool)

11
vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !linux || !go1.24
package unix
func vgetrandom(p []byte, flags uint32) (ret int, supported bool) {
return -1, false
}

View File

@ -237,6 +237,9 @@ const (
CLOCK_UPTIME_RAW_APPROX = 0x9
CLONE_NOFOLLOW = 0x1
CLONE_NOOWNERCOPY = 0x2
CONNECT_DATA_AUTHENTICATED = 0x4
CONNECT_DATA_IDEMPOTENT = 0x2
CONNECT_RESUME_ON_READ_WRITE = 0x1
CR0 = 0x0
CR1 = 0x1000
CR2 = 0x2000
@ -1265,6 +1268,10 @@ const (
RTV_SSTHRESH = 0x20
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
SAE_ASSOCID_ALL = 0xffffffff
SAE_ASSOCID_ANY = 0x0
SAE_CONNID_ALL = 0xffffffff
SAE_CONNID_ANY = 0x0
SCM_CREDS = 0x3
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x2

View File

@ -237,6 +237,9 @@ const (
CLOCK_UPTIME_RAW_APPROX = 0x9
CLONE_NOFOLLOW = 0x1
CLONE_NOOWNERCOPY = 0x2
CONNECT_DATA_AUTHENTICATED = 0x4
CONNECT_DATA_IDEMPOTENT = 0x2
CONNECT_RESUME_ON_READ_WRITE = 0x1
CR0 = 0x0
CR1 = 0x1000
CR2 = 0x2000
@ -1265,6 +1268,10 @@ const (
RTV_SSTHRESH = 0x20
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
SAE_ASSOCID_ALL = 0xffffffff
SAE_ASSOCID_ANY = 0x0
SAE_CONNID_ALL = 0xffffffff
SAE_CONNID_ANY = 0x0
SCM_CREDS = 0x3
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x2

View File

@ -495,6 +495,7 @@ const (
BPF_F_TEST_REG_INVARIANTS = 0x80
BPF_F_TEST_RND_HI32 = 0x4
BPF_F_TEST_RUN_ON_CPU = 0x1
BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4
BPF_F_TEST_STATE_FREQ = 0x8
BPF_F_TEST_XDP_LIVE_FRAMES = 0x2
BPF_F_XDP_DEV_BOUND_ONLY = 0x40
@ -1922,6 +1923,7 @@ const (
MNT_EXPIRE = 0x4
MNT_FORCE = 0x1
MNT_ID_REQ_SIZE_VER0 = 0x18
MNT_ID_REQ_SIZE_VER1 = 0x20
MODULE_INIT_COMPRESSED_FILE = 0x4
MODULE_INIT_IGNORE_MODVERSIONS = 0x1
MODULE_INIT_IGNORE_VERMAGIC = 0x2
@ -2187,7 +2189,7 @@ const (
NFT_REG_SIZE = 0x10
NFT_REJECT_ICMPX_MAX = 0x3
NFT_RT_MAX = 0x4
NFT_SECMARK_CTX_MAXLEN = 0x100
NFT_SECMARK_CTX_MAXLEN = 0x1000
NFT_SET_MAXNAMELEN = 0x100
NFT_SOCKET_MAX = 0x3
NFT_TABLE_F_MASK = 0x7
@ -2356,9 +2358,11 @@ const (
PERF_MEM_LVLNUM_IO = 0xa
PERF_MEM_LVLNUM_L1 = 0x1
PERF_MEM_LVLNUM_L2 = 0x2
PERF_MEM_LVLNUM_L2_MHB = 0x5
PERF_MEM_LVLNUM_L3 = 0x3
PERF_MEM_LVLNUM_L4 = 0x4
PERF_MEM_LVLNUM_LFB = 0xc
PERF_MEM_LVLNUM_MSC = 0x6
PERF_MEM_LVLNUM_NA = 0xf
PERF_MEM_LVLNUM_PMEM = 0xe
PERF_MEM_LVLNUM_RAM = 0xd
@ -2431,6 +2435,7 @@ const (
PRIO_PGRP = 0x1
PRIO_PROCESS = 0x0
PRIO_USER = 0x2
PROCFS_IOCTL_MAGIC = 'f'
PROC_SUPER_MAGIC = 0x9fa0
PROT_EXEC = 0x4
PROT_GROWSDOWN = 0x1000000
@ -2933,11 +2938,12 @@ const (
RUSAGE_SELF = 0x0
RUSAGE_THREAD = 0x1
RWF_APPEND = 0x10
RWF_ATOMIC = 0x40
RWF_DSYNC = 0x2
RWF_HIPRI = 0x1
RWF_NOAPPEND = 0x20
RWF_NOWAIT = 0x8
RWF_SUPPORTED = 0x3f
RWF_SUPPORTED = 0x7f
RWF_SYNC = 0x4
RWF_WRITE_LIFE_NOT_SET = 0x0
SCHED_BATCH = 0x3
@ -3210,6 +3216,7 @@ const (
STATX_ATTR_MOUNT_ROOT = 0x2000
STATX_ATTR_NODUMP = 0x40
STATX_ATTR_VERITY = 0x100000
STATX_ATTR_WRITE_ATOMIC = 0x400000
STATX_BASIC_STATS = 0x7ff
STATX_BLOCKS = 0x400
STATX_BTIME = 0x800
@ -3226,6 +3233,7 @@ const (
STATX_SUBVOL = 0x8000
STATX_TYPE = 0x1
STATX_UID = 0x8
STATX_WRITE_ATOMIC = 0x10000
STATX__RESERVED = 0x80000000
SYNC_FILE_RANGE_WAIT_AFTER = 0x4
SYNC_FILE_RANGE_WAIT_BEFORE = 0x1
@ -3624,6 +3632,7 @@ const (
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
XDP_UMEM_TX_METADATA_LEN = 0x4
XDP_UMEM_TX_SW_CSUM = 0x2
XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1
XDP_USE_NEED_WAKEUP = 0x8

View File

@ -153,9 +153,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4

View File

@ -153,9 +153,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4

View File

@ -150,9 +150,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4

View File

@ -154,9 +154,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4

View File

@ -154,9 +154,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4

View File

@ -150,9 +150,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4

View File

@ -150,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4

View File

@ -150,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4

View File

@ -150,9 +150,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4

View File

@ -152,9 +152,14 @@ const (
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x4
ONLCR = 0x2

View File

@ -152,9 +152,14 @@ const (
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x4
ONLCR = 0x2

View File

@ -152,9 +152,14 @@ const (
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x4
ONLCR = 0x2

View File

@ -150,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4

View File

@ -150,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4

View File

@ -155,9 +155,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4

View File

@ -581,6 +581,8 @@ const (
AT_EMPTY_PATH = 0x1000
AT_REMOVEDIR = 0x200
RENAME_NOREPLACE = 1 << 0
ST_RDONLY = 1
ST_NOSUID = 2
)
const (

View File

@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) {
var _p0 unsafe.Pointer
if len(iov) > 0 {
_p0 = unsafe.Pointer(&iov[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_connectx_trampoline_addr uintptr
//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 {

View File

@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_connectx(SB)
GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8
DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB)
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8

View File

@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) {
var _p0 unsafe.Pointer
if len(iov) > 0 {
_p0 = unsafe.Pointer(&iov[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_connectx_trampoline_addr uintptr
//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 {

View File

@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_connectx(SB)
GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8
DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB)
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8

View File

@ -971,23 +971,6 @@ func Getpriority(which int, who int) (prio int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrandom(buf []byte, flags int) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrusage(who int, rusage *Rusage) (err error) {
_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {

View File

@ -341,6 +341,7 @@ const (
SYS_STATX = 332
SYS_IO_PGETEVENTS = 333
SYS_RSEQ = 334
SYS_URETPROBE = 335
SYS_PIDFD_SEND_SIGNAL = 424
SYS_IO_URING_SETUP = 425
SYS_IO_URING_ENTER = 426

View File

@ -85,7 +85,7 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
SYS_FSTATAT = 79
SYS_NEWFSTATAT = 79
SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82

View File

@ -84,6 +84,8 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
SYS_NEWFSTATAT = 79
SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82
SYS_FDATASYNC = 83

View File

@ -84,7 +84,7 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
SYS_FSTATAT = 79
SYS_NEWFSTATAT = 79
SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82

View File

@ -306,6 +306,19 @@ type XVSockPgen struct {
type _Socklen uint32
type SaeAssocID uint32
type SaeConnID uint32
type SaEndpoints struct {
Srcif uint32
Srcaddr *RawSockaddr
Srcaddrlen uint32
Dstaddr *RawSockaddr
Dstaddrlen uint32
_ [4]byte
}
type Xucred struct {
Version uint32
Uid uint32

View File

@ -306,6 +306,19 @@ type XVSockPgen struct {
type _Socklen uint32
type SaeAssocID uint32
type SaeConnID uint32
type SaEndpoints struct {
Srcif uint32
Srcaddr *RawSockaddr
Srcaddrlen uint32
Dstaddr *RawSockaddr
Dstaddrlen uint32
_ [4]byte
}
type Xucred struct {
Version uint32
Uid uint32

View File

@ -625,6 +625,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
POLLRDHUP = 0x4000
)
type CapRights struct {

View File

@ -630,6 +630,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
POLLRDHUP = 0x4000
)
type CapRights struct {

View File

@ -616,6 +616,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
POLLRDHUP = 0x4000
)
type CapRights struct {

View File

@ -610,6 +610,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
POLLRDHUP = 0x4000
)
type CapRights struct {

View File

@ -612,6 +612,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
POLLRDHUP = 0x4000
)
type CapRights struct {

View File

@ -111,7 +111,11 @@ type Statx_t struct {
Dio_mem_align uint32
Dio_offset_align uint32
Subvol uint64
_ [11]uint64
Atomic_write_unit_min uint32
Atomic_write_unit_max uint32
Atomic_write_segments_max uint32
_ [1]uint32
_ [9]uint64
}
type Fsid struct {
@ -516,6 +520,29 @@ type TCPInfo struct {
Total_rto_time uint32
}
type TCPVegasInfo struct {
Enabled uint32
Rttcnt uint32
Rtt uint32
Minrtt uint32
}
type TCPDCTCPInfo struct {
Enabled uint16
Ce_state uint16
Alpha uint32
Ab_ecn uint32
Ab_tot uint32
}
type TCPBBRInfo struct {
Bw_lo uint32
Bw_hi uint32
Min_rtt uint32
Pacing_gain uint32
Cwnd_gain uint32
}
type CanFilter struct {
Id uint32
Mask uint32
@ -557,6 +584,7 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0xf8
SizeofTCPCCInfo = 0x14
SizeofCanFilter = 0x8
SizeofTCPRepairOpt = 0x8
)
@ -2486,7 +2514,7 @@ type XDPMmapOffsets struct {
type XDPUmemReg struct {
Addr uint64
Len uint64
Chunk_size uint32
Size uint32
Headroom uint32
Flags uint32
Tx_metadata_len uint32
@ -3766,7 +3794,7 @@ const (
ETHTOOL_MSG_PSE_GET = 0x24
ETHTOOL_MSG_PSE_SET = 0x25
ETHTOOL_MSG_RSS_GET = 0x26
ETHTOOL_MSG_USER_MAX = 0x2b
ETHTOOL_MSG_USER_MAX = 0x2c
ETHTOOL_MSG_KERNEL_NONE = 0x0
ETHTOOL_MSG_STRSET_GET_REPLY = 0x1
ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2
@ -3806,7 +3834,7 @@ const (
ETHTOOL_MSG_MODULE_NTF = 0x24
ETHTOOL_MSG_PSE_GET_REPLY = 0x25
ETHTOOL_MSG_RSS_GET_REPLY = 0x26
ETHTOOL_MSG_KERNEL_MAX = 0x2b
ETHTOOL_MSG_KERNEL_MAX = 0x2c
ETHTOOL_FLAG_COMPACT_BITSETS = 0x1
ETHTOOL_FLAG_OMIT_REPLY = 0x2
ETHTOOL_FLAG_STATS = 0x4
@ -3951,7 +3979,7 @@ const (
ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17
ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18
ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19
ETHTOOL_A_COALESCE_MAX = 0x1c
ETHTOOL_A_COALESCE_MAX = 0x1e
ETHTOOL_A_PAUSE_UNSPEC = 0x0
ETHTOOL_A_PAUSE_HEADER = 0x1
ETHTOOL_A_PAUSE_AUTONEG = 0x2
@ -4609,7 +4637,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
NL80211_ATTR_MAX = 0x14a
NL80211_ATTR_MAX = 0x14c
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
@ -5213,7 +5241,7 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_MAX = 0x20
NL80211_FREQUENCY_ATTR_MAX = 0x21
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc

View File

@ -727,6 +727,37 @@ const (
RISCV_HWPROBE_EXT_ZBA = 0x8
RISCV_HWPROBE_EXT_ZBB = 0x10
RISCV_HWPROBE_EXT_ZBS = 0x20
RISCV_HWPROBE_EXT_ZICBOZ = 0x40
RISCV_HWPROBE_EXT_ZBC = 0x80
RISCV_HWPROBE_EXT_ZBKB = 0x100
RISCV_HWPROBE_EXT_ZBKC = 0x200
RISCV_HWPROBE_EXT_ZBKX = 0x400
RISCV_HWPROBE_EXT_ZKND = 0x800
RISCV_HWPROBE_EXT_ZKNE = 0x1000
RISCV_HWPROBE_EXT_ZKNH = 0x2000
RISCV_HWPROBE_EXT_ZKSED = 0x4000
RISCV_HWPROBE_EXT_ZKSH = 0x8000
RISCV_HWPROBE_EXT_ZKT = 0x10000
RISCV_HWPROBE_EXT_ZVBB = 0x20000
RISCV_HWPROBE_EXT_ZVBC = 0x40000
RISCV_HWPROBE_EXT_ZVKB = 0x80000
RISCV_HWPROBE_EXT_ZVKG = 0x100000
RISCV_HWPROBE_EXT_ZVKNED = 0x200000
RISCV_HWPROBE_EXT_ZVKNHA = 0x400000
RISCV_HWPROBE_EXT_ZVKNHB = 0x800000
RISCV_HWPROBE_EXT_ZVKSED = 0x1000000
RISCV_HWPROBE_EXT_ZVKSH = 0x2000000
RISCV_HWPROBE_EXT_ZVKT = 0x4000000
RISCV_HWPROBE_EXT_ZFH = 0x8000000
RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000
RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000
RISCV_HWPROBE_EXT_ZVFH = 0x40000000
RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000
RISCV_HWPROBE_EXT_ZFA = 0x100000000
RISCV_HWPROBE_EXT_ZTSO = 0x200000000
RISCV_HWPROBE_EXT_ZACAS = 0x400000000
RISCV_HWPROBE_EXT_ZICOND = 0x800000000
RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000
RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5
RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0
RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1
@ -734,4 +765,6 @@ const (
RISCV_HWPROBE_MISALIGNED_FAST = 0x3
RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4
RISCV_HWPROBE_MISALIGNED_MASK = 0x7
RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6
RISCV_HWPROBE_WHICH_CPUS = 0x1
)

14
vendor/modules.txt vendored
View File

@ -2,10 +2,10 @@
## explicit; go 1.20
filippo.io/edwards25519
filippo.io/edwards25519/field
# github.com/antchfx/htmlquery v1.3.2
# github.com/antchfx/htmlquery v1.3.3
## explicit; go 1.14
github.com/antchfx/htmlquery
# github.com/antchfx/xpath v1.3.1
# github.com/antchfx/xpath v1.3.2
## explicit; go 1.14
github.com/antchfx/xpath
# github.com/go-sql-driver/mysql v1.8.1
@ -49,7 +49,7 @@ github.com/mattn/go-colorable
# github.com/mattn/go-isatty v0.0.20
## explicit; go 1.15
github.com/mattn/go-isatty
# github.com/mattn/go-sqlite3 v1.14.22
# github.com/mattn/go-sqlite3 v1.14.24
## explicit; go 1.19
github.com/mattn/go-sqlite3
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
@ -85,11 +85,11 @@ github.com/valyala/bytebufferpool
# github.com/valyala/fasttemplate v1.2.2
## explicit; go 1.12
github.com/valyala/fasttemplate
# golang.org/x/crypto v0.26.0
# golang.org/x/crypto v0.28.0
## explicit; go 1.20
golang.org/x/crypto/acme
golang.org/x/crypto/acme/autocert
# golang.org/x/net v0.28.0
# golang.org/x/net v0.30.0
## explicit; go 1.18
golang.org/x/net/html
golang.org/x/net/html/atom
@ -99,10 +99,10 @@ golang.org/x/net/http2
golang.org/x/net/http2/h2c
golang.org/x/net/http2/hpack
golang.org/x/net/idna
# golang.org/x/sys v0.24.0
# golang.org/x/sys v0.26.0
## explicit; go 1.18
golang.org/x/sys/unix
# golang.org/x/text v0.17.0
# golang.org/x/text v0.19.0
## explicit; go 1.18
golang.org/x/text/encoding
golang.org/x/text/encoding/charmap