Compare commits

No commits in common. "master" and "1.5.4" have entirely different histories.

70 changed files with 4106 additions and 6830 deletions

.drone.yml

@@ -1,19 +1,14 @@
 ---
 kind: pipeline
 type: docker
-name: build-linux-amd64
+name: build-linux
-platform:
-  os: linux
-  arch: amd64
 steps:
-- name: build
+- name: build-linux-amd64
-  image: golang:1.23
+  image: golang
   commands:
   - go build -o $PROJECTNAME $GOOPTIONS $SRCFILES
   environment:
-    CGO_ENABLED: 1
     GOOS: linux
     GOARCH: amd64
     GOOPTIONS: -mod=vendor
@@ -23,52 +18,11 @@ steps:
     event:
       exclude:
      - tag
-- name: release
-  image: golang:1.23
-  commands:
-  - go build -o $PROJECTNAME $GOOPTIONS $SRCFILES
-  - tar -czvf $PROJECTNAME-$DRONE_TAG-$GOOS-$GOARCH.tar.gz $PROJECTNAME
-  - echo $PROJECTNAME $DRONE_TAG > VERSION
-  environment:
-    CGO_ENABLED: 1
-    GOOS: linux
-    GOARCH: amd64
-    GOOPTIONS: -mod=vendor
-    SRCFILES: cmd/qrz/*.go
-    PROJECTNAME: qrz
-  when:
-    event:
-    - tag
-- name: publish
-  image: plugins/gitea-release
-  settings:
-    base_url: https://git.paulbsd.com
-    api_key:
-      from_secret: gitea_token
-    files: "*.tar.gz"
-    title: VERSION
-  when:
-    event:
-    - tag
----
-kind: pipeline
-type: docker
-name: build-linux-arm64
-platform:
-  os: linux
-  arch: arm64
-steps:
-- name: build
-  image: golang:1.23
+- name: build-linux-arm64
+  image: golang
   commands:
   - go build -o $PROJECTNAME $GOOPTIONS $SRCFILES
   environment:
-    CGO_ENABLED: 1
     GOOS: linux
     GOARCH: arm64
     GOOPTIONS: -mod=vendor
@@ -79,14 +33,34 @@ steps:
       exclude:
      - tag
-- name: release
-  image: golang:1.23
+---
+kind: pipeline
+type: docker
+name: gitea-release-linux
+steps:
+- name: build-linux-amd64
+  image: golang
+  commands:
+  - go build -o $PROJECTNAME $GOOPTIONS $SRCFILES
+  - tar -czvf $PROJECTNAME-$DRONE_TAG-$GOOS-$GOARCH.tar.gz $PROJECTNAME
+  - echo $PROJECTNAME $DRONE_TAG > VERSION
+  environment:
+    GOOS: linux
+    GOARCH: amd64
+    GOOPTIONS: -mod=vendor
+    SRCFILES: cmd/qrz/*.go
+    PROJECTNAME: qrz
+  when:
+    event:
+    - tag
+- name: build-linux-arm64
+  image: golang
   commands:
   - go build -o $PROJECTNAME $GOOPTIONS $SRCFILES
   - tar -czvf $PROJECTNAME-$DRONE_TAG-$GOOS-$GOARCH.tar.gz $PROJECTNAME
   - echo $PROJECTNAME $DRONE_TAG > VERSION
   environment:
-    CGO_ENABLED: 1
    GOOS: linux
     GOARCH: arm64
     GOOPTIONS: -mod=vendor
@@ -95,8 +69,7 @@ steps:
   when:
     event:
     - tag
-- name: publish
+- name: release
   image: plugins/gitea-release
   settings:
     base_url: https://git.paulbsd.com

go.mod

@@ -3,20 +3,20 @@ module git.paulbsd.com/paulbsd/qrz
 go 1.23

 require (
-	github.com/antchfx/htmlquery v1.3.3
+	github.com/antchfx/htmlquery v1.3.2
-	github.com/antchfx/xpath v1.3.2 // indirect
+	github.com/antchfx/xpath v1.3.1 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
 	github.com/labstack/echo/v4 v4.12.0
 	github.com/lib/pq v1.10.9
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-sqlite3 v1.14.24
+	github.com/mattn/go-sqlite3 v1.14.22
 	github.com/onsi/ginkgo v1.16.5 // indirect
 	github.com/onsi/gomega v1.23.0 // indirect
 	github.com/robfig/cron v1.2.0
-	golang.org/x/crypto v0.28.0 // indirect
+	golang.org/x/crypto v0.26.0 // indirect
-	golang.org/x/net v0.30.0
+	golang.org/x/net v0.28.0
-	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/sys v0.24.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
+	golang.org/x/text v0.17.0 // indirect
 	gopkg.in/ini.v1 v1.67.0
 	xorm.io/builder v0.3.13 // indirect
 	xorm.io/xorm v1.3.9

go.sum

@@ -25,8 +25,6 @@ github.com/antchfx/htmlquery v1.3.1 h1:wm0LxjLMsZhRHfQKKZscDf2COyH4vDYA3wyH+qZ+Y
 github.com/antchfx/htmlquery v1.3.1/go.mod h1:PTj+f1V2zksPlwNt7uVvZPsxpKNa7mlVliCRxLX6Nx8=
 github.com/antchfx/htmlquery v1.3.2 h1:85YdttVkR1rAY+Oiv/nKI4FCimID+NXhDn82kz3mEvs=
 github.com/antchfx/htmlquery v1.3.2/go.mod h1:1mbkcEgEarAokJiWhTfr4hR06w/q2ZZjnYLrDt6CTUk=
-github.com/antchfx/htmlquery v1.3.3 h1:x6tVzrRhVNfECDaVxnZi1mEGrQg3mjE/rxbH2Pe6dNE=
-github.com/antchfx/htmlquery v1.3.3/go.mod h1:WeU3N7/rL6mb6dCwtE30dURBnBieKDC/fR8t6X+cKjU=
 github.com/antchfx/xpath v1.2.3/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
 github.com/antchfx/xpath v1.2.4 h1:dW1HB/JxKvGtJ9WyVGJ0sIoEcqftV3SqIstujI+B9XY=
 github.com/antchfx/xpath v1.2.4/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
@@ -36,8 +34,6 @@ github.com/antchfx/xpath v1.3.0 h1:nTMlzGAK3IJ0bPpME2urTuFL76o4A96iYvoKFHRXJgc=
 github.com/antchfx/xpath v1.3.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
 github.com/antchfx/xpath v1.3.1 h1:PNbFuUqHwWl0xRjvUPjJ95Agbmdj2uzzIwmQKgu4oCk=
 github.com/antchfx/xpath v1.3.1/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
-github.com/antchfx/xpath v1.3.2 h1:LNjzlsSjinu3bQpw9hWMY9ocB80oLOWuQqFvO6xt51U=
-github.com/antchfx/xpath v1.3.2/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -323,8 +319,6 @@ github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S
 github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
 github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
 github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
-github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
-github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
 github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -524,8 +518,6 @@ golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
 golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
 golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
 golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -582,8 +574,6 @@ golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
 golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
 golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
-golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
-golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -656,8 +646,6 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
 golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
 golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -687,8 +675,6 @@ golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
 golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
 golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=


@@ -262,9 +262,9 @@ func ToSlice(qrz Qrz) (out []string) {
 type Qrz struct {
 	ID      int    `db:"id" xorm:"pk autoincr"`
 	QRZ     string `db:"qrz" xorm:"index notnull"`
-	DMRID   string `db:"dmrid" xorm:"index notnull"`
+	DMRID   string `db:"dmrid" xorm:"notnull"`
 	Name    string `db:"name" xorm:"index notnull"`
-	Address string `db:"address" xorm:"index notnull"`
+	Address string `db:"address" xorm:"notnull"`
 	City    string `db:"city" xorm:"index notnull"`
 	Zipcode string `db:"zipcode" xorm:"index varchar(5) notnull"`
 	Dept    string `db:"dept" xorm:"index notnull"`


@@ -226,9 +226,9 @@ func SetSearchLike(config config.Config, qrzdt *QrzDatatableInput) (searchstmt s
 	var searchstr string
 	switch config.DbType {
 	case "sqlite3":
-		searchstr = "%s LIKE '%s%%'"
+		searchstr = "%s LIKE '%%%s%%'"
 	case "mysql":
-		searchstr = "%s LIKE '%s%%'"
+		searchstr = "%s LIKE '%%%s%%'"
 	case "postgresql":
 		searchstr = "%s ~* '%s'"
 	default:
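
Note on this hunk: the only change is the LIKE pattern. The master side formats a prefix match (term%), while the 1.5.4 side wraps the term on both sides (%term%), i.e. a substring match. A minimal sketch of what the two format strings expand to, using a hypothetical column name and search term purely for illustration:

package main

import "fmt"

func main() {
	// Hypothetical column and search term, not taken from the project.
	col, term := "name", "mart"

	// master side: prefix match
	fmt.Println(fmt.Sprintf("%s LIKE '%s%%'", col, term)) // name LIKE 'mart%'

	// 1.5.4 side: substring match
	fmt.Println(fmt.Sprintf("%s LIKE '%%%s%%'", col, term)) // name LIKE '%mart%'
}

The postgresql branch is unchanged, presumably because ~* is already an unanchored, case-insensitive regex match.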


@@ -45,9 +45,28 @@ type builder struct {
 // axisPredicate creates a predicate to predicating for this axis node.
 func axisPredicate(root *axisNode) func(NodeNavigator) bool {
+	// get current axix node type.
+	typ := ElementNode
+	switch root.AxeType {
+	case "attribute":
+		typ = AttributeNode
+	case "self", "parent":
+		typ = allNode
+	default:
+		switch root.Prop {
+		case "comment":
+			typ = CommentNode
+		case "text":
+			typ = TextNode
+			// case "processing-instruction":
+			//	typ = ProcessingInstructionNode
+		case "node":
+			typ = allNode
+		}
+	}
 	nametest := root.LocalName != "" || root.Prefix != ""
 	predicate := func(n NodeNavigator) bool {
-		if root.typeTest == n.NodeType() || root.typeTest == allNode {
+		if typ == n.NodeType() || typ == allNode {
 			if nametest {
 				type namespaceURL interface {
 					NamespaceURL() string
@ -83,35 +102,39 @@ func (b *builder) processAxis(root *axisNode, flags flag, props *builderProp) (q
*props = builderProps.None *props = builderProps.None
} else { } else {
inputFlags := flagsEnum.None inputFlags := flagsEnum.None
if (flags & flagsEnum.Filter) == 0 { if root.AxeType == "child" && (root.Input.Type() == nodeAxis) {
if root.AxisType == "child" && (root.Input.Type() == nodeAxis) { if input := root.Input.(*axisNode); input.AxeType == "descendant-or-self" {
if input := root.Input.(*axisNode); input.AxisType == "descendant-or-self" { var qyGrandInput query
var qyGrandInput query if input.Input != nil {
if input.Input != nil { qyGrandInput, _ = b.processNode(input.Input, flagsEnum.SmartDesc, props)
qyGrandInput, err = b.processNode(input.Input, flagsEnum.SmartDesc, props) } else {
if err != nil { qyGrandInput = &contextQuery{}
return nil, err
}
} else {
qyGrandInput = &contextQuery{}
}
qyOutput = &descendantQuery{name: root.LocalName, Input: qyGrandInput, Predicate: predicate, Self: false}
*props |= builderProps.NonFlat
return qyOutput, nil
} }
// fix #20: https://github.com/antchfx/htmlquery/issues/20
filter := func(n NodeNavigator) bool {
v := predicate(n)
switch root.Prop {
case "text":
v = v && n.NodeType() == TextNode
case "comment":
v = v && n.NodeType() == CommentNode
}
return v
}
qyOutput = &descendantQuery{name: root.LocalName, Input: qyGrandInput, Predicate: filter, Self: false}
*props |= builderProps.NonFlat
return qyOutput, nil
} }
if root.AxisType == "descendant" || root.AxisType == "descendant-or-self" { } else if ((flags & flagsEnum.Filter) == 0) && (root.AxeType == "descendant" || root.AxeType == "descendant-or-self") {
inputFlags |= flagsEnum.SmartDesc inputFlags |= flagsEnum.SmartDesc
}
} }
qyInput, err = b.processNode(root.Input, inputFlags, props) qyInput, err = b.processNode(root.Input, inputFlags, props)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
switch root.AxisType { switch root.AxeType {
case "ancestor": case "ancestor":
qyOutput = &ancestorQuery{name: root.LocalName, Input: qyInput, Predicate: predicate} qyOutput = &ancestorQuery{name: root.LocalName, Input: qyInput, Predicate: predicate}
*props |= builderProps.NonFlat *props |= builderProps.NonFlat
@ -121,10 +144,22 @@ func (b *builder) processAxis(root *axisNode, flags flag, props *builderProp) (q
case "attribute": case "attribute":
qyOutput = &attributeQuery{name: root.LocalName, Input: qyInput, Predicate: predicate} qyOutput = &attributeQuery{name: root.LocalName, Input: qyInput, Predicate: predicate}
case "child": case "child":
filter := func(n NodeNavigator) bool {
v := predicate(n)
switch root.Prop {
case "text":
v = v && n.NodeType() == TextNode
case "node":
v = v && (n.NodeType() == ElementNode || n.NodeType() == TextNode)
case "comment":
v = v && n.NodeType() == CommentNode
}
return v
}
if (*props & builderProps.NonFlat) == 0 { if (*props & builderProps.NonFlat) == 0 {
qyOutput = &childQuery{name: root.LocalName, Input: qyInput, Predicate: predicate} qyOutput = &childQuery{name: root.LocalName, Input: qyInput, Predicate: filter}
} else { } else {
qyOutput = &cachedChildQuery{name: root.LocalName, Input: qyInput, Predicate: predicate} qyOutput = &cachedChildQuery{name: root.LocalName, Input: qyInput, Predicate: filter}
} }
case "descendant": case "descendant":
if (flags & flagsEnum.SmartDesc) != flagsEnum.None { if (flags & flagsEnum.SmartDesc) != flagsEnum.None {
@ -157,7 +192,7 @@ func (b *builder) processAxis(root *axisNode, flags flag, props *builderProp) (q
case "namespace": case "namespace":
// haha,what will you do someting?? // haha,what will you do someting??
default: default:
err = fmt.Errorf("unknown axe type: %s", root.AxisType) err = fmt.Errorf("unknown axe type: %s", root.AxeType)
return nil, err return nil, err
} }
return qyOutput, nil return qyOutput, nil
@ -200,6 +235,7 @@ func (b *builder) processFilter(root *filterNode, flags flag, props *builderProp
*props |= builderProps.PosFilter *props |= builderProps.PosFilter
} }
merge := (qyInput.Properties() & queryProps.Merge) != 0
if (propsCond & builderProps.HasPosition) != builderProps.None { if (propsCond & builderProps.HasPosition) != builderProps.None {
if (propsCond & builderProps.HasLast) != 0 { if (propsCond & builderProps.HasLast) != 0 {
// https://github.com/antchfx/xpath/issues/76 // https://github.com/antchfx/xpath/issues/76
@ -207,15 +243,16 @@ func (b *builder) processFilter(root *filterNode, flags flag, props *builderProp
if qyFunc, ok := cond.(*functionQuery); ok { if qyFunc, ok := cond.(*functionQuery); ok {
switch qyFunc.Input.(type) { switch qyFunc.Input.(type) {
case *filterQuery: case *filterQuery:
cond = &lastFuncQuery{Input: qyFunc.Input} cond = &lastQuery{Input: qyFunc.Input}
} }
} }
} }
} }
merge := (qyInput.Properties() & queryProps.Merge) != 0
if first && firstInput != nil { if first && firstInput != nil {
if merge && ((*props & builderProps.PosFilter) != 0) { if merge && ((*props & builderProps.PosFilter) != 0) {
qyInput = &filterQuery{Input: qyInput, Predicate: cond, NoPosition: false}
var ( var (
rootQuery = &contextQuery{} rootQuery = &contextQuery{}
parent query parent query
@ -278,11 +315,10 @@ func (b *builder) processFilter(root *filterNode, flags flag, props *builderProp
} }
} }
b.firstInput = nil b.firstInput = nil
child := &filterQuery{Input: qyInput, Predicate: cond, NoPosition: false}
if parent != nil { if parent != nil {
return &mergeQuery{Input: parent, Child: child}, nil return &mergeQuery{Input: parent, Child: qyInput}, nil
} }
return child, nil return qyInput, nil
} }
b.firstInput = nil b.firstInput = nil
} }
@ -307,7 +343,7 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if err != nil { if err != nil {
return nil, err return nil, err
} }
qyOutput = &functionQuery{Func: lowerCaseFunc(arg)} qyOutput = &functionQuery{Input: arg, Func: lowerCaseFunc}
case "starts-with": case "starts-with":
arg1, err := b.processNode(root.Args[0], flagsEnum.None, props) arg1, err := b.processNode(root.Args[0], flagsEnum.None, props)
if err != nil { if err != nil {
@ -410,17 +446,14 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
} }
qyOutput = &functionQuery{Func: stringLengthFunc(arg1)} qyOutput = &functionQuery{Func: stringLengthFunc(arg1)}
case "normalize-space": case "normalize-space":
var arg node if len(root.Args) == 0 {
if len(root.Args) > 0 { return nil, errors.New("xpath: normalize-space function must have at least one parameter")
arg = root.Args[0]
} else {
arg = newAxisNode("self", allNode, "", "", "", nil)
} }
arg1, err := b.processNode(arg, flagsEnum.None, props) argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props)
if err != nil { if err != nil {
return nil, err return nil, err
} }
qyOutput = &functionQuery{Func: normalizespaceFunc(arg1)} qyOutput = &functionQuery{Input: argQuery, Func: normalizespaceFunc}
case "replace": case "replace":
//replace( string , string, string ) //replace( string , string, string )
if len(root.Args) != 3 { if len(root.Args) != 3 {
@ -467,7 +500,7 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if err != nil { if err != nil {
return nil, err return nil, err
} }
qyOutput = &functionQuery{Func: notFunc(argQuery)} qyOutput = &functionQuery{Input: argQuery, Func: notFunc}
case "name", "local-name", "namespace-uri": case "name", "local-name", "namespace-uri":
if len(root.Args) > 1 { if len(root.Args) > 1 {
return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName) return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName)
@ -498,10 +531,17 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
}, },
} }
case "last": case "last":
qyOutput = &functionQuery{Input: b.firstInput, Func: lastFunc()} //switch typ := b.firstInput.(type) {
//case *groupQuery, *filterQuery:
// https://github.com/antchfx/xpath/issues/76
// https://github.com/antchfx/xpath/issues/78
//qyOutput = &lastQuery{Input: typ}
//default:
qyOutput = &functionQuery{Func: lastFunc}
//}
*props |= builderProps.HasLast *props |= builderProps.HasLast
case "position": case "position":
qyOutput = &functionQuery{Input: b.firstInput, Func: positionFunc()} qyOutput = &functionQuery{Func: positionFunc}
*props |= builderProps.HasPosition *props |= builderProps.HasPosition
case "boolean", "number", "string": case "boolean", "number", "string":
var inp query var inp query
@ -515,14 +555,16 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
} }
inp = argQuery inp = argQuery
} }
f := &functionQuery{Input: inp}
switch root.FuncName { switch root.FuncName {
case "boolean": case "boolean":
qyOutput = &functionQuery{Func: booleanFunc(inp)} f.Func = booleanFunc
case "string": case "string":
qyOutput = &functionQuery{Func: stringFunc(inp)} f.Func = stringFunc
case "number": case "number":
qyOutput = &functionQuery{Func: numberFunc(inp)} f.Func = numberFunc
} }
qyOutput = f
case "count": case "count":
if len(root.Args) == 0 { if len(root.Args) == 0 {
return nil, fmt.Errorf("xpath: count(node-sets) function must with have parameters node-sets") return nil, fmt.Errorf("xpath: count(node-sets) function must with have parameters node-sets")
@ -531,7 +573,7 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if err != nil { if err != nil {
return nil, err return nil, err
} }
qyOutput = &functionQuery{Func: countFunc(argQuery)} qyOutput = &functionQuery{Input: argQuery, Func: countFunc}
case "sum": case "sum":
if len(root.Args) == 0 { if len(root.Args) == 0 {
return nil, fmt.Errorf("xpath: sum(node-sets) function must with have parameters node-sets") return nil, fmt.Errorf("xpath: sum(node-sets) function must with have parameters node-sets")
@ -540,7 +582,7 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if err != nil { if err != nil {
return nil, err return nil, err
} }
qyOutput = &functionQuery{Func: sumFunc(argQuery)} qyOutput = &functionQuery{Input: argQuery, Func: sumFunc}
case "ceiling", "floor", "round": case "ceiling", "floor", "round":
if len(root.Args) == 0 { if len(root.Args) == 0 {
return nil, fmt.Errorf("xpath: ceiling(node-sets) function must with have parameters node-sets") return nil, fmt.Errorf("xpath: ceiling(node-sets) function must with have parameters node-sets")
@ -549,14 +591,16 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if err != nil { if err != nil {
return nil, err return nil, err
} }
f := &functionQuery{Input: argQuery}
switch root.FuncName { switch root.FuncName {
case "ceiling": case "ceiling":
qyOutput = &functionQuery{Func: ceilingFunc(argQuery)} f.Func = ceilingFunc
case "floor": case "floor":
qyOutput = &functionQuery{Func: floorFunc(argQuery)} f.Func = floorFunc
case "round": case "round":
qyOutput = &functionQuery{Func: roundFunc(argQuery)} f.Func = roundFunc
} }
qyOutput = f
case "concat": case "concat":
if len(root.Args) < 2 { if len(root.Args) < 2 {
return nil, fmt.Errorf("xpath: concat() must have at least two arguments") return nil, fmt.Errorf("xpath: concat() must have at least two arguments")
@ -583,7 +627,7 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if len(root.Args) != 2 { if len(root.Args) != 2 {
return nil, fmt.Errorf("xpath: string-join(node-sets, separator) function requires node-set and argument") return nil, fmt.Errorf("xpath: string-join(node-sets, separator) function requires node-set and argument")
} }
input, err := b.processNode(root.Args[0], flagsEnum.None, props) argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -591,10 +635,14 @@ func (b *builder) processFunction(root *functionNode, props *builderProp) (query
if err != nil { if err != nil {
return nil, err return nil, err
} }
qyOutput = &functionQuery{Func: stringJoinFunc(input, arg1)} qyOutput = &functionQuery{Input: argQuery, Func: stringJoinFunc(arg1)}
default: default:
return nil, fmt.Errorf("not yet support this function %s()", root.FuncName) return nil, fmt.Errorf("not yet support this function %s()", root.FuncName)
} }
if funcQuery, ok := qyOutput.(*functionQuery); ok && funcQuery.Input == nil {
funcQuery.Input = b.firstInput
}
return qyOutput, nil return qyOutput, nil
} }


@ -37,83 +37,75 @@ func predicate(q query) func(NodeNavigator) bool {
} }
// positionFunc is a XPath Node Set functions position(). // positionFunc is a XPath Node Set functions position().
func positionFunc() func(query, iterator) interface{} { func positionFunc(q query, t iterator) interface{} {
return func(q query, t iterator) interface{} { var (
var ( count = 1
count = 1 node = t.Current().Copy()
node = t.Current().Copy() )
) test := predicate(q)
test := predicate(q) for node.MoveToPrevious() {
for node.MoveToPrevious() { if test(node) {
if test(node) { count++
count++
}
} }
return float64(count)
} }
return float64(count)
} }
// lastFunc is a XPath Node Set functions last(). // lastFunc is a XPath Node Set functions last().
func lastFunc() func(query, iterator) interface{} { func lastFunc(q query, t iterator) interface{} {
return func(q query, t iterator) interface{} { var (
var ( count = 0
count = 0 node = t.Current().Copy()
node = t.Current().Copy() )
) node.MoveToFirst()
test := predicate(q) test := predicate(q)
node.MoveToFirst() for {
for { if test(node) {
if test(node) { count++
count++ }
} if !node.MoveToNext() {
if !node.MoveToNext() { break
break
}
} }
return float64(count)
} }
return float64(count)
} }
// countFunc is a XPath Node Set functions count(node-set). // countFunc is a XPath Node Set functions count(node-set).
func countFunc(arg query) func(query, iterator) interface{} { func countFunc(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} { var count = 0
var count = 0 q = functionArgs(q)
q := functionArgs(arg) test := predicate(q)
test := predicate(q) switch typ := q.Evaluate(t).(type) {
switch typ := q.Evaluate(t).(type) { case query:
case query: for node := typ.Select(t); node != nil; node = typ.Select(t) {
for node := typ.Select(t); node != nil; node = typ.Select(t) { if test(node) {
if test(node) { count++
count++
}
} }
} }
return float64(count)
} }
return float64(count)
} }
// sumFunc is a XPath Node Set functions sum(node-set). // sumFunc is a XPath Node Set functions sum(node-set).
func sumFunc(arg query) func(query, iterator) interface{} { func sumFunc(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} { var sum float64
var sum float64 switch typ := functionArgs(q).Evaluate(t).(type) {
switch typ := functionArgs(arg).Evaluate(t).(type) { case query:
case query: for node := typ.Select(t); node != nil; node = typ.Select(t) {
for node := typ.Select(t); node != nil; node = typ.Select(t) { if v, err := strconv.ParseFloat(node.Value(), 64); err == nil {
if v, err := strconv.ParseFloat(node.Value(), 64); err == nil { sum += v
sum += v
}
} }
case float64:
sum = typ
case string:
v, err := strconv.ParseFloat(typ, 64)
if err != nil {
panic(errors.New("sum() function argument type must be a node-set or number"))
}
sum = v
} }
return sum case float64:
sum = typ
case string:
v, err := strconv.ParseFloat(typ, 64)
if err != nil {
panic(errors.New("sum() function argument type must be a node-set or number"))
}
sum = v
} }
return sum
} }
func asNumber(t iterator, o interface{}) float64 { func asNumber(t iterator, o interface{}) float64 {
@ -138,36 +130,30 @@ func asNumber(t iterator, o interface{}) float64 {
} }
// ceilingFunc is a XPath Node Set functions ceiling(node-set). // ceilingFunc is a XPath Node Set functions ceiling(node-set).
func ceilingFunc(arg query) func(query, iterator) interface{} { func ceilingFunc(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} { val := asNumber(t, functionArgs(q).Evaluate(t))
val := asNumber(t, functionArgs(arg).Evaluate(t)) // if math.IsNaN(val) {
// if math.IsNaN(val) { // panic(errors.New("ceiling() function argument type must be a valid number"))
// panic(errors.New("ceiling() function argument type must be a valid number")) // }
// } return math.Ceil(val)
return math.Ceil(val)
}
} }
// floorFunc is a XPath Node Set functions floor(node-set). // floorFunc is a XPath Node Set functions floor(node-set).
func floorFunc(arg query) func(query, iterator) interface{} { func floorFunc(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} { val := asNumber(t, functionArgs(q).Evaluate(t))
val := asNumber(t, functionArgs(arg).Evaluate(t)) return math.Floor(val)
return math.Floor(val)
}
} }
// roundFunc is a XPath Node Set functions round(node-set). // roundFunc is a XPath Node Set functions round(node-set).
func roundFunc(arg query) func(query, iterator) interface{} { func roundFunc(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} { val := asNumber(t, functionArgs(q).Evaluate(t))
val := asNumber(t, functionArgs(arg).Evaluate(t)) //return math.Round(val)
//return math.Round(val) return round(val)
return round(val)
}
} }
// nameFunc is a XPath functions name([node-set]). // nameFunc is a XPath functions name([node-set]).
func nameFunc(arg query) func(query, iterator) interface{} { func nameFunc(arg query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
var v NodeNavigator var v NodeNavigator
if arg == nil { if arg == nil {
v = t.Current() v = t.Current()
@ -187,7 +173,7 @@ func nameFunc(arg query) func(query, iterator) interface{} {
// localNameFunc is a XPath functions local-name([node-set]). // localNameFunc is a XPath functions local-name([node-set]).
func localNameFunc(arg query) func(query, iterator) interface{} { func localNameFunc(arg query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
var v NodeNavigator var v NodeNavigator
if arg == nil { if arg == nil {
v = t.Current() v = t.Current()
@ -203,7 +189,7 @@ func localNameFunc(arg query) func(query, iterator) interface{} {
// namespaceFunc is a XPath functions namespace-uri([node-set]). // namespaceFunc is a XPath functions namespace-uri([node-set]).
func namespaceFunc(arg query) func(query, iterator) interface{} { func namespaceFunc(arg query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
var v NodeNavigator var v NodeNavigator
if arg == nil { if arg == nil {
v = t.Current() v = t.Current()
@ -270,32 +256,26 @@ func asString(t iterator, v interface{}) string {
} }
// booleanFunc is a XPath functions boolean([node-set]). // booleanFunc is a XPath functions boolean([node-set]).
func booleanFunc(arg1 query) func(query, iterator) interface{} { func booleanFunc(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} { v := functionArgs(q).Evaluate(t)
v := functionArgs(arg1).Evaluate(t) return asBool(t, v)
return asBool(t, v)
}
} }
// numberFunc is a XPath functions number([node-set]). // numberFunc is a XPath functions number([node-set]).
func numberFunc(arg1 query) func(query, iterator) interface{} { func numberFunc(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} { v := functionArgs(q).Evaluate(t)
v := functionArgs(arg1).Evaluate(t) return asNumber(t, v)
return asNumber(t, v)
}
} }
// stringFunc is a XPath functions string([node-set]). // stringFunc is a XPath functions string([node-set]).
func stringFunc(arg1 query) func(query, iterator) interface{} { func stringFunc(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} { v := functionArgs(q).Evaluate(t)
v := functionArgs(arg1).Evaluate(t) return asString(t, v)
return asString(t, v)
}
} }
// startwithFunc is a XPath functions starts-with(string, string). // startwithFunc is a XPath functions starts-with(string, string).
func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} { func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
var ( var (
m, n string m, n string
ok bool ok bool
@ -322,7 +302,7 @@ func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
// endwithFunc is a XPath functions ends-with(string, string). // endwithFunc is a XPath functions ends-with(string, string).
func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} { func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
var ( var (
m, n string m, n string
ok bool ok bool
@ -349,7 +329,7 @@ func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
// containsFunc is a XPath functions contains(string or @attr, string). // containsFunc is a XPath functions contains(string or @attr, string).
func containsFunc(arg1, arg2 query) func(query, iterator) interface{} { func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
var ( var (
m, n string m, n string
ok bool ok bool
@ -380,7 +360,7 @@ func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
// Note: does not support https://www.w3.org/TR/xpath-functions-31/#func-matches 3rd optional `flags` argument; if // Note: does not support https://www.w3.org/TR/xpath-functions-31/#func-matches 3rd optional `flags` argument; if
// needed, directly put flags in the regexp pattern, such as `(?i)^pattern$` for `i` flag. // needed, directly put flags in the regexp pattern, such as `(?i)^pattern$` for `i` flag.
func matchesFunc(arg1, arg2 query) func(query, iterator) interface{} { func matchesFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
var s string var s string
switch typ := functionArgs(arg1).Evaluate(t).(type) { switch typ := functionArgs(arg1).Evaluate(t).(type) {
case string: case string:
@ -406,45 +386,43 @@ func matchesFunc(arg1, arg2 query) func(query, iterator) interface{} {
} }
// normalizespaceFunc is XPath functions normalize-space(string?) // normalizespaceFunc is XPath functions normalize-space(string?)
func normalizespaceFunc(arg1 query) func(query, iterator) interface{} { func normalizespaceFunc(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} { var m string
var m string switch typ := functionArgs(q).Evaluate(t).(type) {
switch typ := functionArgs(arg1).Evaluate(t).(type) { case string:
case string: m = typ
m = typ case query:
case query: node := typ.Select(t)
node := typ.Select(t) if node == nil {
if node == nil { return ""
return ""
}
m = node.Value()
} }
var b = builderPool.Get().(stringBuilder) m = node.Value()
b.Grow(len(m))
runeStr := []rune(strings.TrimSpace(m))
l := len(runeStr)
for i := range runeStr {
r := runeStr[i]
isSpace := unicode.IsSpace(r)
if !(isSpace && (i+1 < l && unicode.IsSpace(runeStr[i+1]))) {
if isSpace {
r = ' '
}
b.WriteRune(r)
}
}
result := b.String()
b.Reset()
builderPool.Put(b)
return result
} }
var b = builderPool.Get().(stringBuilder)
b.Grow(len(m))
runeStr := []rune(strings.TrimSpace(m))
l := len(runeStr)
for i := range runeStr {
r := runeStr[i]
isSpace := unicode.IsSpace(r)
if !(isSpace && (i+1 < l && unicode.IsSpace(runeStr[i+1]))) {
if isSpace {
r = ' '
}
b.WriteRune(r)
}
}
result := b.String()
b.Reset()
builderPool.Put(b)
return result
} }
// substringFunc is XPath functions substring function returns a part of a given string. // substringFunc is XPath functions substring function returns a part of a given string.
func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
var m string var m string
switch typ := functionArgs(arg1).Evaluate(t).(type) { switch typ := functionArgs(arg1).Evaluate(t).(type) {
case string: case string:
@ -483,7 +461,7 @@ func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
// substringIndFunc is XPath functions substring-before/substring-after function returns a part of a given string. // substringIndFunc is XPath functions substring-before/substring-after function returns a part of a given string.
func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interface{} { func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
var str string var str string
switch v := functionArgs(arg1).Evaluate(t).(type) { switch v := functionArgs(arg1).Evaluate(t).(type) {
case string: case string:
@ -524,7 +502,7 @@ func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interf
// stringLengthFunc is XPATH string-length( [string] ) function that returns a number // stringLengthFunc is XPATH string-length( [string] ) function that returns a number
// equal to the number of characters in a given string. // equal to the number of characters in a given string.
func stringLengthFunc(arg1 query) func(query, iterator) interface{} { func stringLengthFunc(arg1 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
switch v := functionArgs(arg1).Evaluate(t).(type) { switch v := functionArgs(arg1).Evaluate(t).(type) {
case string: case string:
return float64(len(v)) return float64(len(v))
@ -541,7 +519,7 @@ func stringLengthFunc(arg1 query) func(query, iterator) interface{} {
// translateFunc is XPath functions translate() function returns a replaced string. // translateFunc is XPath functions translate() function returns a replaced string.
func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
str := asString(t, functionArgs(arg1).Evaluate(t)) str := asString(t, functionArgs(arg1).Evaluate(t))
src := asString(t, functionArgs(arg2).Evaluate(t)) src := asString(t, functionArgs(arg2).Evaluate(t))
dst := asString(t, functionArgs(arg3).Evaluate(t)) dst := asString(t, functionArgs(arg3).Evaluate(t))
@ -560,7 +538,7 @@ func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
// replaceFunc is XPath functions replace() function returns a replaced string. // replaceFunc is XPath functions replace() function returns a replaced string.
func replaceFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { func replaceFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
str := asString(t, functionArgs(arg1).Evaluate(t)) str := asString(t, functionArgs(arg1).Evaluate(t))
src := asString(t, functionArgs(arg2).Evaluate(t)) src := asString(t, functionArgs(arg2).Evaluate(t))
dst := asString(t, functionArgs(arg3).Evaluate(t)) dst := asString(t, functionArgs(arg3).Evaluate(t))
@ -570,17 +548,15 @@ func replaceFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
} }
// notFunc is XPATH functions not(expression) function operation. // notFunc is XPATH functions not(expression) function operation.
func notFunc(arg1 query) func(query, iterator) interface{} { func notFunc(q query, t iterator) interface{} {
return func(_ query, t iterator) interface{} { switch v := functionArgs(q).Evaluate(t).(type) {
switch v := functionArgs(arg1).Evaluate(t).(type) { case bool:
case bool: return !v
return !v case query:
case query: node := v.Select(t)
node := v.Select(t) return node == nil
return node == nil default:
default: return false
return false
}
} }
} }
@ -588,7 +564,7 @@ func notFunc(arg1 query) func(query, iterator) interface{} {
// strings and returns the resulting string. // strings and returns the resulting string.
// concat( string1 , string2 [, stringn]* ) // concat( string1 , string2 [, stringn]* )
func concatFunc(args ...query) func(query, iterator) interface{} { func concatFunc(args ...query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
b := builderPool.Get().(stringBuilder) b := builderPool.Get().(stringBuilder)
for _, v := range args { for _, v := range args {
v = functionArgs(v) v = functionArgs(v)
@ -640,8 +616,8 @@ func reverseFunc(q query, t iterator) func() NodeNavigator {
} }
// string-join is a XPath Node Set functions string-join(node-set, separator). // string-join is a XPath Node Set functions string-join(node-set, separator).
func stringJoinFunc(q, arg1 query) func(query, iterator) interface{} { func stringJoinFunc(arg1 query) func(query, iterator) interface{} {
return func(_ query, t iterator) interface{} { return func(q query, t iterator) interface{} {
var separator string var separator string
switch v := functionArgs(arg1).Evaluate(t).(type) { switch v := functionArgs(arg1).Evaluate(t).(type) {
case string: case string:
@@ -671,9 +647,7 @@ func stringJoinFunc(q, arg1 query) func(query, iterator) interface{} {
 }

 // lower-case is XPATH function that converts a string to lower case.
-func lowerCaseFunc(arg1 query) func(query, iterator) interface{} {
-	return func(_ query, t iterator) interface{} {
-		v := functionArgs(arg1).Evaluate(t)
-		return strings.ToLower(asString(t, v))
-	}
+func lowerCaseFunc(q query, t iterator) interface{} {
+	v := functionArgs(q).Evaluate(t)
+	return strings.ToLower(asString(t, v))
 }
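
Note on this file's diff: nearly every hunk is the same mechanical refactor. On the master side each XPath function is produced by a closure factory that captures its argument query; on the 1.5.4 side the function has the fixed signature func(query, iterator) interface{} and receives its argument through functionQuery.Input. A minimal, self-contained sketch of the two shapes, using toy stand-in types rather than the library's real ones:

package main

import (
	"fmt"
	"strings"
)

// Toy stand-ins for the library's query and iterator types, for illustration only.
type query func() string
type iterator struct{}

// Factory style (master side): the argument query is captured by a closure.
func lowerFactory(arg query) func(query, iterator) interface{} {
	return func(_ query, _ iterator) interface{} {
		return strings.ToLower(arg())
	}
}

// Fixed-signature style (1.5.4 side): the argument arrives as q,
// wired up by the caller (functionQuery.Input in the real code).
func lowerDirect(q query, _ iterator) interface{} {
	return strings.ToLower(q())
}

func main() {
	arg := query(func() string { return "ABC" })
	fmt.Println(lowerFactory(arg)(nil, iterator{})) // abc
	fmt.Println(lowerDirect(arg, iterator{}))       // abc
}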


@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"strconv"
 	"unicode"
-	"unicode/utf8"
 )
// A XPath expression token type. // A XPath expression token type.
@@ -86,13 +85,12 @@ func newOperandNode(v interface{}) node {
 }

 // newAxisNode returns new axis node AxisNode.
-func newAxisNode(axisType string, typeTest NodeType, localName, prefix, prop string, n node, opts ...func(p *axisNode)) node {
+func newAxisNode(axeTyp, localName, prefix, prop string, n node, opts ...func(p *axisNode)) node {
 	a := axisNode{
 		nodeType:  nodeAxis,
-		typeTest:  typeTest,
 		LocalName: localName,
 		Prefix:    prefix,
-		AxisType:  axisType,
+		AxeType:   axeTyp,
 		Prop:      prop,
 		Input:     n,
 	}
@ -230,9 +228,8 @@ Loop:
} }
// RelationalExpr ::= AdditiveExpr | RelationalExpr '<' AdditiveExpr | RelationalExpr '>' AdditiveExpr // RelationalExpr ::= AdditiveExpr | RelationalExpr '<' AdditiveExpr | RelationalExpr '>' AdditiveExpr
// // | RelationalExpr '<=' AdditiveExpr
// | RelationalExpr '<=' AdditiveExpr // | RelationalExpr '>=' AdditiveExpr
// | RelationalExpr '>=' AdditiveExpr
func (p *parser) parseRelationalExpr(n node) node { func (p *parser) parseRelationalExpr(n node) node {
opnd := p.parseAdditiveExpr(n) opnd := p.parseAdditiveExpr(n)
Loop: Loop:
@ -277,8 +274,7 @@ Loop:
} }
// MultiplicativeExpr ::= UnaryExpr | MultiplicativeExpr MultiplyOperator(*) UnaryExpr // MultiplicativeExpr ::= UnaryExpr | MultiplicativeExpr MultiplyOperator(*) UnaryExpr
// // | MultiplicativeExpr 'div' UnaryExpr | MultiplicativeExpr 'mod' UnaryExpr
// | MultiplicativeExpr 'div' UnaryExpr | MultiplicativeExpr 'mod' UnaryExpr
func (p *parser) parseMultiplicativeExpr(n node) node { func (p *parser) parseMultiplicativeExpr(n node) node {
opnd := p.parseUnaryExpr(n) opnd := p.parseUnaryExpr(n)
Loop: Loop:
@ -312,7 +308,7 @@ func (p *parser) parseUnaryExpr(n node) node {
return opnd return opnd
} }
// UnionExpr ::= PathExpr | UnionExpr '|' PathExpr // UnionExpr ::= PathExpr | UnionExpr '|' PathExpr
func (p *parser) parseUnionExpr(n node) node { func (p *parser) parseUnionExpr(n node) node {
opnd := p.parsePathExpr(n) opnd := p.parsePathExpr(n)
Loop: Loop:
@ -339,7 +335,7 @@ func (p *parser) parsePathExpr(n node) node {
opnd = p.parseRelativeLocationPath(opnd) opnd = p.parseRelativeLocationPath(opnd)
case itemSlashSlash: case itemSlashSlash:
p.next() p.next()
opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", allNode, "", "", "", opnd)) opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", "", "", "", opnd))
} }
} else { } else {
opnd = p.parseLocationPath(nil) opnd = p.parseLocationPath(nil)
@ -356,7 +352,7 @@ func (p *parser) parseFilterExpr(n node) node {
return opnd return opnd
} }
// Predicate ::= '[' PredicateExpr ']' // Predicate ::= '[' PredicateExpr ']'
func (p *parser) parsePredicate(n node) node { func (p *parser) parsePredicate(n node) node {
p.skipItem(itemLBracket) p.skipItem(itemLBracket)
opnd := p.parseExpression(n) opnd := p.parseExpression(n)
@ -376,7 +372,7 @@ func (p *parser) parseLocationPath(n node) (opnd node) {
case itemSlashSlash: case itemSlashSlash:
p.next() p.next()
opnd = newRootNode("//") opnd = newRootNode("//")
opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", allNode, "", "", "", opnd)) opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", "", "", "", opnd))
default: default:
opnd = p.parseRelativeLocationPath(n) opnd = p.parseRelativeLocationPath(n)
} }
@ -392,7 +388,7 @@ Loop:
switch p.r.typ { switch p.r.typ {
case itemSlashSlash: case itemSlashSlash:
p.next() p.next()
opnd = newAxisNode("descendant-or-self", allNode, "", "", "", opnd) opnd = newAxisNode("descendant-or-self", "", "", "", opnd)
case itemSlash: case itemSlash:
p.next() p.next()
default: default:
@ -404,33 +400,30 @@ Loop:
// Step ::= AxisSpecifier NodeTest Predicate* | AbbreviatedStep // Step ::= AxisSpecifier NodeTest Predicate* | AbbreviatedStep
func (p *parser) parseStep(n node) (opnd node) { func (p *parser) parseStep(n node) (opnd node) {
axeTyp := "child" // default axes value.
if p.r.typ == itemDot || p.r.typ == itemDotDot { if p.r.typ == itemDot || p.r.typ == itemDotDot {
if p.r.typ == itemDot { if p.r.typ == itemDot {
opnd = newAxisNode("self", allNode, "", "", "", n) axeTyp = "self"
} else { } else {
opnd = newAxisNode("parent", allNode, "", "", "", n) axeTyp = "parent"
} }
p.next() p.next()
opnd = newAxisNode(axeTyp, "", "", "", n)
if p.r.typ != itemLBracket { if p.r.typ != itemLBracket {
return opnd return opnd
} }
} else { } else {
axisType := "child" // default axes value.
switch p.r.typ { switch p.r.typ {
case itemAt: case itemAt:
axisType = "attribute"
p.next() p.next()
axeTyp = "attribute"
case itemAxe: case itemAxe:
axisType = p.r.name axeTyp = p.r.name
p.next() p.next()
case itemLParens: case itemLParens:
return p.parseSequence(n) return p.parseSequence(n)
} }
matchType := ElementNode opnd = p.parseNodeTest(n, axeTyp)
if axisType == "attribute" {
matchType = AttributeNode
}
opnd = p.parseNodeTest(n, axisType, matchType)
} }
for p.r.typ == itemLBracket { for p.r.typ == itemLBracket {
opnd = newFilterNode(opnd, p.parsePredicate(opnd)) opnd = newFilterNode(opnd, p.parsePredicate(opnd))
@ -454,8 +447,8 @@ func (p *parser) parseSequence(n node) (opnd node) {
return opnd return opnd
} }
// NodeTest ::= NameTest | nodeType '(' ')' | 'processing-instruction' '(' Literal ')' // NodeTest ::= NameTest | nodeType '(' ')' | 'processing-instruction' '(' Literal ')'
func (p *parser) parseNodeTest(n node, axeTyp string, matchType NodeType) (opnd node) { func (p *parser) parseNodeTest(n node, axeTyp string) (opnd node) {
switch p.r.typ { switch p.r.typ {
case itemName: case itemName:
if p.r.canBeFunc && isNodeType(p.r) { if p.r.canBeFunc && isNodeType(p.r) {
@ -473,19 +466,7 @@ func (p *parser) parseNodeTest(n node, axeTyp string, matchType NodeType) (opnd
p.next() p.next()
} }
p.skipItem(itemRParens) p.skipItem(itemRParens)
switch prop { opnd = newAxisNode(axeTyp, name, "", prop, n)
case "comment":
matchType = CommentNode
case "text":
matchType = TextNode
case "processing-instruction":
case "node":
matchType = allNode
default:
matchType = RootNode
}
opnd = newAxisNode(axeTyp, matchType, name, "", prop, n)
} else { } else {
prefix := p.r.prefix prefix := p.r.prefix
name := p.r.name name := p.r.name
@ -493,7 +474,7 @@ func (p *parser) parseNodeTest(n node, axeTyp string, matchType NodeType) (opnd
if p.r.name == "*" { if p.r.name == "*" {
name = "" name = ""
} }
opnd = newAxisNode(axeTyp, matchType, name, prefix, "", n, func(a *axisNode) { opnd = newAxisNode(axeTyp, name, prefix, "", n, func(a *axisNode) {
if prefix != "" && p.namespaces != nil { if prefix != "" && p.namespaces != nil {
if ns, ok := p.namespaces[prefix]; ok { if ns, ok := p.namespaces[prefix]; ok {
a.hasNamespaceURI = true a.hasNamespaceURI = true
@ -505,7 +486,7 @@ func (p *parser) parseNodeTest(n node, axeTyp string, matchType NodeType) (opnd
}) })
} }
case itemStar: case itemStar:
opnd = newAxisNode(axeTyp, matchType, "", "", "", n) opnd = newAxisNode(axeTyp, "", "", "", n)
p.next() p.next()
default: default:
panic("expression must evaluate to a node-set") panic("expression must evaluate to a node-set")
@ -598,18 +579,17 @@ type axisNode struct {
nodeType nodeType
Input node Input node
Prop string // node-test name.[comment|text|processing-instruction|node] Prop string // node-test name.[comment|text|processing-instruction|node]
AxisType string // name of the axis.[attribute|ancestor|child|....] AxeType string // name of the axes.[attribute|ancestor|child|....]
LocalName string // local part name of node. LocalName string // local part name of node.
Prefix string // prefix name of node. Prefix string // prefix name of node.
namespaceURI string // namespace URI of node namespaceURI string // namespace URI of node
hasNamespaceURI bool // if namespace URI is set (can be "") hasNamespaceURI bool // if namespace URI is set (can be "")
typeTest NodeType
} }
func (a *axisNode) String() string { func (a *axisNode) String() string {
var b bytes.Buffer var b bytes.Buffer
if a.AxisType != "" { if a.AxeType != "" {
b.Write([]byte(a.AxisType + "::")) b.Write([]byte(a.AxeType + "::"))
} }
if a.Prefix != "" { if a.Prefix != "" {
b.Write([]byte(a.Prefix + ":")) b.Write([]byte(a.Prefix + ":"))
@@ -692,7 +672,6 @@ type scanner struct {
 	pos      int
 	curr     rune
-	currSize int
 	typ      itemType
 	strval   string  // text value at current pos
 	numval   float64 // number value at current pos
@@ -702,18 +681,10 @@ type scanner struct {
 func (s *scanner) nextChar() bool {
 	if s.pos >= len(s.text) {
 		s.curr = rune(0)
-		s.currSize = 1
 		return false
 	}
-	r, size := rune(s.text[s.pos]), 1
-	if r >= 0x80 { // handle multi-byte runes
-		r, size = utf8.DecodeRuneInString(s.text[s.pos:])
-	}
-	s.curr = r
-	s.currSize = size
-	s.pos += size
+	s.curr = rune(s.text[s.pos])
+	s.pos++
 	return true
 }
@@ -872,15 +843,12 @@ func (s *scanner) scanString() string {
 		end = s.curr
 	)
 	s.nextChar()
-	i := s.pos - s.currSize
-	if s.currSize > 1 {
-		c++
-	}
+	i := s.pos - 1
 	for s.curr != end {
 		if !s.nextChar() {
 			panic(errors.New("xpath: scanString got unclosed string"))
 		}
-		c += s.currSize
+		c++
 	}
 	s.nextChar()
 	return s.text[i : i+c]
@@ -888,18 +856,14 @@ func (s *scanner) scanString() string {
 func (s *scanner) scanName() string {
 	var (
-		c = s.currSize - 1
-		i = s.pos - s.currSize
+		c int
+		i = s.pos - 1
 	)
-	// Detect current rune size
 	for isName(s.curr) {
+		c++
 		if !s.nextChar() {
-			c += s.currSize
 			break
 		}
-		c += s.currSize
 	}
 	return s.text[i : i+c]
 }


@@ -850,9 +850,6 @@ func (f *functionQuery) Evaluate(t iterator) interface{} {
 }

 func (f *functionQuery) Clone() query {
-	if f.Input == nil {
-		return &functionQuery{Func: f.Func}
-	}
 	return &functionQuery{Input: f.Input.Clone(), Func: f.Func}
 }
@@ -1190,18 +1187,18 @@ func (u *unionQuery) Properties() queryProp {
 	return queryProps.Merge
 }

-type lastFuncQuery struct {
+type lastQuery struct {
 	buffer  []NodeNavigator
 	counted bool
 	Input   query
 }

-func (q *lastFuncQuery) Select(t iterator) NodeNavigator {
+func (q *lastQuery) Select(t iterator) NodeNavigator {
 	return nil
 }

-func (q *lastFuncQuery) Evaluate(t iterator) interface{} {
+func (q *lastQuery) Evaluate(t iterator) interface{} {
 	if !q.counted {
 		for {
 			node := q.Input.Select(t)
@@ -1215,15 +1212,15 @@ func (q *lastFuncQuery) Evaluate(t iterator) interface{} {
 	return float64(len(q.buffer))
 }

-func (q *lastFuncQuery) Clone() query {
-	return &lastFuncQuery{Input: q.Input.Clone()}
+func (q *lastQuery) Clone() query {
+	return &lastQuery{Input: q.Input.Clone()}
 }

-func (q *lastFuncQuery) ValueType() resultType {
+func (q *lastQuery) ValueType() resultType {
 	return xpathResultType.Number
 }

-func (q *lastFuncQuery) Properties() queryProp {
+func (q *lastQuery) Properties() queryProp {
 	return queryProps.Merge
 }

File diff suppressed because it is too large

View File

@ -147,9 +147,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()]. ** [sqlite_version()] and [sqlite_source_id()].
*/ */
#define SQLITE_VERSION "3.46.1" #define SQLITE_VERSION "3.45.1"
#define SQLITE_VERSION_NUMBER 3046001 #define SQLITE_VERSION_NUMBER 3045001
#define SQLITE_SOURCE_ID "2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33" #define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a"
/* /*
** CAPI3REF: Run-Time Library Version Numbers ** CAPI3REF: Run-Time Library Version Numbers
@ -421,8 +421,6 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**);
** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running. ** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running.
** <li> The application must not modify the SQL statement text passed into ** <li> The application must not modify the SQL statement text passed into
** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running. ** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
** <li> The application must not dereference the arrays or string pointers
** passed as the 3rd and 4th callback parameters after it returns.
** </ul> ** </ul>
*/ */
SQLITE_API int sqlite3_exec( SQLITE_API int sqlite3_exec(
@ -765,11 +763,11 @@ struct sqlite3_file {
** </ul> ** </ul>
** xLock() upgrades the database file lock. In other words, xLock() moves the ** xLock() upgrades the database file lock. In other words, xLock() moves the
** database file lock in the direction NONE toward EXCLUSIVE. The argument to ** database file lock in the direction NONE toward EXCLUSIVE. The argument to
** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never ** xLock() is always on of SHARED, RESERVED, PENDING, or EXCLUSIVE, never
** SQLITE_LOCK_NONE. If the database file lock is already at or above the ** SQLITE_LOCK_NONE. If the database file lock is already at or above the
** requested lock, then the call to xLock() is a no-op. ** requested lock, then the call to xLock() is a no-op.
** xUnlock() downgrades the database file lock to either SHARED or NONE. ** xUnlock() downgrades the database file lock to either SHARED or NONE.
** If the lock is already at or below the requested lock state, then the call * If the lock is already at or below the requested lock state, then the call
** to xUnlock() is a no-op. ** to xUnlock() is a no-op.
** The xCheckReservedLock() method checks whether any database connection, ** The xCheckReservedLock() method checks whether any database connection,
** either in this process or in some other process, is holding a RESERVED, ** either in this process or in some other process, is holding a RESERVED,
@ -2144,22 +2142,6 @@ struct sqlite3_mem_methods {
** configuration setting is never used, then the default maximum is determined ** configuration setting is never used, then the default maximum is determined
** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that ** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that
** compile-time option is not set, then the default maximum is 1073741824. ** compile-time option is not set, then the default maximum is 1073741824.
**
** [[SQLITE_CONFIG_ROWID_IN_VIEW]]
** <dt>SQLITE_CONFIG_ROWID_IN_VIEW
** <dd>The SQLITE_CONFIG_ROWID_IN_VIEW option enables or disables the ability
** for VIEWs to have a ROWID. The capability can only be enabled if SQLite is
** compiled with -DSQLITE_ALLOW_ROWID_IN_VIEW, in which case the capability
** defaults to on. This configuration option queries the current setting or
** changes the setting to off or on. The argument is a pointer to an integer.
** If that integer initially holds a value of 1, then the ability for VIEWs to
** have ROWIDs is activated. If the integer initially holds zero, then the
** ability is deactivated. Any other initial value for the integer leaves the
** setting unchanged. After changes, if any, the integer is written with
** a 1 or 0, if the ability for VIEWs to have ROWIDs is on or off. If SQLite
** is compiled without -DSQLITE_ALLOW_ROWID_IN_VIEW (which is the usual and
** recommended case) then the integer is always filled with zero, regardless
** if its initial value.
** </dl> ** </dl>
*/ */
#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ #define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */
@ -2191,7 +2173,6 @@ struct sqlite3_mem_methods {
#define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */ #define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */
#define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */ #define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */
#define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */ #define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */
#define SQLITE_CONFIG_ROWID_IN_VIEW 30 /* int* */
/* /*
** CAPI3REF: Database Connection Configuration Options ** CAPI3REF: Database Connection Configuration Options
@ -3306,8 +3287,8 @@ SQLITE_API int sqlite3_set_authorizer(
#define SQLITE_RECURSIVE 33 /* NULL NULL */ #define SQLITE_RECURSIVE 33 /* NULL NULL */
/* /*
** CAPI3REF: Deprecated Tracing And Profiling Functions ** CAPI3REF: Tracing And Profiling Functions
** DEPRECATED ** METHOD: sqlite3
** **
** These routines are deprecated. Use the [sqlite3_trace_v2()] interface ** These routines are deprecated. Use the [sqlite3_trace_v2()] interface
** instead of the routines described here. ** instead of the routines described here.
@ -6888,12 +6869,6 @@ SQLITE_API int sqlite3_autovacuum_pages(
** The exceptions defined in this paragraph might change in a future ** The exceptions defined in this paragraph might change in a future
** release of SQLite. ** release of SQLite.
** **
** Whether the update hook is invoked before or after the
** corresponding change is currently unspecified and may differ
** depending on the type of change. Do not rely on the order of the
** hook call with regards to the final result of the operation which
** triggers the hook.
**
** The update hook implementation must not do anything that will modify ** The update hook implementation must not do anything that will modify
** the database connection that invoked the update hook. Any actions ** the database connection that invoked the update hook. Any actions
** to modify the database connection must be deferred until after the ** to modify the database connection must be deferred until after the
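
The update-hook paragraph above boils down to: observe the change, defer any writes. A hedged sketch of that contract through the vendored mattn/go-sqlite3 driver, with the hook attached via ConnectHook and RegisterUpdateHook; the driver name and the demo.db file are placeholders for illustration:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/mattn/go-sqlite3"
)

func main() {
	sql.Register("sqlite3_with_hook", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			conn.RegisterUpdateHook(func(op int, db, table string, rowid int64) {
				// Per the documentation above: only observe here, defer any
				// writes to the connection until after the hook returns.
				log.Printf("op=%d db=%s table=%s rowid=%d", op, db, table, rowid)
			})
			return nil
		},
	})

	db, err := sql.Open("sqlite3_with_hook", "file:demo.db?cache=shared")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS t(x)`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`INSERT INTO t VALUES (1)`); err != nil {
		log.Fatal(err)
	}
}
```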
@ -8364,7 +8339,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
** The sqlite3_keyword_count() interface returns the number of distinct ** The sqlite3_keyword_count() interface returns the number of distinct
** keywords understood by SQLite. ** keywords understood by SQLite.
** **
** The sqlite3_keyword_name(N,Z,L) interface finds the 0-based N-th keyword and ** The sqlite3_keyword_name(N,Z,L) interface finds the N-th keyword and
** makes *Z point to that keyword expressed as UTF8 and writes the number ** makes *Z point to that keyword expressed as UTF8 and writes the number
** of bytes in the keyword into *L. The string that *Z points to is not ** of bytes in the keyword into *L. The string that *Z points to is not
** zero-terminated. The sqlite3_keyword_name(N,Z,L) routine returns ** zero-terminated. The sqlite3_keyword_name(N,Z,L) routine returns
@ -9943,45 +9918,24 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int);
** <li value="2"><p> ** <li value="2"><p>
** ^(If the sqlite3_vtab_distinct() interface returns 2, that means ** ^(If the sqlite3_vtab_distinct() interface returns 2, that means
** that the query planner does not need the rows returned in any particular ** that the query planner does not need the rows returned in any particular
** order, as long as rows with the same values in all columns identified ** order, as long as rows with the same values in all "aOrderBy" columns
** by "aOrderBy" are adjacent.)^ ^(Furthermore, when two or more rows ** are adjacent.)^ ^(Furthermore, only a single row for each particular
** contain the same values for all columns identified by "colUsed", all but ** combination of values in the columns identified by the "aOrderBy" field
** one such row may optionally be omitted from the result.)^ ** needs to be returned.)^ ^It is always ok for two or more rows with the same
** The virtual table is not required to omit rows that are duplicates ** values in all "aOrderBy" columns to be returned, as long as all such rows
** over the "colUsed" columns, but if the virtual table can do that without ** are adjacent. ^The virtual table may, if it chooses, omit extra rows
** too much extra effort, it could potentially help the query to run faster. ** that have the same value for all columns identified by "aOrderBy".
** ^However omitting the extra rows is optional.
** This mode is used for a DISTINCT query. ** This mode is used for a DISTINCT query.
** <li value="3"><p> ** <li value="3"><p>
** ^(If the sqlite3_vtab_distinct() interface returns 3, that means the ** ^(If the sqlite3_vtab_distinct() interface returns 3, that means
** virtual table must return rows in the order defined by "aOrderBy" as ** that the query planner needs only distinct rows but it does need the
** if the sqlite3_vtab_distinct() interface had returned 0. However if ** rows to be sorted.)^ ^The virtual table implementation is free to omit
** two or more rows in the result have the same values for all columns ** rows that are identical in all aOrderBy columns, if it wants to, but
** identified by "colUsed", then all but one such row may optionally be ** it is not required to omit any rows. This mode is used for queries
** omitted.)^ Like when the return value is 2, the virtual table
** is not required to omit rows that are duplicates over the "colUsed"
** columns, but if the virtual table can do that without
** too much extra effort, it could potentially help the query to run faster.
** This mode is used for queries
** that have both DISTINCT and ORDER BY clauses. ** that have both DISTINCT and ORDER BY clauses.
** </ol> ** </ol>
** **
** <p>The following table summarizes the conditions under which the
** virtual table is allowed to set the "orderByConsumed" flag based on
** the value returned by sqlite3_vtab_distinct(). This table is a
** restatement of the previous four paragraphs:
**
** <table border=1 cellspacing=0 cellpadding=10 width="90%">
** <tr>
** <td valign="top">sqlite3_vtab_distinct() return value
** <td valign="top">Rows are returned in aOrderBy order
** <td valign="top">Rows with the same value in all aOrderBy columns are adjacent
** <td valign="top">Duplicates over all colUsed columns may be omitted
** <tr><td>0<td>yes<td>yes<td>no
** <tr><td>1<td>no<td>yes<td>no
** <tr><td>2<td>no<td>yes<td>yes
** <tr><td>3<td>yes<td>yes<td>yes
** </table>
**
** ^For the purposes of comparing virtual table output values to see if the ** ^For the purposes of comparing virtual table output values to see if the
** values are same value for sorting purposes, two NULL values are considered ** values are same value for sorting purposes, two NULL values are considered
** to be the same. In other words, the comparison operator is "IS" ** to be the same. In other words, the comparison operator is "IS"
@ -12026,30 +11980,6 @@ SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const c
*/ */
SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
/*
** CAPI3REF: Add A Single Change To A Changegroup
** METHOD: sqlite3_changegroup
**
** This function adds the single change currently indicated by the iterator
** passed as the second argument to the changegroup object. The rules for
** adding the change are just as described for [sqlite3changegroup_add()].
**
** If the change is successfully added to the changegroup, SQLITE_OK is
** returned. Otherwise, an SQLite error code is returned.
**
** The iterator must point to a valid entry when this function is called.
** If it does not, SQLITE_ERROR is returned and no change is added to the
** changegroup. Additionally, the iterator must not have been opened with
** the SQLITE_CHANGESETAPPLY_INVERT flag. In this case SQLITE_ERROR is also
** returned.
*/
SQLITE_API int sqlite3changegroup_add_change(
sqlite3_changegroup*,
sqlite3_changeset_iter*
);
/* /*
** CAPI3REF: Obtain A Composite Changeset From A Changegroup ** CAPI3REF: Obtain A Composite Changeset From A Changegroup
** METHOD: sqlite3_changegroup ** METHOD: sqlite3_changegroup
@ -12854,8 +12784,8 @@ struct Fts5PhraseIter {
** EXTENSION API FUNCTIONS ** EXTENSION API FUNCTIONS
** **
** xUserData(pFts): ** xUserData(pFts):
** Return a copy of the pUserData pointer passed to the xCreateFunction() ** Return a copy of the context pointer the extension function was
** API when the extension function was registered. ** registered with.
** **
** xColumnTotalSize(pFts, iCol, pnToken): ** xColumnTotalSize(pFts, iCol, pnToken):
** If parameter iCol is less than zero, set output variable *pnToken ** If parameter iCol is less than zero, set output variable *pnToken

View File

@ -1679,7 +1679,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
} }
} }
// Foreign Keys // Forgein Keys
if foreignKeys > -1 { if foreignKeys > -1 {
if err := exec(fmt.Sprintf("PRAGMA foreign_keys = %d;", foreignKeys)); err != nil { if err := exec(fmt.Sprintf("PRAGMA foreign_keys = %d;", foreignKeys)); err != nil {
C.sqlite3_close_v2(db) C.sqlite3_close_v2(db)
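
For context, the PRAGMA applied in this hunk is normally driven from the connection string via the driver's _foreign_keys parameter (the default, -1, leaves the pragma untouched). A minimal sketch, assuming a placeholder database file qrz.db:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// _foreign_keys=1 makes the driver run "PRAGMA foreign_keys = 1" on each
	// new connection it opens for this DSN.
	db, err := sql.Open("sqlite3", "file:qrz.db?_foreign_keys=1")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var enabled int
	if err := db.QueryRow("PRAGMA foreign_keys").Scan(&enabled); err != nil {
		log.Fatal(err)
	}
	log.Printf("foreign_keys = %d", enabled) // expect 1
}
```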

View File

@ -18,6 +18,5 @@ package sqlite3
#cgo openbsd LDFLAGS: -lsqlite3 #cgo openbsd LDFLAGS: -lsqlite3
#cgo solaris LDFLAGS: -lsqlite3 #cgo solaris LDFLAGS: -lsqlite3
#cgo windows LDFLAGS: -lsqlite3 #cgo windows LDFLAGS: -lsqlite3
#cgo zos LDFLAGS: -lsqlite3
*/ */
import "C" import "C"

View File

@ -86,7 +86,7 @@ var (
// combination is incorrect or unknown. // combination is incorrect or unknown.
// //
// If the SQLITE_USER table is not present in the database file, then // If the SQLITE_USER table is not present in the database file, then
// this interface is a harmless no-op returning SQLITE_OK. // this interface is a harmless no-op returnning SQLITE_OK.
func (c *SQLiteConn) Authenticate(username, password string) error { func (c *SQLiteConn) Authenticate(username, password string) error {
rv := c.authenticate(username, password) rv := c.authenticate(username, password)
switch rv { switch rv {

View File

@ -1,122 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"math"
"net/http"
"time"
)
// http2Config is a package-internal version of net/http.HTTP2Config.
//
// http.HTTP2Config was added in Go 1.24.
// When running with a version of net/http that includes HTTP2Config,
// we merge the configuration with the fields in Transport or Server
// to produce an http2Config.
//
// Zero valued fields in http2Config are interpreted as in the
// net/http.HTTPConfig documentation.
//
// Precedence order for reconciling configurations is:
//
// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero.
// - Otherwise use the http2.{Server.Transport} value.
// - If the resulting value is zero or out of range, use a default.
type http2Config struct {
MaxConcurrentStreams uint32
MaxDecoderHeaderTableSize uint32
MaxEncoderHeaderTableSize uint32
MaxReadFrameSize uint32
MaxUploadBufferPerConnection int32
MaxUploadBufferPerStream int32
SendPingTimeout time.Duration
PingTimeout time.Duration
WriteByteTimeout time.Duration
PermitProhibitedCipherSuites bool
CountError func(errType string)
}
// configFromServer merges configuration settings from
// net/http.Server.HTTP2Config and http2.Server.
func configFromServer(h1 *http.Server, h2 *Server) http2Config {
conf := http2Config{
MaxConcurrentStreams: h2.MaxConcurrentStreams,
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
MaxReadFrameSize: h2.MaxReadFrameSize,
MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection,
MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream,
SendPingTimeout: h2.ReadIdleTimeout,
PingTimeout: h2.PingTimeout,
WriteByteTimeout: h2.WriteByteTimeout,
PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
CountError: h2.CountError,
}
fillNetHTTPServerConfig(&conf, h1)
setConfigDefaults(&conf, true)
return conf
}
// configFromServer merges configuration settings from h2 and h2.t1.HTTP2
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
MaxReadFrameSize: h2.MaxReadFrameSize,
SendPingTimeout: h2.ReadIdleTimeout,
PingTimeout: h2.PingTimeout,
WriteByteTimeout: h2.WriteByteTimeout,
}
// Unlike most config fields, where out-of-range values revert to the default,
// Transport.MaxReadFrameSize clips.
if conf.MaxReadFrameSize < minMaxFrameSize {
conf.MaxReadFrameSize = minMaxFrameSize
} else if conf.MaxReadFrameSize > maxFrameSize {
conf.MaxReadFrameSize = maxFrameSize
}
if h2.t1 != nil {
fillNetHTTPTransportConfig(&conf, h2.t1)
}
setConfigDefaults(&conf, false)
return conf
}
func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
if *v < minval || *v > maxval {
*v = defval
}
}
func setConfigDefaults(conf *http2Config, server bool) {
setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams)
setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
if server {
setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20)
} else {
setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow)
}
if server {
setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20)
} else {
setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow)
}
setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize)
setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second)
}
// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header
// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
func adjustHTTP1MaxHeaderSize(n int64) int64 {
// http2's count is in a slightly different unit and includes 32 bytes per pair.
// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
const perFieldOverhead = 32 // per http2 spec
const typicalHeaders = 10 // conservative
return n + typicalHeaders*perFieldOverhead
}

View File

@ -1,61 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.24
package http2
import "net/http"
// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
fillNetHTTPConfig(conf, srv.HTTP2)
}
// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2.
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
fillNetHTTPConfig(conf, tr.HTTP2)
}
func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
if h2 == nil {
return
}
if h2.MaxConcurrentStreams != 0 {
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
}
if h2.MaxEncoderHeaderTableSize != 0 {
conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
}
if h2.MaxDecoderHeaderTableSize != 0 {
conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
}
if h2.MaxConcurrentStreams != 0 {
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
}
if h2.MaxReadFrameSize != 0 {
conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
}
if h2.MaxReceiveBufferPerConnection != 0 {
conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
}
if h2.MaxReceiveBufferPerStream != 0 {
conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
}
if h2.SendPingTimeout != 0 {
conf.SendPingTimeout = h2.SendPingTimeout
}
if h2.PingTimeout != 0 {
conf.PingTimeout = h2.PingTimeout
}
if h2.WriteByteTimeout != 0 {
conf.WriteByteTimeout = h2.WriteByteTimeout
}
if h2.PermitProhibitedCipherSuites {
conf.PermitProhibitedCipherSuites = true
}
if h2.CountError != nil {
conf.CountError = h2.CountError
}
}

View File

@ -1,16 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.24
package http2
import "net/http"
// Pre-Go 1.24 fallback.
// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}

View File

@ -19,9 +19,8 @@ import (
"bufio" "bufio"
"context" "context"
"crypto/tls" "crypto/tls"
"errors"
"fmt" "fmt"
"net" "io"
"net/http" "net/http"
"os" "os"
"sort" "sort"
@ -238,19 +237,13 @@ func (cw closeWaiter) Wait() {
// Its buffered writer is lazily allocated as needed, to minimize // Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections. // idle memory usage with many connections.
type bufferedWriter struct { type bufferedWriter struct {
_ incomparable _ incomparable
group synctestGroupInterface // immutable w io.Writer // immutable
conn net.Conn // immutable bw *bufio.Writer // non-nil when data is buffered
bw *bufio.Writer // non-nil when data is buffered
byteTimeout time.Duration // immutable, WriteByteTimeout
} }
func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { func newBufferedWriter(w io.Writer) *bufferedWriter {
return &bufferedWriter{ return &bufferedWriter{w: w}
group: group,
conn: conn,
byteTimeout: timeout,
}
} }
// bufWriterPoolBufferSize is the size of bufio.Writer's // bufWriterPoolBufferSize is the size of bufio.Writer's
@ -277,7 +270,7 @@ func (w *bufferedWriter) Available() int {
func (w *bufferedWriter) Write(p []byte) (n int, err error) { func (w *bufferedWriter) Write(p []byte) (n int, err error) {
if w.bw == nil { if w.bw == nil {
bw := bufWriterPool.Get().(*bufio.Writer) bw := bufWriterPool.Get().(*bufio.Writer)
bw.Reset((*bufferedWriterTimeoutWriter)(w)) bw.Reset(w.w)
w.bw = bw w.bw = bw
} }
return w.bw.Write(p) return w.bw.Write(p)
@ -295,38 +288,6 @@ func (w *bufferedWriter) Flush() error {
return err return err
} }
type bufferedWriterTimeoutWriter bufferedWriter
func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
}
// writeWithByteTimeout writes to conn.
// If more than timeout passes without any bytes being written to the connection,
// the write fails.
func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
if timeout <= 0 {
return conn.Write(p)
}
for {
var now time.Time
if group == nil {
now = time.Now()
} else {
now = group.Now()
}
conn.SetWriteDeadline(now.Add(timeout))
nn, err := conn.Write(p[n:])
n += nn
if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
// Either we finished the write, made no progress, or hit the deadline.
// Whichever it is, we're done now.
conn.SetWriteDeadline(time.Time{})
return n, err
}
}
}
func mustUint31(v int32) uint32 { func mustUint31(v int32) uint32 {
if v < 0 || v > 2147483647 { if v < 0 || v > 2147483647 {
panic("out of range") panic("out of range")

View File

@ -29,7 +29,6 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"context" "context"
"crypto/rand"
"crypto/tls" "crypto/tls"
"errors" "errors"
"fmt" "fmt"
@ -53,14 +52,10 @@ import (
) )
const ( const (
prefaceTimeout = 10 * time.Second prefaceTimeout = 10 * time.Second
firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
handlerChunkWriteSize = 4 << 10 handlerChunkWriteSize = 4 << 10
defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
// maxQueuedControlFrames is the maximum number of control frames like
// SETTINGS, PING and RST_STREAM that will be queued for writing before
// the connection is closed to prevent memory exhaustion attacks.
maxQueuedControlFrames = 10000 maxQueuedControlFrames = 10000
) )
@ -132,22 +127,6 @@ type Server struct {
// If zero or negative, there is no timeout. // If zero or negative, there is no timeout.
IdleTimeout time.Duration IdleTimeout time.Duration
// ReadIdleTimeout is the timeout after which a health check using a ping
// frame will be carried out if no frame is received on the connection.
// If zero, no health check is performed.
ReadIdleTimeout time.Duration
// PingTimeout is the timeout after which the connection will be closed
// if a response to a ping is not received.
// If zero, a default of 15 seconds is used.
PingTimeout time.Duration
// WriteByteTimeout is the timeout after which a connection will be
// closed if no data can be written to it. The timeout begins when data is
// available to write, and is extended whenever any bytes are written.
// If zero or negative, there is no timeout.
WriteByteTimeout time.Duration
// MaxUploadBufferPerConnection is the size of the initial flow // MaxUploadBufferPerConnection is the size of the initial flow
// control window for each connections. The HTTP/2 spec does not // control window for each connections. The HTTP/2 spec does not
// allow this to be smaller than 65535 or larger than 2^32-1. // allow this to be smaller than 65535 or larger than 2^32-1.
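
A hedged sketch of wiring the three keep-alive fields added above (present on the master side of this diff) into a plain net/http server via ConfigureServer; the timeouts, listen address, and certificate paths are illustrative only:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok"))
		}),
	}
	h2 := &http2.Server{
		ReadIdleTimeout:  30 * time.Second, // send a PING after 30s without a frame
		PingTimeout:      15 * time.Second, // close if the PING goes unanswered
		WriteByteTimeout: 10 * time.Second, // close if writes stall completely
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder cert paths
}
```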
@ -210,6 +189,57 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer {
return timeTimer{time.AfterFunc(d, f)} return timeTimer{time.AfterFunc(d, f)}
} }
func (s *Server) initialConnRecvWindowSize() int32 {
if s.MaxUploadBufferPerConnection >= initialWindowSize {
return s.MaxUploadBufferPerConnection
}
return 1 << 20
}
func (s *Server) initialStreamRecvWindowSize() int32 {
if s.MaxUploadBufferPerStream > 0 {
return s.MaxUploadBufferPerStream
}
return 1 << 20
}
func (s *Server) maxReadFrameSize() uint32 {
if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
return v
}
return defaultMaxReadFrameSize
}
func (s *Server) maxConcurrentStreams() uint32 {
if v := s.MaxConcurrentStreams; v > 0 {
return v
}
return defaultMaxStreams
}
func (s *Server) maxDecoderHeaderTableSize() uint32 {
if v := s.MaxDecoderHeaderTableSize; v > 0 {
return v
}
return initialHeaderTableSize
}
func (s *Server) maxEncoderHeaderTableSize() uint32 {
if v := s.MaxEncoderHeaderTableSize; v > 0 {
return v
}
return initialHeaderTableSize
}
// maxQueuedControlFrames is the maximum number of control frames like
// SETTINGS, PING and RST_STREAM that will be queued for writing before
// the connection is closed to prevent memory exhaustion attacks.
func (s *Server) maxQueuedControlFrames() int {
// TODO: if anybody asks, add a Server field, and remember to define the
// behavior of negative values.
return maxQueuedControlFrames
}
type serverInternalState struct { type serverInternalState struct {
mu sync.Mutex mu sync.Mutex
activeConns map[*serverConn]struct{} activeConns map[*serverConn]struct{}
@ -410,15 +440,13 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
baseCtx, cancel := serverConnBaseContext(c, opts) baseCtx, cancel := serverConnBaseContext(c, opts)
defer cancel() defer cancel()
http1srv := opts.baseConfig()
conf := configFromServer(http1srv, s)
sc := &serverConn{ sc := &serverConn{
srv: s, srv: s,
hs: http1srv, hs: opts.baseConfig(),
conn: c, conn: c,
baseCtx: baseCtx, baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(), remoteAddrStr: c.RemoteAddr().String(),
bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), bw: newBufferedWriter(c),
handler: opts.handler(), handler: opts.handler(),
streams: make(map[uint32]*stream), streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult), readFrameCh: make(chan readFrameResult),
@ -428,12 +456,9 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}), doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
advMaxStreams: conf.MaxConcurrentStreams, advMaxStreams: s.maxConcurrentStreams(),
initialStreamSendWindowSize: initialWindowSize, initialStreamSendWindowSize: initialWindowSize,
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxFrameSize: initialMaxFrameSize, maxFrameSize: initialMaxFrameSize,
pingTimeout: conf.PingTimeout,
countErrorFunc: conf.CountError,
serveG: newGoroutineLock(), serveG: newGoroutineLock(),
pushEnabled: true, pushEnabled: true,
sawClientPreface: opts.SawClientPreface, sawClientPreface: opts.SawClientPreface,
@ -466,15 +491,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
sc.flow.add(initialWindowSize) sc.flow.add(initialWindowSize)
sc.inflow.init(initialWindowSize) sc.inflow.init(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
fr := NewFramer(sc.bw, c) fr := NewFramer(sc.bw, c)
if conf.CountError != nil { if s.CountError != nil {
fr.countError = conf.CountError fr.countError = s.CountError
} }
fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
fr.MaxHeaderListSize = sc.maxHeaderListSize() fr.MaxHeaderListSize = sc.maxHeaderListSize()
fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) fr.SetMaxReadFrameSize(s.maxReadFrameSize())
sc.framer = fr sc.framer = fr
if tc, ok := c.(connectionStater); ok { if tc, ok := c.(connectionStater); ok {
@ -507,7 +532,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
// So for now, do nothing here again. // So for now, do nothing here again.
} }
if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
// "Endpoints MAY choose to generate a connection error // "Endpoints MAY choose to generate a connection error
// (Section 5.4.1) of type INADEQUATE_SECURITY if one of // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
// the prohibited cipher suites are negotiated." // the prohibited cipher suites are negotiated."
@ -544,7 +569,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
opts.UpgradeRequest = nil opts.UpgradeRequest = nil
} }
sc.serve(conf) sc.serve()
} }
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
@ -584,7 +609,6 @@ type serverConn struct {
tlsState *tls.ConnectionState // shared by all handlers, like net/http tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string remoteAddrStr string
writeSched WriteScheduler writeSched WriteScheduler
countErrorFunc func(errType string)
// Everything following is owned by the serve loop; use serveG.check(): // Everything following is owned by the serve loop; use serveG.check():
serveG goroutineLock // used to verify funcs are on serve() serveG goroutineLock // used to verify funcs are on serve()
@ -604,7 +628,6 @@ type serverConn struct {
streams map[uint32]*stream streams map[uint32]*stream
unstartedHandlers []unstartedHandler unstartedHandlers []unstartedHandler
initialStreamSendWindowSize int32 initialStreamSendWindowSize int32
initialStreamRecvWindowSize int32
maxFrameSize int32 maxFrameSize int32
peerMaxHeaderListSize uint32 // zero means unknown (default) peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
@ -615,14 +638,9 @@ type serverConn struct {
inGoAway bool // we've started to or sent GOAWAY inGoAway bool // we've started to or sent GOAWAY
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write needToSendGoAway bool // we need to schedule a GOAWAY frame write
pingSent bool
sentPingData [8]byte
goAwayCode ErrCode goAwayCode ErrCode
shutdownTimer timer // nil until used shutdownTimer timer // nil until used
idleTimer timer // nil if unused idleTimer timer // nil if unused
readIdleTimeout time.Duration
pingTimeout time.Duration
readIdleTimer timer // nil if unused
// Owned by the writeFrameAsync goroutine: // Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer headerWriteBuf bytes.Buffer
@ -637,7 +655,11 @@ func (sc *serverConn) maxHeaderListSize() uint32 {
if n <= 0 { if n <= 0 {
n = http.DefaultMaxHeaderBytes n = http.DefaultMaxHeaderBytes
} }
return uint32(adjustHTTP1MaxHeaderSize(int64(n))) // http2's count is in a slightly different unit and includes 32 bytes per pair.
// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
const perFieldOverhead = 32 // per http2 spec
const typicalHeaders = 10 // conservative
return uint32(n + typicalHeaders*perFieldOverhead)
} }
func (sc *serverConn) curOpenStreams() uint32 { func (sc *serverConn) curOpenStreams() uint32 {
@ -901,7 +923,7 @@ func (sc *serverConn) notePanic() {
} }
} }
func (sc *serverConn) serve(conf http2Config) { func (sc *serverConn) serve() {
sc.serveG.check() sc.serveG.check()
defer sc.notePanic() defer sc.notePanic()
defer sc.conn.Close() defer sc.conn.Close()
@ -915,18 +937,18 @@ func (sc *serverConn) serve(conf http2Config) {
sc.writeFrame(FrameWriteRequest{ sc.writeFrame(FrameWriteRequest{
write: writeSettings{ write: writeSettings{
{SettingMaxFrameSize, conf.MaxReadFrameSize}, {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
{SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxConcurrentStreams, sc.advMaxStreams},
{SettingMaxHeaderListSize, sc.maxHeaderListSize()}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
{SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
{SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
}, },
}) })
sc.unackedSettings++ sc.unackedSettings++
// Each connection starts with initialWindowSize inflow tokens. // Each connection starts with initialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens. // If a higher value is configured, we add more tokens.
if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff)) sc.sendWindowUpdate(nil, int(diff))
} }
@ -946,18 +968,11 @@ func (sc *serverConn) serve(conf http2Config) {
defer sc.idleTimer.Stop() defer sc.idleTimer.Stop()
} }
if conf.SendPingTimeout > 0 {
sc.readIdleTimeout = conf.SendPingTimeout
sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
defer sc.readIdleTimer.Stop()
}
go sc.readFrames() // closed by defer sc.conn.Close above go sc.readFrames() // closed by defer sc.conn.Close above
settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop() defer settingsTimer.Stop()
lastFrameTime := sc.srv.now()
loopNum := 0 loopNum := 0
for { for {
loopNum++ loopNum++
@ -971,7 +986,6 @@ func (sc *serverConn) serve(conf http2Config) {
case res := <-sc.wroteFrameCh: case res := <-sc.wroteFrameCh:
sc.wroteFrame(res) sc.wroteFrame(res)
case res := <-sc.readFrameCh: case res := <-sc.readFrameCh:
lastFrameTime = sc.srv.now()
// Process any written frames before reading new frames from the client since a // Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started. // written frame could have triggered a new stream to be started.
if sc.writingFrameAsync { if sc.writingFrameAsync {
@ -1003,8 +1017,6 @@ func (sc *serverConn) serve(conf http2Config) {
case idleTimerMsg: case idleTimerMsg:
sc.vlogf("connection is idle") sc.vlogf("connection is idle")
sc.goAway(ErrCodeNo) sc.goAway(ErrCodeNo)
case readIdleTimerMsg:
sc.handlePingTimer(lastFrameTime)
case shutdownTimerMsg: case shutdownTimerMsg:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return return
@ -1027,7 +1039,7 @@ func (sc *serverConn) serve(conf http2Config) {
// If the peer is causing us to generate a lot of control frames, // If the peer is causing us to generate a lot of control frames,
// but not reading them from us, assume they are trying to make us // but not reading them from us, assume they are trying to make us
// run out of memory. // run out of memory.
if sc.queuedControlFrames > maxQueuedControlFrames { if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
sc.vlogf("http2: too many control frames in send queue, closing connection") sc.vlogf("http2: too many control frames in send queue, closing connection")
return return
} }
@ -1043,39 +1055,12 @@ func (sc *serverConn) serve(conf http2Config) {
} }
} }
func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
if sc.pingSent {
sc.vlogf("timeout waiting for PING response")
sc.conn.Close()
return
}
pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
now := sc.srv.now()
if pingAt.After(now) {
// We received frames since arming the ping timer.
// Reset it for the next possible timeout.
sc.readIdleTimer.Reset(pingAt.Sub(now))
return
}
sc.pingSent = true
// Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does
// is we send a PING frame containing 0s.
_, _ = rand.Read(sc.sentPingData[:])
sc.writeFrame(FrameWriteRequest{
write: &writePing{data: sc.sentPingData},
})
sc.readIdleTimer.Reset(sc.pingTimeout)
}
type serverMessage int type serverMessage int
// Message values sent to serveMsgCh. // Message values sent to serveMsgCh.
var ( var (
settingsTimerMsg = new(serverMessage) settingsTimerMsg = new(serverMessage)
idleTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage)
readIdleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage)
handlerDoneMsg = new(serverMessage) handlerDoneMsg = new(serverMessage)
@ -1083,7 +1068,6 @@ var (
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
func (sc *serverConn) sendServeMsg(msg interface{}) { func (sc *serverConn) sendServeMsg(msg interface{}) {
@ -1336,10 +1320,6 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
sc.writingFrame = false sc.writingFrame = false
sc.writingFrameAsync = false sc.writingFrameAsync = false
if res.err != nil {
sc.conn.Close()
}
wr := res.wr wr := res.wr
if writeEndsStream(wr.write) { if writeEndsStream(wr.write) {
@ -1614,11 +1594,6 @@ func (sc *serverConn) processFrame(f Frame) error {
func (sc *serverConn) processPing(f *PingFrame) error { func (sc *serverConn) processPing(f *PingFrame) error {
sc.serveG.check() sc.serveG.check()
if f.IsAck() { if f.IsAck() {
if sc.pingSent && sc.sentPingData == f.Data {
// This is a response to a PING we sent.
sc.pingSent = false
sc.readIdleTimer.Reset(sc.readIdleTimeout)
}
// 6.7 PING: " An endpoint MUST NOT respond to PING frames // 6.7 PING: " An endpoint MUST NOT respond to PING frames
// containing this flag." // containing this flag."
return nil return nil
@ -2185,7 +2160,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.cw.Init() st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize) st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.init(sc.initialStreamRecvWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize())
if sc.hs.WriteTimeout > 0 { if sc.hs.WriteTimeout > 0 {
st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
} }
@ -3326,7 +3301,7 @@ func (sc *serverConn) countError(name string, err error) error {
if sc == nil || sc.srv == nil { if sc == nil || sc.srv == nil {
return err return err
} }
f := sc.countErrorFunc f := sc.srv.CountError
if f == nil { if f == nil {
return err return err
} }

View File

@ -25,6 +25,7 @@ import (
"net/http" "net/http"
"net/http/httptrace" "net/http/httptrace"
"net/textproto" "net/textproto"
"os"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@ -226,26 +227,40 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co
} }
func (t *Transport) maxHeaderListSize() uint32 { func (t *Transport) maxHeaderListSize() uint32 {
n := int64(t.MaxHeaderListSize) if t.MaxHeaderListSize == 0 {
if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 {
n = t.t1.MaxResponseHeaderBytes
if n > 0 {
n = adjustHTTP1MaxHeaderSize(n)
}
}
if n <= 0 {
return 10 << 20 return 10 << 20
} }
if n >= 0xffffffff { if t.MaxHeaderListSize == 0xffffffff {
return 0 return 0
} }
return uint32(n) return t.MaxHeaderListSize
}
func (t *Transport) maxFrameReadSize() uint32 {
if t.MaxReadFrameSize == 0 {
return 0 // use the default provided by the peer
}
if t.MaxReadFrameSize < minMaxFrameSize {
return minMaxFrameSize
}
if t.MaxReadFrameSize > maxFrameSize {
return maxFrameSize
}
return t.MaxReadFrameSize
} }
func (t *Transport) disableCompression() bool { func (t *Transport) disableCompression() bool {
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
} }
func (t *Transport) pingTimeout() time.Duration {
if t.PingTimeout == 0 {
return 15 * time.Second
}
return t.PingTimeout
}
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns an error if t1 has already been HTTP/2-enabled. // It returns an error if t1 has already been HTTP/2-enabled.
// //
@ -355,14 +370,11 @@ type ClientConn struct {
lastActive time.Time lastActive time.Time
lastIdle time.Time // time last idle lastIdle time.Time // time last idle
// Settings from peer: (also guarded by wmu) // Settings from peer: (also guarded by wmu)
maxFrameSize uint32 maxFrameSize uint32
maxConcurrentStreams uint32 maxConcurrentStreams uint32
peerMaxHeaderListSize uint64 peerMaxHeaderListSize uint64
peerMaxHeaderTableSize uint32 peerMaxHeaderTableSize uint32
initialWindowSize uint32 initialWindowSize uint32
initialStreamRecvWindowSize int32
readIdleTimeout time.Duration
pingTimeout time.Duration
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock. // Write to reqHeaderMu to lock it, read from it to unlock.
@ -487,7 +499,6 @@ func (cs *clientStream) closeReqBodyLocked() {
} }
type stickyErrWriter struct { type stickyErrWriter struct {
group synctestGroupInterface
conn net.Conn conn net.Conn
timeout time.Duration timeout time.Duration
err *error err *error
@ -497,9 +508,22 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil { if *sew.err != nil {
return 0, *sew.err return 0, *sew.err
} }
n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) for {
*sew.err = err if sew.timeout != 0 {
return n, err sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
}
nn, err := sew.conn.Write(p[n:])
n += nn
if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
// Keep extending the deadline so long as we're making progress.
continue
}
if sew.timeout != 0 {
sew.conn.SetWriteDeadline(time.Time{})
}
*sew.err = err
return n, err
}
} }
// noCachedConnError is the concrete type of ErrNoCachedConn, which // noCachedConnError is the concrete type of ErrNoCachedConn, which
@ -734,36 +758,44 @@ func (t *Transport) expectContinueTimeout() time.Duration {
return t.t1.ExpectContinueTimeout return t.t1.ExpectContinueTimeout
} }
func (t *Transport) maxDecoderHeaderTableSize() uint32 {
if v := t.MaxDecoderHeaderTableSize; v > 0 {
return v
}
return initialHeaderTableSize
}
func (t *Transport) maxEncoderHeaderTableSize() uint32 {
if v := t.MaxEncoderHeaderTableSize; v > 0 {
return v
}
return initialHeaderTableSize
}
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
return t.newClientConn(c, t.disableKeepAlives()) return t.newClientConn(c, t.disableKeepAlives())
} }
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
conf := configFromTransport(t)
cc := &ClientConn{ cc := &ClientConn{
t: t, t: t,
tconn: c, tconn: c,
readerDone: make(chan struct{}), readerDone: make(chan struct{}),
nextStreamID: 1, nextStreamID: 1,
maxFrameSize: 16 << 10, // spec default maxFrameSize: 16 << 10, // spec default
initialWindowSize: 65535, // spec default initialWindowSize: 65535, // spec default
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream),
streams: make(map[uint32]*clientStream), singleUse: singleUse,
singleUse: singleUse, wantSettingsAck: true,
wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}),
readIdleTimeout: conf.SendPingTimeout, reqHeaderMu: make(chan struct{}, 1),
pingTimeout: conf.PingTimeout,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
} }
var group synctestGroupInterface
if t.transportTestHooks != nil { if t.transportTestHooks != nil {
t.markNewGoroutine() t.markNewGoroutine()
t.transportTestHooks.newclientconn(cc) t.transportTestHooks.newclientconn(cc)
c = cc.tconn c = cc.tconn
group = t.group
} }
if VerboseLogs { if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@ -775,23 +807,24 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// TODO: adjust this writer size to account for frame size + // TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding. // MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(stickyErrWriter{ cc.bw = bufio.NewWriter(stickyErrWriter{
group: group,
conn: c, conn: c,
timeout: conf.WriteByteTimeout, timeout: t.WriteByteTimeout,
err: &cc.werr, err: &cc.werr,
}) })
cc.br = bufio.NewReader(c) cc.br = bufio.NewReader(c)
cc.fr = NewFramer(cc.bw, cc.br) cc.fr = NewFramer(cc.bw, cc.br)
cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.maxFrameReadSize() != 0 {
cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize())
}
if t.CountError != nil { if t.CountError != nil {
cc.fr.countError = t.CountError cc.fr.countError = t.CountError
} }
maxHeaderTableSize := conf.MaxDecoderHeaderTableSize maxHeaderTableSize := t.maxDecoderHeaderTableSize()
cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
cc.henc = hpack.NewEncoder(&cc.hbuf) cc.henc = hpack.NewEncoder(&cc.hbuf)
cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
cc.peerMaxHeaderTableSize = initialHeaderTableSize cc.peerMaxHeaderTableSize = initialHeaderTableSize
if cs, ok := c.(connectionStater); ok { if cs, ok := c.(connectionStater); ok {
@ -801,9 +834,11 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
initialSettings := []Setting{ initialSettings := []Setting{
{ID: SettingEnablePush, Val: 0}, {ID: SettingEnablePush, Val: 0},
{ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
}
if max := t.maxFrameReadSize(); max != 0 {
initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max})
} }
initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize})
if max := t.maxHeaderListSize(); max != 0 { if max := t.maxHeaderListSize(); max != 0 {
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
} }
@ -813,8 +848,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
cc.bw.Write(clientPreface) cc.bw.Write(clientPreface)
cc.fr.WriteSettings(initialSettings...) cc.fr.WriteSettings(initialSettings...)
cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
cc.bw.Flush() cc.bw.Flush()
if cc.werr != nil { if cc.werr != nil {
cc.Close() cc.Close()
@ -832,7 +867,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
} }
func (cc *ClientConn) healthCheck() { func (cc *ClientConn) healthCheck() {
pingTimeout := cc.pingTimeout pingTimeout := cc.t.pingTimeout()
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will // We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received. // trigger the healthCheck again if there is no frame received.
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
@ -2164,7 +2199,7 @@ type resAndError struct {
func (cc *ClientConn) addStreamLocked(cs *clientStream) { func (cc *ClientConn) addStreamLocked(cs *clientStream) {
cs.flow.add(int32(cc.initialWindowSize)) cs.flow.add(int32(cc.initialWindowSize))
cs.flow.setConnFlow(&cc.flow) cs.flow.setConnFlow(&cc.flow)
cs.inflow.init(cc.initialStreamRecvWindowSize) cs.inflow.init(transportDefaultStreamFlow)
cs.ID = cc.nextStreamID cs.ID = cc.nextStreamID
cc.nextStreamID += 2 cc.nextStreamID += 2
cc.streams[cs.ID] = cs cc.streams[cs.ID] = cs
@ -2310,7 +2345,7 @@ func (cc *ClientConn) countReadFrameError(err error) {
func (rl *clientConnReadLoop) run() error { func (rl *clientConnReadLoop) run() error {
cc := rl.cc cc := rl.cc
gotSettings := false gotSettings := false
readIdleTimeout := cc.readIdleTimeout readIdleTimeout := cc.t.ReadIdleTimeout
var t timer var t timer
if readIdleTimeout != 0 { if readIdleTimeout != 0 {
t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
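
The read loop above arms the health check from ReadIdleTimeout. A hedged client-side sketch using the corresponding http2.Transport fields; the target URL is a placeholder:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t2 := &http2.Transport{
		TLSClientConfig:  &tls.Config{MinVersion: tls.VersionTLS12},
		ReadIdleTimeout:  30 * time.Second, // ping after 30s without any frame
		PingTimeout:      10 * time.Second, // give up if the ping goes unanswered
		WriteByteTimeout: 10 * time.Second, // bound stalled writes, as in stickyErrWriter above
	}
	client := &http.Client{Transport: t2}

	resp, err := client.Get("https://example.com/") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Proto, resp.Status)
}
```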

View File

@ -131,16 +131,6 @@ func (se StreamError) writeFrame(ctx writeContext) error {
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
type writePing struct {
data [8]byte
}
func (w writePing) writeFrame(ctx writeContext) error {
return ctx.Framer().WritePing(false, w.data)
}
func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max }
type writePingAck struct{ pf *PingFrame } type writePingAck struct{ pf *PingFrame }
func (w writePingAck) writeFrame(ctx writeContext) error { func (w writePingAck) writeFrame(ctx writeContext) error {

View File

@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these
into a common file for each OS. into a common file for each OS.
The merge is performed in the following steps: The merge is performed in the following steps:
1. Construct the set of common code that is identical in all architecture-specific files. 1. Construct the set of common code that is idential in all architecture-specific files.
2. Write this common code to the merged file. 2. Write this common code to the merged file.
3. Remove the common code from all architecture-specific files. 3. Remove the common code from all architecture-specific files.

View File

@ -552,7 +552,6 @@ ccflags="$@"
$2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
$2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ ||
$2 ~ /^(CONNECT|SAE)_/ ||
$2 ~ /^FIORDCHK$/ || $2 ~ /^FIORDCHK$/ ||
$2 ~ /^SIOC/ || $2 ~ /^SIOC/ ||
$2 ~ /^TIOC/ || $2 ~ /^TIOC/ ||
@ -656,7 +655,7 @@ errors=$(
signals=$( signals=$(
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags | echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
sort sort
) )
@ -666,7 +665,7 @@ echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
sort >_error.grep sort >_error.grep
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags | echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
sort >_signal.grep sort >_signal.grep
echo '// mkerrors.sh' "$@" echo '// mkerrors.sh' "$@"


@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int,
var status _C_int var status _C_int
var r Pid_t var r Pid_t
err = ERESTART err = ERESTART
// AIX wait4 may return with ERESTART errno, while the process is still // AIX wait4 may return with ERESTART errno, while the processus is still
// active. // active.
for err == ERESTART { for err == ERESTART {
r, err = wait4(Pid_t(pid), &status, options, rusage) r, err = wait4(Pid_t(pid), &status, options, rusage)


@ -566,43 +566,6 @@ func PthreadFchdir(fd int) (err error) {
return pthread_fchdir_np(fd) return pthread_fchdir_np(fd)
} }
// Connectx calls connectx(2) to initiate a connection on a socket.
//
// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument.
//
// - srcIf is the optional source interface index. 0 means unspecified.
// - srcAddr is the optional source address. nil means unspecified.
// - dstAddr is the destination address.
//
// On success, Connectx returns the number of bytes enqueued for transmission.
func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) {
endpoints := SaEndpoints{
Srcif: srcIf,
}
if srcAddr != nil {
addrp, addrlen, err := srcAddr.sockaddr()
if err != nil {
return 0, err
}
endpoints.Srcaddr = (*RawSockaddr)(addrp)
endpoints.Srcaddrlen = uint32(addrlen)
}
if dstAddr != nil {
addrp, addrlen, err := dstAddr.sockaddr()
if err != nil {
return 0, err
}
endpoints.Dstaddr = (*RawSockaddr)(addrp)
endpoints.Dstaddrlen = uint32(addrlen)
}
err = connectx(fd, &endpoints, associd, flags, iov, &n, connid)
return
}
//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)


@ -11,7 +11,6 @@ package unix
int ioctl(int, unsigned long int, uintptr_t); int ioctl(int, unsigned long int, uintptr_t);
*/ */
import "C" import "C"
import "unsafe"
func ioctl(fd int, req uint, arg uintptr) (err error) { func ioctl(fd int, req uint, arg uintptr) (err error) {
r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg))


@ -1295,48 +1295,6 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
return &value, err return &value, err
} }
// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas"
// algorithm.
//
// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
//
// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) {
var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
vallen := _Socklen(SizeofTCPCCInfo)
err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
out := (*TCPVegasInfo)(unsafe.Pointer(&value[0]))
return out, err
}
// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp"
// algorithm.
//
// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
//
// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) {
var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
vallen := _Socklen(SizeofTCPCCInfo)
err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0]))
return out, err
}
// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr"
// algorithm.
//
// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
//
// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) {
var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
vallen := _Socklen(SizeofTCPCCInfo)
err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
out := (*TCPBBRInfo)(unsafe.Pointer(&value[0]))
return out, err
}
// GetsockoptString returns the string value of the socket option opt for the // GetsockoptString returns the string value of the socket option opt for the
// socket associated with fd at the given socket level. // socket associated with fd at the given socket level.
func GetsockoptString(fd, level, opt int) (string, error) { func GetsockoptString(fd, level, opt int) (string, error) {
@ -2001,26 +1959,7 @@ func Getpgrp() (pid int) {
//sysnb Getpid() (pid int) //sysnb Getpid() (pid int)
//sysnb Getppid() (ppid int) //sysnb Getppid() (ppid int)
//sys Getpriority(which int, who int) (prio int, err error) //sys Getpriority(which int, who int) (prio int, err error)
//sys Getrandom(buf []byte, flags int) (n int, err error)
func Getrandom(buf []byte, flags int) (n int, err error) {
vdsoRet, supported := vgetrandom(buf, uint32(flags))
if supported {
if vdsoRet < 0 {
return 0, errnoErr(syscall.Errno(-vdsoRet))
}
return vdsoRet, nil
}
var p *byte
if len(buf) > 0 {
p = &buf[0]
}
r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags))
if e != 0 {
return 0, errnoErr(e)
}
return int(r), nil
}
//sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error)
//sysnb Getsid(pid int) (sid int, err error) //sysnb Getsid(pid int) (sid int, err error)
//sysnb Gettid() (tid int) //sysnb Gettid() (tid int)
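
The GetsockoptTCPCC*Info helpers removed earlier in this file's diff follow the pattern their doc comments describe: read the algorithm name with GetsockoptString and TCP_CONGESTION, then request the matching struct with the TCP_CC_INFO option. A hedged, Linux-only sketch against the master-side x/sys; the helper name dumpCCInfo is invented for illustration:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// dumpCCInfo is an illustrative helper, not part of x/sys.
func dumpCCInfo(fd int) {
	algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
	if err != nil {
		fmt.Println("TCP_CONGESTION:", err)
		return
	}
	switch algo {
	case "vegas":
		info, err := unix.GetsockoptTCPCCVegasInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
		fmt.Println(algo, info, err)
	case "dctcp":
		info, err := unix.GetsockoptTCPCCDCTCPInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
		fmt.Println(algo, info, err)
	case "bbr":
		info, err := unix.GetsockoptTCPCCBBRInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
		fmt.Println(algo, info, err)
	default:
		fmt.Println("no per-algorithm struct for", algo)
	}
}

func main() {
	// An unconnected TCP socket is enough to read the default algorithm.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)
	dumpCCInfo(fd)
}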


@ -182,5 +182,3 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
} }
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
} }
const SYS_FSTATAT = SYS_NEWFSTATAT


@ -214,5 +214,3 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
} }
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
} }
const SYS_FSTATAT = SYS_NEWFSTATAT


@ -187,5 +187,3 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error
} }
return riscvHWProbe(pairs, setSize, set, flags) return riscvHWProbe(pairs, setSize, set, flags)
} }
const SYS_FSTATAT = SYS_NEWFSTATAT


@ -1,13 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && go1.24
package unix
import _ "unsafe"
//go:linkname vgetrandom runtime.vgetrandom
//go:noescape
func vgetrandom(p []byte, flags uint32) (ret int, supported bool)


@ -1,11 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !linux || !go1.24
package unix
func vgetrandom(p []byte, flags uint32) (ret int, supported bool) {
return -1, false
}


@ -237,9 +237,6 @@ const (
CLOCK_UPTIME_RAW_APPROX = 0x9 CLOCK_UPTIME_RAW_APPROX = 0x9
CLONE_NOFOLLOW = 0x1 CLONE_NOFOLLOW = 0x1
CLONE_NOOWNERCOPY = 0x2 CLONE_NOOWNERCOPY = 0x2
CONNECT_DATA_AUTHENTICATED = 0x4
CONNECT_DATA_IDEMPOTENT = 0x2
CONNECT_RESUME_ON_READ_WRITE = 0x1
CR0 = 0x0 CR0 = 0x0
CR1 = 0x1000 CR1 = 0x1000
CR2 = 0x2000 CR2 = 0x2000
@ -1268,10 +1265,6 @@ const (
RTV_SSTHRESH = 0x20 RTV_SSTHRESH = 0x20
RUSAGE_CHILDREN = -0x1 RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0 RUSAGE_SELF = 0x0
SAE_ASSOCID_ALL = 0xffffffff
SAE_ASSOCID_ANY = 0x0
SAE_CONNID_ALL = 0xffffffff
SAE_CONNID_ANY = 0x0
SCM_CREDS = 0x3 SCM_CREDS = 0x3
SCM_RIGHTS = 0x1 SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x2 SCM_TIMESTAMP = 0x2


@ -237,9 +237,6 @@ const (
CLOCK_UPTIME_RAW_APPROX = 0x9 CLOCK_UPTIME_RAW_APPROX = 0x9
CLONE_NOFOLLOW = 0x1 CLONE_NOFOLLOW = 0x1
CLONE_NOOWNERCOPY = 0x2 CLONE_NOOWNERCOPY = 0x2
CONNECT_DATA_AUTHENTICATED = 0x4
CONNECT_DATA_IDEMPOTENT = 0x2
CONNECT_RESUME_ON_READ_WRITE = 0x1
CR0 = 0x0 CR0 = 0x0
CR1 = 0x1000 CR1 = 0x1000
CR2 = 0x2000 CR2 = 0x2000
@ -1268,10 +1265,6 @@ const (
RTV_SSTHRESH = 0x20 RTV_SSTHRESH = 0x20
RUSAGE_CHILDREN = -0x1 RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0 RUSAGE_SELF = 0x0
SAE_ASSOCID_ALL = 0xffffffff
SAE_ASSOCID_ANY = 0x0
SAE_CONNID_ALL = 0xffffffff
SAE_CONNID_ANY = 0x0
SCM_CREDS = 0x3 SCM_CREDS = 0x3
SCM_RIGHTS = 0x1 SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x2 SCM_TIMESTAMP = 0x2


@ -495,7 +495,6 @@ const (
BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_REG_INVARIANTS = 0x80
BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RND_HI32 = 0x4
BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_RUN_ON_CPU = 0x1
BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4
BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_STATE_FREQ = 0x8
BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2
BPF_F_XDP_DEV_BOUND_ONLY = 0x40 BPF_F_XDP_DEV_BOUND_ONLY = 0x40
@ -1923,7 +1922,6 @@ const (
MNT_EXPIRE = 0x4 MNT_EXPIRE = 0x4
MNT_FORCE = 0x1 MNT_FORCE = 0x1
MNT_ID_REQ_SIZE_VER0 = 0x18 MNT_ID_REQ_SIZE_VER0 = 0x18
MNT_ID_REQ_SIZE_VER1 = 0x20
MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_COMPRESSED_FILE = 0x4
MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_MODVERSIONS = 0x1
MODULE_INIT_IGNORE_VERMAGIC = 0x2 MODULE_INIT_IGNORE_VERMAGIC = 0x2
@ -2189,7 +2187,7 @@ const (
NFT_REG_SIZE = 0x10 NFT_REG_SIZE = 0x10
NFT_REJECT_ICMPX_MAX = 0x3 NFT_REJECT_ICMPX_MAX = 0x3
NFT_RT_MAX = 0x4 NFT_RT_MAX = 0x4
NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SECMARK_CTX_MAXLEN = 0x100
NFT_SET_MAXNAMELEN = 0x100 NFT_SET_MAXNAMELEN = 0x100
NFT_SOCKET_MAX = 0x3 NFT_SOCKET_MAX = 0x3
NFT_TABLE_F_MASK = 0x7 NFT_TABLE_F_MASK = 0x7
@ -2358,11 +2356,9 @@ const (
PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_IO = 0xa
PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L1 = 0x1
PERF_MEM_LVLNUM_L2 = 0x2 PERF_MEM_LVLNUM_L2 = 0x2
PERF_MEM_LVLNUM_L2_MHB = 0x5
PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L3 = 0x3
PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_L4 = 0x4
PERF_MEM_LVLNUM_LFB = 0xc PERF_MEM_LVLNUM_LFB = 0xc
PERF_MEM_LVLNUM_MSC = 0x6
PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_NA = 0xf
PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_PMEM = 0xe
PERF_MEM_LVLNUM_RAM = 0xd PERF_MEM_LVLNUM_RAM = 0xd
@ -2435,7 +2431,6 @@ const (
PRIO_PGRP = 0x1 PRIO_PGRP = 0x1
PRIO_PROCESS = 0x0 PRIO_PROCESS = 0x0
PRIO_USER = 0x2 PRIO_USER = 0x2
PROCFS_IOCTL_MAGIC = 'f'
PROC_SUPER_MAGIC = 0x9fa0 PROC_SUPER_MAGIC = 0x9fa0
PROT_EXEC = 0x4 PROT_EXEC = 0x4
PROT_GROWSDOWN = 0x1000000 PROT_GROWSDOWN = 0x1000000
@ -2938,12 +2933,11 @@ const (
RUSAGE_SELF = 0x0 RUSAGE_SELF = 0x0
RUSAGE_THREAD = 0x1 RUSAGE_THREAD = 0x1
RWF_APPEND = 0x10 RWF_APPEND = 0x10
RWF_ATOMIC = 0x40
RWF_DSYNC = 0x2 RWF_DSYNC = 0x2
RWF_HIPRI = 0x1 RWF_HIPRI = 0x1
RWF_NOAPPEND = 0x20 RWF_NOAPPEND = 0x20
RWF_NOWAIT = 0x8 RWF_NOWAIT = 0x8
RWF_SUPPORTED = 0x7f RWF_SUPPORTED = 0x3f
RWF_SYNC = 0x4 RWF_SYNC = 0x4
RWF_WRITE_LIFE_NOT_SET = 0x0 RWF_WRITE_LIFE_NOT_SET = 0x0
SCHED_BATCH = 0x3 SCHED_BATCH = 0x3
@ -3216,7 +3210,6 @@ const (
STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_MOUNT_ROOT = 0x2000
STATX_ATTR_NODUMP = 0x40 STATX_ATTR_NODUMP = 0x40
STATX_ATTR_VERITY = 0x100000 STATX_ATTR_VERITY = 0x100000
STATX_ATTR_WRITE_ATOMIC = 0x400000
STATX_BASIC_STATS = 0x7ff STATX_BASIC_STATS = 0x7ff
STATX_BLOCKS = 0x400 STATX_BLOCKS = 0x400
STATX_BTIME = 0x800 STATX_BTIME = 0x800
@ -3233,7 +3226,6 @@ const (
STATX_SUBVOL = 0x8000 STATX_SUBVOL = 0x8000
STATX_TYPE = 0x1 STATX_TYPE = 0x1
STATX_UID = 0x8 STATX_UID = 0x8
STATX_WRITE_ATOMIC = 0x10000
STATX__RESERVED = 0x80000000 STATX__RESERVED = 0x80000000
SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_AFTER = 0x4
SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1
@ -3632,7 +3624,6 @@ const (
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4 XDP_UMEM_REG = 0x4
XDP_UMEM_TX_METADATA_LEN = 0x4
XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_TX_SW_CSUM = 0x2
XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1
XDP_USE_NEED_WAKEUP = 0x8 XDP_USE_NEED_WAKEUP = 0x8


@ -153,14 +153,9 @@ const (
NFDBITS = 0x20 NFDBITS = 0x20
NLDLY = 0x100 NLDLY = 0x100
NOFLSH = 0x80 NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703 NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704 NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702 NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701 NS_GET_USERNS = 0xb701
OLCUC = 0x2 OLCUC = 0x2
ONLCR = 0x4 ONLCR = 0x4


@ -153,14 +153,9 @@ const (
NFDBITS = 0x40 NFDBITS = 0x40
NLDLY = 0x100 NLDLY = 0x100
NOFLSH = 0x80 NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703 NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704 NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702 NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701 NS_GET_USERNS = 0xb701
OLCUC = 0x2 OLCUC = 0x2
ONLCR = 0x4 ONLCR = 0x4


@ -150,14 +150,9 @@ const (
NFDBITS = 0x20 NFDBITS = 0x20
NLDLY = 0x100 NLDLY = 0x100
NOFLSH = 0x80 NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703 NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704 NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702 NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701 NS_GET_USERNS = 0xb701
OLCUC = 0x2 OLCUC = 0x2
ONLCR = 0x4 ONLCR = 0x4


@ -154,14 +154,9 @@ const (
NFDBITS = 0x40 NFDBITS = 0x40
NLDLY = 0x100 NLDLY = 0x100
NOFLSH = 0x80 NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703 NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704 NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702 NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701 NS_GET_USERNS = 0xb701
OLCUC = 0x2 OLCUC = 0x2
ONLCR = 0x4 ONLCR = 0x4


@ -154,14 +154,9 @@ const (
NFDBITS = 0x40 NFDBITS = 0x40
NLDLY = 0x100 NLDLY = 0x100
NOFLSH = 0x80 NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703 NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704 NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702 NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701 NS_GET_USERNS = 0xb701
OLCUC = 0x2 OLCUC = 0x2
ONLCR = 0x4 ONLCR = 0x4


@ -150,14 +150,9 @@ const (
NFDBITS = 0x20 NFDBITS = 0x20
NLDLY = 0x100 NLDLY = 0x100
NOFLSH = 0x80 NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703 NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704 NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702 NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701 NS_GET_USERNS = 0x2000b701
OLCUC = 0x2 OLCUC = 0x2
ONLCR = 0x4 ONLCR = 0x4


@ -150,14 +150,9 @@ const (
NFDBITS = 0x40 NFDBITS = 0x40
NLDLY = 0x100 NLDLY = 0x100
NOFLSH = 0x80 NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703 NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704 NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702 NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701 NS_GET_USERNS = 0x2000b701
OLCUC = 0x2 OLCUC = 0x2
ONLCR = 0x4 ONLCR = 0x4


@ -150,14 +150,9 @@ const (
NFDBITS = 0x40 NFDBITS = 0x40
NLDLY = 0x100 NLDLY = 0x100
NOFLSH = 0x80 NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703 NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704 NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702 NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701 NS_GET_USERNS = 0x2000b701
OLCUC = 0x2 OLCUC = 0x2
ONLCR = 0x4 ONLCR = 0x4


@ -150,14 +150,9 @@ const (
NFDBITS = 0x20 NFDBITS = 0x20
NLDLY = 0x100 NLDLY = 0x100
NOFLSH = 0x80 NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703 NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704 NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702 NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701 NS_GET_USERNS = 0x2000b701
OLCUC = 0x2 OLCUC = 0x2
ONLCR = 0x4 ONLCR = 0x4


@ -152,14 +152,9 @@ const (
NL3 = 0x300 NL3 = 0x300
NLDLY = 0x300 NLDLY = 0x300
NOFLSH = 0x80000000 NOFLSH = 0x80000000
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703 NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704 NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702 NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701 NS_GET_USERNS = 0x2000b701
OLCUC = 0x4 OLCUC = 0x4
ONLCR = 0x2 ONLCR = 0x2


@ -152,14 +152,9 @@ const (
NL3 = 0x300 NL3 = 0x300
NLDLY = 0x300 NLDLY = 0x300
NOFLSH = 0x80000000 NOFLSH = 0x80000000
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703 NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704 NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702 NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701 NS_GET_USERNS = 0x2000b701
OLCUC = 0x4 OLCUC = 0x4
ONLCR = 0x2 ONLCR = 0x2


@ -152,14 +152,9 @@ const (
NL3 = 0x300 NL3 = 0x300
NLDLY = 0x300 NLDLY = 0x300
NOFLSH = 0x80000000 NOFLSH = 0x80000000
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703 NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704 NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702 NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701 NS_GET_USERNS = 0x2000b701
OLCUC = 0x4 OLCUC = 0x4
ONLCR = 0x2 ONLCR = 0x2


@ -150,14 +150,9 @@ const (
NFDBITS = 0x40 NFDBITS = 0x40
NLDLY = 0x100 NLDLY = 0x100
NOFLSH = 0x80 NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703 NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704 NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702 NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701 NS_GET_USERNS = 0xb701
OLCUC = 0x2 OLCUC = 0x2
ONLCR = 0x4 ONLCR = 0x4


@ -150,14 +150,9 @@ const (
NFDBITS = 0x40 NFDBITS = 0x40
NLDLY = 0x100 NLDLY = 0x100
NOFLSH = 0x80 NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703 NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704 NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702 NS_GET_PARENT = 0xb702
NS_GET_PID_FROM_PIDNS = 0x8004b706
NS_GET_PID_IN_PIDNS = 0x8004b708
NS_GET_TGID_FROM_PIDNS = 0x8004b707
NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701 NS_GET_USERNS = 0xb701
OLCUC = 0x2 OLCUC = 0x2
ONLCR = 0x4 ONLCR = 0x4


@ -155,14 +155,9 @@ const (
NFDBITS = 0x40 NFDBITS = 0x40
NLDLY = 0x100 NLDLY = 0x100
NOFLSH = 0x80 NOFLSH = 0x80
NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703 NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704 NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702 NS_GET_PARENT = 0x2000b702
NS_GET_PID_FROM_PIDNS = 0x4004b706
NS_GET_PID_IN_PIDNS = 0x4004b708
NS_GET_TGID_FROM_PIDNS = 0x4004b707
NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701 NS_GET_USERNS = 0x2000b701
OLCUC = 0x2 OLCUC = 0x2
ONLCR = 0x4 ONLCR = 0x4


@ -581,8 +581,6 @@ const (
AT_EMPTY_PATH = 0x1000 AT_EMPTY_PATH = 0x1000
AT_REMOVEDIR = 0x200 AT_REMOVEDIR = 0x200
RENAME_NOREPLACE = 1 << 0 RENAME_NOREPLACE = 1 << 0
ST_RDONLY = 1
ST_NOSUID = 2
) )
const ( const (


@ -841,26 +841,6 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) {
var _p0 unsafe.Pointer
if len(iov) > 0 {
_p0 = unsafe.Pointer(&iov[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_connectx_trampoline_addr uintptr
//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 { if e1 != 0 {


@ -248,11 +248,6 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_connectx(SB)
GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8
DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB)
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB) JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8


@ -841,26 +841,6 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) {
var _p0 unsafe.Pointer
if len(iov) > 0 {
_p0 = unsafe.Pointer(&iov[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_connectx_trampoline_addr uintptr
//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 { if e1 != 0 {


@ -248,11 +248,6 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_connectx(SB)
GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8
DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB)
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB) JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8


@ -971,6 +971,23 @@ func Getpriority(which int, who int) (prio int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrandom(buf []byte, flags int) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrusage(who int, rusage *Rusage) (err error) { func Getrusage(who int, rusage *Rusage) (err error) {
_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 { if e1 != 0 {


@ -341,7 +341,6 @@ const (
SYS_STATX = 332 SYS_STATX = 332
SYS_IO_PGETEVENTS = 333 SYS_IO_PGETEVENTS = 333
SYS_RSEQ = 334 SYS_RSEQ = 334
SYS_URETPROBE = 335
SYS_PIDFD_SEND_SIGNAL = 424 SYS_PIDFD_SEND_SIGNAL = 424
SYS_IO_URING_SETUP = 425 SYS_IO_URING_SETUP = 425
SYS_IO_URING_ENTER = 426 SYS_IO_URING_ENTER = 426


@ -85,7 +85,7 @@ const (
SYS_SPLICE = 76 SYS_SPLICE = 76
SYS_TEE = 77 SYS_TEE = 77
SYS_READLINKAT = 78 SYS_READLINKAT = 78
SYS_NEWFSTATAT = 79 SYS_FSTATAT = 79
SYS_FSTAT = 80 SYS_FSTAT = 80
SYS_SYNC = 81 SYS_SYNC = 81
SYS_FSYNC = 82 SYS_FSYNC = 82


@ -84,8 +84,6 @@ const (
SYS_SPLICE = 76 SYS_SPLICE = 76
SYS_TEE = 77 SYS_TEE = 77
SYS_READLINKAT = 78 SYS_READLINKAT = 78
SYS_NEWFSTATAT = 79
SYS_FSTAT = 80
SYS_SYNC = 81 SYS_SYNC = 81
SYS_FSYNC = 82 SYS_FSYNC = 82
SYS_FDATASYNC = 83 SYS_FDATASYNC = 83


@ -84,7 +84,7 @@ const (
SYS_SPLICE = 76 SYS_SPLICE = 76
SYS_TEE = 77 SYS_TEE = 77
SYS_READLINKAT = 78 SYS_READLINKAT = 78
SYS_NEWFSTATAT = 79 SYS_FSTATAT = 79
SYS_FSTAT = 80 SYS_FSTAT = 80
SYS_SYNC = 81 SYS_SYNC = 81
SYS_FSYNC = 82 SYS_FSYNC = 82


@ -306,19 +306,6 @@ type XVSockPgen struct {
type _Socklen uint32 type _Socklen uint32
type SaeAssocID uint32
type SaeConnID uint32
type SaEndpoints struct {
Srcif uint32
Srcaddr *RawSockaddr
Srcaddrlen uint32
Dstaddr *RawSockaddr
Dstaddrlen uint32
_ [4]byte
}
type Xucred struct { type Xucred struct {
Version uint32 Version uint32
Uid uint32 Uid uint32


@ -306,19 +306,6 @@ type XVSockPgen struct {
type _Socklen uint32 type _Socklen uint32
type SaeAssocID uint32
type SaeConnID uint32
type SaEndpoints struct {
Srcif uint32
Srcaddr *RawSockaddr
Srcaddrlen uint32
Dstaddr *RawSockaddr
Dstaddrlen uint32
_ [4]byte
}
type Xucred struct { type Xucred struct {
Version uint32 Version uint32
Uid uint32 Uid uint32


@ -625,7 +625,6 @@ const (
POLLRDNORM = 0x40 POLLRDNORM = 0x40
POLLWRBAND = 0x100 POLLWRBAND = 0x100
POLLWRNORM = 0x4 POLLWRNORM = 0x4
POLLRDHUP = 0x4000
) )
type CapRights struct { type CapRights struct {


@ -630,7 +630,6 @@ const (
POLLRDNORM = 0x40 POLLRDNORM = 0x40
POLLWRBAND = 0x100 POLLWRBAND = 0x100
POLLWRNORM = 0x4 POLLWRNORM = 0x4
POLLRDHUP = 0x4000
) )
type CapRights struct { type CapRights struct {


@ -616,7 +616,6 @@ const (
POLLRDNORM = 0x40 POLLRDNORM = 0x40
POLLWRBAND = 0x100 POLLWRBAND = 0x100
POLLWRNORM = 0x4 POLLWRNORM = 0x4
POLLRDHUP = 0x4000
) )
type CapRights struct { type CapRights struct {


@ -610,7 +610,6 @@ const (
POLLRDNORM = 0x40 POLLRDNORM = 0x40
POLLWRBAND = 0x100 POLLWRBAND = 0x100
POLLWRNORM = 0x4 POLLWRNORM = 0x4
POLLRDHUP = 0x4000
) )
type CapRights struct { type CapRights struct {


@ -612,7 +612,6 @@ const (
POLLRDNORM = 0x40 POLLRDNORM = 0x40
POLLWRBAND = 0x100 POLLWRBAND = 0x100
POLLWRNORM = 0x4 POLLWRNORM = 0x4
POLLRDHUP = 0x4000
) )
type CapRights struct { type CapRights struct {


@ -87,35 +87,31 @@ type StatxTimestamp struct {
} }
type Statx_t struct { type Statx_t struct {
Mask uint32 Mask uint32
Blksize uint32 Blksize uint32
Attributes uint64 Attributes uint64
Nlink uint32 Nlink uint32
Uid uint32 Uid uint32
Gid uint32 Gid uint32
Mode uint16 Mode uint16
_ [1]uint16 _ [1]uint16
Ino uint64 Ino uint64
Size uint64 Size uint64
Blocks uint64 Blocks uint64
Attributes_mask uint64 Attributes_mask uint64
Atime StatxTimestamp Atime StatxTimestamp
Btime StatxTimestamp Btime StatxTimestamp
Ctime StatxTimestamp Ctime StatxTimestamp
Mtime StatxTimestamp Mtime StatxTimestamp
Rdev_major uint32 Rdev_major uint32
Rdev_minor uint32 Rdev_minor uint32
Dev_major uint32 Dev_major uint32
Dev_minor uint32 Dev_minor uint32
Mnt_id uint64 Mnt_id uint64
Dio_mem_align uint32 Dio_mem_align uint32
Dio_offset_align uint32 Dio_offset_align uint32
Subvol uint64 Subvol uint64
Atomic_write_unit_min uint32 _ [11]uint64
Atomic_write_unit_max uint32
Atomic_write_segments_max uint32
_ [1]uint32
_ [9]uint64
} }
type Fsid struct { type Fsid struct {
@ -520,29 +516,6 @@ type TCPInfo struct {
Total_rto_time uint32 Total_rto_time uint32
} }
type TCPVegasInfo struct {
Enabled uint32
Rttcnt uint32
Rtt uint32
Minrtt uint32
}
type TCPDCTCPInfo struct {
Enabled uint16
Ce_state uint16
Alpha uint32
Ab_ecn uint32
Ab_tot uint32
}
type TCPBBRInfo struct {
Bw_lo uint32
Bw_hi uint32
Min_rtt uint32
Pacing_gain uint32
Cwnd_gain uint32
}
type CanFilter struct { type CanFilter struct {
Id uint32 Id uint32
Mask uint32 Mask uint32
@ -584,7 +557,6 @@ const (
SizeofICMPv6Filter = 0x20 SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc SizeofUcred = 0xc
SizeofTCPInfo = 0xf8 SizeofTCPInfo = 0xf8
SizeofTCPCCInfo = 0x14
SizeofCanFilter = 0x8 SizeofCanFilter = 0x8
SizeofTCPRepairOpt = 0x8 SizeofTCPRepairOpt = 0x8
) )
@ -2514,7 +2486,7 @@ type XDPMmapOffsets struct {
type XDPUmemReg struct { type XDPUmemReg struct {
Addr uint64 Addr uint64
Len uint64 Len uint64
Size uint32 Chunk_size uint32
Headroom uint32 Headroom uint32
Flags uint32 Flags uint32
Tx_metadata_len uint32 Tx_metadata_len uint32
@ -3794,7 +3766,7 @@ const (
ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_GET = 0x24
ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_PSE_SET = 0x25
ETHTOOL_MSG_RSS_GET = 0x26 ETHTOOL_MSG_RSS_GET = 0x26
ETHTOOL_MSG_USER_MAX = 0x2c ETHTOOL_MSG_USER_MAX = 0x2b
ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_KERNEL_NONE = 0x0
ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1
ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2
@ -3834,7 +3806,7 @@ const (
ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_MODULE_NTF = 0x24
ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_PSE_GET_REPLY = 0x25
ETHTOOL_MSG_RSS_GET_REPLY = 0x26 ETHTOOL_MSG_RSS_GET_REPLY = 0x26
ETHTOOL_MSG_KERNEL_MAX = 0x2c ETHTOOL_MSG_KERNEL_MAX = 0x2b
ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_COMPACT_BITSETS = 0x1
ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_OMIT_REPLY = 0x2
ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_FLAG_STATS = 0x4
@ -3979,7 +3951,7 @@ const (
ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17
ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18
ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19
ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_COALESCE_MAX = 0x1c
ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_UNSPEC = 0x0
ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_HEADER = 0x1
ETHTOOL_A_PAUSE_AUTONEG = 0x2 ETHTOOL_A_PAUSE_AUTONEG = 0x2
@ -4637,7 +4609,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX = 0x14a
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85 NL80211_ATTR_MAX_MATCH_SETS = 0x85
@ -5241,7 +5213,7 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX = 0x20
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc


@ -727,37 +727,6 @@ const (
RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBA = 0x8
RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBB = 0x10
RISCV_HWPROBE_EXT_ZBS = 0x20 RISCV_HWPROBE_EXT_ZBS = 0x20
RISCV_HWPROBE_EXT_ZICBOZ = 0x40
RISCV_HWPROBE_EXT_ZBC = 0x80
RISCV_HWPROBE_EXT_ZBKB = 0x100
RISCV_HWPROBE_EXT_ZBKC = 0x200
RISCV_HWPROBE_EXT_ZBKX = 0x400
RISCV_HWPROBE_EXT_ZKND = 0x800
RISCV_HWPROBE_EXT_ZKNE = 0x1000
RISCV_HWPROBE_EXT_ZKNH = 0x2000
RISCV_HWPROBE_EXT_ZKSED = 0x4000
RISCV_HWPROBE_EXT_ZKSH = 0x8000
RISCV_HWPROBE_EXT_ZKT = 0x10000
RISCV_HWPROBE_EXT_ZVBB = 0x20000
RISCV_HWPROBE_EXT_ZVBC = 0x40000
RISCV_HWPROBE_EXT_ZVKB = 0x80000
RISCV_HWPROBE_EXT_ZVKG = 0x100000
RISCV_HWPROBE_EXT_ZVKNED = 0x200000
RISCV_HWPROBE_EXT_ZVKNHA = 0x400000
RISCV_HWPROBE_EXT_ZVKNHB = 0x800000
RISCV_HWPROBE_EXT_ZVKSED = 0x1000000
RISCV_HWPROBE_EXT_ZVKSH = 0x2000000
RISCV_HWPROBE_EXT_ZVKT = 0x4000000
RISCV_HWPROBE_EXT_ZFH = 0x8000000
RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000
RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000
RISCV_HWPROBE_EXT_ZVFH = 0x40000000
RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000
RISCV_HWPROBE_EXT_ZFA = 0x100000000
RISCV_HWPROBE_EXT_ZTSO = 0x200000000
RISCV_HWPROBE_EXT_ZACAS = 0x400000000
RISCV_HWPROBE_EXT_ZICOND = 0x800000000
RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000
RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5
RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0
RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1
@ -765,6 +734,4 @@ const (
RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_FAST = 0x3
RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4
RISCV_HWPROBE_MISALIGNED_MASK = 0x7 RISCV_HWPROBE_MISALIGNED_MASK = 0x7
RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6
RISCV_HWPROBE_WHICH_CPUS = 0x1
) )

vendor/modules.txt (vendored, 14 changed lines)

@ -2,10 +2,10 @@
## explicit; go 1.20 ## explicit; go 1.20
filippo.io/edwards25519 filippo.io/edwards25519
filippo.io/edwards25519/field filippo.io/edwards25519/field
# github.com/antchfx/htmlquery v1.3.3 # github.com/antchfx/htmlquery v1.3.2
## explicit; go 1.14 ## explicit; go 1.14
github.com/antchfx/htmlquery github.com/antchfx/htmlquery
# github.com/antchfx/xpath v1.3.2 # github.com/antchfx/xpath v1.3.1
## explicit; go 1.14 ## explicit; go 1.14
github.com/antchfx/xpath github.com/antchfx/xpath
# github.com/go-sql-driver/mysql v1.8.1 # github.com/go-sql-driver/mysql v1.8.1
@ -49,7 +49,7 @@ github.com/mattn/go-colorable
# github.com/mattn/go-isatty v0.0.20 # github.com/mattn/go-isatty v0.0.20
## explicit; go 1.15 ## explicit; go 1.15
github.com/mattn/go-isatty github.com/mattn/go-isatty
# github.com/mattn/go-sqlite3 v1.14.24 # github.com/mattn/go-sqlite3 v1.14.22
## explicit; go 1.19 ## explicit; go 1.19
github.com/mattn/go-sqlite3 github.com/mattn/go-sqlite3
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
@ -85,11 +85,11 @@ github.com/valyala/bytebufferpool
# github.com/valyala/fasttemplate v1.2.2 # github.com/valyala/fasttemplate v1.2.2
## explicit; go 1.12 ## explicit; go 1.12
github.com/valyala/fasttemplate github.com/valyala/fasttemplate
# golang.org/x/crypto v0.28.0 # golang.org/x/crypto v0.26.0
## explicit; go 1.20 ## explicit; go 1.20
golang.org/x/crypto/acme golang.org/x/crypto/acme
golang.org/x/crypto/acme/autocert golang.org/x/crypto/acme/autocert
# golang.org/x/net v0.30.0 # golang.org/x/net v0.28.0
## explicit; go 1.18 ## explicit; go 1.18
golang.org/x/net/html golang.org/x/net/html
golang.org/x/net/html/atom golang.org/x/net/html/atom
@ -99,10 +99,10 @@ golang.org/x/net/http2
golang.org/x/net/http2/h2c golang.org/x/net/http2/h2c
golang.org/x/net/http2/hpack golang.org/x/net/http2/hpack
golang.org/x/net/idna golang.org/x/net/idna
# golang.org/x/sys v0.26.0 # golang.org/x/sys v0.24.0
## explicit; go 1.18 ## explicit; go 1.18
golang.org/x/sys/unix golang.org/x/sys/unix
# golang.org/x/text v0.19.0 # golang.org/x/text v0.17.0
## explicit; go 1.18 ## explicit; go 1.18
golang.org/x/text/encoding golang.org/x/text/encoding
golang.org/x/text/encoding/charmap golang.org/x/text/encoding/charmap