excluded cache control for apt-get update files

parent 02fe4d04b5
commit 5734c3019a

.gitignore (new file, +1)
@@ -0,0 +1 @@
+/go-aptproxy
cache/cache.go (+4)
@@ -10,11 +10,14 @@ import (
 	"net/http"
 	"os"
 	"path"
+	"regexp"
 	"strconv"
 	"sync"
 	"time"
 )
 
+var filepattern = regexp.MustCompile(`((In)?Release(.gpg)?|Packages.gz)`)
+
 // Reader is a generic interface for reading cache entries either from disk or
 // directly attached to a downloader.
 type Reader interface {
@@ -73,6 +76,7 @@ func (c *Cache) GetReader(rawurl string, maxAge time.Duration) (Reader, error) {
 	e, _ := r.GetEntry()
 	lastModified, _ := time.Parse(http.TimeFormat, e.LastModified)
 	if e.Complete &&
+		!filepattern.MatchString(rawurl) &&
 		(maxAge == -1 ||
 			lastModified.Before(time.Now().Add(maxAge))) {
 		log.Println("[HIT]", rawurl)
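The new check makes GetReader skip the cache-hit branch whenever the request URL matches filepattern, so apt index files (Release, InRelease, Release.gpg, Packages.gz) are always revalidated upstream while package downloads keep being served from cache. The following minimal sketch shows the match behaviour; the example URLs are assumptions, and the regexp is copied verbatim from the hunk above (note that its dots are unescaped and therefore match any character):

```go
// Hypothetical standalone demo of which request URLs the new filepattern
// classifies as apt index files (and therefore excludes from cache hits).
package main

import (
	"fmt"
	"regexp"
)

var filepattern = regexp.MustCompile(`((In)?Release(.gpg)?|Packages.gz)`)

func main() {
	urls := []string{
		"http://deb.debian.org/debian/dists/bullseye/InRelease",
		"http://deb.debian.org/debian/dists/bullseye/Release.gpg",
		"http://deb.debian.org/debian/dists/bullseye/main/binary-amd64/Packages.gz",
		"http://deb.debian.org/debian/pool/main/c/curl/curl_7.74.0-1.3_amd64.deb",
	}
	for _, u := range urls {
		// A match means GetReader bypasses its cache-hit branch, so
		// `apt-get update` metadata is always re-fetched upstream.
		fmt.Printf("%-80s match=%v\n", u, filepattern.MatchString(u))
	}
}
```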
go.mod (new file, +19)
@@ -0,0 +1,19 @@
module go-aptproxy

go 1.17

require (
	github.com/fsnotify/fsnotify v1.5.1
	github.com/hectane/go-asyncserver v0.1.0
	github.com/micro/mdns v0.3.0
	github.com/nathan-osman/go-aptproxy v0.0.0-20160507053712-02fe4d04b5fb
	github.com/pquerna/cachecontrol v0.1.0
)

require (
	github.com/miekg/dns v1.1.3 // indirect
	golang.org/x/crypto v0.0.0-20190130090550-b01c7a725664 // indirect
	golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 // indirect
	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
	golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c // indirect
)
go.sum (new file, +31)
@@ -0,0 +1,31 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/hectane/go-asyncserver v0.1.0 h1:miQMVjLchB0fklo4nKoUp3votMOCSJhddVlPSmyiieM=
github.com/hectane/go-asyncserver v0.1.0/go.mod h1:waBaOSU0nKsRCHYMISp/+MSakn6C5HyDSF3y0NUknu0=
github.com/micro/mdns v0.3.0 h1:bYycYe+98AXR3s8Nq5qvt6C573uFTDPIYzJemWON0QE=
github.com/micro/mdns v0.3.0/go.mod h1:KJ0dW7KmicXU2BV++qkLlmHYcVv7/hHnbtguSWt9Aoc=
github.com/miekg/dns v1.1.3 h1:1g0r1IvskvgL8rR+AcHzUA+oFmGcQlaIm4IqakufeMM=
github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/nathan-osman/go-aptproxy v0.0.0-20160507053712-02fe4d04b5fb h1:wyxHQadQph4aRZhnsIP04eLgVP5GodIUYcto+TLITCE=
github.com/nathan-osman/go-aptproxy v0.0.0-20160507053712-02fe4d04b5fb/go.mod h1:5Ix064b/k8CMKEnpquzB2sDr5D9Wm1pf+DVQi9Q+UVg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/crypto v0.0.0-20190130090550-b01c7a725664 h1:YbZJ76lQ1BqNhVe7dKTSB67wDrc2VPRR75IyGyyPDX8=
golang.org/x/crypto v0.0.0-20190130090550-b01c7a725664/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -1,8 +1,9 @@
 package main
 
 import (
-	"github.com/hectane/go-asyncserver"
-	"github.com/nathan-osman/go-aptproxy/cache"
+	"go-aptproxy/cache"
+
+	server "github.com/hectane/go-asyncserver"
 	"github.com/pquerna/cachecontrol/cacheobject"
 
 	"io"
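Two things change in this import block: the project's own cache package is now imported through the module path declared in go.mod (go-aptproxy/cache) instead of the upstream github.com/nathan-osman/go-aptproxy path, and go-asyncserver is bound to an explicit server identifier, presumably because that package's declared name does not match the last element of its import path. The aliasing mechanism itself is illustrated below with standard-library packages only; this is an illustrative sketch, not code from the repository:

```go
// Import aliasing in Go: bind a package to an explicit identifier when its
// declared name differs from the import path's last element, or to avoid a
// name clash. Standard library only, so this compiles as-is.
package main

import (
	"fmt"
	"math/rand"

	crand "crypto/rand" // alias avoids clashing with math/rand's identifier
)

func main() {
	// math/rand: deterministic pseudo-random source.
	fmt.Println("pseudo-random:", rand.Intn(100))

	// crypto/rand via its alias: cryptographically secure bytes.
	buf := make([]byte, 4)
	if _, err := crand.Read(buf); err != nil {
		panic(err)
	}
	fmt.Printf("secure bytes: %x\n", buf)
}
```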
12
vendor/github.com/fsnotify/fsnotify/.editorconfig
generated
vendored
Normal file
12
vendor/github.com/fsnotify/fsnotify/.editorconfig
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
root = true
|
||||
|
||||
[*.go]
|
||||
indent_style = tab
|
||||
indent_size = 4
|
||||
insert_final_newline = true
|
||||
|
||||
[*.{yml,yaml}]
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
insert_final_newline = true
|
||||
trim_trailing_whitespace = true
|
1
vendor/github.com/fsnotify/fsnotify/.gitattributes
generated
vendored
Normal file
1
vendor/github.com/fsnotify/fsnotify/.gitattributes
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
go.sum linguist-generated
|
6
vendor/github.com/fsnotify/fsnotify/.gitignore
generated
vendored
Normal file
6
vendor/github.com/fsnotify/fsnotify/.gitignore
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
# Setup a Global .gitignore for OS and editor generated files:
|
||||
# https://help.github.com/articles/ignoring-files
|
||||
# git config --global core.excludesfile ~/.gitignore_global
|
||||
|
||||
.vagrant
|
||||
*.sublime-project
|
2
vendor/github.com/fsnotify/fsnotify/.mailmap
generated
vendored
Normal file
2
vendor/github.com/fsnotify/fsnotify/.mailmap
generated
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
Chris Howey <howeyc@gmail.com> <chris@howey.me>
|
||||
Nathan Youngman <git@nathany.com> <4566+nathany@users.noreply.github.com>
|
62
vendor/github.com/fsnotify/fsnotify/AUTHORS
generated
vendored
Normal file
62
vendor/github.com/fsnotify/fsnotify/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# You can update this list using the following command:
|
||||
#
|
||||
# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Aaron L <aaron@bettercoder.net>
|
||||
Adrien Bustany <adrien@bustany.org>
|
||||
Alexey Kazakov <alkazako@redhat.com>
|
||||
Amit Krishnan <amit.krishnan@oracle.com>
|
||||
Anmol Sethi <me@anmol.io>
|
||||
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
|
||||
Brian Goff <cpuguy83@gmail.com>
|
||||
Bruno Bigras <bigras.bruno@gmail.com>
|
||||
Caleb Spare <cespare@gmail.com>
|
||||
Case Nelson <case@teammating.com>
|
||||
Chris Howey <howeyc@gmail.com>
|
||||
Christoffer Buchholz <christoffer.buchholz@gmail.com>
|
||||
Daniel Wagner-Hall <dawagner@gmail.com>
|
||||
Dave Cheney <dave@cheney.net>
|
||||
Eric Lin <linxiulei@gmail.com>
|
||||
Evan Phoenix <evan@fallingsnow.net>
|
||||
Francisco Souza <f@souza.cc>
|
||||
Gautam Dey <gautam.dey77@gmail.com>
|
||||
Hari haran <hariharan.uno@gmail.com>
|
||||
Ichinose Shogo <shogo82148@gmail.com>
|
||||
Johannes Ebke <johannes@ebke.org>
|
||||
John C Barstow <jbowtie@amathaine.com>
|
||||
Kelvin Fo <vmirage@gmail.com>
|
||||
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
|
||||
Matt Layher <mdlayher@gmail.com>
|
||||
Matthias Stone <matthias@bellstone.ca>
|
||||
Nathan Youngman <git@nathany.com>
|
||||
Nickolai Zeldovich <nickolai@csail.mit.edu>
|
||||
Oliver Bristow <evilumbrella+github@gmail.com>
|
||||
Patrick <patrick@dropbox.com>
|
||||
Paul Hammond <paul@paulhammond.org>
|
||||
Pawel Knap <pawelknap88@gmail.com>
|
||||
Pieter Droogendijk <pieter@binky.org.uk>
|
||||
Pratik Shinde <pratikshinde320@gmail.com>
|
||||
Pursuit92 <JoshChase@techpursuit.net>
|
||||
Riku Voipio <riku.voipio@linaro.org>
|
||||
Rob Figueiredo <robfig@gmail.com>
|
||||
Rodrigo Chiossi <rodrigochiossi@gmail.com>
|
||||
Slawek Ligus <root@ooz.ie>
|
||||
Soge Zhang <zhssoge@gmail.com>
|
||||
Tiffany Jernigan <tiffany.jernigan@intel.com>
|
||||
Tilak Sharma <tilaks@google.com>
|
||||
Tobias Klauser <tobias.klauser@gmail.com>
|
||||
Tom Payne <twpayne@gmail.com>
|
||||
Travis Cline <travis.cline@gmail.com>
|
||||
Tudor Golubenco <tudor.g@gmail.com>
|
||||
Vahe Khachikyan <vahe@live.ca>
|
||||
Yukang <moorekang@gmail.com>
|
||||
bronze1man <bronze1man@gmail.com>
|
||||
debrando <denis.brandolini@gmail.com>
|
||||
henrikedwards <henrik.edwards@gmail.com>
|
||||
铁哥 <guotie.9@gmail.com>
|
339
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
Normal file
339
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
Normal file
@ -0,0 +1,339 @@
|
||||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [1.5.1] - 2021-08-24
|
||||
|
||||
* Revert Add AddRaw to not follow symlinks
|
||||
|
||||
## [1.5.0] - 2021-08-20
|
||||
|
||||
* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
|
||||
* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
|
||||
* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
|
||||
* CI: Use GitHub Actions for CI and cover go 1.12-1.17
|
||||
[#378](https://github.com/fsnotify/fsnotify/pull/378)
|
||||
[#381](https://github.com/fsnotify/fsnotify/pull/381)
|
||||
[#385](https://github.com/fsnotify/fsnotify/pull/385)
|
||||
* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
|
||||
|
||||
## [1.4.7] - 2018-01-09
|
||||
|
||||
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
|
||||
* Tests: Fix missing verb on format string (thanks @rchiossi)
|
||||
* Linux: Fix deadlock in Remove (thanks @aarondl)
|
||||
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
|
||||
* Docs: Moved FAQ into the README (thanks @vahe)
|
||||
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
|
||||
* Docs: replace references to OS X with macOS
|
||||
|
||||
## [1.4.2] - 2016-10-10
|
||||
|
||||
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
|
||||
|
||||
## [1.4.1] - 2016-10-04
|
||||
|
||||
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
|
||||
|
||||
## [1.4.0] - 2016-10-01
|
||||
|
||||
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
|
||||
|
||||
## [1.3.1] - 2016-06-28
|
||||
|
||||
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
|
||||
|
||||
## [1.3.0] - 2016-04-19
|
||||
|
||||
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
|
||||
|
||||
## [1.2.10] - 2016-03-02
|
||||
|
||||
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
|
||||
|
||||
## [1.2.9] - 2016-01-13
|
||||
|
||||
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
|
||||
|
||||
## [1.2.8] - 2015-12-17
|
||||
|
||||
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
|
||||
* inotify: fix race in test
|
||||
* enable race detection for continuous integration (Linux, Mac, Windows)
|
||||
|
||||
## [1.2.5] - 2015-10-17
|
||||
|
||||
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
|
||||
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
|
||||
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
|
||||
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
|
||||
|
||||
## [1.2.1] - 2015-10-14
|
||||
|
||||
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
|
||||
|
||||
## [1.2.0] - 2015-02-08
|
||||
|
||||
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
|
||||
|
||||
## [1.1.1] - 2015-02-05
|
||||
|
||||
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||
|
||||
## [1.1.0] - 2014-12-12
|
||||
|
||||
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
|
||||
* add low-level functions
|
||||
* only need to store flags on directories
|
||||
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
|
||||
* done can be an unbuffered channel
|
||||
* remove calls to os.NewSyscallError
|
||||
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## [1.0.4] - 2014-09-07
|
||||
|
||||
* kqueue: add dragonfly to the build tags.
|
||||
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||
|
||||
## [1.0.3] - 2014-08-19
|
||||
|
||||
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
|
||||
|
||||
## [1.0.2] - 2014-08-17
|
||||
|
||||
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||
|
||||
## [1.0.0] - 2014-08-15
|
||||
|
||||
* [API] Remove AddWatch on Windows, use Add.
|
||||
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
|
||||
* Minor updates based on feedback from golint.
|
||||
|
||||
## dev / 2014-07-09
|
||||
|
||||
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
|
||||
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||
|
||||
## dev / 2014-07-04
|
||||
|
||||
* kqueue: fix incorrect mutex used in Close()
|
||||
* Update example to demonstrate usage of Op.
|
||||
|
||||
## dev / 2014-06-28
|
||||
|
||||
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
|
||||
* Fix for String() method on Event (thanks Alex Brainman)
|
||||
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||
|
||||
## dev / 2014-06-21
|
||||
|
||||
* Events channel of type Event rather than *Event.
|
||||
* [internal] use syscall constants directly for inotify and kqueue.
|
||||
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||
|
||||
## dev / 2014-06-19
|
||||
|
||||
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||
* [internal] remove cookie from Event struct (unused).
|
||||
* [internal] Event struct has the same definition across every OS.
|
||||
* [internal] remove internal watch and removeWatch methods.
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||
* [API] Pluralized channel names: Events and Errors.
|
||||
* [API] Renamed FileEvent struct to Event.
|
||||
* [API] Op constants replace methods like IsCreate().
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## dev / 2014-05-23
|
||||
|
||||
* [API] Remove current implementation of WatchFlags.
|
||||
* current implementation doesn't take advantage of OS for efficiency
|
||||
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||
* no tests for the current implementation
|
||||
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||
|
||||
## [0.9.3] - 2014-12-31
|
||||
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## [0.9.2] - 2014-08-17
|
||||
|
||||
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
|
||||
## [0.9.1] - 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## [0.9.0] - 2014-01-17
|
||||
|
||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||
|
||||
## [0.8.12] - 2013-11-13
|
||||
|
||||
* [API] Remove FD_SET and friends from Linux adapter
|
||||
|
||||
## [0.8.11] - 2013-11-02
|
||||
|
||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
|
||||
|
||||
## [0.8.10] - 2013-10-19
|
||||
|
||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||
|
||||
## [0.8.9] - 2013-09-08
|
||||
|
||||
* [Doc] Contributing (thanks @nathany)
|
||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||
|
||||
## [0.8.8] - 2013-06-17
|
||||
|
||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||
|
||||
## [0.8.7] - 2013-06-03
|
||||
|
||||
* [API] Make syscall flags internal
|
||||
* [Fix] inotify: ignore event changes
|
||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||
* [Fix] tests on Windows
|
||||
* lower case error messages
|
||||
|
||||
## [0.8.6] - 2013-05-23
|
||||
|
||||
* kqueue: Use EVT_ONLY flag on Darwin
|
||||
* [Doc] Update README with full example
|
||||
|
||||
## [0.8.5] - 2013-05-09
|
||||
|
||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||
|
||||
## [0.8.4] - 2013-04-07
|
||||
|
||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||
|
||||
## [0.8.3] - 2013-03-13
|
||||
|
||||
* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||
|
||||
## [0.8.2] - 2013-02-07
|
||||
|
||||
* [Doc] add Authors
|
||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||
|
||||
## [0.8.1] - 2013-01-09
|
||||
|
||||
* [Fix] Windows path separators
|
||||
* [Doc] BSD License
|
||||
|
||||
## [0.8.0] - 2012-11-09
|
||||
|
||||
* kqueue: directory watching improvements (thanks @vmirage)
|
||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||
|
||||
## [0.7.4] - 2012-10-09
|
||||
|
||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||
* [Fix] kqueue: modify after recreation of file
|
||||
|
||||
## [0.7.3] - 2012-09-27
|
||||
|
||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||
|
||||
## [0.7.2] - 2012-09-01
|
||||
|
||||
* kqueue: events for created directories
|
||||
|
||||
## [0.7.1] - 2012-07-14
|
||||
|
||||
* [Fix] for renaming files
|
||||
|
||||
## [0.7.0] - 2012-07-02
|
||||
|
||||
* [Feature] FSNotify flags
|
||||
* [Fix] inotify: Added file name back to event path
|
||||
|
||||
## [0.6.0] - 2012-06-06
|
||||
|
||||
* kqueue: watch files after directory created (thanks @tmc)
|
||||
|
||||
## [0.5.1] - 2012-05-22
|
||||
|
||||
* [Fix] inotify: remove all watches before Close()
|
||||
|
||||
## [0.5.0] - 2012-05-03
|
||||
|
||||
* [API] kqueue: return errors during watch instead of sending over channel
|
||||
* kqueue: match symlink behavior on Linux
|
||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||
|
||||
## [0.4.0] - 2012-03-30
|
||||
|
||||
* Go 1 released: build with go tool
|
||||
* [Feature] Windows support using winfsnotify
|
||||
* Windows does not have attribute change notifications
|
||||
* Roll attribute notifications into IsModify
|
||||
|
||||
## [0.3.0] - 2012-02-19
|
||||
|
||||
* kqueue: add files when watch directory
|
||||
|
||||
## [0.2.0] - 2011-12-30
|
||||
|
||||
* update to latest Go weekly code
|
||||
|
||||
## [0.1.0] - 2011-10-19
|
||||
|
||||
* kqueue: add watch on file creation to match inotify
|
||||
* kqueue: create file event
|
||||
* inotify: ignore `IN_IGNORED` events
|
||||
* event String()
|
||||
* linux: common FileEvent functions
|
||||
* initial commit
|
||||
|
||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
77
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
Normal file
77
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
# Contributing
|
||||
|
||||
## Issues
|
||||
|
||||
* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
|
||||
* Please indicate the platform you are using fsnotify on.
|
||||
* A code example to reproduce the problem is appreciated.
|
||||
|
||||
## Pull Requests
|
||||
|
||||
### Contributor License Agreement
|
||||
|
||||
fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
|
||||
|
||||
Please indicate that you have signed the CLA in your pull request.
|
||||
|
||||
### How fsnotify is Developed
|
||||
|
||||
* Development is done on feature branches.
|
||||
* Tests are run on BSD, Linux, macOS and Windows.
|
||||
* Pull requests are reviewed and [applied to master][am] using [hub][].
|
||||
* Maintainers may modify or squash commits rather than asking contributors to.
|
||||
* To issue a new release, the maintainers will:
|
||||
* Update the CHANGELOG
|
||||
* Tag a version, which will become available through gopkg.in.
|
||||
|
||||
### How to Fork
|
||||
|
||||
For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
|
||||
|
||||
1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
|
||||
2. Create your feature branch (`git checkout -b my-new-feature`)
|
||||
3. Ensure everything works and the tests pass (see below)
|
||||
4. Commit your changes (`git commit -am 'Add some feature'`)
|
||||
|
||||
Contribute upstream:
|
||||
|
||||
1. Fork fsnotify on GitHub
|
||||
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
|
||||
3. Push to the branch (`git push fork my-new-feature`)
|
||||
4. Create a new Pull Request on GitHub
|
||||
|
||||
This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
|
||||
|
||||
### Testing
|
||||
|
||||
fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
|
||||
|
||||
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
|
||||
|
||||
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
|
||||
|
||||
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
|
||||
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
|
||||
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
|
||||
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
|
||||
* When you're done, you will want to halt or destroy the Vagrant boxes.
|
||||
|
||||
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
|
||||
|
||||
Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
|
||||
|
||||
### Maintainers
|
||||
|
||||
Help maintaining fsnotify is welcome. To be a maintainer:
|
||||
|
||||
* Submit a pull request and sign the CLA as above.
|
||||
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
|
||||
|
||||
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
|
||||
|
||||
All code changes should be internal pull requests.
|
||||
|
||||
Releases are tagged using [Semantic Versioning](http://semver.org/).
|
||||
|
||||
[hub]: https://github.com/github/hub
|
||||
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
|
28
vendor/github.com/fsnotify/fsnotify/LICENSE
generated
vendored
Normal file
28
vendor/github.com/fsnotify/fsnotify/LICENSE
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
130
vendor/github.com/fsnotify/fsnotify/README.md
generated
vendored
Normal file
130
vendor/github.com/fsnotify/fsnotify/README.md
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
||||
# File system notifications for Go
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
|
||||
|
||||
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
|
||||
|
||||
```console
|
||||
go get -u golang.org/x/sys/...
|
||||
```
|
||||
|
||||
Cross platform: Windows, Linux, BSD and macOS.
|
||||
|
||||
| Adapter | OS | Status |
|
||||
| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| inotify | Linux 2.6.27 or later, Android\* | Supported |
|
||||
| kqueue | BSD, macOS, iOS\* | Supported |
|
||||
| ReadDirectoryChangesW | Windows | Supported |
|
||||
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
|
||||
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) |
|
||||
| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
|
||||
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
|
||||
|
||||
\* Android and iOS are untested.
|
||||
|
||||
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
|
||||
|
||||
## API stability
|
||||
|
||||
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
|
||||
|
||||
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
|
||||
|
||||
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
func main() {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer watcher.Close()
|
||||
|
||||
done := make(chan bool)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-watcher.Events:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
log.Println("event:", event)
|
||||
if event.Op&fsnotify.Write == fsnotify.Write {
|
||||
log.Println("modified file:", event.Name)
|
||||
}
|
||||
case err, ok := <-watcher.Errors:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
log.Println("error:", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
err = watcher.Add("/tmp/foo")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
<-done
|
||||
}
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
|
||||
|
||||
## Example
|
||||
|
||||
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
|
||||
|
||||
## FAQ
|
||||
|
||||
**When a file is moved to another directory is it still being watched?**
|
||||
|
||||
No (it shouldn't be, unless you are watching where it was moved to).
|
||||
|
||||
**When I watch a directory, are all subdirectories watched as well?**
|
||||
|
||||
No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
|
||||
|
||||
**Do I have to watch the Error and Event channels in a separate goroutine?**
|
||||
|
||||
As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
|
||||
|
||||
**Why am I receiving multiple events for the same file on OS X?**
|
||||
|
||||
Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
|
||||
|
||||
**How many files can be watched at once?**
|
||||
|
||||
There are OS-specific limits as to how many watches can be created:
|
||||
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
|
||||
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
|
||||
|
||||
**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
|
||||
|
||||
fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications.
|
||||
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#18]: https://github.com/fsnotify/fsnotify/issues/18
|
||||
[#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
[#7]: https://github.com/howeyc/fsnotify/issues/7
|
||||
|
||||
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
|
||||
|
||||
## Related Projects
|
||||
|
||||
* [notify](https://github.com/rjeczalik/notify)
|
||||
* [fsevents](https://github.com/fsnotify/fsevents)
|
||||
|
38
vendor/github.com/fsnotify/fsnotify/fen.go
generated
vendored
Normal file
38
vendor/github.com/fsnotify/fsnotify/fen.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build solaris
|
||||
// +build solaris
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
}
|
69
vendor/github.com/fsnotify/fsnotify/fsnotify.go
generated
vendored
Normal file
69
vendor/github.com/fsnotify/fsnotify/fsnotify.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
// Package fsnotify provides a platform-independent interface for file system notifications.
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Event represents a single file system notification.
|
||||
type Event struct {
|
||||
Name string // Relative path to the file or directory.
|
||||
Op Op // File operation that triggered the event.
|
||||
}
|
||||
|
||||
// Op describes a set of file operations.
|
||||
type Op uint32
|
||||
|
||||
// These are the generalized file operations that can trigger a notification.
|
||||
const (
|
||||
Create Op = 1 << iota
|
||||
Write
|
||||
Remove
|
||||
Rename
|
||||
Chmod
|
||||
)
|
||||
|
||||
func (op Op) String() string {
|
||||
// Use a buffer for efficient string concatenation
|
||||
var buffer bytes.Buffer
|
||||
|
||||
if op&Create == Create {
|
||||
buffer.WriteString("|CREATE")
|
||||
}
|
||||
if op&Remove == Remove {
|
||||
buffer.WriteString("|REMOVE")
|
||||
}
|
||||
if op&Write == Write {
|
||||
buffer.WriteString("|WRITE")
|
||||
}
|
||||
if op&Rename == Rename {
|
||||
buffer.WriteString("|RENAME")
|
||||
}
|
||||
if op&Chmod == Chmod {
|
||||
buffer.WriteString("|CHMOD")
|
||||
}
|
||||
if buffer.Len() == 0 {
|
||||
return ""
|
||||
}
|
||||
return buffer.String()[1:] // Strip leading pipe
|
||||
}
|
||||
|
||||
// String returns a string representation of the event in the form
|
||||
// "file: REMOVE|WRITE|..."
|
||||
func (e Event) String() string {
|
||||
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
|
||||
}
|
||||
|
||||
// Common errors that can be reported by a watcher
|
||||
var (
|
||||
ErrEventOverflow = errors.New("fsnotify queue overflow")
|
||||
)
|
338
vendor/github.com/fsnotify/fsnotify/inotify.go
generated
vendored
Normal file
338
vendor/github.com/fsnotify/fsnotify/inotify.go
generated
vendored
Normal file
@ -0,0 +1,338 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
mu sync.Mutex // Map access
|
||||
fd int
|
||||
poller *fdPoller
|
||||
watches map[string]*watch // Map of inotify watches (key: path)
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
// Create inotify fd
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
|
||||
if fd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
// Create epoll
|
||||
poller, err := newFdPoller(fd)
|
||||
if err != nil {
|
||||
unix.Close(fd)
|
||||
return nil, err
|
||||
}
|
||||
w := &Watcher{
|
||||
fd: fd,
|
||||
poller: poller,
|
||||
watches: make(map[string]*watch),
|
||||
paths: make(map[int]string),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send 'close' signal to goroutine, and set the Watcher to closed.
|
||||
close(w.done)
|
||||
|
||||
// Wake up goroutine
|
||||
w.poller.wake()
|
||||
|
||||
// Wait for goroutine to close
|
||||
<-w.doneResp
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
if w.isClosed() {
|
||||
return errors.New("inotify instance already closed")
|
||||
}
|
||||
|
||||
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
|
||||
var flags uint32 = agnosticEvents
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watchEntry := w.watches[name]
|
||||
if watchEntry != nil {
|
||||
flags |= watchEntry.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
|
||||
if wd == -1 {
|
||||
return errno
|
||||
}
|
||||
|
||||
if watchEntry == nil {
|
||||
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
||||
w.paths[wd] = name
|
||||
} else {
|
||||
watchEntry.wd = uint32(wd)
|
||||
watchEntry.flags = flags
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
|
||||
// Fetch the watch.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watch, ok := w.watches[name]
|
||||
|
||||
// Remove it from inotify.
|
||||
if !ok {
|
||||
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
|
||||
}
|
||||
|
||||
// We successfully removed the watch if InotifyRmWatch doesn't return an
|
||||
// error, we need to clean up our internal state to ensure it matches
|
||||
// inotify's kernel state.
|
||||
delete(w.paths, int(watch.wd))
|
||||
delete(w.watches, name)
|
||||
|
||||
// inotify_rm_watch will return EINVAL if the file has been deleted;
|
||||
// the inotify will already have been removed.
|
||||
// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
|
||||
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
|
||||
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
|
||||
// by another thread and we have not received IN_IGNORE event.
|
||||
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
|
||||
if success == -1 {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every case.
|
||||
// the only two possible errors are:
|
||||
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
|
||||
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
|
||||
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
|
||||
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
|
||||
return errno
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Events channel
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
n int // Number of bytes read with read()
|
||||
errno error // Syscall errno
|
||||
ok bool // For poller.wait
|
||||
)
|
||||
|
||||
defer close(w.doneResp)
|
||||
defer close(w.Errors)
|
||||
defer close(w.Events)
|
||||
defer unix.Close(w.fd)
|
||||
defer w.poller.close()
|
||||
|
||||
for {
|
||||
// See if we have been closed.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
ok, errno = w.poller.wait()
|
||||
if errno != nil {
|
||||
select {
|
||||
case w.Errors <- errno:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
n, errno = unix.Read(w.fd, buf[:])
|
||||
// If a signal interrupted execution, see if we've been asked to close, and try again.
|
||||
// http://man7.org/linux/man-pages/man7/signal.7.html :
|
||||
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
|
||||
// unix.Read might have been woken up by Close. If so, we're done.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
var err error
|
||||
if n == 0 {
|
||||
// If EOF is received. This should really never happen.
|
||||
err = io.EOF
|
||||
} else if n < 0 {
|
||||
// If an error occurred while reading.
|
||||
err = errno
|
||||
} else {
|
||||
// Read was too short.
|
||||
err = errors.New("notify: short read in readEvents()")
|
||||
}
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
// We don't know how many events we just read into the buffer
|
||||
// While the offset points to at least one whole event...
|
||||
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
|
||||
mask := uint32(raw.Mask)
|
||||
nameLen := uint32(raw.Len)
|
||||
|
||||
if mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
select {
|
||||
case w.Errors <- ErrEventOverflow:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If the event happened to the watched directory or the watched file, the kernel
|
||||
// doesn't append the filename to the event, but we would like to always fill the
|
||||
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||
// the "paths" map.
|
||||
w.mu.Lock()
|
||||
name, ok := w.paths[int(raw.Wd)]
|
||||
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
|
||||
// This is a sign to clean up the maps, otherwise we are no longer in sync
|
||||
// with the inotify kernel state which has already deleted the watch
|
||||
// automatically.
|
||||
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
delete(w.paths, int(raw.Wd))
|
||||
delete(w.watches, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if nameLen > 0 {
|
||||
// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
|
||||
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||
}
|
||||
|
||||
event := newEvent(name, mask)
|
||||
|
||||
// Send the events that are not ignored on the events channel
|
||||
if !event.ignoreLinux(mask) {
|
||||
select {
|
||||
case w.Events <- event:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += unix.SizeofInotifyEvent + nameLen
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Certain types of events can be "ignored" and not sent over the Events
|
||||
// channel. Such as events marked ignore by the kernel, or MODIFY events
|
||||
// against files that do not exist.
|
||||
func (e *Event) ignoreLinux(mask uint32) bool {
|
||||
// Ignore anything the inotify API says to ignore
|
||||
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
|
||||
return true
|
||||
}
|
||||
|
||||
// If the event is not a DELETE or RENAME, the file must exist.
|
||||
// Otherwise the event is ignored.
|
||||
// *Note*: this was put in place because it was seen that a MODIFY
|
||||
// event was sent after the DELETE. This ignores that MODIFY and
|
||||
// assumes a DELETE will come or has come if the file doesn't exist.
|
||||
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
|
||||
_, statErr := os.Lstat(e.Name)
|
||||
return os.IsNotExist(statErr)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// newEvent returns an platform-independent Event based on an inotify mask.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
188
vendor/github.com/fsnotify/fsnotify/inotify_poller.go
generated
vendored
Normal file
188
vendor/github.com/fsnotify/fsnotify/inotify_poller.go
generated
vendored
Normal file
@ -0,0 +1,188 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type fdPoller struct {
|
||||
fd int // File descriptor (as returned by the inotify_init() syscall)
|
||||
epfd int // Epoll file descriptor
|
||||
pipe [2]int // Pipe for waking up
|
||||
}
|
||||
|
||||
func emptyPoller(fd int) *fdPoller {
|
||||
poller := new(fdPoller)
|
||||
poller.fd = fd
|
||||
poller.epfd = -1
|
||||
poller.pipe[0] = -1
|
||||
poller.pipe[1] = -1
|
||||
return poller
|
||||
}
|
||||
|
||||
// Create a new inotify poller.
|
||||
// This creates an inotify handler, and an epoll handler.
|
||||
func newFdPoller(fd int) (*fdPoller, error) {
|
||||
var errno error
|
||||
poller := emptyPoller(fd)
|
||||
defer func() {
|
||||
if errno != nil {
|
||||
poller.close()
|
||||
}
|
||||
}()
|
||||
poller.fd = fd
|
||||
|
||||
// Create epoll fd
|
||||
poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
|
||||
if poller.epfd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
|
||||
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
// Register inotify fd with epoll
|
||||
event := unix.EpollEvent{
|
||||
Fd: int32(poller.fd),
|
||||
Events: unix.EPOLLIN,
|
||||
}
|
||||
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
// Register pipe fd with epoll
|
||||
event = unix.EpollEvent{
|
||||
Fd: int32(poller.pipe[0]),
|
||||
Events: unix.EPOLLIN,
|
||||
}
|
||||
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
return poller, nil
|
||||
}
|
||||
|
||||
// Wait using epoll.
|
||||
// Returns true if something is ready to be read,
|
||||
// false if there is not.
|
||||
func (poller *fdPoller) wait() (bool, error) {
|
||||
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
|
||||
// I don't know whether epoll_wait returns the number of events returned,
|
||||
// or the total number of events ready.
|
||||
// I decided to catch both by making the buffer one larger than the maximum.
|
||||
events := make([]unix.EpollEvent, 7)
|
||||
for {
|
||||
n, errno := unix.EpollWait(poller.epfd, events, -1)
|
||||
if n == -1 {
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
return false, errno
|
||||
}
|
||||
if n == 0 {
|
||||
// If there are no events, try again.
|
||||
continue
|
||||
}
|
||||
if n > 6 {
|
||||
// This should never happen. More events were returned than should be possible.
|
||||
return false, errors.New("epoll_wait returned more events than I know what to do with")
|
||||
}
|
||||
ready := events[:n]
|
||||
epollhup := false
|
||||
epollerr := false
|
||||
epollin := false
|
||||
for _, event := range ready {
|
||||
if event.Fd == int32(poller.fd) {
|
||||
if event.Events&unix.EPOLLHUP != 0 {
|
||||
// This should not happen, but if it does, treat it as a wakeup.
|
||||
epollhup = true
|
||||
}
|
||||
if event.Events&unix.EPOLLERR != 0 {
|
||||
// If an error is waiting on the file descriptor, we should pretend
|
||||
// something is ready to read, and let unix.Read pick up the error.
|
||||
epollerr = true
|
||||
}
|
||||
if event.Events&unix.EPOLLIN != 0 {
|
||||
// There is data to read.
|
||||
epollin = true
|
||||
}
|
||||
}
|
||||
if event.Fd == int32(poller.pipe[0]) {
|
||||
if event.Events&unix.EPOLLHUP != 0 {
|
||||
// Write pipe descriptor was closed, by us. This means we're closing down the
|
||||
// watcher, and we should wake up.
|
||||
}
|
||||
if event.Events&unix.EPOLLERR != 0 {
|
||||
// If an error is waiting on the pipe file descriptor.
|
||||
// This is an absolute mystery, and should never ever happen.
|
||||
return false, errors.New("Error on the pipe descriptor.")
|
||||
}
|
||||
if event.Events&unix.EPOLLIN != 0 {
|
||||
// This is a regular wakeup, so we have to clear the buffer.
|
||||
err := poller.clearWake()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if epollhup || epollerr || epollin {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Wake the poller by writing a single byte to the write end of the pipe.
|
||||
func (poller *fdPoller) wake() error {
|
||||
buf := make([]byte, 1)
|
||||
n, errno := unix.Write(poller.pipe[1], buf)
|
||||
if n == -1 {
|
||||
if errno == unix.EAGAIN {
|
||||
// Buffer is full, poller will wake.
|
||||
return nil
|
||||
}
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (poller *fdPoller) clearWake() error {
|
||||
// You have to be woken up a LOT in order to get to 100!
|
||||
buf := make([]byte, 100)
|
||||
n, errno := unix.Read(poller.pipe[0], buf)
|
||||
if n == -1 {
|
||||
if errno == unix.EAGAIN {
|
||||
// Buffer is empty, someone else cleared our wake.
|
||||
return nil
|
||||
}
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close all poller file descriptors, but not the one passed to it.
|
||||
func (poller *fdPoller) close() {
|
||||
if poller.pipe[1] != -1 {
|
||||
unix.Close(poller.pipe[1])
|
||||
}
|
||||
if poller.pipe[0] != -1 {
|
||||
unix.Close(poller.pipe[0])
|
||||
}
|
||||
if poller.epfd != -1 {
|
||||
unix.Close(poller.epfd)
|
||||
}
|
||||
}
|
522
vendor/github.com/fsnotify/fsnotify/kqueue.go
generated
vendored
Normal file
@ -0,0 +1,522 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
||||
// +build freebsd openbsd netbsd dragonfly darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
|
||||
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||
|
||||
mu sync.Mutex // Protects access to watcher data
|
||||
watches map[string]int // Map of watched file descriptors (key: path).
|
||||
externalWatches map[string]bool // Map of watches added by user of the library.
|
||||
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
|
||||
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
|
||||
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
type pathInfo struct {
|
||||
name string
|
||||
isDir bool
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
kq, err := kqueue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
kq: kq,
|
||||
watches: make(map[string]int),
|
||||
dirFlags: make(map[string]uint32),
|
||||
paths: make(map[int]pathInfo),
|
||||
fileExists: make(map[string]bool),
|
||||
externalWatches: make(map[string]bool),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// copy paths to remove while locked
|
||||
var pathsToRemove = make([]string, 0, len(w.watches))
|
||||
for name := range w.watches {
|
||||
pathsToRemove = append(pathsToRemove, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
// unlock before calling Remove, which also locks
|
||||
|
||||
for _, name := range pathsToRemove {
|
||||
w.Remove(name)
|
||||
}
|
||||
|
||||
// send a "quit" message to the reader goroutine
|
||||
close(w.done)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
w.mu.Lock()
|
||||
w.externalWatches[name] = true
|
||||
w.mu.Unlock()
|
||||
_, err := w.addWatch(name, noteAllEvents)
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
w.mu.Lock()
|
||||
watchfd, ok := w.watches[name]
|
||||
w.mu.Unlock()
|
||||
if !ok {
|
||||
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
|
||||
}
|
||||
|
||||
const registerRemove = unix.EV_DELETE
|
||||
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unix.Close(watchfd)
|
||||
|
||||
w.mu.Lock()
|
||||
isDir := w.paths[watchfd].isDir
|
||||
delete(w.watches, name)
|
||||
delete(w.paths, watchfd)
|
||||
delete(w.dirFlags, name)
|
||||
w.mu.Unlock()
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if isDir {
|
||||
var pathsToRemove []string
|
||||
w.mu.Lock()
|
||||
for _, path := range w.paths {
|
||||
wdir, _ := filepath.Split(path.name)
|
||||
if filepath.Clean(wdir) == name {
|
||||
if !w.externalWatches[path.name] {
|
||||
pathsToRemove = append(pathsToRemove, path.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, name := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error
|
||||
// to the user, as that will just confuse them with an error about
|
||||
// a path they did not explicitly watch themselves.
|
||||
w.Remove(name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||
|
||||
// keventWaitTime to block on each read from kevent
|
||||
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
|
||||
|
||||
// addWatch adds name to the watched file set.
|
||||
// The flags are interpreted as described in kevent(2).
|
||||
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
|
||||
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||
var isDir bool
|
||||
// Make ./name and name equivalent
|
||||
name = filepath.Clean(name)
|
||||
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return "", errors.New("kevent instance already closed")
|
||||
}
|
||||
watchfd, alreadyWatching := w.watches[name]
|
||||
// We already have a watch, but we can still override flags.
|
||||
if alreadyWatching {
|
||||
isDir = w.paths[watchfd].isDir
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if !alreadyWatching {
|
||||
fi, err := os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Don't watch sockets.
|
||||
if fi.Mode()&os.ModeSocket == os.ModeSocket {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Don't watch named pipes.
|
||||
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Follow Symlinks
|
||||
// Unfortunately, Linux can add bogus symlinks to watch list without
|
||||
// issue, and Windows can't do symlinks period (AFAIK). To maintain
|
||||
// consistency, we will act like everything is fine. There will simply
|
||||
// be no file events for broken symlinks.
|
||||
// Hence the returns of nil on errors.
|
||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
name, err = filepath.EvalSymlinks(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
_, alreadyWatching = w.watches[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
if alreadyWatching {
|
||||
return name, nil
|
||||
}
|
||||
|
||||
fi, err = os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
||||
watchfd, err = unix.Open(name, openMode, 0700)
|
||||
if watchfd == -1 {
|
||||
return "", err
|
||||
}
|
||||
|
||||
isDir = fi.IsDir()
|
||||
}
|
||||
|
||||
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
|
||||
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
|
||||
unix.Close(watchfd)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !alreadyWatching {
|
||||
w.mu.Lock()
|
||||
w.watches[name] = watchfd
|
||||
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if isDir {
|
||||
// Watch the directory if it has not been watched before,
|
||||
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||
w.mu.Lock()
|
||||
|
||||
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||
// Store flags so this watch can be updated later
|
||||
w.dirFlags[name] = flags
|
||||
w.mu.Unlock()
|
||||
|
||||
if watchDir {
|
||||
if err := w.watchDirectoryFiles(name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// readEvents reads from kqueue and converts the received kevents into
|
||||
// Event values that it sends down the Events channel.
|
||||
func (w *Watcher) readEvents() {
|
||||
eventBuffer := make([]unix.Kevent_t, 10)
|
||||
|
||||
loop:
|
||||
for {
|
||||
// See if there is a message on the "done" channel
|
||||
select {
|
||||
case <-w.done:
|
||||
break loop
|
||||
default:
|
||||
}
|
||||
|
||||
// Get new events
|
||||
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
|
||||
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||
if err != nil && err != unix.EINTR {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
break loop
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Flush the events we received to the Events channel
|
||||
for len(kevents) > 0 {
|
||||
kevent := &kevents[0]
|
||||
watchfd := int(kevent.Ident)
|
||||
mask := uint32(kevent.Fflags)
|
||||
w.mu.Lock()
|
||||
path := w.paths[watchfd]
|
||||
w.mu.Unlock()
|
||||
event := newEvent(path.name, mask)
|
||||
|
||||
if path.isDir && !(event.Op&Remove == Remove) {
|
||||
// Double check to make sure the directory exists. This can happen when
|
||||
// we do an rm -fr on a recursively watched folder: we receive a
|
||||
// modification event first, but the folder has been deleted, and we later
|
||||
// receive the delete event
|
||||
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
||||
// mark it as a delete event
|
||||
event.Op |= Remove
|
||||
}
|
||||
}
|
||||
|
||||
if event.Op&Rename == Rename || event.Op&Remove == Remove {
|
||||
w.Remove(event.Name)
|
||||
w.mu.Lock()
|
||||
delete(w.fileExists, event.Name)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||
w.sendDirectoryChangeEvents(event.Name)
|
||||
} else {
|
||||
// Send the event on the Events channel.
|
||||
select {
|
||||
case w.Events <- event:
|
||||
case <-w.done:
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
if event.Op&Remove == Remove {
|
||||
// Look for a file that may have overwritten this.
|
||||
// For example, mv f1 f2 will delete f2, then create f2.
|
||||
if path.isDir {
|
||||
fileDir := filepath.Clean(event.Name)
|
||||
w.mu.Lock()
|
||||
_, found := w.watches[fileDir]
|
||||
w.mu.Unlock()
|
||||
if found {
|
||||
// make sure the directory exists before we watch for changes. When we
|
||||
// do a recursive watch and perform rm -fr, the parent directory might
|
||||
// have gone missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch from the parent directory.
|
||||
if _, err := os.Lstat(fileDir); err == nil {
|
||||
w.sendDirectoryChangeEvents(fileDir)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
filePath := filepath.Clean(event.Name)
|
||||
if fileInfo, err := os.Lstat(filePath); err == nil {
|
||||
w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Move to next event
|
||||
kevents = kevents[1:]
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup
|
||||
err := unix.Close(w.kq)
|
||||
if err != nil {
|
||||
// only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on kqueue Fflags.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func newCreateEvent(name string) Event {
|
||||
return Event{Name: name, Op: Create}
|
||||
}
|
||||
|
||||
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendDirectoryChangeEvents searches the directory for newly created files
|
||||
// and sends them over the event channel. This functionality is to have
|
||||
// the BSD version of fsnotify match Linux inotify which provides a
|
||||
// create event for files created in a watched directory.
|
||||
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
|
||||
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
|
||||
w.mu.Lock()
|
||||
_, doesExist := w.fileExists[filePath]
|
||||
w.mu.Unlock()
|
||||
if !doesExist {
|
||||
// Send create event
|
||||
select {
|
||||
case w.Events <- newCreateEvent(filePath):
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// like watchDirectoryFiles (but without doing another ReadDir)
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
|
||||
if fileInfo.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories
|
||||
// but preserve the flags used if currently watching subdirectory
|
||||
w.mu.Lock()
|
||||
flags := w.dirFlags[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
|
||||
return w.addWatch(name, flags)
|
||||
}
|
||||
|
||||
// watch file to mimic Linux inotify
|
||||
return w.addWatch(name, noteAllEvents)
|
||||
}
|
||||
|
||||
// kqueue creates a new kernel event queue and returns a descriptor.
|
||||
func kqueue() (kq int, err error) {
|
||||
kq, err = unix.Kqueue()
|
||||
if kq == -1 {
|
||||
return kq, err
|
||||
}
|
||||
return kq, nil
|
||||
}
|
||||
|
||||
// register events with the queue
|
||||
func register(kq int, fds []int, flags int, fflags uint32) error {
|
||||
changes := make([]unix.Kevent_t, len(fds))
|
||||
|
||||
for i, fd := range fds {
|
||||
// SetKevent converts int to the platform-specific types:
|
||||
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||
changes[i].Fflags = fflags
|
||||
}
|
||||
|
||||
// register the events
|
||||
success, err := unix.Kevent(kq, changes, nil, nil)
|
||||
if success == -1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// read retrieves pending events, or waits until an event occurs.
|
||||
// A timeout of nil blocks indefinitely, while 0 polls the queue.
|
||||
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
|
||||
n, err := unix.Kevent(kq, nil, events, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return events[0:n], nil
|
||||
}
|
||||
|
||||
// durationToTimespec prepares a timeout value
|
||||
func durationToTimespec(d time.Duration) unix.Timespec {
|
||||
return unix.NsecToTimespec(d.Nanoseconds())
|
||||
}
|
12
vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly
|
||||
// +build freebsd openbsd netbsd dragonfly
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
|
13
vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build darwin
|
||||
// +build darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
// note: this constant is not defined on BSD
|
||||
const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
|
562
vendor/github.com/fsnotify/fsnotify/windows.go
generated
vendored
Normal file
@ -0,0 +1,562 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
mu sync.Mutex // Map access
|
||||
port syscall.Handle // Handle to completion port
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
quit chan chan<- error
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
w := &Watcher{
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
Events: make(chan Event, 50),
|
||||
Errors: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
if w.isClosed {
|
||||
return errors.New("watcher already closed")
|
||||
}
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
const (
|
||||
// Options for AddWatch
|
||||
sysFSONESHOT = 0x80000000
|
||||
sysFSONLYDIR = 0x1000000
|
||||
|
||||
// Events
|
||||
sysFSACCESS = 0x1
|
||||
sysFSALLEVENTS = 0xfff
|
||||
sysFSATTRIB = 0x4
|
||||
sysFSCLOSE = 0x18
|
||||
sysFSCREATE = 0x100
|
||||
sysFSDELETE = 0x200
|
||||
sysFSDELETESELF = 0x400
|
||||
sysFSMODIFY = 0x2
|
||||
sysFSMOVE = 0xc0
|
||||
sysFSMOVEDFROM = 0x40
|
||||
sysFSMOVEDTO = 0x80
|
||||
sysFSMOVESELF = 0x800
|
||||
|
||||
// Special events
|
||||
sysFSIGNORED = 0x8000
|
||||
sysFSQOVERFLOW = 0x4000
|
||||
)
|
||||
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&sysFSATTRIB == sysFSATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
const (
|
||||
opAddWatch = iota
|
||||
opRemoveWatch
|
||||
)
|
||||
|
||||
const (
|
||||
provisional uint64 = 1 << (32 + iota)
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
handle syscall.Handle
|
||||
volume uint32
|
||||
index uint64
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov syscall.Overlapped
|
||||
ino *inode // i-number
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf [4096]byte
|
||||
}
|
||||
|
||||
type indexMap map[uint64]*watch
|
||||
type watchMap map[uint32]indexMap
|
||||
|
||||
func (w *Watcher) wakeupReader() error {
|
||||
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if e != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDir(pathname string) (dir string, err error) {
|
||||
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
|
||||
if e != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", e)
|
||||
}
|
||||
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
dir = pathname
|
||||
} else {
|
||||
dir, _ = filepath.Split(pathname)
|
||||
dir = filepath.Clean(dir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getIno(path string) (ino *inode, err error) {
|
||||
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
|
||||
syscall.FILE_LIST_DIRECTORY,
|
||||
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
|
||||
nil, syscall.OPEN_EXISTING,
|
||||
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateFile", e)
|
||||
}
|
||||
var fi syscall.ByHandleFileInformation
|
||||
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
|
||||
syscall.CloseHandle(h)
|
||||
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
|
||||
}
|
||||
ino = &inode{
|
||||
handle: h,
|
||||
volume: fi.VolumeSerialNumber,
|
||||
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||
}
|
||||
return ino, nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) get(ino *inode) *watch {
|
||||
if i := m[ino.volume]; i != nil {
|
||||
return i[ino.index]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) set(ino *inode, watch *watch) {
|
||||
i := m[ino.volume]
|
||||
if i == nil {
|
||||
i = make(indexMap)
|
||||
m[ino.volume] = i
|
||||
}
|
||||
i[ino.index] = watch
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if flags&sysFSONLYDIR != 0 && pathname != dir {
|
||||
return nil
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watchEntry := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watchEntry == nil {
|
||||
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
return os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
w.mu.Unlock()
|
||||
flags |= provisional
|
||||
} else {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask |= flags
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||
}
|
||||
if err = w.startRead(watchEntry); err != nil {
|
||||
return err
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask &= ^provisional
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) remWatch(pathname string) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watch == nil {
|
||||
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
return w.startRead(watch)
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) startRead(watch *watch) error {
|
||||
if e := syscall.CancelIo(watch.ino.handle); e != nil {
|
||||
w.Errors <- os.NewSyscallError("CancelIo", e)
|
||||
w.deleteWatch(watch)
|
||||
}
|
||||
mask := toWindowsFlags(watch.mask)
|
||||
for _, m := range watch.names {
|
||||
mask |= toWindowsFlags(m)
|
||||
}
|
||||
if mask == 0 {
|
||||
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
|
||||
w.Errors <- os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
w.mu.Lock()
|
||||
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||
if e != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", e)
|
||||
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
|
||||
if watch.mask&sysFSONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
n, key uint32
|
||||
ov *syscall.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if e := syscall.CloseHandle(w.port); e != nil {
|
||||
err = os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch e {
|
||||
case syscall.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case syscall.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case syscall.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
||||
continue
|
||||
case nil:
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.Events <- newEvent("", sysFSQOVERFLOW)
|
||||
w.Errors <- errors.New("short read in readEvents()")
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
|
||||
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
mask = sysFSDELETESELF
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
mask = sysFSMODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sysFSMOVESELF
|
||||
}
|
||||
}
|
||||
|
||||
sendNameEvent := func() {
|
||||
if w.sendEvent(fullname, watch.names[name]&mask) {
|
||||
if watch.names[name]&sysFSONESHOT != 0 {
|
||||
delete(watch.names, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
sendNameEvent()
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
||||
if watch.mask&sysFSONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
fullname = filepath.Join(watch.path, watch.rename)
|
||||
sendNameEvent()
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
event := newEvent(name, uint32(mask))
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.quit <- ch
|
||||
case w.Events <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sysFSACCESS != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
|
||||
}
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&sysFSATTRIB != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||
}
|
||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case syscall.FILE_ACTION_ADDED:
|
||||
return sysFSCREATE
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
return sysFSDELETE
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
return sysFSMODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
return sysFSMOVEDFROM
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
return sysFSMOVEDTO
|
||||
}
|
||||
return 0
|
||||
}
|
5
vendor/github.com/hectane/go-asyncserver/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.5
|
||||
- tip
|
30
vendor/github.com/hectane/go-asyncserver/README.md
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
## go-asyncserver
|
||||
|
||||
[![Build Status](https://travis-ci.org/hectane/go-asyncserver.svg?branch=master)](https://travis-ci.org/hectane/go-asyncserver)
|
||||
[![GoDoc](https://godoc.org/github.com/hectane/go-asyncserver?status.svg)](https://godoc.org/github.com/hectane/go-asyncserver)
|
||||
[![MIT License](http://img.shields.io/badge/license-MIT-9370d8.svg?style=flat)](http://opensource.org/licenses/MIT)
|
||||
|
||||
This package provides an extremely simple HTTP server that runs asynchronously.
|
||||
|
||||
Although Go provides `http.ListenAndServe()`, this method blocks indefinitely. Worse yet, `http.Server` doesn't provide a simple method for stopping the server. The need for go-asyncserver arose from these problems.
|
||||
|
||||
### Example
|
||||
|
||||
The following example demonstrates the creation of a simple asynchronous HTTP server:
|
||||
|
||||
import "github.com/hectane/go-asyncserver"
|
||||
|
||||
// "0" instructs the OS to select a free port
|
||||
s := server.New(":0")
|
||||
|
||||
// The server doesn't actually begin accepting connections
|
||||
// until the Start() method is called
|
||||
if err := s.Start(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// The server will now accept connections at s.Addr
|
||||
// ...do stuff here...
|
||||
|
||||
// Stop the server
|
||||
s.Stop()
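
Because `AsyncServer` embeds `http.Server` (see `server.go` below), a handler can be attached before calling `Start()`. The following is a minimal sketch, not part of the upstream README; the mux and handler are illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	server "github.com/hectane/go-asyncserver"
)

func main() {
	// "0" instructs the OS to select a free port
	s := server.New(":0")

	// AsyncServer embeds http.Server, so Handler can be set directly
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	s.Handler = mux

	if err := s.Start(); err != nil {
		panic(err)
	}
	defer s.Stop()

	// s.Addr now holds the address actually chosen by the OS
	fmt.Println("listening on", s.Addr)
}
```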
|
48
vendor/github.com/hectane/go-asyncserver/server.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// AsyncServer is an HTTP server that can be started and stopped asynchronously.
|
||||
type AsyncServer struct {
|
||||
http.Server
|
||||
listener net.Listener
|
||||
stopped chan bool
|
||||
}
|
||||
|
||||
// Create a new server instance. Note that Start() must be called before the
|
||||
// server will begin accepting new connections.
|
||||
func New(addr string) *AsyncServer {
|
||||
a := &AsyncServer{
|
||||
stopped: make(chan bool),
|
||||
}
|
||||
a.Addr = addr
|
||||
return a
|
||||
}
|
||||
|
||||
// Start the server.
|
||||
func (a *AsyncServer) Start() error {
|
||||
l, err := net.Listen("tcp", a.Addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.Addr = l.Addr().String()
|
||||
if a.TLSConfig != nil {
|
||||
l = tls.NewListener(l, a.TLSConfig)
|
||||
}
|
||||
a.listener = l
|
||||
go func() {
|
||||
a.Serve(a.listener)
|
||||
a.stopped <- true
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop the server. This method blocks until the server is stopped.
|
||||
func (a *AsyncServer) Stop() {
|
||||
a.listener.Close()
|
||||
<-a.stopped
|
||||
}
|
23
vendor/github.com/micro/mdns/.gitignore
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
20
vendor/github.com/micro/mdns/LICENSE
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) {}
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
70
vendor/github.com/micro/mdns/README.md
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
# MDNS [![GoDoc](https://godoc.org/github.com/micro/mdns?status.svg)](https://godoc.org/github.com/micro/mdns)
|
||||
|
||||
MDNS is a simple mDNS client/server library originally written by HashiCorp.
|
||||
|
||||
We maintain a fork that incorporates PRs and fixes for issues the upstream project has not merged or addressed.
|
||||
|
||||
## Overview
|
||||
|
||||
MDNS or Multicast DNS can be used to discover services on the local network without the use of an authoritative
|
||||
DNS server. This enables peer-to-peer discovery. It is important to note that many
|
||||
networks restrict the use of multicasting, which prevents mDNS from functioning.
|
||||
Notably, multicast cannot be used in any sort of cloud or shared-infrastructure
|
||||
environment. However, it works well in most office, home, or private infrastructure
|
||||
environments.
|
||||
|
||||
## Usage
|
||||
|
||||
Using the library is very simple; here is an example of publishing a service entry:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/micro/mdns"
|
||||
"os"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
// Setup our service export
|
||||
host, _ := os.Hostname()
|
||||
info := []string{"My awesome service"}
|
||||
service, _ := mdns.NewMDNSService(host, "_foobar._tcp", "", "", 8000, nil, info)
|
||||
|
||||
// Create the mDNS server, defer shutdown
|
||||
server, _ := mdns.NewServer(&mdns.Config{Zone: service})
|
||||
|
||||
defer server.Shutdown()
|
||||
}
|
||||
```
|
||||
|
||||
Doing a lookup for service providers is also very simple:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/micro/mdns"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
// Make a channel for results and start listening
|
||||
entriesCh := make(chan *mdns.ServiceEntry, 8)
|
||||
go func() {
|
||||
for entry := range entriesCh {
|
||||
fmt.Printf("Got new entry: %v\n", entry)
|
||||
}
|
||||
}()
|
||||
|
||||
// Start the lookup
|
||||
err := mdns.Lookup("_foobar._tcp", entriesCh)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
close(entriesCh)
|
||||
}
|
||||
```
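
For finer control than `Lookup` offers, the vendored `client.go` below also exposes `Query` together with `QueryParam` (timeout, interface, unicast responses). A minimal sketch, assuming the same hypothetical `_foobar._tcp` service as above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/micro/mdns"
)

func main() {
	// Make a channel for results and start listening
	entriesCh := make(chan *mdns.ServiceEntry, 8)
	go func() {
		for entry := range entriesCh {
			fmt.Printf("Got new entry: %v\n", entry)
		}
	}()

	// Start from the defaults and override only what we need
	params := mdns.DefaultParams("_foobar._tcp")
	params.Timeout = 3 * time.Second // default is 1 second
	params.Entries = entriesCh

	if err := mdns.Query(params); err != nil {
		fmt.Println(err)
	}

	close(entriesCh)
}
```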
|
494
vendor/github.com/micro/mdns/client.go
generated
vendored
Normal file
@ -0,0 +1,494 @@
|
||||
package mdns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
// ServiceEntry is returned after we query for a service
|
||||
type ServiceEntry struct {
|
||||
Name string
|
||||
Host string
|
||||
AddrV4 net.IP
|
||||
AddrV6 net.IP
|
||||
Port int
|
||||
Info string
|
||||
InfoFields []string
|
||||
TTL int
|
||||
|
||||
Addr net.IP // @Deprecated
|
||||
|
||||
hasTXT bool
|
||||
sent bool
|
||||
}
|
||||
|
||||
// complete is used to check if we have all the info we need
|
||||
func (s *ServiceEntry) complete() bool {
|
||||
return (s.AddrV4 != nil || s.AddrV6 != nil || s.Addr != nil) && s.Port != 0 && s.hasTXT
|
||||
}
|
||||
|
||||
// QueryParam is used to customize how a Lookup is performed
|
||||
type QueryParam struct {
|
||||
Service string // Service to lookup
|
||||
Domain string // Lookup domain, default "local"
|
||||
Context context.Context // Context
|
||||
Timeout time.Duration // Lookup timeout, default 1 second. Ignored if Context is provided
|
||||
Interface *net.Interface // Multicast interface to use
|
||||
Entries chan<- *ServiceEntry // Entries Channel
|
||||
WantUnicastResponse bool // Unicast response desired, as per 5.4 in RFC
|
||||
}
|
||||
|
||||
// DefaultParams is used to return a default set of QueryParam's
|
||||
func DefaultParams(service string) *QueryParam {
|
||||
return &QueryParam{
|
||||
Service: service,
|
||||
Domain: "local",
|
||||
Timeout: time.Second,
|
||||
Entries: make(chan *ServiceEntry),
|
||||
WantUnicastResponse: false, // TODO(reddaly): Change this default.
|
||||
}
|
||||
}
|
||||
|
||||
// Query looks up a given service, in a domain, waiting at most
|
||||
// for a timeout before finishing the query. The results are streamed
|
||||
// to a channel. Sends will not block, so clients should make sure to
|
||||
// either read or buffer.
|
||||
func Query(params *QueryParam) error {
|
||||
// Create a new client
|
||||
client, err := newClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
// Set the multicast interface
|
||||
if params.Interface != nil {
|
||||
if err := client.setInterface(params.Interface, false); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure defaults are set
|
||||
if params.Domain == "" {
|
||||
params.Domain = "local"
|
||||
}
|
||||
|
||||
if params.Context == nil {
|
||||
if params.Timeout == 0 {
|
||||
params.Timeout = time.Second
|
||||
}
|
||||
params.Context, _ = context.WithTimeout(context.Background(), params.Timeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Run the query
|
||||
return client.query(params)
|
||||
}
|
||||
|
||||
// Listen listens indefinitely for multicast updates
|
||||
func Listen(entries chan<- *ServiceEntry, exit chan struct{}) error {
|
||||
// Create a new client
|
||||
client, err := newClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
client.setInterface(nil, true)
|
||||
|
||||
// Start listening for response packets
|
||||
msgCh := make(chan *dns.Msg, 32)
|
||||
|
||||
go client.recv(client.ipv4UnicastConn, msgCh)
|
||||
go client.recv(client.ipv6UnicastConn, msgCh)
|
||||
go client.recv(client.ipv4MulticastConn, msgCh)
|
||||
go client.recv(client.ipv6MulticastConn, msgCh)
|
||||
|
||||
ip := make(map[string]*ServiceEntry)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-exit:
|
||||
return nil
|
||||
case <-client.closedCh:
|
||||
return nil
|
||||
case m := <-msgCh:
|
||||
e := messageToEntry(m, ip)
|
||||
if e == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if this entry is complete
|
||||
if e.complete() {
|
||||
if e.sent {
|
||||
continue
|
||||
}
|
||||
e.sent = true
|
||||
entries <- e
|
||||
ip = make(map[string]*ServiceEntry)
|
||||
} else {
|
||||
// Fire off a node specific query
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion(e.Name, dns.TypePTR)
|
||||
m.RecursionDesired = false
|
||||
if err := client.sendQuery(m); err != nil {
|
||||
log.Printf("[ERR] mdns: Failed to query instance %s: %v", e.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lookup is the same as Query, however it uses all the default parameters
|
||||
func Lookup(service string, entries chan<- *ServiceEntry) error {
|
||||
params := DefaultParams(service)
|
||||
params.Entries = entries
|
||||
return Query(params)
|
||||
}
|
||||
|
||||
// Client provides a query interface that can be used to
|
||||
// search for service providers using mDNS
|
||||
type client struct {
|
||||
ipv4UnicastConn *net.UDPConn
|
||||
ipv6UnicastConn *net.UDPConn
|
||||
|
||||
ipv4MulticastConn *net.UDPConn
|
||||
ipv6MulticastConn *net.UDPConn
|
||||
|
||||
closed bool
|
||||
closedCh chan struct{} // TODO(reddaly): This doesn't appear to be used.
|
||||
closeLock sync.Mutex
|
||||
}
|
||||
|
||||
// NewClient creates a new mdns Client that can be used to query
|
||||
// for records
|
||||
func newClient() (*client, error) {
|
||||
// TODO(reddaly): At least attempt to bind to the port required in the spec.
|
||||
// Create a IPv4 listener
|
||||
uconn4, err4 := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
|
||||
uconn6, err6 := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0})
|
||||
if err4 != nil && err6 != nil {
|
||||
log.Printf("[ERR] mdns: Failed to bind to udp port: %v %v", err4, err6)
|
||||
}
|
||||
|
||||
if uconn4 == nil && uconn6 == nil {
|
||||
return nil, fmt.Errorf("failed to bind to any unicast udp port")
|
||||
}
|
||||
|
||||
if uconn4 == nil {
|
||||
uconn4 = &net.UDPConn{}
|
||||
}
|
||||
|
||||
if uconn6 == nil {
|
||||
uconn6 = &net.UDPConn{}
|
||||
}
|
||||
|
||||
mconn4, err4 := net.ListenUDP("udp4", mdnsWildcardAddrIPv4)
|
||||
mconn6, err6 := net.ListenUDP("udp6", mdnsWildcardAddrIPv6)
|
||||
if err4 != nil && err6 != nil {
|
||||
log.Printf("[ERR] mdns: Failed to bind to udp port: %v %v", err4, err6)
|
||||
}
|
||||
|
||||
if mconn4 == nil && mconn6 == nil {
|
||||
return nil, fmt.Errorf("failed to bind to any multicast udp port")
|
||||
}
|
||||
|
||||
if mconn4 == nil {
|
||||
mconn4 = &net.UDPConn{}
|
||||
}
|
||||
|
||||
if mconn6 == nil {
|
||||
mconn6 = &net.UDPConn{}
|
||||
}
|
||||
|
||||
p1 := ipv4.NewPacketConn(mconn4)
|
||||
p2 := ipv6.NewPacketConn(mconn6)
|
||||
p1.SetMulticastLoopback(true)
|
||||
p2.SetMulticastLoopback(true)
|
||||
|
||||
ifaces, err := net.Interfaces()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var errCount1, errCount2 int
|
||||
|
||||
for _, iface := range ifaces {
|
||||
if err := p1.JoinGroup(&iface, &net.UDPAddr{IP: mdnsGroupIPv4}); err != nil {
|
||||
errCount1++
|
||||
}
|
||||
if err := p2.JoinGroup(&iface, &net.UDPAddr{IP: mdnsGroupIPv6}); err != nil {
|
||||
errCount2++
|
||||
}
|
||||
}
|
||||
|
||||
if len(ifaces) == errCount1 && len(ifaces) == errCount2 {
|
||||
return nil, fmt.Errorf("Failed to join multicast group on all interfaces!")
|
||||
}
|
||||
|
||||
c := &client{
|
||||
ipv4MulticastConn: mconn4,
|
||||
ipv6MulticastConn: mconn6,
|
||||
ipv4UnicastConn: uconn4,
|
||||
ipv6UnicastConn: uconn6,
|
||||
closedCh: make(chan struct{}),
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Close is used to cleanup the client
|
||||
func (c *client) Close() error {
|
||||
c.closeLock.Lock()
|
||||
defer c.closeLock.Unlock()
|
||||
|
||||
if c.closed {
|
||||
return nil
|
||||
}
|
||||
c.closed = true
|
||||
|
||||
close(c.closedCh)
|
||||
|
||||
if c.ipv4UnicastConn != nil {
|
||||
c.ipv4UnicastConn.Close()
|
||||
}
|
||||
if c.ipv6UnicastConn != nil {
|
||||
c.ipv6UnicastConn.Close()
|
||||
}
|
||||
if c.ipv4MulticastConn != nil {
|
||||
c.ipv4MulticastConn.Close()
|
||||
}
|
||||
if c.ipv6MulticastConn != nil {
|
||||
c.ipv6MulticastConn.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// setInterface is used to set the query interface, uses system
|
||||
// default if not provided
|
||||
func (c *client) setInterface(iface *net.Interface, loopback bool) error {
|
||||
p := ipv4.NewPacketConn(c.ipv4UnicastConn)
|
||||
if err := p.JoinGroup(iface, &net.UDPAddr{IP: mdnsGroupIPv4}); err != nil {
|
||||
return err
|
||||
}
|
||||
p2 := ipv6.NewPacketConn(c.ipv6UnicastConn)
|
||||
if err := p2.JoinGroup(iface, &net.UDPAddr{IP: mdnsGroupIPv6}); err != nil {
|
||||
return err
|
||||
}
|
||||
p = ipv4.NewPacketConn(c.ipv4MulticastConn)
|
||||
if err := p.JoinGroup(iface, &net.UDPAddr{IP: mdnsGroupIPv4}); err != nil {
|
||||
return err
|
||||
}
|
||||
p2 = ipv6.NewPacketConn(c.ipv6MulticastConn)
|
||||
if err := p2.JoinGroup(iface, &net.UDPAddr{IP: mdnsGroupIPv6}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if loopback {
|
||||
p.SetMulticastLoopback(true)
|
||||
p2.SetMulticastLoopback(true)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// query is used to perform a lookup and stream results
|
||||
func (c *client) query(params *QueryParam) error {
|
||||
// Create the service name
|
||||
serviceAddr := fmt.Sprintf("%s.%s.", trimDot(params.Service), trimDot(params.Domain))
|
||||
|
||||
// Start listening for response packets
|
||||
msgCh := make(chan *dns.Msg, 32)
|
||||
go c.recv(c.ipv4UnicastConn, msgCh)
|
||||
go c.recv(c.ipv6UnicastConn, msgCh)
|
||||
go c.recv(c.ipv4MulticastConn, msgCh)
|
||||
go c.recv(c.ipv6MulticastConn, msgCh)
|
||||
|
||||
// Send the query
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion(serviceAddr, dns.TypePTR)
|
||||
// RFC 6762, section 18.12. Repurposing of Top Bit of qclass in Question
|
||||
// Section
|
||||
//
|
||||
// In the Question Section of a Multicast DNS query, the top bit of the qclass
|
||||
// field is used to indicate that unicast responses are preferred for this
|
||||
// particular question. (See Section 5.4.)
|
||||
if params.WantUnicastResponse {
|
||||
m.Question[0].Qclass |= 1 << 15
|
||||
}
|
||||
m.RecursionDesired = false
|
||||
if err := c.sendQuery(m); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Map the in-progress responses
|
||||
inprogress := make(map[string]*ServiceEntry)
|
||||
|
||||
for {
|
||||
select {
|
||||
case resp := <-msgCh:
|
||||
inp := messageToEntry(resp, inprogress)
|
||||
if inp == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if this entry is complete
|
||||
if inp.complete() {
|
||||
if inp.sent {
|
||||
continue
|
||||
}
|
||||
inp.sent = true
|
||||
select {
|
||||
case params.Entries <- inp:
|
||||
case <-params.Context.Done():
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
// Fire off a node specific query
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion(inp.Name, dns.TypePTR)
|
||||
m.RecursionDesired = false
|
||||
if err := c.sendQuery(m); err != nil {
|
||||
log.Printf("[ERR] mdns: Failed to query instance %s: %v", inp.Name, err)
|
||||
}
|
||||
}
|
||||
case <-params.Context.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendQuery is used to multicast a query out
|
||||
func (c *client) sendQuery(q *dns.Msg) error {
|
||||
buf, err := q.Pack()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.ipv4UnicastConn != nil {
|
||||
c.ipv4UnicastConn.WriteToUDP(buf, ipv4Addr)
|
||||
}
|
||||
if c.ipv6UnicastConn != nil {
|
||||
c.ipv6UnicastConn.WriteToUDP(buf, ipv6Addr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// recv is used to receive until we get a shutdown
|
||||
func (c *client) recv(l *net.UDPConn, msgCh chan *dns.Msg) {
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
buf := make([]byte, 65536)
|
||||
for {
|
||||
c.closeLock.Lock()
|
||||
if c.closed {
|
||||
c.closeLock.Unlock()
|
||||
return
|
||||
}
|
||||
c.closeLock.Unlock()
|
||||
n, err := l.Read(buf)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
msg := new(dns.Msg)
|
||||
if err := msg.Unpack(buf[:n]); err != nil {
|
||||
continue
|
||||
}
|
||||
select {
|
||||
case msgCh <- msg:
|
||||
case <-c.closedCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ensureName is used to ensure the named node is in progress
|
||||
func ensureName(inprogress map[string]*ServiceEntry, name string) *ServiceEntry {
|
||||
if inp, ok := inprogress[name]; ok {
|
||||
return inp
|
||||
}
|
||||
inp := &ServiceEntry{
|
||||
Name: name,
|
||||
}
|
||||
inprogress[name] = inp
|
||||
return inp
|
||||
}
|
||||
|
||||
// alias is used to setup an alias between two entries
|
||||
func alias(inprogress map[string]*ServiceEntry, src, dst string) {
|
||||
srcEntry := ensureName(inprogress, src)
|
||||
inprogress[dst] = srcEntry
|
||||
}
|
||||
|
||||
func messageToEntry(m *dns.Msg, inprogress map[string]*ServiceEntry) *ServiceEntry {
|
||||
var inp *ServiceEntry
|
||||
|
||||
for _, answer := range append(m.Answer, m.Extra...) {
|
||||
// TODO(reddaly): Check that response corresponds to serviceAddr?
|
||||
switch rr := answer.(type) {
|
||||
case *dns.PTR:
|
||||
// Create new entry for this
|
||||
inp = ensureName(inprogress, rr.Ptr)
|
||||
if inp.complete() {
|
||||
continue
|
||||
}
|
||||
case *dns.SRV:
|
||||
// Check for a target mismatch
|
||||
if rr.Target != rr.Hdr.Name {
|
||||
alias(inprogress, rr.Hdr.Name, rr.Target)
|
||||
}
|
||||
|
||||
// Get the port
|
||||
inp = ensureName(inprogress, rr.Hdr.Name)
|
||||
if inp.complete() {
|
||||
continue
|
||||
}
|
||||
inp.Host = rr.Target
|
||||
inp.Port = int(rr.Port)
|
||||
case *dns.TXT:
|
||||
// Pull out the txt
|
||||
inp = ensureName(inprogress, rr.Hdr.Name)
|
||||
if inp.complete() {
|
||||
continue
|
||||
}
|
||||
inp.Info = strings.Join(rr.Txt, "|")
|
||||
inp.InfoFields = rr.Txt
|
||||
inp.hasTXT = true
|
||||
case *dns.A:
|
||||
// Pull out the IP
|
||||
inp = ensureName(inprogress, rr.Hdr.Name)
|
||||
if inp.complete() {
|
||||
continue
|
||||
}
|
||||
inp.Addr = rr.A // @Deprecated
|
||||
inp.AddrV4 = rr.A
|
||||
case *dns.AAAA:
|
||||
// Pull out the IP
|
||||
inp = ensureName(inprogress, rr.Hdr.Name)
|
||||
if inp.complete() {
|
||||
continue
|
||||
}
|
||||
inp.Addr = rr.AAAA // @Deprecated
|
||||
inp.AddrV6 = rr.AAAA
|
||||
}
|
||||
|
||||
if inp != nil {
|
||||
inp.TTL = int(answer.Header().Ttl)
|
||||
}
|
||||
}
|
||||
|
||||
return inp
|
||||
}
|
84
vendor/github.com/micro/mdns/dns_sd.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
package mdns
|
||||
|
||||
import "github.com/miekg/dns"
|
||||
|
||||
// DNSSDService is a service that complies with the DNS-SD (RFC 6763) and mDNS
|
||||
// (RFC 6762) specs for local, multicast-DNS-based discovery.
|
||||
//
|
||||
// DNSSDService implements the Zone interface and wraps an MDNSService instance.
|
||||
// To deploy an mDNS service that is compliant with DNS-SD, it's recommended to
|
||||
// register only the wrapped instance with the server.
|
||||
//
|
||||
// Example usage:
|
||||
// service := &mdns.DNSSDService{
|
||||
// MDNSService: &mdns.MDNSService{
|
||||
// Instance: "My Foobar Service",
|
||||
// Service: "_foobar._tcp",
|
||||
// Port: 8000,
|
||||
// }
|
||||
// }
|
||||
// server, err := mdns.NewServer(&mdns.Config{Zone: service})
|
||||
// if err != nil {
|
||||
// log.Fatalf("Error creating server: %v", err)
|
||||
// }
|
||||
// defer server.Shutdown()
|
||||
type DNSSDService struct {
|
||||
MDNSService *MDNSService
|
||||
}
|
||||
|
||||
// Records returns DNS records in response to a DNS question.
|
||||
//
|
||||
// This function returns the DNS response of the underlying MDNSService
|
||||
// instance. It also returns a PTR record for a request for "
|
||||
// _services._dns-sd._udp.<Domain>", as described in section 9 of RFC 6763
|
||||
// ("Service Type Enumeration"), to allow browsing of the underlying MDNSService
|
||||
// instance.
|
||||
func (s *DNSSDService) Records(q dns.Question) []dns.RR {
|
||||
var recs []dns.RR
|
||||
if q.Name == "_services._dns-sd._udp."+s.MDNSService.Domain+"." {
|
||||
recs = s.dnssdMetaQueryRecords(q)
|
||||
}
|
||||
return append(recs, s.MDNSService.Records(q)...)
|
||||
}
|
||||
|
||||
// dnssdMetaQueryRecords returns the DNS records in response to a "meta-query"
|
||||
// issued to browse for DNS-SD services, as per section 9 of RFC 6763.
|
||||
//
|
||||
// A meta-query has a name of the form "_services._dns-sd._udp.<Domain>" where
|
||||
// Domain is a fully-qualified domain, such as "local."
|
||||
func (s *DNSSDService) dnssdMetaQueryRecords(q dns.Question) []dns.RR {
|
||||
// Intended behavior, as described in the RFC:
|
||||
// ...it may be useful for network administrators to find the list of
|
||||
// advertised service types on the network, even if those Service Names
|
||||
// are just opaque identifiers and not particularly informative in
|
||||
// isolation.
|
||||
//
|
||||
// For this purpose, a special meta-query is defined. A DNS query for PTR
|
||||
// records with the name "_services._dns-sd._udp.<Domain>" yields a set of
|
||||
// PTR records, where the rdata of each PTR record is the two-label
|
||||
// <Service> name, plus the same domain, e.g., "_http._tcp.<Domain>".
|
||||
// Including the domain in the PTR rdata allows for slightly better name
|
||||
// compression in Unicast DNS responses, but only the first two labels are
|
||||
// relevant for the purposes of service type enumeration. These two-label
|
||||
// service types can then be used to construct subsequent Service Instance
|
||||
// Enumeration PTR queries, in this <Domain> or others, to discover
|
||||
// instances of that service type.
|
||||
return []dns.RR{
|
||||
&dns.PTR{
|
||||
Hdr: dns.RR_Header{
|
||||
Name: q.Name,
|
||||
Rrtype: dns.TypePTR,
|
||||
Class: dns.ClassINET,
|
||||
Ttl: defaultTTL,
|
||||
},
|
||||
Ptr: s.MDNSService.serviceAddr,
|
||||
},
|
||||
}
|
||||
}
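
An illustrative sketch of the client side of this meta-query, using the github.com/miekg/dns package vendored in this same commit; the multicast endpoint 224.0.0.251:5353 and the unicast-response bit mirror values used elsewhere in this diff, and a full browser would join the multicast group rather than rely on a one-shot exchange:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Service Type Enumeration: ask for PTR records under the meta-name.
	m := new(dns.Msg)
	m.SetQuestion("_services._dns-sd._udp.local.", dns.TypePTR)
	m.RecursionDesired = false
	// Request a unicast response by setting the top bit of the qclass,
	// the same trick the mdns client code in this package uses.
	m.Question[0].Qclass |= 1 << 15

	in, err := dns.Exchange(m, "224.0.0.251:5353")
	if err != nil {
		log.Fatal(err)
	}
	for _, rr := range in.Answer {
		if ptr, ok := rr.(*dns.PTR); ok {
			fmt.Println("advertised service type:", ptr.Ptr)
		}
	}
}
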
|
||||
|
||||
// Announcement returns DNS records that should be broadcast during the initial
|
||||
// availability of the service, as described in section 8.3 of RFC 6762.
|
||||
// TODO(reddaly): Add this when Announcement is added to the mdns.Zone interface.
|
||||
//func (s *DNSSDService) Announcement() []dns.RR {
|
||||
// return s.MDNSService.Announcement()
|
||||
//}
|
463
vendor/github.com/micro/mdns/server.go
generated
vendored
Normal file
@ -0,0 +1,463 @@
|
||||
package mdns
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
var (
|
||||
mdnsGroupIPv4 = net.ParseIP("224.0.0.251")
|
||||
mdnsGroupIPv6 = net.ParseIP("ff02::fb")
|
||||
|
||||
// mDNS wildcard addresses
|
||||
mdnsWildcardAddrIPv4 = &net.UDPAddr{
|
||||
IP: net.ParseIP("224.0.0.0"),
|
||||
Port: 5353,
|
||||
}
|
||||
mdnsWildcardAddrIPv6 = &net.UDPAddr{
|
||||
IP: net.ParseIP("ff02::"),
|
||||
Port: 5353,
|
||||
}
|
||||
|
||||
// mDNS endpoint addresses
|
||||
ipv4Addr = &net.UDPAddr{
|
||||
IP: mdnsGroupIPv4,
|
||||
Port: 5353,
|
||||
}
|
||||
ipv6Addr = &net.UDPAddr{
|
||||
IP: mdnsGroupIPv6,
|
||||
Port: 5353,
|
||||
}
|
||||
)
|
||||
|
||||
// Config is used to configure the mDNS server
|
||||
type Config struct {
|
||||
// Zone must be provided to support responding to queries
|
||||
Zone Zone
|
||||
|
||||
// Iface if provided binds the multicast listener to the given
|
||||
// interface. If not provided, the system default multicast interface
|
||||
// is used.
|
||||
Iface *net.Interface
|
||||
|
||||
// Port, if not 0, replaces the default mDNS port 5353.
|
||||
Port int
|
||||
}
|
||||
|
||||
// Server is an mDNS server used to listen for mDNS queries and respond if we
|
||||
// have a matching local record
|
||||
type Server struct {
|
||||
config *Config
|
||||
|
||||
ipv4List *net.UDPConn
|
||||
ipv6List *net.UDPConn
|
||||
|
||||
shutdown bool
|
||||
shutdownCh chan struct{}
|
||||
shutdownLock sync.Mutex
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// NewServer is used to create a new mDNS server from a config
|
||||
func NewServer(config *Config) (*Server, error) {
|
||||
if config.Port != 0 {
|
||||
mdnsWildcardAddrIPv4.Port = config.Port
|
||||
mdnsWildcardAddrIPv6.Port = config.Port
|
||||
ipv4Addr.Port = config.Port
|
||||
ipv6Addr.Port = config.Port
|
||||
}
|
||||
|
||||
// Create the listeners
|
||||
// Create wildcard connections (because :5353 can already be taken by other apps)
|
||||
ipv4List, _ := net.ListenUDP("udp4", mdnsWildcardAddrIPv4)
|
||||
ipv6List, _ := net.ListenUDP("udp6", mdnsWildcardAddrIPv6)
|
||||
if ipv4List == nil && ipv6List == nil {
|
||||
return nil, fmt.Errorf("[ERR] mdns: Failed to bind to any udp port!")
|
||||
}
|
||||
|
||||
if ipv4List == nil {
|
||||
ipv4List = &net.UDPConn{}
|
||||
}
|
||||
if ipv6List == nil {
|
||||
ipv6List = &net.UDPConn{}
|
||||
}
|
||||
|
||||
// Join multicast groups to receive announcements
|
||||
p1 := ipv4.NewPacketConn(ipv4List)
|
||||
p2 := ipv6.NewPacketConn(ipv6List)
|
||||
p1.SetMulticastLoopback(true)
|
||||
p2.SetMulticastLoopback(true)
|
||||
|
||||
if config.Iface != nil {
|
||||
if err := p1.JoinGroup(config.Iface, &net.UDPAddr{IP: mdnsGroupIPv4}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := p2.JoinGroup(config.Iface, &net.UDPAddr{IP: mdnsGroupIPv6}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
ifaces, err := net.Interfaces()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
errCount1, errCount2 := 0, 0
|
||||
for _, iface := range ifaces {
|
||||
if err := p1.JoinGroup(&iface, &net.UDPAddr{IP: mdnsGroupIPv4}); err != nil {
|
||||
errCount1++
|
||||
}
|
||||
if err := p2.JoinGroup(&iface, &net.UDPAddr{IP: mdnsGroupIPv6}); err != nil {
|
||||
errCount2++
|
||||
}
|
||||
}
|
||||
if len(ifaces) == errCount1 && len(ifaces) == errCount2 {
|
||||
return nil, fmt.Errorf("Failed to join multicast group on all interfaces!")
|
||||
}
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
config: config,
|
||||
ipv4List: ipv4List,
|
||||
ipv6List: ipv6List,
|
||||
shutdownCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
go s.recv(s.ipv4List)
|
||||
go s.recv(s.ipv6List)
|
||||
|
||||
s.wg.Add(1)
|
||||
go s.probe()
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Shutdown is used to shutdown the listener
|
||||
func (s *Server) Shutdown() error {
|
||||
s.shutdownLock.Lock()
|
||||
defer s.shutdownLock.Unlock()
|
||||
|
||||
if s.shutdown {
|
||||
return nil
|
||||
}
|
||||
|
||||
s.shutdown = true
|
||||
close(s.shutdownCh)
|
||||
s.unregister()
|
||||
|
||||
if s.ipv4List != nil {
|
||||
s.ipv4List.Close()
|
||||
}
|
||||
if s.ipv6List != nil {
|
||||
s.ipv6List.Close()
|
||||
}
|
||||
|
||||
s.wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// recv is a long running routine to receive packets from an interface
|
||||
func (s *Server) recv(c *net.UDPConn) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
buf := make([]byte, 65536)
|
||||
for {
|
||||
s.shutdownLock.Lock()
|
||||
if s.shutdown {
|
||||
s.shutdownLock.Unlock()
|
||||
return
|
||||
}
|
||||
s.shutdownLock.Unlock()
|
||||
n, from, err := c.ReadFrom(buf)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if err := s.parsePacket(buf[:n], from); err != nil {
|
||||
log.Printf("[ERR] mdns: Failed to handle query: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parsePacket is used to parse an incoming packet
|
||||
func (s *Server) parsePacket(packet []byte, from net.Addr) error {
|
||||
var msg dns.Msg
|
||||
if err := msg.Unpack(packet); err != nil {
|
||||
log.Printf("[ERR] mdns: Failed to unpack packet: %v", err)
|
||||
return err
|
||||
}
|
||||
// TODO: This is a bit of a hack
|
||||
// We decided to ignore some mDNS answers for the time being
|
||||
// See: https://tools.ietf.org/html/rfc6762#section-7.2
|
||||
msg.Truncated = false
|
||||
return s.handleQuery(&msg, from)
|
||||
}
|
||||
|
||||
// handleQuery is used to handle an incoming query
|
||||
func (s *Server) handleQuery(query *dns.Msg, from net.Addr) error {
|
||||
if query.Opcode != dns.OpcodeQuery {
|
||||
// "In both multicast query and multicast response messages, the OPCODE MUST
|
||||
// be zero on transmission (only standard queries are currently supported
|
||||
// over multicast). Multicast DNS messages received with an OPCODE other
|
||||
// than zero MUST be silently ignored." Note: OpcodeQuery == 0
|
||||
return fmt.Errorf("mdns: received query with non-zero Opcode %v: %v", query.Opcode, *query)
|
||||
}
|
||||
if query.Rcode != 0 {
|
||||
// "In both multicast query and multicast response messages, the Response
|
||||
// Code MUST be zero on transmission. Multicast DNS messages received with
|
||||
// non-zero Response Codes MUST be silently ignored."
|
||||
return fmt.Errorf("mdns: received query with non-zero Rcode %v: %v", query.Rcode, *query)
|
||||
}
|
||||
|
||||
// TODO(reddaly): Handle "TC (Truncated) Bit":
|
||||
// In query messages, if the TC bit is set, it means that additional
|
||||
// Known-Answer records may be following shortly. A responder SHOULD
|
||||
// record this fact, and wait for those additional Known-Answer records,
|
||||
// before deciding whether to respond. If the TC bit is clear, it means
|
||||
// that the querying host has no additional Known Answers.
|
||||
if query.Truncated {
|
||||
return fmt.Errorf("[ERR] mdns: support for DNS requests with high truncated bit not implemented: %v", *query)
|
||||
}
|
||||
|
||||
var unicastAnswer, multicastAnswer []dns.RR
|
||||
|
||||
// Handle each question
|
||||
for _, q := range query.Question {
|
||||
mrecs, urecs := s.handleQuestion(q)
|
||||
multicastAnswer = append(multicastAnswer, mrecs...)
|
||||
unicastAnswer = append(unicastAnswer, urecs...)
|
||||
}
|
||||
|
||||
// See section 18 of RFC 6762 for rules about DNS headers.
|
||||
resp := func(unicast bool) *dns.Msg {
|
||||
// 18.1: ID (Query Identifier)
|
||||
// 0 for multicast response, query.Id for unicast response
|
||||
id := uint16(0)
|
||||
if unicast {
|
||||
id = query.Id
|
||||
}
|
||||
|
||||
var answer []dns.RR
|
||||
if unicast {
|
||||
answer = unicastAnswer
|
||||
} else {
|
||||
answer = multicastAnswer
|
||||
}
|
||||
if len(answer) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &dns.Msg{
|
||||
MsgHdr: dns.MsgHdr{
|
||||
Id: id,
|
||||
|
||||
// 18.2: QR (Query/Response) Bit - must be set to 1 in response.
|
||||
Response: true,
|
||||
|
||||
// 18.3: OPCODE - must be zero in response (OpcodeQuery == 0)
|
||||
Opcode: dns.OpcodeQuery,
|
||||
|
||||
// 18.4: AA (Authoritative Answer) Bit - must be set to 1
|
||||
Authoritative: true,
|
||||
|
||||
// The following fields must all be set to 0:
|
||||
// 18.5: TC (TRUNCATED) Bit
|
||||
// 18.6: RD (Recursion Desired) Bit
|
||||
// 18.7: RA (Recursion Available) Bit
|
||||
// 18.8: Z (Zero) Bit
|
||||
// 18.9: AD (Authentic Data) Bit
|
||||
// 18.10: CD (Checking Disabled) Bit
|
||||
// 18.11: RCODE (Response Code)
|
||||
},
|
||||
// 18.12 pertains to questions (handled by handleQuestion)
|
||||
// 18.13 pertains to resource records (handled by handleQuestion)
|
||||
|
||||
// 18.14: Name Compression - responses should be compressed (though see
|
||||
// caveats in the RFC), so set the Compress bit (part of the dns library
|
||||
// API, not part of the DNS packet) to true.
|
||||
Compress: true,
|
||||
|
||||
Answer: answer,
|
||||
}
|
||||
}
|
||||
|
||||
if mresp := resp(false); mresp != nil {
|
||||
if err := s.sendResponse(mresp, from); err != nil {
|
||||
return fmt.Errorf("mdns: error sending multicast response: %v", err)
|
||||
}
|
||||
}
|
||||
if uresp := resp(true); uresp != nil {
|
||||
if err := s.sendResponse(uresp, from); err != nil {
|
||||
return fmt.Errorf("mdns: error sending unicast response: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleQuestion is used to handle an incoming question
|
||||
//
|
||||
// The response to a question may be transmitted over multicast, unicast, or
|
||||
// both. The return values are DNS records for each transmission type.
|
||||
func (s *Server) handleQuestion(q dns.Question) (multicastRecs, unicastRecs []dns.RR) {
|
||||
records := s.config.Zone.Records(q)
|
||||
|
||||
if len(records) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Handle unicast and multicast responses.
|
||||
// TODO(reddaly): The decision about sending over unicast vs. multicast is not
|
||||
// yet fully compliant with RFC 6762. For example, the unicast bit should be
|
||||
// ignored if the records in question are close to TTL expiration. For now,
|
||||
// we just use the unicast bit to make the decision, as per the spec:
|
||||
// RFC 6762, section 18.12. Repurposing of Top Bit of qclass in Question
|
||||
// Section
|
||||
//
|
||||
// In the Question Section of a Multicast DNS query, the top bit of the
|
||||
// qclass field is used to indicate that unicast responses are preferred
|
||||
// for this particular question. (See Section 5.4.)
|
||||
if q.Qclass&(1<<15) != 0 {
|
||||
return nil, records
|
||||
}
|
||||
return records, nil
|
||||
}
|
||||
|
||||
func (s *Server) probe() {
|
||||
defer s.wg.Done()
|
||||
|
||||
sd, ok := s.config.Zone.(*MDNSService)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
name := fmt.Sprintf("%s.%s.%s.", sd.Instance, trimDot(sd.Service), trimDot(sd.Domain))
|
||||
|
||||
q := new(dns.Msg)
|
||||
q.SetQuestion(name, dns.TypePTR)
|
||||
q.RecursionDesired = false
|
||||
|
||||
srv := &dns.SRV{
|
||||
Hdr: dns.RR_Header{
|
||||
Name: name,
|
||||
Rrtype: dns.TypeSRV,
|
||||
Class: dns.ClassINET,
|
||||
Ttl: defaultTTL,
|
||||
},
|
||||
Priority: 0,
|
||||
Weight: 0,
|
||||
Port: uint16(sd.Port),
|
||||
Target: sd.HostName,
|
||||
}
|
||||
txt := &dns.TXT{
|
||||
Hdr: dns.RR_Header{
|
||||
Name: name,
|
||||
Rrtype: dns.TypeTXT,
|
||||
Class: dns.ClassINET,
|
||||
Ttl: defaultTTL,
|
||||
},
|
||||
Txt: sd.TXT,
|
||||
}
|
||||
q.Ns = []dns.RR{srv, txt}
|
||||
|
||||
randomizer := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
if err := s.SendMulticast(q); err != nil {
|
||||
log.Println("[ERR] mdns: failed to send probe:", err.Error())
|
||||
}
|
||||
time.Sleep(time.Duration(randomizer.Intn(250)) * time.Millisecond)
|
||||
}
|
||||
|
||||
resp := new(dns.Msg)
|
||||
resp.MsgHdr.Response = true
|
||||
|
||||
// set for query
|
||||
q.SetQuestion(name, dns.TypeANY)
|
||||
|
||||
resp.Answer = append(resp.Answer, s.config.Zone.Records(q.Question[0])...)
|
||||
|
||||
// reset
|
||||
q.SetQuestion(name, dns.TypePTR)
|
||||
|
||||
// From RFC6762
|
||||
// The Multicast DNS responder MUST send at least two unsolicited
|
||||
// responses, one second apart. To provide increased robustness against
|
||||
// packet loss, a responder MAY send up to eight unsolicited responses,
|
||||
// provided that the interval between unsolicited responses increases by
|
||||
// at least a factor of two with every response sent.
|
||||
timeout := 1 * time.Second
|
||||
timer := time.NewTimer(timeout)
|
||||
for i := 0; i < 3; i++ {
|
||||
if err := s.SendMulticast(resp); err != nil {
|
||||
log.Println("[ERR] mdns: failed to send announcement:", err.Error())
|
||||
}
|
||||
select {
|
||||
case <-timer.C:
|
||||
timeout *= 2
|
||||
timer.Reset(timeout)
|
||||
case <-s.shutdownCh:
|
||||
timer.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SendMulticast is used to send a multicast response packet.
|
||||
func (s *Server) SendMulticast(msg *dns.Msg) error {
|
||||
buf, err := msg.Pack()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s.ipv4List != nil {
|
||||
s.ipv4List.WriteToUDP(buf, ipv4Addr)
|
||||
}
|
||||
if s.ipv6List != nil {
|
||||
s.ipv6List.WriteToUDP(buf, ipv6Addr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendResponse is used to send a response packet
|
||||
func (s *Server) sendResponse(resp *dns.Msg, from net.Addr) error {
|
||||
// TODO(reddaly): Respect the unicast argument, and allow sending responses
|
||||
// over multicast.
|
||||
buf, err := resp.Pack()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Determine the socket to send from
|
||||
addr := from.(*net.UDPAddr)
|
||||
if addr.IP.To4() != nil {
|
||||
_, err = s.ipv4List.WriteToUDP(buf, addr)
|
||||
return err
|
||||
} else {
|
||||
_, err = s.ipv6List.WriteToUDP(buf, addr)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) unregister() error {
|
||||
sd, ok := s.config.Zone.(*MDNSService)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
sd.TTL = 0
|
||||
name := fmt.Sprintf("%s.%s.%s.", sd.Instance, trimDot(sd.Service), trimDot(sd.Domain))
|
||||
|
||||
q := new(dns.Msg)
|
||||
q.SetQuestion(name, dns.TypeANY)
|
||||
|
||||
resp := new(dns.Msg)
|
||||
resp.MsgHdr.Response = true
|
||||
resp.Answer = append(resp.Answer, s.config.Zone.Records(q.Question[0])...)
|
||||
|
||||
return s.SendMulticast(resp)
|
||||
}
|
308
vendor/github.com/micro/mdns/zone.go
generated
vendored
Normal file
@ -0,0 +1,308 @@
|
||||
package mdns
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultTTL is the default TTL value in returned DNS records in seconds.
|
||||
defaultTTL = 120
|
||||
)
|
||||
|
||||
// Zone is the interface used to integrate with the server and
|
||||
// to serve records dynamically
|
||||
type Zone interface {
|
||||
// Records returns DNS records in response to a DNS question.
|
||||
Records(q dns.Question) []dns.RR
|
||||
}
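
Any type with a Records method satisfies this interface, so the server is not tied to MDNSService. A minimal sketch of a custom zone follows; the StaticZone type, the name and the 192.0.2.10 address are made up for illustration:

package main

import (
	"log"
	"net"

	"github.com/micro/mdns"
	"github.com/miekg/dns"
)

// StaticZone answers A queries for one fixed name with one fixed address.
type StaticZone struct {
	Name string
	IP   net.IP
}

// Records implements the mdns.Zone interface.
func (z *StaticZone) Records(q dns.Question) []dns.RR {
	if q.Name != z.Name || (q.Qtype != dns.TypeA && q.Qtype != dns.TypeANY) {
		return nil
	}
	return []dns.RR{&dns.A{
		Hdr: dns.RR_Header{Name: z.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 120},
		A:   z.IP.To4(),
	}}
}

func main() {
	zone := &StaticZone{Name: "printer.local.", IP: net.ParseIP("192.0.2.10")}
	srv, err := mdns.NewServer(&mdns.Config{Zone: zone})
	if err != nil {
		log.Fatal(err)
	}
	defer srv.Shutdown()
	select {} // serve until the process is killed
}
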
|
||||
|
||||
// MDNSService is used to export a named service by implementing a Zone
|
||||
type MDNSService struct {
|
||||
Instance string // Instance name (e.g. "hostService name")
|
||||
Service string // Service name (e.g. "_http._tcp.")
|
||||
Domain string // If blank, assumes "local"
|
||||
HostName string // Host machine DNS name (e.g. "mymachine.net.")
|
||||
Port int // Service Port
|
||||
IPs []net.IP // IP addresses for the service's host
|
||||
TXT []string // Service TXT records
|
||||
TTL uint32
|
||||
serviceAddr string // Fully qualified service address
|
||||
instanceAddr string // Fully qualified instance address
|
||||
enumAddr string // _services._dns-sd._udp.<domain>
|
||||
}
|
||||
|
||||
// validateFQDN returns an error if the passed string is not a fully qualified
|
||||
// domain name (more specifically, a hostname).
|
||||
func validateFQDN(s string) error {
|
||||
if len(s) == 0 {
|
||||
return fmt.Errorf("FQDN must not be blank")
|
||||
}
|
||||
if s[len(s)-1] != '.' {
|
||||
return fmt.Errorf("FQDN must end in period: %s", s)
|
||||
}
|
||||
// TODO(reddaly): Perform full validation.
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewMDNSService returns a new instance of MDNSService.
|
||||
//
|
||||
// If domain, hostName, or ips is set to the zero value, then a default value
|
||||
// will be inferred from the operating system.
|
||||
//
|
||||
// TODO(reddaly): This interface may need to change to account for "unique
|
||||
// record" conflict rules of the mDNS protocol. Upon startup, the server should
|
||||
// check to ensure that the instance name does not conflict with other instance
|
||||
// names, and, if required, select a new name. There may also be conflicting
|
||||
// hostName A/AAAA records.
|
||||
func NewMDNSService(instance, service, domain, hostName string, port int, ips []net.IP, txt []string) (*MDNSService, error) {
|
||||
// Sanity check inputs
|
||||
if instance == "" {
|
||||
return nil, fmt.Errorf("missing service instance name")
|
||||
}
|
||||
if service == "" {
|
||||
return nil, fmt.Errorf("missing service name")
|
||||
}
|
||||
if port == 0 {
|
||||
return nil, fmt.Errorf("missing service port")
|
||||
}
|
||||
|
||||
// Set default domain
|
||||
if domain == "" {
|
||||
domain = "local."
|
||||
}
|
||||
if err := validateFQDN(domain); err != nil {
|
||||
return nil, fmt.Errorf("domain %q is not a fully-qualified domain name: %v", domain, err)
|
||||
}
|
||||
|
||||
// Get host information if no host is specified.
|
||||
if hostName == "" {
|
||||
var err error
|
||||
hostName, err = os.Hostname()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not determine host: %v", err)
|
||||
}
|
||||
hostName = fmt.Sprintf("%s.", hostName)
|
||||
}
|
||||
if err := validateFQDN(hostName); err != nil {
|
||||
return nil, fmt.Errorf("hostName %q is not a fully-qualified domain name: %v", hostName, err)
|
||||
}
|
||||
|
||||
if len(ips) == 0 {
|
||||
var err error
|
||||
ips, err = net.LookupIP(trimDot(hostName))
|
||||
if err != nil {
|
||||
// Try appending the host domain suffix and lookup again
|
||||
// (required for Linux-based hosts)
|
||||
tmpHostName := fmt.Sprintf("%s%s", hostName, domain)
|
||||
|
||||
ips, err = net.LookupIP(trimDot(tmpHostName))
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not determine host IP addresses for %s", hostName)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, ip := range ips {
|
||||
if ip.To4() == nil && ip.To16() == nil {
|
||||
return nil, fmt.Errorf("invalid IP address in IPs list: %v", ip)
|
||||
}
|
||||
}
|
||||
|
||||
return &MDNSService{
|
||||
Instance: instance,
|
||||
Service: service,
|
||||
Domain: domain,
|
||||
HostName: hostName,
|
||||
Port: port,
|
||||
IPs: ips,
|
||||
TXT: txt,
|
||||
TTL: defaultTTL,
|
||||
serviceAddr: fmt.Sprintf("%s.%s.", trimDot(service), trimDot(domain)),
|
||||
instanceAddr: fmt.Sprintf("%s.%s.%s.", instance, trimDot(service), trimDot(domain)),
|
||||
enumAddr: fmt.Sprintf("_services._dns-sd._udp.%s.", trimDot(domain)),
|
||||
}, nil
|
||||
}
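
Putting the constructor together with NewServer from server.go and the DNSSDService wrapper from dns_sd.go, a sketch of end-to-end registration might look like this; the instance name, service type, port and TXT value are invented for illustration:

package main

import (
	"log"

	"github.com/micro/mdns"
)

func main() {
	// Empty domain, host name and IP list let NewMDNSService infer defaults.
	service, err := mdns.NewMDNSService("my-proxy", "_example._tcp", "", "", 8080, nil, []string{"path=/"})
	if err != nil {
		log.Fatal(err)
	}

	// Wrapping in DNSSDService also answers service-type enumeration queries.
	server, err := mdns.NewServer(&mdns.Config{Zone: &mdns.DNSSDService{MDNSService: service}})
	if err != nil {
		log.Fatal(err)
	}
	defer server.Shutdown()
	select {}
}
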
|
||||
|
||||
// trimDot is used to trim the dots from the start or end of a string
|
||||
func trimDot(s string) string {
|
||||
return strings.Trim(s, ".")
|
||||
}
|
||||
|
||||
// Records returns DNS records in response to a DNS question.
|
||||
func (m *MDNSService) Records(q dns.Question) []dns.RR {
|
||||
switch q.Name {
|
||||
case m.enumAddr:
|
||||
return m.serviceEnum(q)
|
||||
case m.serviceAddr:
|
||||
return m.serviceRecords(q)
|
||||
case m.instanceAddr:
|
||||
return m.instanceRecords(q)
|
||||
case m.HostName:
|
||||
if q.Qtype == dns.TypeA || q.Qtype == dns.TypeAAAA {
|
||||
return m.instanceRecords(q)
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MDNSService) serviceEnum(q dns.Question) []dns.RR {
|
||||
switch q.Qtype {
|
||||
case dns.TypeANY:
|
||||
fallthrough
|
||||
case dns.TypePTR:
|
||||
rr := &dns.PTR{
|
||||
Hdr: dns.RR_Header{
|
||||
Name: q.Name,
|
||||
Rrtype: dns.TypePTR,
|
||||
Class: dns.ClassINET,
|
||||
Ttl: m.TTL,
|
||||
},
|
||||
Ptr: m.serviceAddr,
|
||||
}
|
||||
return []dns.RR{rr}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// serviceRecords is called when the query matches the service name
|
||||
func (m *MDNSService) serviceRecords(q dns.Question) []dns.RR {
|
||||
switch q.Qtype {
|
||||
case dns.TypeANY:
|
||||
fallthrough
|
||||
case dns.TypePTR:
|
||||
// Build a PTR response for the service
|
||||
rr := &dns.PTR{
|
||||
Hdr: dns.RR_Header{
|
||||
Name: q.Name,
|
||||
Rrtype: dns.TypePTR,
|
||||
Class: dns.ClassINET,
|
||||
Ttl: m.TTL,
|
||||
},
|
||||
Ptr: m.instanceAddr,
|
||||
}
|
||||
servRec := []dns.RR{rr}
|
||||
|
||||
// Get the instance records
|
||||
instRecs := m.instanceRecords(dns.Question{
|
||||
Name: m.instanceAddr,
|
||||
Qtype: dns.TypeANY,
|
||||
})
|
||||
|
||||
// Return the service record with the instance records
|
||||
return append(servRec, instRecs...)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// instanceRecords is called when the query matches the instance name
|
||||
func (m *MDNSService) instanceRecords(q dns.Question) []dns.RR {
|
||||
switch q.Qtype {
|
||||
case dns.TypeANY:
|
||||
// Get the SRV, which includes A and AAAA
|
||||
recs := m.instanceRecords(dns.Question{
|
||||
Name: m.instanceAddr,
|
||||
Qtype: dns.TypeSRV,
|
||||
})
|
||||
|
||||
// Add the TXT record
|
||||
recs = append(recs, m.instanceRecords(dns.Question{
|
||||
Name: m.instanceAddr,
|
||||
Qtype: dns.TypeTXT,
|
||||
})...)
|
||||
return recs
|
||||
|
||||
case dns.TypeA:
|
||||
var rr []dns.RR
|
||||
for _, ip := range m.IPs {
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
rr = append(rr, &dns.A{
|
||||
Hdr: dns.RR_Header{
|
||||
Name: m.HostName,
|
||||
Rrtype: dns.TypeA,
|
||||
Class: dns.ClassINET,
|
||||
Ttl: m.TTL,
|
||||
},
|
||||
A: ip4,
|
||||
})
|
||||
}
|
||||
}
|
||||
return rr
|
||||
|
||||
case dns.TypeAAAA:
|
||||
var rr []dns.RR
|
||||
for _, ip := range m.IPs {
|
||||
if ip.To4() != nil {
|
||||
// TODO(reddaly): IPv4 addresses could be encoded in IPv6 format and
|
||||
// put into AAAA records, but the current logic puts ipv4-encodable
|
||||
// addresses into the A records exclusively. Perhaps this should be
|
||||
// configurable?
|
||||
continue
|
||||
}
|
||||
|
||||
if ip16 := ip.To16(); ip16 != nil {
|
||||
rr = append(rr, &dns.AAAA{
|
||||
Hdr: dns.RR_Header{
|
||||
Name: m.HostName,
|
||||
Rrtype: dns.TypeAAAA,
|
||||
Class: dns.ClassINET,
|
||||
Ttl: m.TTL,
|
||||
},
|
||||
AAAA: ip16,
|
||||
})
|
||||
}
|
||||
}
|
||||
return rr
|
||||
|
||||
case dns.TypeSRV:
|
||||
// Create the SRV Record
|
||||
srv := &dns.SRV{
|
||||
Hdr: dns.RR_Header{
|
||||
Name: q.Name,
|
||||
Rrtype: dns.TypeSRV,
|
||||
Class: dns.ClassINET,
|
||||
Ttl: m.TTL,
|
||||
},
|
||||
Priority: 10,
|
||||
Weight: 1,
|
||||
Port: uint16(m.Port),
|
||||
Target: m.HostName,
|
||||
}
|
||||
recs := []dns.RR{srv}
|
||||
|
||||
// Add the A record
|
||||
recs = append(recs, m.instanceRecords(dns.Question{
|
||||
Name: m.instanceAddr,
|
||||
Qtype: dns.TypeA,
|
||||
})...)
|
||||
|
||||
// Add the AAAA record
|
||||
recs = append(recs, m.instanceRecords(dns.Question{
|
||||
Name: m.instanceAddr,
|
||||
Qtype: dns.TypeAAAA,
|
||||
})...)
|
||||
return recs
|
||||
|
||||
case dns.TypeTXT:
|
||||
txt := &dns.TXT{
|
||||
Hdr: dns.RR_Header{
|
||||
Name: q.Name,
|
||||
Rrtype: dns.TypeTXT,
|
||||
Class: dns.ClassINET,
|
||||
Ttl: m.TTL,
|
||||
},
|
||||
Txt: m.TXT,
|
||||
}
|
||||
return []dns.RR{txt}
|
||||
}
|
||||
return nil
|
||||
}
|
8
vendor/github.com/miekg/dns/.codecov.yml
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
coverage:
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
target: 40%
|
||||
threshold: null
|
||||
patch: false
|
||||
changes: false
|
4
vendor/github.com/miekg/dns/.gitignore
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
*.6
|
||||
tags
|
||||
test.out
|
||||
a.out
|
18
vendor/github.com/miekg/dns/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
language: go
|
||||
sudo: false
|
||||
|
||||
go:
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- tip
|
||||
|
||||
before_install:
|
||||
# don't use the miekg/dns when testing forks
|
||||
- mkdir -p $GOPATH/src/github.com/miekg
|
||||
- ln -s $TRAVIS_BUILD_DIR $GOPATH/src/github.com/miekg/ || true
|
||||
|
||||
script:
|
||||
- go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./...
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
1
vendor/github.com/miekg/dns/AUTHORS
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
Miek Gieben <miek@miek.nl>
|
10
vendor/github.com/miekg/dns/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
Alex A. Skinner
|
||||
Andrew Tunnell-Jones
|
||||
Ask Bjørn Hansen
|
||||
Dave Cheney
|
||||
Dusty Wilson
|
||||
Marek Majkowski
|
||||
Peter van Dijk
|
||||
Omri Bahumi
|
||||
Alex Sergeyev
|
||||
James Hartig
|
9
vendor/github.com/miekg/dns/COPYRIGHT
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
Copyright 2009 The Go Authors. All rights reserved. Use of this source code
|
||||
is governed by a BSD-style license that can be found in the LICENSE file.
|
||||
Extensions of the original work are copyright (c) 2011 Miek Gieben
|
||||
|
||||
Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is
|
||||
governed by a BSD-style license that can be found in the LICENSE file.
|
||||
|
||||
Copyright 2014 CloudFlare. All rights reserved. Use of this source code is
|
||||
governed by a BSD-style license that can be found in the LICENSE file.
|
57
vendor/github.com/miekg/dns/Gopkg.lock
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:6914c49eed986dfb8dffb33516fa129c49929d4d873f41e073c83c11c372b870"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = [
|
||||
"ed25519",
|
||||
"ed25519/internal/edwards25519",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "e3636079e1a4c1f337f212cc5cd2aca108f6c900"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:08e41d63f8dac84d83797368b56cf0b339e42d0224e5e56668963c28aec95685"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"bpf",
|
||||
"context",
|
||||
"internal/iana",
|
||||
"internal/socket",
|
||||
"ipv4",
|
||||
"ipv6",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
|
||||
name = "golang.org/x/sync"
|
||||
packages = ["errgroup"]
|
||||
pruneopts = ""
|
||||
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:149a432fabebb8221a80f77731b1cd63597197ded4f14af606ebe3a0959004ec"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix"]
|
||||
pruneopts = ""
|
||||
revision = "e4b3c5e9061176387e7cea65e4dc5853801f3fb7"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"golang.org/x/crypto/ed25519",
|
||||
"golang.org/x/net/ipv4",
|
||||
"golang.org/x/net/ipv6",
|
||||
"golang.org/x/sync/errgroup",
|
||||
"golang.org/x/sys/unix",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
38
vendor/github.com/miekg/dns/Gopkg.toml
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sync"
|
32
vendor/github.com/miekg/dns/LICENSE
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
Extensions of the original work are copyright (c) 2011 Miek Gieben
|
||||
|
||||
As this is fork of the official Go code the same license applies:
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
33
vendor/github.com/miekg/dns/Makefile.fuzz
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
# Makefile for fuzzing
|
||||
#
|
||||
# Uses go-fuzz and needs the tools installed.
|
||||
# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/
|
||||
#
|
||||
# Installing go-fuzz:
|
||||
# $ make -f Makefile.fuzz get
|
||||
# Installs:
|
||||
# * github.com/dvyukov/go-fuzz/go-fuzz
|
||||
# * github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
|
||||
all: build
|
||||
|
||||
.PHONY: build
|
||||
build:
|
||||
go-fuzz-build -tags fuzz github.com/miekg/dns
|
||||
|
||||
.PHONY: build-newrr
|
||||
build-newrr:
|
||||
go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns
|
||||
|
||||
.PHONY: fuzz
|
||||
fuzz:
|
||||
go-fuzz -bin=dns-fuzz.zip -workdir=fuzz
|
||||
|
||||
.PHONY: get
|
||||
get:
|
||||
go get github.com/dvyukov/go-fuzz/go-fuzz
|
||||
go get github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm *-fuzz.zip
|
52
vendor/github.com/miekg/dns/Makefile.release
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
# Makefile for releasing.
|
||||
#
|
||||
# The release is controlled from version.go. The version found there is
|
||||
# used to tag the git repo; we're not building any artifacts, so there is nothing
|
||||
# to upload to github.
|
||||
#
|
||||
# * Up the version in version.go
|
||||
# * Run: make -f Makefile.release release
|
||||
# * will *commit* your change with 'Release $VERSION'
|
||||
# * push to github
|
||||
#
|
||||
|
||||
define GO
|
||||
//+build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
func main() {
|
||||
fmt.Println(dns.Version.String())
|
||||
}
|
||||
endef
|
||||
|
||||
$(file > version_release.go,$(GO))
|
||||
VERSION:=$(shell go run version_release.go)
|
||||
TAG="v$(VERSION)"
|
||||
|
||||
all:
|
||||
@echo Use the \'release\' target to start a release $(VERSION)
|
||||
rm -f version_release.go
|
||||
|
||||
.PHONY: release
|
||||
release: commit push
|
||||
@echo Released $(VERSION)
|
||||
rm -f version_release.go
|
||||
|
||||
.PHONY: commit
|
||||
commit:
|
||||
@echo Committing release $(VERSION)
|
||||
git commit -am"Release $(VERSION)"
|
||||
git tag $(TAG)
|
||||
|
||||
.PHONY: push
|
||||
push:
|
||||
@echo Pushing release $(VERSION) to master
|
||||
git push --tags
|
||||
git push
|
170
vendor/github.com/miekg/dns/README.md
generated
vendored
Normal file
@ -0,0 +1,170 @@
|
||||
[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns)
|
||||
[![Code Coverage](https://img.shields.io/codecov/c/github/miekg/dns/master.svg)](https://codecov.io/github/miekg/dns?branch=master)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/miekg/dns)](https://goreportcard.com/report/miekg/dns)
|
||||
[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns)
|
||||
|
||||
# Alternative (more granular) approach to a DNS library
|
||||
|
||||
> Less is more.
|
||||
|
||||
Complete and usable DNS library. All Resource Records are supported, including the DNSSEC types.
|
||||
It follows a lean and mean philosophy. If there is stuff you should know as a DNS programmer there
|
||||
isn't a convenience function for it. Server side and client side programming is supported, i.e. you
|
||||
can build servers and resolvers with it.
|
||||
|
||||
We try to keep the "master" branch as sane as possible and at the bleeding edge of standards,
|
||||
avoiding breaking changes wherever reasonable. We support the last two versions of Go.
|
||||
|
||||
# Goals
|
||||
|
||||
* KISS;
|
||||
* Fast;
|
||||
* Small API. If it's easy to code in Go, don't make a function for it.
|
||||
|
||||
# Users
|
||||
|
||||
A not-so-up-to-date-list-that-may-be-actually-current:
|
||||
|
||||
* https://github.com/coredns/coredns
|
||||
* https://cloudflare.com
|
||||
* https://github.com/abh/geodns
|
||||
* http://www.statdns.com/
|
||||
* http://www.dnsinspect.com/
|
||||
* https://github.com/chuangbo/jianbing-dictionary-dns
|
||||
* http://www.dns-lg.com/
|
||||
* https://github.com/fcambus/rrda
|
||||
* https://github.com/kenshinx/godns
|
||||
* https://github.com/skynetservices/skydns
|
||||
* https://github.com/hashicorp/consul
|
||||
* https://github.com/DevelopersPL/godnsagent
|
||||
* https://github.com/duedil-ltd/discodns
|
||||
* https://github.com/StalkR/dns-reverse-proxy
|
||||
* https://github.com/tianon/rawdns
|
||||
* https://mesosphere.github.io/mesos-dns/
|
||||
* https://pulse.turbobytes.com/
|
||||
* https://github.com/fcambus/statzone
|
||||
* https://github.com/benschw/dns-clb-go
|
||||
* https://github.com/corny/dnscheck for <http://public-dns.info/>
|
||||
* https://namesmith.io
|
||||
* https://github.com/miekg/unbound
|
||||
* https://github.com/miekg/exdns
|
||||
* https://dnslookup.org
|
||||
* https://github.com/looterz/grimd
|
||||
* https://github.com/phamhongviet/serf-dns
|
||||
* https://github.com/mehrdadrad/mylg
|
||||
* https://github.com/bamarni/dockness
|
||||
* https://github.com/fffaraz/microdns
|
||||
* http://kelda.io
|
||||
* https://github.com/ipdcode/hades <https://jd.com>
|
||||
* https://github.com/StackExchange/dnscontrol/
|
||||
* https://www.dnsperf.com/
|
||||
* https://dnssectest.net/
|
||||
* https://dns.apebits.com
|
||||
* https://github.com/oif/apex
|
||||
* https://github.com/jedisct1/dnscrypt-proxy
|
||||
* https://github.com/jedisct1/rpdns
|
||||
* https://github.com/xor-gate/sshfp
|
||||
* https://github.com/rs/dnstrace
|
||||
* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss))
|
||||
* https://github.com/semihalev/sdns
|
||||
|
||||
Send pull request if you want to be listed here.
|
||||
|
||||
# Features
|
||||
|
||||
* UDP/TCP queries, IPv4 and IPv6
|
||||
* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported)
|
||||
* Fast
|
||||
* Server side programming (mimicking the net/http package)
|
||||
* Client side programming
|
||||
* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519
|
||||
* EDNS0, NSID, Cookies
|
||||
* AXFR/IXFR
|
||||
* TSIG, SIG(0)
|
||||
* DNS over TLS (DoT): encrypted connection between client and server over TCP
|
||||
* DNS name compression
|
||||
|
||||
Have fun!
|
||||
|
||||
Miek Gieben - 2010-2012 - <miek@miek.nl>
|
||||
DNS Authors 2012-
|
||||
|
||||
# Building
|
||||
|
||||
Building is done with the `go` tool. If you have set up your GOPATH correctly, the following should
|
||||
work:
|
||||
|
||||
go get github.com/miekg/dns
|
||||
go build github.com/miekg/dns
|
||||
|
||||
## Examples
|
||||
|
||||
A short "how to use the API" is at the beginning of doc.go (this also will show when you call `godoc
|
||||
github.com/miekg/dns`).
|
||||
|
||||
Example programs can be found in the `github.com/miekg/exdns` repository.
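
For orientation, a minimal query against a recursive resolver might look like the sketch below; the domain and resolver address are placeholders, not part of the library:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)

	in, rtt, err := c.Exchange(m, "8.8.8.8:53")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip:", rtt)
	for _, ans := range in.Answer {
		fmt.Println(ans)
	}
}
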
|
||||
|
||||
## Supported RFCs
|
||||
|
||||
*all of them*
|
||||
|
||||
* 103{4,5} - DNS standard
|
||||
* 1348 - NSAP record (removed the record)
|
||||
* 1982 - Serial Arithmetic
|
||||
* 1876 - LOC record
|
||||
* 1995 - IXFR
|
||||
* 1996 - DNS notify
|
||||
* 2136 - DNS Update (dynamic updates)
|
||||
* 2181 - RRset definition - there is no RRset type though, just []RR
|
||||
* 2537 - RSAMD5 DNS keys
|
||||
* 2065 - DNSSEC (updated in later RFCs)
|
||||
* 2671 - EDNS record
|
||||
* 2782 - SRV record
|
||||
* 2845 - TSIG record
|
||||
* 2915 - NAPTR record
|
||||
* 2929 - DNS IANA Considerations
|
||||
* 3110 - RSASHA1 DNS keys
|
||||
* 3225 - DO bit (DNSSEC OK)
|
||||
* 340{1,2,3} - NAPTR record
|
||||
* 3445 - Limiting the scope of (DNS)KEY
|
||||
* 3597 - Unknown RRs
|
||||
* 403{3,4,5} - DNSSEC + validation functions
|
||||
* 4255 - SSHFP record
|
||||
* 4343 - Case insensitivity
|
||||
* 4408 - SPF record
|
||||
* 4509 - SHA256 Hash in DS
|
||||
* 4592 - Wildcards in the DNS
|
||||
* 4635 - HMAC SHA TSIG
|
||||
* 4701 - DHCID
|
||||
* 4892 - id.server
|
||||
* 5001 - NSID
|
||||
* 5155 - NSEC3 record
|
||||
* 5205 - HIP record
|
||||
* 5702 - SHA2 in the DNS
|
||||
* 5936 - AXFR
|
||||
* 5966 - TCP implementation recommendations
|
||||
* 6605 - ECDSA
|
||||
* 6725 - IANA Registry Update
|
||||
* 6742 - ILNP DNS
|
||||
* 6840 - Clarifications and Implementation Notes for DNS Security
|
||||
* 6844 - CAA record
|
||||
* 6891 - EDNS0 update
|
||||
* 6895 - DNS IANA considerations
|
||||
* 6975 - Algorithm Understanding in DNSSEC
|
||||
* 7043 - EUI48/EUI64 records
|
||||
* 7314 - DNS (EDNS) EXPIRE Option
|
||||
* 7477 - CSYNC RR
|
||||
* 7828 - edns-tcp-keepalive EDNS0 Option
|
||||
* 7553 - URI record
|
||||
* 7858 - DNS over TLS: Initiation and Performance Considerations
|
||||
* 7871 - EDNS0 Client Subnet
|
||||
* 7873 - Domain Name System (DNS) Cookies
|
||||
* 8080 - EdDSA for DNSSEC
|
||||
* 8499 - DNS Terminology
|
||||
|
||||
## Loosely Based Upon
|
||||
|
||||
* ldns - <https://nlnetlabs.nl/projects/ldns/about/>
|
||||
* NSD - <https://nlnetlabs.nl/projects/nsd/about/>
|
||||
* Net::DNS - <http://www.net-dns.org/>
|
||||
* GRONG - <https://github.com/bortzmeyer/grong>
|
56
vendor/github.com/miekg/dns/acceptfunc.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
package dns
|
||||
|
||||
// MsgAcceptFunc is used early in the server code to accept or reject a message with RcodeFormatError.
|
||||
// It returns a MsgAcceptAction to indicate what should happen with the message.
|
||||
type MsgAcceptFunc func(dh Header) MsgAcceptAction
|
||||
|
||||
// DefaultMsgAcceptFunc checks the request and will reject if:
|
||||
//
|
||||
// * isn't a request (don't respond in that case).
|
||||
// * opcode isn't OpcodeQuery or OpcodeNotify
|
||||
// * Zero bit isn't zero
|
||||
// * has more than 1 question in the question section
|
||||
// * has more than 1 RR in the Answer section
|
||||
// * has more than 0 RRs in the Authority section
|
||||
// * has more than 2 RRs in the Additional section
|
||||
var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
|
||||
|
||||
// MsgAcceptAction represents the action to be taken.
|
||||
type MsgAcceptAction int
|
||||
|
||||
const (
|
||||
MsgAccept MsgAcceptAction = iota // Accept the message
|
||||
MsgReject // Reject the message with a RcodeFormatError
|
||||
MsgIgnore // Ignore the error and send nothing back.
|
||||
)
|
||||
|
||||
func defaultMsgAcceptFunc(dh Header) MsgAcceptAction {
|
||||
if isResponse := dh.Bits&_QR != 0; isResponse {
|
||||
return MsgIgnore
|
||||
}
|
||||
|
||||
// Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs.
|
||||
opcode := int(dh.Bits>>11) & 0xF
|
||||
if opcode != OpcodeQuery && opcode != OpcodeNotify {
|
||||
return MsgReject
|
||||
}
|
||||
|
||||
if isZero := dh.Bits&_Z != 0; isZero {
|
||||
return MsgReject
|
||||
}
|
||||
if dh.Qdcount != 1 {
|
||||
return MsgReject
|
||||
}
|
||||
// NOTIFY requests can have a SOA in the ANSWER section. See RFC 1996 Section 3.7 and 3.11.
|
||||
if dh.Ancount > 1 {
|
||||
return MsgReject
|
||||
}
|
||||
// IXFR request could have one SOA RR in the NS section. See RFC 1995, section 3.
|
||||
if dh.Nscount > 1 {
|
||||
return MsgReject
|
||||
}
|
||||
if dh.Arcount > 2 {
|
||||
return MsgReject
|
||||
}
|
||||
return MsgAccept
|
||||
}
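
The accept policy is pluggable. Assuming the Server type in this snapshot exposes the MsgAcceptFunc hook that DefaultMsgAcceptFunc is wired into, a sketch of relaxing the policy (here additionally accepting dynamic updates, purely as an illustration) could be:

package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	srv := &dns.Server{Addr: ":5300", Net: "udp"}

	// Accept OpcodeUpdate as well, then defer to the default policy.
	srv.MsgAcceptFunc = func(dh dns.Header) dns.MsgAcceptAction {
		if int(dh.Bits>>11)&0xF == dns.OpcodeUpdate {
			return dns.MsgAccept
		}
		return dns.DefaultMsgAcceptFunc(dh)
	}

	log.Fatal(srv.ListenAndServe())
}
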
|
476
vendor/github.com/miekg/dns/client.go
generated
vendored
Normal file
@ -0,0 +1,476 @@
|
||||
package dns
|
||||
|
||||
// A client implementation.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
dnsTimeout time.Duration = 2 * time.Second
|
||||
tcpIdleTimeout time.Duration = 8 * time.Second
|
||||
)
|
||||
|
||||
// A Conn represents a connection to a DNS server.
|
||||
type Conn struct {
|
||||
net.Conn // a net.Conn holding the connection
|
||||
UDPSize uint16 // minimum receive buffer for UDP messages
|
||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
|
||||
tsigRequestMAC string
|
||||
}
|
||||
|
||||
// A Client defines parameters for a DNS client.
|
||||
type Client struct {
|
||||
Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
|
||||
UDPSize uint16 // minimum receive buffer for UDP messages
|
||||
TLSConfig *tls.Config // TLS connection configuration
|
||||
Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more
|
||||
// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
|
||||
// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
|
||||
// Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext)
|
||||
Timeout time.Duration
|
||||
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
|
||||
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
|
||||
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
|
||||
group singleflight
|
||||
}
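
Since Net accepts "tcp-tls", a DNS-over-TLS client is mostly a matter of configuration. A hedged sketch follows; the Quad9 address and server name are just an example resolver, not something the library mandates:

package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	c := &dns.Client{
		Net:       "tcp-tls",
		TLSConfig: &tls.Config{ServerName: "dns.quad9.net"},
	}

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)

	in, _, err := c.Exchange(m, "9.9.9.9:853")
	if err != nil {
		log.Fatal(err)
	}
	for _, ans := range in.Answer {
		fmt.Println(ans)
	}
}
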
|
||||
|
||||
// Exchange performs a synchronous UDP query. It sends the message m to the address
|
||||
// contained in a and waits for a reply. Exchange does not retry a failed query, nor
|
||||
// will it fall back to TCP in case of truncation.
|
||||
// See client.Exchange for more information on setting larger buffer sizes.
|
||||
func Exchange(m *Msg, a string) (r *Msg, err error) {
|
||||
client := Client{Net: "udp"}
|
||||
r, _, err = client.Exchange(m, a)
|
||||
return r, err
|
||||
}
|
||||
|
||||
func (c *Client) dialTimeout() time.Duration {
|
||||
if c.Timeout != 0 {
|
||||
return c.Timeout
|
||||
}
|
||||
if c.DialTimeout != 0 {
|
||||
return c.DialTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
func (c *Client) readTimeout() time.Duration {
|
||||
if c.ReadTimeout != 0 {
|
||||
return c.ReadTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
func (c *Client) writeTimeout() time.Duration {
|
||||
if c.WriteTimeout != 0 {
|
||||
return c.WriteTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
// Dial connects to the address on the named network.
|
||||
func (c *Client) Dial(address string) (conn *Conn, err error) {
|
||||
// create a new dialer with the appropriate timeout
|
||||
var d net.Dialer
|
||||
if c.Dialer == nil {
|
||||
d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())}
|
||||
} else {
|
||||
d = *c.Dialer
|
||||
}
|
||||
|
||||
network := c.Net
|
||||
if network == "" {
|
||||
network = "udp"
|
||||
}
|
||||
|
||||
useTLS := strings.HasPrefix(network, "tcp") && strings.HasSuffix(network, "-tls")
|
||||
|
||||
conn = new(Conn)
|
||||
if useTLS {
|
||||
network = strings.TrimSuffix(network, "-tls")
|
||||
|
||||
conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
|
||||
} else {
|
||||
conn.Conn, err = d.Dial(network, address)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// Exchange performs a synchronous query. It sends the message m to the address
|
||||
// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
|
||||
//
|
||||
// c := new(dns.Client)
|
||||
// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
|
||||
//
|
||||
// Exchange does not retry a failed query, nor will it fall back to TCP in
|
||||
// case of truncation.
|
||||
// It is up to the caller to create a message that allows for larger responses to be
|
||||
// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
|
||||
// buffer; see SetEdns0. Messages without an OPT RR will fall back to the historic limit
|
||||
// of 512 bytes
|
||||
// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
|
||||
// attribute appropriately
|
||||
func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
|
||||
if !c.SingleInflight {
|
||||
return c.exchange(m, address)
|
||||
}
|
||||
|
||||
t := "nop"
|
||||
if t1, ok := TypeToString[m.Question[0].Qtype]; ok {
|
||||
t = t1
|
||||
}
|
||||
cl := "nop"
|
||||
if cl1, ok := ClassToString[m.Question[0].Qclass]; ok {
|
||||
cl = cl1
|
||||
}
|
||||
r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
|
||||
return c.exchange(m, address)
|
||||
})
|
||||
if r != nil && shared {
|
||||
r = r.Copy()
|
||||
}
|
||||
return r, rtt, err
|
||||
}
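
As the comment above says, larger UDP answers only arrive if the caller attaches an OPT RR first. A small sketch (the 4096-byte buffer size and resolver address are arbitrary choices):

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeTXT)
	// Advertise a 4096-byte receive buffer via an EDNS0 OPT RR (DO bit off).
	m.SetEdns0(4096, false)

	c := new(dns.Client)
	in, _, err := c.Exchange(m, "8.8.8.8:53")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("answers:", len(in.Answer), "truncated:", in.Truncated)
}
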
|
||||
|
||||
func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||
var co *Conn
|
||||
|
||||
co, err = c.Dial(a)
|
||||
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer co.Close()
|
||||
|
||||
opt := m.IsEdns0()
|
||||
// If EDNS0 is used use that for size.
|
||||
if opt != nil && opt.UDPSize() >= MinMsgSize {
|
||||
co.UDPSize = opt.UDPSize()
|
||||
}
|
||||
// Otherwise use the client's configured UDP size.
|
||||
if opt == nil && c.UDPSize >= MinMsgSize {
|
||||
co.UDPSize = c.UDPSize
|
||||
}
|
||||
|
||||
co.TsigSecret = c.TsigSecret
|
||||
t := time.Now()
|
||||
// write with the appropriate write timeout
|
||||
co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout())))
|
||||
if err = co.WriteMsg(m); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout())))
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
}
|
||||
rtt = time.Since(t)
|
||||
return r, rtt, err
|
||||
}
|
||||
|
||||
// ReadMsg reads a message from the connection co.
|
||||
// If the received message contains a TSIG record the transaction signature
|
||||
// is verified. This method always tries to return the message, however if an
|
||||
// error is returned there are no guarantees that the returned message is a
|
||||
// valid representation of the packet read.
|
||||
func (co *Conn) ReadMsg() (*Msg, error) {
|
||||
p, err := co.ReadMsgHeader(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := new(Msg)
|
||||
if err := m.Unpack(p); err != nil {
|
||||
// If an error was returned, we still want to allow the user to use
|
||||
// the message, but naively they can just check err if they don't want
|
||||
// to use an erroneous message
|
||||
return m, err
|
||||
}
|
||||
if t := m.IsTsig(); t != nil {
|
||||
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
|
||||
return m, ErrSecret
|
||||
}
|
||||
// Need to work on the original message p, as that was used to calculate the tsig.
|
||||
err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
|
||||
}
|
||||
return m, err
|
||||
}
|
||||
|
||||
// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil).
|
||||
// Returns message as a byte slice to be parsed with Msg.Unpack later on.
|
||||
// Note that error handling on the message body is not possible as only the header is parsed.
|
||||
func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
|
||||
var (
|
||||
p []byte
|
||||
n int
|
||||
err error
|
||||
)
|
||||
|
||||
switch t := co.Conn.(type) {
|
||||
case *net.TCPConn, *tls.Conn:
|
||||
r := t.(io.Reader)
|
||||
|
||||
// First two bytes specify the length of the entire message.
|
||||
l, err := tcpMsgLen(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p = make([]byte, l)
|
||||
n, err = tcpRead(r, p)
|
||||
default:
|
||||
if co.UDPSize > MinMsgSize {
|
||||
p = make([]byte, co.UDPSize)
|
||||
} else {
|
||||
p = make([]byte, MinMsgSize)
|
||||
}
|
||||
n, err = co.Read(p)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if n < headerSize {
|
||||
return nil, ErrShortRead
|
||||
}
|
||||
|
||||
p = p[:n]
|
||||
if hdr != nil {
|
||||
dh, _, err := unpackMsgHdr(p, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
*hdr = dh
|
||||
}
|
||||
return p, err
|
||||
}
|
||||
|
||||
// tcpMsgLen is a helper func to read the first two bytes of a stream as the uint16 packet length.
|
||||
func tcpMsgLen(t io.Reader) (int, error) {
|
||||
p := []byte{0, 0}
|
||||
n, err := t.Read(p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// As seen with my local router/switch, returns 1 byte on the above read,
|
||||
// resulting in a ShortRead. Just write it out (instead of a loop) and read the
|
||||
// other byte.
|
||||
if n == 1 {
|
||||
n1, err := t.Read(p[1:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n += n1
|
||||
}
|
||||
|
||||
if n != 2 {
|
||||
return 0, ErrShortRead
|
||||
}
|
||||
l := binary.BigEndian.Uint16(p)
|
||||
if l == 0 {
|
||||
return 0, ErrShortRead
|
||||
}
|
||||
return int(l), nil
|
||||
}
|
||||
|
||||
// tcpRead calls TCPConn.Read enough times to fill allocated buffer.
|
||||
func tcpRead(t io.Reader, p []byte) (int, error) {
|
||||
n, err := t.Read(p)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
for n < len(p) {
|
||||
j, err := t.Read(p[n:])
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
n += j
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Read implements the net.Conn read method.
|
||||
func (co *Conn) Read(p []byte) (n int, err error) {
|
||||
if co.Conn == nil {
|
||||
return 0, ErrConnEmpty
|
||||
}
|
||||
if len(p) < 2 {
|
||||
return 0, io.ErrShortBuffer
|
||||
}
|
||||
switch t := co.Conn.(type) {
|
||||
case *net.TCPConn, *tls.Conn:
|
||||
r := t.(io.Reader)
|
||||
|
||||
l, err := tcpMsgLen(r)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if l > len(p) {
|
||||
return l, io.ErrShortBuffer
|
||||
}
|
||||
return tcpRead(r, p[:l])
|
||||
}
|
||||
// UDP connection
|
||||
return co.Conn.Read(p)
|
||||
}
|
||||
|
||||
// WriteMsg sends a message through the connection co.
|
||||
// If the message m contains a TSIG record the transaction
|
||||
// signature is calculated.
|
||||
func (co *Conn) WriteMsg(m *Msg) (err error) {
|
||||
var out []byte
|
||||
if t := m.IsTsig(); t != nil {
|
||||
mac := ""
|
||||
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
|
||||
return ErrSecret
|
||||
}
|
||||
out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
|
||||
// Set for the next read, although only used in zone transfers
|
||||
co.tsigRequestMAC = mac
|
||||
} else {
|
||||
out, err = m.Pack()
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = co.Write(out)
|
||||
return err
|
||||
}
|
||||
|
||||
// Write implements the net.Conn Write method.
|
||||
func (co *Conn) Write(p []byte) (n int, err error) {
|
||||
switch t := co.Conn.(type) {
|
||||
case *net.TCPConn, *tls.Conn:
|
||||
w := t.(io.Writer)
|
||||
|
||||
lp := len(p)
|
||||
if lp < 2 {
|
||||
return 0, io.ErrShortBuffer
|
||||
}
|
||||
if lp > MaxMsgSize {
|
||||
return 0, &Error{err: "message too large"}
|
||||
}
|
||||
l := make([]byte, 2, lp+2)
|
||||
binary.BigEndian.PutUint16(l, uint16(lp))
|
||||
p = append(l, p...)
|
||||
n, err := io.Copy(w, bytes.NewReader(p))
|
||||
return int(n), err
|
||||
}
|
||||
return co.Conn.Write(p)
|
||||
}
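// A minimal sketch of the framing used for TCP and TLS above: each DNS message
// is prefixed with its length as a big-endian uint16, which is what tcpMsgLen
// reads back on the receiving side.
func exampleFrameTCPMessage(msg []byte) []byte {
	framed := make([]byte, 2, 2+len(msg))
	binary.BigEndian.PutUint16(framed, uint16(len(msg)))
	return append(framed, msg...)
}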
|
||||
|
||||
// Return the appropriate timeout for a specific request
|
||||
func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration {
|
||||
var requestTimeout time.Duration
|
||||
if c.Timeout != 0 {
|
||||
requestTimeout = c.Timeout
|
||||
} else {
|
||||
requestTimeout = timeout
|
||||
}
|
||||
// net.Dialer.Timeout has priority if smaller than the timeouts computed so
|
||||
// far
|
||||
if c.Dialer != nil && c.Dialer.Timeout != 0 {
|
||||
if c.Dialer.Timeout < requestTimeout {
|
||||
requestTimeout = c.Dialer.Timeout
|
||||
}
|
||||
}
|
||||
return requestTimeout
|
||||
}
|
||||
|
||||
// Dial connects to the address on the named network.
|
||||
func Dial(network, address string) (conn *Conn, err error) {
|
||||
conn = new(Conn)
|
||||
conn.Conn, err = net.Dial(network, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// ExchangeContext performs a synchronous UDP query, like Exchange. It
|
||||
// additionally obeys deadlines from the passed Context.
|
||||
func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
|
||||
client := Client{Net: "udp"}
|
||||
r, _, err = client.ExchangeContext(ctx, m, a)
|
||||
// ignoring rtt to leave the original ExchangeContext API unchanged, but
|
||||
// this function will go away.
|
||||
return r, err
|
||||
}
|
||||
|
||||
// ExchangeConn performs a synchronous query. It sends the message m via the connection
|
||||
// c and waits for a reply. The connection c is not closed by ExchangeConn.
|
||||
// This function is going away, but can easily be mimicked:
|
||||
//
|
||||
// co := &dns.Conn{Conn: c} // c is your net.Conn
|
||||
// co.WriteMsg(m)
|
||||
// in, _ := co.ReadMsg()
|
||||
// co.Close()
|
||||
//
|
||||
func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
|
||||
println("dns: ExchangeConn: this function is deprecated")
|
||||
co := new(Conn)
|
||||
co.Conn = c
|
||||
if err = co.WriteMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
|
||||
// DialTimeout acts like Dial but takes a timeout.
|
||||
func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
|
||||
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
|
||||
return client.Dial(address)
|
||||
}
|
||||
|
||||
// DialWithTLS connects to the address on the named network with TLS.
|
||||
func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
|
||||
if !strings.HasSuffix(network, "-tls") {
|
||||
network += "-tls"
|
||||
}
|
||||
client := Client{Net: network, TLSConfig: tlsConfig}
|
||||
return client.Dial(address)
|
||||
}
|
||||
|
||||
// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
|
||||
func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
|
||||
if !strings.HasSuffix(network, "-tls") {
|
||||
network += "-tls"
|
||||
}
|
||||
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
|
||||
return client.Dial(address)
|
||||
}
|
||||
|
||||
// ExchangeContext acts like Exchange, but honors the deadline on the provided
|
||||
// context, if present. If there is both a context deadline and a configured
|
||||
// timeout on the client, the earliest of the two takes effect.
|
||||
func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||
var timeout time.Duration
|
||||
if deadline, ok := ctx.Deadline(); !ok {
|
||||
timeout = 0
|
||||
} else {
|
||||
timeout = time.Until(deadline)
|
||||
}
|
||||
// not passing the context to the underlying calls, as the API does not support
|
||||
// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
|
||||
// TODO(tmthrgd,miekg): this is a race condition.
|
||||
c.Dialer = &net.Dialer{Timeout: timeout}
|
||||
return c.Exchange(m, a)
|
||||
}
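// A minimal usage sketch of ExchangeContext: the context deadline is turned
// into a Dialer timeout as shown above. The address and name are placeholders.
func exampleExchangeContext() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	m := new(Msg)
	m.SetQuestion(Fqdn("example.org"), TypeAAAA)

	c := &Client{Net: "udp"}
	r, _, err := c.ExchangeContext(ctx, m, "192.0.2.53:53")
	_, _ = r, err
}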
|
139
vendor/github.com/miekg/dns/clientconfig.go
generated
vendored
Normal file
@ -0,0 +1,139 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ClientConfig wraps the contents of the /etc/resolv.conf file.
|
||||
type ClientConfig struct {
|
||||
Servers []string // servers to use
|
||||
Search []string // suffixes to append to local name
|
||||
Port string // what port to use
|
||||
Ndots int // number of dots in name to trigger absolute lookup
|
||||
Timeout int // seconds before giving up on packet
|
||||
Attempts int // lost packets before giving up on server, not used in the package dns
|
||||
}
|
||||
|
||||
// ClientConfigFromFile parses a resolv.conf(5) like file and returns
|
||||
// a *ClientConfig.
|
||||
func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
|
||||
file, err := os.Open(resolvconf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
return ClientConfigFromReader(file)
|
||||
}
|
||||
|
||||
// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument
|
||||
func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
|
||||
c := new(ClientConfig)
|
||||
scanner := bufio.NewScanner(resolvconf)
|
||||
c.Servers = make([]string, 0)
|
||||
c.Search = make([]string, 0)
|
||||
c.Port = "53"
|
||||
c.Ndots = 1
|
||||
c.Timeout = 5
|
||||
c.Attempts = 2
|
||||
|
||||
for scanner.Scan() {
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line := scanner.Text()
|
||||
f := strings.Fields(line)
|
||||
if len(f) < 1 {
|
||||
continue
|
||||
}
|
||||
switch f[0] {
|
||||
case "nameserver": // add one name server
|
||||
if len(f) > 1 {
|
||||
// One more check: make sure server name is
|
||||
// just an IP address. Otherwise we need DNS
|
||||
// to look it up.
|
||||
name := f[1]
|
||||
c.Servers = append(c.Servers, name)
|
||||
}
|
||||
|
||||
case "domain": // set search path to just this domain
|
||||
if len(f) > 1 {
|
||||
c.Search = make([]string, 1)
|
||||
c.Search[0] = f[1]
|
||||
} else {
|
||||
c.Search = make([]string, 0)
|
||||
}
|
||||
|
||||
case "search": // set search path to given servers
|
||||
c.Search = make([]string, len(f)-1)
|
||||
for i := 0; i < len(c.Search); i++ {
|
||||
c.Search[i] = f[i+1]
|
||||
}
|
||||
|
||||
case "options": // magic options
|
||||
for i := 1; i < len(f); i++ {
|
||||
s := f[i]
|
||||
switch {
|
||||
case len(s) >= 6 && s[:6] == "ndots:":
|
||||
n, _ := strconv.Atoi(s[6:])
|
||||
if n < 0 {
|
||||
n = 0
|
||||
} else if n > 15 {
|
||||
n = 15
|
||||
}
|
||||
c.Ndots = n
|
||||
case len(s) >= 8 && s[:8] == "timeout:":
|
||||
n, _ := strconv.Atoi(s[8:])
|
||||
if n < 1 {
|
||||
n = 1
|
||||
}
|
||||
c.Timeout = n
|
||||
case len(s) >= 9 && s[:9] == "attempts:":
|
||||
n, _ := strconv.Atoi(s[9:])
|
||||
if n < 1 {
|
||||
n = 1
|
||||
}
|
||||
c.Attempts = n
|
||||
case s == "rotate":
|
||||
/* not imp */
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// NameList returns all of the names that should be queried based on the
|
||||
// config. It is based on Go's net/dns name building, but it does not
|
||||
// check the length of the resulting names.
|
||||
func (c *ClientConfig) NameList(name string) []string {
|
||||
// if this domain is already fully qualified, no append needed.
|
||||
if IsFqdn(name) {
|
||||
return []string{name}
|
||||
}
|
||||
|
||||
// Check to see if the name has more labels than Ndots. Do this before making
|
||||
// the domain fully qualified.
|
||||
hasNdots := CountLabel(name) > c.Ndots
|
||||
// Make the domain fully qualified.
|
||||
name = Fqdn(name)
|
||||
|
||||
// Make a list of names based off search.
|
||||
names := []string{}
|
||||
|
||||
// If name has enough dots, try that first.
|
||||
if hasNdots {
|
||||
names = append(names, name)
|
||||
}
|
||||
for _, s := range c.Search {
|
||||
names = append(names, Fqdn(name+s))
|
||||
}
|
||||
// If we didn't have enough dots, try after suffixes.
|
||||
if !hasNdots {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
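// A minimal usage sketch: load a resolv.conf style file and expand a relative
// name through the search path. The path is the conventional location, and the
// expected result assumes a single "search example.org" entry.
func exampleClientConfig() ([]string, error) {
	conf, err := ClientConfigFromFile("/etc/resolv.conf")
	if err != nil {
		return nil, err
	}
	// With search "example.org" and Ndots 1 this yields
	// []string{"www.example.org.", "www."}.
	return conf.NameList("www"), nil
}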
|
43
vendor/github.com/miekg/dns/dane.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"crypto/x509"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records.
|
||||
func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
|
||||
switch matchingType {
|
||||
case 0:
|
||||
switch selector {
|
||||
case 0:
|
||||
return hex.EncodeToString(cert.Raw), nil
|
||||
case 1:
|
||||
return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
|
||||
}
|
||||
case 1:
|
||||
h := sha256.New()
|
||||
switch selector {
|
||||
case 0:
|
||||
h.Write(cert.Raw)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
case 1:
|
||||
h.Write(cert.RawSubjectPublicKeyInfo)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
}
|
||||
case 2:
|
||||
h := sha512.New()
|
||||
switch selector {
|
||||
case 0:
|
||||
h.Write(cert.Raw)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
case 1:
|
||||
h.Write(cert.RawSubjectPublicKeyInfo)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
}
|
||||
}
|
||||
return "", errors.New("dns: bad MatchingType or Selector")
|
||||
}
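// A minimal usage sketch: build a TLSA record for an already obtained
// certificate, using selector 1 (SubjectPublicKeyInfo) and matching type 1
// (SHA-256). The owner name is supplied by the caller.
func exampleTLSARecord(name string, cert *x509.Certificate) (*TLSA, error) {
	digest, err := CertificateToDANE(1, 1, cert)
	if err != nil {
		return nil, err
	}
	return &TLSA{
		Hdr:          RR_Header{Name: name, Rrtype: TypeTLSA, Class: ClassINET, Ttl: 3600},
		Usage:        3, // DANE-EE
		Selector:     1,
		MatchingType: 1,
		Certificate:  digest,
	}, nil
}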
|
364
vendor/github.com/miekg/dns/defaults.go
generated
vendored
Normal file
@ -0,0 +1,364 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const hexDigit = "0123456789abcdef"
|
||||
|
||||
// Everything is assumed in ClassINET.
|
||||
|
||||
// SetReply creates a reply message from a request message.
|
||||
func (dns *Msg) SetReply(request *Msg) *Msg {
|
||||
dns.Id = request.Id
|
||||
dns.Response = true
|
||||
dns.Opcode = request.Opcode
|
||||
if dns.Opcode == OpcodeQuery {
|
||||
dns.RecursionDesired = request.RecursionDesired // Copy rd bit
|
||||
dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit
|
||||
}
|
||||
dns.Rcode = RcodeSuccess
|
||||
if len(request.Question) > 0 {
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = request.Question[0]
|
||||
}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetQuestion creates a question message; it sets the Question
|
||||
// section, generates an Id and sets the RecursionDesired (RD)
|
||||
// bit to true.
|
||||
func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
|
||||
dns.Id = Id()
|
||||
dns.RecursionDesired = true
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = Question{z, t, ClassINET}
|
||||
return dns
|
||||
}
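// A minimal usage sketch: SetQuestion together with SetEdns0 (defined below)
// is the usual way to build a DNSSEC-aware query. The name is a placeholder.
func exampleQuery() *Msg {
	m := new(Msg)
	m.SetQuestion(Fqdn("example.org"), TypeMX)
	m.SetEdns0(4096, true) // advertise a 4096 byte UDP size and set the DO bit
	return m
}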
|
||||
|
||||
// SetNotify creates a notify message; it sets the Question
|
||||
// section, generates an Id and sets the Authoritative (AA)
|
||||
// bit to true.
|
||||
func (dns *Msg) SetNotify(z string) *Msg {
|
||||
dns.Opcode = OpcodeNotify
|
||||
dns.Authoritative = true
|
||||
dns.Id = Id()
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = Question{z, TypeSOA, ClassINET}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetRcode creates an error message suitable for the request.
|
||||
func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
|
||||
dns.SetReply(request)
|
||||
dns.Rcode = rcode
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetRcodeFormatError creates a message with FormError set.
|
||||
func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
|
||||
dns.Rcode = RcodeFormatError
|
||||
dns.Opcode = OpcodeQuery
|
||||
dns.Response = true
|
||||
dns.Authoritative = false
|
||||
dns.Id = request.Id
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetUpdate makes the message a dynamic update message. It
|
||||
// sets the ZONE section to: z, TypeSOA, ClassINET.
|
||||
func (dns *Msg) SetUpdate(z string) *Msg {
|
||||
dns.Id = Id()
|
||||
dns.Response = false
|
||||
dns.Opcode = OpcodeUpdate
|
||||
dns.Compress = false // BIND9 cannot handle compression
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = Question{z, TypeSOA, ClassINET}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetIxfr creates message for requesting an IXFR.
|
||||
func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
|
||||
dns.Id = Id()
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Ns = make([]RR, 1)
|
||||
s := new(SOA)
|
||||
s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
|
||||
s.Serial = serial
|
||||
s.Ns = ns
|
||||
s.Mbox = mbox
|
||||
dns.Question[0] = Question{z, TypeIXFR, ClassINET}
|
||||
dns.Ns[0] = s
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetAxfr creates message for requesting an AXFR.
|
||||
func (dns *Msg) SetAxfr(z string) *Msg {
|
||||
dns.Id = Id()
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = Question{z, TypeAXFR, ClassINET}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetTsig appends a TSIG RR to the message.
|
||||
// This is only a skeleton TSIG RR that is added as the last RR in the
|
||||
// additional section. The TSIG is calculated when the message is being sent.
|
||||
func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg {
|
||||
t := new(TSIG)
|
||||
t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
|
||||
t.Algorithm = algo
|
||||
t.Fudge = fudge
|
||||
t.TimeSigned = uint64(timesigned)
|
||||
t.OrigId = dns.Id
|
||||
dns.Extra = append(dns.Extra, t)
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetEdns0 appends an EDNS0 OPT RR to the message.
|
||||
// TSIG should always be the last RR in a message.
|
||||
func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
|
||||
e := new(OPT)
|
||||
e.Hdr.Name = "."
|
||||
e.Hdr.Rrtype = TypeOPT
|
||||
e.SetUDPSize(udpsize)
|
||||
if do {
|
||||
e.SetDo()
|
||||
}
|
||||
dns.Extra = append(dns.Extra, e)
|
||||
return dns
|
||||
}
|
||||
|
||||
// IsTsig checks if the message has a TSIG record as the last record
|
||||
// in the additional section. It returns the TSIG record found or nil.
|
||||
func (dns *Msg) IsTsig() *TSIG {
|
||||
if len(dns.Extra) > 0 {
|
||||
if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
|
||||
return dns.Extra[len(dns.Extra)-1].(*TSIG)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEdns0 checks if the message has an EDNS0 (OPT) record, any EDNS0
|
||||
// record in the additional section will do. It returns the OPT record
|
||||
// found or nil.
|
||||
func (dns *Msg) IsEdns0() *OPT {
|
||||
// EDNS0 is at the end of the additional section, start there.
|
||||
// We might want to change this to *only* look at the last two
|
||||
// records. So we see TSIG and/or OPT - this a slightly bigger
|
||||
// change though.
|
||||
for i := len(dns.Extra) - 1; i >= 0; i-- {
|
||||
if dns.Extra[i].Header().Rrtype == TypeOPT {
|
||||
return dns.Extra[i].(*OPT)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDomainName checks if s is a valid domain name, it returns the number of
|
||||
// labels and true, when a domain name is valid. Note that a non-fully-qualified
|
||||
// domain name is considered valid, in this case the last label is counted in
|
||||
// the number of labels. When false is returned the number of labels is not
|
||||
// defined. Also note that this function is extremely liberal; almost any
|
||||
// string is a valid domain name as DNS is an 8-bit protocol. It checks if each
|
||||
// label fits in 63 characters and that the entire name will fit into the 255
|
||||
// octet wire format limit.
|
||||
func IsDomainName(s string) (labels int, ok bool) {
|
||||
// XXX: The logic in this function was copied from packDomainName and
|
||||
// should be kept in sync with that function.
|
||||
|
||||
const lenmsg = 256
|
||||
|
||||
if len(s) == 0 { // Ok, for instance when dealing with update RR without any rdata.
|
||||
return 0, false
|
||||
}
|
||||
|
||||
s = Fqdn(s)
|
||||
|
||||
// Each dot ends a segment of the name. Except for escaped dots (\.), which
|
||||
// are normal dots.
|
||||
|
||||
var (
|
||||
off int
|
||||
begin int
|
||||
wasDot bool
|
||||
)
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '\\':
|
||||
if off+1 > lenmsg {
|
||||
return labels, false
|
||||
}
|
||||
|
||||
// check for \DDD
|
||||
if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
|
||||
i += 3
|
||||
begin += 3
|
||||
} else {
|
||||
i++
|
||||
begin++
|
||||
}
|
||||
|
||||
wasDot = false
|
||||
case '.':
|
||||
if wasDot {
|
||||
// two dots back to back is not legal
|
||||
return labels, false
|
||||
}
|
||||
wasDot = true
|
||||
|
||||
labelLen := i - begin
|
||||
if labelLen >= 1<<6 { // top two bits of length must be clear
|
||||
return labels, false
|
||||
}
|
||||
|
||||
// off can already (we're in a loop) be bigger than lenmsg
|
||||
// this happens when a name isn't fully qualified
|
||||
off += 1 + labelLen
|
||||
if off > lenmsg {
|
||||
return labels, false
|
||||
}
|
||||
|
||||
labels++
|
||||
begin = i + 1
|
||||
default:
|
||||
wasDot = false
|
||||
}
|
||||
}
|
||||
|
||||
return labels, true
|
||||
}
|
||||
|
||||
// IsSubDomain checks if child is indeed a child of the parent. If child and parent
|
||||
// are the same domain, true is returned as well.
|
||||
func IsSubDomain(parent, child string) bool {
|
||||
// Entire child is contained in parent
|
||||
return CompareDomainName(parent, child) == CountLabel(parent)
|
||||
}
|
||||
|
||||
// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
|
||||
// The checking is performed on the binary payload.
|
||||
func IsMsg(buf []byte) error {
|
||||
// Header
|
||||
if len(buf) < headerSize {
|
||||
return errors.New("dns: bad message header")
|
||||
}
|
||||
// Header: Opcode
|
||||
// TODO(miek): more checks here, e.g. check all header bits.
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsFqdn checks if a domain name is fully qualified.
|
||||
func IsFqdn(s string) bool {
|
||||
s2 := strings.TrimSuffix(s, ".")
|
||||
if s == s2 {
|
||||
return false
|
||||
}
|
||||
|
||||
i := strings.LastIndexFunc(s2, func(r rune) bool {
|
||||
return r != '\\'
|
||||
})
|
||||
|
||||
// Test whether we have an even number of escape sequences before
|
||||
// the dot or none.
|
||||
return (len(s2)-i)%2 != 0
|
||||
}
|
||||
|
||||
// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
|
||||
// This means the RRs need to have the same type, name, and class. Returns true
|
||||
// if the RR set is valid, otherwise false.
|
||||
func IsRRset(rrset []RR) bool {
|
||||
if len(rrset) == 0 {
|
||||
return false
|
||||
}
|
||||
if len(rrset) == 1 {
|
||||
return true
|
||||
}
|
||||
rrHeader := rrset[0].Header()
|
||||
rrType := rrHeader.Rrtype
|
||||
rrClass := rrHeader.Class
|
||||
rrName := rrHeader.Name
|
||||
|
||||
for _, rr := range rrset[1:] {
|
||||
curRRHeader := rr.Header()
|
||||
if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
|
||||
// Mismatch between the records, so this is not a valid rrset for
|
||||
// signing/verifying.
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Fqdn returns the fully qualified domain name from s.
|
||||
// If s is already fully qualified, it behaves as the identity function.
|
||||
func Fqdn(s string) string {
|
||||
if IsFqdn(s) {
|
||||
return s
|
||||
}
|
||||
return s + "."
|
||||
}
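// A minimal sketch of the Fqdn/IsFqdn contract: a trailing unescaped dot marks
// a name as fully qualified, while an escaped trailing dot does not.
func exampleFqdn() {
	_ = Fqdn("example.org")    // "example.org."
	_ = IsFqdn("example.org.") // true
	_ = IsFqdn(`example\.`)    // false: the trailing dot is escaped
}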
|
||||
|
||||
// Copied from the official Go code.
|
||||
|
||||
// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
|
||||
// address suitable for reverse DNS (PTR) record lookups or an error if it fails
|
||||
// to parse the IP address.
|
||||
func ReverseAddr(addr string) (arpa string, err error) {
|
||||
ip := net.ParseIP(addr)
|
||||
if ip == nil {
|
||||
return "", &Error{err: "unrecognized address: " + addr}
|
||||
}
|
||||
if v4 := ip.To4(); v4 != nil {
|
||||
buf := make([]byte, 0, net.IPv4len*4+len("in-addr.arpa."))
|
||||
// Add it, in reverse, to the buffer
|
||||
for i := len(v4) - 1; i >= 0; i-- {
|
||||
buf = strconv.AppendInt(buf, int64(v4[i]), 10)
|
||||
buf = append(buf, '.')
|
||||
}
|
||||
// Append "in-addr.arpa." and return (buf already has the final .)
|
||||
buf = append(buf, "in-addr.arpa."...)
|
||||
return string(buf), nil
|
||||
}
|
||||
// Must be IPv6
|
||||
buf := make([]byte, 0, net.IPv6len*4+len("ip6.arpa."))
|
||||
// Add it, in reverse, to the buffer
|
||||
for i := len(ip) - 1; i >= 0; i-- {
|
||||
v := ip[i]
|
||||
buf = append(buf, hexDigit[v&0xF])
|
||||
buf = append(buf, '.')
|
||||
buf = append(buf, hexDigit[v>>4])
|
||||
buf = append(buf, '.')
|
||||
}
|
||||
// Append "ip6.arpa." and return (buf already has the final .)
|
||||
buf = append(buf, "ip6.arpa."...)
|
||||
return string(buf), nil
|
||||
}
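// A minimal usage sketch: ReverseAddr produces the owner name to query with
// TypePTR for a reverse lookup. The address is a placeholder.
func exampleReverseAddr() (string, error) {
	// For "192.0.2.10" this returns "10.2.0.192.in-addr.arpa.".
	return ReverseAddr("192.0.2.10")
}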
|
||||
|
||||
// String returns the string representation for the type t.
|
||||
func (t Type) String() string {
|
||||
if t1, ok := TypeToString[uint16(t)]; ok {
|
||||
return t1
|
||||
}
|
||||
return "TYPE" + strconv.Itoa(int(t))
|
||||
}
|
||||
|
||||
// String returns the string representation for the class c.
|
||||
func (c Class) String() string {
|
||||
if s, ok := ClassToString[uint16(c)]; ok {
|
||||
// Only emit mnemonics when they are unambiguous; specifically, ANY is in both.
|
||||
if _, ok := StringToType[s]; !ok {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return "CLASS" + strconv.Itoa(int(c))
|
||||
}
|
||||
|
||||
// String returns the string representation for the name n.
|
||||
func (n Name) String() string {
|
||||
return sprintName(string(n))
|
||||
}
|
134
vendor/github.com/miekg/dns/dns.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
|
||||
package dns
|
||||
|
||||
import "strconv"
|
||||
|
||||
const (
|
||||
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
|
||||
defaultTtl = 3600 // Default internal TTL.
|
||||
|
||||
// DefaultMsgSize is the standard default for messages larger than 512 bytes.
|
||||
DefaultMsgSize = 4096
|
||||
// MinMsgSize is the minimal size of a DNS packet.
|
||||
MinMsgSize = 512
|
||||
// MaxMsgSize is the largest possible DNS packet.
|
||||
MaxMsgSize = 65535
|
||||
)
|
||||
|
||||
// Error represents a DNS error.
|
||||
type Error struct{ err string }
|
||||
|
||||
func (e *Error) Error() string {
|
||||
if e == nil {
|
||||
return "dns: <nil>"
|
||||
}
|
||||
return "dns: " + e.err
|
||||
}
|
||||
|
||||
// An RR represents a resource record.
|
||||
type RR interface {
|
||||
// Header returns the header of a resource record. The header contains
|
||||
// everything up to the rdata.
|
||||
Header() *RR_Header
|
||||
// String returns the text representation of the resource record.
|
||||
String() string
|
||||
|
||||
// copy returns a copy of the RR
|
||||
copy() RR
|
||||
|
||||
// len returns the length (in octets) of the compressed or uncompressed RR in wire format.
|
||||
//
|
||||
// If compression is nil, the uncompressed size will be returned, otherwise the compressed
|
||||
// size will be returned and domain names will be added to the map for future compression.
|
||||
len(off int, compression map[string]struct{}) int
|
||||
|
||||
// pack packs the records RDATA into wire format. The header will
|
||||
// already have been packed into msg.
|
||||
pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error)
|
||||
|
||||
// unpack unpacks an RR from wire format.
|
||||
//
|
||||
// This will only be called on a new and empty RR type with only the header populated. It
|
||||
// will only be called if the record's RDATA is non-empty.
|
||||
unpack(msg []byte, off int) (off1 int, err error)
|
||||
|
||||
// parse parses an RR from zone file format.
|
||||
//
|
||||
// This will only be called on a new and empty RR type with only the header populated.
|
||||
parse(c *zlexer, origin, file string) *ParseError
|
||||
|
||||
// isDuplicate returns whether the two RRs are duplicates.
|
||||
isDuplicate(r2 RR) bool
|
||||
}
|
||||
|
||||
// RR_Header is the header all DNS resource records share.
|
||||
type RR_Header struct {
|
||||
Name string `dns:"cdomain-name"`
|
||||
Rrtype uint16
|
||||
Class uint16
|
||||
Ttl uint32
|
||||
Rdlength uint16 // Length of data after header.
|
||||
}
|
||||
|
||||
// Header returns itself. This is here to make RR_Header implement the RR interface.
|
||||
func (h *RR_Header) Header() *RR_Header { return h }
|
||||
|
||||
// Just to implement the RR interface.
|
||||
func (h *RR_Header) copy() RR { return nil }
|
||||
|
||||
func (h *RR_Header) String() string {
|
||||
var s string
|
||||
|
||||
if h.Rrtype == TypeOPT {
|
||||
s = ";"
|
||||
// and maybe other things
|
||||
}
|
||||
|
||||
s += sprintName(h.Name) + "\t"
|
||||
s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
|
||||
s += Class(h.Class).String() + "\t"
|
||||
s += Type(h.Rrtype).String() + "\t"
|
||||
return s
|
||||
}
|
||||
|
||||
func (h *RR_Header) len(off int, compression map[string]struct{}) int {
|
||||
l := domainNameLen(h.Name, off, compression, true)
|
||||
l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
|
||||
return l
|
||||
}
|
||||
|
||||
func (h *RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
|
||||
// RR_Header has no RDATA to pack.
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func (h *RR_Header) unpack(msg []byte, off int) (int, error) {
|
||||
panic("dns: internal error: unpack should never be called on RR_Header")
|
||||
}
|
||||
|
||||
func (h *RR_Header) parse(c *zlexer, origin, file string) *ParseError {
|
||||
panic("dns: internal error: parse should never be called on RR_Header")
|
||||
}
|
||||
|
||||
// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
|
||||
func (rr *RFC3597) ToRFC3597(r RR) error {
|
||||
buf := make([]byte, Len(r)*2)
|
||||
headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buf[:off]
|
||||
|
||||
*rr = RFC3597{Hdr: *r.Header()}
|
||||
rr.Hdr.Rdlength = uint16(off - headerEnd)
|
||||
|
||||
if noRdata(rr.Hdr) {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err = rr.unpack(buf, headerEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
794
vendor/github.com/miekg/dns/dnssec.go
generated
vendored
Normal file
@ -0,0 +1,794 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
_ "crypto/md5"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
_ "crypto/sha1"
|
||||
_ "crypto/sha256"
|
||||
_ "crypto/sha512"
|
||||
"encoding/asn1"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// DNSSEC encryption algorithm codes.
|
||||
const (
|
||||
_ uint8 = iota
|
||||
RSAMD5
|
||||
DH
|
||||
DSA
|
||||
_ // Skip 4, RFC 6725, section 2.1
|
||||
RSASHA1
|
||||
DSANSEC3SHA1
|
||||
RSASHA1NSEC3SHA1
|
||||
RSASHA256
|
||||
_ // Skip 9, RFC 6725, section 2.1
|
||||
RSASHA512
|
||||
_ // Skip 11, RFC 6725, section 2.1
|
||||
ECCGOST
|
||||
ECDSAP256SHA256
|
||||
ECDSAP384SHA384
|
||||
ED25519
|
||||
ED448
|
||||
INDIRECT uint8 = 252
|
||||
PRIVATEDNS uint8 = 253 // Private (experimental keys)
|
||||
PRIVATEOID uint8 = 254
|
||||
)
|
||||
|
||||
// AlgorithmToString is a map of algorithm IDs to algorithm names.
|
||||
var AlgorithmToString = map[uint8]string{
|
||||
RSAMD5: "RSAMD5",
|
||||
DH: "DH",
|
||||
DSA: "DSA",
|
||||
RSASHA1: "RSASHA1",
|
||||
DSANSEC3SHA1: "DSA-NSEC3-SHA1",
|
||||
RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1",
|
||||
RSASHA256: "RSASHA256",
|
||||
RSASHA512: "RSASHA512",
|
||||
ECCGOST: "ECC-GOST",
|
||||
ECDSAP256SHA256: "ECDSAP256SHA256",
|
||||
ECDSAP384SHA384: "ECDSAP384SHA384",
|
||||
ED25519: "ED25519",
|
||||
ED448: "ED448",
|
||||
INDIRECT: "INDIRECT",
|
||||
PRIVATEDNS: "PRIVATEDNS",
|
||||
PRIVATEOID: "PRIVATEOID",
|
||||
}
|
||||
|
||||
// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
|
||||
var AlgorithmToHash = map[uint8]crypto.Hash{
|
||||
RSAMD5: crypto.MD5, // Deprecated in RFC 6725
|
||||
DSA: crypto.SHA1,
|
||||
RSASHA1: crypto.SHA1,
|
||||
RSASHA1NSEC3SHA1: crypto.SHA1,
|
||||
RSASHA256: crypto.SHA256,
|
||||
ECDSAP256SHA256: crypto.SHA256,
|
||||
ECDSAP384SHA384: crypto.SHA384,
|
||||
RSASHA512: crypto.SHA512,
|
||||
ED25519: crypto.Hash(0),
|
||||
}
|
||||
|
||||
// DNSSEC hashing algorithm codes.
|
||||
const (
|
||||
_ uint8 = iota
|
||||
SHA1 // RFC 4034
|
||||
SHA256 // RFC 4509
|
||||
GOST94 // RFC 5933
|
||||
SHA384 // Experimental
|
||||
SHA512 // Experimental
|
||||
)
|
||||
|
||||
// HashToString is a map of hash IDs to names.
|
||||
var HashToString = map[uint8]string{
|
||||
SHA1: "SHA1",
|
||||
SHA256: "SHA256",
|
||||
GOST94: "GOST94",
|
||||
SHA384: "SHA384",
|
||||
SHA512: "SHA512",
|
||||
}
|
||||
|
||||
// DNSKEY flag values.
|
||||
const (
|
||||
SEP = 1
|
||||
REVOKE = 1 << 7
|
||||
ZONE = 1 << 8
|
||||
)
|
||||
|
||||
// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing.
|
||||
type rrsigWireFmt struct {
|
||||
TypeCovered uint16
|
||||
Algorithm uint8
|
||||
Labels uint8
|
||||
OrigTtl uint32
|
||||
Expiration uint32
|
||||
Inception uint32
|
||||
KeyTag uint16
|
||||
SignerName string `dns:"domain-name"`
|
||||
/* No Signature */
|
||||
}
|
||||
|
||||
// Used for converting DNSKEY's rdata to wirefmt.
|
||||
type dnskeyWireFmt struct {
|
||||
Flags uint16
|
||||
Protocol uint8
|
||||
Algorithm uint8
|
||||
PublicKey string `dns:"base64"`
|
||||
/* Nothing is left out */
|
||||
}
|
||||
|
||||
func divRoundUp(a, b int) int {
|
||||
return (a + b - 1) / b
|
||||
}
|
||||
|
||||
// KeyTag calculates the keytag (or key-id) of the DNSKEY.
|
||||
func (k *DNSKEY) KeyTag() uint16 {
|
||||
if k == nil {
|
||||
return 0
|
||||
}
|
||||
var keytag int
|
||||
switch k.Algorithm {
|
||||
case RSAMD5:
|
||||
// Look at the bottom two bytes of the modulus, which is the last
|
||||
// item in the pubkey. We could do this faster by looking directly
|
||||
// at the base64 values. But I'm lazy.
|
||||
modulus, _ := fromBase64([]byte(k.PublicKey))
|
||||
if len(modulus) > 1 {
|
||||
x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
|
||||
keytag = int(x)
|
||||
}
|
||||
default:
|
||||
keywire := new(dnskeyWireFmt)
|
||||
keywire.Flags = k.Flags
|
||||
keywire.Protocol = k.Protocol
|
||||
keywire.Algorithm = k.Algorithm
|
||||
keywire.PublicKey = k.PublicKey
|
||||
wire := make([]byte, DefaultMsgSize)
|
||||
n, err := packKeyWire(keywire, wire)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
wire = wire[:n]
|
||||
for i, v := range wire {
|
||||
if i&1 != 0 {
|
||||
keytag += int(v) // must be larger than uint32
|
||||
} else {
|
||||
keytag += int(v) << 8
|
||||
}
|
||||
}
|
||||
keytag += keytag >> 16 & 0xFFFF
|
||||
keytag &= 0xFFFF
|
||||
}
|
||||
return uint16(keytag)
|
||||
}
|
||||
|
||||
// ToDS converts a DNSKEY record to a DS record.
|
||||
func (k *DNSKEY) ToDS(h uint8) *DS {
|
||||
if k == nil {
|
||||
return nil
|
||||
}
|
||||
ds := new(DS)
|
||||
ds.Hdr.Name = k.Hdr.Name
|
||||
ds.Hdr.Class = k.Hdr.Class
|
||||
ds.Hdr.Rrtype = TypeDS
|
||||
ds.Hdr.Ttl = k.Hdr.Ttl
|
||||
ds.Algorithm = k.Algorithm
|
||||
ds.DigestType = h
|
||||
ds.KeyTag = k.KeyTag()
|
||||
|
||||
keywire := new(dnskeyWireFmt)
|
||||
keywire.Flags = k.Flags
|
||||
keywire.Protocol = k.Protocol
|
||||
keywire.Algorithm = k.Algorithm
|
||||
keywire.PublicKey = k.PublicKey
|
||||
wire := make([]byte, DefaultMsgSize)
|
||||
n, err := packKeyWire(keywire, wire)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
wire = wire[:n]
|
||||
|
||||
owner := make([]byte, 255)
|
||||
off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false)
|
||||
if err1 != nil {
|
||||
return nil
|
||||
}
|
||||
owner = owner[:off]
|
||||
// RFC4034:
|
||||
// digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
|
||||
// "|" denotes concatenation
|
||||
// DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
|
||||
|
||||
var hash crypto.Hash
|
||||
switch h {
|
||||
case SHA1:
|
||||
hash = crypto.SHA1
|
||||
case SHA256:
|
||||
hash = crypto.SHA256
|
||||
case SHA384:
|
||||
hash = crypto.SHA384
|
||||
case SHA512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
s := hash.New()
|
||||
s.Write(owner)
|
||||
s.Write(wire)
|
||||
ds.Digest = hex.EncodeToString(s.Sum(nil))
|
||||
return ds
|
||||
}
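// A minimal usage sketch: derive the DS record and key tag for an already
// populated DNSKEY, as they would be handed to the parent zone.
func exampleToDS(key *DNSKEY) (*DS, uint16) {
	ds := key.ToDS(SHA256) // nil if the digest type is not supported
	return ds, key.KeyTag()
}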
|
||||
|
||||
// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
|
||||
func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
|
||||
c := &CDNSKEY{DNSKEY: *k}
|
||||
c.Hdr = k.Hdr
|
||||
c.Hdr.Rrtype = TypeCDNSKEY
|
||||
return c
|
||||
}
|
||||
|
||||
// ToCDS converts a DS record to a CDS record.
|
||||
func (d *DS) ToCDS() *CDS {
|
||||
c := &CDS{DS: *d}
|
||||
c.Hdr = d.Hdr
|
||||
c.Hdr.Rrtype = TypeCDS
|
||||
return c
|
||||
}
|
||||
|
||||
// Sign signs an RRSet. The signature needs to be filled in with the values:
|
||||
// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
|
||||
// from the RRset. Sign returns a non-nil error when the signing fails.
|
||||
// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non
|
||||
// zero, it is used as-is, otherwise the TTL of the RRset is used as the
|
||||
// OrigTTL.
|
||||
func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
|
||||
if k == nil {
|
||||
return ErrPrivKey
|
||||
}
|
||||
// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
|
||||
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
h0 := rrset[0].Header()
|
||||
rr.Hdr.Rrtype = TypeRRSIG
|
||||
rr.Hdr.Name = h0.Name
|
||||
rr.Hdr.Class = h0.Class
|
||||
if rr.OrigTtl == 0 { // If set don't override
|
||||
rr.OrigTtl = h0.Ttl
|
||||
}
|
||||
rr.TypeCovered = h0.Rrtype
|
||||
rr.Labels = uint8(CountLabel(h0.Name))
|
||||
|
||||
if strings.HasPrefix(h0.Name, "*") {
|
||||
rr.Labels-- // wildcard, remove from label count
|
||||
}
|
||||
|
||||
sigwire := new(rrsigWireFmt)
|
||||
sigwire.TypeCovered = rr.TypeCovered
|
||||
sigwire.Algorithm = rr.Algorithm
|
||||
sigwire.Labels = rr.Labels
|
||||
sigwire.OrigTtl = rr.OrigTtl
|
||||
sigwire.Expiration = rr.Expiration
|
||||
sigwire.Inception = rr.Inception
|
||||
sigwire.KeyTag = rr.KeyTag
|
||||
// For signing, lowercase this name
|
||||
sigwire.SignerName = strings.ToLower(rr.SignerName)
|
||||
|
||||
// Create the desired binary blob
|
||||
signdata := make([]byte, DefaultMsgSize)
|
||||
n, err := packSigWire(sigwire, signdata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signdata = signdata[:n]
|
||||
wire, err := rawSignatureData(rrset, rr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash, ok := AlgorithmToHash[rr.Algorithm]
|
||||
if !ok {
|
||||
return ErrAlg
|
||||
}
|
||||
|
||||
switch rr.Algorithm {
|
||||
case ED25519:
|
||||
// ed25519 signs the raw message and performs hashing internally.
|
||||
// All other supported signature schemes operate over the pre-hashed
|
||||
// message, and thus ed25519 must be handled separately here.
|
||||
//
|
||||
// The raw message is passed directly into sign and crypto.Hash(0) is
|
||||
// used to signal to the crypto.Signer that the data has not been hashed.
|
||||
signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rr.Signature = toBase64(signature)
|
||||
default:
|
||||
h := hash.New()
|
||||
h.Write(signdata)
|
||||
h.Write(wire)
|
||||
|
||||
signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rr.Signature = toBase64(signature)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) {
|
||||
signature, err := k.Sign(rand.Reader, hashed, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch alg {
|
||||
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
|
||||
return signature, nil
|
||||
|
||||
case ECDSAP256SHA256, ECDSAP384SHA384:
|
||||
ecdsaSignature := &struct {
|
||||
R, S *big.Int
|
||||
}{}
|
||||
if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var intlen int
|
||||
switch alg {
|
||||
case ECDSAP256SHA256:
|
||||
intlen = 32
|
||||
case ECDSAP384SHA384:
|
||||
intlen = 48
|
||||
}
|
||||
|
||||
signature := intToBytes(ecdsaSignature.R, intlen)
|
||||
signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...)
|
||||
return signature, nil
|
||||
|
||||
// There is no defined interface for what a DSA backed crypto.Signer returns
|
||||
case DSA, DSANSEC3SHA1:
|
||||
// t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
|
||||
// signature := []byte{byte(t)}
|
||||
// signature = append(signature, intToBytes(r1, 20)...)
|
||||
// signature = append(signature, intToBytes(s1, 20)...)
|
||||
// rr.Signature = signature
|
||||
|
||||
case ED25519:
|
||||
return signature, nil
|
||||
}
|
||||
|
||||
return nil, ErrAlg
|
||||
}
|
||||
|
||||
// Verify validates an RRSet with the signature and key. This is only the
|
||||
// cryptographic test; the signature validity period must be checked separately.
|
||||
// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
|
||||
func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
|
||||
// First the easy checks
|
||||
if !IsRRset(rrset) {
|
||||
return ErrRRset
|
||||
}
|
||||
if rr.KeyTag != k.KeyTag() {
|
||||
return ErrKey
|
||||
}
|
||||
if rr.Hdr.Class != k.Hdr.Class {
|
||||
return ErrKey
|
||||
}
|
||||
if rr.Algorithm != k.Algorithm {
|
||||
return ErrKey
|
||||
}
|
||||
if !strings.EqualFold(rr.SignerName, k.Hdr.Name) {
|
||||
return ErrKey
|
||||
}
|
||||
if k.Protocol != 3 {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
// IsRRset checked that we have at least one RR and that the RRs in
|
||||
// the set have consistent type, class, and name. Also check that type and
|
||||
// class matches the RRSIG record.
|
||||
if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered {
|
||||
return ErrRRset
|
||||
}
|
||||
|
||||
// RFC 4035 5.3.2. Reconstructing the Signed Data
|
||||
// Copy the sig, except the rrsig data
|
||||
sigwire := new(rrsigWireFmt)
|
||||
sigwire.TypeCovered = rr.TypeCovered
|
||||
sigwire.Algorithm = rr.Algorithm
|
||||
sigwire.Labels = rr.Labels
|
||||
sigwire.OrigTtl = rr.OrigTtl
|
||||
sigwire.Expiration = rr.Expiration
|
||||
sigwire.Inception = rr.Inception
|
||||
sigwire.KeyTag = rr.KeyTag
|
||||
sigwire.SignerName = strings.ToLower(rr.SignerName)
|
||||
// Create the desired binary blob
|
||||
signeddata := make([]byte, DefaultMsgSize)
|
||||
n, err := packSigWire(sigwire, signeddata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signeddata = signeddata[:n]
|
||||
wire, err := rawSignatureData(rrset, rr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sigbuf := rr.sigBuf() // Get the binary signature data
|
||||
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
|
||||
// TODO(miek)
|
||||
// remove the domain name and assume it's ours?
|
||||
}
|
||||
|
||||
hash, ok := AlgorithmToHash[rr.Algorithm]
|
||||
if !ok {
|
||||
return ErrAlg
|
||||
}
|
||||
|
||||
switch rr.Algorithm {
|
||||
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
|
||||
// TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
|
||||
pubkey := k.publicKeyRSA() // Get the key
|
||||
if pubkey == nil {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
h := hash.New()
|
||||
h.Write(signeddata)
|
||||
h.Write(wire)
|
||||
return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
|
||||
|
||||
case ECDSAP256SHA256, ECDSAP384SHA384:
|
||||
pubkey := k.publicKeyECDSA()
|
||||
if pubkey == nil {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
// Split sigbuf into the r and s coordinates
|
||||
r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
|
||||
s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
|
||||
|
||||
h := hash.New()
|
||||
h.Write(signeddata)
|
||||
h.Write(wire)
|
||||
if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
|
||||
return nil
|
||||
}
|
||||
return ErrSig
|
||||
|
||||
case ED25519:
|
||||
pubkey := k.publicKeyED25519()
|
||||
if pubkey == nil {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) {
|
||||
return nil
|
||||
}
|
||||
return ErrSig
|
||||
|
||||
default:
|
||||
return ErrAlg
|
||||
}
|
||||
}
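// A minimal sketch of signing and verifying an RRset with a freshly generated
// ECDSA P-256 key (Generate lives in dnssec_keygen.go). The zone name and the
// A record are placeholders.
func exampleSignVerify() error {
	key := &DNSKEY{
		Hdr:       RR_Header{Name: "example.org.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600},
		Flags:     ZONE | SEP,
		Protocol:  3,
		Algorithm: ECDSAP256SHA256,
	}
	priv, err := key.Generate(256)
	if err != nil {
		return err
	}

	a, err := NewRR("www.example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		return err
	}
	rrset := []RR{a}

	sig := &RRSIG{
		KeyTag:     key.KeyTag(),
		SignerName: key.Hdr.Name,
		Algorithm:  key.Algorithm,
		Inception:  uint32(time.Now().Unix()),
		Expiration: uint32(time.Now().Add(14 * 24 * time.Hour).Unix()),
	}
	if err := sig.Sign(priv.(*ecdsa.PrivateKey), rrset); err != nil {
		return err
	}
	return sig.Verify(key, rrset)
}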
|
||||
|
||||
// ValidityPeriod uses RFC1982 serial arithmetic to calculate
|
||||
// if a signature period is valid. If t is the zero time, the
|
||||
// current time is used, otherwise t is. Returns true if the signature
|
||||
// is valid at the given time, otherwise returns false.
|
||||
func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
|
||||
var utc int64
|
||||
if t.IsZero() {
|
||||
utc = time.Now().UTC().Unix()
|
||||
} else {
|
||||
utc = t.UTC().Unix()
|
||||
}
|
||||
modi := (int64(rr.Inception) - utc) / year68
|
||||
mode := (int64(rr.Expiration) - utc) / year68
|
||||
ti := int64(rr.Inception) + modi*year68
|
||||
te := int64(rr.Expiration) + mode*year68
|
||||
return ti <= utc && utc <= te
|
||||
}
|
||||
|
||||
// Return the signature's base64-decoded sigdata as a byte slice.
|
||||
func (rr *RRSIG) sigBuf() []byte {
|
||||
sigbuf, err := fromBase64([]byte(rr.Signature))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return sigbuf
|
||||
}
|
||||
|
||||
// publicKeyRSA returns the RSA public key from a DNSKEY record.
|
||||
func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(keybuf) < 1+1+64 {
|
||||
// Exponent must be at least 1 byte and modulus at least 64
|
||||
return nil
|
||||
}
|
||||
|
||||
// RFC 2537/3110, section 2. RSA Public KEY Resource Records
|
||||
// Length is in the 0th byte, unless it's zero, then it
|
||||
// is in bytes 1 and 2 and it's a 16-bit number
|
||||
explen := uint16(keybuf[0])
|
||||
keyoff := 1
|
||||
if explen == 0 {
|
||||
explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
|
||||
keyoff = 3
|
||||
}
|
||||
|
||||
if explen > 4 || explen == 0 || keybuf[keyoff] == 0 {
|
||||
// Exponent larger than supported by the crypto package,
|
||||
// empty, or contains prohibited leading zero.
|
||||
return nil
|
||||
}
|
||||
|
||||
modoff := keyoff + int(explen)
|
||||
modlen := len(keybuf) - modoff
|
||||
if modlen < 64 || modlen > 512 || keybuf[modoff] == 0 {
|
||||
// Modulus is too small, large, or contains prohibited leading zero.
|
||||
return nil
|
||||
}
|
||||
|
||||
pubkey := new(rsa.PublicKey)
|
||||
|
||||
var expo uint64
|
||||
for i := 0; i < int(explen); i++ {
|
||||
expo <<= 8
|
||||
expo |= uint64(keybuf[keyoff+i])
|
||||
}
|
||||
if expo > 1<<31-1 {
|
||||
// Larger exponent than supported by the crypto package.
|
||||
return nil
|
||||
}
|
||||
pubkey.E = int(expo)
|
||||
|
||||
pubkey.N = big.NewInt(0)
|
||||
pubkey.N.SetBytes(keybuf[modoff:])
|
||||
|
||||
return pubkey
|
||||
}
|
||||
|
||||
// publicKeyECDSA returns the Curve public key from the DNSKEY record.
|
||||
func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
pubkey := new(ecdsa.PublicKey)
|
||||
switch k.Algorithm {
|
||||
case ECDSAP256SHA256:
|
||||
pubkey.Curve = elliptic.P256()
|
||||
if len(keybuf) != 64 {
|
||||
// wrongly encoded key
|
||||
return nil
|
||||
}
|
||||
case ECDSAP384SHA384:
|
||||
pubkey.Curve = elliptic.P384()
|
||||
if len(keybuf) != 96 {
|
||||
// Wrongly encoded key
|
||||
return nil
|
||||
}
|
||||
}
|
||||
pubkey.X = big.NewInt(0)
|
||||
pubkey.X.SetBytes(keybuf[:len(keybuf)/2])
|
||||
pubkey.Y = big.NewInt(0)
|
||||
pubkey.Y.SetBytes(keybuf[len(keybuf)/2:])
|
||||
return pubkey
|
||||
}
|
||||
|
||||
func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if len(keybuf) < 22 {
|
||||
return nil
|
||||
}
|
||||
t, keybuf := int(keybuf[0]), keybuf[1:]
|
||||
size := 64 + t*8
|
||||
q, keybuf := keybuf[:20], keybuf[20:]
|
||||
if len(keybuf) != 3*size {
|
||||
return nil
|
||||
}
|
||||
p, keybuf := keybuf[:size], keybuf[size:]
|
||||
g, y := keybuf[:size], keybuf[size:]
|
||||
pubkey := new(dsa.PublicKey)
|
||||
pubkey.Parameters.Q = big.NewInt(0).SetBytes(q)
|
||||
pubkey.Parameters.P = big.NewInt(0).SetBytes(p)
|
||||
pubkey.Parameters.G = big.NewInt(0).SetBytes(g)
|
||||
pubkey.Y = big.NewInt(0).SetBytes(y)
|
||||
return pubkey
|
||||
}
|
||||
|
||||
func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if len(keybuf) != ed25519.PublicKeySize {
|
||||
return nil
|
||||
}
|
||||
return keybuf
|
||||
}
|
||||
|
||||
type wireSlice [][]byte
|
||||
|
||||
func (p wireSlice) Len() int { return len(p) }
|
||||
func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
func (p wireSlice) Less(i, j int) bool {
|
||||
_, ioff, _ := UnpackDomainName(p[i], 0)
|
||||
_, joff, _ := UnpackDomainName(p[j], 0)
|
||||
return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
|
||||
}
|
||||
|
||||
// Return the raw signature data.
|
||||
func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
|
||||
wires := make(wireSlice, len(rrset))
|
||||
for i, r := range rrset {
|
||||
r1 := r.copy()
|
||||
h := r1.Header()
|
||||
h.Ttl = s.OrigTtl
|
||||
labels := SplitDomainName(h.Name)
|
||||
// 6.2. Canonical RR Form. (4) - wildcards
|
||||
if len(labels) > int(s.Labels) {
|
||||
// Wildcard
|
||||
h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
|
||||
}
|
||||
// RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
|
||||
h.Name = strings.ToLower(h.Name)
|
||||
// 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
|
||||
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
|
||||
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
|
||||
// SRV, DNAME, A6
|
||||
//
|
||||
// RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC):
|
||||
// Section 6.2 of [RFC4034] also erroneously lists HINFO as a record
|
||||
// that needs conversion to lowercase, and twice at that. Since HINFO
|
||||
// records contain no domain names, they are not subject to case
|
||||
// conversion.
|
||||
switch x := r1.(type) {
|
||||
case *NS:
|
||||
x.Ns = strings.ToLower(x.Ns)
|
||||
case *MD:
|
||||
x.Md = strings.ToLower(x.Md)
|
||||
case *MF:
|
||||
x.Mf = strings.ToLower(x.Mf)
|
||||
case *CNAME:
|
||||
x.Target = strings.ToLower(x.Target)
|
||||
case *SOA:
|
||||
x.Ns = strings.ToLower(x.Ns)
|
||||
x.Mbox = strings.ToLower(x.Mbox)
|
||||
case *MB:
|
||||
x.Mb = strings.ToLower(x.Mb)
|
||||
case *MG:
|
||||
x.Mg = strings.ToLower(x.Mg)
|
||||
case *MR:
|
||||
x.Mr = strings.ToLower(x.Mr)
|
||||
case *PTR:
|
||||
x.Ptr = strings.ToLower(x.Ptr)
|
||||
case *MINFO:
|
||||
x.Rmail = strings.ToLower(x.Rmail)
|
||||
x.Email = strings.ToLower(x.Email)
|
||||
case *MX:
|
||||
x.Mx = strings.ToLower(x.Mx)
|
||||
case *RP:
|
||||
x.Mbox = strings.ToLower(x.Mbox)
|
||||
x.Txt = strings.ToLower(x.Txt)
|
||||
case *AFSDB:
|
||||
x.Hostname = strings.ToLower(x.Hostname)
|
||||
case *RT:
|
||||
x.Host = strings.ToLower(x.Host)
|
||||
case *SIG:
|
||||
x.SignerName = strings.ToLower(x.SignerName)
|
||||
case *PX:
|
||||
x.Map822 = strings.ToLower(x.Map822)
|
||||
x.Mapx400 = strings.ToLower(x.Mapx400)
|
||||
case *NAPTR:
|
||||
x.Replacement = strings.ToLower(x.Replacement)
|
||||
case *KX:
|
||||
x.Exchanger = strings.ToLower(x.Exchanger)
|
||||
case *SRV:
|
||||
x.Target = strings.ToLower(x.Target)
|
||||
case *DNAME:
|
||||
x.Target = strings.ToLower(x.Target)
|
||||
}
|
||||
// 6.2. Canonical RR Form. (5) - origTTL
|
||||
wire := make([]byte, Len(r1)+1) // +1 to be safe(r)
|
||||
off, err1 := PackRR(r1, wire, 0, nil, false)
|
||||
if err1 != nil {
|
||||
return nil, err1
|
||||
}
|
||||
wire = wire[:off]
|
||||
wires[i] = wire
|
||||
}
|
||||
sort.Sort(wires)
|
||||
for i, wire := range wires {
|
||||
if i > 0 && bytes.Equal(wire, wires[i-1]) {
|
||||
continue
|
||||
}
|
||||
buf = append(buf, wire...)
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) {
|
||||
// copied from zmsg.go RRSIG packing
|
||||
off, err := packUint16(sw.TypeCovered, msg, 0)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint8(sw.Algorithm, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint8(sw.Labels, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint32(sw.OrigTtl, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint32(sw.Expiration, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint32(sw.Inception, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint16(sw.KeyTag, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = PackDomainName(sw.SignerName, msg, off, nil, false)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) {
|
||||
// copied from zmsg.go DNSKEY packing
|
||||
off, err := packUint16(dw.Flags, msg, 0)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint8(dw.Protocol, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint8(dw.Algorithm, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packStringBase64(dw.PublicKey, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
178
vendor/github.com/miekg/dns/dnssec_keygen.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// Generate generates a DNSKEY of the given bit size.
|
||||
// The public part is put inside the DNSKEY record.
|
||||
// The Algorithm in the key must be set as this will define
|
||||
// what kind of DNSKEY will be generated.
|
||||
// The ECDSA algorithms imply a fixed keysize, in that case
|
||||
// bits should be set to the size of the algorithm.
|
||||
func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
|
||||
switch k.Algorithm {
|
||||
case DSA, DSANSEC3SHA1:
|
||||
if bits != 1024 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
|
||||
if bits < 512 || bits > 4096 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case RSASHA512:
|
||||
if bits < 1024 || bits > 4096 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case ECDSAP256SHA256:
|
||||
if bits != 256 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case ECDSAP384SHA384:
|
||||
if bits != 384 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case ED25519:
|
||||
if bits != 256 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
}
|
||||
|
||||
switch k.Algorithm {
|
||||
case DSA, DSANSEC3SHA1:
|
||||
params := new(dsa.Parameters)
|
||||
if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
priv := new(dsa.PrivateKey)
|
||||
priv.PublicKey.Parameters = *params
|
||||
err := dsa.GenerateKey(priv, rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y)
|
||||
return priv, nil
|
||||
case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
|
||||
priv, err := rsa.GenerateKey(rand.Reader, bits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
|
||||
return priv, nil
|
||||
case ECDSAP256SHA256, ECDSAP384SHA384:
|
||||
var c elliptic.Curve
|
||||
switch k.Algorithm {
|
||||
case ECDSAP256SHA256:
|
||||
c = elliptic.P256()
|
||||
case ECDSAP384SHA384:
|
||||
c = elliptic.P384()
|
||||
}
|
||||
priv, err := ecdsa.GenerateKey(c, rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
|
||||
return priv, nil
|
||||
case ED25519:
|
||||
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.setPublicKeyED25519(pub)
|
||||
return priv, nil
|
||||
default:
|
||||
return nil, ErrAlg
|
||||
}
|
||||
}
|
||||
|
||||
// Set the public key (the value E and N)
|
||||
func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool {
|
||||
if _E == 0 || _N == nil {
|
||||
return false
|
||||
}
|
||||
buf := exponentToBuf(_E)
|
||||
buf = append(buf, _N.Bytes()...)
|
||||
k.PublicKey = toBase64(buf)
|
||||
return true
|
||||
}
|
||||
|
||||
// Set the public key for Elliptic Curves
|
||||
func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool {
|
||||
if _X == nil || _Y == nil {
|
||||
return false
|
||||
}
|
||||
var intlen int
|
||||
switch k.Algorithm {
|
||||
case ECDSAP256SHA256:
|
||||
intlen = 32
|
||||
case ECDSAP384SHA384:
|
||||
intlen = 48
|
||||
}
|
||||
k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen))
|
||||
return true
|
||||
}
|
||||
|
||||
// Set the public key for DSA
|
||||
func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool {
|
||||
if _Q == nil || _P == nil || _G == nil || _Y == nil {
|
||||
return false
|
||||
}
|
||||
buf := dsaToBuf(_Q, _P, _G, _Y)
|
||||
k.PublicKey = toBase64(buf)
|
||||
return true
|
||||
}
|
||||
|
||||
// Set the public key for Ed25519
|
||||
func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool {
|
||||
if _K == nil {
|
||||
return false
|
||||
}
|
||||
k.PublicKey = toBase64(_K)
|
||||
return true
|
||||
}
|
||||
|
||||
// Set the public key (the values E and N) for RSA
|
||||
// RFC 3110: Section 2. RSA Public KEY Resource Records
|
||||
func exponentToBuf(_E int) []byte {
|
||||
var buf []byte
|
||||
i := big.NewInt(int64(_E)).Bytes()
|
||||
if len(i) < 256 {
|
||||
buf = make([]byte, 1, 1+len(i))
|
||||
buf[0] = uint8(len(i))
|
||||
} else {
|
||||
buf = make([]byte, 3, 3+len(i))
|
||||
buf[0] = 0
|
||||
buf[1] = uint8(len(i) >> 8)
|
||||
buf[2] = uint8(len(i))
|
||||
}
|
||||
buf = append(buf, i...)
|
||||
return buf
|
||||
}
|
||||
|
||||
// Set the public key for X and Y for Curve. The two
|
||||
// values are just concatenated.
|
||||
func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
|
||||
buf := intToBytes(_X, intlen)
|
||||
buf = append(buf, intToBytes(_Y, intlen)...)
|
||||
return buf
|
||||
}
|
||||
|
||||
// Build the DSA public key buffer. The values Q, P, G and Y
|
||||
// are concatenated after the size octet T.
|
||||
func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte {
|
||||
t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8)
|
||||
buf := []byte{byte(t)}
|
||||
buf = append(buf, intToBytes(_Q, 20)...)
|
||||
buf = append(buf, intToBytes(_P, 64+t*8)...)
|
||||
buf = append(buf, intToBytes(_G, 64+t*8)...)
|
||||
buf = append(buf, intToBytes(_Y, 64+t*8)...)
|
||||
return buf
|
||||
}
|
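The file above only adds the generator; a minimal usage sketch (not taken from this commit, record values are illustrative) would drive it from a populated DNSKEY and serialize the result with PrivateKeyString from dnssec_privkey.go:

	key := new(dns.DNSKEY)
	key.Hdr = dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
	key.Flags = 256 // zone signing key
	key.Protocol = 3
	key.Algorithm = dns.ECDSAP256SHA256
	priv, err := key.Generate(256) // bits must match the fixed ECDSA key size
	if err != nil {
		// handle error
	}
	fmt.Println(key.String())               // DNSKEY RR with the public key filled in
	fmt.Println(key.PrivateKeyString(priv)) // BIND v1.3 private key text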
352
vendor/github.com/miekg/dns/dnssec_keyscan.go
generated
vendored
Normal file
@ -0,0 +1,352 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"io"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// NewPrivateKey returns a PrivateKey by parsing the string s.
|
||||
// s should be in the same form as the BIND private key files.
|
||||
func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
|
||||
if s == "" || s[len(s)-1] != '\n' { // We need a closing newline
|
||||
return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
|
||||
}
|
||||
return k.ReadPrivateKey(strings.NewReader(s), "")
|
||||
}
|
||||
|
||||
// ReadPrivateKey reads a private key from the io.Reader q. The string file is
|
||||
// only used in error reporting.
|
||||
// The public key must be known, because some cryptographic algorithms embed
|
||||
// the public key inside the private key.
|
||||
func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
|
||||
m, err := parseKey(q, file)
|
||||
if m == nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, ok := m["private-key-format"]; !ok {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
// TODO(mg): check if the pubkey matches the private key
|
||||
algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8)
|
||||
if err != nil {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
switch uint8(algo) {
|
||||
case DSA:
|
||||
priv, err := readPrivateKeyDSA(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pub := k.publicKeyDSA()
|
||||
if pub == nil {
|
||||
return nil, ErrKey
|
||||
}
|
||||
priv.PublicKey = *pub
|
||||
return priv, nil
|
||||
case RSAMD5:
|
||||
fallthrough
|
||||
case RSASHA1:
|
||||
fallthrough
|
||||
case RSASHA1NSEC3SHA1:
|
||||
fallthrough
|
||||
case RSASHA256:
|
||||
fallthrough
|
||||
case RSASHA512:
|
||||
priv, err := readPrivateKeyRSA(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pub := k.publicKeyRSA()
|
||||
if pub == nil {
|
||||
return nil, ErrKey
|
||||
}
|
||||
priv.PublicKey = *pub
|
||||
return priv, nil
|
||||
case ECCGOST:
|
||||
return nil, ErrPrivKey
|
||||
case ECDSAP256SHA256:
|
||||
fallthrough
|
||||
case ECDSAP384SHA384:
|
||||
priv, err := readPrivateKeyECDSA(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pub := k.publicKeyECDSA()
|
||||
if pub == nil {
|
||||
return nil, ErrKey
|
||||
}
|
||||
priv.PublicKey = *pub
|
||||
return priv, nil
|
||||
case ED25519:
|
||||
return readPrivateKeyED25519(m)
|
||||
default:
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
}
|
||||
|
||||
// Read a private key (file) string and create a public key. Return the private key.
|
||||
func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
|
||||
p := new(rsa.PrivateKey)
|
||||
p.Primes = []*big.Int{nil, nil}
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "modulus", "publicexponent", "privateexponent", "prime1", "prime2":
|
||||
v1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch k {
|
||||
case "modulus":
|
||||
p.PublicKey.N = big.NewInt(0)
|
||||
p.PublicKey.N.SetBytes(v1)
|
||||
case "publicexponent":
|
||||
i := big.NewInt(0)
|
||||
i.SetBytes(v1)
|
||||
p.PublicKey.E = int(i.Int64()) // int64 should be large enough
|
||||
case "privateexponent":
|
||||
p.D = big.NewInt(0)
|
||||
p.D.SetBytes(v1)
|
||||
case "prime1":
|
||||
p.Primes[0] = big.NewInt(0)
|
||||
p.Primes[0].SetBytes(v1)
|
||||
case "prime2":
|
||||
p.Primes[1] = big.NewInt(0)
|
||||
p.Primes[1].SetBytes(v1)
|
||||
}
|
||||
case "exponent1", "exponent2", "coefficient":
|
||||
// not used in Go (yet)
|
||||
case "created", "publish", "activate":
|
||||
// not used in Go (yet)
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) {
|
||||
p := new(dsa.PrivateKey)
|
||||
p.X = big.NewInt(0)
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "private_value(x)":
|
||||
v1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.X.SetBytes(v1)
|
||||
case "created", "publish", "activate":
|
||||
/* not used in Go (yet) */
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
|
||||
p := new(ecdsa.PrivateKey)
|
||||
p.D = big.NewInt(0)
|
||||
// TODO: validate that the required flags are present
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "privatekey":
|
||||
v1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.D.SetBytes(v1)
|
||||
case "created", "publish", "activate":
|
||||
/* not used in Go (yet) */
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) {
|
||||
var p ed25519.PrivateKey
|
||||
// TODO: validate that the required flags are present
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "privatekey":
|
||||
p1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(p1) != ed25519.SeedSize {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
p = ed25519.NewKeyFromSeed(p1)
|
||||
case "created", "publish", "activate":
|
||||
/* not used in Go (yet) */
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// parseKey reads a private key from r. It returns a map[string]string,
|
||||
// with the key-value pairs, or an error when the file is not correct.
|
||||
func parseKey(r io.Reader, file string) (map[string]string, error) {
|
||||
m := make(map[string]string)
|
||||
var k string
|
||||
|
||||
c := newKLexer(r)
|
||||
|
||||
for l, ok := c.Next(); ok; l, ok = c.Next() {
|
||||
// It should alternate
|
||||
switch l.value {
|
||||
case zKey:
|
||||
k = l.token
|
||||
case zValue:
|
||||
if k == "" {
|
||||
return nil, &ParseError{file, "no private key seen", l}
|
||||
}
|
||||
|
||||
m[strings.ToLower(k)] = l.token
|
||||
k = ""
|
||||
}
|
||||
}
|
||||
|
||||
// Surface any read errors from r.
|
||||
if err := c.Err(); err != nil {
|
||||
return nil, &ParseError{file: file, err: err.Error()}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
type klexer struct {
|
||||
br io.ByteReader
|
||||
|
||||
readErr error
|
||||
|
||||
line int
|
||||
column int
|
||||
|
||||
key bool
|
||||
|
||||
eol bool // end-of-line
|
||||
}
|
||||
|
||||
func newKLexer(r io.Reader) *klexer {
|
||||
br, ok := r.(io.ByteReader)
|
||||
if !ok {
|
||||
br = bufio.NewReaderSize(r, 1024)
|
||||
}
|
||||
|
||||
return &klexer{
|
||||
br: br,
|
||||
|
||||
line: 1,
|
||||
|
||||
key: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (kl *klexer) Err() error {
|
||||
if kl.readErr == io.EOF {
|
||||
return nil
|
||||
}
|
||||
|
||||
return kl.readErr
|
||||
}
|
||||
|
||||
// readByte returns the next byte from the input
|
||||
func (kl *klexer) readByte() (byte, bool) {
|
||||
if kl.readErr != nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
c, err := kl.br.ReadByte()
|
||||
if err != nil {
|
||||
kl.readErr = err
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// delay the newline handling until the next token is delivered,
|
||||
// fixes off-by-one errors when reporting a parse error.
|
||||
if kl.eol {
|
||||
kl.line++
|
||||
kl.column = 0
|
||||
kl.eol = false
|
||||
}
|
||||
|
||||
if c == '\n' {
|
||||
kl.eol = true
|
||||
} else {
|
||||
kl.column++
|
||||
}
|
||||
|
||||
return c, true
|
||||
}
|
||||
|
||||
func (kl *klexer) Next() (lex, bool) {
|
||||
var (
|
||||
l lex
|
||||
|
||||
str strings.Builder
|
||||
|
||||
commt bool
|
||||
)
|
||||
|
||||
for x, ok := kl.readByte(); ok; x, ok = kl.readByte() {
|
||||
l.line, l.column = kl.line, kl.column
|
||||
|
||||
switch x {
|
||||
case ':':
|
||||
if commt || !kl.key {
|
||||
break
|
||||
}
|
||||
|
||||
kl.key = false
|
||||
|
||||
// Next token is a space, eat it
|
||||
kl.readByte()
|
||||
|
||||
l.value = zKey
|
||||
l.token = str.String()
|
||||
return l, true
|
||||
case ';':
|
||||
commt = true
|
||||
case '\n':
|
||||
if commt {
|
||||
// Reset a comment
|
||||
commt = false
|
||||
}
|
||||
|
||||
kl.key = true
|
||||
|
||||
l.value = zValue
|
||||
l.token = str.String()
|
||||
return l, true
|
||||
default:
|
||||
if commt {
|
||||
break
|
||||
}
|
||||
|
||||
str.WriteByte(x)
|
||||
}
|
||||
}
|
||||
|
||||
if kl.readErr != nil && kl.readErr != io.EOF {
|
||||
// Don't return any tokens after a read error occurs.
|
||||
return lex{value: zEOF}, false
|
||||
}
|
||||
|
||||
if str.Len() > 0 {
|
||||
// Send remainder
|
||||
l.value = zValue
|
||||
l.token = str.String()
|
||||
return l, true
|
||||
}
|
||||
|
||||
return lex{value: zEOF}, false
|
||||
}
|
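A hedged round-trip sketch for the key scanner above, assuming key and priv from the generation example earlier:

	s := key.PrivateKeyString(priv)       // BIND-style private key text
	restored, err := key.NewPrivateKey(s) // parsed back through the scanner above
	if err != nil {
		// handle error
	}
	_ = restored // e.g. *ecdsa.PrivateKey carrying the same key material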
93
vendor/github.com/miekg/dns/dnssec_privkey.go
generated
vendored
Normal file
@ -0,0 +1,93 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
const format = "Private-key-format: v1.3\n"
|
||||
|
||||
// PrivateKeyString converts a PrivateKey to a string. This string has the same
|
||||
// format as the private-key-file of BIND9 (Private-key-format: v1.3).
|
||||
// It needs some info from the key (the algorithm), so it is a method of the DNSKEY.
|
||||
// It supports rsa.PrivateKey, ecdsa.PrivateKey, dsa.PrivateKey and ed25519.PrivateKey.
|
||||
func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
|
||||
algorithm := strconv.Itoa(int(r.Algorithm))
|
||||
algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"
|
||||
|
||||
switch p := p.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
modulus := toBase64(p.PublicKey.N.Bytes())
|
||||
e := big.NewInt(int64(p.PublicKey.E))
|
||||
publicExponent := toBase64(e.Bytes())
|
||||
privateExponent := toBase64(p.D.Bytes())
|
||||
prime1 := toBase64(p.Primes[0].Bytes())
|
||||
prime2 := toBase64(p.Primes[1].Bytes())
|
||||
// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
|
||||
// and from: http://code.google.com/p/go/issues/detail?id=987
|
||||
one := big.NewInt(1)
|
||||
p1 := big.NewInt(0).Sub(p.Primes[0], one)
|
||||
q1 := big.NewInt(0).Sub(p.Primes[1], one)
|
||||
exp1 := big.NewInt(0).Mod(p.D, p1)
|
||||
exp2 := big.NewInt(0).Mod(p.D, q1)
|
||||
coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0])
|
||||
|
||||
exponent1 := toBase64(exp1.Bytes())
|
||||
exponent2 := toBase64(exp2.Bytes())
|
||||
coefficient := toBase64(coeff.Bytes())
|
||||
|
||||
return format +
|
||||
"Algorithm: " + algorithm + "\n" +
|
||||
"Modulus: " + modulus + "\n" +
|
||||
"PublicExponent: " + publicExponent + "\n" +
|
||||
"PrivateExponent: " + privateExponent + "\n" +
|
||||
"Prime1: " + prime1 + "\n" +
|
||||
"Prime2: " + prime2 + "\n" +
|
||||
"Exponent1: " + exponent1 + "\n" +
|
||||
"Exponent2: " + exponent2 + "\n" +
|
||||
"Coefficient: " + coefficient + "\n"
|
||||
|
||||
case *ecdsa.PrivateKey:
|
||||
var intlen int
|
||||
switch r.Algorithm {
|
||||
case ECDSAP256SHA256:
|
||||
intlen = 32
|
||||
case ECDSAP384SHA384:
|
||||
intlen = 48
|
||||
}
|
||||
private := toBase64(intToBytes(p.D, intlen))
|
||||
return format +
|
||||
"Algorithm: " + algorithm + "\n" +
|
||||
"PrivateKey: " + private + "\n"
|
||||
|
||||
case *dsa.PrivateKey:
|
||||
T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
|
||||
prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8))
|
||||
subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20))
|
||||
base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
|
||||
priv := toBase64(intToBytes(p.X, 20))
|
||||
pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
|
||||
return format +
|
||||
"Algorithm: " + algorithm + "\n" +
|
||||
"Prime(p): " + prime + "\n" +
|
||||
"Subprime(q): " + subprime + "\n" +
|
||||
"Base(g): " + base + "\n" +
|
||||
"Private_value(x): " + priv + "\n" +
|
||||
"Public_value(y): " + pub + "\n"
|
||||
|
||||
case ed25519.PrivateKey:
|
||||
private := toBase64(p.Seed())
|
||||
return format +
|
||||
"Algorithm: " + algorithm + "\n" +
|
||||
"PrivateKey: " + private + "\n"
|
||||
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
269
vendor/github.com/miekg/dns/doc.go
generated
vendored
Normal file
@ -0,0 +1,269 @@
|
||||
/*
|
||||
Package dns implements a full featured interface to the Domain Name System.
|
||||
Both server- and client-side programming is supported. The package allows
|
||||
complete control over what is sent out to the DNS. The API follows the
|
||||
less-is-more principle, by presenting a small, clean interface.
|
||||
|
||||
It supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
|
||||
TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
|
||||
|
||||
Note that domain names MUST be fully qualified before sending them; unqualified
|
||||
names in a message will result in a packing failure.
|
||||
|
||||
Resource records are native types. They are not stored in wire format. Basic
|
||||
usage pattern for creating a new resource record:
|
||||
|
||||
r := new(dns.MX)
|
||||
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
|
||||
r.Preference = 10
|
||||
r.Mx = "mx.miek.nl."
|
||||
|
||||
Or directly from a string:
|
||||
|
||||
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
|
||||
|
||||
Or when the default origin (.) and TTL (3600) and class (IN) suit you:
|
||||
|
||||
mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
|
||||
|
||||
Or even:
|
||||
|
||||
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
|
||||
|
||||
In the DNS, messages are exchanged; these messages contain resource records
|
||||
(sets). Basic use pattern for creating a message:
|
||||
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion("miek.nl.", dns.TypeMX)
|
||||
|
||||
Or when not certain if the domain name is fully qualified:
|
||||
|
||||
m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
|
||||
|
||||
The message m is now a message with the question section set to ask the MX
|
||||
records for the miek.nl. zone.
|
||||
|
||||
The following is slightly more verbose, but more flexible:
|
||||
|
||||
m1 := new(dns.Msg)
|
||||
m1.Id = dns.Id()
|
||||
m1.RecursionDesired = true
|
||||
m1.Question = make([]dns.Question, 1)
|
||||
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
|
||||
|
||||
After creating a message it can be sent. Basic use pattern for synchronous
|
||||
querying the DNS at a server configured on 127.0.0.1 and port 53:
|
||||
|
||||
c := new(dns.Client)
|
||||
in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
|
||||
|
||||
Suppressing multiple outstanding queries (with the same question, type and
|
||||
class) is as easy as setting:
|
||||
|
||||
c.SingleInflight = true
|
||||
|
||||
More advanced options are available using a net.Dialer and the corresponding API.
|
||||
For example it is possible to set a timeout, or to specify a source IP address
|
||||
and port to use for the connection:
|
||||
|
||||
c := new(dns.Client)
|
||||
laddr := net.UDPAddr{
|
||||
IP: net.ParseIP("[::1]"),
|
||||
Port: 12345,
|
||||
Zone: "",
|
||||
}
|
||||
c.Dialer = &net.Dialer{
|
||||
Timeout: 200 * time.Millisecond,
|
||||
LocalAddr: &laddr,
|
||||
}
|
||||
in, rtt, err := c.Exchange(m1, "8.8.8.8:53")
|
||||
|
||||
If these "advanced" features are not needed, a simple UDP query can be sent,
|
||||
with:
|
||||
|
||||
in, err := dns.Exchange(m1, "127.0.0.1:53")
|
||||
|
||||
When this function returns you will get a DNS message. A DNS message consists
|
||||
of four sections.
|
||||
The question section: in.Question, the answer section: in.Answer,
|
||||
the authority section: in.Ns and the additional section: in.Extra.
|
||||
|
||||
Each of these sections (except the Question section) contains a []RR. Basic
|
||||
use pattern for accessing the rdata of a TXT RR as the first RR in
|
||||
the Answer section:
|
||||
|
||||
if t, ok := in.Answer[0].(*dns.TXT); ok {
|
||||
// do something with t.Txt
|
||||
}
|
||||
|
||||
Domain Name and TXT Character String Representations
|
||||
|
||||
Both domain names and TXT character strings are converted to presentation form
|
||||
both when unpacked and when converted to strings.
|
||||
|
||||
For TXT character strings, tabs, carriage returns and line feeds will be
|
||||
converted to \t, \r and \n respectively. Back slashes and quotations marks will
|
||||
be escaped. Bytes below 32 and above 127 will be converted to \DDD form.
|
||||
|
||||
For domain names, in addition to the above rules brackets, periods, spaces,
|
||||
semicolons and the at symbol are escaped.
|
||||
|
||||
DNSSEC
|
||||
|
||||
DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses
|
||||
public key cryptography to sign resource records. The public keys are stored in
|
||||
DNSKEY records and the signatures in RRSIG records.
|
||||
|
||||
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK)
|
||||
bit to a request.
|
||||
|
||||
m := new(dns.Msg)
|
||||
m.SetEdns0(4096, true)
|
||||
|
||||
Signature generation, signature verification and key generation are all supported.
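A minimal signing sketch, assuming a *dns.DNSKEY named key with its private key priv (for example from key.Generate); only the RRSIG fields required by Sign are set here, the rest is copied from the RRset:

	soa, _ := dns.NewRR("miek.nl. 3600 IN SOA ns.miek.nl. hostmaster.miek.nl. 1 14400 3600 604800 3600")
	sig := new(dns.RRSIG)
	sig.Algorithm = key.Algorithm
	sig.KeyTag = key.KeyTag()
	sig.SignerName = key.Hdr.Name
	sig.Inception = uint32(time.Now().Unix())
	sig.Expiration = uint32(time.Now().Add(14 * 24 * time.Hour).Unix())
	err := sig.Sign(priv.(crypto.Signer), []dns.RR{soa})
	// on success the signature can be checked with sig.Verify(key, []dns.RR{soa})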
|
||||
|
||||
DYNAMIC UPDATES
|
||||
|
||||
Dynamic updates reuse the DNS message format, but rename three of the
|
||||
sections. Question is Zone, Answer is Prerequisite, Authority is Update, only
|
||||
the Additional is not renamed. See RFC 2136 for the gory details.
|
||||
|
||||
You can set a rather complex set of rules for the existence or absence of
|
||||
certain resource records or names in a zone to specify if resource records
|
||||
should be added or removed. The table from RFC 2136 supplemented with the Go
|
||||
DNS function shows which functions exist to specify the prerequisites.
|
||||
|
||||
3.2.4 - Table Of Metavalues Used In Prerequisite Section
|
||||
|
||||
CLASS TYPE RDATA Meaning Function
|
||||
--------------------------------------------------------------
|
||||
ANY ANY empty Name is in use dns.NameUsed
|
||||
ANY rrset empty RRset exists (value indep) dns.RRsetUsed
|
||||
NONE ANY empty Name is not in use dns.NameNotUsed
|
||||
NONE rrset empty RRset does not exist dns.RRsetNotUsed
|
||||
zone rrset rr RRset exists (value dep) dns.Used
|
||||
|
||||
The prerequisite section can also be left empty. If you have decided on the
|
||||
prerequisites you can tell what RRs should be added or deleted. The next table
|
||||
shows the options you have and what functions to call.
|
||||
|
||||
3.4.2.6 - Table Of Metavalues Used In Update Section
|
||||
|
||||
CLASS TYPE RDATA Meaning Function
|
||||
---------------------------------------------------------------
|
||||
ANY ANY empty Delete all RRsets from name dns.RemoveName
|
||||
ANY rrset empty Delete an RRset dns.RemoveRRset
|
||||
NONE rrset rr Delete an RR from RRset dns.Remove
|
||||
zone rrset rr Add to an RRset dns.Insert
|
||||
|
||||
TRANSACTION SIGNATURE
|
||||
|
||||
A TSIG or transaction signature adds an HMAC TSIG record to each message sent.
|
||||
The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
|
||||
|
||||
Basic use pattern when querying with a TSIG name "axfr." (note that these key names
|
||||
must be fully qualified - as they are domain names) and the base64 secret
|
||||
"so6ZGir4GPAqINNh9U5c3A==":
|
||||
|
||||
If an incoming message contains a TSIG record it MUST be the last record in
|
||||
the additional section (RFC2845 3.2). This means that you should make the
|
||||
call to SetTsig last, right before executing the query. If you make any
|
||||
changes to the RRset after calling SetTsig() the signature will be incorrect.
|
||||
|
||||
c := new(dns.Client)
|
||||
c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion("miek.nl.", dns.TypeMX)
|
||||
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
|
||||
...
|
||||
// When sending the TSIG RR is calculated and filled in before sending
|
||||
|
||||
When requesting a zone transfer (almost all TSIG usage is when requesting zone
|
||||
transfers), with TSIG, this is the basic use pattern. In this example we
|
||||
request an AXFR for miek.nl. with TSIG key named "axfr." and secret
|
||||
"so6ZGir4GPAqINNh9U5c3A==" and using the server 176.58.119.54:
|
||||
|
||||
t := new(dns.Transfer)
|
||||
m := new(dns.Msg)
|
||||
t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
|
||||
m.SetAxfr("miek.nl.")
|
||||
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
|
||||
c, err := t.In(m, "176.58.119.54:53")
|
||||
for r := range c { ... }
|
||||
|
||||
You can now read the records from the transfer as they come in. Each envelope
|
||||
is checked with TSIG. If something is not correct an error is returned.
|
||||
|
||||
Basic use pattern validating and replying to a message that has TSIG set.
|
||||
|
||||
server := &dns.Server{Addr: ":53", Net: "udp"}
|
||||
server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
|
||||
go server.ListenAndServe()
|
||||
dns.HandleFunc(".", handleRequest)
|
||||
|
||||
func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
|
||||
m := new(dns.Msg)
|
||||
m.SetReply(r)
|
||||
if r.IsTsig() != nil {
|
||||
if w.TsigStatus() == nil {
|
||||
// *Msg r has a TSIG record and it was validated
|
||||
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
|
||||
} else {
|
||||
// *Msg r has a TSIG record and it was not validated
|
||||
}
|
||||
}
|
||||
w.WriteMsg(m)
|
||||
}
|
||||
|
||||
PRIVATE RRS
|
||||
|
||||
RFC 6895 sets aside a range of type codes for private use. This range is 65,280
|
||||
- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
|
||||
can be used, before requesting an official type code from IANA.
|
||||
|
||||
See https://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more
|
||||
information.
|
||||
|
||||
EDNS0
|
||||
|
||||
EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by
|
||||
RFC 6891. It defines a new RR type, the OPT RR, which is then completely
|
||||
abused.
|
||||
|
||||
Basic use pattern for creating an (empty) OPT RR:
|
||||
|
||||
o := new(dns.OPT)
|
||||
o.Hdr.Name = "." // MUST be the root zone, per definition.
|
||||
o.Hdr.Rrtype = dns.TypeOPT
|
||||
|
||||
The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891) interfaces.
|
||||
Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and
|
||||
EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note that these options
|
||||
may be combined in an OPT RR. Basic use pattern for a server to check if (and
|
||||
which) options are set:
|
||||
|
||||
// o is a dns.OPT
|
||||
for _, s := range o.Option {
|
||||
switch e := s.(type) {
|
||||
case *dns.EDNS0_NSID:
|
||||
// do stuff with e.Nsid
|
||||
case *dns.EDNS0_SUBNET:
|
||||
// access e.Family, e.Address, etc.
|
||||
}
|
||||
}
|
||||
|
||||
SIG(0)
|
||||
|
||||
From RFC 2931:
|
||||
|
||||
SIG(0) provides protection for DNS transactions and requests ....
|
||||
... protection for glue records, DNS requests, protection for message headers
|
||||
on requests and responses, and protection of the overall integrity of a response.
|
||||
|
||||
It works like TSIG, except that SIG(0) uses public key cryptography, instead of
|
||||
the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256,
|
||||
ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512.
|
||||
|
||||
Signing subsequent messages in multi-message sessions is not implemented.
|
||||
*/
|
||||
package dns
|
38
vendor/github.com/miekg/dns/duplicate.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
package dns
|
||||
|
||||
//go:generate go run duplicate_generate.go
|
||||
|
||||
// IsDuplicate checks if r1 and r2 are duplicates of each other, excluding the TTL.
|
||||
// So this means the header data is equal *and* the RDATA is the same. Returns true
|
||||
// if so, otherwise false.
|
||||
// It is a protocol violation to have identical RRs in a message.
|
||||
func IsDuplicate(r1, r2 RR) bool {
|
||||
// Check whether the record header is identical.
|
||||
if !r1.Header().isDuplicate(r2.Header()) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check whether the RDATA is identical.
|
||||
return r1.isDuplicate(r2)
|
||||
}
|
||||
|
||||
func (r1 *RR_Header) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*RR_Header)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if r1.Class != r2.Class {
|
||||
return false
|
||||
}
|
||||
if r1.Rrtype != r2.Rrtype {
|
||||
return false
|
||||
}
|
||||
if !isDulicateName(r1.Name, r2.Name) {
|
||||
return false
|
||||
}
|
||||
// ignore TTL
|
||||
return true
|
||||
}
|
||||
|
||||
// isDulicateName checks if the domain names s1 and s2 are equal.
|
||||
func isDulicateName(s1, s2 string) bool { return equal(s1, s2) }
|
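A hedged usage sketch for the helper above (the differing TTL is ignored by design):

	r1, _ := dns.NewRR("example.org. 3600 IN A 192.0.2.1")
	r2, _ := dns.NewRR("example.org. 60 IN A 192.0.2.1")
	fmt.Println(dns.IsDuplicate(r1, r2)) // true: only the TTL differs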
626
vendor/github.com/miekg/dns/edns.go
generated
vendored
Normal file
@ -0,0 +1,626 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// EDNS0 Option codes.
|
||||
const (
|
||||
EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
|
||||
EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
|
||||
EDNS0NSID = 0x3 // nsid (See RFC 5001)
|
||||
EDNS0DAU = 0x5 // DNSSEC Algorithm Understood
|
||||
EDNS0DHU = 0x6 // DS Hash Understood
|
||||
EDNS0N3U = 0x7 // NSEC3 Hash Understood
|
||||
EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871)
|
||||
EDNS0EXPIRE = 0x9 // EDNS0 expire
|
||||
EDNS0COOKIE = 0xa // EDNS0 Cookie
|
||||
EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828)
|
||||
EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830)
|
||||
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891)
|
||||
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891)
|
||||
_DO = 1 << 15 // DNSSEC OK
|
||||
)
|
||||
|
||||
// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
|
||||
// See RFC 6891.
|
||||
type OPT struct {
|
||||
Hdr RR_Header
|
||||
Option []EDNS0 `dns:"opt"`
|
||||
}
|
||||
|
||||
func (rr *OPT) String() string {
|
||||
s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
|
||||
if rr.Do() {
|
||||
s += "flags: do; "
|
||||
} else {
|
||||
s += "flags: ; "
|
||||
}
|
||||
s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
|
||||
|
||||
for _, o := range rr.Option {
|
||||
switch o.(type) {
|
||||
case *EDNS0_NSID:
|
||||
s += "\n; NSID: " + o.String()
|
||||
h, e := o.pack()
|
||||
var r string
|
||||
if e == nil {
|
||||
for _, c := range h {
|
||||
r += "(" + string(c) + ")"
|
||||
}
|
||||
s += " " + r
|
||||
}
|
||||
case *EDNS0_SUBNET:
|
||||
s += "\n; SUBNET: " + o.String()
|
||||
case *EDNS0_COOKIE:
|
||||
s += "\n; COOKIE: " + o.String()
|
||||
case *EDNS0_UL:
|
||||
s += "\n; UPDATE LEASE: " + o.String()
|
||||
case *EDNS0_LLQ:
|
||||
s += "\n; LONG LIVED QUERIES: " + o.String()
|
||||
case *EDNS0_DAU:
|
||||
s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String()
|
||||
case *EDNS0_DHU:
|
||||
s += "\n; DS HASH UNDERSTOOD: " + o.String()
|
||||
case *EDNS0_N3U:
|
||||
s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
|
||||
case *EDNS0_LOCAL:
|
||||
s += "\n; LOCAL OPT: " + o.String()
|
||||
case *EDNS0_PADDING:
|
||||
s += "\n; PADDING: " + o.String()
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (rr *OPT) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
for i := 0; i < len(rr.Option); i++ {
|
||||
l += 4 // Account for 2-byte option code and 2-byte option length.
|
||||
lo, _ := rr.Option[i].pack()
|
||||
l += len(lo)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (rr *OPT) parse(c *zlexer, origin, file string) *ParseError {
|
||||
panic("dns: internal error: parse should never be called on OPT")
|
||||
}
|
||||
|
||||
func (r1 *OPT) isDuplicate(r2 RR) bool { return false }
|
||||
|
||||
// return the old value -> delete SetVersion?
|
||||
|
||||
// Version returns the EDNS version used. Only zero is defined.
|
||||
func (rr *OPT) Version() uint8 {
|
||||
return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16)
|
||||
}
|
||||
|
||||
// SetVersion sets the version of EDNS. This is usually zero.
|
||||
func (rr *OPT) SetVersion(v uint8) {
|
||||
rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | uint32(v)<<16
|
||||
}
|
||||
|
||||
// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
|
||||
func (rr *OPT) ExtendedRcode() int {
|
||||
return int(rr.Hdr.Ttl&0xFF000000>>24) << 4
|
||||
}
|
||||
|
||||
// SetExtendedRcode sets the EDNS extended RCODE field.
|
||||
//
|
||||
// If the RCODE is not an extended RCODE, this will reset the extended RCODE field to 0.
|
||||
func (rr *OPT) SetExtendedRcode(v uint16) {
|
||||
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24
|
||||
}
|
||||
|
||||
// UDPSize returns the UDP buffer size.
|
||||
func (rr *OPT) UDPSize() uint16 {
|
||||
return rr.Hdr.Class
|
||||
}
|
||||
|
||||
// SetUDPSize sets the UDP buffer size.
|
||||
func (rr *OPT) SetUDPSize(size uint16) {
|
||||
rr.Hdr.Class = size
|
||||
}
|
||||
|
||||
// Do returns the value of the DO (DNSSEC OK) bit.
|
||||
func (rr *OPT) Do() bool {
|
||||
return rr.Hdr.Ttl&_DO == _DO
|
||||
}
|
||||
|
||||
// SetDo sets the DO (DNSSEC OK) bit.
|
||||
// If we pass an argument, set the DO bit to that value.
|
||||
// It is possible to pass 2 or more arguments; any arguments after the first are silently ignored.
|
||||
func (rr *OPT) SetDo(do ...bool) {
|
||||
if len(do) == 1 {
|
||||
if do[0] {
|
||||
rr.Hdr.Ttl |= _DO
|
||||
} else {
|
||||
rr.Hdr.Ttl &^= _DO
|
||||
}
|
||||
} else {
|
||||
rr.Hdr.Ttl |= _DO
|
||||
}
|
||||
}
|
||||
|
||||
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
|
||||
type EDNS0 interface {
|
||||
// Option returns the option code for the option.
|
||||
Option() uint16
|
||||
// pack returns the bytes of the option data.
|
||||
pack() ([]byte, error)
|
||||
// unpack sets the data as found in the buffer. It also sets
|
||||
// the length of the slice as the length of the option data.
|
||||
unpack([]byte) error
|
||||
// String returns the string representation of the option.
|
||||
String() string
|
||||
}
|
||||
|
||||
// EDNS0_NSID option is used to retrieve a nameserver
|
||||
// identifier. When sending a request, Nsid must be set to the empty string.
|
||||
// The identifier is an opaque string encoded as hex.
|
||||
// Basic use pattern for creating an nsid option:
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_NSID)
|
||||
// e.Code = dns.EDNS0NSID
|
||||
// e.Nsid = "AA"
|
||||
// o.Option = append(o.Option, e)
|
||||
type EDNS0_NSID struct {
|
||||
Code uint16 // Always EDNS0NSID
|
||||
Nsid string // This string needs to be hex encoded
|
||||
}
|
||||
|
||||
func (e *EDNS0_NSID) pack() ([]byte, error) {
|
||||
h, err := hex.DecodeString(e.Nsid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code.
|
||||
func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
|
||||
func (e *EDNS0_NSID) String() string { return e.Nsid }
|
||||
|
||||
// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
|
||||
// an idea of where the client lives. See RFC 7871. It can then give back a different
|
||||
// answer depending on the location or network topology.
|
||||
// Basic use pattern for creating a subnet option:
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_SUBNET)
|
||||
// e.Code = dns.EDNS0SUBNET
|
||||
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
|
||||
// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6
|
||||
// e.SourceScope = 0
|
||||
// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
|
||||
// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
|
||||
// o.Option = append(o.Option, e)
|
||||
//
|
||||
// This code will parse all the available bits when unpacking (up to optlen).
|
||||
// When packing it will apply SourceNetmask. If you need more advanced logic,
|
||||
// patches welcome and good luck.
|
||||
type EDNS0_SUBNET struct {
|
||||
Code uint16 // Always EDNS0SUBNET
|
||||
Family uint16 // 1 for IP, 2 for IP6
|
||||
SourceNetmask uint8
|
||||
SourceScope uint8
|
||||
Address net.IP
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET }
|
||||
|
||||
func (e *EDNS0_SUBNET) pack() ([]byte, error) {
|
||||
b := make([]byte, 4)
|
||||
binary.BigEndian.PutUint16(b[0:], e.Family)
|
||||
b[2] = e.SourceNetmask
|
||||
b[3] = e.SourceScope
|
||||
switch e.Family {
|
||||
case 0:
|
||||
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
|
||||
// We may not need to complain either
|
||||
if e.SourceNetmask != 0 {
|
||||
return nil, errors.New("dns: bad address family")
|
||||
}
|
||||
case 1:
|
||||
if e.SourceNetmask > net.IPv4len*8 {
|
||||
return nil, errors.New("dns: bad netmask")
|
||||
}
|
||||
if len(e.Address.To4()) != net.IPv4len {
|
||||
return nil, errors.New("dns: bad address")
|
||||
}
|
||||
ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
|
||||
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
|
||||
b = append(b, ip[:needLength]...)
|
||||
case 2:
|
||||
if e.SourceNetmask > net.IPv6len*8 {
|
||||
return nil, errors.New("dns: bad netmask")
|
||||
}
|
||||
if len(e.Address) != net.IPv6len {
|
||||
return nil, errors.New("dns: bad address")
|
||||
}
|
||||
ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
|
||||
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
|
||||
b = append(b, ip[:needLength]...)
|
||||
default:
|
||||
return nil, errors.New("dns: bad address family")
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_SUBNET) unpack(b []byte) error {
|
||||
if len(b) < 4 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Family = binary.BigEndian.Uint16(b)
|
||||
e.SourceNetmask = b[2]
|
||||
e.SourceScope = b[3]
|
||||
switch e.Family {
|
||||
case 0:
|
||||
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
|
||||
// It's okay to accept such a packet
|
||||
if e.SourceNetmask != 0 {
|
||||
return errors.New("dns: bad address family")
|
||||
}
|
||||
e.Address = net.IPv4(0, 0, 0, 0)
|
||||
case 1:
|
||||
if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
|
||||
return errors.New("dns: bad netmask")
|
||||
}
|
||||
addr := make(net.IP, net.IPv4len)
|
||||
copy(addr, b[4:])
|
||||
e.Address = addr.To16()
|
||||
case 2:
|
||||
if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
|
||||
return errors.New("dns: bad netmask")
|
||||
}
|
||||
addr := make(net.IP, net.IPv6len)
|
||||
copy(addr, b[4:])
|
||||
e.Address = addr
|
||||
default:
|
||||
return errors.New("dns: bad address family")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_SUBNET) String() (s string) {
|
||||
if e.Address == nil {
|
||||
s = "<nil>"
|
||||
} else if e.Address.To4() != nil {
|
||||
s = e.Address.String()
|
||||
} else {
|
||||
s = "[" + e.Address.String() + "]"
|
||||
}
|
||||
s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope))
|
||||
return
|
||||
}
|
||||
|
||||
// The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_COOKIE)
|
||||
// e.Code = dns.EDNS0COOKIE
|
||||
// e.Cookie = "24a5ac.."
|
||||
// o.Option = append(o.Option, e)
|
||||
//
|
||||
// The Cookie field consists of a client cookie (RFC 7873 Section 4), which is
|
||||
// always 8 bytes. It may then optionally be followed by the server cookie. The server
|
||||
// cookie is of variable length, 8 to a maximum of 32 bytes. In other words:
|
||||
//
|
||||
// cCookie := o.Cookie[:16]
|
||||
// sCookie := o.Cookie[16:]
|
||||
//
|
||||
// There is no guarantee that the Cookie string has a specific length.
|
||||
type EDNS0_COOKIE struct {
|
||||
Code uint16 // Always EDNS0COOKIE
|
||||
Cookie string // Hex-encoded cookie data
|
||||
}
|
||||
|
||||
func (e *EDNS0_COOKIE) pack() ([]byte, error) {
|
||||
h, err := hex.DecodeString(e.Cookie)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
|
||||
func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
|
||||
func (e *EDNS0_COOKIE) String() string { return e.Cookie }
|
||||
|
||||
// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
|
||||
// an expiration on an update RR. This is helpful for clients that cannot clean
|
||||
// up after themselves. This is a draft RFC and more information can be found at
|
||||
// http://files.dns-sd.org/draft-sekar-dns-ul.txt
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_UL)
|
||||
// e.Code = dns.EDNS0UL
|
||||
// e.Lease = 120 // in seconds
|
||||
// o.Option = append(o.Option, e)
|
||||
type EDNS0_UL struct {
|
||||
Code uint16 // Always EDNS0UL
|
||||
Lease uint32
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
|
||||
func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
|
||||
|
||||
// Copied: http://golang.org/src/pkg/net/dnsmsg.go
|
||||
func (e *EDNS0_UL) pack() ([]byte, error) {
|
||||
b := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(b, e.Lease)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_UL) unpack(b []byte) error {
|
||||
if len(b) < 4 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Lease = binary.BigEndian.Uint32(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
|
||||
// Implemented for completeness, as the EDNS0 type code is assigned.
|
||||
type EDNS0_LLQ struct {
|
||||
Code uint16 // Always EDNS0LLQ
|
||||
Version uint16
|
||||
Opcode uint16
|
||||
Error uint16
|
||||
Id uint64
|
||||
LeaseLife uint32
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
|
||||
|
||||
func (e *EDNS0_LLQ) pack() ([]byte, error) {
|
||||
b := make([]byte, 18)
|
||||
binary.BigEndian.PutUint16(b[0:], e.Version)
|
||||
binary.BigEndian.PutUint16(b[2:], e.Opcode)
|
||||
binary.BigEndian.PutUint16(b[4:], e.Error)
|
||||
binary.BigEndian.PutUint64(b[6:], e.Id)
|
||||
binary.BigEndian.PutUint32(b[14:], e.LeaseLife)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_LLQ) unpack(b []byte) error {
|
||||
if len(b) < 18 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Version = binary.BigEndian.Uint16(b[0:])
|
||||
e.Opcode = binary.BigEndian.Uint16(b[2:])
|
||||
e.Error = binary.BigEndian.Uint16(b[4:])
|
||||
e.Id = binary.BigEndian.Uint64(b[6:])
|
||||
e.LeaseLife = binary.BigEndian.Uint32(b[14:])
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_LLQ) String() string {
|
||||
s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
|
||||
" " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(e.Id, 10) +
|
||||
" " + strconv.FormatUint(uint64(e.LeaseLife), 10)
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
|
||||
type EDNS0_DAU struct {
|
||||
Code uint16 // Always EDNS0DAU
|
||||
AlgCode []uint8
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
|
||||
func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
|
||||
func (e *EDNS0_DAU) String() string {
|
||||
s := ""
|
||||
for i := 0; i < len(e.AlgCode); i++ {
|
||||
if a, ok := AlgorithmToString[e.AlgCode[i]]; ok {
|
||||
s += " " + a
|
||||
} else {
|
||||
s += " " + strconv.Itoa(int(e.AlgCode[i]))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
|
||||
type EDNS0_DHU struct {
|
||||
Code uint16 // Always EDNS0DHU
|
||||
AlgCode []uint8
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
|
||||
func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
|
||||
func (e *EDNS0_DHU) String() string {
|
||||
s := ""
|
||||
for i := 0; i < len(e.AlgCode); i++ {
|
||||
if a, ok := HashToString[e.AlgCode[i]]; ok {
|
||||
s += " " + a
|
||||
} else {
|
||||
s += " " + strconv.Itoa(int(e.AlgCode[i]))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
|
||||
type EDNS0_N3U struct {
|
||||
Code uint16 // Always EDNS0N3U
|
||||
AlgCode []uint8
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
|
||||
func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
|
||||
func (e *EDNS0_N3U) String() string {
|
||||
// Re-use the hash map
|
||||
s := ""
|
||||
for i := 0; i < len(e.AlgCode); i++ {
|
||||
if a, ok := HashToString[e.AlgCode[i]]; ok {
|
||||
s += " " + a
|
||||
} else {
|
||||
s += " " + strconv.Itoa(int(e.AlgCode[i]))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314.
|
||||
type EDNS0_EXPIRE struct {
|
||||
Code uint16 // Always EDNS0EXPIRE
|
||||
Expire uint32
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
|
||||
func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
|
||||
|
||||
func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
|
||||
b := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(b, e.Expire)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_EXPIRE) unpack(b []byte) error {
|
||||
if len(b) < 4 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Expire = binary.BigEndian.Uint32(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// The EDNS0_LOCAL option is used for local/experimental purposes. The option
|
||||
// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
|
||||
// (RFC6891), although any unassigned code can actually be used. The content of
|
||||
// the option is made available in Data, unaltered.
|
||||
// Basic use pattern for creating a local option:
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_LOCAL)
|
||||
// e.Code = dns.EDNS0LOCALSTART
|
||||
// e.Data = []byte{72, 82, 74}
|
||||
// o.Option = append(o.Option, e)
|
||||
type EDNS0_LOCAL struct {
|
||||
Code uint16
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
|
||||
func (e *EDNS0_LOCAL) String() string {
|
||||
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
|
||||
}
|
||||
|
||||
func (e *EDNS0_LOCAL) pack() ([]byte, error) {
|
||||
b := make([]byte, len(e.Data))
|
||||
copied := copy(b, e.Data)
|
||||
if copied != len(e.Data) {
|
||||
return nil, ErrBuf
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_LOCAL) unpack(b []byte) error {
|
||||
e.Data = make([]byte, len(b))
|
||||
copied := copy(e.Data, b)
|
||||
if copied != len(b) {
|
||||
return ErrBuf
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
|
||||
// the TCP connection alive. See RFC 7828.
|
||||
type EDNS0_TCP_KEEPALIVE struct {
|
||||
Code uint16 // Always EDNS0TCPKEEPALIVE
|
||||
Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present;
|
||||
Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order.
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE }
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) {
|
||||
if e.Timeout != 0 && e.Length != 2 {
|
||||
return nil, errors.New("dns: timeout specified but length is not 2")
|
||||
}
|
||||
if e.Timeout == 0 && e.Length != 0 {
|
||||
return nil, errors.New("dns: timeout not specified but length is not 0")
|
||||
}
|
||||
b := make([]byte, 4+e.Length)
|
||||
binary.BigEndian.PutUint16(b[0:], e.Code)
|
||||
binary.BigEndian.PutUint16(b[2:], e.Length)
|
||||
if e.Length == 2 {
|
||||
binary.BigEndian.PutUint16(b[4:], e.Timeout)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error {
|
||||
if len(b) < 4 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Length = binary.BigEndian.Uint16(b[2:4])
|
||||
if e.Length != 0 && e.Length != 2 {
|
||||
return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10))
|
||||
}
|
||||
if e.Length == 2 {
|
||||
if len(b) < 6 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Timeout = binary.BigEndian.Uint16(b[4:6])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) String() (s string) {
|
||||
s = "use tcp keep-alive"
|
||||
if e.Length == 0 {
|
||||
s += ", timeout omitted"
|
||||
} else {
|
||||
s += fmt.Sprintf(", timeout %dms", e.Timeout*100)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EDNS0_PADDING option is used to add padding to a request/response. The default
|
||||
// value of padding SHOULD be 0x0 but other values MAY be used, for instance if
|
||||
// compression is applied before encryption which may break signatures.
|
||||
type EDNS0_PADDING struct {
|
||||
Padding []byte
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }
|
||||
func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
|
||||
func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
|
||||
func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) }
|
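Tying the accessors above together, a sketch (message contents are illustrative) that advertises a 4096-byte UDP buffer plus the DO bit on a query:

	o := new(dns.OPT)
	o.Hdr.Name = "."
	o.Hdr.Rrtype = dns.TypeOPT
	o.SetUDPSize(4096)
	o.SetDo()
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)
	m.Extra = append(m.Extra, o)
	// m.SetEdns0(4096, true) builds an equivalent OPT RR in one call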
87
vendor/github.com/miekg/dns/format.go
generated
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// NumField returns the number of rdata fields r has.
|
||||
func NumField(r RR) int {
|
||||
return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header
|
||||
}
|
||||
|
||||
// Field returns the rdata field i as a string. Fields are indexed starting from 1.
|
||||
// RR types that hold slice data, for instance the NSEC type bitmap, will return a single
|
||||
// string where the types are concatenated using a space.
|
||||
// Accessing non-existent fields will cause a panic.
|
||||
func Field(r RR, i int) string {
|
||||
if i == 0 {
|
||||
return ""
|
||||
}
|
||||
d := reflect.ValueOf(r).Elem().Field(i)
|
||||
switch d.Kind() {
|
||||
case reflect.String:
|
||||
return d.String()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return strconv.FormatInt(d.Int(), 10)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return strconv.FormatUint(d.Uint(), 10)
|
||||
case reflect.Slice:
|
||||
switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
|
||||
case `dns:"a"`:
|
||||
// TODO(miek): Hmm store this as 16 bytes
|
||||
if d.Len() < net.IPv6len {
|
||||
return net.IPv4(byte(d.Index(0).Uint()),
|
||||
byte(d.Index(1).Uint()),
|
||||
byte(d.Index(2).Uint()),
|
||||
byte(d.Index(3).Uint())).String()
|
||||
}
|
||||
return net.IPv4(byte(d.Index(12).Uint()),
|
||||
byte(d.Index(13).Uint()),
|
||||
byte(d.Index(14).Uint()),
|
||||
byte(d.Index(15).Uint())).String()
|
||||
case `dns:"aaaa"`:
|
||||
return net.IP{
|
||||
byte(d.Index(0).Uint()),
|
||||
byte(d.Index(1).Uint()),
|
||||
byte(d.Index(2).Uint()),
|
||||
byte(d.Index(3).Uint()),
|
||||
byte(d.Index(4).Uint()),
|
||||
byte(d.Index(5).Uint()),
|
||||
byte(d.Index(6).Uint()),
|
||||
byte(d.Index(7).Uint()),
|
||||
byte(d.Index(8).Uint()),
|
||||
byte(d.Index(9).Uint()),
|
||||
byte(d.Index(10).Uint()),
|
||||
byte(d.Index(11).Uint()),
|
||||
byte(d.Index(12).Uint()),
|
||||
byte(d.Index(13).Uint()),
|
||||
byte(d.Index(14).Uint()),
|
||||
byte(d.Index(15).Uint()),
|
||||
}.String()
|
||||
case `dns:"nsec"`:
|
||||
if d.Len() == 0 {
|
||||
return ""
|
||||
}
|
||||
s := Type(d.Index(0).Uint()).String()
|
||||
for i := 1; i < d.Len(); i++ {
|
||||
s += " " + Type(d.Index(i).Uint()).String()
|
||||
}
|
||||
return s
|
||||
default:
|
||||
// if it does not have a tag it's a string slice
|
||||
fallthrough
|
||||
case `dns:"txt"`:
|
||||
if d.Len() == 0 {
|
||||
return ""
|
||||
}
|
||||
s := d.Index(0).String()
|
||||
for i := 1; i < d.Len(); i++ {
|
||||
s += " " + d.Index(i).String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
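A short sketch of the reflection helpers above; rdata fields are indexed from 1, right after the RR header:

	mx, _ := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
	fmt.Println(dns.NumField(mx)) // 2: Preference and Mx
	fmt.Println(dns.Field(mx, 1)) // "10"
	fmt.Println(dns.Field(mx, 2)) // "mx.miek.nl."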
23
vendor/github.com/miekg/dns/fuzz.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
// +build fuzz
|
||||
|
||||
package dns
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
msg := new(Msg)
|
||||
|
||||
if err := msg.Unpack(data); err != nil {
|
||||
return 0
|
||||
}
|
||||
if _, err := msg.Pack(); err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
func FuzzNewRR(data []byte) int {
|
||||
if _, err := NewRR(string(data)); err != nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
242
vendor/github.com/miekg/dns/generate.go
generated
vendored
Normal file
@ -0,0 +1,242 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Parse the $GENERATE statement as used in BIND9 zones.
|
||||
// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
|
||||
// We are called after '$GENERATE '. After which we expect:
|
||||
// * the range (12-24/2)
|
||||
// * lhs (ownername)
|
||||
// * [[ttl][class]]
|
||||
// * type
|
||||
// * rhs (rdata)
|
||||
// But we are lazy here: only the range is parsed; *all* occurrences
|
||||
// of $ after that are interpreted.
|
||||
func (zp *ZoneParser) generate(l lex) (RR, bool) {
|
||||
token := l.token
|
||||
step := 1
|
||||
if i := strings.IndexByte(token, '/'); i >= 0 {
|
||||
if i+1 == len(token) {
|
||||
return zp.setParseError("bad step in $GENERATE range", l)
|
||||
}
|
||||
|
||||
s, err := strconv.Atoi(token[i+1:])
|
||||
if err != nil || s <= 0 {
|
||||
return zp.setParseError("bad step in $GENERATE range", l)
|
||||
}
|
||||
|
||||
step = s
|
||||
token = token[:i]
|
||||
}
|
||||
|
||||
sx := strings.SplitN(token, "-", 2)
|
||||
if len(sx) != 2 {
|
||||
return zp.setParseError("bad start-stop in $GENERATE range", l)
|
||||
}
|
||||
|
||||
start, err := strconv.Atoi(sx[0])
|
||||
if err != nil {
|
||||
return zp.setParseError("bad start in $GENERATE range", l)
|
||||
}
|
||||
|
||||
end, err := strconv.Atoi(sx[1])
|
||||
if err != nil {
|
||||
return zp.setParseError("bad stop in $GENERATE range", l)
|
||||
}
|
||||
if end < 0 || start < 0 || end < start {
|
||||
return zp.setParseError("bad range in $GENERATE range", l)
|
||||
}
|
||||
|
||||
zp.c.Next() // _BLANK
|
||||
|
||||
// Create a complete new string, which we then parse again.
|
||||
var s string
|
||||
for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
|
||||
if l.err {
|
||||
return zp.setParseError("bad data in $GENERATE directive", l)
|
||||
}
|
||||
if l.value == zNewline {
|
||||
break
|
||||
}
|
||||
|
||||
s += l.token
|
||||
}
|
||||
|
||||
r := &generateReader{
|
||||
s: s,
|
||||
|
||||
cur: start,
|
||||
start: start,
|
||||
end: end,
|
||||
step: step,
|
||||
|
||||
file: zp.file,
|
||||
lex: &l,
|
||||
}
|
||||
zp.sub = NewZoneParser(r, zp.origin, zp.file)
|
||||
zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
|
||||
zp.sub.SetDefaultTTL(defaultTtl)
|
||||
return zp.subNext()
|
||||
}
|
||||
|
||||
type generateReader struct {
|
||||
s string
|
||||
si int
|
||||
|
||||
cur int
|
||||
start int
|
||||
end int
|
||||
step int
|
||||
|
||||
mod bytes.Buffer
|
||||
|
||||
escape bool
|
||||
|
||||
eof bool
|
||||
|
||||
file string
|
||||
lex *lex
|
||||
}
|
||||
|
||||
func (r *generateReader) parseError(msg string, end int) *ParseError {
|
||||
r.eof = true // Make errors sticky.
|
||||
|
||||
l := *r.lex
|
||||
l.token = r.s[r.si-1 : end]
|
||||
l.column += r.si // l.column starts one zBLANK before r.s
|
||||
|
||||
return &ParseError{r.file, msg, l}
|
||||
}
|
||||
|
||||
func (r *generateReader) Read(p []byte) (int, error) {
|
||||
// NewZLexer, through NewZoneParser, should use ReadByte and
|
||||
// not end up here.
|
||||
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (r *generateReader) ReadByte() (byte, error) {
|
||||
if r.eof {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if r.mod.Len() > 0 {
|
||||
return r.mod.ReadByte()
|
||||
}
|
||||
|
||||
if r.si >= len(r.s) {
|
||||
r.si = 0
|
||||
r.cur += r.step
|
||||
|
||||
r.eof = r.cur > r.end || r.cur < 0
|
||||
return '\n', nil
|
||||
}
|
||||
|
||||
si := r.si
|
||||
r.si++
|
||||
|
||||
switch r.s[si] {
|
||||
case '\\':
|
||||
if r.escape {
|
||||
r.escape = false
|
||||
return '\\', nil
|
||||
}
|
||||
|
||||
r.escape = true
|
||||
return r.ReadByte()
|
||||
case '$':
|
||||
if r.escape {
|
||||
r.escape = false
|
||||
return '$', nil
|
||||
}
|
||||
|
||||
mod := "%d"
|
||||
|
||||
if si >= len(r.s)-1 {
|
||||
// End of the string
|
||||
fmt.Fprintf(&r.mod, mod, r.cur)
|
||||
return r.mod.ReadByte()
|
||||
}
|
||||
|
||||
if r.s[si+1] == '$' {
|
||||
r.si++
|
||||
return '$', nil
|
||||
}
|
||||
|
||||
var offset int
|
||||
|
||||
// Search for { and }
|
||||
if r.s[si+1] == '{' {
|
||||
// Modifier block
|
||||
sep := strings.Index(r.s[si+2:], "}")
|
||||
if sep < 0 {
|
||||
return 0, r.parseError("bad modifier in $GENERATE", len(r.s))
|
||||
}
|
||||
|
||||
var errMsg string
|
||||
mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep])
|
||||
if errMsg != "" {
|
||||
return 0, r.parseError(errMsg, si+3+sep)
|
||||
}
|
||||
if r.start+offset < 0 || r.end+offset > 1<<31-1 {
|
||||
return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
|
||||
}
|
||||
|
||||
r.si += 2 + sep // Jump to it
|
||||
}
|
||||
|
||||
fmt.Fprintf(&r.mod, mod, r.cur+offset)
|
||||
return r.mod.ReadByte()
|
||||
default:
|
||||
if r.escape { // Pretty useless here
|
||||
r.escape = false
|
||||
return r.ReadByte()
|
||||
}
|
||||
|
||||
return r.s[si], nil
|
||||
}
|
||||
}
|
||||
|
||||
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
|
||||
func modToPrintf(s string) (string, int, string) {
|
||||
// Modifier is { offset [ ,width [ ,base ] ] } - provide default
|
||||
// values for optional width and type, if necessary.
|
||||
var offStr, widthStr, base string
|
||||
switch xs := strings.Split(s, ","); len(xs) {
|
||||
case 1:
|
||||
offStr, widthStr, base = xs[0], "0", "d"
|
||||
case 2:
|
||||
offStr, widthStr, base = xs[0], xs[1], "d"
|
||||
case 3:
|
||||
offStr, widthStr, base = xs[0], xs[1], xs[2]
|
||||
default:
|
||||
return "", 0, "bad modifier in $GENERATE"
|
||||
}
|
||||
|
||||
switch base {
|
||||
case "o", "d", "x", "X":
|
||||
default:
|
||||
return "", 0, "bad base in $GENERATE"
|
||||
}
|
||||
|
||||
offset, err := strconv.Atoi(offStr)
|
||||
if err != nil {
|
||||
return "", 0, "bad offset in $GENERATE"
|
||||
}
|
||||
|
||||
width, err := strconv.Atoi(widthStr)
|
||||
if err != nil || width < 0 || width > 255 {
|
||||
return "", 0, "bad width in $GENERATE"
|
||||
}
|
||||
|
||||
if width == 0 {
|
||||
return "%" + base, offset, ""
|
||||
}
|
||||
|
||||
return "%0" + widthStr + base, offset, ""
|
||||
}
|
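
For reference, a minimal sketch of driving the $GENERATE handling above through the package's public ZoneParser API; the zone text, origin, and file label are illustrative assumptions, not part of the vendored code.

// Illustrative sketch only: expand a $GENERATE directive via ZoneParser.
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	zone := "$ORIGIN example.org.\n$TTL 3600\n" +
		"$GENERATE 10-14/2 host-${0,3,d} IN A 10.0.0.$\n"

	zp := dns.NewZoneParser(strings.NewReader(zone), "example.org.", "example")
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		fmt.Println(rr) // host-010, host-012 and host-014 A records
	}
	if err := zp.Err(); err != nil {
		log.Fatal(err)
	}
}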
190
vendor/github.com/miekg/dns/labels.go
generated
vendored
Normal file
@ -0,0 +1,190 @@
|
||||
package dns
|
||||
|
||||
// Holds a bunch of helper functions for dealing with labels.
|
||||
|
||||
// SplitDomainName splits a name string into its labels.
|
||||
// www.miek.nl. returns []string{"www", "miek", "nl"}
|
||||
// .www.miek.nl. returns []string{"", "www", "miek", "nl"},
|
||||
// The root label (.) returns nil. Note that using
|
||||
// strings.Split(s) will work in most cases, but does not handle
|
||||
// escaped dots (\.) for instance.
|
||||
// s must be a syntactically valid domain name, see IsDomainName.
|
||||
func SplitDomainName(s string) (labels []string) {
|
||||
if len(s) == 0 {
|
||||
return nil
|
||||
}
|
||||
fqdnEnd := 0 // offset of the final '.' or the length of the name
|
||||
idx := Split(s)
|
||||
begin := 0
|
||||
if IsFqdn(s) {
|
||||
fqdnEnd = len(s) - 1
|
||||
} else {
|
||||
fqdnEnd = len(s)
|
||||
}
|
||||
|
||||
switch len(idx) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
// no-op
|
||||
default:
|
||||
end := 0
|
||||
for i := 1; i < len(idx); i++ {
|
||||
end = idx[i]
|
||||
labels = append(labels, s[begin:end-1])
|
||||
begin = end
|
||||
}
|
||||
}
|
||||
|
||||
return append(labels, s[begin:fqdnEnd])
|
||||
}
|
||||
|
||||
// CompareDomainName compares the names s1 and s2 and
|
||||
// returns how many labels they have in common starting from the *right*.
|
||||
// The comparison stops at the first inequality. The names are downcased
|
||||
// before the comparison.
|
||||
//
|
||||
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
|
||||
// www.miek.nl. and www.bla.nl. have one label in common: nl
|
||||
//
|
||||
// s1 and s2 must be syntactically valid domain names.
|
||||
func CompareDomainName(s1, s2 string) (n int) {
|
||||
// the first check: root label
|
||||
if s1 == "." || s2 == "." {
|
||||
return 0
|
||||
}
|
||||
|
||||
l1 := Split(s1)
|
||||
l2 := Split(s2)
|
||||
|
||||
j1 := len(l1) - 1 // end
|
||||
i1 := len(l1) - 2 // start
|
||||
j2 := len(l2) - 1
|
||||
i2 := len(l2) - 2
|
||||
// the second check can be done here: last/only label
|
||||
// before we fall through into the for-loop below
|
||||
if equal(s1[l1[j1]:], s2[l2[j2]:]) {
|
||||
n++
|
||||
} else {
|
||||
return
|
||||
}
|
||||
for {
|
||||
if i1 < 0 || i2 < 0 {
|
||||
break
|
||||
}
|
||||
if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
|
||||
n++
|
||||
} else {
|
||||
break
|
||||
}
|
||||
j1--
|
||||
i1--
|
||||
j2--
|
||||
i2--
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CountLabel counts the number of labels in the string s.
|
||||
// s must be a syntactically valid domain name.
|
||||
func CountLabel(s string) (labels int) {
|
||||
if s == "." {
|
||||
return
|
||||
}
|
||||
off := 0
|
||||
end := false
|
||||
for {
|
||||
off, end = NextLabel(s, off)
|
||||
labels++
|
||||
if end {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Split splits a name s into its label indexes.
|
||||
// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
|
||||
// The root name (.) returns nil. Also see SplitDomainName.
|
||||
// s must be a syntactically valid domain name.
|
||||
func Split(s string) []int {
|
||||
if s == "." {
|
||||
return nil
|
||||
}
|
||||
idx := make([]int, 1, 3)
|
||||
off := 0
|
||||
end := false
|
||||
|
||||
for {
|
||||
off, end = NextLabel(s, off)
|
||||
if end {
|
||||
return idx
|
||||
}
|
||||
idx = append(idx, off)
|
||||
}
|
||||
}
|
||||
|
||||
// NextLabel returns the index of the start of the next label in the
|
||||
// string s starting at offset.
|
||||
// The bool end is true when the end of the string has been reached.
|
||||
// Also see PrevLabel.
|
||||
func NextLabel(s string, offset int) (i int, end bool) {
|
||||
quote := false
|
||||
for i = offset; i < len(s)-1; i++ {
|
||||
switch s[i] {
|
||||
case '\\':
|
||||
quote = !quote
|
||||
default:
|
||||
quote = false
|
||||
case '.':
|
||||
if quote {
|
||||
quote = !quote
|
||||
continue
|
||||
}
|
||||
return i + 1, false
|
||||
}
|
||||
}
|
||||
return i + 1, true
|
||||
}
|
||||
|
||||
// PrevLabel returns the index of the label when starting from the right and
|
||||
// jumping n labels to the left.
|
||||
// The bool start is true when the start of the string has been overshot.
|
||||
// Also see NextLabel.
|
||||
func PrevLabel(s string, n int) (i int, start bool) {
|
||||
if n == 0 {
|
||||
return len(s), false
|
||||
}
|
||||
lab := Split(s)
|
||||
if lab == nil {
|
||||
return 0, true
|
||||
}
|
||||
if n > len(lab) {
|
||||
return 0, true
|
||||
}
|
||||
return lab[len(lab)-n], false
|
||||
}
|
||||
|
||||
// equal compares a and b while ignoring case. It returns true when equal otherwise false.
|
||||
func equal(a, b string) bool {
|
||||
// might be lifted into API function.
|
||||
la := len(a)
|
||||
lb := len(b)
|
||||
if la != lb {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := la - 1; i >= 0; i-- {
|
||||
ai := a[i]
|
||||
bi := b[i]
|
||||
if ai >= 'A' && ai <= 'Z' {
|
||||
ai |= 'a' - 'A'
|
||||
}
|
||||
if bi >= 'A' && bi <= 'Z' {
|
||||
bi |= 'a' - 'A'
|
||||
}
|
||||
if ai != bi {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
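
A small usage sketch of the label helpers above; the names are arbitrary examples and the expected outputs follow the doc comments.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.SplitDomainName("www.miek.nl."))               // [www miek nl]
	fmt.Println(dns.Split("www.miek.nl."))                         // [0 4 9]
	fmt.Println(dns.CountLabel("www.miek.nl."))                    // 3
	fmt.Println(dns.CompareDomainName("www.miek.nl.", "miek.nl.")) // 2 (miek and nl in common)
}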
44
vendor/github.com/miekg/dns/listen_go111.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
// +build go1.11
|
||||
// +build aix darwin dragonfly freebsd linux netbsd openbsd
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const supportsReusePort = true
|
||||
|
||||
func reuseportControl(network, address string, c syscall.RawConn) error {
|
||||
var opErr error
|
||||
err := c.Control(func(fd uintptr) {
|
||||
opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return opErr
|
||||
}
|
||||
|
||||
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
|
||||
var lc net.ListenConfig
|
||||
if reuseport {
|
||||
lc.Control = reuseportControl
|
||||
}
|
||||
|
||||
return lc.Listen(context.Background(), network, addr)
|
||||
}
|
||||
|
||||
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
|
||||
var lc net.ListenConfig
|
||||
if reuseport {
|
||||
lc.Control = reuseportControl
|
||||
}
|
||||
|
||||
return lc.ListenPacket(context.Background(), network, addr)
|
||||
}
|
23
vendor/github.com/miekg/dns/listen_go_not111.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
|
||||
|
||||
package dns
|
||||
|
||||
import "net"
|
||||
|
||||
const supportsReusePort = false
|
||||
|
||||
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
|
||||
if reuseport {
|
||||
// TODO(tmthrgd): return an error?
|
||||
}
|
||||
|
||||
return net.Listen(network, addr)
|
||||
}
|
||||
|
||||
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
|
||||
if reuseport {
|
||||
// TODO(tmthrgd): return an error?
|
||||
}
|
||||
|
||||
return net.ListenPacket(network, addr)
|
||||
}
|
1241
vendor/github.com/miekg/dns/msg.go
generated
vendored
Normal file
File diff suppressed because it is too large
649
vendor/github.com/miekg/dns/msg_helpers.go
generated
vendored
Normal file
@ -0,0 +1,649 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"net"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// helper functions called from the generated zmsg.go
|
||||
|
||||
// These functions are named after the tag to help pack/unpack; if there is no tag it is the name
|
||||
// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or
|
||||
// packDataDomainName.
|
||||
|
||||
func unpackDataA(msg []byte, off int) (net.IP, int, error) {
|
||||
if off+net.IPv4len > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking a"}
|
||||
}
|
||||
a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
|
||||
off += net.IPv4len
|
||||
return a, off, nil
|
||||
}
|
||||
|
||||
func packDataA(a net.IP, msg []byte, off int) (int, error) {
|
||||
// It must be a slice of 4, even if it is 16, we encode only the first 4
|
||||
if off+net.IPv4len > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing a"}
|
||||
}
|
||||
switch len(a) {
|
||||
case net.IPv4len, net.IPv6len:
|
||||
copy(msg[off:], a.To4())
|
||||
off += net.IPv4len
|
||||
case 0:
|
||||
// Allowed, for dynamic updates.
|
||||
default:
|
||||
return len(msg), &Error{err: "overflow packing a"}
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
|
||||
if off+net.IPv6len > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
|
||||
}
|
||||
aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
|
||||
off += net.IPv6len
|
||||
return aaaa, off, nil
|
||||
}
|
||||
|
||||
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
|
||||
if off+net.IPv6len > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing aaaa"}
|
||||
}
|
||||
|
||||
switch len(aaaa) {
|
||||
case net.IPv6len:
|
||||
copy(msg[off:], aaaa)
|
||||
off += net.IPv6len
|
||||
case 0:
|
||||
// Allowed, dynamic updates.
|
||||
default:
|
||||
return len(msg), &Error{err: "overflow packing aaaa"}
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
|
||||
// re-sliced msg according to the expected length of the RR.
|
||||
func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
|
||||
hdr := RR_Header{}
|
||||
if off == len(msg) {
|
||||
return hdr, off, msg, nil
|
||||
}
|
||||
|
||||
hdr.Name, off, err = UnpackDomainName(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
hdr.Rrtype, off, err = unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
hdr.Class, off, err = unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
hdr.Ttl, off, err = unpackUint32(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
hdr.Rdlength, off, err = unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
|
||||
return hdr, off, msg, err
|
||||
}
|
||||
|
||||
// packHeader packs an RR header, returning the offset to the end of the header.
|
||||
// See PackDomainName for documentation about the compression.
|
||||
func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
|
||||
if off == len(msg) {
|
||||
return off, nil
|
||||
}
|
||||
|
||||
off, err := packDomainName(hdr.Name, msg, off, compression, compress)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint16(hdr.Rrtype, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint16(hdr.Class, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint32(hdr.Ttl, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR.
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
// helper helper functions.
|
||||
|
||||
// truncateMsgFromRdLength truncates msg to match the expected length of the RR.
|
||||
// Returns an error if msg is smaller than the expected size.
|
||||
func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
|
||||
lenrd := off + int(rdlength)
|
||||
if lenrd > len(msg) {
|
||||
return msg, &Error{err: "overflowing header size"}
|
||||
}
|
||||
return msg[:lenrd], nil
|
||||
}
|
||||
|
||||
var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
|
||||
|
||||
func fromBase32(s []byte) (buf []byte, err error) {
|
||||
for i, b := range s {
|
||||
if b >= 'a' && b <= 'z' {
|
||||
s[i] = b - 32
|
||||
}
|
||||
}
|
||||
buflen := base32HexNoPadEncoding.DecodedLen(len(s))
|
||||
buf = make([]byte, buflen)
|
||||
n, err := base32HexNoPadEncoding.Decode(buf, s)
|
||||
buf = buf[:n]
|
||||
return
|
||||
}
|
||||
|
||||
func toBase32(b []byte) string {
|
||||
return base32HexNoPadEncoding.EncodeToString(b)
|
||||
}
|
||||
|
||||
func fromBase64(s []byte) (buf []byte, err error) {
|
||||
buflen := base64.StdEncoding.DecodedLen(len(s))
|
||||
buf = make([]byte, buflen)
|
||||
n, err := base64.StdEncoding.Decode(buf, s)
|
||||
buf = buf[:n]
|
||||
return
|
||||
}
|
||||
|
||||
func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
|
||||
|
||||
// noRdata returns true if the Rdlength is zero.
|
||||
func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
|
||||
|
||||
func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
|
||||
if off+1 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint8"}
|
||||
}
|
||||
return msg[off], off + 1, nil
|
||||
}
|
||||
|
||||
func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
|
||||
if off+1 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint8"}
|
||||
}
|
||||
msg[off] = i
|
||||
return off + 1, nil
|
||||
}
|
||||
|
||||
func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
|
||||
if off+2 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint16"}
|
||||
}
|
||||
return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
|
||||
}
|
||||
|
||||
func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
|
||||
if off+2 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint16"}
|
||||
}
|
||||
binary.BigEndian.PutUint16(msg[off:], i)
|
||||
return off + 2, nil
|
||||
}
|
||||
|
||||
func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
|
||||
if off+4 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint32"}
|
||||
}
|
||||
return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
|
||||
}
|
||||
|
||||
func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
|
||||
if off+4 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint32"}
|
||||
}
|
||||
binary.BigEndian.PutUint32(msg[off:], i)
|
||||
return off + 4, nil
|
||||
}
|
||||
|
||||
func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
|
||||
if off+6 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
|
||||
}
|
||||
// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
|
||||
i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
|
||||
uint64(msg[off+4])<<8 | uint64(msg[off+5])
|
||||
off += 6
|
||||
return i, off, nil
|
||||
}
|
||||
|
||||
func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
|
||||
if off+6 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint64 as uint48"}
|
||||
}
|
||||
msg[off] = byte(i >> 40)
|
||||
msg[off+1] = byte(i >> 32)
|
||||
msg[off+2] = byte(i >> 24)
|
||||
msg[off+3] = byte(i >> 16)
|
||||
msg[off+4] = byte(i >> 8)
|
||||
msg[off+5] = byte(i)
|
||||
off += 6
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
|
||||
if off+8 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint64"}
|
||||
}
|
||||
return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
|
||||
}
|
||||
|
||||
func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
|
||||
if off+8 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint64"}
|
||||
}
|
||||
binary.BigEndian.PutUint64(msg[off:], i)
|
||||
off += 8
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackString(msg []byte, off int) (string, int, error) {
|
||||
if off+1 > len(msg) {
|
||||
return "", off, &Error{err: "overflow unpacking txt"}
|
||||
}
|
||||
l := int(msg[off])
|
||||
if off+l+1 > len(msg) {
|
||||
return "", off, &Error{err: "overflow unpacking txt"}
|
||||
}
|
||||
var s strings.Builder
|
||||
s.Grow(l)
|
||||
for _, b := range msg[off+1 : off+1+l] {
|
||||
switch {
|
||||
case b == '"' || b == '\\':
|
||||
s.WriteByte('\\')
|
||||
s.WriteByte(b)
|
||||
case b < ' ' || b > '~': // unprintable
|
||||
s.WriteString(escapeByte(b))
|
||||
default:
|
||||
s.WriteByte(b)
|
||||
}
|
||||
}
|
||||
off += 1 + l
|
||||
return s.String(), off, nil
|
||||
}
|
||||
|
||||
func packString(s string, msg []byte, off int) (int, error) {
|
||||
txtTmp := make([]byte, 256*4+1)
|
||||
off, err := packTxtString(s, msg, off, txtTmp)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
|
||||
if end > len(msg) {
|
||||
return "", len(msg), &Error{err: "overflow unpacking base32"}
|
||||
}
|
||||
s := toBase32(msg[off:end])
|
||||
return s, end, nil
|
||||
}
|
||||
|
||||
func packStringBase32(s string, msg []byte, off int) (int, error) {
|
||||
b32, err := fromBase32([]byte(s))
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
if off+len(b32) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing base32"}
|
||||
}
|
||||
copy(msg[off:off+len(b32)], b32)
|
||||
off += len(b32)
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
|
||||
// Rest of the RR is base64 encoded value, so we don't need an explicit length
|
||||
// to be set. Thus far all RR's that have base64 encoded fields have those as their
|
||||
// last one. What we do need is the end of the RR!
|
||||
if end > len(msg) {
|
||||
return "", len(msg), &Error{err: "overflow unpacking base64"}
|
||||
}
|
||||
s := toBase64(msg[off:end])
|
||||
return s, end, nil
|
||||
}
|
||||
|
||||
func packStringBase64(s string, msg []byte, off int) (int, error) {
|
||||
b64, err := fromBase64([]byte(s))
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
if off+len(b64) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing base64"}
|
||||
}
|
||||
copy(msg[off:off+len(b64)], b64)
|
||||
off += len(b64)
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringHex(msg []byte, off, end int) (string, int, error) {
|
||||
// Rest of the RR is hex encoded value, so we don't need an explicit length
|
||||
// to be set. NSEC and TSIG have hex fields with a length field.
|
||||
// What we do need is the end of the RR!
|
||||
if end > len(msg) {
|
||||
return "", len(msg), &Error{err: "overflow unpacking hex"}
|
||||
}
|
||||
|
||||
s := hex.EncodeToString(msg[off:end])
|
||||
return s, end, nil
|
||||
}
|
||||
|
||||
func packStringHex(s string, msg []byte, off int) (int, error) {
|
||||
h, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
if off+len(h) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing hex"}
|
||||
}
|
||||
copy(msg[off:off+len(h)], h)
|
||||
off += len(h)
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringAny(msg []byte, off, end int) (string, int, error) {
|
||||
if end > len(msg) {
|
||||
return "", len(msg), &Error{err: "overflow unpacking anything"}
|
||||
}
|
||||
return string(msg[off:end]), end, nil
|
||||
}
|
||||
|
||||
func packStringAny(s string, msg []byte, off int) (int, error) {
|
||||
if off+len(s) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing anything"}
|
||||
}
|
||||
copy(msg[off:off+len(s)], s)
|
||||
off += len(s)
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
|
||||
txt, off, err := unpackTxt(msg, off)
|
||||
if err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
return txt, off, nil
|
||||
}
|
||||
|
||||
func packStringTxt(s []string, msg []byte, off int) (int, error) {
|
||||
txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many.
|
||||
off, err := packTxt(s, msg, off, txtTmp)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
|
||||
var edns []EDNS0
|
||||
Option:
|
||||
var code uint16
|
||||
if off+4 > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking opt"}
|
||||
}
|
||||
code = binary.BigEndian.Uint16(msg[off:])
|
||||
off += 2
|
||||
optlen := binary.BigEndian.Uint16(msg[off:])
|
||||
off += 2
|
||||
if off+int(optlen) > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking opt"}
|
||||
}
|
||||
switch code {
|
||||
case EDNS0NSID:
|
||||
e := new(EDNS0_NSID)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0SUBNET:
|
||||
e := new(EDNS0_SUBNET)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0COOKIE:
|
||||
e := new(EDNS0_COOKIE)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0UL:
|
||||
e := new(EDNS0_UL)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0LLQ:
|
||||
e := new(EDNS0_LLQ)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0DAU:
|
||||
e := new(EDNS0_DAU)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0DHU:
|
||||
e := new(EDNS0_DHU)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0N3U:
|
||||
e := new(EDNS0_N3U)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0PADDING:
|
||||
e := new(EDNS0_PADDING)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
default:
|
||||
e := new(EDNS0_LOCAL)
|
||||
e.Code = code
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
}
|
||||
|
||||
if off < len(msg) {
|
||||
goto Option
|
||||
}
|
||||
|
||||
return edns, off, nil
|
||||
}
|
||||
|
||||
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
|
||||
for _, el := range options {
|
||||
b, err := el.pack()
|
||||
if err != nil || off+3 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing opt"}
|
||||
}
|
||||
binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
|
||||
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
|
||||
off += 4
|
||||
if off+len(b) > len(msg) {
|
||||
copy(msg[off:], b)
|
||||
off = len(msg)
|
||||
continue
|
||||
}
|
||||
// Actual data
|
||||
copy(msg[off:off+len(b)], b)
|
||||
off += len(b)
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringOctet(msg []byte, off int) (string, int, error) {
|
||||
s := string(msg[off:])
|
||||
return s, len(msg), nil
|
||||
}
|
||||
|
||||
func packStringOctet(s string, msg []byte, off int) (int, error) {
|
||||
txtTmp := make([]byte, 256*4+1)
|
||||
off, err := packOctetString(s, msg, off, txtTmp)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
|
||||
var nsec []uint16
|
||||
length, window, lastwindow := 0, 0, -1
|
||||
for off < len(msg) {
|
||||
if off+2 > len(msg) {
|
||||
return nsec, len(msg), &Error{err: "overflow unpacking nsecx"}
|
||||
}
|
||||
window = int(msg[off])
|
||||
length = int(msg[off+1])
|
||||
off += 2
|
||||
if window <= lastwindow {
|
||||
// RFC 4034: Blocks are present in the NSEC RR RDATA in
|
||||
// increasing numerical order.
|
||||
return nsec, len(msg), &Error{err: "out of order NSEC block"}
|
||||
}
|
||||
if length == 0 {
|
||||
// RFC 4034: Blocks with no types present MUST NOT be included.
|
||||
return nsec, len(msg), &Error{err: "empty NSEC block"}
|
||||
}
|
||||
if length > 32 {
|
||||
return nsec, len(msg), &Error{err: "NSEC block too long"}
|
||||
}
|
||||
if off+length > len(msg) {
|
||||
return nsec, len(msg), &Error{err: "overflowing NSEC block"}
|
||||
}
|
||||
|
||||
// Walk the bytes in the window and extract the type bits
|
||||
for j := 0; j < length; j++ {
|
||||
b := msg[off+j]
|
||||
// Check the bits one by one, and set the type
|
||||
if b&0x80 == 0x80 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+0))
|
||||
}
|
||||
if b&0x40 == 0x40 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+1))
|
||||
}
|
||||
if b&0x20 == 0x20 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+2))
|
||||
}
|
||||
if b&0x10 == 0x10 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+3))
|
||||
}
|
||||
if b&0x8 == 0x8 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+4))
|
||||
}
|
||||
if b&0x4 == 0x4 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+5))
|
||||
}
|
||||
if b&0x2 == 0x2 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+6))
|
||||
}
|
||||
if b&0x1 == 0x1 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+7))
|
||||
}
|
||||
}
|
||||
off += length
|
||||
lastwindow = window
|
||||
}
|
||||
return nsec, off, nil
|
||||
}
|
||||
|
||||
func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
|
||||
if len(bitmap) == 0 {
|
||||
return off, nil
|
||||
}
|
||||
var lastwindow, lastlength uint16
|
||||
for j := 0; j < len(bitmap); j++ {
|
||||
t := bitmap[j]
|
||||
window := t / 256
|
||||
length := (t-window*256)/8 + 1
|
||||
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
|
||||
off += int(lastlength) + 2
|
||||
lastlength = 0
|
||||
}
|
||||
if window < lastwindow || length < lastlength {
|
||||
return len(msg), &Error{err: "nsec bits out of order"}
|
||||
}
|
||||
if off+2+int(length) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing nsec"}
|
||||
}
|
||||
// Setting the window #
|
||||
msg[off] = byte(window)
|
||||
// Setting the octets length
|
||||
msg[off+1] = byte(length)
|
||||
// Setting the bit value for the type in the right octet
|
||||
msg[off+1+int(length)] |= byte(1 << (7 - t%8))
|
||||
lastwindow, lastlength = window, length
|
||||
}
|
||||
off += int(lastlength) + 2
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
|
||||
var (
|
||||
servers []string
|
||||
s string
|
||||
err error
|
||||
)
|
||||
if end > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking domain names"}
|
||||
}
|
||||
for off < end {
|
||||
s, off, err = UnpackDomainName(msg, off)
|
||||
if err != nil {
|
||||
return servers, len(msg), err
|
||||
}
|
||||
servers = append(servers, s)
|
||||
}
|
||||
return servers, off, nil
|
||||
}
|
||||
|
||||
func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) {
|
||||
var err error
|
||||
for j := 0; j < len(names); j++ {
|
||||
off, err = packDomainName(names[j], msg, off, compression, compress)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
}
|
||||
return off, nil
|
||||
}
|
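
The NSEC type-bitmap layout used by packDataNsec and unpackDataNsec above can be summarised with a small standalone sketch; this is illustration only, not library code.

package main

import "fmt"

// nsecBitmapPosition mirrors the arithmetic in packDataNsec: type t lives in
// window block t/256, in octet (t%256)/8 of that block, and is marked with
// the mask 1 << (7 - t%8), i.e. bit t%8 counted from the most significant bit.
func nsecBitmapPosition(t uint16) (window, octet int, mask byte) {
	window = int(t) / 256
	octet = (int(t) % 256) / 8
	mask = byte(1 << (7 - t%8))
	return
}

func main() {
	for _, t := range []uint16{1, 28, 257} { // A, AAAA, CAA
		w, o, m := nsecBitmapPosition(t)
		fmt.Printf("type %d -> window %d, octet %d, mask %#02x\n", t, w, o, m)
	}
}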
95
vendor/github.com/miekg/dns/nsecx.go
generated
vendored
Normal file
@ -0,0 +1,95 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
|
||||
func HashName(label string, ha uint8, iter uint16, salt string) string {
|
||||
if ha != SHA1 {
|
||||
return ""
|
||||
}
|
||||
|
||||
wireSalt := make([]byte, hex.DecodedLen(len(salt)))
|
||||
n, err := packStringHex(salt, wireSalt, 0)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
wireSalt = wireSalt[:n]
|
||||
|
||||
name := make([]byte, 255)
|
||||
off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
name = name[:off]
|
||||
|
||||
s := sha1.New()
|
||||
// k = 0
|
||||
s.Write(name)
|
||||
s.Write(wireSalt)
|
||||
nsec3 := s.Sum(nil)
|
||||
|
||||
// k > 0
|
||||
for k := uint16(0); k < iter; k++ {
|
||||
s.Reset()
|
||||
s.Write(nsec3)
|
||||
s.Write(wireSalt)
|
||||
nsec3 = s.Sum(nsec3[:0])
|
||||
}
|
||||
|
||||
return toBase32(nsec3)
|
||||
}
|
||||
|
||||
// Cover returns true if a name is covered by the NSEC3 record
|
||||
func (rr *NSEC3) Cover(name string) bool {
|
||||
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
|
||||
owner := strings.ToUpper(rr.Hdr.Name)
|
||||
labelIndices := Split(owner)
|
||||
if len(labelIndices) < 2 {
|
||||
return false
|
||||
}
|
||||
ownerHash := owner[:labelIndices[1]-1]
|
||||
ownerZone := owner[labelIndices[1]:]
|
||||
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
|
||||
return false
|
||||
}
|
||||
|
||||
nextHash := rr.NextDomain
|
||||
|
||||
// if an empty interval is found, try to cover wildcard hashes, so nameHash shouldn't match ownerHash
|
||||
if ownerHash == nextHash && nameHash != ownerHash { // empty interval
|
||||
return true
|
||||
}
|
||||
if ownerHash > nextHash { // end of zone
|
||||
if nameHash > ownerHash { // covered since there is nothing after ownerHash
|
||||
return true
|
||||
}
|
||||
return nameHash < nextHash // if nameHash is before beginning of zone it is covered
|
||||
}
|
||||
if nameHash < ownerHash { // nameHash is before ownerHash, not covered
|
||||
return false
|
||||
}
|
||||
return nameHash < nextHash // if nameHash is before nextHash it is covered (between ownerHash and nextHash)
|
||||
}
|
||||
|
||||
// Match returns true if a name matches the NSEC3 record
|
||||
func (rr *NSEC3) Match(name string) bool {
|
||||
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
|
||||
owner := strings.ToUpper(rr.Hdr.Name)
|
||||
labelIndices := Split(owner)
|
||||
if len(labelIndices) < 2 {
|
||||
return false
|
||||
}
|
||||
ownerHash := owner[:labelIndices[1]-1]
|
||||
ownerZone := owner[labelIndices[1]:]
|
||||
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
|
||||
return false
|
||||
}
|
||||
if ownerHash == nameHash {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
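
A usage sketch of HashName above; the salt and iteration count are arbitrary example parameters (they would normally come from the zone's NSEC3PARAM record).

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// RFC 5155 style hashing of an owner name; the output is uppercase base32hex.
	hashed := dns.HashName("example.org.", dns.SHA1, 12, "aabbccdd")
	fmt.Println(hashed)
}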
132
vendor/github.com/miekg/dns/privaterr.go
generated
vendored
Normal file
@ -0,0 +1,132 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// PrivateRdata is an interface used for implementing "Private Use" RR types, see
|
||||
// RFC 6895. This allows one to experiment with new RR types, without requesting an
|
||||
// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
|
||||
type PrivateRdata interface {
|
||||
// String returns the text presentation of the Rdata of the Private RR.
|
||||
String() string
|
||||
// Parse parses the Rdata of the private RR.
|
||||
Parse([]string) error
|
||||
// Pack is used when packing a private RR into a buffer.
|
||||
Pack([]byte) (int, error)
|
||||
// Unpack is used when unpacking a private RR from a buffer.
|
||||
// TODO(miek): diff. signature than Pack, see edns0.go for instance.
|
||||
Unpack([]byte) (int, error)
|
||||
// Copy copies the Rdata.
|
||||
Copy(PrivateRdata) error
|
||||
// Len returns the length in octets of the Rdata.
|
||||
Len() int
|
||||
}
|
||||
|
||||
// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
|
||||
// It mocks normal RRs and implements dns.RR interface.
|
||||
type PrivateRR struct {
|
||||
Hdr RR_Header
|
||||
Data PrivateRdata
|
||||
}
|
||||
|
||||
func mkPrivateRR(rrtype uint16) *PrivateRR {
|
||||
// Panics if RR is not an instance of PrivateRR.
|
||||
rrfunc, ok := TypeToRR[rrtype]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
|
||||
}
|
||||
|
||||
anyrr := rrfunc()
|
||||
rr, ok := anyrr.(*PrivateRR)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr))
|
||||
}
|
||||
|
||||
return rr
|
||||
}
|
||||
|
||||
// Header return the RR header of r.
|
||||
func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
|
||||
|
||||
func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
|
||||
|
||||
// Private len and copy parts to satisfy RR interface.
|
||||
func (r *PrivateRR) len(off int, compression map[string]struct{}) int {
|
||||
l := r.Hdr.len(off, compression)
|
||||
l += r.Data.Len()
|
||||
return l
|
||||
}
|
||||
|
||||
func (r *PrivateRR) copy() RR {
|
||||
// make new RR like this:
|
||||
rr := mkPrivateRR(r.Hdr.Rrtype)
|
||||
rr.Hdr = r.Hdr
|
||||
|
||||
err := r.Data.Copy(rr.Data)
|
||||
if err != nil {
|
||||
panic("dns: got value that could not be used to copy Private rdata")
|
||||
}
|
||||
return rr
|
||||
}
|
||||
|
||||
func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
|
||||
n, err := r.Data.Pack(msg[off:])
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off += n
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func (r *PrivateRR) unpack(msg []byte, off int) (int, error) {
|
||||
off1, err := r.Data.Unpack(msg[off:])
|
||||
off += off1
|
||||
return off, err
|
||||
}
|
||||
|
||||
func (r *PrivateRR) parse(c *zlexer, origin, file string) *ParseError {
|
||||
var l lex
|
||||
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
|
||||
Fetch:
|
||||
for {
|
||||
// TODO(miek): we could also be returning _QUOTE, this might or might not
|
||||
// be an issue (basically parsing TXT becomes hard)
|
||||
switch l, _ = c.Next(); l.value {
|
||||
case zNewline, zEOF:
|
||||
break Fetch
|
||||
case zString:
|
||||
text = append(text, l.token)
|
||||
}
|
||||
}
|
||||
|
||||
err := r.Data.Parse(text)
|
||||
if err != nil {
|
||||
return &ParseError{file, err.Error(), l}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false }
|
||||
|
||||
// PrivateHandle registers a private resource record type. It requires
|
||||
// string and numeric representation of private RR type and generator function as argument.
|
||||
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
|
||||
rtypestr = strings.ToUpper(rtypestr)
|
||||
|
||||
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
|
||||
TypeToString[rtype] = rtypestr
|
||||
StringToType[rtypestr] = rtype
|
||||
}
|
||||
|
||||
// PrivateHandleRemove removes definitions required to support private RR type.
|
||||
func PrivateHandleRemove(rtype uint16) {
|
||||
rtypestr, ok := TypeToString[rtype]
|
||||
if ok {
|
||||
delete(TypeToRR, rtype)
|
||||
delete(TypeToString, rtype)
|
||||
delete(StringToType, rtypestr)
|
||||
}
|
||||
}
|
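
A sketch of registering a private RR type through PrivateHandle above; the type name APAIR and the code 0xFF00 (from the RFC 6895 private-use range) are arbitrary choices for illustration.

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

// APAIR is a toy PrivateRdata implementation holding one free-form string.
type APAIR struct {
	data string
}

func (rd *APAIR) String() string { return rd.data }
func (rd *APAIR) Parse(txt []string) error {
	rd.data = strings.Join(txt, " ")
	return nil
}
func (rd *APAIR) Pack(buf []byte) (int, error) { return copy(buf, rd.data), nil }
func (rd *APAIR) Unpack(buf []byte) (int, error) {
	rd.data = string(buf)
	return len(buf), nil
}
func (rd *APAIR) Copy(dest dns.PrivateRdata) error {
	d, ok := dest.(*APAIR)
	if !ok {
		return dns.ErrRdata
	}
	d.data = rd.data
	return nil
}
func (rd *APAIR) Len() int { return len(rd.data) }

func main() {
	const typeAPAIR = 0xFF00 // private-use type code, arbitrary for this sketch
	dns.PrivateHandle("APAIR", typeAPAIR, func() dns.PrivateRdata { return new(APAIR) })
	defer dns.PrivateHandleRemove(typeAPAIR)

	rr, err := dns.NewRR("example.org. 3600 IN APAIR hello world")
	if err != nil {
		panic(err)
	}
	fmt.Println(rr)
}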
52
vendor/github.com/miekg/dns/reverse.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
package dns
|
||||
|
||||
// StringToType is the reverse of TypeToString, needed for string parsing.
|
||||
var StringToType = reverseInt16(TypeToString)
|
||||
|
||||
// StringToClass is the reverse of ClassToString, needed for string parsing.
|
||||
var StringToClass = reverseInt16(ClassToString)
|
||||
|
||||
// StringToOpcode is the reverse of OpcodeToString, needed for string parsing.
|
||||
var StringToOpcode = reverseInt(OpcodeToString)
|
||||
|
||||
// StringToRcode is the reverse of RcodeToString, needed for string parsing.
|
||||
var StringToRcode = reverseInt(RcodeToString)
|
||||
|
||||
func init() {
|
||||
// Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733.
|
||||
StringToRcode["NOTIMPL"] = RcodeNotImplemented
|
||||
}
|
||||
|
||||
// StringToAlgorithm is the reverse of AlgorithmToString.
|
||||
var StringToAlgorithm = reverseInt8(AlgorithmToString)
|
||||
|
||||
// StringToHash is a map of names to hash IDs.
|
||||
var StringToHash = reverseInt8(HashToString)
|
||||
|
||||
// StringToCertType is the reverse of CertTypeToString.
|
||||
var StringToCertType = reverseInt16(CertTypeToString)
|
||||
|
||||
// Reverse a map
|
||||
func reverseInt8(m map[uint8]string) map[string]uint8 {
|
||||
n := make(map[string]uint8, len(m))
|
||||
for u, s := range m {
|
||||
n[s] = u
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func reverseInt16(m map[uint16]string) map[string]uint16 {
|
||||
n := make(map[string]uint16, len(m))
|
||||
for u, s := range m {
|
||||
n[s] = u
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func reverseInt(m map[int]string) map[string]int {
|
||||
n := make(map[string]int, len(m))
|
||||
for u, s := range m {
|
||||
n[s] = u
|
||||
}
|
||||
return n
|
||||
}
|
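
A quick sketch of looking things up through the reverse maps defined above.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.StringToType["AAAA"])       // 28
	fmt.Println(dns.TypeToString[dns.TypeAAAA]) // AAAA
	fmt.Println(dns.StringToClass["IN"])        // 1
	fmt.Println(dns.StringToRcode["NOTIMPL"])   // 4, the preserved NOTIMP alias
}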
86
vendor/github.com/miekg/dns/sanitize.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
|
||||
package dns
|
||||
|
||||
// Dedup removes identical RRs from rrs. It preserves the original ordering.
|
||||
// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
|
||||
// rrs.
|
||||
// m is used to store the RRs temporarily. If it is nil a new map will be allocated.
|
||||
func Dedup(rrs []RR, m map[string]RR) []RR {
|
||||
|
||||
if m == nil {
|
||||
m = make(map[string]RR)
|
||||
}
|
||||
// Save the keys, so we don't have to call normalizedString twice.
|
||||
keys := make([]*string, 0, len(rrs))
|
||||
|
||||
for _, r := range rrs {
|
||||
key := normalizedString(r)
|
||||
keys = append(keys, &key)
|
||||
if mr, ok := m[key]; ok {
|
||||
// Shortest TTL wins.
|
||||
rh, mrh := r.Header(), mr.Header()
|
||||
if mrh.Ttl > rh.Ttl {
|
||||
mrh.Ttl = rh.Ttl
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
m[key] = r
|
||||
}
|
||||
// If the length of the result map equals the amount of RRs we got,
|
||||
// it means they were all different. We can then just return the original rrset.
|
||||
if len(m) == len(rrs) {
|
||||
return rrs
|
||||
}
|
||||
|
||||
j := 0
|
||||
for i, r := range rrs {
|
||||
// If keys[i] lives in the map, we should copy and remove it.
|
||||
if _, ok := m[*keys[i]]; ok {
|
||||
delete(m, *keys[i])
|
||||
rrs[j] = r
|
||||
j++
|
||||
}
|
||||
|
||||
if len(m) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return rrs[:j]
|
||||
}
|
||||
|
||||
// normalizedString returns a normalized string from r. The TTL
|
||||
// is removed and the domain name is lowercased. We go from this:
|
||||
// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to:
|
||||
// lowercasename<TAB>CLASS<TAB>TYPE...
|
||||
func normalizedString(r RR) string {
|
||||
// A string Go DNS makes has: domainname<TAB>TTL<TAB>...
|
||||
b := []byte(r.String())
|
||||
|
||||
// find the first non-escaped tab, then another, so we capture where the TTL lives.
|
||||
esc := false
|
||||
ttlStart, ttlEnd := 0, 0
|
||||
for i := 0; i < len(b) && ttlEnd == 0; i++ {
|
||||
switch {
|
||||
case b[i] == '\\':
|
||||
esc = !esc
|
||||
case b[i] == '\t' && !esc:
|
||||
if ttlStart == 0 {
|
||||
ttlStart = i
|
||||
continue
|
||||
}
|
||||
if ttlEnd == 0 {
|
||||
ttlEnd = i
|
||||
}
|
||||
case b[i] >= 'A' && b[i] <= 'Z' && !esc:
|
||||
b[i] += 32
|
||||
default:
|
||||
esc = false
|
||||
}
|
||||
}
|
||||
|
||||
// remove TTL.
|
||||
copy(b[ttlStart:], b[ttlEnd:])
|
||||
cut := ttlEnd - ttlStart
|
||||
return string(b[:len(b)-cut])
|
||||
}
|
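
A usage sketch of Dedup above; the records are made-up examples showing that name case is ignored and the lowest TTL wins.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	mk := func(s string) dns.RR {
		rr, err := dns.NewRR(s)
		if err != nil {
			panic(err)
		}
		return rr
	}

	rrs := []dns.RR{
		mk("example.org. 300 IN A 192.0.2.1"),
		mk("EXAMPLE.org. 60  IN A 192.0.2.1"), // duplicate apart from case and TTL
		mk("example.org. 300 IN A 192.0.2.2"),
	}

	for _, rr := range dns.Dedup(rrs, nil) {
		fmt.Println(rr) // two records remain, the first now carrying TTL 60
	}
}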
1337
vendor/github.com/miekg/dns/scan.go
generated
vendored
Normal file
File diff suppressed because it is too large
1937
vendor/github.com/miekg/dns/scan_rr.go
generated
vendored
Normal file
File diff suppressed because it is too large
147
vendor/github.com/miekg/dns/serve_mux.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ServeMux is a DNS request multiplexer. It matches the zone name of
// each incoming request against a list of registered patterns and calls
// the handler for the pattern that most closely matches the zone name.
|
||||
//
|
||||
// ServeMux is DNSSEC aware, meaning that queries for the DS record are
|
||||
// redirected to the parent zone (if that is also registered), otherwise
|
||||
// the child gets the query.
|
||||
//
|
||||
// ServeMux is also safe for concurrent access from multiple goroutines.
|
||||
//
|
||||
// The zero ServeMux is empty and ready for use.
|
||||
type ServeMux struct {
|
||||
z map[string]Handler
|
||||
m sync.RWMutex
|
||||
}
|
||||
|
||||
// NewServeMux allocates and returns a new ServeMux.
|
||||
func NewServeMux() *ServeMux {
|
||||
return new(ServeMux)
|
||||
}
|
||||
|
||||
// DefaultServeMux is the default ServeMux used by Serve.
|
||||
var DefaultServeMux = NewServeMux()
|
||||
|
||||
func (mux *ServeMux) match(q string, t uint16) Handler {
|
||||
mux.m.RLock()
|
||||
defer mux.m.RUnlock()
|
||||
if mux.z == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var handler Handler
|
||||
|
||||
// TODO(tmthrgd): Once https://go-review.googlesource.com/c/go/+/137575
|
||||
// lands in a go release, replace the following with strings.ToLower.
|
||||
var sb strings.Builder
|
||||
for i := 0; i < len(q); i++ {
|
||||
c := q[i]
|
||||
if !(c >= 'A' && c <= 'Z') {
|
||||
continue
|
||||
}
|
||||
|
||||
sb.Grow(len(q))
|
||||
sb.WriteString(q[:i])
|
||||
|
||||
for ; i < len(q); i++ {
|
||||
c := q[i]
|
||||
if c >= 'A' && c <= 'Z' {
|
||||
c += 'a' - 'A'
|
||||
}
|
||||
|
||||
sb.WriteByte(c)
|
||||
}
|
||||
|
||||
q = sb.String()
|
||||
break
|
||||
}
|
||||
|
||||
for off, end := 0, false; !end; off, end = NextLabel(q, off) {
|
||||
if h, ok := mux.z[q[off:]]; ok {
|
||||
if t != TypeDS {
|
||||
return h
|
||||
}
|
||||
// Continue for DS to see if we have a parent too, if so delegate to the parent
|
||||
handler = h
|
||||
}
|
||||
}
|
||||
|
||||
// Wildcard match, if we have found nothing try the root zone as a last resort.
|
||||
if h, ok := mux.z["."]; ok {
|
||||
return h
|
||||
}
|
||||
|
||||
return handler
|
||||
}
|
||||
|
||||
// Handle adds a handler to the ServeMux for pattern.
|
||||
func (mux *ServeMux) Handle(pattern string, handler Handler) {
|
||||
if pattern == "" {
|
||||
panic("dns: invalid pattern " + pattern)
|
||||
}
|
||||
mux.m.Lock()
|
||||
if mux.z == nil {
|
||||
mux.z = make(map[string]Handler)
|
||||
}
|
||||
mux.z[Fqdn(pattern)] = handler
|
||||
mux.m.Unlock()
|
||||
}
|
||||
|
||||
// HandleFunc adds a handler function to the ServeMux for pattern.
|
||||
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
|
||||
mux.Handle(pattern, HandlerFunc(handler))
|
||||
}
|
||||
|
||||
// HandleRemove deregisters the handler specific for pattern from the ServeMux.
|
||||
func (mux *ServeMux) HandleRemove(pattern string) {
|
||||
if pattern == "" {
|
||||
panic("dns: invalid pattern " + pattern)
|
||||
}
|
||||
mux.m.Lock()
|
||||
delete(mux.z, Fqdn(pattern))
|
||||
mux.m.Unlock()
|
||||
}
|
||||
|
||||
// ServeDNS dispatches the request to the handler whose pattern most
|
||||
// closely matches the request message.
|
||||
//
|
||||
// ServeDNS is DNSSEC aware, meaning that queries for the DS record
|
||||
// are redirected to the parent zone (if that is also registered),
|
||||
// otherwise the child gets the query.
|
||||
//
|
||||
// If no handler is found, or there is no question, a standard SERVFAIL
|
||||
// message is returned.
|
||||
func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {
|
||||
var h Handler
|
||||
if len(req.Question) >= 1 { // allow more than one question
|
||||
h = mux.match(req.Question[0].Name, req.Question[0].Qtype)
|
||||
}
|
||||
|
||||
if h != nil {
|
||||
h.ServeDNS(w, req)
|
||||
} else {
|
||||
HandleFailed(w, req)
|
||||
}
|
||||
}
|
||||
|
||||
// Handle registers the handler with the given pattern
|
||||
// in the DefaultServeMux. The documentation for
|
||||
// ServeMux explains how patterns are matched.
|
||||
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
|
||||
|
||||
// HandleRemove deregisters the handle with the given pattern
|
||||
// in the DefaultServeMux.
|
||||
func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
|
||||
|
||||
// HandleFunc registers the handler function with the given pattern
|
||||
// in the DefaultServeMux.
|
||||
func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
|
||||
DefaultServeMux.HandleFunc(pattern, handler)
|
||||
}
|
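
A minimal sketch of wiring a handler into the DefaultServeMux described above and serving it; the zone name and listen address are arbitrary examples.

package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Most-specific zone wins; queries outside example.org. fall back to a
	// "." handler if one is registered, otherwise they are answered SERVFAIL.
	dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		w.WriteMsg(m)
	})

	// A nil handler means the DefaultServeMux is used.
	if err := dns.ListenAndServe("127.0.0.1:5353", "udp", nil); err != nil {
		log.Fatal(err)
	}
}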
866
vendor/github.com/miekg/dns/server.go
generated
vendored
Normal file
@ -0,0 +1,866 @@
|
||||
// DNS server implementation.
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Default maximum number of TCP queries before we close the socket.
|
||||
const maxTCPQueries = 128
|
||||
|
||||
// The maximum number of idle workers.
|
||||
//
|
||||
// This controls the maximum number of workers that are allowed to stay
|
||||
// idle waiting for incoming requests before being torn down.
|
||||
//
|
||||
// If this limit is reached, the server will just keep spawning new
|
||||
// workers (goroutines) for each incoming request. In this case, each
|
||||
// worker will only be used for a single request.
|
||||
const maxIdleWorkersCount = 10000
|
||||
|
||||
// The maximum length of time a worker may idle for before being destroyed.
|
||||
const idleWorkerTimeout = 10 * time.Second
|
||||
|
||||
// aLongTimeAgo is a non-zero time, far in the past, used for
|
||||
// immediate cancelation of network operations.
|
||||
var aLongTimeAgo = time.Unix(1, 0)
|
||||
|
||||
// Handler is implemented by any value that implements ServeDNS.
|
||||
type Handler interface {
|
||||
ServeDNS(w ResponseWriter, r *Msg)
|
||||
}
|
||||
|
||||
// The HandlerFunc type is an adapter to allow the use of
|
||||
// ordinary functions as DNS handlers. If f is a function
|
||||
// with the appropriate signature, HandlerFunc(f) is a
|
||||
// Handler object that calls f.
|
||||
type HandlerFunc func(ResponseWriter, *Msg)
|
||||
|
||||
// ServeDNS calls f(w, r).
|
||||
func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
|
||||
f(w, r)
|
||||
}
|
||||
|
||||
// A ResponseWriter interface is used by a DNS handler to
// construct a DNS response.
|
||||
type ResponseWriter interface {
|
||||
// LocalAddr returns the net.Addr of the server
|
||||
LocalAddr() net.Addr
|
||||
// RemoteAddr returns the net.Addr of the client that sent the current request.
|
||||
RemoteAddr() net.Addr
|
||||
// WriteMsg writes a reply back to the client.
|
||||
WriteMsg(*Msg) error
|
||||
// Write writes a raw buffer back to the client.
|
||||
Write([]byte) (int, error)
|
||||
// Close closes the connection.
|
||||
Close() error
|
||||
// TsigStatus returns the status of the Tsig.
|
||||
TsigStatus() error
|
||||
// TsigTimersOnly sets the tsig timers only boolean.
|
||||
TsigTimersOnly(bool)
|
||||
// Hijack lets the caller take over the connection.
|
||||
// After a call to Hijack(), the DNS package will not do anything with the connection.
|
||||
Hijack()
|
||||
}
|
||||
|
||||
// A ConnectionStater interface is used by a DNS Handler to access TLS connection state
|
||||
// when available.
|
||||
type ConnectionStater interface {
|
||||
ConnectionState() *tls.ConnectionState
|
||||
}
|
||||
|
||||
type response struct {
|
||||
msg []byte
|
||||
closed bool // connection has been closed
|
||||
hijacked bool // connection has been hijacked by handler
|
||||
tsigTimersOnly bool
|
||||
tsigStatus error
|
||||
tsigRequestMAC string
|
||||
tsigSecret map[string]string // the tsig secrets
|
||||
udp *net.UDPConn // i/o connection if UDP was used
|
||||
tcp net.Conn // i/o connection if TCP was used
|
||||
udpSession *SessionUDP // oob data to get egress interface right
|
||||
writer Writer // writer to output the raw DNS bits
|
||||
wg *sync.WaitGroup // for graceful shutdown
|
||||
}
|
||||
|
||||
// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets.
|
||||
func HandleFailed(w ResponseWriter, r *Msg) {
|
||||
m := new(Msg)
|
||||
m.SetRcode(r, RcodeServerFailure)
|
||||
// does not matter if this write fails
|
||||
w.WriteMsg(m)
|
||||
}
|
||||
|
||||
// ListenAndServe starts a server on the address and network specified, invoking handler
|
||||
// for incoming queries.
|
||||
func ListenAndServe(addr string, network string, handler Handler) error {
|
||||
server := &Server{Addr: addr, Net: network, Handler: handler}
|
||||
return server.ListenAndServe()
|
||||
}
|
||||
|
||||
// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
|
||||
// http://golang.org/pkg/net/http/#ListenAndServeTLS
|
||||
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config := tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
}
|
||||
|
||||
server := &Server{
|
||||
Addr: addr,
|
||||
Net: "tcp-tls",
|
||||
TLSConfig: &config,
|
||||
Handler: handler,
|
||||
}
|
||||
|
||||
return server.ListenAndServe()
|
||||
}
|
||||
|
||||
// ActivateAndServe activates a server with a listener from systemd,
|
||||
// l and p should not both be non-nil.
|
||||
// If both l and p are not nil only p will be used.
|
||||
// Invoke handler for incoming queries.
|
||||
func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
|
||||
server := &Server{Listener: l, PacketConn: p, Handler: handler}
|
||||
return server.ActivateAndServe()
|
||||
}
|
||||
|
||||
// Writer writes raw DNS messages; each call to Write should send an entire message.
|
||||
type Writer interface {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
|
||||
type Reader interface {
|
||||
// ReadTCP reads a raw message from a TCP connection. Implementations may alter
|
||||
// connection properties, for example the read-deadline.
|
||||
ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
|
||||
// ReadUDP reads a raw message from a UDP connection. Implementations may alter
|
||||
// connection properties, for example the read-deadline.
|
||||
ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
|
||||
}
|
||||
|
||||
// defaultReader is an adapter for the Server struct that implements the Reader interface
|
||||
// using the readTCP and readUDP func of the embedded Server.
|
||||
type defaultReader struct {
|
||||
*Server
|
||||
}
|
||||
|
||||
func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
|
||||
return dr.readTCP(conn, timeout)
|
||||
}
|
||||
|
||||
func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
|
||||
return dr.readUDP(conn, timeout)
|
||||
}
|
||||
|
||||
// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
|
||||
// Implementations should never return a nil Reader.
|
||||
type DecorateReader func(Reader) Reader
|
||||
|
||||
// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
|
||||
// Implementations should never return a nil Writer.
|
||||
type DecorateWriter func(Writer) Writer
|
||||
|
||||
// A Server defines parameters for running a DNS server.
|
||||
type Server struct {
|
||||
// Address to listen on, ":dns" if empty.
|
||||
Addr string
|
||||
// if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one
|
||||
Net string
|
||||
// TCP Listener to use, this is to aid in systemd's socket activation.
|
||||
Listener net.Listener
|
||||
// TLS connection configuration
|
||||
TLSConfig *tls.Config
|
||||
// UDP "Listener" to use, this is to aid in systemd's socket activation.
|
||||
PacketConn net.PacketConn
|
||||
// Handler to invoke, dns.DefaultServeMux if nil.
|
||||
Handler Handler
|
||||
// Default buffer size to use to read incoming UDP messages. If not set
|
||||
// it defaults to MinMsgSize (512 B).
|
||||
UDPSize int
|
||||
// The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second.
|
||||
ReadTimeout time.Duration
|
||||
// The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second.
|
||||
WriteTimeout time.Duration
|
||||
// TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
|
||||
IdleTimeout func() time.Duration
|
||||
// Secret(s) for Tsig map[<zonename>]<base64 secret>. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2).
|
||||
TsigSecret map[string]string
|
||||
// If NotifyStartedFunc is set it is called once the server has started listening.
|
||||
NotifyStartedFunc func()
|
||||
// DecorateReader is optional, allows customization of the process that reads raw DNS messages.
|
||||
DecorateReader DecorateReader
|
||||
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
|
||||
DecorateWriter DecorateWriter
|
||||
// Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1).
|
||||
MaxTCPQueries int
|
||||
// Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address.
|
||||
// It is only supported on go1.11+ and when using ListenAndServe.
|
||||
ReusePort bool
|
||||
// MsgAcceptFunc will check the incoming message and will reject it early in the process.
|
||||
// By default DefaultMsgAcceptFunc will be used.
|
||||
MsgAcceptFunc MsgAcceptFunc
|
||||
|
||||
// UDP packet or TCP connection queue
|
||||
queue chan *response
|
||||
// Workers count
|
||||
workersCount int32
|
||||
|
||||
// Shutdown handling
|
||||
lock sync.RWMutex
|
||||
started bool
|
||||
shutdown chan struct{}
|
||||
conns map[net.Conn]struct{}
|
||||
|
||||
// A pool for UDP message buffers.
|
||||
udpPool sync.Pool
|
||||
}
|
||||
|
||||
func (srv *Server) isStarted() bool {
|
||||
srv.lock.RLock()
|
||||
started := srv.started
|
||||
srv.lock.RUnlock()
|
||||
return started
|
||||
}
|
||||
|
||||
func (srv *Server) worker(w *response) {
|
||||
srv.serve(w)
|
||||
|
||||
for {
|
||||
count := atomic.LoadInt32(&srv.workersCount)
|
||||
if count > maxIdleWorkersCount {
|
||||
return
|
||||
}
|
||||
if atomic.CompareAndSwapInt32(&srv.workersCount, count, count+1) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
defer atomic.AddInt32(&srv.workersCount, -1)
|
||||
|
||||
inUse := false
|
||||
timeout := time.NewTimer(idleWorkerTimeout)
|
||||
defer timeout.Stop()
|
||||
LOOP:
|
||||
for {
|
||||
select {
|
||||
case w, ok := <-srv.queue:
|
||||
if !ok {
|
||||
break LOOP
|
||||
}
|
||||
inUse = true
|
||||
srv.serve(w)
|
||||
case <-timeout.C:
|
||||
if !inUse {
|
||||
break LOOP
|
||||
}
|
||||
inUse = false
|
||||
timeout.Reset(idleWorkerTimeout)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (srv *Server) spawnWorker(w *response) {
|
||||
select {
|
||||
case srv.queue <- w:
|
||||
default:
|
||||
go srv.worker(w)
|
||||
}
|
||||
}
|
||||
|
||||
func makeUDPBuffer(size int) func() interface{} {
|
||||
return func() interface{} {
|
||||
return make([]byte, size)
|
||||
}
|
||||
}
|
||||
|
||||
func (srv *Server) init() {
|
||||
srv.queue = make(chan *response)
|
||||
|
||||
srv.shutdown = make(chan struct{})
|
||||
srv.conns = make(map[net.Conn]struct{})
|
||||
|
||||
if srv.UDPSize == 0 {
|
||||
srv.UDPSize = MinMsgSize
|
||||
}
|
||||
if srv.MsgAcceptFunc == nil {
|
||||
srv.MsgAcceptFunc = defaultMsgAcceptFunc
|
||||
}
|
||||
|
||||
srv.udpPool.New = makeUDPBuffer(srv.UDPSize)
|
||||
}
|
||||
|
||||
func unlockOnce(l sync.Locker) func() {
|
||||
var once sync.Once
|
||||
return func() { once.Do(l.Unlock) }
|
||||
}
|
||||
|
||||
// ListenAndServe starts a nameserver on the configured address in *Server.
|
||||
func (srv *Server) ListenAndServe() error {
|
||||
unlock := unlockOnce(&srv.lock)
|
||||
srv.lock.Lock()
|
||||
defer unlock()
|
||||
|
||||
if srv.started {
|
||||
return &Error{err: "server already started"}
|
||||
}
|
||||
|
||||
addr := srv.Addr
|
||||
if addr == "" {
|
||||
addr = ":domain"
|
||||
}
|
||||
|
||||
srv.init()
|
||||
defer close(srv.queue)
|
||||
|
||||
switch srv.Net {
|
||||
case "tcp", "tcp4", "tcp6":
|
||||
l, err := listenTCP(srv.Net, addr, srv.ReusePort)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.Listener = l
|
||||
srv.started = true
|
||||
unlock()
|
||||
return srv.serveTCP(l)
|
||||
case "tcp-tls", "tcp4-tls", "tcp6-tls":
|
||||
if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) {
|
||||
return errors.New("dns: neither Certificates nor GetCertificate set in Config")
|
||||
}
|
||||
network := strings.TrimSuffix(srv.Net, "-tls")
|
||||
l, err := listenTCP(network, addr, srv.ReusePort)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l = tls.NewListener(l, srv.TLSConfig)
|
||||
srv.Listener = l
|
||||
srv.started = true
|
||||
unlock()
|
||||
return srv.serveTCP(l)
|
||||
case "udp", "udp4", "udp6":
|
||||
l, err := listenUDP(srv.Net, addr, srv.ReusePort)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
u := l.(*net.UDPConn)
|
||||
if e := setUDPSocketOptions(u); e != nil {
|
||||
return e
|
||||
}
|
||||
srv.PacketConn = l
|
||||
srv.started = true
|
||||
unlock()
|
||||
return srv.serveUDP(u)
|
||||
}
|
||||
return &Error{err: "bad network"}
|
||||
}
|
||||
|
||||
// ActivateAndServe starts a nameserver with the PacketConn or Listener
|
||||
// configured in *Server. Its main use is to start a server from systemd.
|
||||
func (srv *Server) ActivateAndServe() error {
|
||||
unlock := unlockOnce(&srv.lock)
|
||||
srv.lock.Lock()
|
||||
defer unlock()
|
||||
|
||||
if srv.started {
|
||||
return &Error{err: "server already started"}
|
||||
}
|
||||
|
||||
srv.init()
|
||||
defer close(srv.queue)
|
||||
|
||||
pConn := srv.PacketConn
|
||||
l := srv.Listener
|
||||
if pConn != nil {
|
||||
// Check that the PacketConn interface's concrete type is valid and
// its value is not nil.
|
||||
if t, ok := pConn.(*net.UDPConn); ok && t != nil {
|
||||
if e := setUDPSocketOptions(t); e != nil {
|
||||
return e
|
||||
}
|
||||
srv.started = true
|
||||
unlock()
|
||||
return srv.serveUDP(t)
|
||||
}
|
||||
}
|
||||
if l != nil {
|
||||
srv.started = true
|
||||
unlock()
|
||||
return srv.serveTCP(l)
|
||||
}
|
||||
return &Error{err: "bad listeners"}
|
||||
}
|
||||
|
||||
// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and
|
||||
// ActivateAndServe will return.
|
||||
func (srv *Server) Shutdown() error {
|
||||
return srv.ShutdownContext(context.Background())
|
||||
}
|
||||
|
||||
// ShutdownContext shuts down a server. After a call to ShutdownContext,
|
||||
// ListenAndServe and ActivateAndServe will return.
|
||||
//
|
||||
// A context.Context may be passed to limit how long to wait for connections
|
||||
// to terminate.
|
||||
func (srv *Server) ShutdownContext(ctx context.Context) error {
|
||||
srv.lock.Lock()
|
||||
if !srv.started {
|
||||
srv.lock.Unlock()
|
||||
return &Error{err: "server not started"}
|
||||
}
|
||||
|
||||
srv.started = false
|
||||
|
||||
if srv.PacketConn != nil {
|
||||
srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads
|
||||
}
|
||||
|
||||
if srv.Listener != nil {
|
||||
srv.Listener.Close()
|
||||
}
|
||||
|
||||
for rw := range srv.conns {
|
||||
rw.SetReadDeadline(aLongTimeAgo) // Unblock reads
|
||||
}
|
||||
|
||||
srv.lock.Unlock()
|
||||
|
||||
if testShutdownNotify != nil {
|
||||
testShutdownNotify.Broadcast()
|
||||
}
|
||||
|
||||
var ctxErr error
|
||||
select {
|
||||
case <-srv.shutdown:
|
||||
case <-ctx.Done():
|
||||
ctxErr = ctx.Err()
|
||||
}
|
||||
|
||||
if srv.PacketConn != nil {
|
||||
srv.PacketConn.Close()
|
||||
}
|
||||
|
||||
return ctxErr
|
||||
}
|
||||
|
||||
var testShutdownNotify *sync.Cond
|
||||
|
||||
// getReadTimeout is a helper func to use system timeout if server did not intend to change it.
|
||||
func (srv *Server) getReadTimeout() time.Duration {
|
||||
if srv.ReadTimeout != 0 {
|
||||
return srv.ReadTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
// serveTCP starts a TCP listener for the server.
|
||||
func (srv *Server) serveTCP(l net.Listener) error {
|
||||
defer l.Close()
|
||||
|
||||
if srv.NotifyStartedFunc != nil {
|
||||
srv.NotifyStartedFunc()
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
defer func() {
|
||||
wg.Wait()
|
||||
close(srv.shutdown)
|
||||
}()
|
||||
|
||||
for srv.isStarted() {
|
||||
rw, err := l.Accept()
|
||||
if err != nil {
|
||||
if !srv.isStarted() {
|
||||
return nil
|
||||
}
|
||||
if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
srv.lock.Lock()
|
||||
// Track the connection to allow unblocking reads on shutdown.
|
||||
srv.conns[rw] = struct{}{}
|
||||
srv.lock.Unlock()
|
||||
wg.Add(1)
|
||||
srv.spawnWorker(&response{
|
||||
tsigSecret: srv.TsigSecret,
|
||||
tcp: rw,
|
||||
wg: &wg,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// serveUDP starts a UDP listener for the server.
|
||||
func (srv *Server) serveUDP(l *net.UDPConn) error {
|
||||
defer l.Close()
|
||||
|
||||
if srv.NotifyStartedFunc != nil {
|
||||
srv.NotifyStartedFunc()
|
||||
}
|
||||
|
||||
reader := Reader(defaultReader{srv})
|
||||
if srv.DecorateReader != nil {
|
||||
reader = srv.DecorateReader(reader)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
defer func() {
|
||||
wg.Wait()
|
||||
close(srv.shutdown)
|
||||
}()
|
||||
|
||||
rtimeout := srv.getReadTimeout()
|
||||
// deadline is not used here
|
||||
for srv.isStarted() {
|
||||
m, s, err := reader.ReadUDP(l, rtimeout)
|
||||
if err != nil {
|
||||
if !srv.isStarted() {
|
||||
return nil
|
||||
}
|
||||
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if len(m) < headerSize {
|
||||
if cap(m) == srv.UDPSize {
|
||||
srv.udpPool.Put(m[:srv.UDPSize])
|
||||
}
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
srv.spawnWorker(&response{
|
||||
msg: m,
|
||||
tsigSecret: srv.TsigSecret,
|
||||
udp: l,
|
||||
udpSession: s,
|
||||
wg: &wg,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (srv *Server) serve(w *response) {
|
||||
if srv.DecorateWriter != nil {
|
||||
w.writer = srv.DecorateWriter(w)
|
||||
} else {
|
||||
w.writer = w
|
||||
}
|
||||
|
||||
if w.udp != nil {
|
||||
// serve UDP
|
||||
srv.serveDNS(w)
|
||||
|
||||
w.wg.Done()
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if !w.hijacked {
|
||||
w.Close()
|
||||
}
|
||||
|
||||
srv.lock.Lock()
|
||||
delete(srv.conns, w.tcp)
|
||||
srv.lock.Unlock()
|
||||
|
||||
w.wg.Done()
|
||||
}()
|
||||
|
||||
reader := Reader(defaultReader{srv})
|
||||
if srv.DecorateReader != nil {
|
||||
reader = srv.DecorateReader(reader)
|
||||
}
|
||||
|
||||
idleTimeout := tcpIdleTimeout
|
||||
if srv.IdleTimeout != nil {
|
||||
idleTimeout = srv.IdleTimeout()
|
||||
}
|
||||
|
||||
timeout := srv.getReadTimeout()
|
||||
|
||||
limit := srv.MaxTCPQueries
|
||||
if limit == 0 {
|
||||
limit = maxTCPQueries
|
||||
}
|
||||
|
||||
for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ {
|
||||
var err error
|
||||
w.msg, err = reader.ReadTCP(w.tcp, timeout)
|
||||
if err != nil {
|
||||
// TODO(tmthrgd): handle error
|
||||
break
|
||||
}
|
||||
srv.serveDNS(w)
|
||||
if w.tcp == nil {
|
||||
break // Close() was called
|
||||
}
|
||||
if w.hijacked {
|
||||
break // client will call Close() themselves
|
||||
}
|
||||
// The first read uses the read timeout, the rest use the
|
||||
// idle timeout.
|
||||
timeout = idleTimeout
|
||||
}
|
||||
}
|
||||
|
||||
func (srv *Server) disposeBuffer(w *response) {
|
||||
if w.udp != nil && cap(w.msg) == srv.UDPSize {
|
||||
srv.udpPool.Put(w.msg[:srv.UDPSize])
|
||||
}
|
||||
w.msg = nil
|
||||
}
|
||||
|
||||
func (srv *Server) serveDNS(w *response) {
|
||||
dh, off, err := unpackMsgHdr(w.msg, 0)
|
||||
if err != nil {
|
||||
// Let client hang, they are sending crap; any reply can be used to amplify.
|
||||
return
|
||||
}
|
||||
|
||||
req := new(Msg)
|
||||
req.setHdr(dh)
|
||||
|
||||
switch srv.MsgAcceptFunc(dh) {
|
||||
case MsgAccept:
|
||||
case MsgIgnore:
|
||||
return
|
||||
case MsgReject:
|
||||
req.SetRcodeFormatError(req)
|
||||
// Are we allowed to delete any OPT records here?
|
||||
req.Ns, req.Answer, req.Extra = nil, nil, nil
|
||||
|
||||
w.WriteMsg(req)
|
||||
srv.disposeBuffer(w)
|
||||
return
|
||||
}
|
||||
|
||||
if err := req.unpack(dh, w.msg, off); err != nil {
|
||||
req.SetRcodeFormatError(req)
|
||||
req.Ns, req.Answer, req.Extra = nil, nil, nil
|
||||
|
||||
w.WriteMsg(req)
|
||||
srv.disposeBuffer(w)
|
||||
return
|
||||
}
|
||||
|
||||
w.tsigStatus = nil
|
||||
if w.tsigSecret != nil {
|
||||
if t := req.IsTsig(); t != nil {
|
||||
if secret, ok := w.tsigSecret[t.Hdr.Name]; ok {
|
||||
w.tsigStatus = TsigVerify(w.msg, secret, "", false)
|
||||
} else {
|
||||
w.tsigStatus = ErrSecret
|
||||
}
|
||||
w.tsigTimersOnly = false
|
||||
w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
|
||||
}
|
||||
}
|
||||
|
||||
srv.disposeBuffer(w)
|
||||
|
||||
handler := srv.Handler
|
||||
if handler == nil {
|
||||
handler = DefaultServeMux
|
||||
}
|
||||
|
||||
handler.ServeDNS(w, req) // Writes back to the client
|
||||
}
|
||||
|
||||
func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
|
||||
// If we race with ShutdownContext, the read deadline may
|
||||
// have been set in the distant past to unblock the read
|
||||
// below. We must not override it, otherwise we may block
|
||||
// ShutdownContext.
|
||||
srv.lock.RLock()
|
||||
if srv.started {
|
||||
conn.SetReadDeadline(time.Now().Add(timeout))
|
||||
}
|
||||
srv.lock.RUnlock()
|
||||
|
||||
l := make([]byte, 2)
|
||||
n, err := conn.Read(l)
|
||||
if err != nil || n != 2 {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, ErrShortRead
|
||||
}
|
||||
length := binary.BigEndian.Uint16(l)
|
||||
if length == 0 {
|
||||
return nil, ErrShortRead
|
||||
}
|
||||
m := make([]byte, int(length))
|
||||
n, err = conn.Read(m[:int(length)])
|
||||
if err != nil || n == 0 {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, ErrShortRead
|
||||
}
|
||||
i := n
|
||||
for i < int(length) {
|
||||
j, err := conn.Read(m[i:int(length)])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i += j
|
||||
}
|
||||
n = i
|
||||
m = m[:n]
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
|
||||
srv.lock.RLock()
|
||||
if srv.started {
|
||||
// See the comment in readTCP above.
|
||||
conn.SetReadDeadline(time.Now().Add(timeout))
|
||||
}
|
||||
srv.lock.RUnlock()
|
||||
|
||||
m := srv.udpPool.Get().([]byte)
|
||||
n, s, err := ReadFromSessionUDP(conn, m)
|
||||
if err != nil {
|
||||
srv.udpPool.Put(m)
|
||||
return nil, nil, err
|
||||
}
|
||||
m = m[:n]
|
||||
return m, s, nil
|
||||
}
|
||||
|
||||
// WriteMsg implements the ResponseWriter.WriteMsg method.
|
||||
func (w *response) WriteMsg(m *Msg) (err error) {
|
||||
if w.closed {
|
||||
return &Error{err: "WriteMsg called after Close"}
|
||||
}
|
||||
|
||||
var data []byte
|
||||
if w.tsigSecret != nil { // if no secrets, don't check for the tsig (which is a longer check)
|
||||
if t := m.IsTsig(); t != nil {
|
||||
data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.writer.Write(data)
|
||||
return err
|
||||
}
|
||||
}
|
||||
data, err = m.Pack()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.writer.Write(data)
|
||||
return err
|
||||
}
|
||||
|
||||
// Write implements the ResponseWriter.Write method.
|
||||
func (w *response) Write(m []byte) (int, error) {
|
||||
if w.closed {
|
||||
return 0, &Error{err: "Write called after Close"}
|
||||
}
|
||||
|
||||
switch {
|
||||
case w.udp != nil:
|
||||
return WriteToSessionUDP(w.udp, m, w.udpSession)
|
||||
case w.tcp != nil:
|
||||
lm := len(m)
|
||||
if lm < 2 {
|
||||
return 0, io.ErrShortBuffer
|
||||
}
|
||||
if lm > MaxMsgSize {
|
||||
return 0, &Error{err: "message too large"}
|
||||
}
|
||||
l := make([]byte, 2, 2+lm)
|
||||
binary.BigEndian.PutUint16(l, uint16(lm))
|
||||
m = append(l, m...)
|
||||
|
||||
n, err := io.Copy(w.tcp, bytes.NewReader(m))
|
||||
return int(n), err
|
||||
default:
|
||||
panic("dns: internal error: udp and tcp both nil")
|
||||
}
|
||||
}
|
||||
|
||||
// LocalAddr implements the ResponseWriter.LocalAddr method.
|
||||
func (w *response) LocalAddr() net.Addr {
|
||||
switch {
|
||||
case w.udp != nil:
|
||||
return w.udp.LocalAddr()
|
||||
case w.tcp != nil:
|
||||
return w.tcp.LocalAddr()
|
||||
default:
|
||||
panic("dns: internal error: udp and tcp both nil")
|
||||
}
|
||||
}
|
||||
|
||||
// RemoteAddr implements the ResponseWriter.RemoteAddr method.
|
||||
func (w *response) RemoteAddr() net.Addr {
|
||||
switch {
|
||||
case w.udpSession != nil:
|
||||
return w.udpSession.RemoteAddr()
|
||||
case w.tcp != nil:
|
||||
return w.tcp.RemoteAddr()
|
||||
default:
|
||||
panic("dns: internal error: udpSession and tcp both nil")
|
||||
}
|
||||
}
|
||||
|
||||
// TsigStatus implements the ResponseWriter.TsigStatus method.
|
||||
func (w *response) TsigStatus() error { return w.tsigStatus }
|
||||
|
||||
// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method.
|
||||
func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b }
|
||||
|
||||
// Hijack implements the ResponseWriter.Hijack method.
|
||||
func (w *response) Hijack() { w.hijacked = true }
|
||||
|
||||
// Close implements the ResponseWriter.Close method.
|
||||
func (w *response) Close() error {
|
||||
if w.closed {
|
||||
return &Error{err: "connection already closed"}
|
||||
}
|
||||
w.closed = true
|
||||
|
||||
switch {
|
||||
case w.udp != nil:
|
||||
// Can't close the udp conn, as that is actually the listener.
|
||||
return nil
|
||||
case w.tcp != nil:
|
||||
return w.tcp.Close()
|
||||
default:
|
||||
panic("dns: internal error: udp and tcp both nil")
|
||||
}
|
||||
}
|
||||
|
||||
// ConnectionState() implements the ConnectionStater.ConnectionState() interface.
|
||||
func (w *response) ConnectionState() *tls.ConnectionState {
|
||||
type tlsConnectionStater interface {
|
||||
ConnectionState() tls.ConnectionState
|
||||
}
|
||||
if v, ok := w.tcp.(tlsConnectionStater); ok {
|
||||
t := v.ConnectionState()
|
||||
return &t
|
||||
}
|
||||
return nil
|
||||
}
|
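As a rough sketch of how the Server type and the ListenAndServe helpers above are typically driven: register a handler and start a UDP listener. dns.HandleFunc and (*Msg).SetReply come from other files of this vendored package and are assumed here; the listen address and zone are placeholders.

package main

import (
    "log"

    "github.com/miekg/dns"
)

func main() {
    // Answer every query under example.org. with an empty NOERROR reply.
    dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
        m := new(dns.Msg)
        m.SetReply(r)
        w.WriteMsg(m)
    })

    srv := &dns.Server{Addr: ":8053", Net: "udp"}
    // ListenAndServe blocks until Shutdown is called or an error occurs.
    if err := srv.ListenAndServe(); err != nil {
        log.Fatal(err)
    }
}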
213
vendor/github.com/miekg/dns/sig0.go
generated
vendored
Normal file
@ -0,0 +1,213 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"encoding/binary"
|
||||
"math/big"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Sign signs a dns.Msg. It fills the signature with the appropriate data.
|
||||
// The SIG record should have the SignerName, KeyTag, Algorithm, Inception
|
||||
// and Expiration set.
|
||||
func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
|
||||
if k == nil {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
|
||||
return nil, ErrKey
|
||||
}
|
||||
|
||||
rr.Hdr = RR_Header{Name: ".", Rrtype: TypeSIG, Class: ClassANY, Ttl: 0}
|
||||
rr.OrigTtl, rr.TypeCovered, rr.Labels = 0, 0, 0
|
||||
|
||||
buf := make([]byte, m.Len()+Len(rr))
|
||||
mbuf, err := m.PackBuffer(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if &buf[0] != &mbuf[0] {
|
||||
return nil, ErrBuf
|
||||
}
|
||||
off, err := PackRR(rr, buf, len(mbuf), nil, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf = buf[:off:cap(buf)]
|
||||
|
||||
hash, ok := AlgorithmToHash[rr.Algorithm]
|
||||
if !ok {
|
||||
return nil, ErrAlg
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
// Write SIG rdata
|
||||
hasher.Write(buf[len(mbuf)+1+2+2+4+2:])
|
||||
// Write message
|
||||
hasher.Write(buf[:len(mbuf)])
|
||||
|
||||
signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rr.Signature = toBase64(signature)
|
||||
|
||||
buf = append(buf, signature...)
|
||||
if len(buf) > int(^uint16(0)) {
|
||||
return nil, ErrBuf
|
||||
}
|
||||
// Adjust sig data length
|
||||
rdoff := len(mbuf) + 1 + 2 + 2 + 4
|
||||
rdlen := binary.BigEndian.Uint16(buf[rdoff:])
|
||||
rdlen += uint16(len(signature))
|
||||
binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
|
||||
// Adjust additional count
|
||||
adc := binary.BigEndian.Uint16(buf[10:])
|
||||
adc++
|
||||
binary.BigEndian.PutUint16(buf[10:], adc)
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// Verify validates the message buf using the key k.
|
||||
// It's assumed that buf is a valid message from which rr was unpacked.
|
||||
func (rr *SIG) Verify(k *KEY, buf []byte) error {
|
||||
if k == nil {
|
||||
return ErrKey
|
||||
}
|
||||
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
var hash crypto.Hash
|
||||
switch rr.Algorithm {
|
||||
case DSA, RSASHA1:
|
||||
hash = crypto.SHA1
|
||||
case RSASHA256, ECDSAP256SHA256:
|
||||
hash = crypto.SHA256
|
||||
case ECDSAP384SHA384:
|
||||
hash = crypto.SHA384
|
||||
case RSASHA512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return ErrAlg
|
||||
}
|
||||
hasher := hash.New()
|
||||
|
||||
buflen := len(buf)
|
||||
qdc := binary.BigEndian.Uint16(buf[4:])
|
||||
anc := binary.BigEndian.Uint16(buf[6:])
|
||||
auc := binary.BigEndian.Uint16(buf[8:])
|
||||
adc := binary.BigEndian.Uint16(buf[10:])
|
||||
offset := headerSize
|
||||
var err error
|
||||
for i := uint16(0); i < qdc && offset < buflen; i++ {
|
||||
_, offset, err = UnpackDomainName(buf, offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Skip past Type and Class
|
||||
offset += 2 + 2
|
||||
}
|
||||
for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ {
|
||||
_, offset, err = UnpackDomainName(buf, offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Skip past Type, Class and TTL
|
||||
offset += 2 + 2 + 4
|
||||
if offset+1 >= buflen {
|
||||
continue
|
||||
}
|
||||
rdlen := binary.BigEndian.Uint16(buf[offset:])
|
||||
offset += 2
|
||||
offset += int(rdlen)
|
||||
}
|
||||
if offset >= buflen {
|
||||
return &Error{err: "overflowing unpacking signed message"}
|
||||
}
|
||||
|
||||
// offset should be just prior to SIG
|
||||
bodyend := offset
|
||||
// owner name SHOULD be root
|
||||
_, offset, err = UnpackDomainName(buf, offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Skip Type, Class, TTL, RDLen
|
||||
offset += 2 + 2 + 4 + 2
|
||||
sigstart := offset
|
||||
// Skip Type Covered, Algorithm, Labels, Original TTL
|
||||
offset += 2 + 1 + 1 + 4
|
||||
if offset+4+4 >= buflen {
|
||||
return &Error{err: "overflow unpacking signed message"}
|
||||
}
|
||||
expire := binary.BigEndian.Uint32(buf[offset:])
|
||||
offset += 4
|
||||
incept := binary.BigEndian.Uint32(buf[offset:])
|
||||
offset += 4
|
||||
now := uint32(time.Now().Unix())
|
||||
if now < incept || now > expire {
|
||||
return ErrTime
|
||||
}
|
||||
// Skip key tag
|
||||
offset += 2
|
||||
var signername string
|
||||
signername, offset, err = UnpackDomainName(buf, offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the key has come from the DNS, name compression might
// have mangled the case of the name.
|
||||
if !strings.EqualFold(signername, k.Header().Name) {
|
||||
return &Error{err: "signer name doesn't match key name"}
|
||||
}
|
||||
sigend := offset
|
||||
hasher.Write(buf[sigstart:sigend])
|
||||
hasher.Write(buf[:10])
|
||||
hasher.Write([]byte{
|
||||
byte((adc - 1) << 8),
|
||||
byte(adc - 1),
|
||||
})
|
||||
hasher.Write(buf[12:bodyend])
|
||||
|
||||
hashed := hasher.Sum(nil)
|
||||
sig := buf[sigend:]
|
||||
switch k.Algorithm {
|
||||
case DSA:
|
||||
pk := k.publicKeyDSA()
|
||||
sig = sig[1:]
|
||||
r := big.NewInt(0)
|
||||
r.SetBytes(sig[:len(sig)/2])
|
||||
s := big.NewInt(0)
|
||||
s.SetBytes(sig[len(sig)/2:])
|
||||
if pk != nil {
|
||||
if dsa.Verify(pk, hashed, r, s) {
|
||||
return nil
|
||||
}
|
||||
return ErrSig
|
||||
}
|
||||
case RSASHA1, RSASHA256, RSASHA512:
|
||||
pk := k.publicKeyRSA()
|
||||
if pk != nil {
|
||||
return rsa.VerifyPKCS1v15(pk, hash, hashed, sig)
|
||||
}
|
||||
case ECDSAP256SHA256, ECDSAP384SHA384:
|
||||
pk := k.publicKeyECDSA()
|
||||
r := big.NewInt(0)
|
||||
r.SetBytes(sig[:len(sig)/2])
|
||||
s := big.NewInt(0)
|
||||
s.SetBytes(sig[len(sig)/2:])
|
||||
if pk != nil {
|
||||
if ecdsa.Verify(pk, hashed, r, s) {
|
||||
return nil
|
||||
}
|
||||
return ErrSig
|
||||
}
|
||||
}
|
||||
return ErrKeyAlg
|
||||
}
|
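A minimal sketch of the SIG(0) flow these two methods implement, assuming a crypto.Signer and a matching public *dns.KEY already exist: Sign appends the SIG record to the packed message, and Verify checks such a buffer against the key. The algorithm choice and the helper name are illustrative only.

package sig0example

import (
    "crypto"
    "time"

    "github.com/miekg/dns"
)

// sig0Sketch signs m with SIG(0) and then verifies the resulting buffer.
func sig0Sketch(m *dns.Msg, signer crypto.Signer, pub *dns.KEY) error {
    sig := new(dns.SIG)
    sig.Algorithm = dns.ECDSAP256SHA256
    sig.KeyTag = pub.KeyTag()
    sig.SignerName = pub.Hdr.Name
    sig.Inception = uint32(time.Now().Add(-time.Minute).Unix())
    sig.Expiration = uint32(time.Now().Add(5 * time.Minute).Unix())

    // Sign returns the whole message in wire format with the SIG record appended.
    buf, err := sig.Sign(signer, m)
    if err != nil {
        return err
    }
    // A receiver would unpack the SIG RR from buf; here we reuse sig directly.
    return sig.Verify(pub, buf)
}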
61
vendor/github.com/miekg/dns/singleinflight.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Adapted for dns package usage by Miek Gieben.
|
||||
|
||||
package dns
|
||||
|
||||
import "sync"
|
||||
import "time"
|
||||
|
||||
// call is an in-flight or completed singleflight.Do call
|
||||
type call struct {
|
||||
wg sync.WaitGroup
|
||||
val *Msg
|
||||
rtt time.Duration
|
||||
err error
|
||||
dups int
|
||||
}
|
||||
|
||||
// singleflight represents a class of work and forms a namespace in
|
||||
// which units of work can be executed with duplicate suppression.
|
||||
type singleflight struct {
|
||||
sync.Mutex // protects m
|
||||
m map[string]*call // lazily initialized
|
||||
|
||||
dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges
|
||||
}
|
||||
|
||||
// Do executes and returns the results of the given function, making
|
||||
// sure that only one execution is in-flight for a given key at a
|
||||
// time. If a duplicate comes in, the duplicate caller waits for the
|
||||
// original to complete and receives the same results.
|
||||
// The return value shared indicates whether v was given to multiple callers.
|
||||
func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
|
||||
g.Lock()
|
||||
if g.m == nil {
|
||||
g.m = make(map[string]*call)
|
||||
}
|
||||
if c, ok := g.m[key]; ok {
|
||||
c.dups++
|
||||
g.Unlock()
|
||||
c.wg.Wait()
|
||||
return c.val, c.rtt, c.err, true
|
||||
}
|
||||
c := new(call)
|
||||
c.wg.Add(1)
|
||||
g.m[key] = c
|
||||
g.Unlock()
|
||||
|
||||
c.val, c.rtt, c.err = fn()
|
||||
c.wg.Done()
|
||||
|
||||
if !g.dontDeleteForTesting {
|
||||
g.Lock()
|
||||
delete(g.m, key)
|
||||
g.Unlock()
|
||||
}
|
||||
|
||||
return c.val, c.rtt, c.err, c.dups > 0
|
||||
}
|
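Because singleflight is unexported, the following is a hypothetical in-package sketch of the calling pattern: two goroutines request the same key, fn runs once, and the duplicate caller receives the shared result. (*Msg).SetQuestion comes from another file of the package and is assumed here.

package dns

import (
    "sync"
    "time"
)

func exampleSingleflight() {
    var g singleflight
    var wg sync.WaitGroup

    fn := func() (*Msg, time.Duration, error) {
        time.Sleep(10 * time.Millisecond) // stand-in for a real upstream exchange
        m := new(Msg)
        m.SetQuestion("example.org.", TypeA)
        return m, 10 * time.Millisecond, nil
    }

    for i := 0; i < 2; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            _, _, _, shared := g.Do("example.org. A", fn)
            _ = shared // true when the result was shared with another caller
        }()
    }
    wg.Wait()
}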
44
vendor/github.com/miekg/dns/smimea.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/hex"
|
||||
)
|
||||
|
||||
// Sign creates a SMIMEA record from an SSL certificate.
|
||||
func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
|
||||
r.Hdr.Rrtype = TypeSMIMEA
|
||||
r.Usage = uint8(usage)
|
||||
r.Selector = uint8(selector)
|
||||
r.MatchingType = uint8(matchingType)
|
||||
|
||||
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify verifies a SMIMEA record against an SSL certificate. If it is OK
|
||||
// a nil error is returned.
|
||||
func (r *SMIMEA) Verify(cert *x509.Certificate) error {
|
||||
c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
|
||||
if err != nil {
|
||||
return err // Not also ErrSig?
|
||||
}
|
||||
if r.Certificate == c {
|
||||
return nil
|
||||
}
|
||||
return ErrSig // ErrSig, really?
|
||||
}
|
||||
|
||||
// SMIMEAName returns the ownername of a SMIMEA resource record as per the
|
||||
// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3
|
||||
func SMIMEAName(email, domain string) (string, error) {
|
||||
hasher := sha256.New()
|
||||
hasher.Write([]byte(email))
|
||||
|
||||
// RFC Section 3: "The local-part is hashed using the SHA2-256
|
||||
// algorithm with the hash truncated to 28 octets and
|
||||
// represented in its hexadecimal representation to become the
|
||||
// left-most label in the prepared domain name"
|
||||
return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil
|
||||
}
|
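A short sketch of the helpers above, assuming a certificate is already at hand: SMIMEAName derives the owner name and Sign fills the record. The usage/selector/matching-type triple (3, 1, 1, i.e. DANE-EE, SPKI, SHA2-256) and the helper name are illustrative.

package smimeaexample

import (
    "crypto/x509"

    "github.com/miekg/dns"
)

// buildSMIMEA derives the SMIMEA owner name for an address and fills a record
// from the certificate.
func buildSMIMEA(email, domain string, cert *x509.Certificate) (*dns.SMIMEA, string, error) {
    name, err := dns.SMIMEAName(email, domain)
    if err != nil {
        return nil, "", err
    }
    rr := new(dns.SMIMEA)
    rr.Hdr.Name = name
    if err := rr.Sign(3, 1, 1, cert); err != nil {
        return nil, "", err
    }
    return rr, name, nil
}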
44
vendor/github.com/miekg/dns/tlsa.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Sign creates a TLSA record from an SSL certificate.
|
||||
func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
|
||||
r.Hdr.Rrtype = TypeTLSA
|
||||
r.Usage = uint8(usage)
|
||||
r.Selector = uint8(selector)
|
||||
r.MatchingType = uint8(matchingType)
|
||||
|
||||
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify verifies a TLSA record against an SSL certificate. If it is OK
|
||||
// a nil error is returned.
|
||||
func (r *TLSA) Verify(cert *x509.Certificate) error {
|
||||
c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
|
||||
if err != nil {
|
||||
return err // Not also ErrSig?
|
||||
}
|
||||
if r.Certificate == c {
|
||||
return nil
|
||||
}
|
||||
return ErrSig // ErrSig, really?
|
||||
}
|
||||
|
||||
// TLSAName returns the ownername of a TLSA resource record as per the
|
||||
// rules specified in RFC 6698, Section 3.
|
||||
func TLSAName(name, service, network string) (string, error) {
|
||||
if !IsFqdn(name) {
|
||||
return "", ErrFqdn
|
||||
}
|
||||
p, err := net.LookupPort(network, service)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil
|
||||
}
|
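A comparable sketch for TLSA, assuming the record has already been fetched over DNS: TLSAName derives the expected owner name for an HTTPS service and Verify checks a presented certificate. The host name and helper name are placeholders.

package tlsaexample

import (
    "crypto/x509"

    "github.com/miekg/dns"
)

// checkTLSA builds the TLSA owner name for an HTTPS service and verifies a
// presented certificate against an already-fetched TLSA record.
func checkTLSA(rr *dns.TLSA, cert *x509.Certificate) (string, error) {
    // name is the record's expected owner, e.g. "_443._tcp.www.example.com."
    name, err := dns.TLSAName("www.example.com.", "https", "tcp")
    if err != nil {
        return "", err
    }
    // Verify returns nil when the certificate matches the record's data.
    return name, rr.Verify(cert)
}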
389
vendor/github.com/miekg/dns/tsig.go
generated
vendored
Normal file
@ -0,0 +1,389 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"hash"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HMAC hashing codes. These are transmitted as domain names.
|
||||
const (
|
||||
HmacMD5 = "hmac-md5.sig-alg.reg.int."
|
||||
HmacSHA1 = "hmac-sha1."
|
||||
HmacSHA256 = "hmac-sha256."
|
||||
HmacSHA512 = "hmac-sha512."
|
||||
)
|
||||
|
||||
// TSIG is the RR that holds the transaction signature of a message.
|
||||
// See RFC 2845 and RFC 4635.
|
||||
type TSIG struct {
|
||||
Hdr RR_Header
|
||||
Algorithm string `dns:"domain-name"`
|
||||
TimeSigned uint64 `dns:"uint48"`
|
||||
Fudge uint16
|
||||
MACSize uint16
|
||||
MAC string `dns:"size-hex:MACSize"`
|
||||
OrigId uint16
|
||||
Error uint16
|
||||
OtherLen uint16
|
||||
OtherData string `dns:"size-hex:OtherLen"`
|
||||
}
|
||||
|
||||
// TSIG has no official presentation format, but this will suffice.
|
||||
|
||||
func (rr *TSIG) String() string {
|
||||
s := "\n;; TSIG PSEUDOSECTION:\n"
|
||||
s += rr.Hdr.String() +
|
||||
" " + rr.Algorithm +
|
||||
" " + tsigTimeToString(rr.TimeSigned) +
|
||||
" " + strconv.Itoa(int(rr.Fudge)) +
|
||||
" " + strconv.Itoa(int(rr.MACSize)) +
|
||||
" " + strings.ToUpper(rr.MAC) +
|
||||
" " + strconv.Itoa(int(rr.OrigId)) +
|
||||
" " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR
|
||||
" " + strconv.Itoa(int(rr.OtherLen)) +
|
||||
" " + rr.OtherData
|
||||
return s
|
||||
}
|
||||
|
||||
func (rr *TSIG) parse(c *zlexer, origin, file string) *ParseError {
|
||||
panic("dns: internal error: parse should never be called on TSIG")
|
||||
}
|
||||
|
||||
// The following values must be put in wireformat, so that the MAC can be calculated.
|
||||
// RFC 2845, section 3.4.2. TSIG Variables.
|
||||
type tsigWireFmt struct {
|
||||
// From RR_Header
|
||||
Name string `dns:"domain-name"`
|
||||
Class uint16
|
||||
Ttl uint32
|
||||
// Rdata of the TSIG
|
||||
Algorithm string `dns:"domain-name"`
|
||||
TimeSigned uint64 `dns:"uint48"`
|
||||
Fudge uint16
|
||||
// MACSize, MAC and OrigId excluded
|
||||
Error uint16
|
||||
OtherLen uint16
|
||||
OtherData string `dns:"size-hex:OtherLen"`
|
||||
}
|
||||
|
||||
// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC
|
||||
type macWireFmt struct {
|
||||
MACSize uint16
|
||||
MAC string `dns:"size-hex:MACSize"`
|
||||
}
|
||||
|
||||
// 3.3. Time values used in TSIG calculations
|
||||
type timerWireFmt struct {
|
||||
TimeSigned uint64 `dns:"uint48"`
|
||||
Fudge uint16
|
||||
}
|
||||
|
||||
// TsigGenerate fills out the TSIG record attached to the message.
|
||||
// The message should contain
|
||||
// a "stub" TSIG RR with the algorithm, key name (owner name of the RR),
|
||||
// time fudge (defaults to 300 seconds) and the current time.
|
||||
// The TSIG MAC is saved in that Tsig RR.
|
||||
// When TsigGenerate is called for the first time requestMAC is set to the empty string and
|
||||
// timersOnly is false.
|
||||
// If something goes wrong an error is returned, otherwise it is nil.
|
||||
func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) {
|
||||
if m.IsTsig() == nil {
|
||||
panic("dns: TSIG not last RR in additional")
|
||||
}
|
||||
// If we barf here, the caller is to blame
|
||||
rawsecret, err := fromBase64([]byte(secret))
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
rr := m.Extra[len(m.Extra)-1].(*TSIG)
|
||||
m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg
|
||||
mbuf, err := m.Pack()
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly)
|
||||
|
||||
t := new(TSIG)
|
||||
var h hash.Hash
|
||||
switch strings.ToLower(rr.Algorithm) {
|
||||
case HmacMD5:
|
||||
h = hmac.New(md5.New, rawsecret)
|
||||
case HmacSHA1:
|
||||
h = hmac.New(sha1.New, rawsecret)
|
||||
case HmacSHA256:
|
||||
h = hmac.New(sha256.New, rawsecret)
|
||||
case HmacSHA512:
|
||||
h = hmac.New(sha512.New, rawsecret)
|
||||
default:
|
||||
return nil, "", ErrKeyAlg
|
||||
}
|
||||
h.Write(buf)
|
||||
t.MAC = hex.EncodeToString(h.Sum(nil))
|
||||
t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
|
||||
|
||||
t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0}
|
||||
t.Fudge = rr.Fudge
|
||||
t.TimeSigned = rr.TimeSigned
|
||||
t.Algorithm = rr.Algorithm
|
||||
t.OrigId = m.Id
|
||||
|
||||
tbuf := make([]byte, Len(t))
|
||||
off, err := PackRR(t, tbuf, 0, nil, false)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
mbuf = append(mbuf, tbuf[:off]...)
|
||||
// Update the ArCount directly in the buffer.
|
||||
binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1))
|
||||
|
||||
return mbuf, t.MAC, nil
|
||||
}
|
||||
|
||||
// TsigVerify verifies the TSIG on a message.
|
||||
// If the signature does not validate err contains the
|
||||
// error, otherwise it is nil.
|
||||
func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
|
||||
rawsecret, err := fromBase64([]byte(secret))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Strip the TSIG from the incoming msg
|
||||
stripped, tsig, err := stripTsig(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgMAC, err := hex.DecodeString(tsig.MAC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
|
||||
|
||||
// Fudge factor works both ways. A message can arrive before it was signed because
|
||||
// of clock skew.
|
||||
now := uint64(time.Now().Unix())
|
||||
ti := now - tsig.TimeSigned
|
||||
if now < tsig.TimeSigned {
|
||||
ti = tsig.TimeSigned - now
|
||||
}
|
||||
if uint64(tsig.Fudge) < ti {
|
||||
return ErrTime
|
||||
}
|
||||
|
||||
var h hash.Hash
|
||||
switch strings.ToLower(tsig.Algorithm) {
|
||||
case HmacMD5:
|
||||
h = hmac.New(md5.New, rawsecret)
|
||||
case HmacSHA1:
|
||||
h = hmac.New(sha1.New, rawsecret)
|
||||
case HmacSHA256:
|
||||
h = hmac.New(sha256.New, rawsecret)
|
||||
case HmacSHA512:
|
||||
h = hmac.New(sha512.New, rawsecret)
|
||||
default:
|
||||
return ErrKeyAlg
|
||||
}
|
||||
h.Write(buf)
|
||||
if !hmac.Equal(h.Sum(nil), msgMAC) {
|
||||
return ErrSig
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create a wiredata buffer for the MAC calculation.
|
||||
func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte {
|
||||
var buf []byte
|
||||
if rr.TimeSigned == 0 {
|
||||
rr.TimeSigned = uint64(time.Now().Unix())
|
||||
}
|
||||
if rr.Fudge == 0 {
|
||||
rr.Fudge = 300 // Standard (RFC) default.
|
||||
}
|
||||
|
||||
// Replace message ID in header with original ID from TSIG
|
||||
binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId)
|
||||
|
||||
if requestMAC != "" {
|
||||
m := new(macWireFmt)
|
||||
m.MACSize = uint16(len(requestMAC) / 2)
|
||||
m.MAC = requestMAC
|
||||
buf = make([]byte, len(requestMAC)) // long enough
|
||||
n, _ := packMacWire(m, buf)
|
||||
buf = buf[:n]
|
||||
}
|
||||
|
||||
tsigvar := make([]byte, DefaultMsgSize)
|
||||
if timersOnly {
|
||||
tsig := new(timerWireFmt)
|
||||
tsig.TimeSigned = rr.TimeSigned
|
||||
tsig.Fudge = rr.Fudge
|
||||
n, _ := packTimerWire(tsig, tsigvar)
|
||||
tsigvar = tsigvar[:n]
|
||||
} else {
|
||||
tsig := new(tsigWireFmt)
|
||||
tsig.Name = strings.ToLower(rr.Hdr.Name)
|
||||
tsig.Class = ClassANY
|
||||
tsig.Ttl = rr.Hdr.Ttl
|
||||
tsig.Algorithm = strings.ToLower(rr.Algorithm)
|
||||
tsig.TimeSigned = rr.TimeSigned
|
||||
tsig.Fudge = rr.Fudge
|
||||
tsig.Error = rr.Error
|
||||
tsig.OtherLen = rr.OtherLen
|
||||
tsig.OtherData = rr.OtherData
|
||||
n, _ := packTsigWire(tsig, tsigvar)
|
||||
tsigvar = tsigvar[:n]
|
||||
}
|
||||
|
||||
if requestMAC != "" {
|
||||
x := append(buf, msgbuf...)
|
||||
buf = append(x, tsigvar...)
|
||||
} else {
|
||||
buf = append(msgbuf, tsigvar...)
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
// Strip the TSIG from the raw message.
|
||||
func stripTsig(msg []byte) ([]byte, *TSIG, error) {
|
||||
// Copied from msg.go's Unpack() Header, but modified.
|
||||
var (
|
||||
dh Header
|
||||
err error
|
||||
)
|
||||
off, tsigoff := 0, 0
|
||||
|
||||
if dh, off, err = unpackMsgHdr(msg, off); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if dh.Arcount == 0 {
|
||||
return nil, nil, ErrNoSig
|
||||
}
|
||||
|
||||
// Rcode, see msg.go Unpack()
|
||||
if int(dh.Bits&0xF) == RcodeNotAuth {
|
||||
return nil, nil, ErrAuth
|
||||
}
|
||||
|
||||
for i := 0; i < int(dh.Qdcount); i++ {
|
||||
_, off, err = unpackQuestion(msg, off)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
_, off, err = unpackRRslice(int(dh.Ancount), msg, off)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
_, off, err = unpackRRslice(int(dh.Nscount), msg, off)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
rr := new(TSIG)
|
||||
var extra RR
|
||||
for i := 0; i < int(dh.Arcount); i++ {
|
||||
tsigoff = off
|
||||
extra, off, err = UnpackRR(msg, off)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if extra.Header().Rrtype == TypeTSIG {
|
||||
rr = extra.(*TSIG)
|
||||
// Adjust Arcount.
|
||||
arcount := binary.BigEndian.Uint16(msg[10:])
|
||||
binary.BigEndian.PutUint16(msg[10:], arcount-1)
|
||||
break
|
||||
}
|
||||
}
|
||||
if rr == nil {
|
||||
return nil, nil, ErrNoSig
|
||||
}
|
||||
return msg[:tsigoff], rr, nil
|
||||
}
|
||||
|
||||
// Translate the TSIG time signed into a date. There is no
|
||||
// need for RFC1982 calculations as this date is 48 bits.
|
||||
func tsigTimeToString(t uint64) string {
|
||||
ti := time.Unix(int64(t), 0).UTC()
|
||||
return ti.Format("20060102150405")
|
||||
}
|
||||
|
||||
func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) {
|
||||
// copied from zmsg.go TSIG packing
|
||||
// RR_Header
|
||||
off, err := PackDomainName(tw.Name, msg, 0, nil, false)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint16(tw.Class, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint32(tw.Ttl, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
|
||||
off, err = PackDomainName(tw.Algorithm, msg, off, nil, false)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint48(tw.TimeSigned, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint16(tw.Fudge, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
|
||||
off, err = packUint16(tw.Error, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint16(tw.OtherLen, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packStringHex(tw.OtherData, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func packMacWire(mw *macWireFmt, msg []byte) (int, error) {
|
||||
off, err := packUint16(mw.MACSize, msg, 0)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packStringHex(mw.MAC, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) {
|
||||
off, err := packUint48(tw.TimeSigned, msg, 0)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint16(tw.Fudge, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
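A hedged sketch of server-side TSIG using the pieces above: the Server's TsigSecret map feeds TsigVerify during request handling, and the handler consults w.TsigStatus() before signing its reply. dns.NewServeMux, (*Msg).SetReply and (*Msg).SetTsig come from other files of the package and are assumed here; the key name and base64 secret are placeholders.

package tsigexample

import (
    "time"

    "github.com/miekg/dns"
)

// newTsigServer returns a server whose handler only signs replies to
// correctly signed requests.
func newTsigServer() *dns.Server {
    secrets := map[string]string{"axfr.example.org.": "c2VjcmV0c2VjcmV0c2VjcmV0"}

    mux := dns.NewServeMux()
    mux.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
        m := new(dns.Msg)
        m.SetReply(r)
        if r.IsTsig() != nil && w.TsigStatus() == nil {
            // Request was signed and verified; sign the answer as well.
            m.SetTsig("axfr.example.org.", dns.HmacSHA256, 300, time.Now().Unix())
        }
        w.WriteMsg(m)
    })

    return &dns.Server{
        Addr:       ":8053",
        Net:        "udp",
        Handler:    mux,
        TsigSecret: secrets,
    }
}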
1434
vendor/github.com/miekg/dns/types.go
generated
vendored
Normal file
File diff suppressed because it is too large
102
vendor/github.com/miekg/dns/udp.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
|
||||
// +build !windows
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
// This is the required size of the OOB buffer to pass to ReadMsgUDP.
|
||||
var udpOOBSize = func() int {
|
||||
// We can't know whether we'll get an IPv4 control message or an
|
||||
// IPv6 control message ahead of time. To get around this, we size
|
||||
// the buffer equal to the largest of the two.
|
||||
|
||||
oob4 := ipv4.NewControlMessage(ipv4.FlagDst | ipv4.FlagInterface)
|
||||
oob6 := ipv6.NewControlMessage(ipv6.FlagDst | ipv6.FlagInterface)
|
||||
|
||||
if len(oob4) > len(oob6) {
|
||||
return len(oob4)
|
||||
}
|
||||
|
||||
return len(oob6)
|
||||
}()
|
||||
|
||||
// SessionUDP holds the remote address and the associated
|
||||
// out-of-band data.
|
||||
type SessionUDP struct {
|
||||
raddr *net.UDPAddr
|
||||
context []byte
|
||||
}
|
||||
|
||||
// RemoteAddr returns the remote network address.
|
||||
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
|
||||
|
||||
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
|
||||
// net.UDPAddr.
|
||||
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
|
||||
oob := make([]byte, udpOOBSize)
|
||||
n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
|
||||
if err != nil {
|
||||
return n, nil, err
|
||||
}
|
||||
return n, &SessionUDP{raddr, oob[:oobn]}, err
|
||||
}
|
||||
|
||||
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
|
||||
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
|
||||
oob := correctSource(session.context)
|
||||
n, _, err := conn.WriteMsgUDP(b, oob, session.raddr)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func setUDPSocketOptions(conn *net.UDPConn) error {
|
||||
// Try setting the flags for both families and ignore the errors unless they
|
||||
// both error.
|
||||
err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true)
|
||||
err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true)
|
||||
if err6 != nil && err4 != nil {
|
||||
return err4
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseDstFromOOB takes oob data and returns the destination IP.
|
||||
func parseDstFromOOB(oob []byte) net.IP {
|
||||
// Start with IPv6 and then fallback to IPv4
|
||||
// TODO(fastest963): Figure out a way to prefer one or the other. Looking at
|
||||
// the lvl of the header for a 0 or 41 isn't cross-platform.
|
||||
cm6 := new(ipv6.ControlMessage)
|
||||
if cm6.Parse(oob) == nil && cm6.Dst != nil {
|
||||
return cm6.Dst
|
||||
}
|
||||
cm4 := new(ipv4.ControlMessage)
|
||||
if cm4.Parse(oob) == nil && cm4.Dst != nil {
|
||||
return cm4.Dst
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// correctSource takes oob data and returns new oob data with the Src equal to the Dst
|
||||
func correctSource(oob []byte) []byte {
|
||||
dst := parseDstFromOOB(oob)
|
||||
if dst == nil {
|
||||
return nil
|
||||
}
|
||||
// If the dst is definitely an IPv6, then use ipv6's ControlMessage to
|
||||
// respond otherwise use ipv4's because ipv6's marshal ignores ipv4
|
||||
// addresses.
|
||||
if dst.To4() == nil {
|
||||
cm := new(ipv6.ControlMessage)
|
||||
cm.Src = dst
|
||||
oob = cm.Marshal()
|
||||
} else {
|
||||
cm := new(ipv4.ControlMessage)
|
||||
cm.Src = dst
|
||||
oob = cm.Marshal()
|
||||
}
|
||||
return oob
|
||||
}
|
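A small sketch of the session helpers above: read one datagram together with its out-of-band data, then answer through the same session so the reply's source address matches the interface the query arrived on. The helper name is illustrative.

package udpexample

import (
    "net"

    "github.com/miekg/dns"
)

// echoUDP reads a single message and echoes it back through the same session.
func echoUDP(conn *net.UDPConn) error {
    buf := make([]byte, dns.MinMsgSize)
    n, session, err := dns.ReadFromSessionUDP(conn, buf)
    if err != nil {
        return err
    }
    // Echo the raw message back; a real server would unpack, handle and pack.
    _, err = dns.WriteToSessionUDP(conn, buf[:n], session)
    return err
}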
35
vendor/github.com/miekg/dns/udp_windows.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
// +build windows
|
||||
|
||||
package dns
|
||||
|
||||
import "net"
|
||||
|
||||
// SessionUDP holds the remote address
|
||||
type SessionUDP struct {
|
||||
raddr *net.UDPAddr
|
||||
}
|
||||
|
||||
// RemoteAddr returns the remote network address.
|
||||
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
|
||||
|
||||
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
|
||||
// net.UDPAddr.
|
||||
// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP.
|
||||
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
|
||||
n, raddr, err := conn.ReadFrom(b)
|
||||
if err != nil {
|
||||
return n, nil, err
|
||||
}
|
||||
return n, &SessionUDP{raddr.(*net.UDPAddr)}, err
|
||||
}
|
||||
|
||||
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
|
||||
// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP.
|
||||
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
|
||||
return conn.WriteTo(b, session.raddr)
|
||||
}
|
||||
|
||||
// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods
|
||||
// use the standard method in udp.go for these.
|
||||
func setUDPSocketOptions(*net.UDPConn) error { return nil }
|
||||
func parseDstFromOOB([]byte, net.IP) net.IP { return nil }
|
110
vendor/github.com/miekg/dns/update.go
generated
vendored
Normal file
@ -0,0 +1,110 @@
|
||||
package dns
|
||||
|
||||
// NameUsed sets the RRs in the prereq section to
|
||||
// "Name is in use" RRs. RFC 2136 section 2.4.4.
|
||||
func (u *Msg) NameUsed(rr []RR) {
|
||||
if u.Answer == nil {
|
||||
u.Answer = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
|
||||
}
|
||||
}
|
||||
|
||||
// NameNotUsed sets the RRs in the prereq section to
|
||||
// "Name is in not use" RRs. RFC 2136 section 2.4.5.
|
||||
func (u *Msg) NameNotUsed(rr []RR) {
|
||||
if u.Answer == nil {
|
||||
u.Answer = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}})
|
||||
}
|
||||
}
|
||||
|
||||
// Used sets the RRs in the prereq section to
|
||||
// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2.
|
||||
func (u *Msg) Used(rr []RR) {
|
||||
if len(u.Question) == 0 {
|
||||
panic("dns: empty question section")
|
||||
}
|
||||
if u.Answer == nil {
|
||||
u.Answer = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
r.Header().Class = u.Question[0].Qclass
|
||||
u.Answer = append(u.Answer, r)
|
||||
}
|
||||
}
|
||||
|
||||
// RRsetUsed sets the RRs in the prereq section to
|
||||
// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
|
||||
func (u *Msg) RRsetUsed(rr []RR) {
|
||||
if u.Answer == nil {
|
||||
u.Answer = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
h := r.Header()
|
||||
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}})
|
||||
}
|
||||
}
|
||||
|
||||
// RRsetNotUsed sets the RRs in the prereq section to
|
||||
// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
|
||||
func (u *Msg) RRsetNotUsed(rr []RR) {
|
||||
if u.Answer == nil {
|
||||
u.Answer = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
h := r.Header()
|
||||
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassNONE}})
|
||||
}
|
||||
}
|
||||
|
||||
// Insert creates a dynamic update packet that adds a complete RRset, see RFC 2136 section 2.5.1.
|
||||
func (u *Msg) Insert(rr []RR) {
|
||||
if len(u.Question) == 0 {
|
||||
panic("dns: empty question section")
|
||||
}
|
||||
if u.Ns == nil {
|
||||
u.Ns = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
r.Header().Class = u.Question[0].Qclass
|
||||
u.Ns = append(u.Ns, r)
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
|
||||
func (u *Msg) RemoveRRset(rr []RR) {
|
||||
if u.Ns == nil {
|
||||
u.Ns = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
h := r.Header()
|
||||
u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}})
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3
|
||||
func (u *Msg) RemoveName(rr []RR) {
|
||||
if u.Ns == nil {
|
||||
u.Ns = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
|
||||
}
|
||||
}
|
||||
|
||||
// Remove creates a dynamic update packet that deletes an RR from an RRset, see RFC 2136 section 2.5.4.
|
||||
func (u *Msg) Remove(rr []RR) {
|
||||
if u.Ns == nil {
|
||||
u.Ns = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
h := r.Header()
|
||||
h.Class = ClassNONE
|
||||
h.Ttl = 0
|
||||
u.Ns = append(u.Ns, r)
|
||||
}
|
||||
}
|
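A sketch of building an RFC 2136 update message with the helpers above. (*Msg).SetUpdate and dns.NewRR come from other files of the package and are assumed here; the zone and record are placeholders.

package updateexample

import "github.com/miekg/dns"

// buildUpdate prepares an update that adds an A record, but only if the name
// does not exist yet.
func buildUpdate() (*dns.Msg, error) {
    rr, err := dns.NewRR("host.example.org. 300 IN A 192.0.2.1")
    if err != nil {
        return nil, err
    }

    m := new(dns.Msg)
    m.SetUpdate("example.org.") // puts the zone in the question section
    m.NameNotUsed([]dns.RR{rr}) // prerequisite: the name must not exist yet
    m.Insert([]dns.RR{rr})      // update: add the A record
    return m, nil
}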
15
vendor/github.com/miekg/dns/version.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
package dns

import "fmt"

// Version is the current version of this library.
var Version = V{1, 1, 3}

// V holds the version of this library.
type V struct {
    Major, Minor, Patch int
}

func (v V) String() string {
    return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
}
260
vendor/github.com/miekg/dns/xfr.go
generated
vendored
Normal file
@ -0,0 +1,260 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Envelope is used when doing a zone transfer with a remote server.
|
||||
type Envelope struct {
|
||||
RR []RR // The set of RRs in the answer section of the xfr reply message.
|
||||
Error error // If something went wrong, this contains the error.
|
||||
}
|
||||
|
||||
// A Transfer defines parameters that are used during a zone transfer.
|
||||
type Transfer struct {
|
||||
*Conn
|
||||
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds
|
||||
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds
|
||||
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds
|
||||
TsigSecret map[string]string // Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
|
||||
tsigTimersOnly bool
|
||||
}
|
||||
|
||||
// Think we need a way to stop the transfer
|
||||
|
||||
// In performs an incoming transfer with the server in a.
|
||||
// If you would like to set the source IP, or some other attribute
|
||||
// of a Dialer for a Transfer, you can do so by specifying the attributes
|
||||
// in the Transfer.Conn:
|
||||
//
|
||||
// d := net.Dialer{LocalAddr: transfer_source}
|
||||
// con, err := d.Dial("tcp", master)
|
||||
// dnscon := &dns.Conn{Conn:con}
|
||||
// transfer = &dns.Transfer{Conn: dnscon}
|
||||
// channel, err := transfer.In(message, master)
|
||||
//
|
||||
func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
|
||||
switch q.Question[0].Qtype {
|
||||
case TypeAXFR, TypeIXFR:
|
||||
default:
|
||||
return nil, &Error{"unsupported question type"}
|
||||
}
|
||||
|
||||
timeout := dnsTimeout
|
||||
if t.DialTimeout != 0 {
|
||||
timeout = t.DialTimeout
|
||||
}
|
||||
|
||||
if t.Conn == nil {
|
||||
t.Conn, err = DialTimeout("tcp", a, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := t.WriteMsg(q); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
env = make(chan *Envelope)
|
||||
switch q.Question[0].Qtype {
|
||||
case TypeAXFR:
|
||||
go t.inAxfr(q, env)
|
||||
case TypeIXFR:
|
||||
go t.inIxfr(q, env)
|
||||
}
|
||||
|
||||
return env, nil
|
||||
}
|
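As a hedged illustration (not part of the vendored code), a typical consumer of In requests an AXFR and drains the envelope channel; the zone and server address are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	t := new(dns.Transfer)
	m := new(dns.Msg)
	m.SetAxfr("example.org.") // ask for a full zone transfer

	env, err := t.In(m, "ns1.example.org:53")
	if err != nil {
		log.Fatal(err)
	}
	for e := range env { // each envelope carries a batch of RRs or an error
		if e.Error != nil {
			log.Fatal(e.Error)
		}
		for _, rr := range e.RR {
			fmt.Println(rr)
		}
	}
}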
||||
|
||||
func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) {
|
||||
first := true
|
||||
defer t.Close()
|
||||
defer close(c)
|
||||
timeout := dnsTimeout
|
||||
if t.ReadTimeout != 0 {
|
||||
timeout = t.ReadTimeout
|
||||
}
|
||||
for {
|
||||
t.Conn.SetReadDeadline(time.Now().Add(timeout))
|
||||
in, err := t.ReadMsg()
|
||||
if err != nil {
|
||||
c <- &Envelope{nil, err}
|
||||
return
|
||||
}
|
||||
if q.Id != in.Id {
|
||||
c <- &Envelope{in.Answer, ErrId}
|
||||
return
|
||||
}
|
||||
if first {
|
||||
if in.Rcode != RcodeSuccess {
|
||||
c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}}
|
||||
return
|
||||
}
|
||||
if !isSOAFirst(in) {
|
||||
c <- &Envelope{in.Answer, ErrSoa}
|
||||
return
|
||||
}
|
||||
first = !first
|
||||
// only one answer that is SOA, receive more
|
||||
if len(in.Answer) == 1 {
|
||||
t.tsigTimersOnly = true
|
||||
c <- &Envelope{in.Answer, nil}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !first {
|
||||
t.tsigTimersOnly = true // Subsequent envelopes use this.
|
||||
if isSOALast(in) {
|
||||
c <- &Envelope{in.Answer, nil}
|
||||
return
|
||||
}
|
||||
c <- &Envelope{in.Answer, nil}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) {
|
||||
var serial uint32 // The first serial seen is the current server serial
|
||||
axfr := true
|
||||
n := 0
|
||||
qser := q.Ns[0].(*SOA).Serial
|
||||
defer t.Close()
|
||||
defer close(c)
|
||||
timeout := dnsTimeout
|
||||
if t.ReadTimeout != 0 {
|
||||
timeout = t.ReadTimeout
|
||||
}
|
||||
for {
|
||||
t.SetReadDeadline(time.Now().Add(timeout))
|
||||
in, err := t.ReadMsg()
|
||||
if err != nil {
|
||||
c <- &Envelope{nil, err}
|
||||
return
|
||||
}
|
||||
if q.Id != in.Id {
|
||||
c <- &Envelope{in.Answer, ErrId}
|
||||
return
|
||||
}
|
||||
if in.Rcode != RcodeSuccess {
|
||||
c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}}
|
||||
return
|
||||
}
|
||||
if n == 0 {
|
||||
// Check if the returned answer is ok
|
||||
if !isSOAFirst(in) {
|
||||
c <- &Envelope{in.Answer, ErrSoa}
|
||||
return
|
||||
}
|
||||
// This serial is important
|
||||
serial = in.Answer[0].(*SOA).Serial
|
||||
// Check if there are no changes in zone
|
||||
if qser >= serial {
|
||||
c <- &Envelope{in.Answer, nil}
|
||||
return
|
||||
}
|
||||
}
|
||||
// Now we need to check each message for SOA records, to see what we need to do
|
||||
t.tsigTimersOnly = true
|
||||
for _, rr := range in.Answer {
|
||||
if v, ok := rr.(*SOA); ok {
|
||||
if v.Serial == serial {
|
||||
n++
|
||||
// quit if it's a full axfr or the server's SOA is repeated a third time
|
||||
if axfr && n == 2 || n == 3 {
|
||||
c <- &Envelope{in.Answer, nil}
|
||||
return
|
||||
}
|
||||
} else if axfr {
|
||||
// it's an ixfr
|
||||
axfr = false
|
||||
}
|
||||
}
|
||||
}
|
||||
c <- &Envelope{in.Answer, nil}
|
||||
}
|
||||
}
|
||||
|
||||
// Out performs an outgoing transfer with the client connecting in w.
|
||||
// Basic use pattern:
|
||||
//
|
||||
// ch := make(chan *dns.Envelope)
|
||||
// tr := new(dns.Transfer)
|
||||
// go tr.Out(w, r, ch)
|
||||
// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
|
||||
// close(ch)
|
||||
// w.Hijack()
|
||||
// // w.Close() // Client closes connection
|
||||
//
|
||||
// The server is responsible for sending the correct sequence of RRs through the
|
||||
// channel ch.
|
||||
func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error {
|
||||
for x := range ch {
|
||||
r := new(Msg)
|
||||
// Compress?
|
||||
r.SetReply(q)
|
||||
r.Authoritative = true
|
||||
// assume it fits TODO(miek): fix
|
||||
r.Answer = append(r.Answer, x.RR...)
|
||||
if err := w.WriteMsg(r); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.TsigTimersOnly(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadMsg reads a message from the transfer connection t.
|
||||
func (t *Transfer) ReadMsg() (*Msg, error) {
|
||||
m := new(Msg)
|
||||
p := make([]byte, MaxMsgSize)
|
||||
n, err := t.Read(p)
|
||||
if err != nil && n == 0 {
|
||||
return nil, err
|
||||
}
|
||||
p = p[:n]
|
||||
if err := m.Unpack(p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
|
||||
if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
|
||||
return m, ErrSecret
|
||||
}
|
||||
// Need to work on the original message p, as that was used to calculate the tsig.
|
||||
err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
|
||||
t.tsigRequestMAC = ts.MAC
|
||||
}
|
||||
return m, err
|
||||
}
|
||||
|
||||
// WriteMsg writes a message through the transfer connection t.
|
||||
func (t *Transfer) WriteMsg(m *Msg) (err error) {
|
||||
var out []byte
|
||||
if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
|
||||
if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
|
||||
return ErrSecret
|
||||
}
|
||||
out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
|
||||
} else {
|
||||
out, err = m.Pack()
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = t.Write(out)
|
||||
return err
|
||||
}
|
||||
|
||||
func isSOAFirst(in *Msg) bool {
|
||||
return len(in.Answer) > 0 &&
|
||||
in.Answer[0].Header().Rrtype == TypeSOA
|
||||
}
|
||||
|
||||
func isSOALast(in *Msg) bool {
|
||||
return len(in.Answer) > 0 &&
|
||||
in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA
|
||||
}
|
||||
|
||||
const errXFR = "bad xfr rcode: %d"
|
1140
vendor/github.com/miekg/dns/zduplicate.go
generated
vendored
Normal file
File diff suppressed because it is too large
2722
vendor/github.com/miekg/dns/zmsg.go
generated
vendored
Normal file
File diff suppressed because it is too large
873
vendor/github.com/miekg/dns/ztypes.go
generated
vendored
Normal file
@ -0,0 +1,873 @@
|
||||
// Code generated by "go run types_generate.go"; DO NOT EDIT.
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"net"
|
||||
)
|
||||
|
||||
// TypeToRR is a map of constructors for each RR type.
|
||||
var TypeToRR = map[uint16]func() RR{
|
||||
TypeA: func() RR { return new(A) },
|
||||
TypeAAAA: func() RR { return new(AAAA) },
|
||||
TypeAFSDB: func() RR { return new(AFSDB) },
|
||||
TypeANY: func() RR { return new(ANY) },
|
||||
TypeAVC: func() RR { return new(AVC) },
|
||||
TypeCAA: func() RR { return new(CAA) },
|
||||
TypeCDNSKEY: func() RR { return new(CDNSKEY) },
|
||||
TypeCDS: func() RR { return new(CDS) },
|
||||
TypeCERT: func() RR { return new(CERT) },
|
||||
TypeCNAME: func() RR { return new(CNAME) },
|
||||
TypeCSYNC: func() RR { return new(CSYNC) },
|
||||
TypeDHCID: func() RR { return new(DHCID) },
|
||||
TypeDLV: func() RR { return new(DLV) },
|
||||
TypeDNAME: func() RR { return new(DNAME) },
|
||||
TypeDNSKEY: func() RR { return new(DNSKEY) },
|
||||
TypeDS: func() RR { return new(DS) },
|
||||
TypeEID: func() RR { return new(EID) },
|
||||
TypeEUI48: func() RR { return new(EUI48) },
|
||||
TypeEUI64: func() RR { return new(EUI64) },
|
||||
TypeGID: func() RR { return new(GID) },
|
||||
TypeGPOS: func() RR { return new(GPOS) },
|
||||
TypeHINFO: func() RR { return new(HINFO) },
|
||||
TypeHIP: func() RR { return new(HIP) },
|
||||
TypeKEY: func() RR { return new(KEY) },
|
||||
TypeKX: func() RR { return new(KX) },
|
||||
TypeL32: func() RR { return new(L32) },
|
||||
TypeL64: func() RR { return new(L64) },
|
||||
TypeLOC: func() RR { return new(LOC) },
|
||||
TypeLP: func() RR { return new(LP) },
|
||||
TypeMB: func() RR { return new(MB) },
|
||||
TypeMD: func() RR { return new(MD) },
|
||||
TypeMF: func() RR { return new(MF) },
|
||||
TypeMG: func() RR { return new(MG) },
|
||||
TypeMINFO: func() RR { return new(MINFO) },
|
||||
TypeMR: func() RR { return new(MR) },
|
||||
TypeMX: func() RR { return new(MX) },
|
||||
TypeNAPTR: func() RR { return new(NAPTR) },
|
||||
TypeNID: func() RR { return new(NID) },
|
||||
TypeNIMLOC: func() RR { return new(NIMLOC) },
|
||||
TypeNINFO: func() RR { return new(NINFO) },
|
||||
TypeNS: func() RR { return new(NS) },
|
||||
TypeNSAPPTR: func() RR { return new(NSAPPTR) },
|
||||
TypeNSEC: func() RR { return new(NSEC) },
|
||||
TypeNSEC3: func() RR { return new(NSEC3) },
|
||||
TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) },
|
||||
TypeNULL: func() RR { return new(NULL) },
|
||||
TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) },
|
||||
TypeOPT: func() RR { return new(OPT) },
|
||||
TypePTR: func() RR { return new(PTR) },
|
||||
TypePX: func() RR { return new(PX) },
|
||||
TypeRKEY: func() RR { return new(RKEY) },
|
||||
TypeRP: func() RR { return new(RP) },
|
||||
TypeRRSIG: func() RR { return new(RRSIG) },
|
||||
TypeRT: func() RR { return new(RT) },
|
||||
TypeSIG: func() RR { return new(SIG) },
|
||||
TypeSMIMEA: func() RR { return new(SMIMEA) },
|
||||
TypeSOA: func() RR { return new(SOA) },
|
||||
TypeSPF: func() RR { return new(SPF) },
|
||||
TypeSRV: func() RR { return new(SRV) },
|
||||
TypeSSHFP: func() RR { return new(SSHFP) },
|
||||
TypeTA: func() RR { return new(TA) },
|
||||
TypeTALINK: func() RR { return new(TALINK) },
|
||||
TypeTKEY: func() RR { return new(TKEY) },
|
||||
TypeTLSA: func() RR { return new(TLSA) },
|
||||
TypeTSIG: func() RR { return new(TSIG) },
|
||||
TypeTXT: func() RR { return new(TXT) },
|
||||
TypeUID: func() RR { return new(UID) },
|
||||
TypeUINFO: func() RR { return new(UINFO) },
|
||||
TypeURI: func() RR { return new(URI) },
|
||||
TypeX25: func() RR { return new(X25) },
|
||||
}
|
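A brief, hedged sketch of how this constructor map can be used from application code to build an RR by type code without a type switch; the names and values are illustrative only:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Look up the constructor for a type code and build a zero-valued RR.
	if newFn, ok := dns.TypeToRR[dns.TypeMX]; ok {
		rr := newFn() // concrete type is *dns.MX
		rr.Header().Name = "example.org."
		rr.Header().Rrtype = dns.TypeMX
		rr.Header().Class = dns.ClassINET
		fmt.Printf("%T %s\n", rr, dns.TypeToString[dns.TypeMX])
	}
}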
||||
|
||||
// TypeToString is a map of strings for each RR type.
|
||||
var TypeToString = map[uint16]string{
|
||||
TypeA: "A",
|
||||
TypeAAAA: "AAAA",
|
||||
TypeAFSDB: "AFSDB",
|
||||
TypeANY: "ANY",
|
||||
TypeATMA: "ATMA",
|
||||
TypeAVC: "AVC",
|
||||
TypeAXFR: "AXFR",
|
||||
TypeCAA: "CAA",
|
||||
TypeCDNSKEY: "CDNSKEY",
|
||||
TypeCDS: "CDS",
|
||||
TypeCERT: "CERT",
|
||||
TypeCNAME: "CNAME",
|
||||
TypeCSYNC: "CSYNC",
|
||||
TypeDHCID: "DHCID",
|
||||
TypeDLV: "DLV",
|
||||
TypeDNAME: "DNAME",
|
||||
TypeDNSKEY: "DNSKEY",
|
||||
TypeDS: "DS",
|
||||
TypeEID: "EID",
|
||||
TypeEUI48: "EUI48",
|
||||
TypeEUI64: "EUI64",
|
||||
TypeGID: "GID",
|
||||
TypeGPOS: "GPOS",
|
||||
TypeHINFO: "HINFO",
|
||||
TypeHIP: "HIP",
|
||||
TypeISDN: "ISDN",
|
||||
TypeIXFR: "IXFR",
|
||||
TypeKEY: "KEY",
|
||||
TypeKX: "KX",
|
||||
TypeL32: "L32",
|
||||
TypeL64: "L64",
|
||||
TypeLOC: "LOC",
|
||||
TypeLP: "LP",
|
||||
TypeMAILA: "MAILA",
|
||||
TypeMAILB: "MAILB",
|
||||
TypeMB: "MB",
|
||||
TypeMD: "MD",
|
||||
TypeMF: "MF",
|
||||
TypeMG: "MG",
|
||||
TypeMINFO: "MINFO",
|
||||
TypeMR: "MR",
|
||||
TypeMX: "MX",
|
||||
TypeNAPTR: "NAPTR",
|
||||
TypeNID: "NID",
|
||||
TypeNIMLOC: "NIMLOC",
|
||||
TypeNINFO: "NINFO",
|
||||
TypeNS: "NS",
|
||||
TypeNSEC: "NSEC",
|
||||
TypeNSEC3: "NSEC3",
|
||||
TypeNSEC3PARAM: "NSEC3PARAM",
|
||||
TypeNULL: "NULL",
|
||||
TypeNXT: "NXT",
|
||||
TypeNone: "None",
|
||||
TypeOPENPGPKEY: "OPENPGPKEY",
|
||||
TypeOPT: "OPT",
|
||||
TypePTR: "PTR",
|
||||
TypePX: "PX",
|
||||
TypeRKEY: "RKEY",
|
||||
TypeRP: "RP",
|
||||
TypeRRSIG: "RRSIG",
|
||||
TypeRT: "RT",
|
||||
TypeReserved: "Reserved",
|
||||
TypeSIG: "SIG",
|
||||
TypeSMIMEA: "SMIMEA",
|
||||
TypeSOA: "SOA",
|
||||
TypeSPF: "SPF",
|
||||
TypeSRV: "SRV",
|
||||
TypeSSHFP: "SSHFP",
|
||||
TypeTA: "TA",
|
||||
TypeTALINK: "TALINK",
|
||||
TypeTKEY: "TKEY",
|
||||
TypeTLSA: "TLSA",
|
||||
TypeTSIG: "TSIG",
|
||||
TypeTXT: "TXT",
|
||||
TypeUID: "UID",
|
||||
TypeUINFO: "UINFO",
|
||||
TypeUNSPEC: "UNSPEC",
|
||||
TypeURI: "URI",
|
||||
TypeX25: "X25",
|
||||
TypeNSAPPTR: "NSAP-PTR",
|
||||
}
|
||||
|
||||
func (rr *A) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *AAAA) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *ANY) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *AVC) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *CAA) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *CDS) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *CERT) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *CNAME) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *CSYNC) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *DHCID) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *DLV) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *DNAME) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *DS) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *EID) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *EUI48) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *EUI64) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *GID) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *GPOS) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *HINFO) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *HIP) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *KEY) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *KX) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *L32) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *L64) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *LOC) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *LP) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *MB) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *MD) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *MF) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *MG) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *MINFO) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *MR) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *MX) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *NID) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *NINFO) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *NS) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *NSEC) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *NULL) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *OPT) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *PTR) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *PX) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *RKEY) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *RP) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *RT) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *SIG) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *SOA) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *SPF) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *SRV) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *TA) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *TALINK) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *TKEY) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *TLSA) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *TSIG) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *TXT) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *UID) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *UINFO) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *URI) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *X25) Header() *RR_Header { return &rr.Hdr }
|
||||
|
||||
// len() functions
|
||||
func (rr *A) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += net.IPv4len // A
|
||||
return l
|
||||
}
|
||||
func (rr *AAAA) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += net.IPv6len // AAAA
|
||||
return l
|
||||
}
|
||||
func (rr *AFSDB) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Subtype
|
||||
l += domainNameLen(rr.Hostname, off+l, compression, false)
|
||||
return l
|
||||
}
|
||||
func (rr *ANY) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
return l
|
||||
}
|
||||
func (rr *AVC) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
for _, x := range rr.Txt {
|
||||
l += len(x) + 1
|
||||
}
|
||||
return l
|
||||
}
|
||||
func (rr *CAA) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l++ // Flag
|
||||
l += len(rr.Tag) + 1
|
||||
l += len(rr.Value)
|
||||
return l
|
||||
}
|
||||
func (rr *CERT) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Type
|
||||
l += 2 // KeyTag
|
||||
l++ // Algorithm
|
||||
l += base64.StdEncoding.DecodedLen(len(rr.Certificate))
|
||||
return l
|
||||
}
|
||||
func (rr *CNAME) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Target, off+l, compression, true)
|
||||
return l
|
||||
}
|
||||
func (rr *DHCID) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += base64.StdEncoding.DecodedLen(len(rr.Digest))
|
||||
return l
|
||||
}
|
||||
func (rr *DNAME) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Target, off+l, compression, false)
|
||||
return l
|
||||
}
|
||||
func (rr *DNSKEY) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Flags
|
||||
l++ // Protocol
|
||||
l++ // Algorithm
|
||||
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
|
||||
return l
|
||||
}
|
||||
func (rr *DS) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // KeyTag
|
||||
l++ // Algorithm
|
||||
l++ // DigestType
|
||||
l += len(rr.Digest)/2 + 1
|
||||
return l
|
||||
}
|
||||
func (rr *EID) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += len(rr.Endpoint)/2 + 1
|
||||
return l
|
||||
}
|
||||
func (rr *EUI48) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 6 // Address
|
||||
return l
|
||||
}
|
||||
func (rr *EUI64) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 8 // Address
|
||||
return l
|
||||
}
|
||||
func (rr *GID) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 4 // Gid
|
||||
return l
|
||||
}
|
||||
func (rr *GPOS) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += len(rr.Longitude) + 1
|
||||
l += len(rr.Latitude) + 1
|
||||
l += len(rr.Altitude) + 1
|
||||
return l
|
||||
}
|
||||
func (rr *HINFO) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += len(rr.Cpu) + 1
|
||||
l += len(rr.Os) + 1
|
||||
return l
|
||||
}
|
||||
func (rr *HIP) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l++ // HitLength
|
||||
l++ // PublicKeyAlgorithm
|
||||
l += 2 // PublicKeyLength
|
||||
l += len(rr.Hit) / 2
|
||||
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
|
||||
for _, x := range rr.RendezvousServers {
|
||||
l += domainNameLen(x, off+l, compression, false)
|
||||
}
|
||||
return l
|
||||
}
|
||||
func (rr *KX) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Preference
|
||||
l += domainNameLen(rr.Exchanger, off+l, compression, false)
|
||||
return l
|
||||
}
|
||||
func (rr *L32) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Preference
|
||||
l += net.IPv4len // Locator32
|
||||
return l
|
||||
}
|
||||
func (rr *L64) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Preference
|
||||
l += 8 // Locator64
|
||||
return l
|
||||
}
|
||||
func (rr *LOC) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l++ // Version
|
||||
l++ // Size
|
||||
l++ // HorizPre
|
||||
l++ // VertPre
|
||||
l += 4 // Latitude
|
||||
l += 4 // Longitude
|
||||
l += 4 // Altitude
|
||||
return l
|
||||
}
|
||||
func (rr *LP) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Preference
|
||||
l += domainNameLen(rr.Fqdn, off+l, compression, false)
|
||||
return l
|
||||
}
|
||||
func (rr *MB) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Mb, off+l, compression, true)
|
||||
return l
|
||||
}
|
||||
func (rr *MD) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Md, off+l, compression, true)
|
||||
return l
|
||||
}
|
||||
func (rr *MF) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Mf, off+l, compression, true)
|
||||
return l
|
||||
}
|
||||
func (rr *MG) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Mg, off+l, compression, true)
|
||||
return l
|
||||
}
|
||||
func (rr *MINFO) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Rmail, off+l, compression, true)
|
||||
l += domainNameLen(rr.Email, off+l, compression, true)
|
||||
return l
|
||||
}
|
||||
func (rr *MR) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Mr, off+l, compression, true)
|
||||
return l
|
||||
}
|
||||
func (rr *MX) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Preference
|
||||
l += domainNameLen(rr.Mx, off+l, compression, true)
|
||||
return l
|
||||
}
|
||||
func (rr *NAPTR) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Order
|
||||
l += 2 // Preference
|
||||
l += len(rr.Flags) + 1
|
||||
l += len(rr.Service) + 1
|
||||
l += len(rr.Regexp) + 1
|
||||
l += domainNameLen(rr.Replacement, off+l, compression, false)
|
||||
return l
|
||||
}
|
||||
func (rr *NID) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Preference
|
||||
l += 8 // NodeID
|
||||
return l
|
||||
}
|
||||
func (rr *NIMLOC) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += len(rr.Locator)/2 + 1
|
||||
return l
|
||||
}
|
||||
func (rr *NINFO) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
for _, x := range rr.ZSData {
|
||||
l += len(x) + 1
|
||||
}
|
||||
return l
|
||||
}
|
||||
func (rr *NS) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Ns, off+l, compression, true)
|
||||
return l
|
||||
}
|
||||
func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Ptr, off+l, compression, false)
|
||||
return l
|
||||
}
|
||||
func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l++ // Hash
|
||||
l++ // Flags
|
||||
l += 2 // Iterations
|
||||
l++ // SaltLength
|
||||
l += len(rr.Salt) / 2
|
||||
return l
|
||||
}
|
||||
func (rr *NULL) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += len(rr.Data)
|
||||
return l
|
||||
}
|
||||
func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
|
||||
return l
|
||||
}
|
||||
func (rr *PTR) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Ptr, off+l, compression, true)
|
||||
return l
|
||||
}
|
||||
func (rr *PX) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Preference
|
||||
l += domainNameLen(rr.Map822, off+l, compression, false)
|
||||
l += domainNameLen(rr.Mapx400, off+l, compression, false)
|
||||
return l
|
||||
}
|
||||
func (rr *RFC3597) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += len(rr.Rdata)/2 + 1
|
||||
return l
|
||||
}
|
||||
func (rr *RKEY) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Flags
|
||||
l++ // Protocol
|
||||
l++ // Algorithm
|
||||
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
|
||||
return l
|
||||
}
|
||||
func (rr *RP) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Mbox, off+l, compression, false)
|
||||
l += domainNameLen(rr.Txt, off+l, compression, false)
|
||||
return l
|
||||
}
|
||||
func (rr *RRSIG) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // TypeCovered
|
||||
l++ // Algorithm
|
||||
l++ // Labels
|
||||
l += 4 // OrigTtl
|
||||
l += 4 // Expiration
|
||||
l += 4 // Inception
|
||||
l += 2 // KeyTag
|
||||
l += domainNameLen(rr.SignerName, off+l, compression, false)
|
||||
l += base64.StdEncoding.DecodedLen(len(rr.Signature))
|
||||
return l
|
||||
}
|
||||
func (rr *RT) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Preference
|
||||
l += domainNameLen(rr.Host, off+l, compression, false)
|
||||
return l
|
||||
}
|
||||
func (rr *SMIMEA) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l++ // Usage
|
||||
l++ // Selector
|
||||
l++ // MatchingType
|
||||
l += len(rr.Certificate)/2 + 1
|
||||
return l
|
||||
}
|
||||
func (rr *SOA) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Ns, off+l, compression, true)
|
||||
l += domainNameLen(rr.Mbox, off+l, compression, true)
|
||||
l += 4 // Serial
|
||||
l += 4 // Refresh
|
||||
l += 4 // Retry
|
||||
l += 4 // Expire
|
||||
l += 4 // Minttl
|
||||
return l
|
||||
}
|
||||
func (rr *SPF) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
for _, x := range rr.Txt {
|
||||
l += len(x) + 1
|
||||
}
|
||||
return l
|
||||
}
|
||||
func (rr *SRV) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Priority
|
||||
l += 2 // Weight
|
||||
l += 2 // Port
|
||||
l += domainNameLen(rr.Target, off+l, compression, false)
|
||||
return l
|
||||
}
|
||||
func (rr *SSHFP) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l++ // Algorithm
|
||||
l++ // Type
|
||||
l += len(rr.FingerPrint)/2 + 1
|
||||
return l
|
||||
}
|
||||
func (rr *TA) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // KeyTag
|
||||
l++ // Algorithm
|
||||
l++ // DigestType
|
||||
l += len(rr.Digest)/2 + 1
|
||||
return l
|
||||
}
|
||||
func (rr *TALINK) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.PreviousName, off+l, compression, false)
|
||||
l += domainNameLen(rr.NextName, off+l, compression, false)
|
||||
return l
|
||||
}
|
||||
func (rr *TKEY) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Algorithm, off+l, compression, false)
|
||||
l += 4 // Inception
|
||||
l += 4 // Expiration
|
||||
l += 2 // Mode
|
||||
l += 2 // Error
|
||||
l += 2 // KeySize
|
||||
l += len(rr.Key) / 2
|
||||
l += 2 // OtherLen
|
||||
l += len(rr.OtherData) / 2
|
||||
return l
|
||||
}
|
||||
func (rr *TLSA) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l++ // Usage
|
||||
l++ // Selector
|
||||
l++ // MatchingType
|
||||
l += len(rr.Certificate)/2 + 1
|
||||
return l
|
||||
}
|
||||
func (rr *TSIG) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += domainNameLen(rr.Algorithm, off+l, compression, false)
|
||||
l += 6 // TimeSigned
|
||||
l += 2 // Fudge
|
||||
l += 2 // MACSize
|
||||
l += len(rr.MAC) / 2
|
||||
l += 2 // OrigId
|
||||
l += 2 // Error
|
||||
l += 2 // OtherLen
|
||||
l += len(rr.OtherData) / 2
|
||||
return l
|
||||
}
|
||||
func (rr *TXT) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
for _, x := range rr.Txt {
|
||||
l += len(x) + 1
|
||||
}
|
||||
return l
|
||||
}
|
||||
func (rr *UID) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 4 // Uid
|
||||
return l
|
||||
}
|
||||
func (rr *UINFO) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += len(rr.Uinfo) + 1
|
||||
return l
|
||||
}
|
||||
func (rr *URI) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Priority
|
||||
l += 2 // Weight
|
||||
l += len(rr.Target)
|
||||
return l
|
||||
}
|
||||
func (rr *X25) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += len(rr.PSDNAddress) + 1
|
||||
return l
|
||||
}
|
||||
|
||||
// copy() functions
|
||||
func (rr *A) copy() RR {
|
||||
return &A{rr.Hdr, copyIP(rr.A)}
|
||||
}
|
||||
func (rr *AAAA) copy() RR {
|
||||
return &AAAA{rr.Hdr, copyIP(rr.AAAA)}
|
||||
}
|
||||
func (rr *AFSDB) copy() RR {
|
||||
return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname}
|
||||
}
|
||||
func (rr *ANY) copy() RR {
|
||||
return &ANY{rr.Hdr}
|
||||
}
|
||||
func (rr *AVC) copy() RR {
|
||||
Txt := make([]string, len(rr.Txt))
|
||||
copy(Txt, rr.Txt)
|
||||
return &AVC{rr.Hdr, Txt}
|
||||
}
|
||||
func (rr *CAA) copy() RR {
|
||||
return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value}
|
||||
}
|
||||
func (rr *CERT) copy() RR {
|
||||
return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
|
||||
}
|
||||
func (rr *CNAME) copy() RR {
|
||||
return &CNAME{rr.Hdr, rr.Target}
|
||||
}
|
||||
func (rr *CSYNC) copy() RR {
|
||||
TypeBitMap := make([]uint16, len(rr.TypeBitMap))
|
||||
copy(TypeBitMap, rr.TypeBitMap)
|
||||
return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap}
|
||||
}
|
||||
func (rr *DHCID) copy() RR {
|
||||
return &DHCID{rr.Hdr, rr.Digest}
|
||||
}
|
||||
func (rr *DNAME) copy() RR {
|
||||
return &DNAME{rr.Hdr, rr.Target}
|
||||
}
|
||||
func (rr *DNSKEY) copy() RR {
|
||||
return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
|
||||
}
|
||||
func (rr *DS) copy() RR {
|
||||
return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
|
||||
}
|
||||
func (rr *EID) copy() RR {
|
||||
return &EID{rr.Hdr, rr.Endpoint}
|
||||
}
|
||||
func (rr *EUI48) copy() RR {
|
||||
return &EUI48{rr.Hdr, rr.Address}
|
||||
}
|
||||
func (rr *EUI64) copy() RR {
|
||||
return &EUI64{rr.Hdr, rr.Address}
|
||||
}
|
||||
func (rr *GID) copy() RR {
|
||||
return &GID{rr.Hdr, rr.Gid}
|
||||
}
|
||||
func (rr *GPOS) copy() RR {
|
||||
return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude}
|
||||
}
|
||||
func (rr *HINFO) copy() RR {
|
||||
return &HINFO{rr.Hdr, rr.Cpu, rr.Os}
|
||||
}
|
||||
func (rr *HIP) copy() RR {
|
||||
RendezvousServers := make([]string, len(rr.RendezvousServers))
|
||||
copy(RendezvousServers, rr.RendezvousServers)
|
||||
return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
|
||||
}
|
||||
func (rr *KX) copy() RR {
|
||||
return &KX{rr.Hdr, rr.Preference, rr.Exchanger}
|
||||
}
|
||||
func (rr *L32) copy() RR {
|
||||
return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)}
|
||||
}
|
||||
func (rr *L64) copy() RR {
|
||||
return &L64{rr.Hdr, rr.Preference, rr.Locator64}
|
||||
}
|
||||
func (rr *LOC) copy() RR {
|
||||
return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
|
||||
}
|
||||
func (rr *LP) copy() RR {
|
||||
return &LP{rr.Hdr, rr.Preference, rr.Fqdn}
|
||||
}
|
||||
func (rr *MB) copy() RR {
|
||||
return &MB{rr.Hdr, rr.Mb}
|
||||
}
|
||||
func (rr *MD) copy() RR {
|
||||
return &MD{rr.Hdr, rr.Md}
|
||||
}
|
||||
func (rr *MF) copy() RR {
|
||||
return &MF{rr.Hdr, rr.Mf}
|
||||
}
|
||||
func (rr *MG) copy() RR {
|
||||
return &MG{rr.Hdr, rr.Mg}
|
||||
}
|
||||
func (rr *MINFO) copy() RR {
|
||||
return &MINFO{rr.Hdr, rr.Rmail, rr.Email}
|
||||
}
|
||||
func (rr *MR) copy() RR {
|
||||
return &MR{rr.Hdr, rr.Mr}
|
||||
}
|
||||
func (rr *MX) copy() RR {
|
||||
return &MX{rr.Hdr, rr.Preference, rr.Mx}
|
||||
}
|
||||
func (rr *NAPTR) copy() RR {
|
||||
return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
|
||||
}
|
||||
func (rr *NID) copy() RR {
|
||||
return &NID{rr.Hdr, rr.Preference, rr.NodeID}
|
||||
}
|
||||
func (rr *NIMLOC) copy() RR {
|
||||
return &NIMLOC{rr.Hdr, rr.Locator}
|
||||
}
|
||||
func (rr *NINFO) copy() RR {
|
||||
ZSData := make([]string, len(rr.ZSData))
|
||||
copy(ZSData, rr.ZSData)
|
||||
return &NINFO{rr.Hdr, ZSData}
|
||||
}
|
||||
func (rr *NS) copy() RR {
|
||||
return &NS{rr.Hdr, rr.Ns}
|
||||
}
|
||||
func (rr *NSAPPTR) copy() RR {
|
||||
return &NSAPPTR{rr.Hdr, rr.Ptr}
|
||||
}
|
||||
func (rr *NSEC) copy() RR {
|
||||
TypeBitMap := make([]uint16, len(rr.TypeBitMap))
|
||||
copy(TypeBitMap, rr.TypeBitMap)
|
||||
return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap}
|
||||
}
|
||||
func (rr *NSEC3) copy() RR {
|
||||
TypeBitMap := make([]uint16, len(rr.TypeBitMap))
|
||||
copy(TypeBitMap, rr.TypeBitMap)
|
||||
return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap}
|
||||
}
|
||||
func (rr *NSEC3PARAM) copy() RR {
|
||||
return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
|
||||
}
|
||||
func (rr *NULL) copy() RR {
|
||||
return &NULL{rr.Hdr, rr.Data}
|
||||
}
|
||||
func (rr *OPENPGPKEY) copy() RR {
|
||||
return &OPENPGPKEY{rr.Hdr, rr.PublicKey}
|
||||
}
|
||||
func (rr *OPT) copy() RR {
|
||||
Option := make([]EDNS0, len(rr.Option))
|
||||
copy(Option, rr.Option)
|
||||
return &OPT{rr.Hdr, Option}
|
||||
}
|
||||
func (rr *PTR) copy() RR {
|
||||
return &PTR{rr.Hdr, rr.Ptr}
|
||||
}
|
||||
func (rr *PX) copy() RR {
|
||||
return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400}
|
||||
}
|
||||
func (rr *RFC3597) copy() RR {
|
||||
return &RFC3597{rr.Hdr, rr.Rdata}
|
||||
}
|
||||
func (rr *RKEY) copy() RR {
|
||||
return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
|
||||
}
|
||||
func (rr *RP) copy() RR {
|
||||
return &RP{rr.Hdr, rr.Mbox, rr.Txt}
|
||||
}
|
||||
func (rr *RRSIG) copy() RR {
|
||||
return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
|
||||
}
|
||||
func (rr *RT) copy() RR {
|
||||
return &RT{rr.Hdr, rr.Preference, rr.Host}
|
||||
}
|
||||
func (rr *SMIMEA) copy() RR {
|
||||
return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
|
||||
}
|
||||
func (rr *SOA) copy() RR {
|
||||
return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
|
||||
}
|
||||
func (rr *SPF) copy() RR {
|
||||
Txt := make([]string, len(rr.Txt))
|
||||
copy(Txt, rr.Txt)
|
||||
return &SPF{rr.Hdr, Txt}
|
||||
}
|
||||
func (rr *SRV) copy() RR {
|
||||
return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target}
|
||||
}
|
||||
func (rr *SSHFP) copy() RR {
|
||||
return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint}
|
||||
}
|
||||
func (rr *TA) copy() RR {
|
||||
return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
|
||||
}
|
||||
func (rr *TALINK) copy() RR {
|
||||
return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName}
|
||||
}
|
||||
func (rr *TKEY) copy() RR {
|
||||
return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
|
||||
}
|
||||
func (rr *TLSA) copy() RR {
|
||||
return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
|
||||
}
|
||||
func (rr *TSIG) copy() RR {
|
||||
return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
|
||||
}
|
||||
func (rr *TXT) copy() RR {
|
||||
Txt := make([]string, len(rr.Txt))
|
||||
copy(Txt, rr.Txt)
|
||||
return &TXT{rr.Hdr, Txt}
|
||||
}
|
||||
func (rr *UID) copy() RR {
|
||||
return &UID{rr.Hdr, rr.Uid}
|
||||
}
|
||||
func (rr *UINFO) copy() RR {
|
||||
return &UINFO{rr.Hdr, rr.Uinfo}
|
||||
}
|
||||
func (rr *URI) copy() RR {
|
||||
return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target}
|
||||
}
|
||||
func (rr *X25) copy() RR {
|
||||
return &X25{rr.Hdr, rr.PSDNAddress}
|
||||
}
|
9
vendor/github.com/nathan-osman/go-aptproxy/LICENSE.txt
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Nathan Osman
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
125
vendor/github.com/nathan-osman/go-aptproxy/cache/cache.go
generated
vendored
Normal file
@ -0,0 +1,125 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"mime"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Reader is a generic interface for reading cache entries either from disk or
|
||||
// directly attached to a downloader.
|
||||
type Reader interface {
|
||||
io.ReadCloser
|
||||
GetEntry() (*Entry, error)
|
||||
}
|
||||
|
||||
// Cache provides access to entries in the cache.
|
||||
type Cache struct {
|
||||
mutex sync.Mutex
|
||||
directory string
|
||||
downloaders map[string]*downloader
|
||||
waitGroup sync.WaitGroup
|
||||
}
|
||||
|
||||
// NewCache creates a new cache in the specified directory.
|
||||
func NewCache(directory string) (*Cache, error) {
|
||||
if err := os.MkdirAll(directory, 0775); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Cache{
|
||||
directory: directory,
|
||||
downloaders: make(map[string]*downloader),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getFilenames returns the filenames for the JSON and data files from a URL.
|
||||
func (c *Cache) getFilenames(rawurl string) (hash, jsonFilename, dataFilename string) {
|
||||
b := md5.Sum([]byte(rawurl))
|
||||
hash = hex.EncodeToString(b[:])
|
||||
jsonFilename = path.Join(c.directory, fmt.Sprintf("%s.json", hash))
|
||||
dataFilename = path.Join(c.directory, fmt.Sprintf("%s.data", hash))
|
||||
return
|
||||
}
|
||||
|
||||
// GetReader obtains a Reader for the specified rawurl. If a downloader
|
||||
// currently exists for the URL, a live reader is created and connected to it.
|
||||
// If the URL exists in the cache, it is read using the standard file API. If
|
||||
// not, a downloader and live reader are created.
|
||||
func (c *Cache) GetReader(rawurl string, maxAge time.Duration) (Reader, error) {
|
||||
hash, jsonFilename, dataFilename := c.getFilenames(rawurl)
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
d, ok := c.downloaders[hash]
|
||||
if !ok {
|
||||
_, err := os.Stat(jsonFilename)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
r, err := newDiskReader(jsonFilename, dataFilename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
e, _ := r.GetEntry()
|
||||
lastModified, _ := time.Parse(http.TimeFormat, e.LastModified)
|
||||
if e.Complete &&
|
||||
(maxAge == -1 ||
|
||||
lastModified.Before(time.Now().Add(maxAge))) {
|
||||
log.Println("[HIT]", rawurl)
|
||||
return r, nil
|
||||
}
|
||||
}
|
||||
d = newDownloader(rawurl, jsonFilename, dataFilename)
|
||||
go func() {
|
||||
d.WaitForDone()
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
delete(c.downloaders, hash)
|
||||
c.waitGroup.Done()
|
||||
}()
|
||||
c.downloaders[hash] = d
|
||||
c.waitGroup.Add(1)
|
||||
}
|
||||
log.Println("[MISS]", rawurl)
|
||||
return newLiveReader(d, dataFilename)
|
||||
}
|
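A hedged sketch of how a caller might use GetReader; the cache directory and URL are placeholders and error handling is abbreviated:

package main

import (
	"io"
	"log"
	"os"
	"time"

	"github.com/nathan-osman/go-aptproxy/cache"
)

func main() {
	c, err := cache.NewCache("/tmp/aptproxy-cache") // placeholder directory
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close() // blocks until any in-flight downloads finish

	// GetReader returns a disk-backed reader on a cache hit; otherwise it
	// starts (or joins) a download and streams the data as it arrives.
	r, err := c.GetReader("http://archive.ubuntu.com/ubuntu/dists/focal/Release", time.Hour)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}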
||||
|
||||
// Insert adds an item into the cache.
|
||||
func (c *Cache) Insert(rawurl string, r io.Reader) error {
|
||||
_, jsonFilename, dataFilename := c.getFilenames(rawurl)
|
||||
f, err := os.Open(dataFilename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
n, err := io.Copy(f, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
e := &Entry{
|
||||
URL: rawurl,
|
||||
Complete: true,
|
||||
ContentLength: strconv.FormatInt(n, 10),
|
||||
ContentType: mime.TypeByExtension(rawurl),
|
||||
LastModified: time.Now().Format(http.TimeFormat),
|
||||
}
|
||||
return e.Save(jsonFilename)
|
||||
}
|
||||
|
||||
// TODO: implement some form of "safe abort" for downloads so that the entire
|
||||
// application doesn't end up spinning its tires waiting for downloads to end.
|
||||
|
||||
// Close waits for all downloaders to complete before shutting down.
|
||||
func (c *Cache) Close() {
|
||||
c.waitGroup.Wait()
|
||||
}
|
43
vendor/github.com/nathan-osman/go-aptproxy/cache/diskreader.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// diskReader reads a file from the cache on disk.
|
||||
type diskReader struct {
|
||||
entry *Entry
|
||||
file *os.File
|
||||
}
|
||||
|
||||
// newDiskReader creates a reader from the provided JSON and data filenames.
|
||||
// Failure to open either of these results in an immediate error.
|
||||
func newDiskReader(jsonFilename, dataFilename string) (*diskReader, error) {
|
||||
e := &Entry{}
|
||||
if err := e.Load(jsonFilename); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := os.Open(dataFilename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &diskReader{
|
||||
entry: e,
|
||||
file: f,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Read attempts to read as much data as possible into the provided buffer.
|
||||
func (d *diskReader) Read(p []byte) (int, error) {
|
||||
return d.file.Read(p)
|
||||
}
|
||||
|
||||
// Close attempts to close the data file.
|
||||
func (d *diskReader) Close() error {
|
||||
return d.file.Close()
|
||||
}
|
||||
|
||||
// GetEntry returns the Entry associated with the file.
|
||||
func (d *diskReader) GetEntry() (*Entry, error) {
|
||||
return d.entry, nil
|
||||
}
|
105
vendor/github.com/nathan-osman/go-aptproxy/cache/downloader.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DownloadError conveys information about a download request that failed.
|
||||
type DownloadError struct {
|
||||
Status string
|
||||
}
|
||||
|
||||
// Error returns a description of the error.
|
||||
func (d *DownloadError) Error() string {
|
||||
return d.Status
|
||||
}
|
||||
|
||||
// downloader attempts to download a file from a remote URL.
|
||||
type downloader struct {
|
||||
doneMutex sync.Mutex
|
||||
err error
|
||||
entry *Entry
|
||||
entryMutex sync.Mutex
|
||||
}
|
||||
|
||||
// newDownloader creates a new downloader.
|
||||
func newDownloader(rawurl, jsonFilename, dataFilename string) *downloader {
|
||||
d := &downloader{}
|
||||
d.doneMutex.Lock()
|
||||
d.entryMutex.Lock()
|
||||
go func() {
|
||||
defer func() {
|
||||
d.doneMutex.Unlock()
|
||||
}()
|
||||
once := &sync.Once{}
|
||||
trigger := func() {
|
||||
once.Do(func() {
|
||||
d.entryMutex.Unlock()
|
||||
})
|
||||
}
|
||||
defer trigger()
|
||||
resp, err := http.Get(rawurl)
|
||||
if err != nil {
|
||||
d.err = err
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
d.err = &DownloadError{
|
||||
Status: resp.Status,
|
||||
}
|
||||
return
|
||||
}
|
||||
f, err := os.Create(dataFilename)
|
||||
if err != nil {
|
||||
d.err = err
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
d.entry = &Entry{
|
||||
URL: rawurl,
|
||||
ContentLength: strconv.FormatInt(resp.ContentLength, 10),
|
||||
ContentType: resp.Header.Get("Content-Type"),
|
||||
LastModified: resp.Header.Get("Last-Modified"),
|
||||
}
|
||||
if d.entry.ContentType == "" {
|
||||
d.entry.ContentType = "application/octet-stream"
|
||||
}
|
||||
if d.entry.LastModified == "" {
|
||||
d.entry.LastModified = time.Now().Format(http.TimeFormat)
|
||||
}
|
||||
if err = d.entry.Save(jsonFilename); err != nil {
|
||||
d.err = err
|
||||
return
|
||||
}
|
||||
trigger()
|
||||
n, err := io.Copy(f, resp.Body)
|
||||
if err != nil {
|
||||
d.err = err
|
||||
return
|
||||
}
|
||||
d.entry.ContentLength = strconv.FormatInt(n, 10)
|
||||
d.entry.Complete = true
|
||||
d.entry.Save(jsonFilename)
|
||||
}()
|
||||
return d
|
||||
}
|
||||
|
||||
// GetEntry retrieves the entry associated with the download.
|
||||
func (d *downloader) GetEntry() (*Entry, error) {
|
||||
d.entryMutex.Lock()
|
||||
defer d.entryMutex.Unlock()
|
||||
return d.entry, d.err
|
||||
}
|
||||
|
||||
// WaitForDone will block until the download completes.
|
||||
func (d *downloader) WaitForDone() error {
|
||||
d.doneMutex.Lock()
|
||||
defer d.doneMutex.Unlock()
|
||||
return d.err
|
||||
}
|
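A short package-internal sketch (an assumption about intended use, not code from this commit) showing how the two mutexes act as latches: GetEntry unblocks once the response headers have been recorded, while WaitForDone unblocks only after the body has been written to disk.

// Hypothetical helper inside package cache illustrating the latch behaviour of
// newDownloader: entryMutex gates GetEntry, doneMutex gates WaitForDone.
func fetchBlocking(rawurl, jsonFilename, dataFilename string) (*Entry, error) {
	d := newDownloader(rawurl, jsonFilename, dataFilename)
	if _, err := d.GetEntry(); err != nil { // returns once headers are known
		return nil, err
	}
	if err := d.WaitForDone(); err != nil { // returns once the data file is complete
		return nil, err
	}
	return d.GetEntry()
}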
35
vendor/github.com/nathan-osman/go-aptproxy/cache/entry.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Entry represents an individual item in the cache.
|
||||
type Entry struct {
|
||||
URL string `json:"url"`
|
||||
Complete bool `json:"complete"`
|
||||
ContentLength string `json:"content_length"`
|
||||
ContentType string `json:"content_type"`
|
||||
LastModified string `json:"last_modified"`
|
||||
}
|
||||
|
||||
// Load reads the entry from disk.
|
||||
func (e *Entry) Load(filename string) error {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
return json.NewDecoder(f).Decode(e)
|
||||
}
|
||||
|
||||
// Save writes the entry to disk.
|
||||
func (e *Entry) Save(filename string) error {
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
return json.NewEncoder(f).Encode(e)
|
||||
}
|
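For clarity, a hedged round-trip sketch of the on-disk format these methods produce; the field values and the temporary path are illustrative only:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/nathan-osman/go-aptproxy/cache"
)

func main() {
	e := &cache.Entry{
		URL:           "http://example.org/dists/stable/Release", // placeholder
		Complete:      true,
		ContentLength: "1024",
		ContentType:   "application/octet-stream",
		LastModified:  "Mon, 02 Jan 2006 15:04:05 GMT",
	}
	tmp := "/tmp/entry.json" // placeholder path
	if err := e.Save(tmp); err != nil {
		log.Fatal(err)
	}
	// The saved file is a single JSON object, e.g.:
	// {"url":"...","complete":true,"content_length":"1024",...}
	var loaded cache.Entry
	if err := loaded.Load(tmp); err != nil {
		log.Fatal(err)
	}
	fmt.Println(loaded.URL)
	_ = os.Remove(tmp)
}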
102
vendor/github.com/nathan-osman/go-aptproxy/cache/livereader.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"github.com/fsnotify/fsnotify"
|
||||
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// liveReader reads a file from disk, synchronizing reads with a downloader.
|
||||
type liveReader struct {
|
||||
downloader *downloader
|
||||
dataFilename string
|
||||
file *os.File
|
||||
entry *Entry
|
||||
done chan error
|
||||
err error
|
||||
eof bool
|
||||
}
|
||||
|
||||
// newLiveReader creates a reader from the provided downloader and data
|
||||
// file. fsnotify is used to watch for writes to the file to avoid using a
|
||||
// spinloop. Invoking this function assumes the existence of the data file.
|
||||
func newLiveReader(d *downloader, dataFilename string) (*liveReader, error) {
|
||||
l := &liveReader{
|
||||
downloader: d,
|
||||
dataFilename: dataFilename,
|
||||
done: make(chan error),
|
||||
}
|
||||
go func() {
|
||||
defer close(l.done)
|
||||
l.done <- d.WaitForDone()
|
||||
}()
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// Read attempts to read as much data as possible into the provided buffer.
|
||||
// Since data is being downloaded as data is being read, fsnotify is used to
|
||||
// monitor writes to the file. This function blocks until the requested amount
|
||||
// of data is read, an error occurs, or EOF is encountered.
|
||||
func (l *liveReader) Read(p []byte) (int, error) {
|
||||
if l.err != nil {
|
||||
return 0, l.err
|
||||
}
|
||||
if l.file == nil {
|
||||
f, err := os.Open(l.dataFilename)
|
||||
if err != nil {
|
||||
l.err = err
|
||||
return 0, l.err
|
||||
}
|
||||
l.file = f
|
||||
}
|
||||
bytesRead := 0
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
l.err = err
|
||||
return 0, l.err
|
||||
}
|
||||
defer watcher.Close()
|
||||
if err := watcher.Add(l.dataFilename); err != nil {
|
||||
l.err = err
|
||||
return 0, l.err
|
||||
}
|
||||
loop:
|
||||
for bytesRead < len(p) {
|
||||
n, err := l.file.Read(p[bytesRead:])
|
||||
bytesRead += n
|
||||
if err != nil {
|
||||
if err != io.EOF || l.eof {
|
||||
l.err = err
|
||||
break loop
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case e := <-watcher.Events:
|
||||
if e.Op&fsnotify.Write != fsnotify.Write {
|
||||
continue
|
||||
}
|
||||
case err = <-l.done:
|
||||
l.err = err
|
||||
l.eof = true
|
||||
}
|
||||
continue loop
|
||||
}
|
||||
}
|
||||
}
|
||||
return bytesRead, l.err
|
||||
}
|
||||
|
||||
// Close attempts to close the data file (if opened).
|
||||
func (l *liveReader) Close() error {
|
||||
if l.file != nil {
|
||||
return l.file.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetEntry returns the Entry associated with the file, blocking until either
|
||||
// the data is available or an error occurs.
|
||||
func (l *liveReader) GetEntry() (*Entry, error) {
|
||||
return l.downloader.GetEntry()
|
||||
}
|
202
vendor/github.com/pquerna/cachecontrol/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
547
vendor/github.com/pquerna/cachecontrol/cacheobject/directive.go
generated
vendored
Normal file
@ -0,0 +1,547 @@
|
||||
/**
|
||||
* Copyright 2015 Paul Querna
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package cacheobject
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TODO(pquerna): add extensions from here: http://www.iana.org/assignments/http-cache-directives/http-cache-directives.xhtml
|
||||
|
||||
var (
|
||||
ErrQuoteMismatch = errors.New("Missing closing quote")
|
||||
ErrMaxAgeDeltaSeconds = errors.New("Failed to parse delta-seconds in `max-age`")
|
||||
ErrSMaxAgeDeltaSeconds = errors.New("Failed to parse delta-seconds in `s-maxage`")
|
||||
ErrMaxStaleDeltaSeconds = errors.New("Failed to parse delta-seconds in `max-stale`")
|
||||
ErrMinFreshDeltaSeconds = errors.New("Failed to parse delta-seconds in `min-fresh`")
|
||||
ErrNoCacheNoArgs = errors.New("Unexpected argument to `no-cache`")
|
||||
ErrNoStoreNoArgs = errors.New("Unexpected argument to `no-store`")
|
||||
ErrNoTransformNoArgs = errors.New("Unexpected argument to `no-transform`")
|
||||
ErrOnlyIfCachedNoArgs = errors.New("Unexpected argument to `only-if-cached`")
|
||||
ErrMustRevalidateNoArgs = errors.New("Unexpected argument to `must-revalidate`")
|
||||
ErrPublicNoArgs = errors.New("Unexpected argument to `public`")
|
||||
ErrProxyRevalidateNoArgs = errors.New("Unexpected argument to `proxy-revalidate`")
|
||||
// Experimental
|
||||
ErrImmutableNoArgs = errors.New("Unexpected argument to `immutable`")
|
||||
ErrStaleIfErrorDeltaSeconds = errors.New("Failed to parse delta-seconds in `stale-if-error`")
|
||||
ErrStaleWhileRevalidateDeltaSeconds = errors.New("Failed to parse delta-seconds in `stale-while-revalidate`")
|
||||
)
|
||||
|
||||
func whitespace(b byte) bool {
|
||||
if b == '\t' || b == ' ' {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func parse(value string, cd cacheDirective) error {
|
||||
var err error = nil
|
||||
i := 0
|
||||
|
||||
for i < len(value) && err == nil {
|
||||
// eat leading whitespace or commas
|
||||
if whitespace(value[i]) || value[i] == ',' {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
j := i + 1
|
||||
|
||||
for j < len(value) {
|
||||
if !isToken(value[j]) {
|
||||
break
|
||||
}
|
||||
j++
|
||||
}
|
||||
|
||||
token := strings.ToLower(value[i:j])
|
||||
tokenHasFields := hasFieldNames(token)
|
||||
/*
|
||||
println("GOT TOKEN:")
|
||||
println(" i -> ", i)
|
||||
println(" j -> ", j)
|
||||
println(" token -> ", token)
|
||||
*/
|
||||
|
||||
if j+1 < len(value) && value[j] == '=' {
|
||||
k := j + 1
|
||||
// minimum size two bytes of "", but we let httpUnquote handle it.
|
||||
if k < len(value) && value[k] == '"' {
|
||||
eaten, result := httpUnquote(value[k:])
|
||||
if eaten == -1 {
|
||||
return ErrQuoteMismatch
|
||||
}
|
||||
i = k + eaten
|
||||
|
||||
err = cd.addPair(token, result)
|
||||
} else {
|
||||
z := k
|
||||
for z < len(value) {
|
||||
if tokenHasFields {
|
||||
if whitespace(value[z]) {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
if whitespace(value[z]) || value[z] == ',' {
|
||||
break
|
||||
}
|
||||
}
|
||||
z++
|
||||
}
|
||||
i = z
|
||||
|
||||
result := value[k:z]
|
||||
if result != "" && result[len(result)-1] == ',' {
|
||||
result = result[:len(result)-1]
|
||||
}
|
||||
|
||||
err = cd.addPair(token, result)
|
||||
}
|
||||
} else {
|
||||
if token != "," {
|
||||
err = cd.addToken(token)
|
||||
}
|
||||
i = j
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeltaSeconds specifies a non-negative integer, representing
|
||||
// time in seconds: http://tools.ietf.org/html/rfc7234#section-1.2.1
|
||||
//
|
||||
// When set to -1, this means unset.
|
||||
//
|
||||
type DeltaSeconds int32
|
||||
|
||||
// Parser for delta-seconds, a uint31, more or less:
|
||||
// http://tools.ietf.org/html/rfc7234#section-1.2.1
|
||||
func parseDeltaSeconds(v string) (DeltaSeconds, error) {
|
||||
n, err := strconv.ParseUint(v, 10, 32)
|
||||
if err != nil {
|
||||
if numError, ok := err.(*strconv.NumError); ok {
|
||||
if numError.Err == strconv.ErrRange {
|
||||
return DeltaSeconds(math.MaxInt32), nil
|
||||
}
|
||||
}
|
||||
return DeltaSeconds(-1), err
|
||||
} else {
|
||||
if n > math.MaxInt32 {
|
||||
return DeltaSeconds(math.MaxInt32), nil
|
||||
} else {
|
||||
return DeltaSeconds(n), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fields present in a header.
|
||||
type FieldNames map[string]bool
|
||||
|
||||
// internal interface for shared methods of RequestCacheDirectives and ResponseCacheDirectives
|
||||
type cacheDirective interface {
|
||||
addToken(s string) error
|
||||
addPair(s string, v string) error
|
||||
}
|
||||
|
||||
// LOW LEVEL API: Representation of possible request directives in a `Cache-Control` header: http://tools.ietf.org/html/rfc7234#section-5.2.1
|
||||
//
|
||||
// Note: Many fields will be `nil` in practice.
|
||||
//
|
||||
type RequestCacheDirectives struct {
|
||||
|
||||
// max-age(delta seconds): http://tools.ietf.org/html/rfc7234#section-5.2.1.1
|
||||
//
|
||||
// The "max-age" request directive indicates that the client is
|
||||
// unwilling to accept a response whose age is greater than the
|
||||
// specified number of seconds. Unless the max-stale request directive
|
||||
// is also present, the client is not willing to accept a stale
|
||||
// response.
|
||||
MaxAge DeltaSeconds
|
||||
|
||||
// max-stale(delta seconds): http://tools.ietf.org/html/rfc7234#section-5.2.1.2
|
||||
//
|
||||
// The "max-stale" request directive indicates that the client is
|
||||
// willing to accept a response that has exceeded its freshness
|
||||
// lifetime. If max-stale is assigned a value, then the client is
|
||||
// willing to accept a response that has exceeded its freshness lifetime
|
||||
// by no more than the specified number of seconds. If no value is
|
||||
// assigned to max-stale, then the client is willing to accept a stale
|
||||
// response of any age.
|
||||
MaxStale DeltaSeconds
|
||||
MaxStaleSet bool
|
||||
|
||||
// min-fresh(delta seconds): http://tools.ietf.org/html/rfc7234#section-5.2.1.3
|
||||
//
|
||||
// The "min-fresh" request directive indicates that the client is
|
||||
// willing to accept a response whose freshness lifetime is no less than
|
||||
// its current age plus the specified time in seconds. That is, the
|
||||
// client wants a response that will still be fresh for at least the
|
||||
// specified number of seconds.
|
||||
MinFresh DeltaSeconds
|
||||
|
||||
// no-cache(bool): http://tools.ietf.org/html/rfc7234#section-5.2.1.4
|
||||
//
|
||||
// The "no-cache" request directive indicates that a cache MUST NOT use
|
||||
// a stored response to satisfy the request without successful
|
||||
// validation on the origin server.
|
||||
NoCache bool
|
||||
|
||||
// no-store(bool): http://tools.ietf.org/html/rfc7234#section-5.2.1.5
|
||||
//
|
||||
// The "no-store" request directive indicates that a cache MUST NOT
|
||||
// store any part of either this request or any response to it. This
|
||||
// directive applies to both private and shared caches.
|
||||
NoStore bool
|
||||
|
||||
// no-transform(bool): http://tools.ietf.org/html/rfc7234#section-5.2.1.6
|
||||
//
|
||||
// The "no-transform" request directive indicates that an intermediary
|
||||
// (whether or not it implements a cache) MUST NOT transform the
|
||||
// payload, as defined in Section 5.7.2 of RFC7230.
|
||||
NoTransform bool
|
||||
|
||||
// only-if-cached(bool): http://tools.ietf.org/html/rfc7234#section-5.2.1.7
|
||||
//
|
||||
// The "only-if-cached" request directive indicates that the client only
|
||||
// wishes to obtain a stored response.
|
||||
OnlyIfCached bool
|
||||
|
||||
// Extensions: http://tools.ietf.org/html/rfc7234#section-5.2.3
|
||||
//
|
||||
// The Cache-Control header field can be extended through the use of one
|
||||
// or more cache-extension tokens, each with an optional value. A cache
|
||||
// MUST ignore unrecognized cache directives.
|
||||
Extensions []string
|
||||
}
|
||||
|
||||
func (cd *RequestCacheDirectives) addToken(token string) error {
|
||||
var err error = nil
|
||||
|
||||
switch token {
|
||||
case "max-age":
|
||||
err = ErrMaxAgeDeltaSeconds
|
||||
case "min-fresh":
|
||||
err = ErrMinFreshDeltaSeconds
|
||||
case "max-stale":
|
||||
cd.MaxStaleSet = true
|
||||
case "no-cache":
|
||||
cd.NoCache = true
|
||||
case "no-store":
|
||||
cd.NoStore = true
|
||||
case "no-transform":
|
||||
cd.NoTransform = true
|
||||
case "only-if-cached":
|
||||
cd.OnlyIfCached = true
|
||||
default:
|
||||
cd.Extensions = append(cd.Extensions, token)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (cd *RequestCacheDirectives) addPair(token string, v string) error {
|
||||
var err error = nil
|
||||
|
||||
switch token {
|
||||
case "max-age":
|
||||
cd.MaxAge, err = parseDeltaSeconds(v)
|
||||
if err != nil {
|
||||
err = ErrMaxAgeDeltaSeconds
|
||||
}
|
||||
case "max-stale":
|
||||
cd.MaxStale, err = parseDeltaSeconds(v)
|
||||
if err != nil {
|
||||
err = ErrMaxStaleDeltaSeconds
|
||||
}
|
||||
case "min-fresh":
|
||||
cd.MinFresh, err = parseDeltaSeconds(v)
|
||||
if err != nil {
|
||||
err = ErrMinFreshDeltaSeconds
|
||||
}
|
||||
case "no-cache":
|
||||
err = ErrNoCacheNoArgs
|
||||
case "no-store":
|
||||
err = ErrNoStoreNoArgs
|
||||
case "no-transform":
|
||||
err = ErrNoTransformNoArgs
|
||||
case "only-if-cached":
|
||||
err = ErrOnlyIfCachedNoArgs
|
||||
default:
|
||||
// TODO(pquerna): this sucks, making user re-parse
|
||||
cd.Extensions = append(cd.Extensions, token+"="+v)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// LOW LEVEL API: Parses a Cache Control Header from a Request into a set of directives.
|
||||
func ParseRequestCacheControl(value string) (*RequestCacheDirectives, error) {
|
||||
cd := &RequestCacheDirectives{
|
||||
MaxAge: -1,
|
||||
MaxStale: -1,
|
||||
MinFresh: -1,
|
||||
}
|
||||
|
||||
err := parse(value, cd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cd, nil
|
||||
}
|
||||
|
||||
// LOW LEVEL API: Representation of possible response directives in a `Cache-Control` header: http://tools.ietf.org/html/rfc7234#section-5.2.2
|
||||
//
|
||||
// Note: Many fields will be `nil` in practice.
|
||||
//
|
||||
type ResponseCacheDirectives struct {
|
||||
|
||||
// must-revalidate(bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.1
|
||||
//
|
||||
// The "must-revalidate" response directive indicates that once it has
|
||||
// become stale, a cache MUST NOT use the response to satisfy subsequent
|
||||
// requests without successful validation on the origin server.
|
||||
MustRevalidate bool
|
||||
|
||||
// no-cache(FieldName): http://tools.ietf.org/html/rfc7234#section-5.2.2.2
|
||||
//
|
||||
// The "no-cache" response directive indicates that the response MUST
|
||||
// NOT be used to satisfy a subsequent request without successful
|
||||
// validation on the origin server.
|
||||
//
|
||||
// If the no-cache response directive specifies one or more field-names,
|
||||
// then a cache MAY use the response to satisfy a subsequent request,
|
||||
// subject to any other restrictions on caching. However, any header
|
||||
// fields in the response that have the field-name(s) listed MUST NOT be
|
||||
// sent in the response to a subsequent request without successful
|
||||
// revalidation with the origin server.
|
||||
NoCache FieldNames
|
||||
|
||||
// no-cache(cast-to-bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.2
|
||||
//
|
||||
// While the RFC defines optional field-names on a no-cache directive,
|
||||
// many applications only want to know if any no-cache directives were
|
||||
// present at all.
|
||||
NoCachePresent bool
|
||||
|
||||
// no-store(bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.3
|
||||
//
|
||||
// The "no-store" request directive indicates that a cache MUST NOT
|
||||
// store any part of either this request or any response to it. This
|
||||
// directive applies to both private and shared caches.
|
||||
NoStore bool
|
||||
|
||||
// no-transform(bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.4
|
||||
//
|
||||
// The "no-transform" response directive indicates that an intermediary
|
||||
// (regardless of whether it implements a cache) MUST NOT transform the
|
||||
// payload, as defined in Section 5.7.2 of RFC7230.
|
||||
NoTransform bool
|
||||
|
||||
// public(bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.5
|
||||
//
|
||||
// The "public" response directive indicates that any cache MAY store
|
||||
// the response, even if the response would normally be non-cacheable or
|
||||
// cacheable only within a private cache.
|
||||
Public bool
|
||||
|
||||
// private(FieldName): http://tools.ietf.org/html/rfc7234#section-5.2.2.6
|
||||
//
|
||||
// The "private" response directive indicates that the response message
|
||||
// is intended for a single user and MUST NOT be stored by a shared
|
||||
// cache. A private cache MAY store the response and reuse it for later
|
||||
// requests, even if the response would normally be non-cacheable.
|
||||
//
|
||||
// If the private response directive specifies one or more field-names,
|
||||
// this requirement is limited to the field-values associated with the
|
||||
// listed response header fields. That is, a shared cache MUST NOT
|
||||
// store the specified field-names(s), whereas it MAY store the
|
||||
// remainder of the response message.
|
||||
Private FieldNames
|
||||
|
||||
// private(cast-to-bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.6
|
||||
//
|
||||
// While the RFC defines optional field-names on a private directive,
|
||||
// many applications only want to know if any private directives were
|
||||
// present at all.
|
||||
PrivatePresent bool
|
||||
|
||||
// proxy-revalidate(bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.7
|
||||
//
|
||||
// The "proxy-revalidate" response directive has the same meaning as the
|
||||
// must-revalidate response directive, except that it does not apply to
|
||||
// private caches.
|
||||
ProxyRevalidate bool
|
||||
|
||||
// max-age(delta seconds): http://tools.ietf.org/html/rfc7234#section-5.2.2.8
|
||||
//
|
||||
// The "max-age" response directive indicates that the response is to be
|
||||
// considered stale after its age is greater than the specified number
|
||||
// of seconds.
|
||||
MaxAge DeltaSeconds
|
||||
|
||||
// s-maxage(delta seconds): http://tools.ietf.org/html/rfc7234#section-5.2.2.9
|
||||
//
|
||||
// The "s-maxage" response directive indicates that, in shared caches,
|
||||
// the maximum age specified by this directive overrides the maximum age
|
||||
// specified by either the max-age directive or the Expires header
|
||||
// field. The s-maxage directive also implies the semantics of the
|
||||
// proxy-revalidate response directive.
|
||||
SMaxAge DeltaSeconds
|
||||
|
||||
////
|
||||
// Experimental features
|
||||
// - https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control#Extension_Cache-Control_directives
|
||||
// - https://www.fastly.com/blog/stale-while-revalidate-stale-if-error-available-today
|
||||
////
|
||||
|
||||
// immutable(cast-to-bool): experimental feature
|
||||
Immutable bool
|
||||
|
||||
// stale-if-error(delta seconds): experimental feature
|
||||
StaleIfError DeltaSeconds
|
||||
|
||||
// stale-while-revalidate(delta seconds): experimental feature
|
||||
StaleWhileRevalidate DeltaSeconds
|
||||
|
||||
// Extensions: http://tools.ietf.org/html/rfc7234#section-5.2.3
|
||||
//
|
||||
// The Cache-Control header field can be extended through the use of one
|
||||
// or more cache-extension tokens, each with an optional value. A cache
|
||||
// MUST ignore unrecognized cache directives.
|
||||
Extensions []string
|
||||
}
|
||||
|
||||
// LOW LEVEL API: Parses a Cache Control Header from a Response into a set of directives.
|
||||
func ParseResponseCacheControl(value string) (*ResponseCacheDirectives, error) {
|
||||
cd := &ResponseCacheDirectives{
|
||||
MaxAge: -1,
|
||||
SMaxAge: -1,
|
||||
// Experimental stale timeouts
|
||||
StaleIfError: -1,
|
||||
StaleWhileRevalidate: -1,
|
||||
}
|
||||
|
||||
err := parse(value, cd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cd, nil
|
||||
}
|
||||
|
||||
func (cd *ResponseCacheDirectives) addToken(token string) error {
|
||||
var err error = nil
|
||||
switch token {
|
||||
case "must-revalidate":
|
||||
cd.MustRevalidate = true
|
||||
case "no-cache":
|
||||
cd.NoCachePresent = true
|
||||
case "no-store":
|
||||
cd.NoStore = true
|
||||
case "no-transform":
|
||||
cd.NoTransform = true
|
||||
case "public":
|
||||
cd.Public = true
|
||||
case "private":
|
||||
cd.PrivatePresent = true
|
||||
case "proxy-revalidate":
|
||||
cd.ProxyRevalidate = true
|
||||
case "max-age":
|
||||
err = ErrMaxAgeDeltaSeconds
|
||||
case "s-maxage":
|
||||
err = ErrSMaxAgeDeltaSeconds
|
||||
// Experimental
|
||||
case "immutable":
|
||||
cd.Immutable = true
|
||||
case "stale-if-error":
|
||||
err = ErrMaxAgeDeltaSeconds
|
||||
case "stale-while-revalidate":
|
||||
err = ErrMaxAgeDeltaSeconds
|
||||
default:
|
||||
cd.Extensions = append(cd.Extensions, token)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func hasFieldNames(token string) bool {
|
||||
switch token {
|
||||
case "no-cache":
|
||||
return true
|
||||
case "private":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (cd *ResponseCacheDirectives) addPair(token string, v string) error {
|
||||
var err error = nil
|
||||
|
||||
switch token {
|
||||
case "must-revalidate":
|
||||
err = ErrMustRevalidateNoArgs
|
||||
case "no-cache":
|
||||
cd.NoCachePresent = true
|
||||
tokens := strings.Split(v, ",")
|
||||
if cd.NoCache == nil {
|
||||
cd.NoCache = make(FieldNames)
|
||||
}
|
||||
for _, t := range tokens {
|
||||
k := http.CanonicalHeaderKey(textproto.TrimString(t))
|
||||
cd.NoCache[k] = true
|
||||
}
|
||||
case "no-store":
|
||||
err = ErrNoStoreNoArgs
|
||||
case "no-transform":
|
||||
err = ErrNoTransformNoArgs
|
||||
case "public":
|
||||
err = ErrPublicNoArgs
|
||||
case "private":
|
||||
cd.PrivatePresent = true
|
||||
tokens := strings.Split(v, ",")
|
||||
if cd.Private == nil {
|
||||
cd.Private = make(FieldNames)
|
||||
}
|
||||
for _, t := range tokens {
|
||||
k := http.CanonicalHeaderKey(textproto.TrimString(t))
|
||||
cd.Private[k] = true
|
||||
}
|
||||
case "proxy-revalidate":
|
||||
err = ErrProxyRevalidateNoArgs
|
||||
case "max-age":
|
||||
cd.MaxAge, err = parseDeltaSeconds(v)
|
||||
case "s-maxage":
|
||||
cd.SMaxAge, err = parseDeltaSeconds(v)
|
||||
// Experimental
|
||||
case "immutable":
|
||||
err = ErrImmutableNoArgs
|
||||
case "stale-if-error":
|
||||
cd.StaleIfError, err = parseDeltaSeconds(v)
|
||||
case "stale-while-revalidate":
|
||||
cd.StaleWhileRevalidate, err = parseDeltaSeconds(v)
|
||||
default:
|
||||
// TODO(pquerna): this sucks, making user re-parse, and its technically not 'quoted' like the original,
|
||||
// but this is still easier, just a SplitN on "="
|
||||
cd.Extensions = append(cd.Extensions, token+"="+v)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
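For readers unfamiliar with the vendored package above, here is a minimal usage sketch of its two parsers (the header values are invented for illustration, not taken from this commit):

package main

import (
	"fmt"

	"github.com/pquerna/cachecontrol/cacheobject"
)

func main() {
	// Parse a response Cache-Control header into low-level directives.
	respDir, err := cacheobject.ParseResponseCacheControl("public, max-age=300, s-maxage=600")
	if err != nil {
		panic(err)
	}
	// Delta-seconds fields that were not present stay at -1.
	fmt.Println(respDir.Public, respDir.MaxAge, respDir.SMaxAge) // true 300 600

	// Request headers are parsed the same way.
	reqDir, err := cacheobject.ParseRequestCacheControl("no-cache, max-stale")
	if err != nil {
		panic(err)
	}
	fmt.Println(reqDir.NoCache, reqDir.MaxStaleSet) // true true
}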
93
vendor/github.com/pquerna/cachecontrol/cacheobject/lex.go
generated
vendored
Normal file
@ -0,0 +1,93 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cacheobject
|
||||
|
||||
// This file deals with lexical matters of HTTP
|
||||
|
||||
func isSeparator(c byte) bool {
|
||||
switch c {
|
||||
case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isCtl(c byte) bool { return (0 <= c && c <= 31) || c == 127 }
|
||||
|
||||
func isChar(c byte) bool { return 0 <= c && c <= 127 }
|
||||
|
||||
func isAnyText(c byte) bool { return !isCtl(c) }
|
||||
|
||||
func isQdText(c byte) bool { return isAnyText(c) && c != '"' }
|
||||
|
||||
func isToken(c byte) bool { return isChar(c) && !isCtl(c) && !isSeparator(c) }
|
||||
|
||||
// Valid escaped sequences are not specified in RFC 2616, so for now, we assume
|
||||
// that they coincide with the common sense ones used by GO. Malformed
|
||||
// characters should probably not be treated as errors by a robust (forgiving)
|
||||
// parser, so we replace them with the '?' character.
|
||||
func httpUnquotePair(b byte) byte {
|
||||
// skip the first byte, which should always be '\'
|
||||
switch b {
|
||||
case 'a':
|
||||
return '\a'
|
||||
case 'b':
|
||||
return '\b'
|
||||
case 'f':
|
||||
return '\f'
|
||||
case 'n':
|
||||
return '\n'
|
||||
case 'r':
|
||||
return '\r'
|
||||
case 't':
|
||||
return '\t'
|
||||
case 'v':
|
||||
return '\v'
|
||||
case '\\':
|
||||
return '\\'
|
||||
case '\'':
|
||||
return '\''
|
||||
case '"':
|
||||
return '"'
|
||||
}
|
||||
return '?'
|
||||
}
|
||||
|
||||
// raw must begin with a valid quoted string. Only the first quoted string is
|
||||
// parsed and is unquoted in result. eaten is the number of bytes parsed, or -1
|
||||
// upon failure.
|
||||
func httpUnquote(raw string) (eaten int, result string) {
|
||||
buf := make([]byte, len(raw))
|
||||
if raw[0] != '"' {
|
||||
return -1, ""
|
||||
}
|
||||
eaten = 1
|
||||
j := 0 // # of bytes written in buf
|
||||
for i := 1; i < len(raw); i++ {
|
||||
switch b := raw[i]; b {
|
||||
case '"':
|
||||
eaten++
|
||||
buf = buf[0:j]
|
||||
return i + 1, string(buf)
|
||||
case '\\':
|
||||
if len(raw) < i+2 {
|
||||
return -1, ""
|
||||
}
|
||||
buf[j] = httpUnquotePair(raw[i+1])
|
||||
eaten += 2
|
||||
j++
|
||||
i++
|
||||
default:
|
||||
if isQdText(b) {
|
||||
buf[j] = b
|
||||
} else {
|
||||
buf[j] = '?'
|
||||
}
|
||||
eaten++
|
||||
j++
|
||||
}
|
||||
}
|
||||
return -1, ""
|
||||
}
|
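httpUnquote is unexported, so it can only be exercised from inside the package; a hypothetical in-package test (not part of the vendored file) illustrating the contract documented above:

package cacheobject

import "testing"

// TestHTTPUnquoteSketch is an illustrative sketch, not shipped with the package.
func TestHTTPUnquoteSketch(t *testing.T) {
	// Only the first quoted string is parsed; eaten counts the bytes consumed,
	// including both quotes.
	eaten, result := httpUnquote(`"no-cache", private`)
	if eaten != 10 || result != "no-cache" {
		t.Fatalf("got eaten=%d result=%q", eaten, result)
	}

	// A missing closing quote is reported as eaten == -1.
	if eaten, _ := httpUnquote(`"broken`); eaten != -1 {
		t.Fatalf("expected eaten == -1, got %d", eaten)
	}
}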
398
vendor/github.com/pquerna/cachecontrol/cacheobject/object.go
generated
vendored
Normal file
@ -0,0 +1,398 @@
|
||||
/**
|
||||
* Copyright 2015 Paul Querna
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package cacheobject
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// LOW LEVEL API: Represents a potentially cachable HTTP object.
|
||||
//
|
||||
// This struct is designed to be serialized efficiently, so in a high
|
||||
// performance caching server, things like Date-Strings don't need to be
|
||||
// parsed for every use of a cached object.
|
||||
type Object struct {
|
||||
CacheIsPrivate bool
|
||||
|
||||
RespDirectives *ResponseCacheDirectives
|
||||
RespHeaders http.Header
|
||||
RespStatusCode int
|
||||
RespExpiresHeader time.Time
|
||||
RespDateHeader time.Time
|
||||
RespLastModifiedHeader time.Time
|
||||
|
||||
ReqDirectives *RequestCacheDirectives
|
||||
ReqHeaders http.Header
|
||||
ReqMethod string
|
||||
|
||||
NowUTC time.Time
|
||||
}
|
||||
|
||||
// LOW LEVEL API: Represents the results of examining an Object with
|
||||
// CachableObject and ExpirationObject.
|
||||
//
|
||||
// TODO(pquerna): decide if this is a good idea or bad
|
||||
type ObjectResults struct {
|
||||
OutReasons []Reason
|
||||
OutWarnings []Warning
|
||||
OutExpirationTime time.Time
|
||||
OutErr error
|
||||
}
|
||||
|
||||
// LOW LEVEL API: Check if a request is cacheable.
|
||||
// This function doesn't reset the passed ObjectResults.
|
||||
func CachableRequestObject(obj *Object, rv *ObjectResults) {
|
||||
switch obj.ReqMethod {
|
||||
case "GET":
|
||||
break
|
||||
case "HEAD":
|
||||
break
|
||||
case "POST":
|
||||
// Responses to POST requests can be cacheable if they include explicit freshness information
|
||||
break
|
||||
|
||||
case "PUT":
|
||||
rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodPUT)
|
||||
|
||||
case "DELETE":
|
||||
rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodDELETE)
|
||||
|
||||
case "CONNECT":
|
||||
rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodCONNECT)
|
||||
|
||||
case "OPTIONS":
|
||||
rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodOPTIONS)
|
||||
|
||||
case "TRACE":
|
||||
rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodTRACE)
|
||||
|
||||
// HTTP Extension Methods: http://www.iana.org/assignments/http-methods/http-methods.xhtml
|
||||
//
|
||||
// To my knowledge, none of them are cachable. Please open a ticket if this is not the case!
|
||||
//
|
||||
default:
|
||||
rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodUnknown)
|
||||
}
|
||||
|
||||
if obj.ReqDirectives != nil && obj.ReqDirectives.NoStore {
|
||||
rv.OutReasons = append(rv.OutReasons, ReasonRequestNoStore)
|
||||
}
|
||||
}
|
||||
|
||||
// LOW LEVEL API: Check if a response is cacheable.
|
||||
// This function doesn't reset the passed ObjectResults.
|
||||
func CachableResponseObject(obj *Object, rv *ObjectResults) {
|
||||
/**
|
||||
POST: http://tools.ietf.org/html/rfc7231#section-4.3.3
|
||||
|
||||
Responses to POST requests are only cacheable when they include
|
||||
explicit freshness information (see Section 4.2.1 of [RFC7234]).
|
||||
However, POST caching is not widely implemented. For cases where an
|
||||
origin server wishes the client to be able to cache the result of a
|
||||
POST in a way that can be reused by a later GET, the origin server
|
||||
MAY send a 200 (OK) response containing the result and a
|
||||
Content-Location header field that has the same value as the POST's
|
||||
effective request URI (Section 3.1.4.2).
|
||||
*/
|
||||
if obj.ReqMethod == http.MethodPost && !hasFreshness(obj.RespDirectives, obj.RespHeaders, obj.RespExpiresHeader, obj.CacheIsPrivate) {
|
||||
rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodPOST)
|
||||
}
|
||||
|
||||
// Storing Responses to Authenticated Requests: http://tools.ietf.org/html/rfc7234#section-3.2
|
||||
if obj.ReqHeaders.Get("Authorization") != "" {
|
||||
if obj.RespDirectives.MustRevalidate ||
|
||||
obj.RespDirectives.Public ||
|
||||
obj.RespDirectives.SMaxAge != -1 {
|
||||
// Expires of some kind present, this is potentially OK.
|
||||
} else {
|
||||
rv.OutReasons = append(rv.OutReasons, ReasonRequestAuthorizationHeader)
|
||||
}
|
||||
}
|
||||
|
||||
if obj.RespDirectives.PrivatePresent && !obj.CacheIsPrivate {
|
||||
rv.OutReasons = append(rv.OutReasons, ReasonResponsePrivate)
|
||||
}
|
||||
|
||||
if obj.RespDirectives.NoStore {
|
||||
rv.OutReasons = append(rv.OutReasons, ReasonResponseNoStore)
|
||||
}
|
||||
|
||||
/*
|
||||
the response either:
|
||||
|
||||
* contains an Expires header field (see Section 5.3), or
|
||||
|
||||
* contains a max-age response directive (see Section 5.2.2.8), or
|
||||
|
||||
* contains a s-maxage response directive (see Section 5.2.2.9)
|
||||
and the cache is shared, or
|
||||
|
||||
* contains a Cache Control Extension (see Section 5.2.3) that
|
||||
allows it to be cached, or
|
||||
|
||||
* has a status code that is defined as cacheable by default (see
|
||||
Section 4.2.2), or
|
||||
|
||||
* contains a public response directive (see Section 5.2.2.5).
|
||||
*/
|
||||
|
||||
if obj.RespHeaders.Get("Expires") != "" ||
|
||||
obj.RespDirectives.MaxAge != -1 ||
|
||||
(obj.RespDirectives.SMaxAge != -1 && !obj.CacheIsPrivate) ||
|
||||
cachableStatusCode(obj.RespStatusCode) ||
|
||||
obj.RespDirectives.Public {
|
||||
/* cachable by default, at least one of the above conditions was true */
|
||||
return
|
||||
}
|
||||
|
||||
rv.OutReasons = append(rv.OutReasons, ReasonResponseUncachableByDefault)
|
||||
}
|
||||
|
||||
// LOW LEVEL API: Check if an object is cachable.
|
||||
func CachableObject(obj *Object, rv *ObjectResults) {
|
||||
rv.OutReasons = nil
|
||||
rv.OutWarnings = nil
|
||||
rv.OutErr = nil
|
||||
|
||||
CachableRequestObject(obj, rv)
|
||||
CachableResponseObject(obj, rv)
|
||||
}
|
||||
|
||||
var twentyFourHours = time.Duration(24 * time.Hour)
|
||||
|
||||
const debug = false
|
||||
|
||||
// LOW LEVEL API: Update an object's expiration time.
|
||||
func ExpirationObject(obj *Object, rv *ObjectResults) {
|
||||
/**
|
||||
* Okay, lets calculate Freshness/Expiration now. woo:
|
||||
* http://tools.ietf.org/html/rfc7234#section-4.2
|
||||
*/
|
||||
|
||||
/*
|
||||
o If the cache is shared and the s-maxage response directive
|
||||
(Section 5.2.2.9) is present, use its value, or
|
||||
|
||||
o If the max-age response directive (Section 5.2.2.8) is present,
|
||||
use its value, or
|
||||
|
||||
o If the Expires response header field (Section 5.3) is present, use
|
||||
its value minus the value of the Date response header field, or
|
||||
|
||||
o Otherwise, no explicit expiration time is present in the response.
|
||||
A heuristic freshness lifetime might be applicable; see
|
||||
Section 4.2.2.
|
||||
*/
|
||||
|
||||
var expiresTime time.Time
|
||||
|
||||
if obj.RespDirectives.SMaxAge != -1 && !obj.CacheIsPrivate {
|
||||
expiresTime = obj.NowUTC.Add(time.Second * time.Duration(obj.RespDirectives.SMaxAge))
|
||||
} else if obj.RespDirectives.MaxAge != -1 {
|
||||
expiresTime = obj.NowUTC.UTC().Add(time.Second * time.Duration(obj.RespDirectives.MaxAge))
|
||||
} else if !obj.RespExpiresHeader.IsZero() {
|
||||
serverDate := obj.RespDateHeader
|
||||
if serverDate.IsZero() {
|
||||
// common enough case when a Date: header has not yet been added to an
|
||||
// active response.
|
||||
serverDate = obj.NowUTC
|
||||
}
|
||||
expiresTime = obj.NowUTC.Add(obj.RespExpiresHeader.Sub(serverDate))
|
||||
} else if !obj.RespLastModifiedHeader.IsZero() {
|
||||
// heuristic freshness lifetime
|
||||
rv.OutWarnings = append(rv.OutWarnings, WarningHeuristicExpiration)
|
||||
|
||||
// http://httpd.apache.org/docs/2.4/mod/mod_cache.html#cachelastmodifiedfactor
|
||||
// CacheMaxExpire defaults to 24 hours
|
||||
// CacheLastModifiedFactor: is 0.1
|
||||
//
|
||||
// expiry-period = MIN(time-since-last-modified-date * factor, 24 hours)
|
||||
//
|
||||
// obj.NowUTC
|
||||
|
||||
since := obj.RespLastModifiedHeader.Sub(obj.NowUTC)
|
||||
since = time.Duration(float64(since) * -0.1)
|
||||
|
||||
if since > twentyFourHours {
|
||||
expiresTime = obj.NowUTC.Add(twentyFourHours)
|
||||
} else {
|
||||
expiresTime = obj.NowUTC.Add(since)
|
||||
}
|
||||
|
||||
if debug {
|
||||
println("Now UTC: ", obj.NowUTC.String())
|
||||
println("Last-Modified: ", obj.RespLastModifiedHeader.String())
|
||||
println("Since: ", since.String())
|
||||
println("TwentyFourHours: ", twentyFourHours.String())
|
||||
println("Expiration: ", expiresTime.String())
|
||||
}
|
||||
} else {
|
||||
// TODO(pquerna): what should the default behavior be for expiration time?
|
||||
}
|
||||
|
||||
rv.OutExpirationTime = expiresTime
|
||||
}
|
||||
|
||||
// Evaluate cachability based on an HTTP request, and parts of the response.
|
||||
func UsingRequestResponse(req *http.Request,
|
||||
statusCode int,
|
||||
respHeaders http.Header,
|
||||
privateCache bool) ([]Reason, time.Time, error) {
|
||||
reasons, time, _, _, err := UsingRequestResponseWithObject(req, statusCode, respHeaders, privateCache)
|
||||
return reasons, time, err
|
||||
}
|
||||
|
||||
// Evaluate cachability based on an HTTP request, and parts of the response.
|
||||
// Returns the parsed Object as well.
|
||||
func UsingRequestResponseWithObject(req *http.Request,
|
||||
statusCode int,
|
||||
respHeaders http.Header,
|
||||
privateCache bool) ([]Reason, time.Time, []Warning, *Object, error) {
|
||||
var reqHeaders http.Header
|
||||
var reqMethod string
|
||||
|
||||
var reqDir *RequestCacheDirectives = nil
|
||||
respDir, err := ParseResponseCacheControl(respHeaders.Get("Cache-Control"))
|
||||
if err != nil {
|
||||
return nil, time.Time{}, nil, nil, err
|
||||
}
|
||||
|
||||
if req != nil {
|
||||
reqDir, err = ParseRequestCacheControl(req.Header.Get("Cache-Control"))
|
||||
if err != nil {
|
||||
return nil, time.Time{}, nil, nil, err
|
||||
}
|
||||
reqHeaders = req.Header
|
||||
reqMethod = req.Method
|
||||
}
|
||||
|
||||
var expiresHeader time.Time
|
||||
var dateHeader time.Time
|
||||
var lastModifiedHeader time.Time
|
||||
|
||||
if respHeaders.Get("Expires") != "" {
|
||||
expiresHeader, err = http.ParseTime(respHeaders.Get("Expires"))
|
||||
if err != nil {
|
||||
// sometimes servers will return `Expires: 0` or `Expires: -1` to
|
||||
// indicate expired content
|
||||
expiresHeader = time.Time{}
|
||||
}
|
||||
expiresHeader = expiresHeader.UTC()
|
||||
}
|
||||
|
||||
if respHeaders.Get("Date") != "" {
|
||||
dateHeader, err = http.ParseTime(respHeaders.Get("Date"))
|
||||
if err != nil {
|
||||
return nil, time.Time{}, nil, nil, err
|
||||
}
|
||||
dateHeader = dateHeader.UTC()
|
||||
}
|
||||
|
||||
if respHeaders.Get("Last-Modified") != "" {
|
||||
lastModifiedHeader, err = http.ParseTime(respHeaders.Get("Last-Modified"))
|
||||
if err != nil {
|
||||
return nil, time.Time{}, nil, nil, err
|
||||
}
|
||||
lastModifiedHeader = lastModifiedHeader.UTC()
|
||||
}
|
||||
|
||||
obj := Object{
|
||||
CacheIsPrivate: privateCache,
|
||||
|
||||
RespDirectives: respDir,
|
||||
RespHeaders: respHeaders,
|
||||
RespStatusCode: statusCode,
|
||||
RespExpiresHeader: expiresHeader,
|
||||
RespDateHeader: dateHeader,
|
||||
RespLastModifiedHeader: lastModifiedHeader,
|
||||
|
||||
ReqDirectives: reqDir,
|
||||
ReqHeaders: reqHeaders,
|
||||
ReqMethod: reqMethod,
|
||||
|
||||
NowUTC: time.Now().UTC(),
|
||||
}
|
||||
rv := ObjectResults{}
|
||||
|
||||
CachableObject(&obj, &rv)
|
||||
if rv.OutErr != nil {
|
||||
return nil, time.Time{}, nil, nil, rv.OutErr
|
||||
}
|
||||
|
||||
ExpirationObject(&obj, &rv)
|
||||
if rv.OutErr != nil {
|
||||
return nil, time.Time{}, nil, nil, rv.OutErr
|
||||
}
|
||||
|
||||
return rv.OutReasons, rv.OutExpirationTime, rv.OutWarnings, &obj, nil
|
||||
}
|
||||
|
||||
// calculate if a freshness directive is present: http://tools.ietf.org/html/rfc7234#section-4.2.1
|
||||
func hasFreshness(respDir *ResponseCacheDirectives, respHeaders http.Header, respExpires time.Time, privateCache bool) bool {
|
||||
if !privateCache && respDir.SMaxAge != -1 {
|
||||
return true
|
||||
}
|
||||
|
||||
if respDir.MaxAge != -1 {
|
||||
return true
|
||||
}
|
||||
|
||||
if !respExpires.IsZero() || respHeaders.Get("Expires") != "" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func cachableStatusCode(statusCode int) bool {
|
||||
/*
|
||||
Responses with status codes that are defined as cacheable by default
|
||||
(e.g., 200, 203, 204, 206, 300, 301, 404, 405, 410, 414, and 501 in
|
||||
this specification) can be reused by a cache with heuristic
|
||||
expiration unless otherwise indicated by the method definition or
|
||||
explicit cache controls [RFC7234]; all other status codes are not
|
||||
cacheable by default.
|
||||
*/
|
||||
switch statusCode {
|
||||
case 200:
|
||||
return true
|
||||
case 203:
|
||||
return true
|
||||
case 204:
|
||||
return true
|
||||
case 206:
|
||||
return true
|
||||
case 300:
|
||||
return true
|
||||
case 301:
|
||||
return true
|
||||
case 404:
|
||||
return true
|
||||
case 405:
|
||||
return true
|
||||
case 410:
|
||||
return true
|
||||
case 414:
|
||||
return true
|
||||
case 501:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
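A minimal sketch of how a caching proxy might call the high-level entry point defined above (the request URL and response headers are invented for illustration):

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/pquerna/cachecontrol/cacheobject"
)

func main() {
	// A hypothetical GET request the proxy has just forwarded upstream.
	req, err := http.NewRequest("GET", "http://archive.example.org/dists/stable/Release", nil)
	if err != nil {
		panic(err)
	}

	// Headers as they might appear on the upstream response.
	respHeaders := http.Header{}
	respHeaders.Set("Cache-Control", "max-age=120")
	respHeaders.Set("Date", time.Now().UTC().Format(http.TimeFormat))

	// privateCache=false applies the rules for a shared cache (i.e. a proxy).
	reasons, expires, err := cacheobject.UsingRequestResponse(req, http.StatusOK, respHeaders, false)
	if err != nil {
		panic(err)
	}
	if len(reasons) == 0 {
		fmt.Println("cacheable until", expires)
	} else {
		fmt.Println("not cacheable:", reasons)
	}
}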
95
vendor/github.com/pquerna/cachecontrol/cacheobject/reasons.go
generated
vendored
Normal file
@ -0,0 +1,95 @@
|
||||
/**
|
||||
* Copyright 2015 Paul Querna
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package cacheobject
|
||||
|
||||
// Represents a potential Reason to not cache an object.
|
||||
//
|
||||
// Applications may wish to ignore specific reasons, which will make them non-RFC
|
||||
// compliant, but this type gives them specific cases they can choose to ignore,
|
||||
// making them compliant in as many cases as they can.
|
||||
type Reason int
|
||||
|
||||
const (
|
||||
|
||||
// The request method was POST and an Expiration header was not supplied.
|
||||
ReasonRequestMethodPOST Reason = iota
|
||||
|
||||
// The request method was PUT and PUTs are not cachable.
|
||||
ReasonRequestMethodPUT
|
||||
|
||||
// The request method was DELETE and DELETEs are not cachable.
|
||||
ReasonRequestMethodDELETE
|
||||
|
||||
// The request method was CONNECT and CONNECTs are not cachable.
|
||||
ReasonRequestMethodCONNECT
|
||||
|
||||
// The request method was OPTIONS and OPTIONS are not cachable.
|
||||
ReasonRequestMethodOPTIONS
|
||||
|
||||
// The request method was TRACE and TRACEs are not cachable.
|
||||
ReasonRequestMethodTRACE
|
||||
|
||||
// The request method was not recognized by cachecontrol, and should not be cached.
|
||||
ReasonRequestMethodUnknown
|
||||
|
||||
// The request included an Cache-Control: no-store header
|
||||
ReasonRequestNoStore
|
||||
|
||||
// The request included an Authorization header without an explicit Public or Expiration time: http://tools.ietf.org/html/rfc7234#section-3.2
|
||||
ReasonRequestAuthorizationHeader
|
||||
|
||||
// The response included an Cache-Control: no-store header
|
||||
ReasonResponseNoStore
|
||||
|
||||
// The response included an Cache-Control: private header and this is not a Private cache
|
||||
ReasonResponsePrivate
|
||||
|
||||
// The response failed to meet at least one of the conditions specified in RFC 7234 section 3: http://tools.ietf.org/html/rfc7234#section-3
|
||||
ReasonResponseUncachableByDefault
|
||||
)
|
||||
|
||||
func (r Reason) String() string {
|
||||
switch r {
|
||||
case ReasonRequestMethodPOST:
|
||||
return "ReasonRequestMethodPOST"
|
||||
case ReasonRequestMethodPUT:
|
||||
return "ReasonRequestMethodPUT"
|
||||
case ReasonRequestMethodDELETE:
|
||||
return "ReasonRequestMethodDELETE"
|
||||
case ReasonRequestMethodCONNECT:
|
||||
return "ReasonRequestMethodCONNECT"
|
||||
case ReasonRequestMethodOPTIONS:
|
||||
return "ReasonRequestMethodOPTIONS"
|
||||
case ReasonRequestMethodTRACE:
|
||||
return "ReasonRequestMethodTRACE"
|
||||
case ReasonRequestMethodUnknown:
|
||||
return "ReasonRequestMethodUnkown"
|
||||
case ReasonRequestNoStore:
|
||||
return "ReasonRequestNoStore"
|
||||
case ReasonRequestAuthorizationHeader:
|
||||
return "ReasonRequestAuthorizationHeader"
|
||||
case ReasonResponseNoStore:
|
||||
return "ReasonResponseNoStore"
|
||||
case ReasonResponsePrivate:
|
||||
return "ReasonResponsePrivate"
|
||||
case ReasonResponseUncachableByDefault:
|
||||
return "ReasonResponseUncachableByDefault"
|
||||
}
|
||||
|
||||
panic(r)
|
||||
}
|
107
vendor/github.com/pquerna/cachecontrol/cacheobject/warning.go
generated
vendored
Normal file
@ -0,0 +1,107 @@
|
||||
/**
|
||||
* Copyright 2015 Paul Querna
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package cacheobject
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Represents an HTTP Warning: http://tools.ietf.org/html/rfc7234#section-5.5
|
||||
type Warning int
|
||||
|
||||
const (
|
||||
// Response is Stale
|
||||
// A cache SHOULD generate this whenever the sent response is stale.
|
||||
WarningResponseIsStale Warning = 110
|
||||
|
||||
// Revalidation Failed
|
||||
// A cache SHOULD generate this when sending a stale
|
||||
// response because an attempt to validate the response failed, due to an
|
||||
// inability to reach the server.
|
||||
WarningRevalidationFailed Warning = 111
|
||||
|
||||
// Disconnected Operation
|
||||
// A cache SHOULD generate this if it is intentionally disconnected from
|
||||
// the rest of the network for a period of time.
|
||||
WarningDisconnectedOperation Warning = 112
|
||||
|
||||
// Heuristic Expiration
|
||||
//
|
||||
// A cache SHOULD generate this if it heuristically chose a freshness
|
||||
// lifetime greater than 24 hours and the response's age is greater than
|
||||
// 24 hours.
|
||||
WarningHeuristicExpiration Warning = 113
|
||||
|
||||
// Miscellaneous Warning
|
||||
//
|
||||
// The warning text can include arbitrary information to be presented to
|
||||
// a human user or logged. A system receiving this warning MUST NOT
|
||||
// take any automated action, besides presenting the warning to the
|
||||
// user.
|
||||
WarningMiscellaneousWarning Warning = 199
|
||||
|
||||
// Transformation Applied
|
||||
//
|
||||
// This Warning code MUST be added by a proxy if it applies any
|
||||
// transformation to the representation, such as changing the
|
||||
// content-coding, media-type, or modifying the representation data,
|
||||
// unless this Warning code already appears in the response.
|
||||
WarningTransformationApplied Warning = 214
|
||||
|
||||
// Miscellaneous Persistent Warning
|
||||
//
|
||||
// The warning text can include arbitrary information to be presented to
|
||||
// a human user or logged. A system receiving this warning MUST NOT
|
||||
// take any automated action.
|
||||
WarningMiscellaneousPersistentWarning Warning = 299
|
||||
)
|
||||
|
||||
func (w Warning) HeaderString(agent string, date time.Time) string {
|
||||
if agent == "" {
|
||||
agent = "-"
|
||||
} else {
|
||||
// TODO(pquerna): this doesn't escape agent if it contains bad things.
|
||||
agent = `"` + agent + `"`
|
||||
}
|
||||
return fmt.Sprintf(`%d %s "%s" %s`, w, agent, w.String(), date.Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
func (w Warning) String() string {
|
||||
switch w {
|
||||
case WarningResponseIsStale:
|
||||
return "Response is Stale"
|
||||
case WarningRevalidationFailed:
|
||||
return "Revalidation Failed"
|
||||
case WarningDisconnectedOperation:
|
||||
return "Disconnected Operation"
|
||||
case WarningHeuristicExpiration:
|
||||
return "Heuristic Expiration"
|
||||
case WarningMiscellaneousWarning:
|
||||
// TODO(pquerna): ideally had a better way to override this one code.
|
||||
return "Miscellaneous Warning"
|
||||
case WarningTransformationApplied:
|
||||
return "Transformation Applied"
|
||||
case WarningMiscellaneousPersistentWarning:
|
||||
// TODO(pquerna): same as WarningMiscellaneousWarning
|
||||
return "Miscellaneous Persistent Warning"
|
||||
}
|
||||
|
||||
panic(w)
|
||||
}
|
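A short sketch of how a cache might turn one of these codes into a Warning header value (the agent name is arbitrary):

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/pquerna/cachecontrol/cacheobject"
)

func main() {
	w := cacheobject.WarningHeuristicExpiration

	// Produces something like: 113 "proxy" "Heuristic Expiration" Mon, 02 Jan 2006 15:04:05 GMT
	value := w.HeaderString("proxy", time.Now().UTC())

	h := http.Header{}
	h.Add("Warning", value)
	fmt.Println(h.Get("Warning"))
}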
3
vendor/golang.org/x/crypto/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at https://tip.golang.org/AUTHORS.
|
Some files were not shown because too many files have changed in this diff.